diff --git "a/3143.jsonl" "b/3143.jsonl" new file mode 100644--- /dev/null +++ "b/3143.jsonl" @@ -0,0 +1,621 @@ +{"seq_id":"609367698","text":"\nimport datetime\nimport os\n\n# -------------------------------------------------------------------------------------------------\n\nAPPNAME = 'vr_rt'\nVERSION = '0.1'\n\ntop = '.'\nout = 'out'\napps = 'apps'\n\n# .................................................................................................\n\nPRJ_REFS = ['vr_common', 'vr_common_testlib']\nREF_DIR = os.path.abspath ('..')\n\nOPT_INCLUDE = [ ]\nOPT_LIB = [ ]\n\n# .................................................................................................\n\ndef libset (env, ID, libs = ()): # a helper for multi-libs like boost\n libs = tuple (libs) if libs else (ID, ) # a one-lib libset can be abbreviated to its ID only\n for l in libs: env ['LIB_' + l] = l\n env ['libset.' + ID] = libs # used to recover individual libs for 'use' lists in build tasks\n\n global OPT_INCLUDE, OPT_LIB\n\n include_path = os.getenv ('_VR_' + ID + '_INCLUDE')\n if include_path: OPT_INCLUDE.extend (filter (None, include_path.split (':')))\n lib_path = os.getenv ('_VR_' + ID + '_LIB')\n if lib_path: OPT_LIB.extend (filter (None, lib_path.split (':')))\n\n# .................................................................................................\n\ndef shlib (ctx, target, source, includes = [], libsets = [], prj_libs = [], ** kwargs):\n use = prj_libs\n for ls in libsets: use.extend (ctx.env ['libset.' + ls])\n ctx.shlib (target = target, source = source, includes = ['src', 'test/src'] + includes, use = use, ** kwargs)\n\ndef app (ctx, target, source, includes = [], libsets = [], prj_libs = [], ** kwargs):\n use = prj_libs\n for ls in libsets: use.extend (ctx.env ['libset.' + ls])\n ctx.program (target = target, source = source, includes = ['src', 'test/src'] + includes, use = use, ** kwargs)\n\n# .................................................................................................\n\nDEFAULT_BUILD_VARIANT = 'debug'\nDEFAULT_INSTALL_VARIANT = 'bin'\n\nDEFAULT_OPT_LEVEL = '0'\nDEFAULT_DEBUG_INFO = '2' # min level at which gcc emits lineno info\n\nDEFAULT_SRC_VERSION = str (datetime.datetime.now ())\n\nBUILD_VARIANTS = (DEFAULT_BUILD_VARIANT, 'release')\nINSTALL_VARIANTS = (DEFAULT_INSTALL_VARIANT, 'devel')\n\nSO_VERSION = '0'\n\nCXXFLAGS = ['-std=c++14', '-faligned-new', '-Wall', '-march=native']\nDEFINES = ['BOOST_SYSTEM_NO_DEPRECATED', 'GTEST_HAS_TR1_TUPLE=0', 'DGTEST_USE_OWN_TR1_TUPLE=0']\n\nUSE_BOOST_LOCALE = False\n\nBOOST_LIBS = ['boost_system', 'boost_thread', 'boost_date_time', 'boost_filesystem', 'boost_iostreams', 'boost_program_options', 'boost_regex']\nPROF_LIBS = [ ]\n\n# .................................................................................................\n\nif USE_BOOST_LOCALE: BOOST_LIBS.append ('boost_locale')\n\n# .................................................................................................\n\ndef options (ctx):\n ctx.add_option ('-b', '--build-variant', action = 'store', choices = BUILD_VARIANTS, default = DEFAULT_BUILD_VARIANT, help = 'build variant')\n ctx.add_option ('-i', '--install-variant', action = 'store', choices = INSTALL_VARIANTS, default = DEFAULT_INSTALL_VARIANT, help = 'install variant')\n ctx.add_option ('-f', '--full-tests', action = 'store_true', help = 'build full testsuite (increases link time)')\n ctx.add_option ('-g', '--debug-info', action = 'store', default = DEFAULT_DEBUG_INFO, help = 'set specific debug info level')\n ctx.add_option ('-p', '--profiler', action = 'store_true', help = 'add profiling (link with gperfcpu)')\n ctx.add_option ( '--src-version', action = 'store', default = DEFAULT_SRC_VERSION, help = 'specify build source version')\n\ndef init (ctx):\n global out, SO_VERSION, CXXFLAGS, DEFINES, PROF_LIBS\n\n out = '.'.join ((out, ctx.options.build_variant))\n if ctx.options.build_variant == 'release':\n SO_VERSION = '1'\n CXXFLAGS.append ('-O3')\n DEFINES.extend (('NDEBUG', 'BOOST_DISABLE_ASSERTS'))\n\n if ctx.options.full_tests:\n DEFINES.append ('VR_FULL_TESTS')\n out = '.'.join ((out, 'full'))\n \n CXXFLAGS.append ('-g' + ctx.options.debug_info)\n if ctx.options.debug_info != DEFAULT_DEBUG_INFO:\n out = '.'.join ((out, 'g' + str (ctx.options.debug_info)))\n\n if ctx.options.profiler:\n PROF_LIBS.append ('gperfcpu')\n out = '.'.join ((out, 'prof'))\n \n # --- [finished determining 'out'] ---\n \n DEFINES.append ('VR_BUILD_SRC_VERSION=%s' % ctx.options.src_version)\n\n for ref in PRJ_REFS:\n os.environ ['_VR_' + ref + '_INCLUDE'] = ':'.join ((os.path.join (REF_DIR, ref, \"src\"), os.path.join (REF_DIR, ref, \"test/src\")))\n os.environ ['_VR_' + ref + '_LIB'] = ':'.join ((os.path.join (REF_DIR, ref, out), os.path.join (REF_DIR, ref, out, 'lib'))) # 2nd alternative for CD integration\n\n# ................................................................................................. \n\ndef configure (ctx):\n ctx.load ('g++')\n \n env = ctx.env\n\n env.NO_LOCK_IN_RUN = True\n env.NO_LOCK_IN_TOP = True\n\n libset (env, 'gcc') # grab custom gcc's lib64 path so it ends up in rpath\n\n libset (env, 'm')\n libset (env, 'pthread')\n libset (env, 'elfutils', ['dw'])\n libset (env, 'hwloc')\n libset (env, 'libicu', ['icuuc', 'icui18n', 'icudata'])\n libset (env, 'archive')\n libset (env, 'zstd')\n libset (env, 'hdf5')\n libset (env, 'curl')\n \n libset (env, 'lttng', ['lttng-ust', 'lttng-ctl', 'urcu-qsbr', 'dl'])\n libset (env, 'babeltrace', ['babeltrace', 'babeltrace-ctf'])\n libset (env, 'glog')\n libset (env, 'boost', BOOST_LIBS)\n libset (env, 'json') # header-only\n libset (env, 'googletest', ['gtest'])\n libset (env, 'gperfcpu', ['profiler'])\n \n libset (env, 'vr_common')\n libset (env, 'vr_common_testlib')\n \n env.CXXFLAGS = CXXFLAGS\n env.DEFINES = DEFINES\n env.INCLUDES = OPT_INCLUDE\n env.LIBPATH = OPT_LIB\n env.RPATH = ['$ORIGIN/.', '$ORIGIN/../lib'] + OPT_LIB\n\n env.vnum = SO_VERSION\n env.prof_libs = PROF_LIBS\n \n# ................................................................................................. \n\nREF_LIBS = ['vr_common']\nREF_TEST_LIBS = ['vr_common_testlib']\n\nSYS_LIBS = ['m', 'pthread', 'elfutils', 'hwloc', 'libicu', 'lttng']\nOPT_LIBS = ['glog', 'boost', 'archive', 'zstd', 'hdf5', 'curl'] + REF_LIBS\nTEST_LIBS = ['googletest', 'babeltrace'] + REF_TEST_LIBS\n\n# .................................................................................................\n\ndef build (ctx):\n install_is_devel = ctx.options.install_variant == 'devel'\n \n # core lib:\n \n shlib (ctx, target = APPNAME,\n source = ctx.path.ant_glob ('src/**/*.cpp', excl='src/**/*_test.*'),\n libsets = OPT_LIBS + SYS_LIBS,\n install_path = 'lib', vnum = ctx.env.vnum)\n \n # test support lib (factored out to be used by other projects):\n\n shlib (ctx, target = APPNAME + '_testlib',\n source = ctx.path.ant_glob ('test/src/**/*.cpp', excl='test/src/**/main.cpp'),\n libsets = ctx.env.prof_libs + TEST_LIBS + OPT_LIBS + SYS_LIBS,\n prj_libs = [APPNAME],\n install_path = 'lib' if install_is_devel else None)\n\n # core testsuite (note: compiling testcases into an .so as well, to fully reproduce PIC/TLS model etc)\n \n shlib (ctx, target = APPNAME + '_tests',\n source = ctx.path.ant_glob ('src/**/*_test.cpp'),\n libsets = OPT_LIBS + SYS_LIBS,\n prj_libs = [APPNAME, APPNAME + '_testlib'],\n install_path = None)\n\n # testsuite driver:\n \n app (ctx, target = APPNAME + '-tests',\n source = ctx.path.ant_glob ('test/src/**/main.cpp'),\n libsets = ctx.env.prof_libs + TEST_LIBS + OPT_LIBS + SYS_LIBS,\n prj_libs = [APPNAME + '_tests'],\n install_path = None)\n \n # apps:\n \n for a in os.listdir (apps):\n a_src_path = os.path.join (apps, a, 'src')\n a_source = ctx.path.ant_glob (a_src_path + '/**/*.cpp')\n if a_source: # avoid confusing build failure messages for apps with incomplete src trees\n app (ctx, target = a,\n source = a_source,\n includes = [ a_src_path ],\n libsets = ctx.env.prof_libs + OPT_LIBS + SYS_LIBS,\n prj_libs = [APPNAME])\n \n # install artifacts that aren't \"built\": \n\n ctx.install_files ('${PREFIX}/data', ctx.path.ant_glob ('data/**/*', excl='data/grammar data/**/.git*'),\n cwd = ctx.path.find_dir ('data'), relative_trick = True)\n \n if install_is_devel:\n ctx.install_files ('${PREFIX}/include', ctx.path.ant_glob ('src/**/*.h'),\n cwd = ctx.path.find_dir ('src'), relative_trick = True)\n ctx.install_files ('${PREFIX}/include', ctx.path.ant_glob ('test/src/**/*.h'),\n cwd = ctx.path.find_dir ('test/src'), relative_trick = True)\n\n# -------------------------------------------------------------------------------------------------\n","sub_path":"vr/vr_rt/wscript","file_name":"wscript","file_ext":"","file_size_in_byte":9163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"115937034","text":"from math import exp\nfrom numpy import cumsum\nfrom climate import rf_CH4, decay_CH4, rf_CO2, decay_CO2, dt_CH4, dt_CO2\n\n\ndef metric(length, measure, form, tau, a0_k = 2.17E-01, a1_k = 2.24E-01, \n a2_k = 2.82E-01, a3_k = 2.76E-01, tau1_k = 3.94E+02, tau2_k = 3.65E+01, \n tau3_k = 4.30E+00, tau_m = 12.4, A_k = 1.37e-2, A_m = 3.63e-4, f1_m = 0.5,\n f2_m = 0.15):\n # Calculates a list of yearly metric values starting in the present and\n # with specificed length, form, and time of interest tau.\n metric = [0] * length\n \n for te in range(0, length):\n # Calculate the metric value for each emission year as the ratio of\n # CH4 and CO2 proportional impacts:\n metric[te] = absolute_CH4(te, measure, form, tau, tau_m, A_m, f1_m, \n f2_m)/ absolute_CO2(te, measure, form, tau, a0_k, a1_k, a2_k,\n a3_k, tau1_k, tau2_k, tau3_k, A_k)\n\n return metric\n\ndef absolute_CH4(te, measure, form, tau, tau_m = 12.4, A_m = 3.63e-4, \n f1_m = 0.5, f2_m = 0.15):\n # Calculates the absolute metric for CH4 for a given emission year te\n # and specificed measure, form, and time of interest tau.\n late_date = te + 120\n absolute = 0\n\n for ti in range(te, late_date):\n # Add up impact of emission in te in weighted impact years ti:\n absolute += weight(ti, te, form, tau) * \\\n impact_CH4(ti, te, measure, tau_m, A_m, f1_m, f2_m)\n \n return absolute\n\ndef absolute_CO2(te, measure, form, tau, a0_k = 2.17E-01, a1_k = 2.24E-01, \n a2_k = 2.82E-01, a3_k = 2.76E-01, tau1_k = 3.94E+02, tau2_k = 3.65E+01, \n tau3_k = 4.30E+00, A_k = 1.37e-2):\n # Calculates the absolute metric for CO2 for a given emission year te \n # and specificed measure, form, and time of interest tau.\n late_date = te + 120\n absolute = 0\n\n for ti in range(te, late_date):\n # Add up impact of emission in te in weighted impact years ti:\n absolute += weight(ti, te, form, tau) * impact_CO2(ti, te, measure,\n a0_k, a1_k, a2_k, a3_k, tau1_k, tau2_k, tau3_k, A_k)\n \n return absolute\n\ndef impact_CH4(ti, te, measure, tau_m = 12.4, A_m = 3.63e-4, f1_m = 0.5,\n f2_m = 0.15):\n # Calculates the impact of a pulse of CH4 emitted at te in ti.\n conversion = 1000/16.04 #units: proportional ppb CH4 per gram\n\n # Calculate radiative forcing impact:\n conc = decay_CH4(ti - te, tau_m) * conversion\n\n if measure == 'rf':\n impact = rf_CH4(conc, A_m, f1_m, f2_m)\n elif measure == 'temp':\n dt_CH4(conc)\n\n return impact\n\ndef impact_CO2(ti, te, measure, a0_k = 2.17E-01, a1_k = 2.24E-01, \n a2_k = 2.82E-01, a3_k = 2.76E-01, tau1_k = 3.94E+02, tau2_k = 3.65E+01, \n tau3_k = 4.30E+00, A_k = 1.37e-2):\n # Calculates the impact of a pulse of CO2 emitted at te in ti.\n conversion = 1/44.01 #units: proportional ppm CO2 per gram \n \n # Calculate radiative forcing impact:\n conc = decay_CO2(ti - te, a0_k, a1_k, a2_k, a3_k, tau1_k, tau2_k, \n tau3_k) * conversion\n\n if measure == 'rf':\n impact = rf_CO2(conc, A_k)\n elif measure == 'temp':\n dt_CO2(conc)\n \n return impact\n\ndef weight(ti, te, form, tau):\n # Calculates the price (or something proportional to it) in impact year\n # ti of a pulse in emission year te given year of interest tau. \n \n # A pulse emission can't have an effect on an earlier impact year:\n if ti < te:\n weight = 0\n # Dynamic end-point ICI metric with stabilizaton year tau:\n elif form == 'ici':\n # For emissions years prior to tau, we care about impacts in tau:\n if te <= tau and ti == tau:\n weight = 1\n # We want the instantaneous tradeoff for emissions after tau:\n elif te > tau and ti == te:\n weight = 1\n else: weight = 0 \n # Dynamic integrated CCI metric with stabilizaton year tau: \n elif form == 'cci':\n # For emissions prior to tau, we care about impacts up until tau:\n if te <= tau and ti <= tau:\n weight = 1\n # We want the instantaneous tradeoff for emissions after tau:\n elif te > tau and ti == te:\n weight = 1\n else: weight = 0\n # Static integrated GWP metric with time horizon tau:\n elif form == 'gwp':\n # Regardless of te, assign a weight of 1 for tau years after te:\n if ti >= te and ti <= te + tau:\n weight = 1\n else: weight = 0\n \n return weight ","sub_path":"metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":4446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"193076263","text":"# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport pytest\n\nfrom octane.helpers import transformations as ts\n\n\ndef test_reset_gw_admin(mocker):\n host_config = DEPLOYMENT_INFO\n gateway = '10.10.10.10'\n\n res = ts.reset_gw_admin(host_config, gateway)\n\n assert res['network_scheme']['endpoints']['br-fw-admin']['gateway'] == \\\n gateway\n\n\ndef test_get_network_gw(mocker):\n net_name = 'test_net'\n gateway = '10.10.10.10'\n data = {\n 'networks': [\n {\n 'name': net_name,\n 'gateway': gateway\n }\n ]\n }\n\n res = ts.get_network_gw(data, net_name)\n\n assert res == gateway\n\n\ndef test_get_network_gw_no_gw(mocker):\n net_name = 'test_net'\n data = {\n 'networks': [{\n 'name': net_name,\n }]\n }\n\n res = ts.get_network_gw(data, net_name)\n\n assert res is None\n\n\ndef test_get_network_gw_no_net(mocker):\n net_name = 'test_net'\n data = {\n 'networks': [{\n 'name': 'another_test_net',\n 'gateway': '10.10.10.10'\n }]\n }\n\n res = ts.get_network_gw(data, net_name)\n\n assert res is None\n\n\nDEPLOYMENT_INFO = {\n 'network_scheme': {\n 'endpoints': {\n 'br-ex': {'gateway': '172.16.0.1', },\n 'br-fw-admin': {}\n }\n }\n}\n\nDEFAULT_OVS_ACTION = {\n 'action': 'add-patch',\n 'bridges': ['test-br']\n}\nDEFAULT_LNX_ACTION = {\n 'action': 'add-port',\n 'bridge': 'test-br'\n}\nOVS_ACTION = {\n 'action': 'add-patch',\n 'bridges': ['test-br'],\n 'provider': 'ovs'\n}\nLNX_ACTION = {\n 'action': 'add-port',\n 'bridge': 'test-br',\n 'provider': 'lnx'\n}\nADD_LNX_BR_ACTION = {\n 'action': 'add-br',\n 'provider': 'lnx',\n 'name': 'test-br'\n}\nADD_OVS_BR_ACTION = {\n 'action': 'add-br',\n 'provider': 'ovs',\n 'name': 'test-br'\n}\nHOST_CONFIG_6_0 = {\n 'openstack_version': '2014.2-6.0',\n 'network_scheme': {\n 'transformations': [\n DEFAULT_LNX_ACTION,\n DEFAULT_OVS_ACTION\n ]\n }\n}\nHOST_CONFIG_6_0_1 = {\n 'openstack_version': 'fake-6.0.1',\n 'network_scheme': {\n 'transformations': [\n DEFAULT_LNX_ACTION,\n DEFAULT_OVS_ACTION\n ]\n }\n}\nHOST_CONFIG_6_1 = {\n 'openstack_version': '2014.2.2-6.1',\n 'network_scheme': {\n 'transformations': [\n DEFAULT_LNX_ACTION,\n ]\n }\n}\nHOST_CONFIG_7_0 = {\n 'openstack_version': '2015.1.0-7.0',\n 'network_scheme': {\n 'transformations': [\n OVS_ACTION,\n DEFAULT_OVS_ACTION,\n ADD_OVS_BR_ACTION\n ]\n }\n}\n\n\n@pytest.mark.parametrize('host_config,expected_action', [\n (HOST_CONFIG_6_0, DEFAULT_OVS_ACTION),\n (HOST_CONFIG_6_0_1, DEFAULT_OVS_ACTION),\n (HOST_CONFIG_6_1, DEFAULT_LNX_ACTION),\n (HOST_CONFIG_7_0, OVS_ACTION)])\ndef test_patch_port_action(host_config, expected_action):\n bridge = 'test-br'\n\n res, _ = ts.get_patch_port_action(host_config, bridge)\n assert res == expected_action\n","sub_path":"octane/tests/test_transformations.py","file_name":"test_transformations.py","file_ext":"py","file_size_in_byte":3511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"64258254","text":"import numpy as np\n\ndef reshape_to_components(tensor, GMM_c):\n return tensor.reshape(tensor.shape[:-1] + (GMM_c, -1))\n\nclass GMM2D_Numpy(object):\n def __init__(self, log_pis, mus, log_sigmas, corrs, \n clip_lo=-10, clip_hi=10):\n\n # GMM_c: GMM components\n # pis: [..., GMM_c]\n # mus: [..., GMM_c * 2]\n # sigmas: [..., GMM_c * 2]\n # corrs: [..., GMM_c]\n GMM_c = log_pis.shape[-1]\n\n # Sigma = [s1^2 p*s1*s2 L = [s1 0\n # p*s1*s2 s2^2 ] p*s2 sqrt(1-p^2)*s2]\n log_pis = log_pis - torch.logsumexp(log_pis, dim=-1, keepdim=True)\n mus = reshape_to_components(mus, GMM_c) # [..., GMM_c, 2]\n log_sigmas = reshape_to_components(torch.clamp(log_sigmas, min=clip_lo, max=clip_hi), GMM_c)\n sigmas = torch.exp(log_sigmas) # [..., GMM_c, 2]\n one_minus_rho2 = 1 - corrs ** 2 # [..., GMM_c]\n\n self.L1 = sigmas * torch.stack([torch.ones_like(corrs), corrs], dim=-1)\n self.L2 = sigmas * torch.stack([torch.zeros_like(corrs), \n torch.sqrt(one_minus_rho2)], dim=-1)\n\n self.batch_shape = log_pis.shape[:-1]\n self.GMM_c = GMM_c\n self.log_pis = log_pis # [..., GMM_c]\n self.mus = mus # [..., GMM_c, 2]\n self.log_sigmas = log_sigmas # [..., GMM_c, 2]\n self.sigmas = sigmas # [..., GMM_c, 2]\n self.corrs = corrs # [..., GMM_c]\n self.one_minus_rho2 = one_minus_rho2 # [..., GMM_c]\n self.cat = Categorical(logits=log_pis)\n\n\n def sample(self):\n MVN_samples = (self.mus \n + self.L1 * torch.unsqueeze(\n torch.randn_like(self.corrs), dim=-1) # [..., GMM_c, 2]\n + self.L2 * torch.unsqueeze(\n torch.randn_like(self.corrs), dim=-1) # (manual 2x2 matmul)\n ) \n cat_samples = self.cat.sample()\n selector = torch.unsqueeze(to_one_hot(cat_samples, self.GMM_c), dim=-1)\n return torch.sum(MVN_samples * selector, dim=-2)","sub_path":"modules/gmm2d_numpy.py","file_name":"gmm2d_numpy.py","file_ext":"py","file_size_in_byte":2172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"411239768","text":"from django.http import HttpResponse\nfrom django.shortcuts import redirect\nfrom django.template import Context\nfrom django.views.decorators.http import require_http_methods\n\nfrom app.models import Notificacao, Pedido\nfrom app.views.snippet_template import render_block_to_string\n\n\n@require_http_methods([\"GET\"])\ndef notificacao_pedido(request):\n notificacao = Notificacao.objects.filter(to=request.user, type_message='NOVO_PEDIDO', is_read=False).last()\n context = Context({'notificacao': notificacao, 'user': request.user})\n return_str = render_block_to_string('painel/includes/notificacao.html', context)\n # Nao marcar como lido. Marcar somente depois que aceitar ou rejeitar.\n # if notificacao:\n # notificacao.is_read = True\n # notificacao.save()\n return HttpResponse(return_str)\n\n\ndef mark_read(request):\n notificacao = Notificacao.objects.filter(to=request.user, type_message='NOVO_PEDIDO', is_read=False).last()\n if notificacao:\n notificacao.is_read = True\n notificacao.save()\n\n\ndef aceitar_pedido(request, pk):\n mark_read(request)\n pedido = Pedido.objects.get(id=pk)\n pedido.status_pedido = 'ACEITO'\n pedido.save()\n return redirect('/dashboard')\n\n\ndef rejeitar_pedido(request, pk):\n mark_read(request)\n pedido = Pedido.objects.get(id=pk)\n pedido.status_pedido = 'REJEITADO'\n pedido.save()\n return redirect('/dashboard')\n","sub_path":"app/views/painel/pedido/PedidoView.py","file_name":"PedidoView.py","file_ext":"py","file_size_in_byte":1409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"211629019","text":"import random\nfrom flask import Flask\nfrom flask import render_template\n#######################################\nchoose = ['rock', 'paper', 'scisors']\n\napp = Flask(__name__)\n#######################################\ndef winners():\n\tif winner == 'compucter':\n\t\treturn render_template('2.html')\n\telse:\n\t\treturn render_template('3.html')\n\n\ndef random2():\n\tchoose = random.choice(choose)\ndef game(player, bot):\n\tif (bot == 'rock' and player == 'paper') or (bot == 'paper' and player == 'scisors') or (bot == 'scisors' and player == 'rock'):\n\t\twinner = 'player'\n#######################################\n\telif (bot == 'paper' and player == 'rock') or (bot == 'scisors' and player == 'paper') or (bot == 'rock' and player == 'scisors'):\n\t\twinner = 'compucter'\n\telse:\n\t\twinner = 'Tie'\n\n\n@app.route('/')\ndef home():\n return render_template('index.html')\n#######################################\n@app.route('/rock')\n\ndef rock():\n\tbot =random2()\n\tplayer = \"rock\"\n\tgame(player, )\n#######################################\n@app.route('/paper')\n\ndef paper():\n\trandom2()\n\tplayer = \"paper\"\n\tgame()\n#######################################\n@app.route('/scisors')\n\ndef scisors():\n\trandom2()\n\tplayer = \"scisors\"\n\tgame()\n#######################################\n@app.route('/tie')\n\ndef tie():\n\twinner = 'Tie'\n\treturn render_template('1.html')\n#######################################\nif __name__ == '__main__':\n app.run()","sub_path":".history/l5/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA/app_20200705181610.py","file_name":"app_20200705181610.py","file_ext":"py","file_size_in_byte":1395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"305987321","text":"import codecs\nimport os\nfrom collections import defaultdict\n\nimport editdistance\nimport numpy as np\nimport pandas as pd\n\ninput_datapath = \"data/input/raw\"\noutput_datapath = \"data/output\"\ngroundtruth_datapath = \"data/groundtruth\"\n\nif __name__ == \"__main__\":\n true_count_dict = defaultdict(lambda: 0)\n total_count_dict = defaultdict(lambda: 0)\n edit_distance_dict = defaultdict(lambda: 0)\n org_edit_distance_dict = defaultdict(lambda: 0)\n for file_name in os.listdir(output_datapath):\n output_file_path = os.path.join(output_datapath, file_name)\n groundtruth_file_path = os.path.join(groundtruth_datapath, file_name)\n input_file_path = os.path.join(input_datapath, file_name)\n\n with codecs.open(input_file_path, encoding=\"utf-8\") as reader:\n input_data = list(reader.readlines())\n\n with codecs.open(output_file_path, encoding=\"utf-8\") as reader:\n output_data = list(reader.readlines())\n\n with codecs.open(groundtruth_file_path, encoding=\"utf-8\") as reader:\n groundtruth_data = list(reader.readlines())\n\n for idx, line in enumerate(output_data):\n print(file_name, idx)\n # print(line.strip(), groundtruth_data[idx].strip())\n try:\n if line.strip() == groundtruth_data[idx].strip():\n true_count_dict[file_name] += 1\n edit_distance_dict[file_name] += editdistance.eval(line.strip(), groundtruth_data[idx].strip())\n org_edit_distance_dict[file_name] += editdistance.eval(groundtruth_data[idx].strip(),\n input_data[idx].strip())\n except:\n edit_distance_dict[file_name] += len(line.strip())\n org_edit_distance_dict[file_name] += len(input_data[idx].strip())\n pass\n total_count_dict[file_name] += 1\n\n result_list = []\n for key in total_count_dict:\n result_list.append([key, true_count_dict[key] * 1.0 / total_count_dict[key],\n edit_distance_dict[key] * 1.0 / total_count_dict[key],\n org_edit_distance_dict[key] * 1.0 / total_count_dict[key]])\n\n result_list.append([\"Mean\", np.mean([x[1] for x in result_list]), np.mean([x[2] for x in result_list]),\n np.mean([x[3] for x in result_list])])\n\n result_list.append([\"Total\", sum(true_count_dict.values()) * 1.0 / sum(total_count_dict.values()),\n sum(edit_distance_dict.values()) * 1.0 / sum(total_count_dict.values()),\n sum(org_edit_distance_dict.values()) * 1.0 / sum(total_count_dict.values())])\n\n df = pd.DataFrame(result_list)\n df.to_csv(\"result.csv\", header=[\"Name\", \"Accuracy\", \"Avg Distance\", \"Avg Org Distance\"], index=False)\n","sub_path":"src/main/run_calculate_accuracy.py","file_name":"run_calculate_accuracy.py","file_ext":"py","file_size_in_byte":2847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"166986393","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\ninfile = open(\"../data/N500_jacobi.txt\",\"r\")\nfirst_line = infile.readline().split()\ntolerance = eval(first_line[0].split(\"=\")[1])\niterations = eval(first_line[1].split(\"=\")[1])\nn = eval(first_line[2].split(\"=\")[1])\ninfile.readline()\nrho = infile.readline().split()\nfor i_,x_ in enumerate(rho):\n rho[i_] = eval(x_)\n\ninfile.readline()\n\nLlambda = []\nu = []\n\ntemp = []\nfor line in infile:\n line = line.split()\n Llambda.append(eval(line[0]))\n\n for u_val in line[1:]:\n temp.append(eval(u_val))\n u.append(temp)\n temp = []\n\n\nfor i in range(0,3):\n plt.plot(rho,u[i],label=r\"$\\lambda_%g=%.2f$\"%(i+1,Llambda[i]))\n #plt.text(rho[15],u[i][15],r\"$\\lambda_%g=%.2f$\"%(i+1,Llambda[i]),fontsize=14)\nplt.xlabel(r\"$\\rho$\",fontsize=12)\nplt.ylabel(\"u\",fontsize=12)\nplt.legend()\nplt.savefig(\"lambda_one_electron.png\")\n","sub_path":"code/psi_plotter.py","file_name":"psi_plotter.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"130657121","text":"import numpy\nimport theano\n\ntexts = numpy.asarray([[1,1],[2,0],[1,0],[0,1],[0,1]])\nscores = numpy.asarray([-2,2,0,1,-1])\n\ninput_vector = theano.tensor.fvector('input_vector') #theano variable representing image\ntarget_values = theano.tensor.fvector('target_values') #theano variable representing the label of that image\n\nW = theano.shared(numpy.zeros((5,2)), 'W')\nactivations = theano.tensor.dot(W, input_vector)\npredicted_values = theano.tensor.nnet.sigmoid(activations)\npredicted_class = theano.tensor.argmax(predicted_values)\nAccuracy = -theano.tensor.sqr(predicted_values - target_values).sum()\ngradients = theano.tensor.grad(Accuracy, W)\nlist_of_updates = [(W, W + 1 * gradients)]\ntrain = theano.function(\n [input_vector, target_values],\n [W, activations, predicted_values, predicted_class, Accuracy, gradients],\n updates = list_of_updates , allow_input_downcast=True)\n\ndata_vector = [0., 1.]\ntarget_vector = [1,0,0,0,0]\nW, activations, predicted_values, predicted_class, Accuracy, gradients_W \\\n = train(data_vector, target_vector)\nprint (W, activations, predicted_values, predicted_class, Accuracy, '\\n', gradients_W)\n\n\n","sub_path":"Archive/CS412 - 2020/Assignment 3 answers/Lab 5 task F.py","file_name":"Lab 5 task F.py","file_ext":"py","file_size_in_byte":1141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"648640636","text":"#!/usr/bin/python3\n\"\"\" Create a new view for State that handles all default RestFul API \"\"\"\n\nfrom api.v1.views import app_views\nfrom models import storage\nfrom models.city import City\nfrom models.user import User\nfrom models.place import Place\nfrom flask import Flask, jsonify, abort, request\n\n\n@app_views.route('/cities//places', methods=['GET'],\n strict_slashes=False)\ndef Places_Get(city_id):\n \"\"\"Retrieves the list of all Place objects of a City \"\"\"\n\n city = storage.get(\"City\", city_id)\n\n if city is None:\n abort(404)\n\n data = storage.all('Place')\n places_list = []\n\n for key, value in data.items():\n if value.city_id == city_id:\n places_list.append(value.to_dict())\n\n return jsonify(places_list), 200\n\n\n@app_views.route('/places/', methods=['GET'], strict_slashes=False)\ndef Place_Get(place_id):\n \"\"\" Retrieves a Place object \"\"\"\n place = storage.get('Place', place_id)\n\n if place is None:\n abort(404)\n return jsonify(place.to_dict()), 200\n\n\n@app_views.route('/places/', methods=['DELETE'],\n strict_slashes=False)\ndef Places_Delete(place_id):\n \"\"\" Delete a Place object \"\"\"\n data = storage.all('Place')\n del_place = storage.get('Place', place_id)\n if del_place is None:\n abort(404)\n storage.delete(del_place)\n storage.save()\n return jsonify({}), 200\n\n\n@app_views.route('/cities//places', methods=['POST'],\n strict_slashes=False)\ndef Places_Post(city_id):\n \"\"\" Create a Place \"\"\"\n data_req = request.get_json()\n data_city = storage.get('City', city_id)\n\n if data_city is None:\n abort(404)\n if not data_req:\n return jsonify({\"message\": \"Not a JSON\"}), 400\n if \"user_id\" not in data_req:\n return jsonify({\"message\": \"Missing user_id\"}), 400\n else:\n data_user = storage.get('User', data_req[\"user_id\"])\n if data_user is None:\n print(\"entro\", data_req[\"user_id\"])\n abort(404)\n if \"name\" not in data_req:\n return jsonify({\"message\": \"Missing name\"}), 400\n\n data_req[\"city_id\"] = city_id\n print(data_req)\n new_place = Place(**data_req)\n new_place.save()\n return jsonify(new_place.to_dict()), 201\n\n\n@app_views.route('/places/', methods=['PUT'],\n strict_slashes=False)\ndef Places_Put(place_id):\n \"\"\" Updates a Place object \"\"\"\n data = storage.get('Place', place_id)\n data_req = request.get_json()\n\n if data is None:\n abort(404)\n if not data_req:\n return jsonify({\"message\": \"Not a JSON\"}), 400\n\n for key, value in data_req.items():\n if key in ['id', 'user_id', 'city_id', 'created_at', 'updated_at']:\n continue\n setattr(data, key, value)\n data.save()\n return jsonify(data.to_dict()), 200\n","sub_path":"api/v1/views/places.py","file_name":"places.py","file_ext":"py","file_size_in_byte":2856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"645212569","text":"'''\nhttps://leetcode.com/problems/flipping-an-image/\n\nIn Python, the shortcut row[~i] = row[-i-1] = row[len(row) - 1 - i]\nhelps us find the i-th value of the row, counting from the right.\n'''\n\n\nclass Solution_0:\n def flipAndInvertImage(self, A):\n \"\"\"\n :type A: List[List[int]]\n :rtype: List[List[int]]\n \"\"\"\n for row in A:\n i, j = 0, len(row) - 1\n while i < j:\n row[i], row[j] = 1 ^ row[j], 1 ^ row[i]\n i += 1\n j -= 1\n\n if i == j:\n row[i], row[j] = 1 ^ row[i], 1 ^ row[j]\n\n return A\n\n\nclass Solution_1:\n def flipAndInvertImage(self, A):\n for row in A:\n for i in range((len(row) + 1) / 2):\n row[i], row[~i] = row[~i] ^ 1, row[i] ^ 1\n return A\n","sub_path":"python/array/flipping_an_image.py","file_name":"flipping_an_image.py","file_ext":"py","file_size_in_byte":824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"625464684","text":"#!/usr/bin/env python\nimport psycopg2\nimport datetime\n\n# Query for most popular 3 articles of all time\npopularArticlesSQL = (\n \" SELECT a.title, maxes.num \"\n \" FROM articles a\"\n \" JOIN \"\n \" (SELECT path, count(*) as num \"\n \" FROM log GROUP BY path) \"\n \" AS maxes \"\n \" ON substring(maxes.path,10) = a.slug \"\n \" ORDER BY maxes.num DESC LIMIT 3; \"\n)\n\n# Query for most popular article authors of all time\npopularAuthorsSQL = (\n \" SELECT distinct b.name, sum(maxes.num) \"\n \" FROM articles a \"\n \" JOIN \"\n \" (SELECT path, count(*) as num \"\n \" FROM log GROUP BY path) \"\n \" AS maxes \"\n \" ON substring(maxes.path,10) = a.slug \"\n \" JOIN authors b \"\n \" ON a.author = b.id \"\n \" GROUP BY b.name \"\n \" ORDER BY sum(maxes.num) DESC; \"\n)\n\n# Query for days with more than 1% of requests lead to errors\nerrorRequestsSQL = (\n \" SELECT to_char(FILTER.day, 'FMMonth FMDD, YYYY'), FILTER.pct \"\n \" FROM \"\n \" (SELECT ALL_RQ.day AS day, \"\n \" 100.0 * BAD_RQ.num / ALL_RQ.num AS pct \"\n \" FROM \"\n \" (SELECT time::date AS day, count(*) AS num \"\n \" FROM log WHERE status != '200 OK' \"\n \" GROUP BY time::date ) AS BAD_RQ \"\n \" JOIN \"\n \" (SELECT time::date AS day, count(*) AS num \"\n \" FROM log \"\n \" GROUP BY time::date ) AS ALL_RQ \"\n \" ON BAD_RQ.day = ALL_RQ.day \"\n \" ) AS FILTER \"\n \" WHERE FILTER.pct > 1; \"\n)\n\n\ndef get_results(query):\n \"\"\" Return results of the query from 'news database' \"\"\"\n db = psycopg2.connect(database=\"news\")\n c = db.cursor()\n c.execute(query)\n results = c.fetchall()\n db.close()\n return results\n\n\nprint(\"##### Most popular 3 articles of all time ##### ##### ##### #####\")\n\nresults = get_results(popularArticlesSQL)\nfor row in results:\n print('\"{}\" - {} views'.format(row[0], row[1]))\n\nprint\nprint(\"##### Most popular article authors of all time ##### ##### #####\")\n\nresults = get_results(popularAuthorsSQL)\nfor row in results:\n print('{} - {} views'.format(row[0], row[1]))\n\nprint\nprint(\"##### Days with more than 1% of requests lead to errors ##### ####\")\n\nresults = get_results(errorRequestsSQL)\nfor row in results:\n error_pcnt = str(\"{0:.2f}\".format(round(row[1], 2)))\n print(str(row[0]) + ' - ' + error_pcnt + '% errors')\n","sub_path":"logs_analysis/reportingtool.py","file_name":"reportingtool.py","file_ext":"py","file_size_in_byte":2266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"169478353","text":"n=int(input('Enter the total number of containers'))\ns=float(input('Enter the size of each container in litres '))\ni=n*s\nif i<=1:\n\tcost= 0.10\n\tprint('The amount that will be refunded is $',cost)\nelif i>=1 :\n\tcost=0.25\n\tprint('The amount that will be refunded is $',cost)\n\n","sub_path":"exercise5.py","file_name":"exercise5.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"307214503","text":"#!/usr/bin/env python\n\n\"\"\"\n\nUse Twitter API to grab tweets using hashtags; \nexport text file\n\nUses Twython module to access Twitter API\n\n\"\"\"\n\nimport sys\nimport string\nimport simplejson #install simplejson at https://pypi.python.org/pypi/simplejson/\nfrom twython import Twython #install Twython at https://github.com/ryanmcgrath/twython\n\n#WE WILL USE THE VARIABLES DAY, MONTH, AND YEAR FOR OUR OUTPUT FILE NAME\nimport datetime\nnow = datetime.datetime.now()\nday=int(now.day)\nmonth=int(now.month)\nyear=int(now.year)\n\n'''\n#FOR OAUTH AUTHENTICATION -- NEEDED TO ACCESS THE TWITTER API\nt = Twython(app_key='qtW8Q4270j67gooVD19tvAGo9', #REPLACE 'APP_KEY' WITH YOUR APP KEY, ETC., IN THE NEXT 4 LINES\n app_secret='0Jnd7nIrLYv3BhZdiT98iKcaQKZBEipXziib0CitV2RZ6zXATQ',\n oauth_token='2652772872-W9GTB3c973ayomnFPW1qEFgieNpskT5yJAD0c29',\n oauth_token_secret='nHdpiqLHzQVGSfVqgM7JgFN89FSpdkJpLdTNX1YYskx0G')\n\n'''\n\n#FOR OAUTH AUTHENTICATION -- NEEDED TO ACCESS THE TWITTER API\nt = Twython(app_key='SNHP4nuGP5KdjKjbH1tOygHCc', #REPLACE 'APP_KEY' WITH YOUR APP KEY, ETC., IN THE NEXT 4 LINES\n app_secret='eq9XeCvckFUPFVhmmkziXYKjcKidTgHZDDgr4EonO1KeN7BzWe',\n oauth_token='239916326-JUUgaZtFtFmfRNjtPihtHGJx2bAUF7n8c2jH3Oey',\n oauth_token_secret='C3eH01hk4ctgGpOBPgBBProkS7pK6CqpashaHYDOihCM2')\n\nteams = [ 'east_Celtics', 'east_Knicks', 'east_76ers', 'east_Nets', 'east_Raptors', 'east_Bulls', 'east_Pacers', 'east_Bucks', 'east_Pistons', \n 'east_Cavs', 'east_MiamiHeat', 'east_OrlandoMagic', 'east_Hawks', 'east_Bobcats', 'east_Wizards', \n 'west_okcthunder', 'west_Nuggets', 'west_TrailBlazers', 'west_UtahJazz', 'west_TWolves', 'west_Lakers', 'west_Suns', 'west_GSWarriors', 'west_Clippers', \n 'west_NBAKings', 'west_GoSpursGo', 'west_Mavs', 'west_Hornets', 'west_Grizzlies', 'west_Rockets' ]\n\nfor team in teams:\n\n hashtag = team[5:] ##### this line need to change\n print('hashtag')\n print(hashtag)\n delimiter = ','\n data = t.search(q='#'+hashtag, count=100)\n tweets = data['statuses']\n\n #print(\"Tweets-\")\n #print(data)\n\n #NAME OUR OUTPUT FILE - %i WILL BE REPLACED BY CURRENT MONTH, DAY, AND YEAR\n outfn = hashtag+\".csv\"\n\n #NAMES FOR HEADER ROW IN OUTPUT FILE\n fields = \"created_at text\".split()\n\n #INITIALIZE OUTPUT FILE AND WRITE HEADER ROW \n outfp = open(outfn, \"w\")\n #outfp.write(string.join(fields, \",\") + \"\\n\") # comment out if don't need header\n\n for entry in tweets:\n \n r = {}\n for f in fields:\n r[f] = \"\"\n #ASSIGN VALUE OF 'ID' FIELD IN JSON TO 'ID' FIELD IN OUR DICTIONARY\n r['created_at'] = entry['created_at']\n r['text'] = entry['text']\n \n print (r)\n #CREATE EMPTY LIST\n lst = []\n #ADD DATA FOR EACH VARIABLE\n for f in fields:\n s=unicode(r[f]).replace(\"\\/\", \"/\")\n s=s.replace(\"\\r\", \"\")\n s=s.replace(\"\\n\", \"\")\n s=s.replace(\",\", \"\")\n s=s.replace(\";\", \"\")\n s=s.replace(\"\\\"\", \"\")\n lst.append(s)\n ''' \n for f in fields:\n lst.append(unicode(r[f]).replace(\"\\/\", \"/\"))\n '''\n #WRITE ROW WITH DATA IN LIST\n outfp.write(string.join(lst, delimiter).encode(\"utf-8\") + \"\\n\")\n\n outfp.close() \n","sub_path":"Data Intensive Computing/hw5/accumulo/src/Python_script_and_CSV_files/API_hashtag.py","file_name":"API_hashtag.py","file_ext":"py","file_size_in_byte":3273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"133138721","text":"from db_insert import create_tables, flight_to_db, details_to_db, pilot_to_db\nfrom db_update import add_flight_time\nimport datetime\nfrom utils import create_url, rows_from_html, get_flight, daterange, last_day, first_day, last_logged_day\nimport time\nimport threading\nfrom multiprocessing import Queue\nimport requests\nfrom requests_futures.sessions import FuturesSession\nimport sys\nimport argparse\nimport sqlite3\n\n\ndef crawl(\n start=datetime.date.today(),\n end=datetime.date(2006, 10, 10),\n season=None,\n query_country=\"C0\",\n write=True,\n):\n \"\"\"Puts all flights between start and end in database\n\n Attributes:\n season (int): olc-season to crawl\n start (datetime.date object)\n end (datetime.date object)\n query_country (string): country code for table, example: 'C0'\n\n \"\"\"\n\n # first day of olc records\n if end < datetime.date(2006, 10, 10):\n end = datetime.date(2006, 10, 10)\n\n if isinstance(season, int):\n start = last_logged_day(season) - datetime.timedelta(days=1)\n end = first_day(season)\n\n # iterate through dates\n for date, olc_season in daterange(start, end):\n crawl_day(date, olc_season, query_country, write)\n\n return\n\n\ndef crawl_day(date, season, query_country, write):\n \"\"\"Puts all flights of specific day and country in database\n\n Attributes:\n date (datetime.date object)\n season (int): Olc season, needed for url\n query_country (string): country code for table, example: 'C0'\n\n \"\"\"\n crawling_backwards = True # moving from newer to older dates\n if crawling_backwards and date >= last_logged_day(season):\n print(\"Date {} already in database\".format(date))\n return\n\n # iterate through number of pages for day\n n_page = 1\n while True:\n try:\n session = FuturesSession()\n url = create_url(date, season, query_country, n_page)\n rows, n_flights = rows_from_html(url, session)\n\n for row in rows:\n flight = get_flight(row, date)\n\n flight_newly_added = flight_to_db(*flight)\n add_flight_time(flight[0], flight[6], flight[7], flight[8])\n\n if flight_newly_added:\n details_to_db(flight, session)\n pilot_to_db(flight[1], session)\n if flight[9] is not None:\n pilot_to_db(flight[9], session, co=True)\n\n except requests.exceptions.RequestException:\n print(\"ConnectionError, skipping page\")\n except sqlite3.OperationalError:\n print(\"Database locked, returning to start of page\")\n continue\n except AttributeError:\n print(\"AttributeError - no valid html, skipping day\")\n break\n # more flights to find?\n if (n_flights - (50 * n_page)) > 0:\n n_page += 1\n\n # no more flights to find\n elif (n_flights - (50 * n_page)) <= 0:\n if write:\n filename = \"../data/logs/datelog\" + str(season) + \".txt\"\n with open(filename, \"a+\") as file:\n date_string = date.strftime(\"%Y-%m-%d\")\n file.write(date_string + \"\\n\")\n file.close()\n break\n return\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--season\")\n try:\n season = int(parser.parse_args().season)\n except TypeError:\n season = None\n\n if season is not None:\n create_tables()\n crawl(season=season)\n else:\n crawl()\n","sub_path":"crawl/crawl.py","file_name":"crawl.py","file_ext":"py","file_size_in_byte":3607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"422879736","text":"student1 = [1, 2, 3, 4, 5]*2000\r\nstudent2 = [2, 1, 2, 3, 2, 4, 2, 5]*1250\r\nstudent3 = [3, 3, 1, 1, 2, 2, 4, 4, 5, 5]*1000\r\n\r\nanswers = [1, 2, 3, 4, 5]\r\n\r\nanswer = []\r\ncnt = []\r\nst1 = 0\r\nst2 = 0\r\nst3 = 0\r\n\r\nfor i in range(len(answers)):\r\n if student1[i] == answers[i]:\r\n st1 += 1\r\ncnt.append(st1)\r\n\r\nfor i in range(len(answers)):\r\n if student2[i] == answers[i]:\r\n st2 += 1\r\ncnt.append(st2)\r\n\r\nfor i in range(len(answers)):\r\n if student3[i] == answers[i]:\r\n st3 += 1\r\ncnt.append(st3)\r\n\r\nprint(cnt)\r\nm = max(cnt)\r\nanswer = list(i+1 for i, j in enumerate(cnt) if j == m)\r\n\r\nprint(answer)\r\n","sub_path":"python_programmers/Lv1/모의고사.py","file_name":"모의고사.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"333717622","text":"from __future__ import annotations\nfrom dataclasses import dataclass, field\nfrom kiota_abstractions.base_request_builder import BaseRequestBuilder\nfrom kiota_abstractions.get_path_parameters import get_path_parameters\nfrom kiota_abstractions.method import Method\nfrom kiota_abstractions.request_adapter import RequestAdapter\nfrom kiota_abstractions.request_information import RequestInformation\nfrom kiota_abstractions.request_option import RequestOption\nfrom kiota_abstractions.serialization import Parsable, ParsableFactory\nfrom typing import Any, Callable, Dict, List, Optional, TYPE_CHECKING, Union\n\nif TYPE_CHECKING:\n from ..models.cloud_communications import CloudCommunications\n from ..models.o_data_errors.o_data_error import ODataError\n from .call_records.call_records_request_builder import CallRecordsRequestBuilder\n from .calls.calls_request_builder import CallsRequestBuilder\n from .get_presences_by_user_id.get_presences_by_user_id_request_builder import GetPresencesByUserIdRequestBuilder\n from .online_meetings.online_meetings_request_builder import OnlineMeetingsRequestBuilder\n from .presences.presences_request_builder import PresencesRequestBuilder\n\nclass CommunicationsRequestBuilder(BaseRequestBuilder):\n \"\"\"\n Provides operations to manage the cloudCommunications singleton.\n \"\"\"\n def __init__(self,request_adapter: RequestAdapter, path_parameters: Optional[Union[Dict[str, Any], str]] = None) -> None:\n \"\"\"\n Instantiates a new CommunicationsRequestBuilder and sets the default values.\n Args:\n path_parameters: The raw url or the Url template parameters for the request.\n request_adapter: The request adapter to use to execute the requests.\n \"\"\"\n super().__init__(request_adapter, \"{+baseurl}/communications{?%24select,%24expand}\", path_parameters)\n \n async def get(self,request_configuration: Optional[CommunicationsRequestBuilderGetRequestConfiguration] = None) -> Optional[CloudCommunications]:\n \"\"\"\n Get communications\n Args:\n request_configuration: Configuration for the request such as headers, query parameters, and middleware options.\n Returns: Optional[CloudCommunications]\n \"\"\"\n request_info = self.to_get_request_information(\n request_configuration\n )\n from ..models.o_data_errors.o_data_error import ODataError\n\n error_mapping: Dict[str, ParsableFactory] = {\n \"4XX\": ODataError,\n \"5XX\": ODataError,\n }\n if not self.request_adapter:\n raise Exception(\"Http core is null\") \n from ..models.cloud_communications import CloudCommunications\n\n return await self.request_adapter.send_async(request_info, CloudCommunications, error_mapping)\n \n async def patch(self,body: Optional[CloudCommunications] = None, request_configuration: Optional[CommunicationsRequestBuilderPatchRequestConfiguration] = None) -> Optional[CloudCommunications]:\n \"\"\"\n Update communications\n Args:\n body: The request body\n request_configuration: Configuration for the request such as headers, query parameters, and middleware options.\n Returns: Optional[CloudCommunications]\n \"\"\"\n if not body:\n raise TypeError(\"body cannot be null.\")\n request_info = self.to_patch_request_information(\n body, request_configuration\n )\n from ..models.o_data_errors.o_data_error import ODataError\n\n error_mapping: Dict[str, ParsableFactory] = {\n \"4XX\": ODataError,\n \"5XX\": ODataError,\n }\n if not self.request_adapter:\n raise Exception(\"Http core is null\") \n from ..models.cloud_communications import CloudCommunications\n\n return await self.request_adapter.send_async(request_info, CloudCommunications, error_mapping)\n \n def to_get_request_information(self,request_configuration: Optional[CommunicationsRequestBuilderGetRequestConfiguration] = None) -> RequestInformation:\n \"\"\"\n Get communications\n Args:\n request_configuration: Configuration for the request such as headers, query parameters, and middleware options.\n Returns: RequestInformation\n \"\"\"\n request_info = RequestInformation()\n request_info.url_template = self.url_template\n request_info.path_parameters = self.path_parameters\n request_info.http_method = Method.GET\n request_info.headers[\"Accept\"] = [\"application/json\"]\n if request_configuration:\n request_info.add_request_headers(request_configuration.headers)\n request_info.set_query_string_parameters_from_raw_object(request_configuration.query_parameters)\n request_info.add_request_options(request_configuration.options)\n return request_info\n \n def to_patch_request_information(self,body: Optional[CloudCommunications] = None, request_configuration: Optional[CommunicationsRequestBuilderPatchRequestConfiguration] = None) -> RequestInformation:\n \"\"\"\n Update communications\n Args:\n body: The request body\n request_configuration: Configuration for the request such as headers, query parameters, and middleware options.\n Returns: RequestInformation\n \"\"\"\n if not body:\n raise TypeError(\"body cannot be null.\")\n request_info = RequestInformation()\n request_info.url_template = self.url_template\n request_info.path_parameters = self.path_parameters\n request_info.http_method = Method.PATCH\n request_info.headers[\"Accept\"] = [\"application/json\"]\n if request_configuration:\n request_info.add_request_headers(request_configuration.headers)\n request_info.add_request_options(request_configuration.options)\n request_info.set_content_from_parsable(self.request_adapter, \"application/json\", body)\n return request_info\n \n @property\n def call_records(self) -> CallRecordsRequestBuilder:\n \"\"\"\n Provides operations to manage the callRecords property of the microsoft.graph.cloudCommunications entity.\n \"\"\"\n from .call_records.call_records_request_builder import CallRecordsRequestBuilder\n\n return CallRecordsRequestBuilder(self.request_adapter, self.path_parameters)\n \n @property\n def calls(self) -> CallsRequestBuilder:\n \"\"\"\n Provides operations to manage the calls property of the microsoft.graph.cloudCommunications entity.\n \"\"\"\n from .calls.calls_request_builder import CallsRequestBuilder\n\n return CallsRequestBuilder(self.request_adapter, self.path_parameters)\n \n @property\n def get_presences_by_user_id(self) -> GetPresencesByUserIdRequestBuilder:\n \"\"\"\n Provides operations to call the getPresencesByUserId method.\n \"\"\"\n from .get_presences_by_user_id.get_presences_by_user_id_request_builder import GetPresencesByUserIdRequestBuilder\n\n return GetPresencesByUserIdRequestBuilder(self.request_adapter, self.path_parameters)\n \n @property\n def online_meetings(self) -> OnlineMeetingsRequestBuilder:\n \"\"\"\n Provides operations to manage the onlineMeetings property of the microsoft.graph.cloudCommunications entity.\n \"\"\"\n from .online_meetings.online_meetings_request_builder import OnlineMeetingsRequestBuilder\n\n return OnlineMeetingsRequestBuilder(self.request_adapter, self.path_parameters)\n \n @property\n def presences(self) -> PresencesRequestBuilder:\n \"\"\"\n Provides operations to manage the presences property of the microsoft.graph.cloudCommunications entity.\n \"\"\"\n from .presences.presences_request_builder import PresencesRequestBuilder\n\n return PresencesRequestBuilder(self.request_adapter, self.path_parameters)\n \n @dataclass\n class CommunicationsRequestBuilderGetQueryParameters():\n \"\"\"\n Get communications\n \"\"\"\n def get_query_parameter(self,original_name: Optional[str] = None) -> str:\n \"\"\"\n Maps the query parameters names to their encoded names for the URI template parsing.\n Args:\n original_name: The original query parameter name in the class.\n Returns: str\n \"\"\"\n if not original_name:\n raise TypeError(\"original_name cannot be null.\")\n if original_name == \"expand\":\n return \"%24expand\"\n if original_name == \"select\":\n return \"%24select\"\n return original_name\n \n # Expand related entities\n expand: Optional[List[str]] = None\n\n # Select properties to be returned\n select: Optional[List[str]] = None\n\n \n from kiota_abstractions.base_request_configuration import BaseRequestConfiguration\n\n @dataclass\n class CommunicationsRequestBuilderGetRequestConfiguration(BaseRequestConfiguration):\n from kiota_abstractions.base_request_configuration import BaseRequestConfiguration\n\n \"\"\"\n Configuration for the request such as headers, query parameters, and middleware options.\n \"\"\"\n # Request query parameters\n query_parameters: Optional[CommunicationsRequestBuilder.CommunicationsRequestBuilderGetQueryParameters] = None\n\n \n from kiota_abstractions.base_request_configuration import BaseRequestConfiguration\n\n @dataclass\n class CommunicationsRequestBuilderPatchRequestConfiguration(BaseRequestConfiguration):\n from kiota_abstractions.base_request_configuration import BaseRequestConfiguration\n\n \"\"\"\n Configuration for the request such as headers, query parameters, and middleware options.\n \"\"\"\n \n\n","sub_path":"msgraph/generated/communications/communications_request_builder.py","file_name":"communications_request_builder.py","file_ext":"py","file_size_in_byte":9846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"242619917","text":"class PositionTracker():\n def __init__(self, mods):\n self.mods = []\n for ele in mods:\n if ele is None or ele < 1:\n self.mods.append(1)\n else:\n self.mods.append(ele)\n self._height = len(mods)\n self._cur_state = [0] * self._height\n \n def increment(self, index = None):#incrementor function for the naive traversal\n if index is None:\n index = self._height - 1\n sig = self._height - 1\n while sig > index:\n self._cur_state[sig] = 0 \n sig -= 1\n while sig >= 0 and (self._cur_state[sig] + 1) % self.mods[sig] == 0:\n self._cur_state[sig] = 0\n sig -= 1\n if sig >= 0:\n self._cur_state[sig] += 1\n #else:\n # raise StopIteration(\"Can not increment further.\")\n return sig\n \n def min_changed_index(self):\n for index, val in enumerate(self._cur_state):\n if val > 0:\n return index\n return self._height - 1\n \n \n def __getitem__(self, key):\n return self._cur_state[key]\n \n def __str__(self):\n return str(self._cur_state)\n ","sub_path":"Prototype/Python/Playground/Mark1/position_tracker.py","file_name":"position_tracker.py","file_ext":"py","file_size_in_byte":1218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"463091313","text":"# LISTA EXTRA CONT\n# QUESTÃO A\n\ndef josephus(n, m):\n m -= 1\n x = m\n while len(n) > 1:\n n.pop(x)\n x = (x + m) % len(n)\n print (n[0])\n\njosephus(list(range(1,51)),3)","sub_path":"Lista Extra - Desafio CONT/listaextra-A.py","file_name":"listaextra-A.py","file_ext":"py","file_size_in_byte":189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"108529433","text":"import shutil\nimport torch\nfrom torch.autograd import Variable\nfrom os import makedirs, remove\nfrom os.path import exists, join, basename, dirname\nimport numpy as np\nfrom skimage import io\nfrom collections import OrderedDict\n\nclass BatchTensorToVars(object):\n \"\"\"Convert tensors in dict batch to vars\n \"\"\"\n def __init__(self, use_cuda=True):\n self.use_cuda=use_cuda\n \n def __call__(self, batch):\n batch_var = {}\n for key,value in batch.items():\n batch_var[key] = Variable(value,requires_grad=False)\n if self.use_cuda:\n batch_var[key] = batch_var[key].cuda()\n \n return batch_var\n \ndef save_checkpoint(state, is_best, file):\n model_dir = dirname(file)\n model_fn = basename(file)\n # make dir if needed (should be non-empty)\n if model_dir!='' and not exists(model_dir):\n makedirs(model_dir)\n torch.save(state, file)\n if is_best:\n shutil.copyfile(file, join(model_dir,'best_' + model_fn))\n \ndef str_to_bool(v):\n if v.lower() in ('yes', 'true', 't', 'y', '1'):\n return True\n elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n return False\n else:\n raise argparse.ArgumentTypeError('Boolean value expected.')\n\ndef readImage(img_path, affineTnf):\n image = io.imread(img_path)\n # get image size\n im_size = np.asarray(image.shape)\n\n # convert to torch Variable\n image = np.expand_dims(image.transpose((2, 0, 1)), 0)\n image = torch.Tensor(image.astype(np.float32))\n image_var = Variable(image, requires_grad=False)\n\n # Resize image using bilinear sampling with identity affine tnf\n image = affineTnf(image_var).data\n\n im_size = torch.Tensor(im_size.reshape(1, -1).astype(np.float32))\n\n return (image, im_size)\n# Compute PCK\ndef correct_keypoints(source_points,warped_points,L_pck,alpha=0.1):\n # compute correct keypoints\n torch_sum = torch.sum(torch.pow(source_points - warped_points, 2), 1)\n point_distance = torch.pow(torch_sum, 0.5).squeeze(1)\n L_pck_mat = L_pck.expand_as(point_distance)\n correct_points = torch.le(point_distance,L_pck_mat*alpha)\n num_of_correct_points = torch.sum(correct_points.data)\n num_of_points = correct_points.numel()\n #temp = torch.sum(torch_sum.data)\n reprojection_error = torch.mean(point_distance.data)#math.mean(temp)\n return (num_of_correct_points,num_of_points,reprojection_error)\n\n# Compute Reprojection Error\ndef compute_reprojection_error(source_points,warped_points):\n torch_sum = torch.sum(torch.pow(source_points - warped_points, 2), 1)\n point_distance = torch.pow(torch_sum, 0.5).squeeze(1)\n mse = torch.mean(point_distance.data)\n return mse\n\ndef convertGpuWeightsToCpu(weights_path):\n checkpoint = torch.load(weights_path, map_location=lambda storage, loc: storage)\n new_state_dict = OrderedDict()\n for k, v in checkpoint['state_dict'].items():\n name = k[7:] # remove `module.`\n new_state_dict[name] = v\n return new_state_dict","sub_path":"util/torch_util.py","file_name":"torch_util.py","file_ext":"py","file_size_in_byte":3027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"86818791","text":"# -*- coding: utf-8 -*-\n# This is a minimal blogofile config file.\n# See the docs for config options\n# or run `blogofile help init` to learn how to initialize\n# a site from a plugin.\nsite.url = \"http://localhost:8080\"\n\ncontrollers.chapters.enabled = True\n#### Blog Settings ####\nblog = controllers.blog\n\n## blog_enabled -- Should the blog be enabled?\n# (You don't _have_ to use blogofile to build blogs)\nblog.enabled = False\n\n","sub_path":"web/_config.py","file_name":"_config.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"566687551","text":"class Node():\n def __init__(self, parent=None):\n self.name = ''\n self.parent = parent\n self.children = []\n self.weight = 1\n self.splits = ''\n \n def __str__(self, indentation=''):\n s = f'{indentation}{self.name if self.name else \"(No name)\"} {self.weight} {self.splits}\\n'\n indent_child = indentation + ' '\n for child in self.children:\n s += child.__str__(indentation=indent_child)\n return s\n \n def find_distance(self, name, source):\n #print(f'Finding distance from {self.name} to {name}')\n if self.name == name:\n return 0\n \n distances = []\n # When travelling to a child, add the child's weight\n for child in self.children:\n if child == source:\n pass\n else:\n distances.append(child.weight + child.find_distance(name, self))\n \n # When travelling to a parent, add own weight\n if self.parent and self.parent != source:\n distances.append(self.weight + self.parent.find_distance(name, self))\n \n if len(distances) == 0: return 99999\n \n return min(distances)\n \n def calculate_size(self, ignore=None):\n size = 1 + sum([child.calculate_size(ignore=self) for child in self.children if child != ignore])\n size += self.parent.calculate_size(ignore=self) if self.parent and self.parent != ignore else 0\n return size\n \n def assign_tree_split(self, split, ignore=None):\n self.splits += str(split)\n for child in self.children:\n if child != ignore:\n child.assign_tree_split(split, ignore=self)\n if self.parent and self.parent != ignore:\n self.parent.assign_tree_split(split, ignore=self)\n\nclass Graph():\n def __init__(self, graph_text):\n print(f'Building graph from {graph_text}')\n self.root = Node()\n self.current_node = self.root\n self.current_child = Node(parent=self.current_node)\n self.current_node.children.append(self.current_child)\n \n self.node_map = {}\n \n self.all_nodes = [self.current_child]\n \n self.current_name = ''\n \n for char in graph_text:\n \n # Open parentheses - go one level deeper\n if char == '(':\n #print(f'Entering child node')\n self.current_node = self.current_child\n self.current_child = Node(parent=self.current_node)\n self.current_node.children.append(self.current_child)\n self.all_nodes.append(self.current_child)\n \n # Close parentheses - go one level up\n elif char == ')':\n #print(f'Returning to parent node')\n self.current_child = self.current_node\n self.current_node = self.current_node.parent\n \n # Comma - finish working on the current child, and create a new one\n elif char == ',':\n #print(f'Completed node {self.current_child.name}, creating new')\n self.current_child = Node(parent=self.current_node)\n self.current_node.children.append(self.current_child)\n self.all_nodes.append(self.current_child)\n \n # Letter - add to the current name\n elif char.isalpha() or char == '_':\n if self.current_child.name: self.node_map.pop(self.current_child.name)\n self.current_child.name += char\n self.node_map[self.current_child.name] = self.current_child\n #print(f'Name is now {self.current_child.name}')\n\n # Digit - append to weight\n elif char.isdigit():\n self.current_child.weight *= 10\n self.current_child.weight += int(char)\n \n # Colon will fall here - ignore\n else:\n if char != ':': print(f'Unrecognized character |{char}|')\n pass\n \n def __str__(self):\n return self.root.__str__()\n \n def find_distance(self, a, b):\n node_a = self.node_map[a]\n return node_a.find_distance(b, None)\n \n def get_nodes_lex(self):\n keys = list(self.node_map.keys())\n keys.sort()\n return keys\n \n def get_splits(self, a):\n return self.root.children[0].find_distance(a, None) - 1\n \n def get_all_nodes(self):\n return sorted([child.name for child in self.all_nodes if child.name])\n \n def determine_splits(self):\n nodes = [child for child in self.all_nodes]\n for node in nodes:\n # Pretend to disconnect this node from the tree - how big is it, and the remainder?\n child_tree_size = node.calculate_size(ignore=node.parent)\n parent_tree_size = node.parent.calculate_size(ignore=node)\n \n #print(f'{node.name} - Tree size {child_tree_size} - Parent Tree size {parent_tree_size}')\n \n if child_tree_size > 1 and parent_tree_size > 1:\n node.assign_tree_split( split=1, ignore=node.parent)\n node.parent.assign_tree_split(split=0, ignore=node)\n \n def get_splits(self):\n splits = ''\n named_nodes = sorted([child for child in self.all_nodes if child.name], key=lambda x: x.name)\n for split_row in range(len(self.root.splits)):\n splits += ''.join([node.splits[split_row] for node in named_nodes]) + '\\n'\n return splits\n \n\nif __name__=='__main__':\n with open('dataset.txt', 'r') as fp:\n data = fp.read().split('\\n')\n \n results = []\n \n # Strip the ending semicolon\n graph_text = data[0][:-1]\n \n graph = Graph(graph_text)\n \n #print(graph)\n \n keys = graph.get_nodes_lex()\n \n graph.determine_splits()\n \n #print(graph)\n \n result = graph.get_splits()\n \n print(result)\n \n with open('result.txt', 'w') as fp2:\n fp2.write(result)","sub_path":"062_CTBL/ctbl.py","file_name":"ctbl.py","file_ext":"py","file_size_in_byte":6171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"601056218","text":"from tkinter import *\nimport mysql.connector\n\n\n# Funções\n# Função do botão de limpar campos\ndef limpar_campos():\n primeiro_nome_entry.delete(0, END)\n sobrenome_entry.delete(0, END)\n endereco_1_entry.delete(0, END)\n endereco_2_entry.delete(0, END)\n cidade_entry.delete(0, END)\n estado_entry.delete(0, END)\n cep_entry.delete(0, END)\n pais_entry.delete(0, END)\n telefone_entry.delete(0, END)\n email_entry.delete(0, END)\n tipo_de_pagamento_entry.delete(0, END)\n desconto_entry.delete(0, END)\n valor_pago_entry.delete(0, END)\n\n\n# Função do botão de adicionar clientes ao banco\ndef adicionar_cliente():\n # Comando sql para inserir os dados dos campos de texto dentro do banco\n comando_sql = f\"INSERT INTO clientes (primeiro_nome, sobrenome, cep, valor_pago, email, endereco_1, endereco_2, \" \\\n f\"cidade, estado, pais, telefone, tipo_de_pagamento, desconto) VALUES (\" \\\n f\"'{primeiro_nome_entry.get()}', '{sobrenome_entry.get()}', {cep_entry.get()}, \" \\\n f\"{valor_pago_entry.get()}, '{email_entry.get()}', '{endereco_1_entry.get()}', \" \\\n f\"'{endereco_2_entry.get()}', '{cidade_entry.get()}', '{estado_entry.get()}', \" \\\n f\"'{pais_entry.get()}', {telefone_entry.get()}, '{tipo_de_pagamento_entry.get()}', \" \\\n f\"{desconto_entry.get()})\"\n my_cursor.execute(comando_sql)\n\n # Commitar as mudanças para o banco\n my_db.commit()\n\n # Limpar os campos de texto\n limpar_campos()\n\n\n# Main\nroot = Tk()\nroot.title(\"Doidera\")\nroot.geometry(\"400x600+200+200\")\n\n# Conectar ao banco\nmy_db = mysql.connector.connect(\n host=\"localhost\",\n user=\"root\",\n passwd=\"YourPassword\",\n database=\"doidera\"\n)\n\n# Criar um cursor e inicializa-lo\nmy_cursor = my_db.cursor()\n\n# Titulo da interface\ntitulo_label = Label(root, text=\"Cadastro Clientes\", font=(\"Helvetica\", 30))\ntitulo_label.grid(row=0, column=0, columnspan=2)\n\n# Labels dos campos de cadastro\nprimeiro_nome_label = Label(root, text=\"Primeiro Nome\")\nsobrenome_label = Label(root, text=\"Sobrenome\")\ncep_label = Label(root, text=\"Cep\")\nvalor_pago_label = Label(root, text=\"Valor Pago\")\nemail_label = Label(root, text=\"Email\")\nendereco_1_label = Label(root, text=\"Endereço 1\")\nendereco_2_label = Label(root, text=\"Endereço 2\")\ncidade_label = Label(root, text=\"Cidade\")\nestado_label = Label(root, text=\"Estado\")\npais_label = Label(root, text=\"País\")\ntelefone_label = Label(root, text=\"Telefone\")\ntipo_de_pagamento_label = Label(root, text=\"Tipo de pagamento\")\ndesconto_label = Label(root, text=\"Desconto\")\n\n# Posicionamento das labels na interface\nprimeiro_nome_label.grid(row=1, column=0, sticky=W, padx=10)\nsobrenome_label.grid(row=2, column=0, sticky=W, padx=10)\nendereco_1_label.grid(row=3, column=0, sticky=W, padx=10)\nendereco_2_label.grid(row=4, column=0, sticky=W, padx=10)\ncidade_label.grid(row=5, column=0, sticky=W, padx=10)\nestado_label.grid(row=6, column=0, sticky=W, padx=10)\ncep_label.grid(row=7, column=0, sticky=W, padx=10)\npais_label.grid(row=8, column=0, sticky=W, padx=10)\ntelefone_label.grid(row=9, column=0, sticky=W, padx=10)\nemail_label.grid(row=10, column=0, sticky=W, padx=10)\ntipo_de_pagamento_label.grid(row=11, column=0, sticky=W, padx=10)\ndesconto_label.grid(row=12, column=0, sticky=W, padx=10)\nvalor_pago_label.grid(row=13, column=0, sticky=W, padx=10)\n\n# Campos de Texto\nprimeiro_nome_entry = Entry(root)\nsobrenome_entry = Entry(root)\nendereco_1_entry = Entry(root)\nendereco_2_entry = Entry(root)\ncidade_entry = Entry(root)\nestado_entry = Entry(root)\ncep_entry = Entry(root)\npais_entry = Entry(root)\ntelefone_entry = Entry(root)\nemail_entry = Entry(root)\ntipo_de_pagamento_entry = Entry(root)\ndesconto_entry = Entry(root)\nvalor_pago_entry = Entry(root)\n\n# Posicionando campos de texto\nprimeiro_nome_entry.grid(row=1, column=1)\nsobrenome_entry.grid(row=2, column=1)\nendereco_1_entry.grid(row=3, column=1)\nendereco_2_entry.grid(row=4, column=1)\ncidade_entry.grid(row=5, column=1)\nestado_entry.grid(row=6, column=1)\ncep_entry.grid(row=7, column=1)\npais_entry.grid(row=8, column=1)\ntelefone_entry.grid(row=9, column=1)\nemail_entry.grid(row=10, column=1)\ntipo_de_pagamento_entry.grid(row=11, column=1)\ndesconto_entry.grid(row=12, column=1)\nvalor_pago_entry.grid(row=13, column=1)\n\n# Criar botões\nadicionar_btn = Button(root, text=\"ADICIONAR\", command=adicionar_cliente)\nlimpar_btn = Button(root, text=\"LIMPAR\", command=limpar_campos)\n\n# Posicionar botões\nadicionar_btn.grid(row=14, column=0, padx=10, pady=10)\nlimpar_btn.grid(row=14, column=1, padx=10, pady=10)\n\nroot.mainloop()\n","sub_path":"Curso/ModuloTkinter/Aula030MySQLInputBoxes3.py","file_name":"Aula030MySQLInputBoxes3.py","file_ext":"py","file_size_in_byte":4613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"181510248","text":"#!/usr/bin/python3\r\n\r\n\r\nimport sys\r\nimport numpy\r\nimport Num\r\noption=0\r\nfrag=0\r\ncount=0\r\nargs=sys.argv\r\nf=open(args[1]) #ファイルオープン\r\nsym=\"\"\r\nLYS_atom=[\"NZ\",\"HZ1\",\"HZ2\",\"HZ3\"]\r\nGLU_atom=[\"CD\",\"OE1\",\"OE2\"]\r\nASP_atom=[\"CG\",\"OD1\",\"OD2\"]\r\nARG_atom=[\"CZ\",\"2HH1\",\"2HH2\"]\r\ncordination=[]\r\nions=[]\r\n\r\nfrom Num import Nums\r\n\r\n\r\n\r\ndata1=f.readlines()\r\nf.close\r\nnum=Nums()\r\nnum.fram=len(data1)\r\n\r\nfor length in range(len(data1)):\r\n\t\tnum.num_ditect(length,data1[length])\r\n\t\tnum.name_ditect(length,data1[length])\r\n#print(num.arr_num)\r\nprint(num.arr_name)\r\n\r\nnum.joint()\r\nnum.acid_check()#電荷のチェック\r\nif num.charge!=0:\r\n\tnum.charged_check()\r\n\tprint(\"------------以下に電荷を持つアミノ酸の番号と種類を表示します----------\")\r\n\tprint(num.charged_list)\r\n\tfor i in range(len(num.charged_list)):\r\n\t\tprint(\" number: {0} , name: {1} \".format(int(num.charged_list[i]),num.dict[int(num.charged_list[i])]))\r\n\tprint(\"途中で終了する場合には0を入力してください\")\r\n\tnum.askacid()\r\n#\tnum.addlines(args[1])\r\nelse:\r\n\tprint(\" 構造の電荷はゼロです \")\r\nfor i in range(len(num.addlist)):#選択した荷電アミノ酸の分行う\r\n\teach_atom=[]\r\n\taaa=int(num.addlist[i])\r\n#------ARGの場合-----------\r\n\tif num.dict[aaa]==\"ARG\":\r\n\t\tfor var in range(len(ARG_atom)):\r\n\t\t\tword=ARG_atom[var]\r\n\t\t\tfor t in range(10):\r\n\t\t\t\tif data1[int(num.cordinate[str(aaa)])-t].find(word)!=-1:\r\n\t\t\t\t\teach_atom.append(int(num.cordinate[str(aaa)])-t)\r\n\t\tnum.cord_cal.append(each_atom)\r\n\t\t\t\r\n#------LYSの場合-----------\r\n\telif num.dict[aaa]==\"LYS\":\r\n\t\tfor var in range(len(LYS_atom)):\r\n\t\t\tword=LYS_atom[var]\r\n\t\t\tfor t in range(10):\r\n\t\t\t\tif data1[int(num.cordinate[str(aaa)])-t].find(word)!=-1:\r\n\t\t\t\t\teach_atom.append(int(num.cordinate[str(aaa)])-t)\r\n\t\tnum.cord_cal.append(each_atom)\r\n#------GLUの場合-----------\r\n\telif num.dict[aaa]==\"GLU\":\r\n\t\tfor var in range(len(GLU_atom)):\r\n\t\t\tword=GLU_atom[var]\r\n\t\t\tfor t in range(10):\r\n\t\t\t\tif data1[int(num.cordinate[str(aaa)])-t].find(word)!=-1:\r\n\t\t\t\t\teach_atom.append(int(num.cordinate[str(aaa)])-t)\r\n\t\tnum.cord_cal.append(each_atom)\r\n#------ASPの場合-----------\r\n\telif num.dict[aaa]==\"ASP\":\r\n\t\tfor var in range(len(ASP_atom)):\r\n\t\t\tword=ASP_atom[var]\r\n\t\t\tfor t in range(10):\r\n\t\t\t\tif data1[int(num.cordinate[str(aaa)])-t].find(word)!=-1:\r\n\t\t\t\t\teach_atom.append(int(num.cordinate[str(aaa)])-t)\r\n\t\tnum.cord_cal.append(each_atom)\r\n\telse:\r\n\t\tprint(num.dict[aaa])\r\nprint(num.cord_cal)\r\nfor length in range(len(num.cord_cal)):\t\r\n\tnum.location(data1,length)\r\n#-----ここまでで書くアミノ酸ごとに座標を計算したほうがいいかもーーーーー\r\n\tprint(\"----------\")\r\nprint(num.xyz)\r\nfor i in range(len(num.addlist)):\r\n\tlast=num.calculation(i)\r\n\tions.append(last)\r\nprint(ions)\r\nnum.addlines(ions,\"ion\")\r\nprint(data1[num.cord_cal[1][0]])\r\n","sub_path":"insion.py","file_name":"insion.py","file_ext":"py","file_size_in_byte":2869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"22978452","text":"import numpy as np\nimport torch\nfrom argparse import ArgumentParser\nfrom skimage import measure\nfrom .network import SALD\nfrom trimesh import Trimesh, Scene\nfrom trimesh.viewer.windowed import SceneViewer\nfrom pathlib import Path\nimport re\n\ndef main(args):\n if args.epoch is None:\n epochs = Path(f\"lightning_logs/version_{args.version}/checkpoints/\").glob('*.ckpt')\n epochs = list(map(lambda x: int(re.match(r\"epoch=([0-9]+)\", x.stem).group(1)), epochs))\n print(epochs)\n epoch = max(epochs)\n else:\n epoch = args.epoch\n\n\n model = SALD.load_from_checkpoint(f\"lightning_logs/version_{args.version}/checkpoints/epoch={epoch}.ckpt\")\n\n if args.object is not None:\n objId = int(args.object)\n else:\n objId = np.random.randint(0, model.embedding.num_embeddings)\n\n grid = model.voxelize(objId, args.res).detach().numpy()\n verts, faces, normals, values = measure.marching_cubes(grid, 0)\n mesh = Trimesh(vertices=verts, faces=faces)\n scene = Scene([mesh])\n viewer = SceneViewer(scene)\n\n\nif __name__ == \"__main__\":\n parser = ArgumentParser()\n parser.add_argument(\n '--epoch',\n '-e',\n dest='epoch',\n default=None,\n )\n parser.add_argument(\n '--version',\n '-v',\n dest='version',\n default=None,\n )\n parser.add_argument(\n '--object',\n '-o',\n dest='object',\n default=None,\n )\n parser.add_argument(\n '--resolution',\n '-r',\n dest='res',\n default=64,\n )\n\n args = parser.parse_args()\n main(args)\n","sub_path":"sald/visualize.py","file_name":"visualize.py","file_ext":"py","file_size_in_byte":1661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"9012049","text":"# ---------------------------------\n# More compatible with python 3 code\nfrom __future__ import unicode_literals\nfrom __future__ import print_function\nfrom __future__ import division\nfrom __future__ import absolute_import\n\ntry:\n range = xrange\nexcept NameError:\n pass\n\n# End of more compatible with python 3\n# ---------------------------------\n\nfrom src.utils import weighted_choice\nfrom src.tspsolver_edgeswap import single_edge_swapped \n\ndef antcolony_solver(tspdata, opts):\n n = int(opts['n'])\n distance_exponent = float(opts['dexp'])\n distance_norm = float(opts['dnorm'])\n pheromone_exponent = float(opts['pexp'])\n pheromone_decay = float(opts['pdecay'])\n\n d = len(tspdata)\n\n # pheromone[i][j]: on path from node i to node j\n pheromones = [[0 for i in range(d)] for j in range(d)]\n min_travelled = -1\n\n for i in range(n):\n # New ant!\n point = 0\n course = [point]\n points_remaining = list(range(1, d))\n\n while len(points_remaining) > 0:\n dists = tspdata.all_dists(point)\n weights = [distance_function(dists[i], distance_exponent, distance_norm) \n for i in points_remaining]\n weights = [weights[i] * (1 + \n pheromones[point][points_remaining[i]]) for i in range(len(weights))]\n point = weighted_choice(points_remaining, weights)\n\n course.append(point)\n points_remaining.remove(point)\n\n \n travelled = tspdata.course_distance(course)\n (travelled, course) = single_edge_swapped(travelled, course, tspdata, opts) \n if i == 0:\n pheromone_norm = travelled\n\n if travelled < min_travelled or min_travelled < 0:\n min_travelled = travelled\n min_course = course\n\n # Decay pheromones\n pheromones = [[pheromones[i][j]*pheromone_decay for i in range(d)] for j in range(d)]\n\n # Add pheromones\n for i in range(len(course)-1):\n pheromones[course[i]][course[i+1]] += (pheromone_norm / travelled)**pheromone_exponent\n\n return (min_travelled, min_course)\n\ndef distance_function(dist, distance_exponent, distance_norm):\n return (distance_norm / dist)**distance_exponent\n\n","sub_path":"travelling-salesman/src/tspsolver_antcolony.py","file_name":"tspsolver_antcolony.py","file_ext":"py","file_size_in_byte":2241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"202421522","text":"import collections\nimport json\nimport uuid\n\nfrom mock import patch\nfrom nose.tools import eq_\nimport waffle\n\nfrom django.conf import settings\n\nfrom amo.tests import TestCase\nfrom mkt.api.tests.test_oauth import BaseOAuth, get_absolute_url, OAuthClient\nfrom mkt.api.base import list_url, get_url\nfrom mkt.constants.apps import INSTALL_TYPE_REVIEWER\nfrom mkt.site.fixtures import fixture\nfrom mkt.webapps.models import Installed\nfrom users.models import UserProfile\n\n\n@patch.object(settings, 'SITE_URL', 'http://api/')\nclass TestAccount(BaseOAuth):\n fixtures = fixture('user_2519', 'user_10482', 'webapp_337141')\n\n def setUp(self):\n super(TestAccount, self).setUp(api_name='account')\n self.list_url = list_url('settings')\n self.get_url = get_url('settings', '2519')\n self.anon = OAuthClient(None, api_name='account')\n self.user = UserProfile.objects.get(pk=2519)\n\n def test_verbs(self):\n self._allowed_verbs(self.list_url, ())\n self._allowed_verbs(self.get_url, ('get', 'patch', 'put'))\n\n def test_not_allowed(self):\n eq_(self.anon.get(self.get_url).status_code, 401)\n\n def test_allowed(self):\n res = self.client.get(self.get_url)\n eq_(res.status_code, 200, res.content)\n data = json.loads(res.content)\n eq_(data['display_name'], self.user.display_name)\n eq_(data['installed'], [])\n\n def test_install(self):\n ins = Installed.objects.create(user=self.user, addon_id=337141)\n res = self.client.get(self.get_url)\n eq_(res.status_code, 200, res.content)\n data = json.loads(res.content)\n eq_(data['installed'],\n [get_absolute_url(get_url('app', ins.addon.pk), absolute=False)])\n\n def test_install_reviewer(self):\n Installed.objects.create(user=self.user, addon_id=337141,\n install_type=INSTALL_TYPE_REVIEWER)\n res = self.client.get(self.get_url)\n eq_(res.status_code, 200, res.content)\n data = json.loads(res.content)\n eq_(data['installed'], [])\n\n def test_other(self):\n eq_(self.client.get(get_url('settings', '10482')).status_code, 403)\n\n def test_own(self):\n res = self.client.get(get_url('settings', 'mine'))\n eq_(res.status_code, 200)\n data = json.loads(res.content)\n eq_(data['display_name'], self.user.display_name)\n\n def test_patch(self):\n res = self.client.patch(self.get_url,\n data=json.dumps({'display_name': 'foo'}))\n eq_(res.status_code, 202)\n user = UserProfile.objects.get(pk=self.user.pk)\n eq_(user.display_name, 'foo')\n\n def test_put(self):\n res = self.client.put(self.get_url,\n data=json.dumps({'display_name': 'foo'}))\n eq_(res.status_code, 204)\n user = UserProfile.objects.get(pk=self.user.pk)\n eq_(user.display_name, 'foo')\n eq_(user.username, self.user.username) # Did not change.\n\n def test_patch_extra_fields(self):\n res = self.client.patch(self.get_url,\n data=json.dumps({'display_name': 'foo',\n 'username': 'bob'}))\n eq_(res.status_code, 202)\n user = UserProfile.objects.get(pk=self.user.pk)\n eq_(user.display_name, 'foo') # Got changed successfully.\n eq_(user.username, self.user.username) # Did not change.\n\n def test_patch_other(self):\n res = self.client.patch(get_url('settings', '10482'),\n data=json.dumps({'display_name': 'foo'}))\n eq_(res.status_code, 403)\n\nbrowserid_url = 'http://firepla.ce:8675/'\n\n\nclass FakeUUID(object):\n hex = '000000'\n\n\n@patch.object(settings, 'FIREPLACE_SECRET_KEY', 'gubbish')\n@patch.object(settings, 'FIREPLACE_URL', browserid_url)\nclass TestLoginHandler(TestCase):\n def setUp(self):\n super(TestLoginHandler, self).setUp()\n self.list_url = get_absolute_url(list_url('login'), api_name='account')\n self.create_switch('browserid-login')\n\n @patch.object(uuid, 'uuid4', FakeUUID)\n @patch('requests.post')\n def test_login_success(self, http_request):\n FakeResponse = collections.namedtuple('FakeResponse',\n 'status_code content')\n http_request.return_value = FakeResponse(200, json.dumps(\n {'status': 'okay',\n 'email': 'cvan@mozilla.com'}))\n res = self.client.post(self.list_url,\n dict(assertion='fake-assertion',\n audience='fakeamo.org'))\n eq_(res.status_code, 200)\n data = json.loads(res.content)\n eq_(data['token'],\n 'cvan@mozilla.com,95c9063d9f249aacfe5697fc83192ed6480c01463e2a80b3'\n '5af5ecaef11754700f4be33818d0e83a0cfc2cab365d60ba53b3c2b9f8f6589d1'\n 'c43e9bbb876eef0,000000')\n\n @patch('requests.post')\n def test_login_failure(self, http_request):\n FakeResponse = collections.namedtuple('FakeResponse',\n 'status_code content')\n http_request.return_value = FakeResponse(200, json.dumps(\n {'status': 'busted'}))\n res = self.client.post(self.list_url,\n dict(assertion='fake-assertion',\n audience='fakeamo.org'))\n eq_(res.status_code, 401)\n","sub_path":"mkt/account/tests/test_api.py","file_name":"test_api.py","file_ext":"py","file_size_in_byte":5465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"283878478","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nDNA_SIZE = 10 # DNA length\nPOP_SIZE = 100 # population size\nCROSS_RATE = 0.8 # mating probability (DNA crossover)\nMUTATION_RATE = 0.003 # mutation probability\nN_GENERATIONS = 200\nX_BOUND = [0, 5] # x upper and lower bounds\n\npop = np.random.randint(0, 2, (1, DNA_SIZE)).repeat(POP_SIZE, axis=0) # initialize the pop DNA\npop1 = np.random.randint(0, 2, (1, DNA_SIZE)).repeat(POP_SIZE, axis=0) # initialize the pop DNA\n\nplt.ion() # something about plotting\nx = np.linspace(*X_BOUND, 200)\n\ndef F(x): return np.sin(10*x)*x + np.cos(2*x)*x\n\ndef translateDNA(pop): return pop.dot(2 ** np.arange(DNA_SIZE)[::-1]) / (2**DNA_SIZE-1) * X_BOUND[1]\n\ndef get_fitness(pred): return pred + 1e-3 - np.min(pred)\n\ndef select(pop, fitness): # nature selection wrt pop's fitness\n idx = np.random.choice(np.arange(POP_SIZE), size=POP_SIZE, replace=True,\n p=fitness/fitness.sum())\n return pop[idx]\n\n\nresult=F(pop.dot(2 ** np.arange(DNA_SIZE)[::-1]) / (2**DNA_SIZE-1) * X_BOUND[1])\nprint((2**DNA_SIZE-1))\nprint(pop)\nprint(2 ** np.arange(DNA_SIZE)[::-1])\nprint(pop.dot(2 ** np.arange(DNA_SIZE)[::-1]))\nprint(result)\n\nplt.plot(x,F(x))\n\nF_values = F(translateDNA(pop)) # compute function value by extracting DNA\nF_values1 = F(translateDNA(pop1))\nprint(F_values)\n# something about plotting\nif 'sca' in globals(): sca.remove()\n\nplt.pause(0.05)\nfitness = get_fitness(F_values)\nfitness1 = get_fitness(F_values1)\nprint(fitness)\nprint(pop[np.argmax(fitness), :])\npop = select(pop, fitness)\npop1 = select(pop1, fitness1)\nprint(pop)\n\n# for parent in pop:\n# dna=[1,0,1,0,0,1,1,1,1,1]\n# for point in range(DNA_SIZE):\n# parent[point]=dna[point]\n#\n# print(F(translateDNA(pop)))\nsca = plt.scatter(translateDNA(pop), F(translateDNA(pop)), s=200, lw=0, c='red', alpha=0.5)\n\ni_ = np.random.randint(0, POP_SIZE, size=1)[0]\ncross_points = np.random.randint(0, 2, size=DNA_SIZE).astype(np.bool)\nprint(i_)\nprint(cross_points)\nprint(pop[i_])\nprint(pop1[i_])\nprint(pop[i_][cross_points])\nprint(pop1[i_][cross_points])\nprint(pop1[i_, cross_points])\npop[i_][cross_points] = pop1[i_, cross_points]\nprint(pop[:])\nplt.ioff()\nplt.show()","sub_path":"tutorial-contents/Genetic Algorithm/nptest.py","file_name":"nptest.py","file_ext":"py","file_size_in_byte":2234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"560503503","text":"from __future__ import print_function\nfrom __future__ import absolute_import\nfrom __future__ import division\n\n\n__all__ = [\n 'network_split_edge',\n]\n\n\ndef network_split_edge(network, u, v, t=0.5):\n \"\"\"Split and edge by inserting a vertex along its length.\n\n Parameters\n ----------\n u : str\n The key of the first vertex of the edge.\n v : str\n The key of the second vertex of the edge.\n t : float\n The position of the inserted vertex.\n\n Returns\n -------\n str\n The key of the inserted vertex.\n\n Raises\n ------\n ValueError\n If `t` is not `0 <= t <= 1`.\n Exception\n If `u` and `v` are not neighbors.\n\n Examples\n --------\n .. plot::\n :include-source:\n\n import compas\n from compas.datastructures import Network\n from compas.plotters import NetworkPlotter\n\n network = Network.from_obj(compas.get('lines.obj'))\n\n u, v = network.get_any_edge()\n\n a = network.split_edge(u, v)\n\n lines = []\n for u, v in network.edges():\n lines.append({\n 'start': network.vertex_coordinates(u, 'xy'),\n 'end' : network.vertex_coordinates(v, 'xy'),\n 'arrow': 'end',\n 'width': 4.0,\n 'color': '#00ff00'\n })\n\n plotter = NetworkPlotter(network)\n\n plotter.draw_lines(lines)\n\n plotter.draw_vertices(\n radius=0.2,\n text={key: key for key in network.vertices()},\n facecolor={key: '#ff0000' for key in (a,)}\n )\n plotter.draw_edges()\n\n plotter.show()\n\n \"\"\"\n if t <= 0.0:\n raise ValueError('t should be greater than 0.0.')\n if t >= 1.0:\n raise ValueError('t should be smaller than 1.0.')\n\n # the split vertex\n x, y, z = network.edge_point(u, v, t)\n w = network.add_vertex(x=x, y=y, z=z)\n\n network.add_edge(u, w)\n network.add_edge(w, v)\n\n if v in network.edge[u]:\n del network.edge[u][v]\n elif u in network.edge[v]:\n del network.edge[v][u]\n else:\n raise Exception\n\n # split half-edge UV\n network.halfedge[u][w] = None\n network.halfedge[w][v] = None\n del network.halfedge[u][v]\n\n # split half-edge VU\n network.halfedge[v][w] = None\n network.halfedge[w][u] = None\n del network.halfedge[v][u]\n\n # return the key of the split vertex\n return w\n\n\n# ==============================================================================\n# Main\n# ==============================================================================\n\nif __name__ == \"__main__\":\n\n import compas\n from compas.datastructures import Network\n from compas.plotters import NetworkPlotter\n\n network = Network.from_obj(compas.get('lines.obj'))\n\n a = network.split_edge(0, 22)\n b = network.split_edge(2, 30)\n c = network.split_edge(17, 21)\n d = network.split_edge(28, 16)\n\n lines = []\n for u, v in network.edges():\n lines.append({\n 'start': network.vertex_coordinates(u, 'xy'),\n 'end' : network.vertex_coordinates(v, 'xy'),\n 'arrow': 'end',\n 'width': 4.0,\n 'color': '#00ff00'\n })\n\n plotter = NetworkPlotter(network)\n\n plotter.draw_vertices(radius=0.2,\n facecolor={key: '#ff0000' for key in (a, b, c, d)},\n text={key: key for key in network.vertices()})\n\n plotter.draw_edges(color={(u, v): '#cccccc' for u, v in network.edges()})\n\n plotter.draw_lines(lines)\n\n plotter.show()\n","sub_path":"src/compas/datastructures/network/operations/split.py","file_name":"split.py","file_ext":"py","file_size_in_byte":3571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"80718646","text":"from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nimport time\nimport os\n\nclass Bot():\n def __init__(self):\n self.driver = webdriver.Chrome()\n self.driver.set_window_position(0, 0)\n self.driver.set_window_size(0, 0)\n def login(self):\n self.driver.get('http://orteil.dashnet.org/cookieclicker/')\n def click(self):\n cookie = self.driver.find_element_by_xpath('//*[@id=\"bigCookie\"]')\n cookie.click()\n\ndef main():\n os.system('clear')\n bot = Bot()\n bot.login()\n time.sleep(5)\n c = 0\n while True:\n bot.click()\n c = c+1\n print(c)\nmain()","sub_path":"cookie.py","file_name":"cookie.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"429621604","text":"from ast import literal_eval\n\nimport telebot\n\nTOKEN = \"734820541:AAEkVd8f-haWGhYPmiB3AHXbJtAqMOcNWqY\"\nbot = telebot.TeleBot(TOKEN)\n\nwhite_list = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '0'] + ['*', '/', '+', '-', '%', ' '] + ['(', ')']\n\n\n@bot.message_handler(commands=['start'])\ndef start_handler(message):\n bot.send_message(message.chat.id, 'Hello:) I can say you results of expressions.\\n', parse_mode='Markdown')\n\n\n@bot.message_handler(content_types=['text'])\ndef text_handler(message):\n text = message.text.lower()\n chat_id = message.chat.id\n good = all(map(lambda x: x in white_list, text))\n print(good, text, list(map(lambda x: x in white_list, text)))\n\n if not good:\n print(\"It was not good\")\n bot.send_message(chat_id, \"I can't understand\")\n else:\n try:\n print(\"I want to do it\", text)\n result = eval(str(text))\n print(\"I did it\")\n bot.send_message(chat_id, text + '=' + str(result))\n except:\n print(\"I cant\")\n bot.send_message(chat_id, \"I can't understand\")\n\n\nbot.polling(none_stop=True)\n\n","sub_path":"example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"292116015","text":"from pptx import Presentation\nfrom pptx.util import Inches,Pt\n#安装使用pip install pptx\n\nppt = Presentation()\nslide = ppt.slides.add_slide(ppt.slide_layouts[1]) #在ppt中插入一个幻灯片\n\nbody_shape = slide.shapes.placeholders\n# body_shape[0].text = '这是占位符[0]'\n# body_shape[1].text = '这是占位符[1]'\n\ntitle_shape = slide.shapes.title\ntitle_shape.text = '这是标题'\n# subtitle = slide.shapes.placeholders[1] #取出本页第二个文本框\n# subtitle.text = '这是文本框‘ #在第二个文本框中写入文字\n\nnew_paragraph = body_shape[1].text_frame.add_paragraph()\nnew_paragraph.text = '新段落'\nnew_paragraph.font.bold = True #字体加粗\nnew_paragraph.font.italic = True #文字斜体\nnew_paragraph.font.size = Pt(15) #文字大小\nnew_paragraph.font.underline = True #文字下划线\n\n#添加新的文本框\nleft = Inches(2)\ntop = Inches(2)\nwidth = Inches(3)\nheight = Inches(3)\n\ntextbox = slide.shapes.add_textbox(left, top, width, height)\ntextbox.text = '这是新文本框'\nnew_para = textbox.text_frame.add_paragraph()\nnew_para.text = '这是新文本框里的第二段'\n\n#插入图片\nleft = Inches(2)\ntop = Inches(2)\nwidth = Inches(3)\nheight = Inches(3)\n\npic = slide.shapes.add_picture('b.jpg', left, top, width, height)\n\n#插入表格\nrows = 2\ncols = 2\nleft = Inches(1)\ntop = Inches(1)\nwidth = Inches(4)\nheight = Inches(4)\ntable = slide.shapes.add_table(rows, cols, left, width, height)\ntable.columns[0].width = Inches(1)\ntable.columns[1].width = Inches(3)\ntable.cell(0, 0).text = '1'\ntable.cell(0, 1).text = '2'\ntable.cell(1, 0).text = '3'\ntable.cell(1, 1).text = '4'\n\nppt.save('test.pptx')\n","sub_path":"自动办公/3、ppt/1、ppt自动生成.py","file_name":"1、ppt自动生成.py","file_ext":"py","file_size_in_byte":1653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"315398946","text":"\"\"\"\nPerformance logging utility functions.\n\"\"\"\nfrom typing import List\nimport datetime\nfrom dataclasses import dataclass, field\n\nfrom .logentry import LogEntry, RequestLog, PerfLog, UNDEFINED_XPAIR, MARK_END, MARK_START, ArtilleryLog\nfrom . import helper as cg\nfrom .helper import group_by_function, group_by, uniq_by\n\n\n@dataclass\nclass Call:\n id: tuple\n function: str\n duration: datetime.timedelta\n entries: List[LogEntry]\n calls: List[\"Call\"] = field(default_factory=lambda: list())\n\n @property\n def start_time(self):\n if len(self.entries) == 0:\n return None\n return sorted(self.entries, key=lambda e: e.timestamp)[0]\n\n @property\n def end_time(self):\n if len(self.entries) == 0:\n return None\n return sorted(self.entries, key=lambda e: e.timestamp)[-1]\n\n @property\n def log_duration(self):\n if self.end_time is None or self.start_time is None:\n return None\n return self.end_time - self.start_time\n\n def __repr__(self):\n fmt_id = \"-\".join(map(str, self.id))\n call_str = f\"{self.function}:{fmt_id}:({len(self.calls)} subcalls)\"\n all_calls = [call_str]\n for subcall in self.calls:\n all_calls.append(\" \" + repr(subcall))\n return \"\\n\".join(all_calls)\n\n\ndef artillery_to_call(entries: List[LogEntry]) -> Call:\n id, = uniq_by(entries, lambda e: e.id)\n called_id, = uniq_by(entries, lambda e: e.called_id)\n start = cg.get_one(entries, lambda e: e.event[\"type\"] == \"before\")\n end = cg.get_one(entries, lambda e: e.event[\"type\"] == \"after\")\n duration = end.timestamp - start.timestamp\n function, = uniq_by(entries, lambda e: e.url)\n\n calls = [\n Call(id=called_id, function=function, entries=[], duration=duration)\n ]\n return Call(\n id=id,\n function=\"artillery\",\n duration=duration,\n calls=calls,\n entries=entries,\n )\n\n\ndef single_request_to_call(entries: List[LogEntry], id=None, function=None) -> Call:\n if id is None:\n id, = uniq_by(entries, lambda e: e.id)\n if function is None:\n function, = uniq_by(entries, lambda e: e.fn[\"name\"])\n\n measure = cg.get_one(entries, lambda e: e.perf[\"entryType\"] == \"measure\")\n duration = datetime.timedelta(milliseconds=measure.perf[\"duration\"])\n\n return Call(\n id=id,\n function=function,\n duration=duration,\n entries=entries,\n )\n\n\ndef get_rpc_out_function(entries):\n fun, = uniq_by(entries, lambda e: e.perf_type_data.split(\":\")[0])\n return fun\n\n\ndef get_rpc_out_id(entries):\n id, = uniq_by(entries, lambda e: tuple(e.perf_type_data.split(\":\")[1].split(\"-\")))\n return id\n\n\ndef request_to_call(entries: List[LogEntry]) -> Call:\n perf_entries = [e for e in entries if isinstance(e, PerfLog)]\n incoming_entries = [e for e in perf_entries if PerfLog.is_incoming_entry(e)]\n outgoing_entries = [e for e in perf_entries if PerfLog.is_outgoing_entry(e)]\n call = single_request_to_call(incoming_entries)\n call.calls = [\n single_request_to_call(e, get_rpc_out_id(e), get_rpc_out_function(e))\n for e in group_by(outgoing_entries, lambda e: e.perf_type_data).values()\n ]\n request_entries = [e for e in entries if isinstance(e, RequestLog)]\n if len(request_entries) > 1:\n raise ValueError(f\"Too many request entries in single group: {request_entries}\")\n call.entries += request_entries\n return call\n\n\ndef misc_to_call(entries: List[LogEntry]) -> Call:\n return Call(\n id=(None, UNDEFINED_XPAIR),\n function=None,\n duration=None,\n entries=entries,\n )\n\n\ndef id_groups_to_call(entry_id, entries: List[LogEntry]) -> Call:\n if entry_id[0] is None:\n return misc_to_call(entries)\n\n if all(isinstance(e, ArtilleryLog) for e in entries):\n return artillery_to_call(entries)\n\n return request_to_call(entries)\n\n\ndef url_to_function_name(name):\n # AWS URL is DOMAIN/dev/PATH\n if \"/dev/\" in name:\n parts = name.split(\"/dev/\", 1)[1].split(\"/\")\n else:\n parts = name.split(\"/\", 1)[1:]\n\n parts = [p for p in parts if p]\n\n new_name = \"/\".join(parts[:2])\n\n return new_name\n\n\ndef normalize_call_names(calls):\n id_names = group_by(\n [c for c in calls] + [s for c in calls for s in c.calls],\n lambda c: c.id\n )\n\n # built id name translation mapping\n id_translated = {}\n for key, id_calls in id_names.items():\n names = {c.function for c in id_calls}\n if len(names) == 1:\n name, = names\n if \"http\" in name:\n name = url_to_function_name(name)\n id_translated[key] = name\n elif len(names) > 1:\n name, = [url_to_function_name(n) for n in names if \"http\" in n]\n id_translated[key] = name\n\n # rename calls\n for call in calls:\n call.function = id_translated[call.id]\n for subcall in call.calls:\n subcall.function = id_translated[subcall.id]\n\n return calls\n\n\ndef create_requestgroups(data: List[LogEntry]) -> List[Call]:\n \"\"\"Create a list of logs based on request behavior.\"\"\"\n context_id_groups = group_by(data, lambda e: e.id)\n calls = [\n id_groups_to_call(id, entries)\n for id, entries in context_id_groups.items()\n ]\n\n # remove calls without a context ID, these are most probably platform\n # messages\n len_all_calls = len(calls)\n calls = [c for c in calls if c.id[0] is not None]\n print(f\"Keep with context ids only: {len(calls)}/{len_all_calls}\")\n\n # normalize artillery call urls\n calls = normalize_call_names(calls)\n return calls\n","sub_path":"faastermetrics/calls.py","file_name":"calls.py","file_ext":"py","file_size_in_byte":5663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"30522813","text":"from os import path\nfrom bs4 import BeautifulSoup\nimport requests\nimport time\n\n# constants\nBASE_URL = 'https://www.muziker.sk'\nLIST_URL = '/gitary?per=60&page='\nSORT_URL = '&sort_by=price%20asc'\n\nMAX_NUM_PAGES = 200\nITEMS_PER_PAGE = 60\n\nSHORT_SLEEP = 2\nLONG_SLEEP = 4\n\n\n# begin crawling from the last list page\nif path.exists('to_be_crawled.txt'):\n tbc_f = open('to_be_crawled.txt', 'r')\n page_num = int(tbc_f.readline())\n tbc_f.close()\nelse:\n tbc_f = open('to_be_crawled.txt', 'w')\n tbc_f.write(str(1))\n page_num = 1\n\nskipped_f = open('skipped.txt', 'a')\nencod_f = open('encoding_trouble.txt', 'a')\ndiff_f = open('different_encoding.txt', 'a')\n\n# crawl till the last list page\nwhile page_num < MAX_NUM_PAGES:\n current = BASE_URL + LIST_URL + str(page_num) + SORT_URL\n\n r = requests.get(current)\n if r.status_code != 200:\n print(\"!!!!!!!!Could not open the url \" + current+\"!!!!!!!!!!!Skipping\")\n skipped_f.write(current)\n page_num = page_num + 1\n continue\n\n print(\"Getting links from \" + current)\n\n html = r.text\n\n soup = BeautifulSoup(html, 'html.parser')\n content = soup.find('div', {'class': 'product-list'})\n products = content.findAll('div', {'class': 'product-tile'})\n\n links = list()\n\n for div in products:\n link = div.find('a', {'class': 'link-overlay'})\n links.append(link.attrs['href'])\n\n print(len(links))\n\n # open links and save their htmls in products folder\n for link in links:\n r = requests.get(BASE_URL + link)\n\n if r.encoding != 'utf-8':\n print(\"iny encoding\")\n diff_f.write(link + '\\n')\n\n if r.status_code != 200:\n print(\"!!!!!!!!Could not open the url \" + current + \" !!!!!!!!!!Skipping\")\n skipped_f.write(link + '\\n')\n continue\n\n print(link)\n name = link.replace('/', '_')\n f = open('products/' + name[1:] + '.html', 'w', encoding=\"utf8\")\n try:\n f.write(r.text)\n except:\n print(\"!!!!!!!!!!!!!!!!!!!!!!!!!Problem saving the file !!!!!!!!!!!!!!!!!!!!Skipping\")\n encod_f.write(link + '\\n')\n f.write(r.content)\n finally:\n f.close()\n\n time.sleep(SHORT_SLEEP)\n\n page_num = page_num + 1\n tbc_f = open('to_be_crawled.txt', 'w')\n tbc_f.write(str(page_num))\n tbc_f.close()\n time.sleep(LONG_SLEEP)\n\nskipped_f.close()\nencod_f.close()\ndiff_f.close()\n","sub_path":"crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":2452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"530354583","text":"#!/usr/bin/env python\nfrom setuptools import setup, __version__\nimport sys\n\n# backports.csv required for Python 2.7\nINSTALL_REQUIRES = []\nEXTRAS_REQUIRE = {}\n\n# conditionally pass install_requires arg if setuptools older than v18\nif int(__version__.split(\".\", 1)[0]) < 18:\n if sys.version_info[0:2] == (2, 7):\n INSTALL_REQUIRES.append(\"backports.csv\")\n# otherwise pass extra_requires arg\nelse:\n EXTRAS_REQUIRE[\":python_version=='2.7'\"] = [\"backports.csv\"]\n\nsetup(name=\"opencivicdata-divisions\",\n version='2015.04.27',\n py_modules=['opencivicdata.divisions'],\n author=\"James Turk\",\n author_email='jturk@sunlightfoundation.com',\n license=\"BSD\",\n description=\"python opencivicdata library\",\n long_description=\"\",\n url=\"\",\n packages=['opencivicdata.divisions'],\n namespace_packages=['opencivicdata'],\n include_package_data=True,\n install_requires=INSTALL_REQUIRES,\n extras_require=EXTRAS_REQUIRE,\n platforms=[\"any\"],\n classifiers=[\"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Natural Language :: English\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n ],\n )\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"287197600","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# File name: read_c04.py\n\"\"\"\nCreated on Tue Dec 12 16:10:27 2017\n\n@author: Neo(liuniu@smail.nju.edu.cn)\n\"\"\"\n\nimport numpy as np\n\n\n# ----------------------------- FUNCTIONS -----------------------------\ndef read_c04(C04_file):\n '''Fetch the C04 series.\n\n\n Parameters\n ----------\n C04_file : string\n path and name of C04 data file.\n\n Returns\n ----------\n mjd : array, float\n epoch in modified Julian date\n Xp : array, float\n Xp position of CIP in ITRS, mas\n Yp : array, float\n Yp position of CIP in ITRS, mas\n U : array, float\n UT1 - UTC, ms\n # LOD : array, float\n # length of day, ms\n dX : array, float\n Xp component of CPO, mas\n dY : array, float\n Yp component of CPO, mas\n XpErr : array, float\n formal uncertainty of Xp, mas\n YpErr : array, float\n formal uncertainty of Yp, mas\n UErr : array, float\n formal uncertainty of U, ms\n # LODErr : array, float\n # formal uncertainty of LOD, ms\n dXErr : array, float\n formal uncertainty of dX, mas\n dYErr : array, float\n formal uncertainty of dY, mas\n '''\n\n mjd, Xp, Yp, U = np.genfromtxt(C04_file, skip_header=14,\n usecols=np.arange(3, 7), unpack=True)\n dX, dY = np.genfromtxt(C04_file, skip_header=14,\n usecols=(8, 9), unpack=True)\n XpErr, YpErr, UErr = np.genfromtxt(C04_file, skip_header=14,\n usecols=np.arange(10, 13),\n unpack=True)\n dXErr, dYErr = np.genfromtxt(C04_file, skip_header=14,\n usecols=(14, 15),\n unpack=True)\n\n # arc-sec --> mas, second --> ms\n Xp, Yp, U = Xp * 1000, Yp * 1000, U * 1000\n XpErr, YpErr, UErr = XpErr * 1000, YpErr * 1000, UErr * 1000\n\n dX, dY = dX * 1000, dY * 1000\n dXErr, dYErr = dXErr * 1000, dYErr * 1000\n\n # print(mjd[0], Xp[0], Yp[0], U[0], LOD[0], dX[0], dY[0],\n # XpErr[0], YpErr[0], UErr[0], LODErr[0],\n # dXErr[0], dYErr[0])\n\n # return [mjd, Xp, Yp, U, LOD, dX, dY,\n # XpErr, YpErr, UErr, LODErr,\n # dXErr, dYErr]\n\n return mjd, Xp, Yp, U, dX, dY, XpErr, YpErr, UErr, dXErr, dYErr\n\n# --------------------------------- END --------------------------------\n","sub_path":"read_c04.py","file_name":"read_c04.py","file_ext":"py","file_size_in_byte":2431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"538329368","text":"from flask import jsonify, request, current_app, url_for\nfrom sqlalchemy import or_, and_\nfrom . import api\nfrom .errors import bad_request, ok\nfrom ..models import Model, ModelResult\nfrom .. import db\n\n\n@api.route('/models', methods=['GET'])\ndef get_models():\n try:\n model_ids = request.args.getlist(\"model_id\")\n models = db.session.query(Model)\\\n .filter(or_(Model.id.in_(model_ids), len(model_ids) == 0))\\\n .filter(Model.status_id == 1)\\\n .all()\n return jsonify([x.to_json() for x in models])\n except Exception as e:\n return bad_request(e)\n\n\n@api.route('/models', methods=['DELETE'])\ndef delete_models():\n try:\n model_ids = request.args.getlist('model_id')\n db.session.query(ModelResult)\\\n .filter(and_(ModelResult.model_id.in_(model_ids), len(model_ids) != 0))\\\n .filter(ModelResult.status_id == 1)\\\n .update({ModelResult.status_id: 2}, synchronize_session=False)\n db.session.query(Model)\\\n .filter(and_(Model.id.in_(model_ids), len(model_ids) != 0))\\\n .filter(Model.status_id == 1)\\\n .update({Model.status_id: 2}, synchronize_session=False)\n db.session.commit()\n return ok(\"Successfully removed models with id {}\".format(model_ids))\n except Exception as e:\n return bad_request(e)\n","sub_path":"app/api_1_0/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"153166352","text":"import unittest\n\nfrom lbrynet.schema.uri import URI, URIParseError\n\nclaim_id_1 = \"63f2da17b0d90042c559cc73b6b17f853945c43e\"\n\nparsed_uri_matches = [\n (\"test\", URI(\"test\"), False, False, \"test\", None),\n (\"test#%s\" % claim_id_1, URI(\"test\", claim_id=claim_id_1), False, False, \"test\", None),\n (\"test:1\", URI(\"test\", claim_sequence=1), False, False, \"test\", None),\n (\"test$1\", URI(\"test\", bid_position=1), False, False, \"test\", None),\n (\"lbry://test\", URI(\"test\"), False, False, \"test\", None),\n (\"lbry://test#%s\" % claim_id_1, URI(\"test\", claim_id=claim_id_1), False, False, \"test\", None),\n (\"lbry://test:1\", URI(\"test\", claim_sequence=1), False, False, \"test\", None),\n (\"lbry://test$1\", URI(\"test\", bid_position=1), False, False, \"test\", None),\n (\"@test\", URI(\"@test\"), True, True, None, \"@test\"),\n (\"@test#%s\" % claim_id_1, URI(\"@test\", claim_id=claim_id_1), True, True, None, \"@test\"),\n (\"@test:1\", URI(\"@test\", claim_sequence=1), True, True, None, \"@test\"),\n (\"@test$1\", URI(\"@test\", bid_position=1), True, True, None, \"@test\"),\n (\"lbry://@test1:1/fakepath\", URI(\"@test1\", claim_sequence=1, path=\"fakepath\"), True, False, \"fakepath\", \"@test1\"),\n (\"lbry://@test1$1/fakepath\", URI(\"@test1\", bid_position=1, path=\"fakepath\"), True, False, \"fakepath\", \"@test1\"),\n (\"lbry://@test1#abcdef/fakepath\", URI(\"@test1\", claim_id=\"abcdef\", path=\"fakepath\"), True, False, \"fakepath\",\n \"@test1\"),\n (\"@z\", URI(\"@z\"), True, True, None, \"@z\"),\n (\"@yx\", URI(\"@yx\"), True, True, None, \"@yx\"),\n (\"@abc\", URI(\"@abc\"), True, True, None, \"@abc\")\n]\n\nparsed_uri_raises = [\n (\"lbry://\", URIParseError),\n (\"lbry://test:3$1\", URIParseError),\n (\"lbry://test$1:1\", URIParseError),\n (\"lbry://test#x\", URIParseError),\n (\"lbry://test#x/page\", URIParseError),\n (\"lbry://test$\", URIParseError),\n (\"lbry://test#\", URIParseError),\n (\"lbry://test:\", URIParseError),\n (\"lbry://test$x\", URIParseError),\n (\"lbry://test:x\", URIParseError),\n (\"lbry://@test@\", URIParseError),\n (\"lbry://@test:\", URIParseError),\n (\"lbry://test@\", URIParseError),\n (\"lbry://tes@t\", URIParseError),\n (\"lbry://test:1#%s\" % claim_id_1, URIParseError),\n (\"lbry://test:0\", URIParseError),\n (\"lbry://test$0\", URIParseError),\n (\"lbry://test/path\", URIParseError),\n (\"lbry://@test1#abcdef/fakepath:1\", URIParseError),\n (\"lbry://@test1:1/fakepath:1\", URIParseError),\n (\"lbry://@test1:1ab/fakepath\", URIParseError),\n (\"lbry://test:1:1:1\", URIParseError),\n (\"whatever/lbry://test\", URIParseError),\n (\"lbry://lbry://test\", URIParseError),\n (\"lbry://@/what\", URIParseError),\n (\"lbry://abc:0x123\", URIParseError),\n (\"lbry://abc:0x123/page\", URIParseError),\n (\"lbry://@test1#ABCDEF/fakepath\", URIParseError),\n (\"test:0001\", URIParseError),\n (\"lbry://@test1$1/fakepath?arg1&arg2&arg3\", URIParseError)\n]\n\n\nclass TestURIParser(unittest.TestCase):\n\n maxDiff = 4000\n longMessage = True\n\n def test_uri_parse(self):\n for test_string, expected_uri_obj, contains_channel, is_channel, claim_name, channel_name in parsed_uri_matches:\n try:\n # string -> URI\n self.assertEqual(URI.from_uri_string(test_string), expected_uri_obj, test_string)\n # URI -> dict -> URI\n self.assertEqual(URI.from_dict(expected_uri_obj.to_dict()), expected_uri_obj,\n test_string)\n # contains_channel\n self.assertEqual(URI.from_uri_string(test_string).contains_channel, contains_channel,\n test_string)\n # is_channel\n self.assertEqual(URI.from_uri_string(test_string).is_channel, is_channel,\n test_string)\n # claim_name\n self.assertEqual(URI.from_uri_string(test_string).claim_name, claim_name,\n test_string)\n # channel_name\n self.assertEqual(URI.from_uri_string(test_string).channel_name, channel_name,\n test_string)\n\n # convert-to-string test only works if protocol is present in test_string\n if test_string.startswith('lbry://'):\n # string -> URI -> string\n self.assertEqual(URI.from_uri_string(test_string).to_uri_string(), test_string,\n test_string)\n # string -> URI -> dict -> URI -> string\n uri_dict = URI.from_uri_string(test_string).to_dict()\n self.assertEqual(URI.from_dict(uri_dict).to_uri_string(), test_string,\n test_string)\n # URI -> dict -> URI -> string\n self.assertEqual(URI.from_dict(expected_uri_obj.to_dict()).to_uri_string(),\n test_string, test_string)\n except URIParseError as err:\n print(\"ERROR: \" + test_string)\n raise\n\n def test_uri_errors(self):\n for test_str, err in parsed_uri_raises:\n try:\n URI.from_uri_string(test_str)\n except URIParseError:\n pass\n else:\n print(\"\\nSuccessfully parsed invalid url: \" + test_str)\n self.assertRaises(err, URI.from_uri_string, test_str)\n","sub_path":"tests/unit/schema/test_uri.py","file_name":"test_uri.py","file_ext":"py","file_size_in_byte":5416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"473278833","text":"\"\"\"\nReleased under BSD 3-Clause License,\nModifications are Copyright (c) 2019 Cerebras, Inc.\nAll rights reserved.\n\nURL to the original source code: https://github.com/Cerebras/online-normalization\n\nReleased under BSD 3-Clause License,\nModifications are Copyright (c) 2021 Hao-Yuan Chang\nAll rights reserved.\n\n== BSD 3-Clause License ==\n\nRedistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:\n\n1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.\n\n2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.\n\n3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\nimport os\nimport sys\nimport time\nimport shutil\n\nimport torch\nfrom torch import nn\nfrom torch.backends import cudnn\nfrom torch.optim.lr_scheduler import MultiStepLR\n\nfrom model import model as models\nfrom model.norm import norm as norm_layer\n\nimport clab\nimport re\n\nbest_acc1 = 0\n\ndef main_worker(train_loader, val_loader, num_classes, args, cifar=False):\n global best_acc1\n\n scale_lr_and_momentum(args, cifar=cifar)\n \n device = torch.device(f'cuda:{args.gpu}' if torch.cuda.is_available() else 'cpu')\n if args.cpu == 1:\n device = torch.device('cpu')\n\n norm_kwargs = {'mode': args.norm_mode,\n 'alpha_fwd': args.afwd,\n 'alpha_bkw': args.abkw,\n 'ecm': args.ecm,\n 'gn_num_groups': args.gn_num_groups}\n model_kwargs = {'num_classes': num_classes,\n 'norm_layer': norm_layer,\n 'norm_kwargs': norm_kwargs,\n 'cifar': cifar,\n 'kernel_size': 3 if cifar else 7,\n 'stride': 1 if cifar else 2,\n 'padding': 1 if cifar else 3,\n 'inplanes': 16 if cifar else 64}\n # if the user has specified a model, use it instead\n if args.model != '': \n args.arch = args.model\n elif cifar:\n model_kwargs['depth'] = args.depth\n args.arch = 'resnetD'\n \n\n # create model\n if args.pretrained:\n print(\"=> using pre-trained model '{}'\".format(args.arch))\n model = models.__dict__[args.arch](pretrained=True,\n **model_kwargs).to(device)\n else:\n print(\"=> creating model '{}'\".format(args.arch))\n model = models.__dict__[args.arch](**model_kwargs).to(device)\n\n if args.snapshot:\n # activation is a dictionary (since we want exact mapping of layerIDs as keys & python can't find\n # it somehow if it's an array)\n # it records the stats of the activation during both training and evaluation modes\n # all the convolution layers in the model will be labeled as 0...n\n # for each entry, the value is a tuple with these elements:\n # (epoch, training mode?, min value of this layer, max, mean, std)\n activation = {}\n # this is a hook, which will be called at every forward pass\n def get_activation(layerID):\n def hook(model, input, output):\n activation[layerID] += [(epoch,\n model.training,\n layerID,\n float(output.min().detach().data), \n float(output.max().detach().data), \n float(output.mean().detach().data), \n float(output.std().detach().data))]\n return hook\n\n layerID = 0\n # loop through all the layers in the model\n # layer_index labels all layers from top to bottom including non-conv layers\n # layerID labels all conv layers (we will only record their activations)\n for layer_index, (name, param) in enumerate(model.named_modules()):\n # search for all the conv layers\n match = re.search('.*conv.*', name)\n if match:\n # convert the name to a pointer to the actual layer module\n # i.e. convert a string to python module pointer\n layer = clab.multi_getattr(model,name)\n # initation the activation dictionary entry with an empty array \n # for this layerID\n activation[layerID] = []\n # register the hook. This hook function will be executed after each time\n # the forward function in this layer is called\n layer.register_forward_hook(get_activation(layerID))\n # add the name of this \n # recorded_layers+=[(layerID,name,layer_index)]\n layerID += 1\n\n # define loss function (criterion) and optimizer\n criterion = nn.CrossEntropyLoss().to(device)\n\n optimizer = torch.optim.SGD(get_parameter_groups(model, cifar=cifar),\n args.lr, momentum=args.momentum,\n weight_decay=args.weight_decay)\n\n scheduler = MultiStepLR(optimizer,\n milestones=args.lr_milestones,\n gamma=args.lr_multiplier)\n logOverwrite = True\n # optionally resume from a checkpoint\n if args.resume:\n if os.path.isfile(args.resume):\n print(\"=> loading checkpoint '{}'\".format(args.resume))\n checkpoint = torch.load(args.resume)\n args.start_epoch = checkpoint['epoch']\n best_acc1 = checkpoint['best_acc1']\n model.load_state_dict(checkpoint['state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n scheduler.load_state_dict(checkpoint['scheduler'])\n print(\"=> loaded checkpoint '{}' (epoch {})\"\n .format(args.resume, checkpoint['epoch']))\n logOverwrite = False\n else:\n print(\"=> no checkpoint found at '{}'\".format(args.resume))\n\n cudnn.benchmark = False if args.seed else True\n \n if args.evaluate:\n # if the evaluate flag is on, search for the model file at sim dir\n modelpath = f\"{args.logdir}/{args.name}/model_best.pth.tar\"\n if os.path.isfile(modelpath):\n print(\"=> loading checkpoint '{}'\".format(modelpath))\n checkpoint = torch.load(modelpath)\n args.start_epoch = checkpoint['epoch']\n best_acc1 = checkpoint['best_acc1']\n model.load_state_dict(checkpoint['state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n scheduler.load_state_dict(checkpoint['scheduler'])\n print(\"=> loaded checkpoint '{}' (epoch {})\"\n .format(modelpath, checkpoint['epoch']))\n else:\n print(\"=> no checkpoint found at '{}'\".format(modelpath))\n # run inference with the validation dataset\n validate(val_loader, model, criterion, device, args)\n return\n\n # only write to log if we are in training mode\n notes = '''\n TBTime = training time for a single batch\n TDTime = training time for loading one batch of data\n TLoss = training loss\n TAcc@1 = training accuracy @1\n TAcc@5 = training accuracy @5\n VTime = valitation batch time\n VLoss = valitation loss\n VAcc@1 = valitation accuracy @ 1\n VAcc@5 = valitation accuracy @ 5\n '''\n # logger for hdf5 and tensorboard is activated only in training mode.\n if not args.evaluate:\n logger = clab.H5Logger(f\"{args.logdir}/{args.name}/measurements.h5\",clean=logOverwrite,args=args,notes=notes)\n tblogger = clab.TBlogger(f\"{args.logdir}/{args.name}\",clean=logOverwrite) # this is a directory\n \n for epoch in range(args.start_epoch, args.epochs):\n if epoch: scheduler.step()\n print(model)\n\n # train for one epoch\n testRecord=train(train_loader, model, criterion, optimizer, epoch, device, args)\n if not args.evaluate:\n tblogger.log(testRecord,epoch) # write record to tensorboard\n logger.log(testRecord,epoch)\n # evaluate on validation set\n acc1,valRecord = validate(val_loader, model, criterion, device, args)\n if not args.evaluate:\n tblogger.log(valRecord,epoch) # write record to tensorboard\n logger.log(valRecord,epoch)\n # remember best acc@1 and save checkpoint\n is_best = acc1 > best_acc1\n best_acc1 = max(acc1, best_acc1)\n\n save_checkpoint({\n 'epoch': epoch + 1,\n 'arch': args.arch,\n 'state_dict': model.state_dict(),\n 'best_acc1': best_acc1,\n 'optimizer': optimizer.state_dict(),\n 'scheduler': scheduler.state_dict(),\n }, is_best, args)\n if args.snapshot and not args.evaluate:\n # logging the activations of the last layer to tensorboard\n layerID = sorted(activation.keys())[-1]\n # ['epoch','training','layerID','min','max','mean','std']\n # 0 1 2 3 4 5 6\n entry=activation[layerID][-1]\n tblogger.log([('Activation @ last layer (min)',entry[3])],epoch)\n tblogger.log([('Activation @ last layer (avg)',entry[5])],epoch)\n tblogger.log([('Activation @ last layer (max)',entry[4])],epoch)\n tblogger.log([('Activation @ last layer (std)',entry[6])],epoch)\n\n # logging the statistics of the weights to tensorboard\n maximums, averages, minimums = [],[],[]\n for param in model.parameters():\n maximums += [param.max()]\n averages += [param.mean()]\n minimums += [param.min()]\n maximum=torch.stack(maximums).max()\n average=torch.stack(averages).mean()\n minimum=torch.stack(minimums).min()\n tblogger.log([('All layer param (max)',maximum)],epoch)\n tblogger.log([('All layer param (avg)',average)],epoch)\n tblogger.log([('All layer param (min)',minimum)],epoch)\n\n if not args.evaluate:\n logger.close()\n tblogger.close()\n if args.snapshot:\n logger.activationSnapshot(activation)\n \n \ndef train(train_loader, model, criterion, optimizer, epoch, device, args):\n logfile = f\"{args.logdir}/{args.name}/train.log\"\n batch_time = AverageMeter('TBTime', ':6.3f') # training time for a single batch\n data_time = AverageMeter('TDTime', ':6.3f') # training time for loading one batch of data\n losses = AverageMeter('TLoss', ':.4e') # training loss\n top1 = AverageMeter('TAcc@1', ':6.2f') # training accuracy @1\n top5 = AverageMeter('TAcc@5', ':6.2f') # training accuracy @5\n progress = ProgressMeter(len(train_loader), batch_time, data_time, losses,\n top1, top5, prefix=\"Epoch: [{}]\".format(epoch),logfile=logfile)\n\n # switch to train mode\n model.train()\n\n end = time.time()\n for i, (input, target) in enumerate(train_loader):\n # measure data loading time\n data_time.update(time.time() - end)\n\n input = input.to(device)\n target = target.to(device)\n\n # compute output\n output = model(input)\n loss = criterion(output, target)\n\n # measure accuracy and record loss\n acc1, acc5, = accuracy(output, target, topk=(1, 5))\n losses.update(loss.item(), input.size(0))\n top1.update(acc1[0], input.size(0))\n top5.update(acc5[0], input.size(0))\n\n # compute gradient and do SGD step\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % args.print_freq == 0:\n progress.print(i,args)\n\n return [(meter.name, meter.avg) for meter in progress.meters]\n\ndef validate(val_loader, model, criterion, device, args):\n logfile = f\"{args.logdir}/{args.name}/test.log\"\n batch_time = AverageMeter('VTime', ':6.3f') # valitation batch time\n losses = AverageMeter('VLoss', ':.4e') # valitation loss\n top1 = AverageMeter('VAcc@1', ':6.2f') # valitation accuracy @ 1\n top5 = AverageMeter('VAcc@5', ':6.2f') # valitation accuracy @ 5\n progress = ProgressMeter(len(val_loader), batch_time, losses, top1, top5,\n prefix='Test: ',logfile=logfile)\n\n # switch to evaluate mode\n model.eval()\n\n with torch.no_grad():\n end = time.time()\n for i, (input, target) in enumerate(val_loader):\n input = input.to(device)\n target = target.to(device)\n\n # compute output\n output = model(input)\n loss = criterion(output, target)\n\n # measure accuracy and record loss\n acc1, acc5, = accuracy(output, target, topk=(1, 5))\n losses.update(loss.item(), input.size(0))\n top1.update(acc1[0], input.size(0))\n top5.update(acc5[0], input.size(0))\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % args.print_freq == 0:\n progress.print(i,args)\n\n # TODO: this should also be done with the ProgressMeter\n with open(logfile,'a+') as f:\n f.write(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'\n .format(top1=top1, top5=top5)+'\\n')\n print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'\n .format(top1=top1, top5=top5))\n return top1.avg, [(meter.name, meter.avg) for meter in progress.meters]\n \n\n\ndef save_checkpoint(state, is_best, args, filename='checkpoint.pth.tar'):\n foldername = f\"{args.logdir}/{args.name}\"\n model_fname = os.path.join(foldername, filename)\n torch.save(state, model_fname)\n if is_best:\n shutil.copyfile(model_fname,\n os.path.join(foldername, 'model_best.pth.tar'))\n\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n def __init__(self, name, fmt=':f'):\n self.name = name\n self.fmt = fmt\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\n def __str__(self):\n fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'\n return fmtstr.format(**self.__dict__)\n\n\nclass ProgressMeter(object):\n def __init__(self, num_batches, *meters, prefix=\"\",logfile):\n self.batch_fmtstr = self._get_batch_fmtstr(num_batches)\n self.meters = meters\n self.prefix = prefix\n self.logfile = logfile\n\n def print(self, batch,args):\n entries = [self.prefix + self.batch_fmtstr.format(batch)]\n entries += [str(meter) for meter in self.meters]\n \n with open(self.logfile,'a+') as f:\n f.write('\\t'.join(entries)+'\\n')\n print('\\t'.join(entries))\n\n def _get_batch_fmtstr(self, num_batches):\n num_digits = len(str(num_batches // 1))\n fmt = '{:' + str(num_digits) + 'd}'\n return '[' + fmt + '/' + fmt.format(num_batches) + ']'\n\n\ndef accuracy(output, target, topk=(1,)):\n \"\"\"\n Computes the accuracy over the k top predictions for the specified values\n of k\n \"\"\"\n with torch.no_grad():\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res\n\n\ndef get_parameter_groups(model, norm_weight_decay=0, cifar=False):\n \"\"\"\n Separate model parameters from scale and bias parameters following norm if\n training imagenet\n \"\"\"\n if cifar:\n return model.parameters()\n\n model_params = []\n norm_params = []\n\n for name, p in model.named_parameters():\n if p.requires_grad:\n if 'fc' not in name and ('norm' in name or 'bias' in name):\n norm_params += [p]\n else:\n model_params += [p]\n\n return [{'params': model_params},\n {'params': norm_params,\n 'weight_decay': norm_weight_decay}]\n\n\ndef scale_lr_and_momentum(args, cifar=False, skip=False):\n \"\"\"\n Scale hyperparameters given the adjusted batch_size from input\n hyperparameters and batch size\n\n Arguements:\n args: holds the script arguments\n cifar: boolean if we are training imagenet or cifar\n skip: boolean skipping the hyperparameter scaling.\n\n \"\"\"\n if skip:\n return args\n\n print('=> adjusting learning rate and momentum. '\n f'Original lr: {args.lr}, Original momentum: {args.momentum}')\n\n std_b_size = 128 if cifar else 256\n \n old_momentum = args.momentum\n args.momentum = old_momentum ** (args.batch_size / std_b_size)\n args.lr = args.lr * (args.batch_size / std_b_size *\n (1 - args.momentum) / (1 - old_momentum))\n\n print(f'lr adjusted to: {args.lr}, momentum adjusted to: {args.momentum}')\n\n return args\n\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":18442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"229396202","text":"#Hide the welcome message from pygame.\nfrom os import environ\nenviron['PYGAME_HIDE_SUPPORT_PROMPT'] = \"hide\"\n\n#Import dependencies.\nimport pygame as pg\nimport math\nimport numpy as np\nimport ModularRoboticsToolkit as mrt\n\n#Used to create an instance of the screen.\ndef createScreen(screen, font):\n\n #Set the window colour and update the display.\n screen.fill((dark))\n pg.display.update()\n pg.display.flip()\n\n #By taking advantage of the symmetry, draw the grid in which\n #the structure can be drawn.\n counter = 0\n while counter <= 1000:\n pg.draw.line(screen, blue, (0, counter), (1000, counter), 2)\n counter+=100\n \n counter = 0\n while counter <= 1000:\n pg.draw.line(screen, blue, (counter, 0), (counter, 1000), 2)\n counter+=100\n\n #Draw the rest of the borders of the grid.\n pg.draw.line(screen, blue, (0, 998), (1000, 998), 2)\n pg.draw.line(screen, lightGrey, (1052, 0), (1052, 1000), 100)\n\n #Draw the navigational arrows.\n down = font.render(str('↑'), False, white)\n up = font.render(str('↓'), False, white)\n screen.blit(down, (1025, 110))\n screen.blit(up, (1025, 210))\n\n #Draw the quit button.\n quit = font.render(str('Q'), False, white)\n screen.blit(quit, (1025, 310))\n\n #Draw the module ID selection buttons.\n voxelTypeOne = font.render(str('1'), False, white)\n screen.blit(voxelTypeOne, (1025, 410))\n voxelTypeTwo = font.render(str('2'), False, white)\n screen.blit(voxelTypeTwo, (1025, 510))\n voxelTypeThree = font.render(str('3'), False, white)\n screen.blit(voxelTypeThree, (1025, 610))\n voxelTypeFour = font.render(str('4'), False, white)\n screen.blit(voxelTypeFour, (1025, 710))\n voxelTypeFive = font.render(str('5'), False, white)\n screen.blit(voxelTypeFive, (1025, 810))\n voxelTypeSix = font.render(str('6'), False, white)\n screen.blit(voxelTypeSix, (1025, 910))\n\n #Update the screen and return it to the main program.\n pg.display.update()\n pg.display.flip()\n return screen\n\n#Draw the newly selected level.\ndef drawLevel(array, levelCounter, screen):\n\n counter = 0\n rowCounter = 0\n\n #Get the array splice that for the desired level.\n arraySplice = array[levelCounter]\n\n #Draw the splice.\n for row in arraySplice:\n for voxel in row:\n\n if voxel != 0:\n pg.draw.rect(screen, mrt.getColourOfModule(voxel), [counter*100+2, rowCounter*100+2, 98, 98])\n counter += 1\n\n counter = 0\n rowCounter += 1\n\n #Update the display.\n pg.display.update()\n pg.display.flip()\n\n#Contains the main loop which runs while a structure is being edited. \ndef makerLoop(screen, levelCounter, array, font):\n\n #Set an initial module type to put down when the user clicks.\n moduleType = 1\n\n #MAIN LOOP\n run = True\n while run == True:\n\n #Update to show changes \n pg.display.update()\n pg.display.flip()\n\n #Draw the menu bar on the right of the screen.\n #This is done in the main loop so that previous level indicators are drawn over.\n pg.draw.rect(screen, lightGrey, [1004, 2, 96, 96])\n\n #Draw the level ID indicator.\n levelID = font.render(str(levelCounter), False, white)\n screen.blit(levelID, (1020, 2))\n\n #Checks the event stack for mouse clicks.\n for event in pg.event.get():\n\n #If click is detected.\n if event.type == pg.MOUSEBUTTONUP:\n pos = pg.mouse.get_pos()\n\n #Get the location of the click.\n y = math.floor(pos[1]/100)\n x = math.floor(pos[0]/100)\n\n #If the click is in the structure display area. \n if x < 10:\n voxelState = array[int(levelCounter), y, x]\n\n #If drawing a module, get the colour, draw the module and edit the array.\n if voxelState == 0:\n array[int(levelCounter), y, x] = moduleType\n x=x*100\n y=y*100\n pg.draw.rect(screen, mrt.getColourOfModule(moduleType), [x+2, y+2, 98, 98])\n\n #If a module exists there already, get rid of it.\n elif voxelState != 0:\n array[int(levelCounter), y, x] = 0\n x=x*100\n y=y*100\n pg.draw.rect(screen, dark, [x+2, y+2, 98, 98])\n\n break\n\n #If the click is in the menu bar respond accordingly.\n elif x == 10:\n\n #If user clicks increase level counter.\n if y == 1:\n\n #Overshoot protection.\n if levelCounter < 9:\n\n #Increase level by one.\n levelCounter += 1\n createScreen(screen, font)\n drawLevel(array, levelCounter, screen)\n\n #If user clicks decrease level counter. \n elif y == 2:\n\n #Undershoot protection.\n if levelCounter > 0:\n\n #Decrease Level counter by one.\n levelCounter -= 1\n createScreen(screen, font)\n drawLevel(array, levelCounter, screen)\n\n #If user wants to quit.\n elif y == 3:\n array = np.transpose(array, (1, 2, 0))\n run = False\n\n #If user selects a module type, choose that module.\n elif y > 3:\n moduleType = y-3\n\n #Detect if user clicks the cross on the window.\n elif event.type == pg.QUIT:\n run = False\n\n #print(array[0])\n pg.quit()\n return array\n\n#Run to start the program.\ndef main():\n\n #Set some colours up.\n #Globally within this program.\n global dark\n dark = (37, 37, 38)\n global blue\n blue = (14, 99, 156)\n global green\n green = (94, 149, 85)\n global lightGrey\n lightGrey = (61, 61, 61)\n global white\n white = (225,225,225)\n\n #Start pygame and the font engine.\n pg.init()\n pg.font.init()\n \n #Set the screen size + title.\n #The array editing screen size is 1000x1000 to make representing a 10x10 array easier.\n screen = pg.display.set_mode((1100, 1000))\n pg.display.set_caption('Structure Maker - ')\n\n #Set the font, level counter and array to store the structure up.\n font = pg.font.SysFont(\"monospace\", 90)\n levelCounter = 0\n array = np.zeros((10,10,10))\n\n #Create the screen, \n screen = createScreen(screen, font)\n array = makerLoop(screen, levelCounter, array, font)\n\n #Return the array to the GUI\n return array\n","sub_path":"MeltSortGrow/StructMaker.py","file_name":"StructMaker.py","file_ext":"py","file_size_in_byte":6897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"392962144","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution:\n # @param two ListNodes\n # @return the intersected ListNode\n def getIntersectionNode(self, headA, headB):\n lengthA = 0\n lengthB = 0\n \n nodeA = headA\n nodeB = headB\n \n while nodeA != None:\n lengthA = lengthA + 1\n nodeA = nodeA.next\n while nodeB != None:\n lengthB = lengthB + 1\n nodeB = nodeB.next\n if lengthA > lengthB:\n diff = lengthA - lengthB\n while diff > 0:\n diff = diff - 1\n headA = headA.next\n elif lengthB > lengthA:\n diff = lengthB - lengthA\n while diff > 0:\n diff = diff - 1\n headB = headB.next\n \n while headA != None and headB != None:\n if headA == headB:\n return headA\n else:\n headA = headA.next\n headB = headB.next\n return None\n \n\n\n# Solution from https://oj.leetcode.com/discuss/17153/python-solution-for-intersection-of-two-singly-linked-lists\nclass Solution:\n # @param two ListNodes\n # @return the intersected ListNode\n def getIntersectionNode(self, headA, headB):\n curA,curB = headA,headB\n lenA,lenB = 0,0\n while curA is not None:\n lenA += 1\n curA = curA.next\n while curB is not None:\n lenB += 1\n curB = curB.next\n curA,curB = headA,headB\n if lenA > lenB:\n for i in range(lenA-lenB):\n curA = curA.next\n elif lenB > lenA:\n for i in range(lenB-lenA):\n curB = curB.next\n while curB != curA:\n curB = curB.next\n curA = curA.next\n return curA\n\n","sub_path":"IntersectionOfTwoLinkedList.py","file_name":"IntersectionOfTwoLinkedList.py","file_ext":"py","file_size_in_byte":1917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"28417691","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Mar 15 16:25:48 2019\r\n\r\n@author: mirco\r\n\"\"\"\r\n\"\"\"intervallo di fiducia per l'attesa\"\"\r\n\"\"\"\"\"\r\nimport numpy as np\r\nimport tavole\r\n\r\n\r\n\r\narrx=np.array([-1.34,1.32,-0.96,0.29,-1.41,0.23,-0.56,-0.32,0.66,1.27])\r\nn=len(arrx)\r\n\r\narry=np.array([4.83,-2.52,-1.79,-2.85,1.45,1.09,1.87,2.03,-2.60])\r\nm=len(arry)\r\n\r\nalfa=0.05\r\n\r\n\r\n\r\n\"\"\"1_mediax \"\"\"\r\ni=0\r\nbuf=0\r\nsum=0\r\nwhile iFisheralfa2 and Fo>): %s\" % (str(e)))\n rospy.logerr(\"Failed to publish collision pointcloud\")\n return False\n\n\ndef attach_sphere(link, name, pose, radius, touch_links=[]):\n aco = AttachedCollisionObject()\n\n co = CollisionObject()\n co.operation = CollisionObject.ADD\n co.id = name\n co.header = pose.header\n sphere = SolidPrimitive()\n sphere.type = SolidPrimitive.SPHERE\n sphere.dimensions = [radius]\n co.primitives = [sphere]\n co.primitive_poses = [pose.pose]\n aco.object = co\n\n aco.link_name = link\n if len(touch_links) > 0:\n aco.touch_links = touch_links\n else:\n aco.touch_links = [link]\n scene._pub_aco.publish(aco)\n\n\ndef attach_cylinder(link, name, pose, height, radius, touch_links=[]):\n aco = AttachedCollisionObject()\n\n co = CollisionObject()\n co.operation = CollisionObject.ADD\n co.id = name\n co.header = pose.header\n cylinder = SolidPrimitive()\n cylinder.type = SolidPrimitive.CYLINDER\n cylinder.dimensions = [height, radius]\n co.primitives = [cylinder]\n co.primitive_poses = [pose.pose]\n aco.object = co\n\n aco.link_name = link\n if len(touch_links) > 0:\n aco.touch_links = touch_links\n else:\n aco.touch_links = [link]\n scene._pub_aco.publish(aco)\n\n\ndef add_object(center, name=\"Object\", radius=0.17):\n pose = PoseStamped()\n pose.header.frame_id = \"/base_link\"\n pose.header.stamp = rospy.Time.now()\n pose.pose = center\n while scene._pub_co.get_num_connections() == 0:\n rospy.sleep(0.01)\n scene.add_sphere(\n name=name,\n pose=pose,\n radius=radius,\n )\n\n\ndef add_object(center, name=\"Object\", radius=0.17):\n pose = PoseStamped()\n pose.header.frame_id = \"/base_link\"\n pose.header.stamp = rospy.Time.now()\n pose.pose = center\n while scene._pub_co.get_num_connections() == 0:\n rospy.sleep(0.01)\n scene.add_sphere(\n name=name,\n pose=pose,\n radius=radius,\n )\n\n\ndef remove_object(name=\"Object\"):\n co = CollisionObject()\n co.operation = CollisionObject.REMOVE\n co.id = name\n co.header.frame_id = \"/base_link\"\n co.header.stamp = rospy.Time.now()\n while scene._pub_co.get_num_connections() == 0:\n rospy.sleep(0.01)\n scene._pub_co.publish(co)\n\n while True:\n rospy.sleep(0.1)\n result, success = get_planning_scene(\n PlanningSceneComponents(\n PlanningSceneComponents.WORLD_OBJECT_NAMES\n + PlanningSceneComponents.WORLD_OBJECT_GEOMETRY\n )\n )\n if not success:\n continue\n found = False\n for object in result.scene.world.collision_objects:\n if object.id == name:\n found = True\n if not found:\n return\n","sub_path":"apc_util/src/apc_util/collision.py","file_name":"collision.py","file_ext":"py","file_size_in_byte":3418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"542833743","text":"import numpy as np\nimport argparse\nimport imutils\nimport cv2\n\ndef detector(img):\n image = cv2.imread(img)\n gray_scale = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n max_val = float('-inf')\n\n for angle in np.arange(0, 360, 45):\n gray = imutils.rotate(gray_scale, angle)\n\n ddepth = cv2.cv.CV_32F if imutils.is_cv2() else cv2.CV_32F\n gradX = cv2.Sobel(gray, ddepth=ddepth, dx=1, dy=0, ksize=-1)\n gradY = cv2.Sobel(gray, ddepth=ddepth, dx=0, dy=1, ksize=-1)\n\n gradient = cv2.subtract(gradX, gradY)\n gradient = cv2.convertScaleAbs(gradient)\n\n blurred = cv2.blur(gradient, (3, 3))\n\n if (blurred.sum() > max_val):\n max_val = blurred.sum()\n best_blurred = blurred\n best_angle = angle\n\n\n (_, thresh) = cv2.threshold(best_blurred, 225, 255, cv2.THRESH_BINARY)\n\n kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (35, 35))\n ###########################################################\n closed = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)\n closed = cv2.erode(closed, None, iterations = 4)\n closed = cv2.dilate(closed, None, iterations = 4)\n ###########################################################\n cnts = cv2.findContours(closed.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n cnts = imutils.grab_contours(cnts)\n ##################################################ROTATING IMAGE TO BEST ANGLE\n image = imutils.rotate(image, best_angle)\n\n list_of_boxes = []\n #######################################################\n for ct in cnts:\n if (cv2.contourArea(ct) <= 2000):\n continue\n\n rect = cv2.minAreaRect(ct)\n box = cv2.cv.BoxPoints(rect) if imutils.is_cv2() else cv2.boxPoints(rect)\n box = np.int0(box)\n ###DRAWING PERFECT BOUNDARIES TO EACH BARCODE REGION\n cv2.drawContours(image, [box], -1, (0, 255, 0), 3)\n minX = box[0][0]\n maxX = box[2][0]\n minY = box[1][1]\n maxY = box[0][1]\n list_of_boxes.append([minX, maxX, minY, maxY])\n\n return list_of_boxes","sub_path":"detector.py","file_name":"detector.py","file_ext":"py","file_size_in_byte":2082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"397814131","text":"from django.conf.urls import patterns, url\nfrom django.contrib.staticfiles.urls import staticfiles_urlpatterns\nfrom front.views import MediaListView, TitleDetailView\n\nurlpatterns = patterns('front.views',\n url(r'^$', MediaListView.as_view()),\n url(r'^season/(?P[\\w]+)/(?P[\\d]+)', MediaListView.as_view()),\n url(r'^anime/(?P[\\w\\-]+)$', TitleDetailView.as_view(slug_field='slug')),\n)\n\nurlpatterns += staticfiles_urlpatterns()\n\nhandler404 = \"front.views.handler404\"\nhandler500 = \"front.views.handler500\"\n","sub_path":"ani_pipe/front/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"26297386","text":"class Solution(object):\n def wiggleSort(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: None Do not return anything, modify nums in-place instead.\n \"\"\"\n nums.sort()\n res = []\n tmp = '0'\n l,r=(len(nums)-1)/2,len(nums)-1\n while l>=0:\n res.append(nums[l])\n if r>(len(nums)-1)/2:\n res.append(nums[r])\n l-=1\n r-=1\n nums[:]=res\n","sub_path":"算法面试题汇总/7排序与检索/2摆动排序II.py","file_name":"2摆动排序II.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"587867399","text":"\"\"\"\n\nUsing names.txt (right click and 'Save Link/Target As...'), a 46K text file \ncontaining over five-thousand first names, begin by sorting it into alphabetical\norder. Then working out the alphabetical value for each name, multiply this value\nby its alphabetical position in the list to obtain a name score.\n\nFor example, when the list is sorted into alphabetical order, COLIN, which is\nworth 3 + 15 + 12 + 9 + 14 = 53, is the 938th name in the list. So, COLIN would\nobtain a score of 938 × 53 = 49714.\n\nWhat is the total of all the name scores in the file?\n\n\"\"\"\n\nfrom io import open\n\nnames_file = open('p022_names.txt', 'r')\nnames = names_file.read().split(',')\nnames.sort()\n\nposition = 1\ntotal_score = 0\n\nfor name in names:\n\tname_value = 0\n\n\tfor letter in name:\n\t\t# obtiene el valor del nombre con el valor ASCII de las letras\n\t\tname_value += ord(letter)-64\n\n\tscore = name_value * position\n\tposition += 1\n\ttotal_score += score\n\nprint(total_score)\n\n\nnames_file.close()\n\n# [Finished in 348ms]\n","sub_path":"problem022.py","file_name":"problem022.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"425309679","text":"#!/usr/bin/env python\nimport time\n\nimport rospy\nfrom geometry_msgs.msg import Pose2D\nfrom christiauto_robaldo.msg import RobotState\n\nimport numpy as np\n\n\"\"\"\nYou could do state estimation here for the robots. For now,\nthis node's job is simply to take the Pose2D vision message\nand patch it through as a RobotState message.\n\nNote that one of these nodes is created per robot and all the\nplumbing is done in the `robot.launch` file.\n\"\"\"\n\n_state_pub = None\n_team_side = None\n\n# Again, this could be done in the vision code\nfrom soccerref.msg import GameState\n_game_state = GameState\n\ndef _handle_game_state(msg):\n global _game_state\n _game_state = msg\n\ndef _handle_vision_position(msg):\n # Flip the coordinate system so that our side is always the negative\n # side. This is nice because we won't have to think about this later\n # in our high-level AI code and elsewhere.\n # This could also be done in the vision node; see original christiauto_robaldo code\n if (_team_side != 'home') ^ bool(_game_state.second_half):\n msg.x = -1*msg.x\n msg.y = -1*msg.y\n\n if msg.theta < 180:\n msg.theta += 180\n else:\n msg.theta -= 180\n\n new_msg = RobotState()\n new_msg.xhat = new_msg.vision_x = new_msg.xhat_future = msg.x\n new_msg.yhat = new_msg.vision_y = new_msg.yhat_future = msg.y\n new_msg.thetahat = new_msg.vision_theta = new_msg.thetahat_future = msg.theta\n new_msg.correction = True\n _state_pub.publish(new_msg)\n\ndef main():\n rospy.init_node('robot_estimator', anonymous=False)\n\n global _team_side\n param_name = rospy.search_param('team_side')\n _team_side = rospy.get_param(param_name, 'home')\n\n # Sub/Pub\n global _state_pub, _game_state\n # Use remap in roslaunch file to create separate channels per robot\n _state_pub = rospy.Publisher('robot_state', RobotState, queue_size=10)\n\n rospy.Subscriber('vision_position', Pose2D, _handle_vision_position)\n rospy.Subscriber('/game_state', GameState, _handle_game_state)\n\n # spin() simply keeps python from exiting until this node is stopped\n rospy.spin()\n\nif __name__ == '__main__':\n main()\n","sub_path":"christiauto_robaldo/nodes/estimation/robot_estimator_node.py","file_name":"robot_estimator_node.py","file_ext":"py","file_size_in_byte":2151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"355798868","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Mar 08 00:09:10 2014\r\n\r\n@author: Mads Stilling\r\n\"\"\"\r\nimport pygame\r\nimport sound\r\nfrom sound import Sound\r\nclass Wall(pygame.sprite.Sprite):\r\n \"\"\" object the player can run into. \"\"\"\r\n \r\n\r\n def __init__(self, x, y, width, height, color, screenWidth, screenHeight):\r\n \"\"\" Constructs the wall the player can run into. \"\"\"\r\n \r\n self.position = []\r\n \r\n # Call the parent's constructor\r\n pygame.sprite.Sprite.__init__(self)\r\n \r\n # Make a blue wall, of the size specified in the parameters\r\n self.image = pygame.Surface([width, height])\r\n self.image.fill(color)\r\n \r\n # Make our buttom left corner the passed-in location.\r\n self.rect = self.image.get_rect()\r\n # Make height the passed-in location for y\r\n self.rect.y = y + screenHeight - height\r\n self.rect.x = x\r\n \r\n def update(self):\r\n\r\n self.rect.x = self.position[0]\r\n \r\n self.rect.y = self.position[1]\r\n\r\n def move(self, a, d):\r\n \r\n if a:\r\n self.position[0] += self.playerSpeed \r\n \r\n elif d:\r\n self.position[0] -= self.playerSpeed\r\n \r\nclass Healthbar():\r\n \"\"\" creates our timebar\"\"\"\r\n \r\n def __init__(self, position, timelimit):\r\n \r\n # Set timebar position from the supplied position \r\n self.position = position \r\n \r\n # Loads images \r\n self.image = pygame.image.load(\"shitometerbg.png\")\r\n self.imageFull = pygame.image.load(\"shitometercolor.png\")\r\n \r\n # Resizes the bar into 10% intervals and create new variables\r\n self.image0 = pygame.transform.scale(self.imageFull, [0, 16])\r\n self.image10 = pygame.transform.scale(self.imageFull, [10, 16])\r\n self.image20 = pygame.transform.scale(self.imageFull, [20, 16])\r\n self.image30 = pygame.transform.scale(self.imageFull, [30, 16])\r\n self.image40 = pygame.transform.scale(self.imageFull, [40, 16]) \r\n self.image50 = pygame.transform.scale(self.imageFull, [50, 16])\r\n self.image60 = pygame.transform.scale(self.imageFull, [60, 16])\r\n self.image70 = pygame.transform.scale(self.imageFull, [70, 16])\r\n self.image80 = pygame.transform.scale(self.imageFull, [80, 16])\r\n self.image90 = pygame.transform.scale(self.imageFull, [90, 16])\r\n self.imageFill = self.image0\r\n\r\n self.imageFullSize = self.imageFull\r\n \r\n # Setup loose sound\r\n self.playSound = True \r\n self.sound = Sound(\"fartlong.wav\", 0)\r\n # Setup timer\r\n self.timelimit = timelimit\r\n self.timer = 0\r\n \r\n def update(self, dt):\r\n \r\n if self.timer == 0:\r\n self.timelimit = self.timelimit * dt \r\n if self.timer == (self.timelimit / 100) * 10:\r\n self.imageFill = self.image10\r\n elif self.timer == (self.timelimit / 100) * 20:\r\n self.imageFill = self.image20 \r\n elif self.timer == (self.timelimit / 100) * 30:\r\n self.imageFill = self.image30\r\n elif self.timer == (self.timelimit / 100) * 40:\r\n self.imageFill = self.image40\r\n elif self.timer == (self.timelimit / 100) * 50:\r\n self.imageFill = self.image50 \r\n elif self.timer == (self.timelimit / 100) * 60:\r\n self.imageFill = self.image60 \r\n elif self.timer == (self.timelimit / 100) * 70:\r\n self.imageFill = self.image70 \r\n elif self.timer == (self.timelimit / 100) * 80:\r\n self.imageFill = self.image80 \r\n elif self.timer == (self.timelimit / 100) * 90:\r\n self.imageFill = self.image90\r\n elif self.timer == self.timelimit: \r\n self.imageFill = self.imageFull\r\n self.playSound = False\r\n \r\n elif self.timer > 100 and self.playSound == False:\r\n self.sound.playsound()\r\n self.playSound = True\r\n \r\n self.timer = self.timer +1\r\n \r\n \r\n\r\n ","sub_path":"Violent Evacuation/objects.py","file_name":"objects.py","file_ext":"py","file_size_in_byte":4241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"428982591","text":"from selenium import webdriver\nfrom selenium.webdriver.support.ui import Select\nfrom behave import *\nimport time\n\nfrom features.Pages.SignUpPage import SignUp\n\n@given ('I open the web browser and navigate the url \"{url}\"')\ndef start_browser(context, url):\n context.driver = webdriver.Chrome('G:\\\\Python_Projects\\\\Test_Login\\\\features\\\\webdrivers\\\\chromedriver.exe')\n context.driver.implicitly_wait(10)\n context.driver.maximize_window()\n context.driver.get(url)\n assert \"InsuranceWeb: Home\" in context.driver.title\n\n@when ('I click on SignUp Button')\ndef click_signup(context):\n driver = context.driver\n register = SignUp(driver)\n register.hit_signup_button()\n assert \"InsuranceWeb: Sign up\" in context.driver.title\n\n@when ('I type user registration detail and click on signup button')\ndef enter_registration_detail(context):\n driver = context.driver\n reg = SignUp(driver)\n\n reg.enter_first_name('test1')\n reg.enter_last_name('user')\n reg.enter_birthday('10/10/1988')\n reg.enter_email('test1user@mail.com')\n reg.enter_mailing_address('test')\n reg.enter_city('test')\n #Select(reg.enter_state).select_by_value('IN')\n Select(context.driver.find_element_by_id(reg.state_id)).select_by_value('IN')\n reg.enter_pin_code('100001')\n reg.enter_password('user@123')\n\n reg.click_signup()\n\n@then ('Successful message is displayed')\ndef success_message_displayed(context):\n driver = context.driver\n suc = SignUp(driver)\n suc.success_message()\n time.sleep(5)\n\n@when ('I do not enter user registration detail and click on signup button')\ndef empty_registration_detail(context):\n driver = context.driver\n error = SignUp(driver)\n error.click_signup()\n\n@then ('Error message displayed: Required field')\ndef error_message_shown(context):\n driver = context.driver\n err = SignUp(driver)\n err.error_message_incomplete_form()\n time.sleep(5)\n\n@when ('I enter only few details in registration detail and click on signup button')\ndef enter_incomplete_signup_form(context):\n driver = context.driver\n err_msg = SignUp(driver)\n err_msg.enter_first_name('test2')\n err_msg.enter_last_name('user2')\n err_msg.enter_email('test2@gmail.com')\n err_msg.enter_mailing_address('test UP')\n err_msg.enter_password('test@1234')\n err_msg.click_signup()\n\n@then ('Error message displayed')\ndef error_message(context):\n driver = context.driver\n err = SignUp(driver)\n err.error_message_incomplete_form()\n time.sleep(5)\n\n@when ('I enter user registration detail with same email and click on signup button')\ndef check_email(context):\n driver = context.driver\n reg = SignUp(driver)\n reg.enter_first_name('test1')\n reg.enter_last_name('user')\n reg.enter_birthday('10/10/1988')\n reg.enter_email('test1user@mail.com')\n reg.enter_mailing_address('test')\n reg.enter_city('test')\n # Select(reg.enter_state).select_by_value('IN')\n Select(context.driver.find_element_by_id(reg.state_id)).select_by_value('IN')\n reg.enter_pin_code('100001')\n reg.enter_password('user@123')\n reg.click_signup()\n\n@then ('Error message displayed: Email already registered')\ndef error_existing_email(context):\n driver = context.driver\n chk = SignUp(driver)\n chk.check_email_exist()\n time.sleep(5)","sub_path":"Test_Login/features/steps/RegisterPage.py","file_name":"RegisterPage.py","file_ext":"py","file_size_in_byte":3300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"91579870","text":"from tornado.ioloop import IOLoop\nfrom tornado.web import Application, StaticFileHandler\nimport ts.config as cfg\nfrom ts.sockets import LogSocket\n\nclass Application(Application):\n def __init__(self):\n super().__init__([\n (r\"/()$\", StaticFileHandler, {'path': 'application.html'}),\n (r\"/socket$\", LogSocket),\n (r\"/static/(.*)\", StaticFileHandler, {'path': 'static/'})\n ], debug=cfg.DEBUG)\n\n def go(self):\n self.listen(cfg.PORT)\n IOLoop.current().start()\n","sub_path":"ts/application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"321268445","text":"import time\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\nfrom sklearn.model_selection import StratifiedKFold, RepeatedStratifiedKFold, KFold, cross_val_predict, cross_validate\r\nfrom sklearn.metrics import roc_auc_score\r\n\r\nfrom lightgbm import LGBMClassifier\r\n\r\nimport os\r\nimport socket\r\n\r\nclass LGBMClassifierCV(object):\r\n \"\"\"cross_val_predict\"\"\"\r\n\r\n def __init__(self, params=None, cv=5, random_state=None, n_repeats=None):\r\n self.clf = LGBMClassifier()\r\n if params:\r\n self.clf.set_params(**params)\r\n if n_repeats:\r\n self._kf = RepeatedStratifiedKFold(cv, True, random_state) # 复制N次\r\n self._num_preds = cv * n_repeats\r\n else:\r\n self._kf = StratifiedKFold(cv, True, random_state)\r\n self._num_preds = cv\r\n\r\n def fit(self, X, y, X_test=None,\r\n feval=roc_auc_score,\r\n sample_weight=None,\r\n init_score=None,\r\n eval_metric='auc',\r\n early_stopping_rounds=100,\r\n verbose=100,\r\n feature_name='auto',\r\n categorical_feature='auto',\r\n callbacks=None):\r\n\r\n \"\"\"输入数组\"\"\"\r\n if X_test is None:\r\n X_test = X[:1] # 将第一行作为test集\r\n\r\n self.oof_train = np.zeros(len(X))\r\n self.oof_test = np.zeros((len(X_test), self._num_preds)) # num_preds:有多少折\r\n\r\n for n_fold, (train_index, valid_index) in enumerate(self._kf.split(X, y)):\r\n if verbose:\r\n print(\"\\033[94mFold %s started at %s\\033[0m\" % (n_fold + 1, time.ctime()))\r\n X_train, y_train = X[train_index], y[train_index]\r\n X_valid, y_valid = X[valid_index], y[valid_index]\r\n eval_set = [(X_train, y_train), (X_valid, y_valid)] # 需要同时验证两个集合\r\n\r\n ########################################################################\r\n self.clf.fit(X_train,\r\n y_train,\r\n sample_weight,\r\n init_score,\r\n eval_set,\r\n eval_names=('Train', 'Valid'),\r\n eval_sample_weight=None,\r\n eval_class_weight=None,\r\n eval_init_score=None,\r\n eval_metric=eval_metric,\r\n early_stopping_rounds=early_stopping_rounds,\r\n verbose=verbose,\r\n feature_name=feature_name,\r\n categorical_feature=categorical_feature,\r\n callbacks=callbacks)\r\n\r\n self.oof_train[valid_index] = self.clf.predict_proba(X_valid)[:, 1]\r\n self.oof_test[:, n_fold] = self.clf.predict_proba(X_test)[:, 1]\r\n ########################################################################\r\n\r\n # 输出 测试集 out-of-fold\r\n self.oof_test_rank = (pd.DataFrame(self.oof_test).rank().mean(axis=1)/len(self.oof_test)).values\r\n self.oof_test = self.oof_test.mean(axis=1) # 测试集的oof score算平均\r\n\r\n assert len(X) == len(self.oof_train)\r\n assert len(X_test) == len(self.oof_test)\r\n\r\n # 计算 训练集 oof 得分(out_of_fold)\r\n if feval:\r\n self.oof_train_score = feval(y, self.oof_train)\r\n print(f\"\\n\\033[94mtrain CV Score: {self.oof_train_score} ended at {time.ctime()}\\033[0m\")\r\n return self.oof_train_score\r\n\r\n def oof_submit(self, ids, pred_ranking=False, file=None, preds=None):\r\n \"\"\"preds分用于submit\"\"\"\r\n if file is None:\r\n file = f'submit_{self.oof_train_score}.csv'\r\n print(f'Save {file} ...')\r\n\r\n if preds is None:\r\n preds = self.oof_test if pred_ranking else self.oof_test_rank\r\n\r\n if not isinstance(ids, pd.DataFrame):\r\n ids = pd.DataFrame(ids)\r\n ids.assign(preds=preds).to_csv(file, index=False, header=False)\r\n\r\n @property\r\n def oof_train_and_test(self):\r\n return np.r_[self.oof_train, self.oof_test]\r\n\r\n def oof_save(self, file='./oof_train_and_test.csv'):\r\n pd.DataFrame(self.oof_train_and_test, columns=['oof_train_and_test']).to_csv(file, index=False)\r\n\r\n def plot_feature_importances(self, feature_names=None, topk=20, figsize=(10, 6), pic_name=None):\r\n columns = ['Importances', 'Features']\r\n importances = self.clf.feature_importances_.tolist()\r\n if feature_names is None:\r\n feature_names = list(map(lambda x: f'F_{x}', range(len(importances))))\r\n _ = list(zip(importances, feature_names))\r\n df = pd.DataFrame(_, columns=columns).sort_values('Importances', 0, False)\r\n\r\n plt.figure(figsize=figsize)\r\n sns.barplot(*columns, data=df[:topk])\r\n plt.title('Features Importances\\n')\r\n plt.tight_layout()\r\n if pic_name is None:\r\n plt.savefig(f'importances_{self.oof_train_score}.png')\r\n\r\n\r\nif __name__ == \"__main__\":\r\n from sklearn.datasets import make_classification\r\n\r\n X, y = make_classification()\r\n X_test, _ = make_classification()\r\n\r\n clf = LGBMClassifierCV()\r\n clf.fit(X, y, X_test)\r\n clf.plot_feature_importances()\r\n \"\"\"\r\n 一组lightgbmcv参数:\r\n params = {\r\n 'class_weight':'balanced',\r\n 'metric': 'auc',\r\n 'boosting_type': 'gbdt',\r\n 'objective': 'binary',\r\n 'max_depth': -1,\r\n 'num_leaves': 16,\r\n 'learning_rate': 0.005,\r\n 'min_split_gain': 0.884,\r\n 'min_child_weight': 0.01,\r\n 'min_child_samples': 31,\r\n 'subsample': 0.788,\r\n 'subsample_freq': 8,\r\n 'colsample_bytree': 0.617,\r\n 'reg_alpha': 0.631,\r\n 'reg_lambda': 0.81,\r\n 'scale_pos_weight': 1,\r\n 'random_state': 666,\r\n 'verbosity': -1,\r\n 'n_jobs': -1,\r\n 'n_estimators': 30000} # 300分数好像很高\r\n \r\n oof8 = LGBMClassifierCV(params, 8, 999)\r\n oof8.fit(X, y, X_test, early_stopping_rounds=300)\r\n \"\"\"","sub_path":"test_numpy/test_sklearn/lightgbm/lightgbm_cv.py","file_name":"lightgbm_cv.py","file_ext":"py","file_size_in_byte":5940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"586696576","text":"\r\nimport pandas as pd\r\nimport os \t\r\nimport json\r\n\r\ndef output_json(all_data, keyword, loc):\r\n\tif 'json' not in os.listdir():\r\n\t\tos.mkdir('json')\r\n\twith open(f'json/{keyword.replace(\"+\", \"-\")}' + f'({loc}).json', 'w') as f:\r\n\t\tjson.dump(all_data, f, indent=4)\r\n\r\n\r\ndef output_csv(all_data, keyword, loc):\r\n\tif 'csv' not in os.listdir():\r\n\t\tos.mkdir('csv')\r\n\tdf = pd.DataFrame(all_data)\r\n\tdf.to_csv(f'csv/{keyword.replace(\"+\", \"-\")}' + f'({loc}).csv', index=False)\r\n","sub_path":"outputs.py","file_name":"outputs.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"561571089","text":"# -*- coding: utf-8 -*- #\n# Copyright 2019 Google LLC. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Flags and helpers for the compute packet mirroring commands.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import unicode_literals\n\nfrom googlecloudsdk.command_lib.compute import completers as compute_completers\nfrom googlecloudsdk.command_lib.compute import flags as compute_flags\n\n\nclass PacketMirroringCompleter(compute_completers.ListCommandCompleter):\n\n def __init__(self, **kwargs):\n super(PacketMirroringCompleter, self).__init__(\n collection='compute.packetMirrorings',\n list_command='alpha compute packet-mirrorings list --uri',\n **kwargs)\n\n\ndef PacketMirroringArgument(required=True, plural=False):\n return compute_flags.ResourceArgument(\n resource_name='packet mirroring',\n completer=PacketMirroringCompleter,\n plural=plural,\n custom_plural='packet mirrorings',\n required=required,\n regional_collection='compute.packetMirrorings')\n","sub_path":"lib/googlecloudsdk/command_lib/compute/packet_mirrorings/flags.py","file_name":"flags.py","file_ext":"py","file_size_in_byte":1567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"616267848","text":"# -*- coding: utf-8 -*-\nfrom django.contrib.auth.decorators import login_required\nfrom django.template import RequestContext\nfrom django.shortcuts import render_to_response, redirect\n#from django.http import Http404 # se usa con raise Http404\nfrom django.views.decorators.csrf import csrf_protect, csrf_exempt\n#from django.views.decorators.cache import cache_page\n#from django.contrib import messages\n#from models import *\n#import json\nfrom forms import *\nfrom utilidades.email import enviar_mail\n\ndef home(request):\n positionweb='home'\n return render_to_response('home.html',locals(),RequestContext(request))\n\n\ndef servicios(request):\n positionweb='servicios'\n return render_to_response('servicios.html',locals(),RequestContext(request))\n\n@csrf_protect\ndef contacto(request):\n positionweb='contacto'\n if request.method == 'POST': # If the form has been submitted...\n form = contacto_form(request.POST) # A form bound to the POST data\n if form.is_valid(): # All validation rules pass\n nombre = form.cleaned_data['nombre']\n email = form.cleaned_data['email']\n titulo = form.cleaned_data['titulo']\n mensaje = form.cleaned_data['mensaje']\n mensaje = '%s
%s

%s'%(nombre,email,mensaje)\n enviar_mail(mensaje,titulo,'kase@boredsoft.com')\n ok = True\n else:\n form = contacto_form() # An unbound form\n return render_to_response('contacto.html',locals(),RequestContext(request))\n\n \n#def home_json(request):\n# response_data={'a':'b'}\n# return HttpResponse(json.dumps(response_data), content_type=\"application/json\",mimetype='application/json')\n","sub_path":"base/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"536668983","text":"\"\"\"\n Zip\n - expects any no. of iterators with equal indeces\n to combine it into a single object.\n\n * Note * if iterators contains unequal length,\n iteration stops at the smallest index\n\"\"\"\n\nnames = [\"rodel\", \"ryan\"]\ngrades = [1, 2]\n\n# combine\nresult = zip(names, grades)\nlist_result = list(result)\nprint(list_result) # [('rodel', 1), ('ryan', 2)]\n\n# unzip\nnames, grades = zip(*list_result)\nprint(names, grades) # ('rodel', 'ryan') (1, 2)\n","sub_path":"core/zip.py","file_name":"zip.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"139621817","text":"'''\ndraw_lanes.py memory_size input_video output_video \n'''\n\nimport numpy as np\nimport cv2\nfrom PIL.Image import fromarray, BILINEAR\nfrom moviepy.editor import VideoFileClip\nfrom IPython.display import HTML\nfrom keras.models import model_from_json\nimport sys\nimport warnings\nwarnings.filterwarnings(action='ignore') # 귀찮은 경고 감추기\n\nscaler = 6\nresized_shape = (1640//scaler, 590//scaler)\n\nmemory_size = int(sys.argv[1])\njson_fname = \"model_structure_when_mem_is_{}.json\".format(memory_size)\nweights_fname =\"mem_is_{}.h5\".format(memory_size)\n\n# Load Keras model\njson_file = open(json_fname, 'r')\njson_model = json_file.read()\njson_file.close()\nmodel = model_from_json(json_model)\nmodel.load_weights(weights_fname)\n\nmodel.summary()\n\n# Class to average lanes with\nclass Lanes():\n def __init__(self):\n self.recent_question = np.empty((1, 96, 272, 1))\n self.initialized = False\n self.recent_ans = []\n self.avg_ans = []\n\ndef road_lines(image):\n \"\"\" Takes in a road image, re-sizes for the model,\n predicts the lane to be drawn from the model in G color,\n recreates an RGB image of a lane and merges with the\n original road image.\n \"\"\"\n\n # Image를 memory size 만큼 받아서 한번에 predict\n small_img = fromarray(image).resize(resized_shape)\n small_img = np.asarray(small_img,dtype=\"uint8\")\n small_img = small_img[None,:,:,:]/255.0 # (1, 96, 272, 1)\n\n if lanes.recent_question.shape[0] >= memory_size:\n # 이 경우에만 예측과 갈아치우기를 한다.\n # 이전 프레임 지우기\n lanes.recent_question = np.append(lanes.recent_question, small_img, axis=0)\n lanes.recent_question = lanes.recent_question[1:]\n prediction = model.predict(lanes.recent_question[np.newaxis])[0]*255\n\n lanes.recent_ans.append(prediction)\n\n if len(lanes.recent_ans) > 5:\n lanes.recent_ans = lanes.recent_ans[1:]\n\n # Calculate average detection\n lanes.avg_ans = np.mean(np.array([i for i in lanes.recent_ans]), axis = 0)\n\n # Generate fake R & B color dimensions, stack with G\n blanks = np.zeros_like(lanes.avg_ans)\n lane_drawn = np.dstack((blanks, lanes.avg_ans, blanks))\n lane_drawn = lane_drawn.astype(\"uint8\")\n\n # Re-size to match the original image\n lane_image = fromarray(lane_drawn)\n lane_image = lane_image.resize((1280, 720),BILINEAR)\n lane_image = np.asarray(lane_image,dtype=\"uint8\")\n\n # Merge the lane drawing onto the original image\n result = cv2.addWeighted(image, 1, lane_image, 1, 0)\n\n elif lanes.initialized == True:\n print(\"=== Case 1 : image stacking only ===\")\n lanes.recent_question = np.append(lanes.recent_question, small_img, axis=0)\n result = fromarray(image).resize((1280, 720))\n result = np.array(result)\n\n elif lanes.initialized == False:\n print(\"=== Case 2 : initializing ===\")\n lanes.recent_question = small_img# (1, 96, 272, 1)\n result = fromarray(image).resize((1280, 720))\n result = np.array(result) # (720, 1280, 3) \n lanes.initialized = True\n\n return result\n\n# Global variable lanes.recent_question\nlanes = Lanes()\n\n# Where to save the output video\nvid_output = sys.argv[3]\n\n# Location of the input video\nclip1 = VideoFileClip(sys.argv[2])\n\nvid_clip = clip1.fl_image(road_lines)\nvid_clip.write_videofile(vid_output, audio=False)\n","sub_path":"draw_lanes_avg.py","file_name":"draw_lanes_avg.py","file_ext":"py","file_size_in_byte":3437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"163940233","text":"\nfrom django.urls import path\n\nfrom . import views\n\napp_name = \"kanban\"\n\n\n\n\nurlpatterns = [\n path(\"\", views.index, name=\"index\"),\n path(\"home/\", views.home, name=\"home\"),\n path('signup/', views.signup, name='signup'),\n path(\"users//\", views.UserDetailView.as_view(), name=\"users_detail\"),\n path(\"users//update/\", views.UserUpdateView.as_view(), name=\"users_update\"),\n path(\"lists/\", views.ListListView.as_view(), name=\"lists_list\"),\n path(\"lists/create/\", views.ListCreateView.as_view(), name=\"lists_create\"), \n path(\"lists//\", views.ListDetailView.as_view(), name=\"lists_detail\"),\n path(\"lists//update/\", views.ListUpdateView.as_view(), name=\"lists_update\"),\n path(\"lists//delete/\", views.ListDeleteView.as_view(), name=\"lists_delete\"),\n\n]\n\n\n","sub_path":"kanban/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"22047888","text":"from flask import Blueprint, url_for, render_template, redirect, flash, send_from_directory\nfrom .. import db\nfrom . import main \nfrom ..seminar.models import Seminar\nfrom ..research.models import Publication, Member\nfrom ..workshop.models import Workshop\nfrom ..visitor.models import Visitor\n\n@main.route('/')\ndef index():\n SeminarItem = Seminar.query.order_by(db.desc(Seminar.date)).first()\n WorkshopItem = Workshop.query.order_by(db.desc(Workshop.start_date)).first()\n PublicationItem = Publication.query.first()\n MemberItem = Member.query.first()\n VisitorItem = Visitor.query.first()\n if SeminarItem is None:\n flash(\"No Seminar Item!\")\n return redirect(url_for('index'))\n\n if WorkshopItem is None:\n flash(\"No Workshop Item!\")\n return redirect(url_for('index'))\n\n if MemberItem is None:\n flash(\"No Member Item!\")\n return redirect(url_for('index'))\n \n if PublicationItem is None:\n flash(\"No Publication Item!\")\n return redirect(url_for('index'))\n \n if VisitorItem is None:\n flash(\"No Visitor Item!\")\n return redirect(url_for('index'))\n\n GroupLeader1 = Member.query.filter_by(group='Ultrafast Science and High-Precision Quantum Technology', title='Principal investigator').first()\n GroupLeader2 = Member.query.filter_by(group='Quantum Computing and Communication Networks', title='Principal investigator').first()\n GroupLeader3 = Member.query.filter_by(group='Quantum Design of Exotic Phenomenon in Novel Materials', title='Principal investigator').first()\n GroupLeader4 = Member.query.filter_by(group='Computational Science and Engineering', title='Principal investigator').first()\n\n return render_template('main/main.html', SeminarItem=SeminarItem, WorkshopItem=WorkshopItem, G1=GroupLeader1, G2=GroupLeader2, G3=GroupLeader3, G4=GroupLeader4)\n\n@main.route('/member/')\n@main.route('/member/')\ndef member(group=None):\n if group:\n MemberItems = Member.query.filter_by(group=group).all()\n else:\n MemberItems = Member.query.all()\n return render_template('main/member.html',group=group, Items=MemberItems)\n\n@main.route('/publication/')\ndef publication(id):\n Items = db.session.query(Publication).filter_by(member_id=id).all()\n\n return render_template('main/publication.html', Items=Items)\n\n@main.route('/seminar/')\n@main.route('/seminar/')\ndef seminar(year=None):\n if year:\n SeminarItems = db.session.query(Seminar).filter(db.extract('year', Seminar.date)==year).all()\n else:\n SeminarItems = Seminar.query.all()\n\n return render_template('main/seminar.html', Items=SeminarItems)\n\n@main.route('/workshop/')\n@main.route('/workshop/')\ndef workshop(year=None):\n if year:\n WorkshopItems = db.session.query(Workshop).filter(db.extract('year', Workshop.start_date)==year).all()\n else:\n WorkshopItems = db.session.query(Workshop).all()\n \n return render_template('main/workshop.html', Items=WorkshopItems)\n\n@main.route('/visitor/')\n@main.route('/visitor/')\ndef visitor(year=None):\n if year:\n VisitorItems = db.session.query(Visitor).filter(db.extract('year', Visitor.start_date)==year).all()\n else:\n VisitorItems = db.session.query(Visitor).all()\n \n date_min = db.session.query(db.func.min(Visitor.start_date)).scalar()\n date_max = db.session.query(db.func.max(Visitor.start_date)).scalar()\n years=range(date_min.year,date_max.year+1)\n\n return render_template('main/visitor.html', Items=VisitorItems)\n\n@main.route('/information')\ndef information():\n return render_template('main/information.html')","sub_path":"app/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"623856208","text":"import json\nimport joblib\n\n\ndef predict_match(model, prediction_data):\n predictions = {\n 'file_path': prediction_data['file_path'],\n 'winner': prediction_data['winner'],\n 1: {},\n 2: {},\n }\n\n from copy import deepcopy\n\n for i in range(0, 2):\n # remove avg_collection_rate value\n filtered_data = deepcopy(prediction_data['data'][i])\n filtered_data.pop(7)\n\n raw_prediction = model.predict([filtered_data]).tolist()[0]\n # SVM doesn't do probability\n raw_probability = model.predict_proba([filtered_data]).tolist()[0]\n # raw_probability = (0, 0)\n\n predicted = (i + 1) if raw_prediction is True else prediction_data['winner']\n if predicted == (i + 1):\n probability = raw_probability[1]\n else:\n probability = raw_probability[0]\n\n predictions[i + 1] = {\n 'predicted': predicted,\n 'probability': probability,\n 'raw': {\n 'predicted': raw_prediction,\n 'probability': raw_probability,\n },\n }\n\n return predictions\n\n\ndef predict_outcomes():\n with open('prediction_data.json', 'r') as prediction_data:\n data = json.load(prediction_data)['prediction_data']\n lr_model = joblib.load('predict_outcome_lr.joblib')\n rf_model = joblib.load('predict_outcome_rf.joblib')\n nb_model = joblib.load('predict_outcome_nb.joblib')\n # svm_model = joblib.load('predict_outcome_svm.joblib')\n\n lr_predicted = []\n rf_predicted = []\n nb_predicted = []\n # svm_predicted = []\n for match_data in data:\n lr_predicted.append(predict_match(lr_model, match_data))\n rf_predicted.append(predict_match(rf_model, match_data))\n nb_predicted.append(predict_match(nb_model, match_data))\n # svm_predicted.append(predict_match(svm_model, match_data))\n\n print(f'{len(data)} replays')\n\n for predicted in [lr_predicted, rf_predicted, nb_predicted]:\n matching = 0\n diverge = 0\n consensus = 0\n correct = 0\n incorrect = 0\n for match in predicted:\n for i in range(1, 3):\n if match['winner'] == match[i]['predicted']:\n correct += 1\n else:\n incorrect += 1\n\n if match[1]['predicted'] == match[2]['predicted']:\n matching += 1\n if match['winner'] == match[1]['predicted']:\n consensus += 1\n else:\n diverge += 1\n\n print(f'Overall Accuracy: {round(correct / (len(data) * 2), 3) * 100}% ({correct}/{len(data) * 2})')\n print(f'Match predictions agreed {round(matching / len(data), 3) * 100}% of the time')\n print(f'When match predictions agreed, they were right {round(consensus / matching, 3) * 100}% of the time\\n')\n\n with open('predictions.json', 'w') as outcome_data:\n json.dump({'data': lr_predicted}, outcome_data, ensure_ascii=False, indent=4)\n\n\npredict_outcomes()\n","sub_path":"predict_outcomes.py","file_name":"predict_outcomes.py","file_ext":"py","file_size_in_byte":3182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"458977958","text":"\nimport math\nimport matplotlib.pyplot as plt\nplt.style.use('bmh')\n\nclass Shape:\n def __init__(self, base, height, color):\n self.base = base\n self.height = height\n self.color = color\n\n def describe(self):\n print(\"Base: \", self.base)\n print(\"Height: \", self.height)\n print(\"Color: \", self.color)\n print(\"Perimeter: \", self.perimeter)\n print(\"Area: \", self.area)\n print(\"Vertices: \", self.vertices)\n print()\n\n def render(self):\n x_values = [ i for i,j in self.vertices ] + [ self.vertices[0][0] ]\n y_values = [ j for i,j in self.vertices ] + [ self.vertices[0][1] ] \n plt.plot(\n x_values, \n y_values,\n color=self.color\n )\n plt.gca().set_aspect(\"equal\")\n plt.savefig(self.plot_filename)\n plt.clf()\n\nclass Rectangle(Shape):\n def __init__(self, base, height, color):\n super().__init__(base, height, color)\n\n self.perimeter = 2 * self.base + 2 * self.height\n self.area = self.base * self.height\n self.vertices = [ (0,0), (self.base, 0), (self.base, self.height), (0, self.height) ]\n self.plot_filename = \"rectangle.png\"\n\n\nclass RightTriangle(Shape):\n def __init__(self, base, height, color):\n super().__init__(base, height, color)\n\n self.perimeter = self.base + self.height + math.sqrt(self.base**2 + self.height**2)\n self.area = self.base * self.height / 2\n self.vertices = [ (0,0), (self.base, 0), (0, self.height) ]\n self.plot_filename = \"triangle.png\"\n\n\nclass Square(Rectangle):\n def __init__(self, side, color ):\n super().__init__(side, side, color)\n self.plot_filename = \"square.png\"\n\n\nrect = Rectangle(5, 2, \"red\")\nrect.describe()\nrect.render()\n\ntri = RightTriangle(5, 2, \"blue\")\ntri.describe()\ntri.render()\n\nsq = Square( 5, 'green')\nsq.describe()\nsq.render()\n","sub_path":"shapes.py","file_name":"shapes.py","file_ext":"py","file_size_in_byte":1915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"563523157","text":"import logging\n\nfrom .utils.cache import memory\nfrom brownie import chain\nfrom . import compound, constants, uniswap\n\nlogger = logging.getLogger(__name__)\n\n\nclass PriceError(Exception):\n pass\n\n\n@memory.cache()\ndef get_price(token, block=None):\n token = str(token)\n print(f\"chainid: {chain.id}\")\n # # NOTE: Special handling required for some proxy tokens\n # if token in constants.PROXIES: # snx\n # logger.info('Replacing proxy address with implementation address')\n # token = constants.PROXIES[token]\n \n logger.debug(\"unwrapping %s\", token)\n price = None\n\n if token in constants.STABLECOINS:\n logger.debug(\"stablecoin -> %s\", 1)\n return 1\n\n if chain.id == 1: # eth mainnet\n from . import aave, balancer, chainlink, cream, curve, piedao, tokensets, yearn\n\n if token == \"0xEeeeeEeeeEeEeeEeEeEeeEEEeeeeEeeeeeeeEEeE\":\n token = str(constants.weth)\n\n # we can exit early with known tokens\n if token in chainlink.feeds:\n price = chainlink.get_price(token, block=block)\n logger.debug(\"chainlink -> %s\", price)\n\n elif aave.is_atoken(token):\n price = aave.get_price(token, block=block)\n logger.debug(\"atoken -> %s\", price)\n\n elif cream.is_creth(token):\n price = cream.get_price_creth(token, block)\n logger.debug(\"atoken -> %s\", price)\n\n elif yearn.is_yearn_vault(token):\n price = yearn.get_price(token, block=block)\n logger.debug(\"yearn -> %s\", price)\n\n elif curve.is_curve_lp_token(token):\n price = curve.get_price(token, block=block)\n logger.debug(\"curve lp -> %s\", price)\n\n elif compound.is_compound_market(token):\n price = compound.get_price(token, block=block)\n logger.debug(\"compound -> %s\", price)\n\n elif uniswap.is_uniswap_pool(token):\n price = uniswap.lp_price(token, block=block)\n logger.debug(\"uniswap pool -> %s\", price)\n\n elif balancer.is_balancer_pool(token):\n price = balancer.get_price(token, block=block)\n logger.debug(\"balancer pool -> %s\", price)\n\n elif tokensets.is_token_set(token):\n price = tokensets.get_price(token, block=block)\n logger.debug(\"token set -> %s\", price)\n\n elif piedao.is_pie(token):\n price = piedao.get_price(token, block=block)\n logger.debug(\"piedeo -> %s\", price)\n\n \n # peel a layer from [multiplier, underlying]\n if isinstance(price, list):\n price, underlying = price\n logger.debug(\"peel %s %s\", price, underlying)\n return price * get_price(underlying, block=block)\n\n # a few more attempts to fetch a price a token\n if price is None: # NOTE: 'if not price' returns True if price == 0 but we actually only want to proceed if price == None\n price = uniswap.get_price(token, router=\"sushiswap\", block=block)\n logger.debug(\"sushiswap -> %s\", price)\n \n if price is None:\n price = uniswap.get_price(token, router=\"uniswap\", block=block)\n logger.debug(\"uniswap -> %s\", price)\n \n if price is None:\n price = uniswap.get_price_v1(token, block=block)\n logger.debug(\"uniswap v1 -> %s\", price)\n # NOTE let's improve before we use\n #if price is None and (not block or block >= 11153725): # NOTE: First block of curve registry\n # price = curve.get_token_price(token, block=block)\n # logger.debug(\"curve -> %s\", price)\n\n if price is None:\n price = balancer.get_price(token, block=block)\n logger.debug(\"balancer -> %s\", price)\n\n if price is None:\n logger.error(\"failed to get price for %s\", token)\n\n if price is None:\n raise PriceError(f'could not fetch price for {token}')\n\n if chain.id == 56: # binance smart chain\n print(token)\n\n if token == \"0xEeeeeEeeeEeEeeEeEeEeeEEEeeeeEeeeeeeeEEeE\":\n token = str(constants.wbnb)\n\n # we can exit early with known tokens\n if compound.is_compound_market(token):\n price = compound.get_price(token, block=block)\n logger.debug(\"compound -> %s\", price)\n\n elif uniswap.is_uniswap_pool(token):\n price = uniswap.lp_price(token, block=block)\n logger.debug(\"uniswap pool -> %s\", price)\n\n # peel a layer from [multiplier, underlying]\n if isinstance(price, list):\n price, underlying = price\n logger.debug(\"peel %s %s\", price, underlying)\n return price * get_price(underlying, block=block)\n \n if price is None:\n price = uniswap.get_price(token, router=\"pancakeswapv2\", block=block)\n logger.debug(\"uniswap -> %s\", price)\n\n if price is None:\n price = uniswap.get_price(token, router=\"pancakeswapv1\", block=block)\n logger.debug(\"uniswap -> %s\", price)\n\n if chain.id == 137: # polygon\n print(token)\n\n if token == \"0xEeeeeEeeeEeEeeEeEeEeeEEEeeeeEeeeeeeeEEeE\":\n token = str(constants.wmatic)\n\n # we can exit early with known tokens\n if compound.is_compound_market(token):\n price = compound.get_price(token, block=block)\n logger.debug(\"compound -> %s\", price)\n\n elif uniswap.is_uniswap_pool(token):\n price = uniswap.lp_price(token, block=block)\n logger.debug(\"uniswap pool -> %s\", price)\n\n # peel a layer from [multiplier, underlying]\n if isinstance(price, list):\n price, underlying = price\n logger.debug(\"peel %s %s\", price, underlying)\n return price * get_price(underlying, block=block)\n \n if price is None:\n print('test')\n price = uniswap.get_price(token, router=\"quickswap\", block=block)\n logger.debug(\"uniswap -> %s\", price)\n\n return price\n","sub_path":"build/lib/ypricemagic/magic.py","file_name":"magic.py","file_ext":"py","file_size_in_byte":6026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"459219622","text":"#Dylan Drueding\n#Total Trivia Cheat\n\n#BEGIN IMPORTS\nimport requests\nfrom bs4 import BeautifulSoup\nimport argparse\nfrom PIL import Image\nimport pytesseract\n#END IMPORTS\n\n\n#BEGINNING STEPS\ndef parseImg():\n \n #LOAD IMG (EVENTUALLY ADD A SCREEN SHOT FUNCTION)\n img = Image.open(\"img5.png\")\n \n #CROP TO THE QUESTION AND ANSWER\n imgQ = img.crop((0, 200, 600, 500))\n imgA = img.crop((0, 600, 500, 1100))\n \n #imgQ.show()\n #imgA.show()\n \n #GET STRINGS\n question = pytesseract.image_to_string(imgQ, lang = \"eng\")\n answers = pytesseract.image_to_string(imgA, lang = \"eng\")\n \n #PRINT (PRINT CHECK)\n print(question)\n print(\"---------\")\n print(answers)\n print(\"---------\")\n #END PRINT CHECK\n #REMOVE PRINT EVENTUALLY & COMMENT\n \n #RUN GOOGLE SEARCH\n searchText(answers, question)\n #END SEARCH\n\n#END PARSE\n\n\n#DO THE GOOGLE SEARCH AND PRE ANALYSIS STUFF\ndef searchText(text, question):\n\n #ASSIGN ANSWERS TO STRINGS\n a1 = text.split(\"\\n\")[0]\n a2 = text.split(\"\\n\")[2]\n a3 = text.split(\"\\n\")[4]\n a4 = text.split(\"\\n\")[6]\n \n\t\n #ASSIGN QUESTIONS TO STRINGS\n q = question.split(\"\\n\")\n q = \" \".join(q)\n\t\n #COMBINE EACH ANSWER WITH THE QUESTION\n q1 = q + \" \" + a1\n q2 = q + \" \" + a2\n q3 = q + \" \" + a3\n q4 = q + \" \" + a4\n \n\t\n #RUN SEARCH AN GET RESULT TOTAL\t\n a1Result = requests.get('https://www.google.com/search', params={'q': q1})\n a2Result = requests.get('https://www.google.com/search', params={'q': q2})\n a3Result = requests.get('https://www.google.com/search', params={'q': q3})\n a4Result = requests.get('https://www.google.com/search', params={'q': q4})\n\n\n soup = BeautifulSoup(a1Result.text, 'lxml')\n print(a1 + \": \" + soup.find('div', {'id':'resultStats'}).text)\n \n soup = BeautifulSoup(a2Result.text, 'lxml')\n print(a2 + \": \" + soup.find('div', {'id':'resultStats'}).text)\n \n soup = BeautifulSoup(a3Result.text, 'lxml')\n print(a3 + \": \" + soup.find('div', {'id':'resultStats'}).text)\n\n soup = BeautifulSoup(a4Result.text, 'lxml')\n print(a4 + \": \" + soup.find('div', {'id':'resultStats'}).text)\n#END SEARCH/PROGRAM\n\nparseImg()\n\n","sub_path":"total_trivia_cheat.py","file_name":"total_trivia_cheat.py","file_ext":"py","file_size_in_byte":2184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"503304499","text":"'''Finding Common Elements in 2 different List using List Comprehension'''\n\na = [1,2,3,4,5]\nb = [2,4,6,8]\nresult=[]\n\n'''Using Normal Programming'''\n'''\nfor i in a:\n if i in b: # This method in python checks that whether the 'i' in 'a' is in 'b' or not\n result.append(i)\n\nprint(result)\n'''\n\n'''Using List Comprehensions'''\n\nresult = [i for i in a if i in b]\nprint(result)","sub_path":"10 List Comprehensions/commonelements.py","file_name":"commonelements.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"626187018","text":"#!/usr/bin/env python\nimport os, sys\n\n# Find PETSc/BuildSystem\nif 'PETSC_DIR' in os.environ:\n configDir = os.path.join(os.environ['PETSC_DIR'], 'config')\n bsDir = os.path.join(configDir, 'BuildSystem')\n fiatDir = os.path.join(os.environ['PETSC_DIR'],os.environ['PETSC_ARCH'],'lib', 'python'+'.'.join(map(str, sys.version_info[0:2])), 'site-packages')\n sys.path.insert(0, bsDir)\n sys.path.insert(0, configDir)\n if os.path.isdir(os.path.join(fiatDir,'FIAT')):\n sys.path.insert(0, fiatDir)\n\nimport PETSc.FEM\nfrom FIAT.reference_element import default_simplex\nfrom FIAT.lagrange import Lagrange\nfrom FIAT.discontinuous_lagrange import DiscontinuousLagrange\n\ngenerator = PETSc.FEM.QuadratureGenerator()\ngenerator.setup()\nelements = []\nif not (len(sys.argv)-2) % 5 == 0:\n sys.exit('Incomplete set of arguments')\nfor n in range((len(sys.argv)-2) / 5):\n dim = int(sys.argv[n*5+1])\n order = int(sys.argv[n*5+2])\n components = int(sys.argv[n*5+3])\n numBlocks = int(sys.argv[n*5+4])\n operator = sys.argv[n*5+5]\n if order == 0:\n element = DiscontinuousLagrange(default_simplex(dim), order)\n else:\n element = Lagrange(default_simplex(dim), order)\n element.numComponents = components\n elements.append(element)\nfilename = sys.argv[-1]\ngenerator.quadDegree = max([e.order for e in elements])\ngenerator.run(elements, numBlocks, operator, filename)\n","sub_path":"bin/pythonscripts/PetscGenerateFEMQuadrature.py","file_name":"PetscGenerateFEMQuadrature.py","file_ext":"py","file_size_in_byte":1382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"237457708","text":"import numpy as np\nimport cv2 as cv\nimport os\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\nfrom tensorflow.python.keras import utils\n\n# Path to folder of encoded non encoded pictures\npath = \"/Users/qwerty/Downloads/data/\"\n\ndata = []\ntarget = []\nfiles = []\n\n# Takes all .png files from the path whose dimensions are square with side 40\nfor i, filename in enumerate(os.listdir(path)):\n if filename[-4:] == \".png\" and cv.imread(path + filename).shape[0] == 40:\n files.append(filename)\n\nfiles.sort()\n\n# Appends integer array of the image pixel values to 'data' list\nfor i in range(len(files)):\n image_name = path + files[i]\n b = bytearray(cv.imread(image_name))\n data.append(list(b))\n\n # Target/label values determined from file name identified by \"Sten\" or not\n if \"Sten\" in image_name:\n target.append(1)\n else:\n target.append(0)\n\n# Preps features and labels into NumPy arrays to easily load into model\ntarget = utils.to_categorical(target)\nx = np.array(data)\ny = np.array(target)\n\n# Splits 20% of the data into the testing set that will determine its accuracy\n# and 80% of the data to train the model\ntrain_x, test_x, train_y, test_y = train_test_split(\n x, y, test_size=0.2, random_state=42)\n\n# Basic three-layered sequential model\nmodel = keras.Sequential([\n keras.layers.Dense(\n train_x.shape[1],\n activation=tf.nn.relu,\n input_shape=(train_x.shape[1], )),\n keras.layers.Dense(1000, activation=tf.nn.relu),\n keras.layers.Dense(2, activation=tf.nn.softmax)\n])\n\n# Passes proper parameters to model for classification\nloss = \"categorical_crossentropy\"\nmodel.compile(\n loss=loss,\n optimizer=tf.train.AdamOptimizer(learning_rate=0.1),\n metrics=[\"accuracy\"])\nmodel.fit(train_x, train_y, epochs=5, batch_size=64)\n\n# Determine success of model using testing set\ntest_loss, test_acc = model.evaluate(test_x, test_y, steps=20)\nprint(f\"Accuracy = {test_acc}\")\n\npredictions = model.predict(test_x)\n\nfor i in range(10):\n print(f\"{predictions[i]} {test_y}\")\n","sub_path":"d.py","file_name":"d.py","file_ext":"py","file_size_in_byte":2138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"407284992","text":"p = int(input('Digite o primeiro termo: ' ))\nr = int(input('Digite a razão: '))\ns = p\nx = 0\nwhile x < 10:\n print(p, end='')\n print(end=' → ' if x < 9 else print(end=' → FIM'))\n p += r\n x += 1\n\nprint('Acima os primeiros dez termos da PA de razão {} e primeiro termo {}.'.format(r, s))\n","sub_path":"Ex0061/ex0061.py","file_name":"ex0061.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"250307195","text":"import discord\nfrom discord.ext import commands\nimport asyncio\nimport random\nimport pickle\nimport sys\nimport math\nimport time\n\n#Levels\n#Formula = prev. + (prev. * 1.1)\nlvls_xp = [(5*(i**3)+50*i+100) for i in range(500)]\n\ndef LevelSet(xp):\n lvl=0\n while xp >= lvls_xp[lvl]:\n lvl += 1\n return lvl\n\ndef XPRem(xp):\n lvl=0\n while xp >= lvls_xp[lvl]:\n lvl += 1\n xp -= lvls_xp[lvl]\n return -xp\n\n#Colouring\ndef RankColour(rank):\n if rank == 1:\n return 0xFFFF11\n elif rank == 2:\n return 0xAAAAAA\n elif rank == 3:\n return 0x994400\n elif rank > 3 and rank <= 5:\n return 0x00FFFF\n elif rank > 5 and rank <= 10:\n return 0xFF3377\n elif rank > 10 and rank <= 25:\n return 0x00CC66\n elif rank > 25 and rank <= 50:\n return 0xCC4411\n elif rank > 50 and rank <= 100:\n return 0x990055\n elif rank > 100 and rank <= 250:\n return 0x9999FF\n else:\n return 0xFFFFFF\n\n#Format:\n#User Coins (time.time())OfNextCoinClaim Wins Loses\nclass SetupCog:\n def __init__(self, bot):\n self.bot = bot\n\n @commands.group(name=\"setup\")\n #@commands.has_permissions(manage_guild=True)\n async def setup(self, ctx):\n if ctx.invoked_subcommand is None:\n await ctx.send(\"You haven't sent a setup subcommand.\")\n\n @setup.command(name=\"reset\")\n #@commands.has_permissions(administrator=True)\n async def SetupReset(self,ctx):\n guilds = []\n guilds = pickle.load(open(\"guilds.data\", \"rb\"))\n for a in range(len(guilds)):\n if guilds[a][0] == ctx.guild.id:\n guilds[a] = [ctx.guild.id, [False], [False], [False], [False, []]]\n pickle.dump(guilds, open(\"guilds.data\", \"wb\"))\n print(\"Server Reset\")\n await ctx.send(\"Server Setup Reset\")\n return\n guilds.append([guild.id, [False], [False], [False], [False, []]])\n print(\"Server Added\")\n pickle.dump(guilds, open(\"guilds.data\", \"wb\"))\n\n @commands.command(\"first\")\n async def FirstTime(self,ctx):\n await ctx.send(\"https://scrapbox.io/GreenBOT/First_Time%3F_Check_Here!\\nImprovements <:soon:233642257817927680>.\")\n\n @setup.command(name=\"enable\")\n async def SetupEnable(self,ctx,module:str=None):\n module = module.lower()\n guilds = []\n guilds = pickle.load(open(\"guilds.data\", \"rb\"))\n for a in range(len(guilds)):\n if guilds[a][0] == ctx.guild.id:\n if module == \"misc\":\n guilds[a][1] = [True]\n elif module == \"trivia\":\n guilds[a][2] = [True]\n elif module == \"members\":\n guilds[a][3] = [True]\n elif module == None:\n await ctx.send(\"Please select a module. The avaliable modules are `misc`,`trivia`,`members`.\")\n return\n else:\n await ctx.send(\"That isn't an avaliable module. The avaliable modules are `misc`, `trivia`, `members`.\")\n return\n await ctx.send(f\"Enabled module `{module}`\")\n pickle.dump(guilds, open(\"guilds.data\", \"wb\"))\n return\n await ctx.send(\"You have not run `&&setup reset` yet.\")\n\n @setup.command(name=\"disable\")\n async def SetupDisable(self,ctx,module:str=None):\n module = module.lower()\n guilds = []\n guilds = pickle.load(open(\"guilds.data\", \"rb\"))\n for a in range(len(guilds)):\n if guilds[a][0] == ctx.guild.id:\n if module == \"misc\":\n guilds[a][1] = [False]\n elif module == \"trivia\":\n guilds[a][2] = [False]\n elif module == \"members\":\n guilds[a][3] = [False]\n elif module == None:\n await ctx.send(\"Please select a module. The avaliable modules are `misc`,`trivia`,`members`.\")\n return\n else:\n await ctx.send(\"That isn't an avaliable module. The avaliable modules are `misc`, `trivia`, `members`.\")\n return\n await ctx.send(f\"Disabled module `{module}`.\")\n pickle.dump(guilds, open(\"guilds.data\", \"wb\"))\n return\n await ctx.send(\"You have not run `&&setup reset` yet.\")\n\n @setup.command('fix')\n @commands.is_owner()\n async def SetupFix(self, ctx):\n #Debug\n #import ipdb; ipdb.set_trace()\n guilds = []\n guilds = pickle.load(open(\"guilds.data\", \"rb\"))\n guildsList = self.bot.guilds\n for y in range(len(guilds)):\n for x in range(0,len(guildsList)):\n if guildsList[x].id == guilds[y][0]:\n guildsList.pop(x)\n break\n for z in range(len(guildsList)):\n guilds.append([guildsList[z].id, [False], [False], [False], [False, []]])\n print(len(guilds))\n pickle.dump(guilds, open(\"guilds.data\", \"wb\"))\n\n \n# The setup fucntion below is neccesarry. Remember we give bot.add_cog() the name of the class in this case MembersCog.\n# When we load the cog, we use the name of the file.\ndef setup(bot):\n bot.add_cog(SetupCog(bot))\n random.seed()\n","sub_path":"cogs/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":5394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"360405231","text":"import datetime\nimport requests\nfrom bs4 import BeautifulSoup\nimport re\nimport pythonwhois\nimport time\nimport random\n\ndef wait():\n time.sleep(random.randint(4,5))\n\nd = datetime.datetime.now() + datetime.timedelta(days=1) #Freeing domain names in next day\nLink = \"http://www.skvotte.ru/current/free.php\"\n#print(Link)\n\n#Requests - open page and get the code\nparameters = {'date': d.strftime(\"%Y-%m-%d\"), 'cy': ''}\ncont = requests.get(Link, params = parameters)\nc = cont.content\n#print(c)\n\n\n#Let's parse and do the List\nsoup = BeautifulSoup(c)\nsamples = soup.find_all(\"a\", \"state2\")\n\nsource = []\ni = 0\nfor x in range(0,len(samples)):\n source.extend(samples[i])\n i += 1\n#print(source)\n\n#Clear the List from digits and \"-\"\n\n### !!! TO DO: I don't love this part, find the way to make it simply/compact !!!\nregex = re.compile(\".*([0-9]|\\-).*\")\nsource2 = [m.group(0) for l in source for m in [regex.search(l)] if m]\nsourceSet = set(source)\nsourceSet2 = set(source2)\nlistClearFromDigitsAndOtherSymbols = sourceSet-sourceSet2\nlistReady = list(sorted(listClearFromDigitsAndOtherSymbols)) # RESULT\n#print(listReady)\n\n#Check the each domains for free\n\nstart_time = time.time()\n\n### !!! TO DO: def ...() !!!\npending = []\nfor dom in listReady:\n\ttry:\n\t\ta = pythonwhois.get_whois(dom)\n\t\tprint(\"Checking \", dom, \" ...\")\n\t\tif '2016' in a['raw'][0]:\n\t\t\t#print(\"Paid: \" + dom)\n\t\t\twait()\n\t\telse:\n\t\t\tprint(dom, \" is not paid yet, yahoo!\")\n\t\t\tpending.append(dom)\n\t\t\twait()\n\texcept Exception:\n\t\tprint(\"Check it again...\")\n\t\ta = pythonwhois.get_whois(dom)\n\t\tif '2015' in a['raw'][0]:\n\t\t\t#print(\"Paid: \" + dom)\n\t\t\twait()\n\t\telse:\n\t\t\tprint(dom, \" is not paid yet, yahoo!\")\n\t\t\tpending.append(dom)\n\t\t\twait()\n\n#Create txt-file \n\nwith open(\"notPaidDomains\" + d.strftime(\" - %d.%m.%Y\") + \".txt\", \"w+\") as fp:\n\tfor item in pending:\n\t\tfp.write(\"{}\\n\".format(item))\nfp.close()\n\n#Timer\nend_time = time.time() - start_time\nprint(\"DONE. {} seconds were burned.\".format(end_time))\n","sub_path":"start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":1958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"311214347","text":"import os\n\n# FUNÇÕES GLOBAIS\ndef status():\n\tprint(\"Status:\")\n\tos.system(\"git status\")\n\ndef stage():\n\tstatus()\n\tprint(\"\")\n\tprint(\"Stageando alterações...\")\n\tos.system(\"git add .\")\n\tprint(\"\")\n\tprint(\"Status:\")\n\tos.system(\"git status\")\n\tprint(\"\")\n\ndef commit():\n\tmsg = input(\"o que foi alterado nessa versão? -> \")\n\tos.system('git commit -m \"'+ msg +' (IGD3 Bot Commit)\"')\n\tprint(\"\")\n\tprint(\"Efetuando commit local...\")\t\t\n\tprint(\"\")\n\tprint(\"Commitado!\")\n\tprint(\"\")\n\ndef fast_commit():\n\tprint(\"Efetuando commit local rápido...\")\n\tos.system('git commit -m \"Committed With IGD3 <3\"')\n\tprint(\"\")\n\ndef push():\n\tremote = input(\"remote-> \")\n\tbranch = input(\"branch-> \")\n\tprint(\"Fazendo upload para repositório remoto...\")\n\tos.system(\"git push \" + remote + \" \" + branch)\n\tprint(\"\")\n\ndef fast_push():\n\tprint(\"Fazendo upload para repositório remoto...\")\n\tos.system('git push origin master')\n\ndef pull():\n\tremote = input(\"remote-> \")\n\tbranch = input(\"branch-> \")\n\tprint(\"Fazendo download para repositório remoto...\")\n\tos.system(\"git pull \" + remote + \" \" + branch)\n\tprint(\"\")\n\ndef linux_or_win():\n\tquest = input(\"Está rodando esse software em um terminal Linux?\")\n\tif (quest == \"s\"):\n\t\tpacotes = [\"git\", \"nodejs\"]\n\t\tpacotes_size=len(pacotes)\n\t\tloop = 0\n\t\twhile (loop < pacotes_size):\n\t\t\tkey = False\n\t\t\twhile (key == False):\n\t\t\t\tprint(\"Essa aplicação depende do pacote \"+ pacotes[loop] +\" para funcionar corretamente.\")\n\t\t\t\task = input(\"Deseja instalá-lo agora?->\")\t\n\t\t\t\tif (ask == \"s\"):\n\t\t\t\t\tos.system(\"sudo apt-get install \"+ pacotes[loop])\n\t\t\t\t\tkey = True\n\t\t\t\t\tloop += 1\n\t\t\t\telif (ask == \"n\"):\n\t\t\t\t\tprint(pacotes[loop] + \" não será instalado.\")\n\t\t\t\t\tkey = True\n\t\t\t\t\tloop += 1\n\t\t\t\telse:\n\t\t\t\t\tprint(\"Responda apenas 's' ou 'n'!\")\n\telif (quest == \"n\"):\n\t\tminimal = \"\"\"\n----------------IGD3 FOR WINDOWS-------------------------\n\n\tRequisitos Mínimos\n\tCopie e Cole os seguintes links em seu browser\n\t\tpara instalar os pacotes necessários!\n\n\t(*) Git: ( https://git-scm.com/download/win )\n\t(*) Node.js: ( https://nodejs.org/en/ )\n\n---------------------------------------------------------\n\t\t\"\"\"\n\t\tprint(minimal)\n\telse:\n\t\tprint(\"Responda apenas 's' ou 'n'!\")\n\ndef session_start():\n\t# CALLBACK FUNCTIONS\n\tdef run_branch():\n\t\tbranch_name = input(\"Nome da branch ->\")\n\t\tos.system(\"git checkout -b \" + branch_name )\n\tdef run_igd():\n\t\tconsole = [\"npm init -y\",\n\t\t\"npm install --save-dev -g gulp\",\n\t\t\"npm install --save-dev gulp-sass\", \n\t\t\"npm install --save-dev gulp-rename\", \n\t\t\"npm install --save-dev gulp-minify\",\n\t\t\"npm install --save-dev gulp-watch\",\n\t\t\"npm install --save-dev gulp.spritesmith\",\n\t\t\"npm install --save-dev gulp-imagemin\",\n\t\t\"npm install --save-dev gulp-changed\",\n\t\t\"npm install --save-dev browser-sync\",\n\t\t\"gulp\"]\n\t\tquant = len(console)\n\t\tloop = 0\n\t\twhile(loop < quant):\n\t\t\tos.system(console[loop])\n\t\t\tloop += 1\n\tdef hello_git():\n\t\tprint(\"Iniciando um novo repositório git...\")\n\t\tos.system(\"git init\")\n\t\tprint(\"Configurando as informações do autor...\")\n\t\tos.system('git config user.name \"'+ user +'\"')\n\t\tos.system('git config user.email \"'+ mail +'\"')\n\t\tclone = input(\"Link do repositório remoto (origin)->\")\n\t\tos.system(\"git remote add origin \"+ clone)\n\t\t\n\t\task = input(\"Deseja subir a versão para o repositório remoto agora? ->\")\n\t\tif (ask == \"s\"):\n\t\t\tstage()\n\t\t\tfast_commmit()\n\t\t\tfast_push()\n\t\t\tprint(\"\")\n\t\telif (ask == \"n\"):\n\t\t\tprint(\"Repositório remoto não atualizado!\")\n\t\t\tprint(\"\")\n\t\telse:\n\t\t\tprint(\"Responda apenas 's' ou 'n'!\")\n\t\t\tprint(\"\")\n\tdef git_commit():\n\t\tstage()\n\t\tcommit()\n\n\t# Listeners Tree\n\twhile (True):\n\t\tshell = input(user + \"-> \")\n\t\tif (shell == \"branch\"):\n\t\t\trun_branch()\n\t\tif (shell == \"hello node\"):\n\t\t\trun_igd()\n\t\tif (shell == \"hello git\"):\n\t\t\thello_git()\n\t\tif (shell == \"commit\"):\n\t\t\tgit_commit()\n\t\tif (shell == \"log\"):\n\t\t\tos.system(\"git log\")\n\t\tif (shell == \"cls\"):\n\t\t\tos.system(\"cls\")\n\t\tif (shell == \"push\"):\n\t\t\tpush()\n\t\tif (shell == \"status\"):\n\t\t\tstatus()\n\t\tif (shell == \"checkout\"):\n\t\t\tbranch = input(\"branch->\")\n\t\t\tos.system(\"git checkout \"+ branch)\n\t\tif (shell == \"merge\"):\n\t\t\tbranch = input(\"branch->\")\n\t\t\tos.system(\"git merge \" + branch)\n\t\tif (shell == \"pull\"):\n\t\t\tpull()\n\n\nprint(\"---------IGD3 v.1.0.0----------\")\nprint(\"Automatize Já!\")\nprint(\"Desenvolvido por Petrus Rennan\")\nprint(\"-------------------------------\")\nwhile (True):\n\task = input(\"Já possui conta?\")\n\tif (ask == \"s\"):\n\t\tuser = \"Visitante\"\n\t\tmail = \"email@padrao.com.br\"\n\t\tpassw = \"root\"\n\t\tsession_start()\n\telif (ask == \"n\"):\n\t\twarning = \"\"\"\n\n----CADASTRO----------------------------------------------------------\n\nÉ essencial que você forneça seus dados pessoais para nosso algoritmo,\ncom eles iremos automatizar taarefas chatas como por exemplo cadastros\nde configs locais no git, ou registro de informações autorais em .json.\n\n----------------------------------------------------------------------\n\n\n\t\t\"\"\"\n\t\tlinux_or_win()\n\t\tprint(warning)\n\t\tuser = input(\"Digite seu nome: \")\n\t\tmail = input(\"Digite seu e-mail: \")\n\t\tkey = False\n\t\twhile (key == False):\n\t\t\tpassw = input(\"Digite uma senha bem forte: \")\n\t\t\tconfirm_p = input(\"Confirme sua senha: \")\n\t\t\tif(passw == confirm_p):\n\t\t\t\tprint(\"Usuário cadastrado no banco de dados!\")\n\t\t\t\tsession_start()\n\t\t\telse:\n\t\t\t\tprint(\"As senhas não conferem!\")\n\t\t\t\tkey = False\n\t\t\n\t\n\telse:\n\t\tprint(\"Responda apenas 's' ou 'n'!\")","sub_path":"igd3.py","file_name":"igd3.py","file_ext":"py","file_size_in_byte":5294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"325358788","text":"import pyglet, random, math\nfrom game import load\nfrom game import resources\nfrom game import physicalobject\nfrom game import player\n\n# Initializes 800x600 windows\ngame_window = pyglet.window.Window(800, 600)\n\n# Create batch - anything part of this batch will be drawn via the on_draw function\n# We don't want to use batches to update already drawn objects' positions though\n# For that, we use a game_object list and update functions\nmain_batch = pyglet.graphics.Batch()\n\n# Generate score label at top of screen\nscore_label = pyglet.text.Label(text=\"Score: 0\", x=10, y=575, batch=main_batch)\n\n# Generate name of game at top of window\nlevel_label = pyglet.text.Label(text=\"Ben's Asteroid Game\",\n x=400, y=575, anchor_x='center', batch=main_batch)\n\n# Generate player ship \nplayer_ship = player.Player(x=400, y=300, batch=main_batch)\n\n# generate player lives at top right of screen\nplayer_lives = load.player_lives(3, main_batch)\n\n# generate asteroids at start of game\nasteroids = load.asteroids(3, player_ship.position, main_batch)\n\n# Create list of object in game to update each frame\ngame_objects = [player_ship] + asteroids\n\n# Tell pyglet that player_ship is an event handler\n# Question - What is an event handler?\n# Well, this is pushing player_ship into the event stack\n# game_window.push_handlers(player_ship)\ngame_window.push_handlers(player_ship.key_handler)\n\n\n@game_window.event\ndef on_draw():\n game_window.clear()\n\n main_batch.draw()\n \n \ndef update(dt):\n for obj in game_objects:\n obj.update(dt)\n for i in xrange(len(game_objects)):\n for j in xrange(i+1, len(game_objects)):\n\n obj_1 = game_objects[i]\n obj_2 = game_objects[j]\n\n if not obj_1.dead and not obj_2.dead:\n if obj_1.collides_with(obj_2):\n obj_1.handle_collision_with(obj_2)\n obj_2.handle_collision_with(obj_1)\n\n for to_remove in [obj for obj in game_objects if obj.dead]:\n # Remove object fRom batches it is a member of\n to_remove.delete()\n\n # Remove object from our object list\n game_objects.remove(to_remove)\n\n\n\n\nif __name__ == '__main__':\n\n # Update 120 times per second (twice as fast as common monitor refresh rate of 60 Hz)\n pyglet.clock.schedule_interval(update, 1/120.0)\n\n pyglet.app.run()\n\n\n\n","sub_path":"python/Libraries/pyglet/asteroids/version3/asteroids.py","file_name":"asteroids.py","file_ext":"py","file_size_in_byte":2368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"384714593","text":"import sys\nfrom sklearn.ensemble import RandomForestClassifier\n\nfrom imblearn.over_sampling import SMOTE\n\nfrom utils import *\n\ndef approach():\n args = sys.argv\n\n data_path = args[1]\n score_path = args[2]\n approach_name = args[3]\n drop_months_end = int(args[4])\n num_test_commits = int(args[5])\n\n ######################################\n # Loop for within project prediction #\n # Loads only one project at a time #\n ######################################\n \n for project_name in list_all_projects(path=data_path):\n print(project_name)\n data = load_project(path=data_path, project_name=project_name)\n\n train_df, test_df = prepare_within_project_data(data, drop_months_end=drop_months_end, num_test_commits=num_test_commits)\n\n #########################################\n # Build Classifier #\n # (should be adopted for your approach) #\n #########################################\n\n # extract columns with feature values from available data\n # we prepared the following feature lists for your convenience:\n #\n # ALL_FEATURES\n # STATIC_FEATURES\n # STATIC_FILE_FEATURES\n # STATIC_CLASS_FEATURES\n # STATIC_INTERFACE_FEATURES\n # STATIC_ENUM_FEATURES\n # STATIC_METHOD_FEATURES\n # FGJIT_FEATURES\n # JIT_FEATURES\n # WD_FEATURES\n # PMD_FEATURES\n #\n # please check the documentation to see which features are included in each list\n # https://github.com/smartshark/promise-challenge/blob/main/dataset.md\n # \n # we use all available features for our baseline\n X_train = train_df[ALL_FEATURES].values\n X_test = test_df[ALL_FEATURES].values\n\n # binary labels are in the column 'is_inducing'\n y_train = train_df['is_inducing']\n y_test = test_df['is_inducing']\n\n # we recommend using a fixed random seed for reproducibility, but this is up to you\n RANDOM_SEED = 42\n np.random.seed(RANDOM_SEED)\n\n # we resample with SMOTE and build a random forest for our baseline\n X_res, y_res = SMOTE(random_state=RANDOM_SEED).fit_resample(X_train, y_train)\n rf = RandomForestClassifier()\n rf.fit(X_res, y_res)\n y_pred = rf.predict(X_test)\n\n ######################################################\n # DO NOT TOUCH FROM HERE #\n # This is where the scores are calculated and stored #\n ######################################################\n\n scores = score_model(test_df, y_pred)\n print_summary(train_df, test_df, scores)\n write_scores(score_path, approach_name, project_name, scores)\n\n \nif __name__ == '__main__':\n approach()","sub_path":"approaches/baseline_rf_wp/approach.py","file_name":"approach.py","file_ext":"py","file_size_in_byte":2803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"423804551","text":"from io import BytesIO\nfrom random import choice\n\nfrom sqlitedict import SqliteDict\n\n\nclass Database:\n def __init__(self):\n self.db = SqliteDict(\"database.sqlite\")\n\n if \"echoes\" not in self.db.keys():\n self.db[\"echoes\"] = []\n self.db.commit()\n\n def add_to_echoes(self, echo_text: str):\n if echo_text not in self.db[\"echoes\"]:\n self.db[\"echoes\"] += [echo_text]\n self.db.commit()\n\n def get_random_echo(self) -> str:\n try:\n return choice(self.db[\"echoes\"])\n except IndexError:\n return \"Congrats, that's the first message in my database!\"\n\n def get_stats(self) -> str:\n return f\"Message count: {len(self.db['echoes'])}\"\n\n def get_dump(self) -> BytesIO:\n dump_bytes = BytesIO(bytes(\"\\n\".join(self.db['echoes']), encoding=\"utf-8\"))\n dump_bytes.name = \"dump.txt\"\n return dump_bytes\n\n def overwrite(self, ow_bytes: BytesIO):\n ow_bytes.seek(0)\n ow_list = ow_bytes.read().decode(\"utf-8\").split(\"\\n\")\n self.db[\"echoes\"] = ow_list\n self.db.commit()\n\n def remove_echo(self, echo_text: str) -> str:\n try:\n self.db[\"echoes\"].remove(echo_text)\n self.db.commit()\n return \"Successfully removed from my database!\"\n except ValueError:\n return \"That text isn't in my database!\"\n","sub_path":"echo_bot/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":1393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"88299624","text":"import numpy as np\nimport cv2\n\nclass tracker():\n\n\tdef __init__(self, win_width, win_height, margin, ym, xm, smooth_fac):\n\t\t# list for storing center values\n\t\tself.centers = []\n\n\t\t# window pixel dimensions of center vals to determine curve\n\t\tself.window_width = win_width\n\t\tself.window_height = win_height\n\n\t\tself.margin = margin\n\n\t\t# meters per pixel\n\t\tself.ym_per_pix = ym\n\t\tself.xm_per_pix = xm\n\n\t\tself.smooth_factor = smooth_fac\n\n\n\t# apply sliding window approach with a convolution like in lesson\n\tdef find_window_centroids(self, warped):\n\n\t\t# set window width/ height/ margin\n\t\twindow_width = self.window_width\n\t\twindow_height = self.window_height\n\t\tmargin = self.margin\n\t \n\t # store window centroid positions\n\t\twindow_centroids = [] \n\t\t# convolution templace for window\n\t\twindow = np.ones(window_width) \n\n\t # get the vertical image slice and then get starting positions for lanes then do 1D convolution the vertical image slice\n\t # take warped image and pick slice and squash it together to see pixel density\n\t\tl_sum = np.sum(warped[int(3*warped.shape[0]/4):,:int(warped.shape[1]/2)], axis=0)\n\t\tl_center = np.argmax(np.convolve(window,l_sum))-window_width/2\n\t\tr_sum = np.sum(warped[int(3*warped.shape[0]/4):,int(warped.shape[1]/2):], axis=0)\n\t\tr_center = np.argmax(np.convolve(window,r_sum))-window_width/2+int(warped.shape[1]/2)\n\t \n\t # collect centroid positions\n\t\twindow_centroids.append((l_center,r_center))\n\t \n\t\t# loop through the other slices\n\t\tfor level in range(1,(int)(warped.shape[0]/window_height)):\n\t\t\t# convolve the window into the vertical slice of the image\n\t\t\timage_layer = np.sum(warped[int(warped.shape[0]-(level+1)*window_height):int(warped.shape[0]-level*window_height),:], axis=0)\n\t\t\tconv_signal = np.convolve(window, image_layer)\n\t\t\t# Find the best left centroid by using past left center as a reference\n\t\t\t# Use window_width/2 as offset because convolution signal reference is at right side of window, not center of window\n\t\t\toffset = window_width/2\n\t\t\t# use padding to consider region of convolved signal\n\t\t\tl_min_index = int(max(l_center+offset-margin,0))\n\t\t\tl_max_index = int(min(l_center+offset+margin,warped.shape[1]))\n\t\t\t# gets max pixel density per local region\n\t\t\tl_center = np.argmax(conv_signal[l_min_index:l_max_index])+l_min_index-offset\n\t\t\t# Find the best right centroid by using past right center as a reference\n\t\t\tr_min_index = int(max(r_center+offset-margin,0))\n\t\t\tr_max_index = int(min(r_center+offset+margin,warped.shape[1]))\n\t\t\tr_center = np.argmax(conv_signal[r_min_index:r_max_index])+r_min_index-offset\n\t\t\t# add to centriod list for that layer\n\t\t\twindow_centroids.append((l_center,r_center))\n\t\t# add to the line_tracker recent centers\n\t\tself.centers.append(window_centroids)\n\t\t# use the smooth factor to average line center values and prevent marker from jumping around\n\t\treturn np.average(self.centers[-self.smooth_factor:], axis = 0)\n","sub_path":"line_tracker.py","file_name":"line_tracker.py","file_ext":"py","file_size_in_byte":2905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"34043720","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /usr/local/lib/python2.7/dist-packages/i3visiotools/wrappers/livejournal.py\n# Compiled at: 2014-12-25 06:48:18\nfrom platforms import Platform\n\nclass Livejournal(Platform):\n \"\"\" \n A object for Livejournal.\n \"\"\"\n\n def __init__(self):\n \"\"\" \n Constructor... \n \"\"\"\n self.platformName = 'Livejournal'\n self.tags = [\n 'social', 'opinions']\n self.NICK_WILDCARD = ''\n self.url = self.NICK_WILDCARD + '.' + 'livejournal.com'\n self.notFoundText = [\n ' is not currently registered.']\n self.forbiddenList = ['.']","sub_path":"pycfiles/i3visiotools-v0.2.3.linux-i686.tar/livejournal.py","file_name":"livejournal.py","file_ext":"py","file_size_in_byte":824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"454235595","text":"from __future__ import division\n\nimport os\nimport argparse\n\nfrom utils.train_tiny_quan_utils import Quan_train\n\n\ndef parser_argument(parser):\n # Params\n parser.add_argument(\"--epochs\", type=int, default=100, help=\"number of epochs\")\n parser.add_argument(\"--batch_size\", type=int, default=64, help=\"size of each image batch\")\n parser.add_argument(\"--img_size\", type=int, default=512, help=\"size of each image dimension\")\n parser.add_argument(\"--n_cpu\", type=int, default=8, help=\"number of cpu threads to use during batch generation\")\n parser.add_argument(\"--gradient_accumulations\", type=int, default=2, help=\"number of gradient accums before step\")\n\n # Model config\n parser.add_argument(\"--model_def\", type=str, default=None, help=\"path to definition file\")\n parser.add_argument(\"--pretrained_weights\", type=str, help=\"if specified starts from checkpoint model\") # as a checkpoint in training\n parser.add_argument(\"--gpu\", type=int, default=0, help=\"assign a gpu to this porject, start from 0\")\n\n # Eval & Save interval\n parser.add_argument(\"--checkpoint_interval\", type=int, default=2, help=\"interval between saving model weights\")\n parser.add_argument(\"--evaluation_interval\", type=int, default=2, help=\"interval evaluations on validation set\")\n parser.add_argument(\"--model_save_path\", type=str, default=None)\n\n # Dataset info\n parser.add_argument(\"--data_config\", type=str, default=\"config/dac.data\", help=\"path to data config file\")\n parser.add_argument(\"--data_norm\", type=bool, default=False, help=\"/255 for every pixel\")\n\n # Others\n parser.add_argument(\"--compute_map\", default=False, help=\"if True computes mAP every tenth batch\")\n parser.add_argument(\"--multiscale_training\", default=False, help=\"allow for multi-scale training\")\n\n # Quantisize Settings:\n parser.add_argument(\"--quan_weitht_bit\", type=int, default=8, help=\"\")\n parser.add_argument(\"--quan_activation_bit\", type=int, default=8, help=\"\")\n\n # Full Precision model:\n parser.add_argument(\"--fp_pretrained\", type=str, default=None, help=\"path to full precision model\") # as a init_weight for Quan-Net\n\n opt = parser.parse_args()\n print(opt)\n\n return opt\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n opt = parser_argument(parser)\n\n # Re Settings\n opt.name = 'Tiny-YOLO-Release Full-Precision anchor:little '\n opt.hasQ = True\n\n # Set env GPU\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = str(opt.gpu)\n\n Quan_train(opt)\n\n# python train_tiny_qan.py \\\n# --gpu 0 \\\n# --fp_pretrained 'checkpoints/fp_ckpt/Tiny_yolo_ckpt_90.pth' \\\n# --model_save_path 'checkpoints/Quan_model/' \\\n# --model_def \"config/yolov3-tiny_lsq.cfg\" \\\n# --quan_weitht_bit 3 \\\n# --quan_activation_bit 3\n","sub_path":"train_tiny_qan.py","file_name":"train_tiny_qan.py","file_ext":"py","file_size_in_byte":2785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"142746223","text":"# import the python module time (to use Time() functions etc. and libraries that will be needed\n# Import numpy (foundamental package for scientific computing) and pandas (BSD-licensed library\n# providing high-performance, easy-to-use data structures and data analysis tools for the Python\n# programming language)\n\nimport time\nimport numpy as np\nimport pandas as pd\n\n\n# import the data files (.csv format) of the three cities Chicago, New York and store them\n# in a dictionary, a data structure that stores pairs of elements, keys (elements names)\n# and values\n\n\nCITY_DATA={ 'chicago': './chicago.csv',\n 'new york city': './new_york_city.csv',\n 'washington': './washington.csv' }\n\n# define a list to contain the three cities'names\n\ncities = ['chicago','new york city','washington']\n\n# define a list to contain the months'names\n\nmonths = ['january', 'february', 'march', 'april', 'may', 'june', 'all']\n\n# define a list to contain the weekdays'names\n\ndays = ['monday', 'tuesday', 'wednsday', 'thursday', 'friday', 'saturday', 'sunday', 'all']\n\n# define a function to get some user defined/ selected filters for the analysis\n\ndef get_filters():\n\n# Asks user to specify a city, month, and day to analyze.\n# Returns:\n# (str) city - name of the city to analyze\n# (str) month - name of the month to filter by, or \"all\" to apply no month filter\n# (str) day - name of the day of week to filter by, or \"all\" to apply no day filter\n\n print('Hello! Let\\'s explore some US bikeshare data!')\n\n # TO DO: get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n\n # Print a line to separate results\n # I Get user input for city (chicago, new york city, washington).\n # HINT: Use a while loop to handle invalid inputs\n # - I create a while True loop to get the user's string input for the city choice,\n # - Since \"raw_input\" function appears to be deprecated I use the function \"input\"\n # - I use str() function to define the data type of the inputs\n # - I use the .lower() method to prevent case sensitive errors on the user side\n # - I use \\n to create a line break\n # - I use \"while True\" function to create a while loop that iterate indefinitely until\n # a condition is met\n\n while True:\n city = str(input('\\n Please enter a filter for the city you would like to explore. Enter: New York City, Chicago or Washington?\\n')).lower()\n if city not in cities:\n print('{} is not a valid option, please input your choice again'.format(city))\n continue\n else:\n break\n\n # TO DO: get user input for month (all, january, february, ... , june)\n\n # I Get user input for months (from january until june and I provide the option 'all').\n # HINT: Use a while loop to handle invalid inputs\n # - I create a while True loop to get the user's string input for the month's choice,\n # - Since \"raw_input\" function appears to be deprecated I use the function \"input\"\n # - I use str() function to define the data type of the inputs\n # - I use the .lower() method to prevent case sensitive errors on the user side\n # - I use \\n to create a line break\n # - I use \"while True\" function to create a while loop that iterate indefinitely until\n # a condition is met\n\n while True:\n month = str(input('\\n Please enter a filter for the month you would like to explore. Enter a month between january and june or simply type \\\"All\\\" \\n')).lower()\n if month not in months:\n print('{} is not a valid option, please input your choice again'.format(month))\n continue\n else:\n break\n\n # TO DO: get user input for day of week (all, monday, tuesday, ... sunday)\n\n # I Get user input for weekdays (I provide the option 'all').\n # HINT: Use a while loop to handle invalid inputs\n # - I create a while True loop to get the user's string input for the day's choice,\n # - Since \"raw_input\" function appears to be deprecated I use the function \"input\"\n # - I use str() function to define the data type of the inputs\n # - I use the .lower() method to prevent case sensitive errors on the user side\n # - I use \\n to create a line break\n # - I use \"while True\" function to create a while loop that iterate indefinitely until\n # a condition is met\n\n while True:\n day = str(input('\\n Please enter a filter for the weekday you would like to explore or simply type \\\"All\\\" \\n')).lower()\n if day not in days:\n print('{} is not a valid option, please input your choice again'.format(day))\n continue\n else:\n break\n\n print('-'*40)\n return city, month, day\n\ndef load_data(city, month, day):\n\n# Loads data for the specified city and filters by month and day if applicable.\n\n# Args:\n# (str) city - name of the city to analyze\n# (str) month - name of the month to filter by, or \"all\" to apply no month filter\n# (str) day - name of the day of week to filter by, or \"all\" to apply no day filter\n# Returns:\n# df - Pandas DataFrame containing city data filtered by month and day\n\n # load data file into dataframe\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week and hour from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n df['hour'] = df['Start Time'].dt.hour\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n\n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n\n return df\n#------------------------------------\n\n# I define a function to ask the user if he/she wants to see the first 5 rows of the raw data\ndef get_rawdata(df):\n\n# I create a list of possible answers to the question. The list can be used only in this function\n\n answers = ['y','n']\n\n# I use a while loop to ask the user if he/she wants to see some row data\n\n while True:\n answer = str(input('\\n would you like to see 5 sample rows from a raw data file? Please type \"y\" for yes and \"n\" for no.\\n')).lower()\n if answer not in answers:\n print('{} is not a valid option, please input your choice again')\n continue\n if answer == 'y':\n # I create a panda's dataframe and I provide a sample .csv file (as it is a sample to see how data look I provide no choice on the city)\n #df = pd.read_csv(CITY_DATA[city]).sample(5)\n # I use the function .sample() to provide the view of 5 random rows\n # (instead of using .head() or .tail() to provide respectively the first 5 rows or the last five rows )\n df = df.sample(5)\n print (df)\n continue\n else:\n break\n\n print('-'*40)\n\n#------------------------------------\n\ndef time_stats(df):\n# Displays statistics on the most frequent times of travel\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n # display the most common month\n most_common_month = df['month'].mode()[0]\n print(\"The month bikeshare is used the most is :\", most_common_month)\n\n # display the most common day of week\n most_common_day_of_week = df['day_of_week'].mode()[0]\n print(\"The most chosen day of week is :\", most_common_day_of_week)\n\n # display the most common start hour\n popular_hour = df['hour'].mode()[0]\n print('The most popular start hour is:', popular_hour)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\ndef station_stats(df):\n# Displays statistics on the most popular stations and trip.\"\"\"\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # display most commonly used start station\n most_common_start_station = df['Start Station'].mode()[0]\n print(\"The most commonly used start station is:\", most_common_start_station)\n\n # display most commonly used end station\n most_common_end_station = df['End Station'].mode()[0]\n print(\"The most commonly used end station is :\", most_common_end_station)\n\n # display most frequent combination of start station and end station trip\n origin_destination = df[['Start Station', 'End Station']].mode().loc[0]\n print(\"The most common origin-destination is: {}\"\\\n .format(origin_destination[0]))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\ndef trip_duration_stats(df):\n# Displays statistics on the total and average trip duration\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # display total travel time\n total_travel = df['Trip Duration'].sum()\n print(\"The total travel time is:\", total_travel)\n\n # display mean travel time\n mean_travel = df['Trip Duration'].mean()\n print(\"the mean travel time is:\", mean_travel)\n\n # display max travel time\n max_travel = df['Trip Duration'].max()\n print(\"Max travel time :\", max_travel)\n\n # display max travel time\n min_travel = df['Trip Duration'].min()\n print(\"Max travel time :\", max_travel)\n \n # display standard deviation on travel times\n std_travel_times = df['Trip Duration'].std()\n print(\"Standard deviation travel time :\", std_travel_times)\n\n # display median on travel times\n median_travel_times = df['Trip Duration'].std()\n print(\"Median travel time :\", median_travel_times)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\ndef user_stats(df):\n \"\"\"Displays statistics on bikeshare users.\"\"\"\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # Display counts of user types\n user_types = df['User Type'].value_counts()\n print(user_types)\n\n if 'Gender' in df.columns:\n\n # Display counts of gender\n gender = df['Gender'].value_counts()\n print(gender)\n\n # Display earliest, most recent, and most common year of birth\n older_user = df ['Birth Year'].min()\n younger_user = df ['Birth Year'].max()\n average_year = df ['Birth Year'].mode()[0]\n print(\"The oldest user was born in year:\", older_user)\n print(\"The youngest user was born in year:\", younger_user)\n print(\"The most common user was born in the year:\", average_year)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\ndef main():\n while True:\n city, month, day = get_filters()\n df = load_data(city, month, day)\n\n get_rawdata(df)\n time_stats(df)\n station_stats(df)\n trip_duration_stats(df)\n user_stats(df)\n\n restart = input('\\nWould you like to restart? Enter yes or no.\\n')\n if restart.lower() != 'yes':\n break\n\nif __name__ == \"__main__\":\n\tmain()\n","sub_path":"bikeshare.py","file_name":"bikeshare.py","file_ext":"py","file_size_in_byte":11388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"35528967","text":"import csv\n\ndef loadTest(file):\t\n\tresults = []\n\twith open(file, newline='') as csvfile:\n\t\treader = csv.reader(csvfile, delimiter=';', quotechar='|')\n\t\tfor row in reader:\n\t\t\tresults.append(row)\n\treturn results\n\ndef evaluate(templates, answers):\n\tpoint = 0\n\ttotal = sum(int(a[2]) for a in templates)\n\tfor index, answer in enumerate(templates):\n\t\tif(answers[index] == answer[1]):\n\t\t\tpoint += int(answer[2])\n\n\treturn point, total","sub_path":"evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"531569251","text":"def create_meta_header(folder):\n \"\"\"\n Add metadata to a converted markdown notebook\n \"\"\"\n import yaml\n with open('{}/metadata.yml'.format(folder), 'r') as f:\n meta = yaml.load(f)\n \n meta_objects = [\"---\"]\n def _add_meta(meta, key, meta_objects, meta_key=None):\n if key in meta:\n meta_key = meta_key or key\n meta_value = meta[key]\n if type(meta_value) is list:\n meta_value = str([str(x) for x in meta_value])\n else:\n meta_value = '\\\"{}\\\"'.format(meta_value)\n meta_objects.append('{}: {}'.format(meta_key, meta_value))\n\n meta_objects.append('layout: \\\"page\\\"')\n _add_meta(meta, 'title', meta_objects)\n _add_meta(meta, 'description', meta_objects)\n\n import time\n meta.setdefault('date', time.strftime(\"%Y-%m-%d %H:%M:%S %z\"))\n _add_meta(meta, 'date', meta_objects)\n \n _add_meta(meta, 'keywords', meta_objects, meta_key='categories')\n \n meta.setdefault('accepted', 'false')\n _add_meta(meta, 'accepted', meta_objects)\n \n meta.setdefault('notebook_url', '{}/executed_notebook.ipynb'.format(folder))\n _add_meta(meta, 'notebook_url', meta_objects)\n \n meta_objects.append('---')\n \n with open('{}/metadata.yml'.format(folder), 'w') as f:\n yaml.dump(meta, f)\n \n return '\\n'.join(meta_objects), time.strftime(\"%Y-%m-%d-{}\".format('-'.join(meta['title'].split(' ')))), meta\n\ndef create_docs_filename(title):\n # Thanks to Django framework: https://docs.djangoproject.com\n import re\n title = re.sub('[^\\w\\s-]', '', title).strip().lower()\n title = re.sub('[-\\s]+', '-', title)\n return title\n\nif __name__ == \"__main__\":\n import sys, os, subprocess\n\n argv = sys.argv[1:]\n try:\n note_folder = argv[0]\n executed_file = argv[1]\n except:\n print(\"Usage: python travis_make_docs.py \")\n sys.exit(2)\n\n header, filename, meta_data = create_meta_header(note_folder) # Generate metadata header\n # Make blog post with metadata header\n filename = create_docs_filename(meta_data['title'])\n import codecs\n outfolder='_under_review'\n\n if str(meta_data['accepted']).lower() in ['true', 'yes', 'y']:\n reviewpath = 'docs/{}/{}.html'.format(outfolder, filename)\n if os.path.exists(reviewpath):\n subprocess.call([\"git\", \"rm\", reviewpath])\n outfolder='_accepted' # put the new file into accepted\n outpath = 'docs/{}/{}.html'.format(outfolder, filename)\n if not os.path.exists('docs/{}'.format(outfolder)):\n os.makedirs('docs/{}'.format(outfolder))\n\n content = \"\"\n\n if 1:\n import nbformat, nbconvert\n with open(executed_file, 'r') as f:\n nb = nbformat.read(f, as_version=4)\n \n from nbconvert.exporters import HTMLExporter\n htmlexport = HTMLExporter()\n htmlnb, htmlresources = htmlexport.from_notebook_node(nb, resources=dict(\n #output_files_dir='images/'.format(filename), \n encoding='utf-8')\n )\n content = htmlnb.replace('{{', '{ {').replace('', '```')\n \n with codecs.open(outpath, 'w', 'utf-8') as f:\n # Write a header for the gh-pages website and safe it for later usage\n f.seek(0)\n f.write(header)\n f.write('\\n')\n f.write(content)\n\n subprocess.call([\"git\", \"add\", outpath])\n ","sub_path":"travis_make_docs.py","file_name":"travis_make_docs.py","file_ext":"py","file_size_in_byte":3462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"405002689","text":"from django import forms\n\nfrom utils.api.client import MarketAccessAPIClient\n\n\nclass FeedbackForm(forms.Form):\n satisfaction = forms.ChoiceField(\n label=\"Overall, how do you feel about your use of the Digital Market Access Service (DMAS) today?\",\n choices=(\n (\"VERY_SATISFIED\", \"Very satisfied\"),\n (\"SATISFIED\", \"Satisfied\"),\n (\"NEITHER\", \"Neither satisfied nor dissatisfied\"),\n (\"DISSATISFIED\", \"Dissatisfied\"),\n (\"VERY_DISSATISFIED\", \"Very dissatisfied\"),\n ),\n widget=forms.RadioSelect(attrs={\"class\": \"govuk-radios__input\"}),\n required=False,\n )\n attempted_actions = forms.MultipleChoiceField(\n label=\"What were you trying to do today?\",\n help_text=\"Select all that apply.\",\n choices=(\n (\"REPORT_BARRIER\", \"Report a barrier\"),\n (\"PROGRESS_UPDATE\", \"Set a progress update\"),\n (\"EXPORT_BARRIER_CSV\", \"Export a barrier CSV report\"),\n (\"ACTION_PLAN\", \"Create or edit an action plan\"),\n (\"OTHER\", \"Other\"),\n (\"DONT_KNOW\", \"Don't know\"),\n ),\n widget=forms.CheckboxSelectMultiple(attrs={\"class\": \"govuk-checkboxes__input\"}),\n error_messages={\n \"required\": \"You must select one or more activities\",\n },\n )\n feedback_text = forms.CharField(\n label=\"How could we improve the service?\",\n help_text=\"Don't include any personal information, like your name or email address.\",\n max_length=3000,\n required=False,\n widget=forms.Textarea(attrs={\"class\": \"govuk-textarea\", \"rows\": 7}),\n )\n csat_submission = forms.CharField()\n csat_submission_id = forms.CharField(required=False)\n\n def __init__(self, *args, **kwargs):\n self.token = kwargs.pop(\"token\", None)\n super().__init__(*args, **kwargs)\n\n def clean(self):\n cleaned_data = super().clean()\n satisfaction = cleaned_data.get(\"satisfaction\", None)\n csat_submission = cleaned_data.get(\"csat_submission\", False)\n if not satisfaction:\n self.add_error(\"satisfaction\", \"You must select a level of satisfaction\")\n elif csat_submission == \"True\":\n client = MarketAccessAPIClient(self.token)\n feedback = client.feedback.send_feedback(\n token=self.token, **self.cleaned_data\n )\n self.data = self.data.copy()\n self.data[\"csat_submission_id\"] = feedback[\"id\"]\n # Request extra feedback\n # self.add_error(\"feedback_text\", \"Tell us how we can improve\")\n raise forms.ValidationError(\"Let us know how we can improve\")\n return cleaned_data\n\n def save(self):\n client = MarketAccessAPIClient(self.token)\n csat_submission_id = self.cleaned_data.get(\"csat_submission_id\")\n if csat_submission_id == \"None\":\n client.feedback.send_feedback(token=self.token, **self.cleaned_data)\n else:\n client.feedback.add_comments(\n token=self.token, feedback_id=csat_submission_id, **self.cleaned_data\n )\n","sub_path":"barriers/forms/feedback.py","file_name":"feedback.py","file_ext":"py","file_size_in_byte":3135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"523674315","text":"# -*- coding: utf-8 -*-\n\nimport math\n\n\ndef es_sumfactx(n):\n \"\"\" indica si un número es suma de sus factoriales \"\"\"\n if n < 10:\n return False\n\n s = 0\n for i in str(n):\n s += math.factorial(int(i))\n\n return s == n\n\n\ndef result():\n t = 0\n for n in range(1, 1000000):\n if es_sumfactx(n):\n t += n\n\n return t\n","sub_path":"projecteuler/problems/d0025/p0034/r0034.py","file_name":"r0034.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"595291020","text":"from __future__ import absolute_import\n\nimport six\n\nfrom datetime import timedelta\nfrom django.core.urlresolvers import reverse\nfrom django.utils import timezone\n\nfrom sentry.models import EventUser, GroupTagValue, OrganizationMemberTeam\nfrom sentry.testutils import APITestCase\n\n\nclass OrganizationUserIssuesTest(APITestCase):\n def setUp(self):\n super(OrganizationUserIssuesTest, self).setUp()\n self.org = self.create_organization()\n self.org.flags.allow_joinleave = False\n self.org.save()\n self.team1 = self.create_team(organization=self.org)\n self.team2 = self.create_team(organization=self.org)\n self.project1 = self.create_project(team=self.team1)\n self.project2 = self.create_project(team=self.team2)\n self.group1 = self.create_group(\n project=self.project1,\n last_seen=timezone.now() - timedelta(minutes=1),\n )\n self.group2 = self.create_group(\n project=self.project2,\n )\n\n self.euser1 = EventUser.objects.create(email='foo@example.com', project_id=self.project1.id)\n self.euser2 = EventUser.objects.create(email='bar@example.com', project_id=self.project1.id)\n self.euser3 = EventUser.objects.create(email='foo@example.com', project_id=self.project2.id)\n\n GroupTagValue.objects.create(\n key='sentry:user',\n value=self.euser1.tag_value,\n group_id=self.group1.id,\n project_id=self.project1.id\n )\n GroupTagValue.objects.create(\n key='sentry:user',\n value=self.euser2.tag_value,\n group_id=self.group1.id,\n project_id=self.project1.id\n )\n GroupTagValue.objects.create(\n key='sentry:user',\n value=self.euser3.tag_value,\n group_id=self.group2.id,\n project_id=self.project2.id\n )\n self.path = reverse(\n 'sentry-api-0-organization-user-issues', args=[\n self.org.slug,\n self.euser1.id,\n ]\n )\n\n def test_no_team_access(self):\n user = self.create_user()\n self.create_member(user=user, organization=self.org)\n self.login_as(user=user)\n\n response = self.client.get(self.path)\n assert response.status_code == 200\n assert len(response.data) == 0\n\n def test_has_access(self):\n user = self.create_user()\n member = self.create_member(\n user=user,\n organization=self.org,\n teams=[self.team1],\n )\n\n self.login_as(user=user)\n\n response = self.client.get(self.path)\n\n # result shouldn't include results from team2/project2 or bar@example.com\n assert response.status_code == 200\n assert len(response.data) == 1\n assert response.data[0]['id'] == six.text_type(self.group1.id)\n\n OrganizationMemberTeam.objects.create(\n team=self.team2,\n organizationmember=member,\n is_active=True,\n )\n\n response = self.client.get(self.path)\n\n # now result should include results from team2/project2\n assert response.status_code == 200\n assert len(response.data) == 2\n assert response.data[0]['id'] == six.text_type(self.group2.id)\n assert response.data[1]['id'] == six.text_type(self.group1.id)\n","sub_path":"tests/sentry/api/endpoints/test_organization_user_issues.py","file_name":"test_organization_user_issues.py","file_ext":"py","file_size_in_byte":3374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"474143363","text":"from datetime import datetime, timedelta\nimport io\n\nfrom flask import Blueprint, render_template, redirect, request, flash, jsonify, send_file\nfrom flask_login import login_user, login_required, logout_user, current_user\nfrom sqlalchemy import text\nfrom app import app, models, forms, db\n\nadmin = Blueprint('admin', __name__, url_prefix='/admin', template_folder='../templates/admin')\n\n# .............................................................................\n@admin.route('')\ndef admin_dashboard():\n setup_required = False\n return render_template('admin_index.html', title = u'Admin Dashboard', setup_required = setup_required)\n\n# .............................................................................\n@admin.route('/trips')\ndef manage_trips():\n settings = models.Setting.query.first()\n trips = models.Trip.query.all()\n return render_template('manage_trips.html', title = u'Admin: Manage Trips', trips = trips, settings = settings)\n\n# .............................................................................\n@admin.route('/users', methods=['GET', 'POST'])\ndef admin_users():\n people = models.Person.query.all()\n\n # Apply the selections to database\n if request.method == 'POST':\n attending_list = [ int(x) for x in request.form.keys() ]\n for person in people:\n if person.id in attending_list:\n person.attending = True\n else:\n person.attending = False\n db.session.commit()\n flash(u'Attending list updated', 'success')\n return redirect('/admin/users')\n\n return render_template('admin_users_list.html', title = 'Users Admin Panel', people = people)\n\n# .............................................................................\n@admin.route('/connections')\ndef admin_connections():\n senate_sql = \"SELECT * FROM (SELECT conn.*, people.lastname, people.firstname, people.organization FROM (SELECT members.state_code, connections.chamber, members.party, members.senate_class, members.id, members.lastname AS senator_lastname, members.firstname AS senator_firstname, connections.score AS conn_score, members.score AS member_score, connections.person_id FROM members JOIN connections ON (connections.chamber='senate' AND connections.state=members.state_code AND connections.district=senate_class) WHERE members.chamber_id=1) AS conn JOIN people ON person_id=people.id WHERE people.attending=1 ORDER BY state_code, senate_class);\"\n\n res = db.engine.execute(text(senate_sql))\n senate_data = []\n for row in res:\n senate_data.append(row)\n\n\n house_sql = \"SELECT * FROM (SELECT conn.*, people.lastname, people.firstname, people.organization FROM (SELECT members.state_code, connections.chamber, members.party, members.house_district, members.id, members.lastname AS senator_lastname, members.firstname AS senator_firstname, connections.score AS conn_score, members.score AS member_score, connections.person_id FROM members JOIN connections ON (connections.chamber='house' AND connections.state=members.state_code AND connections.district=house_district) WHERE members.chamber_id=2) AS conn JOIN people ON person_id=people.id WHERE people.attending=1 ORDER BY state_code, house_district);\"\n\n res = db.engine.execute(text(house_sql))\n house_data = []\n for row in res:\n house_data.append(row)\n\n missing_sql = \"SELECT members.id, members.lastname, members.firstname, members.party, (SELECT name FROM chambers WHERE id=members.chamber_id) chamber, members.state_code, members.house_district, members.senate_class, members.score FROM members LEFT JOIN connections ON state_code=connections.state AND (members.house_district=connections.district OR members.senate_class=connections.district) WHERE connections.id is NULL ORDER BY members.state_code;\"\n\n res = db.engine.execute(text(missing_sql))\n missing_connections = []\n for row in res:\n missing_connections.append(row)\n\n return render_template('admin_connections_list.html', title='Connections Admin Panel', senate_data=senate_data, house_data=house_data, missing_connections=missing_connections, count=len(senate_data)+len(house_data), missing_count=len(missing_connections))\n\n# .............................................................................\n@admin.route('/analytics/coverage')\ndef admin_coverage_connections():\n return render_template('connection_map.html', title='Coverage Analytics Panel')\n","sub_path":"app/routes/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":4438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"152849966","text":"def did_x_and_y_act_together(data, actor_id_1, actor_id_2):\n # data= list: [[act1, act2, filmid], [act1, act2, filmid],...]\n # film_id is irrelevant\n # lists can't be keys, but tuples, integers, strings, etc can be keys\n\n dict1 = {}\n for each_film in data:\n dict1[each_film[0], each_film[1]] = 0 #NOTE: set these equal to anything... just shows that keys in dict1 are tuples\n\n# NOTE diff btw set & dictionary: set= {'a', 'b', 'c'} dictionary= {'a':1, 'b':2, 'c':3}\n return (actor_id_1, actor_id_2) in dict1.keys() # BOOLEAN: (actor1,actor2) exists in data\n\n\ndef get_actors_with_bacon_number(data, n):\n\n bacon_num = 4724 #bacon_level 0\n graph = create_adj_list(data) #dictionary mapping actors to actors they've acted with\n\n distances = bfs(graph,bacon_num)[1] #distances= bacon numbers\n\n result = [] # will hold actors with bacon number n\n for actor in distances: #actors are keys in distances\n if distances[actor] == n: # if actors baconnumber=n\n result.append(actor) #add him to our results list\n return sorted(result) #sort result according to bacon number\n\n # for i in range(n): #from 0 to bacon_num n\n # next= [] #initialize empty list for things after bacon_num level we're at\n # for each_film in data: #format of each film: [act1,act2,filmid]\n # if each_film[0] in bacon_num[i]: #if act1 from film1 in bacon num from last film, bacon_num[0]= 4724\n # next.append(each_film[1]) #append the thing in the current film that wasn't in the last level\n # if each_film[1] in bacon_num[i]: #\n # next.append(each_film[0])\n # data.remove(each_film)\n # bacon_num.append(next)\n\n\n# def bfs(graph, start, end):\n# # maintain a list of paths\n# queue = []\n#\n# # push the first path into the queue\n# queue.append([start])\n#\n# while queue:\n# # get the first path from the queue\n# path = queue.pop(0) #value of path becomes what we pop off (which is our first path in queue)\n#\n# # get the last node from the path\n# node = path[-1] #when u have a list, list[-1] is last element in the list\n#\n# # path found\n# if node == end: # if node= what we're looking for, return the path\n# return path\n#\n# # enumerate all adjacent nodes, construct a new path and push it into the queue\n# for adjacent in graph.get(node, []): #retrieve graph according to given parameters(these define the desired output)\n# new_path = list(path)\n# new_path.append(adjacent) #append adjacent node to this path\n# queue.append(new_path) #append this path to out list of paths\n#list.append(value) adds value to the end\n#list.insert(index, value) --> add value at the index\n\ndef bfs(graph,start):\n '''\n input: graph from adjacency list & start bacon id (Kevin Bacon's is 4724) \n '''\n from Queue import Queue #try to put at top of file\n d = {} # here we will map bacon id's to their distance from kevin bacon\n Q = Queue()\n d[start] = 0\n p = {} #parent\n p[start] = start #initialize map with 1st element (Kevin Bacon)\n Q.put(start) #1st element into Queue= start\n while(not Q.empty()): #while Q isn't empty\n current = Q.get()\n for node in graph[current]: #node= current actor id\n if not node in d: #if node we're at isn't already in our stored distance then....\n d[node] = d[current] + 1 #node we're at has distance that is in level after the current distance\n p[node] = current # 'node' parent becomes 'current'(node we just analyzed)\n Q.put(node) #add 'node' (our most recent) to back of the queue\n return (p,d) #p= maps current ids to parents of our current, d= maps actor ids to distance/bacon number to him/her\n\n\ndef create_adj_list(data):\n graph = dict() #set variable graph= an empty dictionary\n for each_film in data:\n id1 = each_film[0] #in EACH ITERATION, set id1= actor1 from THAT film and id2= actor2 from THAT film\n id2 = each_film[1]\n\n if id1 in graph: #add id2 from that film to set of ids linked to key id1---> {id1: 423, 32, 23 , id2}\n graph[id1].add(id2)\n else:\n graph[id1] = set([id2]) #if id1 not already a key in graph, {id1: set([id2])};set initial value of key\n#NOTE: Syntax for set is ([])\n if id2 in graph:\n graph[id2].add(id1)\n else:\n graph[id2] = set([id1])\n return graph #after making all the links from each_film[0] to each[film[1]'s\n\ndef get_bacon_path(data, actor_id):\n graph = create_adj_list(data) #mapping of actid: ids of people he's acted with\n bacon = 4724\n p , _ = bfs(graph, bacon) #p= dictionary that maps ids to their parents\n if not actor_id in p: #if actor_id we're at has no parents, return none\n return None\n path = [] #will hold actor_ids until we reach parent of the end\n end = actor_id\n while (end != bacon):\n path.append(end) # start at id we want\n end = p[end] # end =becomes parent of that id\n path.append(bacon) #because bacon not included in our path\n\n reversed_path = [] #because path looks like node, parent, grandparent, bacon right now\n for i in range(len(path)):\n reversed_path.append(path[-1 - i]) #to keep iterating from last value until we get to bacon ??\n return reversed_path\n","sub_path":"lab_3/lab.py","file_name":"lab.py","file_ext":"py","file_size_in_byte":5687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"9110331","text":"from cmat.basic import Tuplet, NoteType\n\nwhole = NoteType(4)\nhalf = NoteType(2)\nquarter = NoteType(1)\neighth = NoteType(1/2)\nsixteenth = NoteType(1/4)\nthirtySecond = NoteType(1/8)\nsixtyFourth = NoteType(1/16)\n\nduplet = Tuplet(2,3)\ntriplet = Tuplet(3,2)\nquadruplet = Tuplet(4,3)\nquintuplet = Tuplet(5,4)\nsextuplet = Tuplet(6,4)\nseptuplet = Tuplet(7,4)\noctuplet = Tuplet(8,6)\nnonuplet = Tuplet(9,8)\ndecuplet = Tuplet(10,8)\nundecuplet = Tuplet(11,8)\ndodecuplet = Tuplet(12,8)\ntredecuplet = Tuplet(13,8)\n\navailable_tuplets = [duplet,triplet,quadruplet,quintuplet,\n sextuplet,septuplet,octuplet,nonuplet,\n decuplet,undecuplet,dodecuplet,tredecuplet]\n\n","sub_path":"constants/duration.py","file_name":"duration.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"90679453","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom v2ray.models import Server\nfrom base.models import Setting\nfrom init import db\nfrom socket import *\nimport os\nimport json\nimport struct\n\n\ndef config_changed():\n svrs = Server.query.all()\n config_path = Setting.query.filter_by(key=\"v2_config_path\").first()\n total = len(svrs)\n suc = 0\n for svr in svrs:\n cli = socket(AF_INET, SOCK_STREAM)\n cli.settimeout(5)\n print(\"[I] Ready to send config file to server: %s(%s)...\" % (svr.address, svr.remark))\n try:\n cli.connect((svr.address, 40001))\n except Exception as e:\n print('[E] Send config file to server [%s] failed: %s' % (svr.remark, str(e)))\n continue\n filename = config_path.value\n filebytes = os.path.getsize(filename)\n header = {\n \"command\": \"config_changed\",\n \"filename\": filename,\n \"filesize\": filebytes\n }\n header = json.dumps(header)\n header_len = struct.pack('i', len(header))\n cli.send(header_len)\n cli.send(header.encode(\"utf-8\"))\n with open(filename, \"rb\") as f:\n data = f.read()\n cli.sendall(data)\n print(\"[I] Send config file to server [%s] success.\" % svr.remark)\n cli.close()\n suc += 1\n print(\"[I] %d/%d successfully synced.\" % (suc, total))\n\n\ndef node_added(address, remark):\n cli = socket(AF_INET, SOCK_STREAM)\n try:\n cli.connect((address, 40001))\n except Exception as e:\n print(\"[E] Adding node server failed: %s\" % str(e))\n return -1\n header = {\"command\": \"node_added\"}\n header = json.dumps(header)\n header_len = struct.pack('i', len(header))\n cli.send(header_len)\n cli.send(header.encode(\"utf-8\"))\n data = cli.recv(1024).decode(\"utf-8\")\n if data == \"ack\":\n print(\"[I] Confirmed\")\n print(\"[I] Adding node: %s(%s)...\" % (address, remark), end='')\n svr = Server(address, remark)\n db.session.add(svr)\n db.session.commit()\n print(\"done.\")\n else:\n print(data)\n cli.close()\n\n\ndef list_nodes():\n svrs = Server.query.all()\n for svr in svrs:\n print(\"%02d: %s %s\" % (svr.id, svr.address, svr.remark))\n\n\ndef del_node(id):\n Server.query.filter_by(id=id).delete()\n db.session.commit()\n print(\"Server with id: %d has been deleted\" % id)\n\ndef list_nodes_status():\n svrs = Server.query.all()\n svrs_status = []\n for i, svr in enumerate(svrs):\n svr_status = node_status(svr)\n svrs_status.append(svr_status)\n return svrs_status\n\ndef node_status(svr):\n cli = socket(AF_INET, SOCK_STREAM)\n cli.settimeout(5)\n print(\"[I] Start getting node status: %s(%s)...\" % (svr.address, svr.remark), end='')\n try:\n cli.connect((svr.address, 40001))\n except Exception as e:\n print('[E] Send config file to server [%s] failed: %s' % (svr.remark, str(e)))\n return -1\n\n header = {\"command\": \"node_status\"}\n header = json.dumps(header)\n header_len = struct.pack('i', len(header))\n cli.send(header_len)\n cli.send(header.encode(\"utf-8\"))\n print(\"[I] Send CMD to server [%s] success.\" % svr.remark)\n\n data_len = cli.recv(4)\n if data_len:\n print(\"[I] Ready to receive data.\")\n data_len = struct.unpack('i', data_len)[0]\n data = cli.recv(data_len).decode(\"utf-8\")\n data = {**json.loads(data), **{\"remark\": svr.remark, \"address\":svr.address}}\n cli.close()\n print(\"[I] Received data\")\n return data\n","sub_path":"util/cmd2node.py","file_name":"cmd2node.py","file_ext":"py","file_size_in_byte":3537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"405619942","text":"from vnpy.app.cta_strategy import (\n CtaTemplate,\n StopOrder,\n TickData,\n BarData,\n TradeData,\n OrderData,\n BarGenerator,\n ArrayManager,\n)\n\n\nclass DemoStrategy(CtaTemplate):\n \"\"\"演示用的简单双均线\"\"\"\n\n # 策略作者\n author = \"Smart Trader\"\n\n # 定义参数\n fast_window = 10\n slow_window = 20\n\n # 定义变量\n fast_ma0 = 0.0\n fast_ma1 = 0.0\n slow_ma0 = 0.0\n slow_ma1 = 0.0\n\n # 添加参数和变量名到对应的列表\n parameters = [\"fast_window\", \"slow_window\"]\n variables = [\"fast_ma0\", \"fast_ma1\", \"slow_ma0\", \"slow_ma1\"]\n\n def __init__(self, cta_engine, strategy_name, vt_symbol, setting):\n \"\"\"\"\"\"\n super().__init__(cta_engine, strategy_name, vt_symbol, setting)\n\n # K线合成器:从Tick合成分钟K线用\n self.bg = BarGenerator(self.on_bar)\n\n # 时间序列容器:计算技术指标用\n self.am = ArrayManager()\n\n def on_init(self):\n \"\"\"\n 当策略被初始化时调用该函数。\n \"\"\"\n # 输出个日志信息,下同\n self.write_log(\"策略初始化\")\n\n # 加载10天的历史数据用于初始化回放\n self.load_bar(10)\n\n def on_start(self):\n \"\"\"\n 当策略被启动时调用该函数。\n \"\"\"\n self.write_log(\"策略启动\")\n\n # 通知图形界面更新(策略最新状态)\n # 不调用该函数则界面不会变化\n self.put_event()\n\n def on_stop(self):\n \"\"\"\n 当策略被停止时调用该函数。\n \"\"\"\n self.write_log(\"策略停止\")\n\n self.put_event()\n\n def on_tick(self, tick: TickData):\n \"\"\"\n 通过该函数收到Tick推送。\n \"\"\"\n self.bg.update_tick(tick)\n\n def on_bar(self, bar: BarData):\n \"\"\"\n 通过该函数收到新的1分钟K线推送。\n \"\"\"\n am = self.am\n\n # 更新K线到时间序列容器中\n am.update_bar(bar)\n\n # 若缓存的K线数量尚不够计算技术指标,则直接返回\n if not am.inited:\n return\n\n # 计算快速均线\n fast_ma = am.sma(self.fast_window, array=True)\n self.fast_ma0 = fast_ma[-1] # T时刻数值\n self.fast_ma1 = fast_ma[-2] # T-1时刻数值\n\n # 计算慢速均线\n slow_ma = am.sma(self.slow_window, array=True)\n self.slow_ma0 = slow_ma[-1]\n self.slow_ma1 = slow_ma[-2]\n\n # 判断是否金叉\n cross_over = (self.fast_ma0 > self.slow_ma0 and\n self.fast_ma1 < self.slow_ma1)\n\n # 判断是否死叉\n cross_below = (self.fast_ma0 < self.slow_ma0 and\n self.fast_ma1 > self.slow_ma1)\n\n # 如果发生了金叉\n if cross_over:\n # 为了保证成交,在K线收盘价上加5发出限价单\n price = bar.close_price + 5\n\n # 当前无仓位,则直接开多\n if self.pos == 0:\n self.buy(price, 1)\n # 当前持有空头仓位,则先平空,再开多\n elif self.pos < 0:\n self.cover(price, 1)\n self.buy(price, 1)\n\n # 如果发生了死叉\n elif cross_below:\n price = bar.close_price - 5\n\n # 当前无仓位,则直接开空\n if self.pos == 0:\n self.short(price, 1)\n # 当前持有空头仓位,则先平多,再开空\n elif self.pos > 0:\n self.sell(price, 1)\n self.short(price, 1)\n\n self.put_event()\n\n def on_order(self, order: OrderData):\n \"\"\"\n 通过该函数收到委托状态更新推送。\n \"\"\"\n pass\n\n def on_trade(self, trade: TradeData):\n \"\"\"\n 通过该函数收到成交推送。\n \"\"\"\n # 成交后策略逻辑仓位发生变化,需要通知界面更新。\n self.put_event()\n\n def on_stop_order(self, stop_order: StopOrder):\n \"\"\"\n 通过该函数收到本地停止单推送。\n \"\"\"\n pass\n","sub_path":"vntrader/strategies/demo_strategy.py","file_name":"demo_strategy.py","file_ext":"py","file_size_in_byte":4116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"31165898","text":"from django.shortcuts import render\nfrom django.views.generic.detail import DetailView\nfrom apps.pages.models import PageContacts, PageAbout, PageGroup, Page\nfrom apps.shop.forms import MessagesForm\nfrom django.forms.models import model_to_dict\n\nclass PageDetailView(DetailView):\n model = Page\n template_name = 'shop/pages/page__static.html'\n context_object_name = 'page'\n\n\ndef page_about(request):\n \n return render(request, 'shop/pages/page__about.html', {\n 'page' : PageAbout.objects.first(),\n })\n\n\ndef page_constacts(request):\n form_valid = False\n if request.method == 'POST':\n form = MessagesForm(data=request.POST)\n if form.is_valid():\n form.save()\n form_valid = True\n else:\n data = {}\n if request.user.is_authenticated:\n user = request.user\n data = model_to_dict(user)\n form = MessagesForm(initial=data)\n return render(request, 'shop/pages/page__contacts.html', {\n 'page' : PageContacts.objects.first(),\n 'form' : form, \n 'form_valid' : form_valid,\n })","sub_path":"apps/pages/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"537816844","text":"#Time Complexity: O(n* aloga) where n is the length of input array and a is the length of longest string in the input array\n#Space Complexity: O(n) where n is the length of input array\n# Does it runs on leetcode? : Yes\n# Approach: use sorted string as hash to group all the anagrams together \nclass Solution:\n\t#Function to group anagrams together from an input array\n def groupAnagrams(self, strs: List[str]) -> List[List[str]]:\n mapp = {}\n for st in strs:\n s = ''.join(sorted(st))\n if s in mapp:\n mapp[s].append(st)\n else:\n mapp[s] = [st]\n return [v for v in mapp.values()]","sub_path":"Problem1.py","file_name":"Problem1.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"260596180","text":"import string\nimport os\n\nwith open(\"tes.txt\", 'r+') as f:\n printable = set(string.printable)\n lines = []\n for l in f:\n l = list(filter(lambda x: x in printable, l))\n lines.append(''.join(l))\n\nos.remove(\"TrainingData.txt\")\nwith open(\"TrainingData.txt\", 'w') as f:\n for line in lines:\n cindex = line.rfind(',')\n if cindex != -1:\n line = list(line)\n line[cindex] = '.'\n line = ''.join(line)\n f.write(line)\n\n print(len(lines))\n","sub_path":"CNN/data_fixer.py","file_name":"data_fixer.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"66338476","text":"# -*- coding: utf-8 -*-\nfrom PySide2 import QtCore, QtWidgets\nfrom PySide2.QtWebEngineWidgets import QWebEngineView\nfrom Views.mainVendas import Ui_ct_MainVendas\nfrom Views.formVendas import Ui_ct_FormVenda\nfrom functools import partial\nfrom Crud.CrudPedidos import CrudPedidos\nfrom Crud.CrudProdutos import CrudProdutos\nfrom Crud.CrudClientes import CrudClientes\nfrom Crud.CrudAReceber import CrudAReceber\nfrom Funcoes.data import DataAtual\n\n# from Funcoes.BuscaProdutos import BuscaProdutos\n\n\nclass MainVendas(Ui_ct_MainVendas, Ui_ct_FormVenda, DataAtual):\n\n def mainvendas(self, frame):\n super(MainVendas, self).setMainVendas(frame)\n self.frameMainVendas.show()\n\n \"\"\" Definindo funcões widgets\"\"\"\n # Botao Adicionar Venda\n self.bt_AddNovoVenda.clicked.connect(self.FormVendas)\n\n # Busca Vendas\n self.bt_BuscaVendas.clicked.connect(self.DataTabVendas)\n\n # Setando data Inicio e Fim da Consulta\n self.dt_InicioVenda.setDate(self.primeiroDiaMes())\n self.dt_FimVenda.setDate(self.ultimoDiaMes())\n\n # Tamanho das Colunas Tabela Vendas\n self.tb_Vendas.blockSignals(True)\n self.tb_Vendas.setColumnHidden(0, True)\n\n self.tb_Vendas.resizeRowsToContents()\n self.tb_Vendas.setColumnWidth(1, 10)\n self.tb_Vendas.setColumnWidth(2, 384)\n self.tb_Vendas.setColumnWidth(3, 160)\n self.tb_Vendas.setColumnWidth(4, 160)\n self.tb_Vendas.setColumnWidth(5, 160)\n self.tb_Vendas.setColumnWidth(6, 20)\n\n # Icones dos Botoes\n self.IconeBotaoForm(self.bt_AddNovoVenda,\n self.resourcepath('Images/addVenda.svg'))\n self.IconeBotaoMenu(self.bt_BuscaVendas,\n self.resourcepath('Images/search.png'))\n self.IconeBotaoMenu(self.bt_PrintRelatVendas,\n self.resourcepath('Images/gtk-print.png'))\n\n self.DataTabVendas()\n\n # Populando tabela vendas\n def DataTabVendas(self):\n cliente = self.tx_BuscaVendas.text()\n busca = CrudPedidos()\n busca.dataEmissao = QtCore.QDate.toString(\n self.dt_InicioVenda.date(), \"yyyy-MM-dd\")\n\n busca.dataFim = QtCore.QDate.toString(\n self.dt_FimVenda.date(), 'yyyy-MM-dd')\n busca.ListaVendatabela(cliente)\n\n while self.tb_Vendas.rowCount() > 0:\n self.tb_Vendas.removeRow(0)\n pass\n\n i = 0\n while i < len(busca.nomeCliente):\n self.tb_Vendas.insertRow(i)\n self.conteudoTabela(self.tb_Vendas, i, 0, str(busca.idPedido[i]))\n\n self.TabelaStatus(self.tb_Vendas, i, 1,\n self.StatusEntrega(busca.idStatusEntrega[i],\n busca.idStatusPagamento[i]))\n\n self.TabelaNomeTelefone(\n self.tb_Vendas, i, 2, busca.nomeCliente[i],\n busca.telefoneCliente[i])\n self.TabelaEntrega(self.tb_Vendas, i, 3,\n busca.dataEmissao[i],\n self.StatusEntrega(busca.idStatusEntrega[i]), \"\")\n self.TabelaEntrega(self.tb_Vendas, i, 4,\n busca.prazoEntrega[i],\n self.StatusEntrega(busca.idStatusEntrega[i]),\n busca.statusEntrega[i].upper())\n self.TabelaPagamento(self.tb_Vendas, i, 5,\n busca.valorTotal[i],\n self.StatusEntrega(\n busca.idStatusPagamento[i]),\n busca.statusPagamento[i].upper())\n\n self.botaoTabela(self.tb_Vendas, i, 6,\n partial(self.SelectVendaId, busca.idPedido[i]), \"#069\")\n\n i += 1\n\n # Janela Form Vendas\n def FormVendas(self):\n self.DesativaBotaoVendas()\n self.LimpaFrame(self.ct_containerVendas)\n super(MainVendas, self).setFormVendas(self.ct_containerVendas)\n self.fr_FormVenda.show()\n\n \"\"\" Chamanda de funções localizadas no arquivo comercial.py na pasta \n Funcoes \"\"\"\n # Setando Datas\n self.setDatas()\n\n # Setando Validação\n self.validaCampos()\n\n # Definindo acao de calculo de frete e desconto\n self.acaoCalculo()\n\n # Setando Icones dos Botoes\n self.setIcones()\n\n # Setando tamanho das tabelas\n self.tamanhoTabelas()\n\n # Setando autocomplete\n self.setAutocomplete()\n\n # Botao Gerar Parcela\n self.bt_GerarParcela.clicked.connect(\n partial(self.gerarParcela, \"Receber\"))\n\n # Autocomplete Produto\n self.tx_BuscaItem.textEdited.connect(self.autocompleteProduto)\n\n # Add Item Tabela\n self.tx_ObsItem.returnPressed.connect(self.ValidaFormAdd)\n self.bt_IncluirItem.clicked.connect(self.ValidaFormAdd)\n \"\"\" Fim chamandas comercial.py \"\"\"\n\n \"\"\" Chamanda de funções localizadas no arquivo clientes.py na \n pasta Funcoes \"\"\"\n # Campo Busca por nome e Autocompletar Cliente\n self.tx_NomeFantasia.textEdited.connect(self.autocompleCliente)\n self.tx_NomeFantasia.returnPressed.connect(\n partial(self.BuscaClienteNome, self.tx_IdBuscaItem))\n\n # Return Press Busca Id Cliente\n self.tx_Id.returnPressed.connect(\n partial(self.BuscaClienteId, self.tx_IdBuscaItem))\n \"\"\" Fim Chamadas clientes.py\"\"\"\n\n \"\"\" Chamanda de funções localizadas no arquivo FormaPagamento.py na pasta Funcoes \"\"\"\n # Populando combobox Forma de Pagamento\n self.CboxFPagamento(self.cb_FormaPagamento)\n \"\"\" Fim Chamanda FormaPagamento.py \"\"\"\n\n # Setando Foco no Cliente id TX\n self.tx_Id.setFocus()\n\n # Checando se existe ID válido\n self.IdCheckPedido()\n\n \"\"\" Definindo funcões widgets\"\"\"\n\n # Return Press Busca Id Produto\n self.tx_IdBuscaItem.returnPressed.connect(self.BuscaProdutoId)\n\n # Busca Produto por Nome\n self.tx_BuscaItem.returnPressed.connect(self.BuscaProdutoNome)\n\n # Calculo total produto por qtde item\n self.tx_QntdItem.returnPressed.connect(self.TotalItem)\n\n # Entregar\n self.bt_Entregar.clicked.connect(self.Entregar)\n\n # Botao Salvar\n self.bt_Salvar.clicked.connect(self.CadVenda)\n\n # Botao Cancelar\n self.bt_Voltar.clicked.connect(self.janelaVendas)\n\n # Botao Imprimir\n self.bt_Imprimir.clicked.connect(self.imprimirVenda)\n\n # checando campo Id se é Edicao ou Nova Venda\n def IdCheckPedido(self):\n if not self.tx_Cod.text():\n busca = CrudPedidos()\n self.tx_Cod.setText(str(busca.lastIdPedido()))\n # setando dataAtual campo entrega e emissão\n\n # Busca Produto por nome\n def BuscaProdutoNome(self):\n produto = self.tx_BuscaItem.text()\n busca = CrudProdutos()\n busca.ListaProdutoTabela(produto)\n self.tx_IdBuscaItem.setText(str(busca.idProduto[0]))\n self.BuscaProdutoId()\n\n # Busca produtos por ID\n def BuscaProdutoId(self):\n id = int(self.tx_IdBuscaItem.text())\n busca = CrudProdutos()\n busca.SelectProdutoId(id)\n if busca.descricaoProduto:\n self.tx_BuscaItem.setText(busca.descricaoProduto)\n self.tx_ValorUnitarioItem.setText(busca.valorUnitario)\n self.tx_QntdItem.setFocus()\n else:\n self.tx_BuscaItem.setText(\"Produto não encontrado\")\n self.tx_IdBuscaItem.clear()\n self.tx_IdBuscaItem.setFocus()\n\n # Calculo ValorTotalItem\n def TotalItem(self):\n id = self.tx_IdBuscaItem.text()\n busca = CrudProdutos()\n busca.SelectProdutoId(id)\n if self.tx_QntdItem.text() and self.tx_ValorUnitarioItem.text():\n if float(self.tx_QntdItem.text()) >= int(busca.qtdeAtacado):\n self.tx_ValorUnitarioItem.setText(busca.valorAtacado)\n else:\n self.tx_ValorUnitarioItem.setText(busca.valorUnitario)\n TotalItem = float(self.tx_QntdItem.text()) * \\\n float(self.tx_ValorUnitarioItem.text())\n self.tx_ValorTotalItem.setText(format(TotalItem, \".2f\"))\n self.bt_IncluirItem.setEnabled(True)\n self.tx_ObsItem.setFocus()\n\n # Removendo item da tabela e banco de dados se ouver\n\n def RemoveLInha(self, linha):\n REMOVE = CrudPedidos()\n REMOVE.idItemTabela = self.tb_Itens.item(linha, 7).text()\n REMOVE.DelItem()\n self.tb_Itens.removeRow(linha)\n for row in range(self.tb_Itens.rowCount()):\n self.botaoRemoveItem(self.tb_Itens, row, 6,\n partial(self.RemoveLInha, row), \"#005099\")\n self.TotalFinal()\n self.bt_GerarParcela.setDisabled(True)\n\n # Desativando Botões\n def DesativaBotaoVendas(self):\n self.bt_AddNovoVenda.setEnabled(False)\n self.tx_BuscaVendas.setEnabled(False)\n self.bt_BuscaVendas.setEnabled(False)\n\n def AtivaBotaoVendas(self):\n self.bt_AddNovoVenda.setEnabled(True)\n self.tx_BuscaVendas.setEnabled(True)\n self.bt_BuscaVendas.setEnabled(True)\n\n # Cadastro a venda\n def CadVenda(self):\n if not int(self.tb_Itens.rowCount()) < 1:\n INSERI = CrudPedidos()\n INSERI.idPedido = self.tx_Cod.text()\n INSERI.idCliente = self.tx_Id.text()\n INSERI.dataEmissao = QtCore.QDate.toString(\n self.dt_Emissao.date(), 'yyyy-MM-dd')\n INSERI.prazoEntrega = QtCore.QDate.toString(\n self.dt_Prazo.date(), 'yyyy-MM-dd')\n INSERI.desconto = self.tx_Desconto.text()\n INSERI.frete = self.tx_Frete.text()\n INSERI.valorTotal = self.tx_TotalFinal.text()\n if float(self.lb_ValorPendente.text()) == 0:\n INSERI.statusPagamento = 1\n else:\n INSERI.statusPagamento = 2\n INSERI.valorPendente = self.lb_ValorPendente.text()\n INSERI.CadVenda()\n self.CadItemVenda()\n pass\n\n # Cadastrando Itens referente ao pedido\n def CadItemVenda(self):\n INSERI = CrudPedidos()\n i = 0\n while i < self.tb_Itens.rowCount():\n INSERI.idItem = self.tb_Itens.item(i, 0).text()\n INSERI.idPedido = self.tx_Cod.text()\n INSERI.idItemTabela = self.tb_Itens.item(i, 7).text()\n INSERI.qtde = self.tb_Itens.item(i, 3).text()\n INSERI.valorItem = self.tb_Itens.item(i, 4).text()\n INSERI.totalItem = self.tb_Itens.item(i, 5).text()\n INSERI.obsItem = self.tb_Itens.item(i, 2).text()\n INSERI.CadItensPedido()\n i += 1\n\n self.CadContaVenda()\n self.SelectVendaId(self.tx_Cod.text())\n\n pass\n\n # Cadastro de parcelas\n def CadContaVenda(self):\n INSERI = CrudAReceber()\n\n if self.tb_Parcelas.rowCount() > 0:\n for i in range(self.tb_Parcelas.rowCount()):\n try:\n self.tb_Parcelas.item(i, 0).text()\n INSERI.idConta = self.tb_Parcelas.item(i, 0).text()\n except:\n INSERI.idConta = ''\n INSERI.idVenda = self.tx_Cod.text()\n INSERI.idCliente = self.tx_Id.text()\n INSERI.descricao = \"\"\"Pedido de Venda {}. Parcela {} de {} \"\"\".format(\n self.tx_Cod.text(), i + 1, self.tb_Parcelas.rowCount())\n INSERI.obs = \"\"\n INSERI.categoria = 1\n INSERI.dataVencimento = QtCore.QDate.toString(\n self.tb_Parcelas.cellWidget(i, 1).date(), \"yyyy-MM-dd\")\n INSERI.valor = self.tb_Parcelas.item(i, 2).text()\n INSERI.formaPagamento = self.cb_FormaPagamento.currentData()\n INSERI.cadContaReceber()\n\n # Recebendo parcela Venda\n def Receber(self, id):\n # print(self.tb_Parcelas.item(id, 0).text())\n\n if self.tb_Parcelas.cellWidget(id, 3).text():\n INSERI = CrudAReceber()\n INSERI.idConta = self.tb_Parcelas.item(id, 0).text()\n INSERI.valorRecebido = self.tb_Parcelas.cellWidget(\n id, 3).text().replace(\",\", \".\")\n INSERI.formaPagamento = self.cb_FormaPagamento.currentData()\n INSERI.dataRecebimento = QtCore.QDate.toString(\n QtCore.QDate.currentDate(), \"yyyy-MM-dd\")\n INSERI.ReceberConta()\n self.ParcelasAReceber()\n\n # Entregando Produtos DB\n def Entregar(self):\n INSERI = CrudPedidos()\n INSERI.dataEntrega = QtCore.QDate.toString(\n self.dt_Entrega.date(), \"yyyy-MM-dd\")\n INSERI.idPedido = self.tx_Cod.text()\n INSERI.Entregar()\n self.SaidaEstoque()\n self.SelectVendaId(self.tx_Cod.text())\n\n # Dando Saida no Estoque\n def SaidaEstoque(self):\n INSERI = CrudProdutos()\n i = 0\n while i < self.tb_Itens.rowCount():\n INSERI.idProduto = self.tb_Itens.item(i, 0).text()\n INSERI.qtdeProduto = self.tb_Itens.item(i, 3).text()\n INSERI.SaidaProduto()\n i += 1\n\n # Selecionando Venda pela tabela\n def SelectVendaId(self, id):\n busca = CrudPedidos()\n self.FormVendas()\n self.tx_Cod.setText(str(id))\n busca.SelectVendaID(id)\n\n self.tx_Id.setText(str(busca.idCliente))\n self.BuscaClienteId(self.tx_IdBuscaItem)\n self.tx_Desconto.setText(str(busca.desconto))\n self.tx_Frete.setText(str(busca.frete))\n self.dt_Prazo.setDate(busca.prazoEntrega)\n if busca.valorRecebido:\n self.tx_valorRecebido.setText(str(busca.valorRecebido))\n if busca.statusPagamento == 2:\n self.bt_GerarParcela.setEnabled(True)\n if busca.statusEntrega == 2:\n self.bt_Entregar.setEnabled(True)\n if busca.statusEntrega == 1:\n self.tb_Itens.setColumnHidden(6, True)\n for item in self.fr_addProduto.findChildren(QtWidgets.QLineEdit):\n item.setReadOnly(True)\n\n i = 0\n while i < len(busca.itemDescricao):\n\n self.tb_Itens.insertRow(i)\n self.conteudoTabela(self.tb_Itens, i, 0,\n str(busca.idItem[i]))\n self.conteudoTabelaLeft(self.tb_Itens, i, 1,\n busca.itemDescricao[i])\n self.conteudoTabelaLeft(self.tb_Itens, i, 2,\n str(busca.obsItem[i]))\n self.conteudoTabela(self.tb_Itens, i, 3,\n str(busca.qtde[i]))\n self.conteudoTabela(self.tb_Itens, i, 4,\n str(busca.valorItem[i]))\n self.conteudoTabela(self.tb_Itens, i, 5,\n str(busca.totalItem[i]))\n self.botaoRemoveItem(self.tb_Itens, i, 6,\n partial(self.RemoveLInha, i), \"#005099\")\n self.conteudoTabela(self.tb_Itens, i, 7,\n str(busca.idItemTabela[i]))\n self.TotalFinal()\n\n i += 1\n self.bt_Imprimir.setEnabled(True)\n self.ParcelasAReceber()\n\n pass\n\n # Populando tabela Parcelas\n def ParcelasAReceber(self):\n while self.tb_Parcelas.rowCount() > 0:\n self.tb_Parcelas.removeRow(0)\n\n busca = CrudAReceber()\n busca.idVenda = self.tx_Cod.text()\n busca.selectAReceberId()\n\n if busca.dataVencimento:\n self.bt_GerarParcela.setDisabled(True)\n self.tb_Itens.setColumnHidden(6, True)\n\n for i in range(len(busca.dataVencimento)):\n self.tb_Parcelas.insertRow(i)\n self.conteudoTabela(self.tb_Parcelas, i,\n 0, str(busca.idConta[i]))\n self.dt_tabela(self.tb_Parcelas, i,\n 1, busca.dataVencimento[i], busca.idStatus[i])\n self.conteudoTabela(self.tb_Parcelas, i,\n 2, str(busca.valor[i]))\n self.tx_tabelaReceber(self.tb_Parcelas, i, 3, busca.idStatus[\n i], str(busca.valorPendente[i]))\n self.botaoReceberParcela(self.tb_Parcelas, i, 4,\n partial(self.Receber, i), \"Receber\", busca.idStatus[i])\n\n def imprimirVenda(self):\n self.documento = QWebEngineView()\n\n headertable = [\"Produto\", \"Obs. \", \"Qnte.\", \"$ Unitário\", \"$ Total\"]\n buscaFornecedor = CrudClientes()\n buscaFornecedor.ListaClientesTabela('')\n html = self.renderTemplate(\n \"venda.html\",\n estilo=self.resourcepath('Template/estilo.css'),\n titulo=\"Pedido Nº:\",\n idPedido=self.tx_Cod.text(),\n headertable=headertable,\n codcliente=buscaFornecedor.idCliente,\n nomeCliente=buscaFornecedor.nomeCliente,\n telefoneFornecedor=buscaFornecedor.celularCliente,\n emailFornecedor=buscaFornecedor.emailCliente\n )\n\n self.documento.load(QtCore.QUrl(\"file:///\" +\n self.resourcepath(\"report.html\")))\n self.documento.loadFinished['bool'].connect(self.previaImpressao)\n","sub_path":"controle_estoque/mainvendas.py","file_name":"mainvendas.py","file_ext":"py","file_size_in_byte":17405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"274518966","text":"\r\nfrom common.functions import *;\r\nimport requests,sys,os,time,json;\r\npath = os.path.split(os.path.realpath(__file__))[0];#获取当前文件路径\r\n\r\nurl = \"https://e.qq.com/ads/\";\r\n# url = \"http://leads.qq.com\";\r\nheaders = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3278.0 Safari/537.36'}\r\n\r\nconn = requests.Session();\r\nconn.get(url,headers=headers);\r\ncookiefile = path+'/cookie.json';\r\nif not os.path.exists(cookiefile):\r\n\tprint('请先获取cookie');\r\n\texit()\r\n\r\n\r\ntry:\r\n\twith open(cookiefile, 'r', encoding='utf8') as f:\r\n\t\tlistCookies = json.loads(f.read())\r\nexcept Exception as e:\r\n\tprint('cookie文件格式错误',e);\r\n\texit()\r\n\r\ntry:\r\n\tfor cookie in listCookies:\r\n\t\tconn.cookies.set(cookie.get('name'),cookie.get('value'));\r\nexcept Exception as e:\r\n\tprint('cookie错误');\r\n\texit();\r\n\r\n#下载xls\r\nurl = \"https://a.fy.qq.com/editor/form/batchdownload?f_id=gdt_6378651&outer_landing_page_id_list=192940_0459c2f2,190360_4b613580,190306_440502a2,188331_de9e091d,187201_71289b72,106239_d134109f\";\r\nwhile True:\r\n\ttry:\r\n\t\tdata = conn.get(url,headers=headers);\r\n\t\tfile = path+'/data/'+date(format_=\"%Y-%m-%d-%H-%M-%S\")+'.xls';\r\n\t\twith open(file,'wb') as f:\r\n\t\t\tfor chunk in data.iter_content(chunk_size=512):\r\n\t\t\t\tif chunk:\r\n\t\t\t\t\tf.write(chunk);\r\n\t\t\t\tprint('正在下载');\r\n\t\t\tprint('下载完成',file);\r\n\r\n\texcept Exception as e:\r\n\t\tprint(e);\r\n\t\tcontinue;\r\n\ttime.sleep(10*60);\r\n\r\n\r\n","sub_path":"gdt/getdata.py","file_name":"getdata.py","file_ext":"py","file_size_in_byte":1448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"450737042","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n@author: Carlos Gómez del Fierro\n\"\"\"\nimport sys\nfrom time import clock\n\n\"\"\"\nDegree array\n\"\"\"\ndef deg(graph):\n n_nodes, n_edges = graph[0]\n degree = [0] * n_nodes\n for edge in graph[1:]:\n degree[edge[0] - 1] += 1\n degree[edge[1] - 1] += 1\n return degree\n\ntry:\n with open(sys.argv[1], \"r\", encoding=\"utf-8\") as ini_file:\n data = ini_file.read().splitlines()\nexcept FileNotFoundError:\n print(\"File not found\")\n sys.exit(1)\nexcept:\n data = sys.stdin.read().splitlines()\n\ngraph = [(int(x), int(y)) for edge in data for x, y in [edge.split()]]\n\nstart = clock()\ndegree = \" \".join(str(i) for i in deg(graph))\nend = clock()\ntime = end - start\n\nwith open(\"output/deg.txt\", \"w\", encoding=\"utf-8\", newline=\"\\n\") as out_file:\n out_file.write(degree + \"\\n\")\n\nprint(degree)\nprint(\"Time: {} segs.\".format(time))\n","sub_path":"algorithmic-heights/deg.py","file_name":"deg.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"612436235","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr 28 11:27:37 2020\n\n@author: wyx\n\"\"\"\n\n\nimport xlrd,xlwt\nfrom xlutils.copy import copy\ndata=xlrd.open_workbook('./楼宇安防.xls',formatting_info=True)\nworkbook=xlwt.Workbook(encoding='utf-8')\nworkbook=copy(data)\ntableRead=data.sheet_by_index(0)\ntableWrite=workbook.get_sheet(0)\n\n#设置高亮\nstyle=xlwt.XFStyle()\npattern=xlwt.Pattern()\npattern.pattern=xlwt.Pattern.SOLID_PATTERN\npattern.pattern_fore_colour=xlwt.Style.colour_map['yellow']\nstyle.pattern=pattern\n\n#高亮数据\ncol_index=tableRead.row_values(0).index('体温')\nfor i in range(1,tableRead.nrows):\n temp=tableRead.cell_value(i,col_index)\n if temp>37.4:\n tableWrite.write(i,col_index,temp,style)\n\n#写入平均体温\ntableWrite.write(tableRead.nrows,0,'平均体温')\ntableWrite.write(tableRead.nrows,\n col_index,\n sum(tableRead.col_values(col_index,\n start_rowx=1,\n end_rowx=None))/tableRead.nrows-1)\n\n#保存数据\nworkbook.save('./作业.xls')\n","sub_path":"Excel报表自动高亮/Excel报表自动高亮.py","file_name":"Excel报表自动高亮.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"219276807","text":"#!/usr/bin/python2\n\nimport matplotlib.pyplot as plt\nimport matplotlib.axes as ax\nimport sys\nimport numpy as np\n\nstreamerA1 = \"Output/Streamer1_B.dat\"\nstreamerA2 = \"Output/Streamer1_C.dat\"\nstreamerA3 = \"Output/Streamer1_D.dat\"\nstreamerA4 = \"Output/Streamer1_E.dat\"\nstreamerA5 = \"Output/Streamer1_F.dat\"\nstreamerA6 = \"Output/Streamer1_G.dat\"\nstreamerA7 = \"Output/Streamer1_H.dat\"\nstreamerA8 = \"Output/Streamer1_I.dat\"\nstreamerA9 = \"Output/Streamer1_J.dat\"\nstreamerA10 = \"Output/Streamer1_K.dat\"\nstreamerA11 = \"Output/Streamer1_L.dat\"\nstreamerA12 = \"Output/Streamer1_M.dat\"\nstreamerA13 = \"Output/Streamer1_N.dat\"\nstreamerA14 = \"Output/Streamer1_O.dat\"\nstreamerA15 = \"Output/Streamer1_P.dat\"\nstreamerA16 = \"Output/Streamer1_Q.dat\"\nstreamerA17 = \"Output/Streamer1_R.dat\"\nstreamerA18 = \"Output/Streamer1_S.dat\"\nstreamerA19 = \"Output/Streamer1_T.dat\"\nstreamerA20 = \"Output/Streamer1_U.dat\"\nstreamerA21 = \"Output/Streamer1_V.dat\"\nstreamerA22 = \"Output/Streamer1_W.dat\"\nstreamerA23 = \"Output/Streamer1_X.dat\"\nstreamerA24 = \"Output/Streamer1_Y.dat\"\nstreamerA25 = \"Output/Streamer1_Z.dat\"\nstreamerA26 = \"Output/Streamer1_[.dat\"\nstreamerA27 = \"Output/Streamer1_\\.dat\"\nstreamerA28 = \"Output/Streamer1_].dat\"\nstreamerA29 = \"Output/Streamer1_^.dat\"\nstreamerA30 = \"Output/Streamer1__.dat\"\nstreamerA31 = \"Output/Streamer1_`.dat\"\nstreamerA32 = \"Output/Streamer1_a.dat\"\nstreamerA33 = \"Output/Streamer1_b.dat\"\nstreamerA34 = \"Output/Streamer1_c.dat\"\nstreamerA35 = \"Output/Streamer1_d.dat\"\nstreamerA36 = \"Output/Streamer1_e.dat\"\nstreamerA37 = \"Output/Streamer1_f.dat\"\nstreamerA38 = \"Output/Streamer1_g.dat\"\nstreamerA39 = \"Output/Streamer1_h.dat\"\nstreamerA40 = \"Output/Streamer1_i.dat\"\nstreamerA41 = \"Output/Streamer1_j.dat\"\n\nstreamerB1 = \"Output/Streamer2_B.dat\"\nstreamerB2 = \"Output/Streamer2_C.dat\"\nstreamerB3 = \"Output/Streamer2_D.dat\"\nstreamerB4 = \"Output/Streamer2_E.dat\"\nstreamerB5 = \"Output/Streamer2_F.dat\"\nstreamerB6 = \"Output/Streamer2_G.dat\"\nstreamerB7 = \"Output/Streamer2_H.dat\"\nstreamerB8 = \"Output/Streamer2_I.dat\"\nstreamerB9 = \"Output/Streamer2_J.dat\"\nstreamerB10 = \"Output/Streamer2_K.dat\"\nstreamerB11 = \"Output/Streamer2_L.dat\"\nstreamerB12 = \"Output/Streamer2_M.dat\"\nstreamerB13 = \"Output/Streamer2_N.dat\"\nstreamerB14 = \"Output/Streamer2_O.dat\"\nstreamerB15 = \"Output/Streamer2_P.dat\"\nstreamerB16 = \"Output/Streamer2_Q.dat\"\nstreamerB17 = \"Output/Streamer2_R.dat\"\nstreamerB18 = \"Output/Streamer2_S.dat\"\nstreamerB19 = \"Output/Streamer2_T.dat\"\nstreamerB20 = \"Output/Streamer2_U.dat\"\nstreamerB21 = \"Output/Streamer2_V.dat\"\nstreamerB22 = \"Output/Streamer2_W.dat\"\nstreamerB23 = \"Output/Streamer2_X.dat\"\nstreamerB24 = \"Output/Streamer2_Y.dat\"\nstreamerB25 = \"Output/Streamer2_Z.dat\"\nstreamerB26 = \"Output/Streamer2_[.dat\"\nstreamerB27 = \"Output/Streamer2_\\.dat\"\nstreamerB28 = \"Output/Streamer2_].dat\"\nstreamerB29 = \"Output/Streamer2_^.dat\"\nstreamerB30 = \"Output/Streamer2__.dat\"\nstreamerB31 = \"Output/Streamer2_`.dat\"\nstreamerB32 = \"Output/Streamer2_a.dat\"\nstreamerB33 = \"Output/Streamer2_b.dat\"\nstreamerB34 = \"Output/Streamer2_c.dat\"\nstreamerB35 = \"Output/Streamer2_d.dat\"\nstreamerB36 = \"Output/Streamer2_e.dat\"\nstreamerB37 = \"Output/Streamer2_f.dat\"\nstreamerB38 = \"Output/Streamer2_g.dat\"\nstreamerB39 = \"Output/Streamer2_h.dat\"\nstreamerB40 = \"Output/Streamer2_i.dat\"\nstreamerB41 = \"Output/Streamer2_j.dat\"\n\nfilesA = [streamerA1, streamerA2, streamerA3, streamerA4, streamerA5, streamerA6, streamerA7, streamerA8, streamerA9, streamerA10, streamerA11, streamerA12, streamerA13, streamerA14, streamerA15, streamerA16, streamerA17, streamerA18, streamerA19, streamerA20, streamerA21, streamerA22, streamerA23, streamerA24, streamerA25, streamerA26, streamerA27, streamerA28, streamerA29, streamerA30, streamerA31, streamerA32, streamerA33, streamerA34, streamerA35, streamerA36, streamerA37, streamerA38, streamerA39, streamerA40, streamerA41]\n\nfilesB = [streamerB1, streamerB2, streamerB3, streamerB4, streamerB5, streamerB6, streamerB7, streamerB8, streamerB9, streamerB10, streamerB11, streamerB12, streamerB13, streamerB14, streamerB15, streamerB16, streamerB17, streamerB18, streamerB19, streamerB20, streamerB21, streamerB22, streamerB23, streamerB24, streamerB25, streamerB26, streamerB27, streamerB28, streamerB29, streamerB30, streamerB31, streamerB32, streamerB33, streamerB34, streamerB35, streamerB36, streamerB37, streamerB38, streamerB39, streamerB40, streamerB41]\n \nxAxis = []\nstart = 0\nend = len(filesA)\nyAxis = [[] for i in range(start, end)]\n\n \nfor i in range(start, end):\n f = open(filesA[i], \"r\")\n cont = f.readlines()\n f.close()\n f = open(filesB[i], \"r\")\n cont2 = f.readlines()\n f.close()\n xAxis=[]\n for j in range(0,len(cont)/300,10):\n #xAxis.append(np.log10(float(cont[j].split(\" \")[0])))\n xAxis.append(1000*float(cont[j].split(\" \")[0]))\n yAxis[i].append(int(int(cont[j].split(\" \")[-1]) == 0 and int(cont2[j].split(\" \")[-1]) == 0))\n \n\nx = xAxis\ny = np.arange(50+(start)*0.75, 50+(end)*0.75, 0.75)\nX, Y = np.meshgrid(x, y)\nlevels = [-1,0,1]\n\nZ = yAxis\nfig = plt.figure()\nax = fig.add_subplot(111)\nax.tick_params(labelsize=40)\n#ax.set_xticks([-4, -3, -2, -1, 0])\nCS = ax.contourf(X, Y, Z, levels)\nax.set_ylabel(\"Altitude (km)\", fontsize=40)\n#ax.set_xlabel(r\"logarithmic Time ($\\mathrm{s}$)\", fontsize=40)\nax.set_xlabel(r\"Time ($\\mathrm{ms}$)\", fontsize=40)\n#cb = plt.colorbar(CS)\n#cb.ax.tick_params(labelsize=40)\n#plt.savefig(\"{}.png\".format(sys.argv[1]))\nplt.show()\n","sub_path":"Streamer.py","file_name":"Streamer.py","file_ext":"py","file_size_in_byte":5430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"400400019","text":"import matplotlib.image as mpimg\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport cv2, time, glob, pickle\nfrom sklearn.svm import LinearSVC\nfrom skimage.feature import hog\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import train_test_split\nfrom scipy.ndimage.measurements import label\nfrom moviepy.editor import VideoFileClip\nfrom config import *\nfrom lesson_functions import *\n\ndef single_img_features(img, color_space='YCrCb', spatial_size=(32, 32),\n hist_bins=32, orient=9,pix_per_cell=8, cell_per_block=2, hog_channel=0,\n spatial_feat=True, hist_feat=True, hog_feat=True, hogVis=False, dbg=False):\n #1) Define an empty list to receive features\n img_features, imgShapes = [], {}\n #2) Apply color conversion if other than 'RGB'\n if color_space != 'RGB':\n if color_space == 'HSV': feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)\n elif color_space == 'LUV': feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2LUV)\n elif color_space == 'HLS': feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)\n elif color_space == 'YUV': feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2YUV)\n elif color_space == 'YCrCb':feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2YCrCb)\n else: feature_image = np.copy(img)\n #3) Compute spatial features if flag is set\n if spatial_feat == True:\n spatial_features = bin_spatial(feature_image, size=spatial_size)\n #4) Append features to list\n img_features.append(spatial_features)\n if dbg: imgShapes[\"spatial\"]=spatial_features.shape[0]\n #5) Compute histogram features if flag is set\n if hist_feat == True:\n hist_features = color_hist(feature_image, nbins=hist_bins)\n #6) Append features to list\n img_features.append(hist_features)\n if dbg: imgShapes[\"histgrm\"]=hist_features.shape[0]\n #7) Compute HOG features if flag is set\n if hog_feat == True:\n if hog_channel == 'ALL':\n hog_features = []\n for channel in range(feature_image.shape[2]):\n hog_features.extend(get_hog_features(feature_image[:,:,channel],\n orient, pix_per_cell, cell_per_block,\n vis=False, feature_vec=True))\n if dbg: imgShapes['HOG'+str(channel)] = len(hog_features)\n else:\n if hogVis: return get_hog_features(feature_image[:,:,hog_channel], orient,\n pix_per_cell, cell_per_block, vis=True, feature_vec=True)\n else:\n hog_features = get_hog_features(feature_image[:,:,hog_channel], orient,\n pix_per_cell, cell_per_block, vis=False, feature_vec=True)\n if dbg: imgShapes['HOG0'] = len(hog_features)\n #8) Append features to list\n img_features.append(hog_features)\n #9) Return concatenated array of features\n if dbg: print(imgShapes)\n return np.concatenate(img_features)\n\ndef extractFeatures(imgFileNms, color_space='YCrCb', spatial_size=(32, 32),\n hist_bins=32, orient=9, pix_per_cell=8, cell_per_block=2, \n hog_channel=0, spatial_feat=True, hist_feat=True, hog_feat=True):\n features = []\n for file in imgFileNms:\n img = mpimg.imread(file)\n fileFeature = single_img_features(img, color_space=color_space,\n spatial_size=spatial_size, hist_bins=hist_bins, \n orient=orient, pix_per_cell=pix_per_cell, \n cell_per_block=cell_per_block, hog_channel=hog_channel,\n spatial_feat=spatial_feat, hist_feat=hist_feat, hog_feat=hog_feat)\n\n features.append(fileFeature)\n return features\n\ndef search_windows(img, windows, clf, scaler, color_space='YCrCb',\n spatial_size=(32, 32), hist_bins=32,\n hist_range=(0, 256), orient=9,\n pix_per_cell=8, cell_per_block=2,\n hog_channel=0, spatial_feat=True,\n hist_feat=True, hog_feat=True, dbg=False):\n\n #1) Create an empty list to receive positive detection windows\n on_windows = []\n #2) Iterate over all windows in the list\n for window in windows:\n #3) Extract the test window from original image\n test_img = cv2.resize(img[window[0][1]:window[1][1], window[0][0]:window[1][0]], (64, 64))\n #4) Extract features for that window using single_img_features()\n features = single_img_features(test_img, color_space=color_space,\n spatial_size=spatial_size, hist_bins=hist_bins,\n orient=orient, pix_per_cell=pix_per_cell,\n cell_per_block=cell_per_block,\n hog_channel=hog_channel, spatial_feat=spatial_feat,\n hist_feat=hist_feat, hog_feat=hog_feat)\n #5) Scale extracted features to be fed to classifier\n test_features = scaler.transform(np.array(features).reshape(1, -1))\n #6) Predict using your classifier\n prediction = clf.predict(test_features)\n #7) If positive (prediction == 1) then save the window\n if prediction == 1: on_windows.append(window)\n if dbg and prediction==1: print('on_windows.append', len(on_windows))\n #8) Return windows for positive detections\n return on_windows\n\ndef getImgFiles(lim=0):\n images = []\n for subDir in glob.glob('./non-vehicles/*'):\n images = images + glob.glob(subDir+'/*.png')\n for subDir in glob.glob('./vehicles/*'):\n images = images + glob.glob(subDir+'/*.png')\n carS, notcars = [], []\n for image in images:\n if 'non-vehicles' in image: notcars.append(image)\n else: carS.append(image)\n if lim > 1: carS, notcars = carS[:lim], notcars[:lim]\n print('# of carS example = ', len(carS))\n print('# of not cars are = ', len(notcars))\n return carS, notcars\n\ndef createSVC(lim=0, pklIt=False):\n cars, notCars = getImgFiles(lim=lim)\n car_features = extractFeatures(cars, color_space=color_space,\n spatial_size=spatial_size, hist_bins=hist_bins,\n orient=orient, pix_per_cell=pix_per_cell,\n cell_per_block=cell_per_block,\n hog_channel=hog_channel, spatial_feat=spatial_feat,\n hist_feat=hist_feat, hog_feat=hog_feat)\n notcar_features = extractFeatures(notCars, color_space=color_space,\n spatial_size=spatial_size, hist_bins=hist_bins,\n orient=orient, pix_per_cell=pix_per_cell,\n cell_per_block=cell_per_block,\n hog_channel=hog_channel, spatial_feat=spatial_feat,\n hist_feat=hist_feat, hog_feat=hog_feat)\n\n X = np.vstack((car_features, notcar_features)).astype(np.float64)\n X_scaler = StandardScaler().fit(X) # Fit a per-column scaler\n scaled_X = X_scaler.transform(X) # Apply the scaler to X\n # Define the labels vector\n y = np.hstack((np.ones(len(car_features)), np.zeros(len(notcar_features))))\n\n # Split up data into randomized training and test sets\n rand_state = np.random.randint(0, y.shape[0])\n X_train, X_test, y_train, y_test = train_test_split(scaled_X, y, test_size=0.2, random_state=rand_state)\n\n print('Using:',orient,'orientations',pix_per_cell,'pixels per cell and', cell_per_block,'cells per block')\n print('Feature vector length:', len(X_train[0]))\n # Use a linear SVC\n svc = LinearSVC()\n # Check the training time for the SVC\n t=time.time()\n svc.fit(X_train, y_train)\n t2 = time.time()\n print(round(t2-t, 2), 'Seconds to train SVC...')\n # Check the score of the SVC\n print('Test Accuracy of SVC = ', round(svc.score(X_test, y_test), 4))\n # Check the prediction time for a single sample\n t=time.time()\n if pklIt:\n with open('./svcModel.pkl', 'wb') as fp: pickle.dump(svc,fp)\n with open('./X_scaler.pkl', 'wb') as fw: pickle.dump(X_scaler, fw)\n\ndef processImg(iFnm, oFnm=None, saveFlev=1, imgWrt=False, dbg=False):\n if type(iFnm) == str: image = mpimg.imread(iFnm)\n elif type(iFnm) == np.ndarray: image = iFnm\n else: raise TypeError('Neither File Name nor Image typeError')\n\n imgCpy = np.copy(image)\n image = image.astype(np.float32)/255 # conversion to 0~1 as trained on png\n heat = np.zeros_like(image[:,:,0]).astype(np.float)\n\n hot_windows = []\n searchWinList = []\n searchWinExtL = []\n for search_win in GVsearchWindows: # gVal: search windows (np.arrays(xmin,xMax, y\n # Id win coord using Modified slide_window: relative to Image\n x_start_stop = ((search_win[0][0] * image.shape[1]).round()).astype(int)\n y_start_stop = ((search_win[0][1] * image.shape[0]).round()).astype(int)\n xy_window = (search_win[1], search_win[1])\n searchWbin = slide_window(image, x_start_stop, y_start_stop, xy_window=xy_window)\n searchWinList += searchWbin\n searchWinExtL.append(searchWbin)\n\n if not imgWrt is False:\n tmpImg = np.copy(image)\n for w in range(len(searchWinExtL)):\n colorL = [0,0,0]\n colorL[w] = 225\n tmpImg = draw_boxes(tmpImg, searchWinExtL[w], color=tuple(colorL), thick=w+1)\n mpimg.imsave('/tmp/boxOut.jpg', tmpImg)\n\n hot_windows = search_windows(image, searchWinList, svc, X_scaler, color_space=color_space,\n spatial_size=spatial_size, hist_bins=hist_bins,\n orient=orient, pix_per_cell=pix_per_cell,\n cell_per_block=cell_per_block,\n hog_channel=hog_channel, spatial_feat=spatial_feat,\n hist_feat=hist_feat, hog_feat=hog_feat, dbg=dbg)\n\n oBoxdImg = draw_boxes(imgCpy, hot_windows, color=(0,0,255), thick=6)\n heatAdd = add_heat(heat, hot_windows)\n heatMap = np.clip(heatAdd, 0, 255)\n labels = label(heatMap)\n finnImg = draw_labeled_bboxes(np.copy(image), labels)\n\n titles = ('OrigBoxed', 'Heat Map', 'Labels', 'Car Positions',)[-saveFlev:]\n pltImgs = (oBoxdImg, heatMap, labels[0], finnImg)[-saveFlev:]\n #pltImgs = (oBoxdImg, heatMap, (finnImg * 255).astype(np.int16))[-saveFlev:]\n\n if not imgWrt is False:\n for w in range(len(titles)):\n cMap=None\n if titles[w][0] == 'H': cMap='hot'\n elif titles[w][0] == 'L': cMap='gray'\n mpimg.imsave(imgWrt+titles[w]+'.jpg', pltImgs[w], cmap=cMap)\n\n if dbg:\n print(titles)\n fig = plt.figure()\n for i in range(saveFlev):\n plt.subplot(100+(saveFlev+1)*10+i+1)\n plt.title(titles[i])\n if titles[i][0] == 'H': plt.imshow(pltImgs[i], cmap='hot')\n elif titles[i][0] == 'L': plt.imshow(pltImgs[i], cmap='gray')\n else: plt.imshow(pltImgs[i])\n fig.tight_layout()\n if oFnm != None: plt.savefig(oFnm, bbox_inches='tight')\n else: plt.savefig(imgWrt+'ALLout.png', bbox_inches='tight')\n if dbg and imgWrt == None: plt.show()\n return (finnImg * 255).astype(np.int16) # least shows Video \n\ndef proccessVideo(inClipFnm, outClipFnm='./outPut.mp4', setBegEnd=None, setFps=12):\n if setBegEnd is None:\n print('default')\n inVclip = VideoFileClip(inClipFnm).set_fps(setFps)\n else:\n print('BegEnd')\n inVclip = VideoFileClip(inClipFnm).subclip(setBegEnd[0], setBegEnd[-1]).set_fps(setFps)\n outClip = inVclip.fl_image(processImg)\n outClip.write_videofile(outClipFnm, audio=False)\n\ndef procss6Imgs():\n for i in range(6)[:]:\n vFrame = VideoFileClip('./project_video.mp4').get_frame(30.0+i*5)\n x = processImg(vFrame, saveFlev=4, imgWrt='./output_images/'+str(i)+'_', dbg=True)\n \nif __name__ == '__main__':\n if 0: createSVC(lim=0, pklIt=True)\n #inF = './project_video.mp4'; outF=outClipFnm='./PrjVideoOut.mp4'; \\\n #proccessVideo(inF,outF,setFps=8,setBegEnd=None) # (17,45),\n #inF = './test_video.mp4'; outF=outClipFnm='./outPut1.mp4'; proccessVideo(inF, outF)\n #Prb: 21 (no car) 34 (2cars)\n #bboxImg = mpimg.imread('./test_images/bbox-example-image.jpg'); oFnm='./output_images/orig_1stAsIs.jpg'\n #x=processImg(bboxImg, oFnm=oFnm, saveFlev=3, dbg=True)\n #x=processImg(bboxImg, saveFlev=1, dbg=True)\n procss6Imgs()\n","sub_path":"searchClassify.py","file_name":"searchClassify.py","file_ext":"py","file_size_in_byte":12760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"613100229","text":"\nimport time\nimport msvcrt\n\nkeyqueue = ['a', 'b', 'c', 'd', 'e']\n\nexit = 0\nwhile exit == 0:\n time.sleep(2)\n print('a')\n while msvcrt.kbhit():\n x = msvcrt.getch()\n keyqueue.append(x)\n keyqueue.pop(0)\n\n print('keyqueue is: ' + str(keyqueue))\n if keyqueue[0] == keyqueue[1] == keyqueue[2] == keyqueue[3] == keyqueue[4] == 'x':\n \texit = 1\n \t","sub_path":"python/keytest.py","file_name":"keytest.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"359234457","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nBTC = pd.read_csv('BTC_GDAX.csv',\n index_col='Date', thousands=',')\nETH = pd.read_csv('ETH_GDAX.csv',\n index_col='Date', thousands=',')\nBTC.index=pd.to_datetime(BTC.index)\nETH.index=pd.to_datetime(ETH.index)\nBTC=BTC[::-1]\nETH=ETH[::-1]","sub_path":"Jupyter/XX. Introducción al análisis de series de tiempo en Python/TimeSeries_8.py","file_name":"TimeSeries_8.py","file_ext":"py","file_size_in_byte":337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"276567042","text":"import numpy as np\nimport os as os\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom scipy.interpolate import UnivariateSpline, InterpolatedUnivariateSpline, interp1d\n\n# Get C[II] redshift\nfrom COPS_example import z_CII, z_obs_CO\n\n# Get the path of the master catalog file\n_HOME = os.environ.get('HOME')\n\nrand = np.random.RandomState(42)\n\nnp.set_printoptions(precision=4,suppress=True)\n\n\n\n\n\n\n\n\n############### Mass Estimator (from R. Quadri) ###############\n# Data points for interpolation\nzmeans = [0.200, 0.400, 0.600, 0.800, 1.050, 1.350, 1.650, 2.000, 2.400, 2.900, 3.500]\nintercepts = [18.2842,18.9785,19.2706,19.1569,20.5633,21.5504,19.6128,19.8258,19.8795,23.1529,22.1678]\nslopes = [-0.454737,-0.457170,-0.454706,-0.439577,-0.489793,-0.520825,-0.436967,-0.447071,-0.443592,-0.558047,-0.510875]\nslopes_cols = [0.0661783,-0.0105074,0.00262891,0.140916,0.0321968,0.0601271,0.470524,0.570098,0.455855,0.0234542,0.0162301]\n\nintercepts_b = [18.3347,18.9626,19.2789,19.6839,20.7085,21.8991,22.9160,24.1886,22.6673,23.1514,21.6482]\nslopes_b = [-0.456550,-0.456620,-0.455029,-0.460626,-0.495505,-0.534706,-0.570496,-0.617651,-0.543646,-0.556633,-0.487324]\n\ndata_list = [zmeans, intercepts, slopes, slopes_cols, intercepts_b, slopes_b]\n\n\n# Interpolate the data points, and force the extrapolation to asymptote the mean of data points. \ndef intercept_full(z, flag='color_magnitude'):\n dist = np.maximum((z - 0.2)*(z - 3.5), 0)\n # Interpolating data\n if flag == 'color_magnitude':\n sm = InterpolatedUnivariateSpline(zmeans, intercepts, k=3)\n elif flag == 'magnitude_only':\n sm = InterpolatedUnivariateSpline(zmeans, intercepts_b, k=3)\n else:\n raise ValueError('Invalid flag name!')\n # Forcing extrapolation to asymptote\n ans = sm(z) * np.exp(-dist) + np.mean(intercepts) * (1.-np.exp(-dist))\n return ans\n\ndef slope_full(z, flag='color_magnitude'):\n dist = np.maximum((z - 0.2)*(z - 3.5), 0)\n if flag == 'color_magnitude':\n sm = InterpolatedUnivariateSpline(zmeans, slopes, k=3)\n elif flag == 'magnitude_only':\n sm = InterpolatedUnivariateSpline(zmeans, slopes_b, k=3)\n else:\n raise ValueError('Invalid flag name!')\n ans = sm(z) * np.exp(-dist) + np.mean(slopes) * (1.-np.exp(-dist))\n return ans\n\ndef slope_col_full(z, flag='color_magnitude'):\n dist = np.maximum((z - 0.2)*(z - 3.5), 0)\n if flag == 'color_magnitude':\n sm = InterpolatedUnivariateSpline(zmeans, slopes_cols, k=3)\n ans = sm(z) * np.exp(-dist) + np.mean(slopes_cols) * (1.-np.exp(-dist))\n return ans\n elif flag == 'magnitude_only':\n return 0\n else:\n raise ValueError('Invalid flag name!')\n\nfunc_list = [intercept_full, slope_full, slope_col_full]\n\n\n# Define the mass estimation function\ndef Mass(K, JK, z):\n \"\"\" Return the mass estimate for the given K-band magnitude, J-K color and redshift \"\"\"\n if (JK < 0.) or (JK > 5.):\n flag = 'magnitude_only'\n else:\n flag = 'color_magnitude'\n model = slope_full(z, flag) * K + intercept_full(z, flag) + slope_col_full(z, flag) * JK\n return model\nMass = np.vectorize(Mass)\n\n# Flux to magnitude conversion adopted by the UltraVISTA catalog\ndef FluxToMagnitude(flux, ap_corr):\n return 25.0 - 2.5*np.log10(flux*ap_corr)\n \n\n\n\n\n#################### Extended Bootstrapping Technique (EBT) ####################\n# --- EBT assembles new bins for stacking, rather than drawing from original bins\n# Step 1: draw simulated redshifts from the photometric redshift probability distribution\n# Step 2: estimate the mass using the perturbed redshift and observed K magnitude and J-K color\n# Step 3: a simulated catalog is split up into (original) bins and calculate new stacked flux densities\n# Step 4: repeat Step 1-3 many (>1000) times to complete the \"bootstrapping\"\n\nn_bt = 1000 # Number of bootstrapping\n\npath_cat = '/Desktop/Caltech_OBSCOS/DataCollection/simstack_catalogs/UVISTA/DR2/UVISTA_DR2_master_v2.1_USE.csv'\npath_cat = _HOME + path_cat\n\ncol_to_read = ['ra','dec','z_peak','l68','u68','J','Ks','ap_corr','lmass','rf_U_V','rf_V_J']\n\ndf_cat_in = pd.read_csv(path_cat,usecols=col_to_read)\nheader_list = list(df_cat_in.columns.values)\n\ncat_in = df_cat_in.as_matrix(columns=df_cat_in.columns)\nn_sources = cat_in.shape[0]; n_params = cat_in.shape[1]\n#print 'Size Read-In: ', cat_in.shape\n\nc_z_peak = header_list.index('z_peak')\nc_z_l68 = header_list.index('l68')\nc_z_u68 = header_list.index('u68')\nc_J = header_list.index('J')\nc_Ks = header_list.index('Ks')\nc_ap_corr = header_list.index('ap_corr')\nc_lmass = header_list.index('lmass')\n\n\n\n### Redshift of C[II] Signal ###\nz_CII = 6.5\n\nz_CO_32 = z_obs_CO(3,z_CII)\nz_CO_43 = z_obs_CO(4,z_CII)\n\n\n#if z_CII == 6.0:\n#\tz_CO_32 = 0.27\n#if z_CII == 6.5:\n#\tz_CO_32 = 0.36\n#elif z_CII == 7.0:\n#\tz_CO_32 = 0.46\n\n\ninds3 = np.where( (cat_in[:,c_z_peak]>=z_CO_32-0.01) & (cat_in[:,c_z_peak]<=z_CO_32+0.01) )[0]\n\n#print '----- Ks-logM relation is estimated at z in [%.2f, %.2f] with %d galaxies -----' % (z_CO_32-0.01, z_CO_32+0.01, np.size(inds3))\n\nxpts3 = cat_in[inds3,c_lmass]\nypts3 = FluxToMagnitude(cat_in[inds3,c_Ks], cat_in[inds3,c_ap_corr])\n\nfit_coeff3 = np.polyfit(xpts3,ypts3,1)\n\ndef fit_Ks_logM_3(logM):\n return fit_coeff3[1] + fit_coeff3[0] * logM\n \n\ndef fit_logM_Ks_3(Ks):\n\treturn (Ks - fit_coeff3[1]) / fit_coeff3[0]\n \n#print 'J32: ', fit_Ks_logM_3(9.0)\n#print 'J32: ', fit_logM_Ks_3(22.0)\n\n#xss = np.linspace(8.,11.,100)\n#plt.plot(xpts3, ypts3, 'b+')\n#plt.plot(xss, fit_Ks_logM_3(xss), 'r-')\n#plt.show()\n\n\n\n\n\n#if z_CII == 6.0:\n#\tz_CO_43 = 0.70\n#if z_CII == 6.5:\n#\tz_CO_43 = 0.82\n#elif z_CII == 7.0:\n#\tz_CO_43 = 0.94\n\ninds4 = np.where( (cat_in[:,c_z_peak]>=z_CO_43-0.01) & (cat_in[:,c_z_peak]<=z_CO_43+0.01) )[0]\n\n#print '----- Ks-logM relation is estimated at z in [%.2f, %.2f] with %d galaxies -----' % (z_CO_43-0.01, z_CO_43+0.01, np.size(inds4))\n\nxpts4 = cat_in[inds4,c_lmass]\nypts4 = FluxToMagnitude(cat_in[inds4,c_Ks], cat_in[inds4,c_ap_corr])\n\n\n\nfit_coeff4 = np.polyfit(xpts4,ypts4,1)\n\ndef fit_Ks_logM_4(logM):\n return fit_coeff4[1] + fit_coeff4[0] * logM\n \n\ndef fit_logM_Ks_4(Ks):\n\treturn (Ks - fit_coeff4[1]) / fit_coeff4[0]\n\t\n\t\n#print 'J43: ', fit_Ks_logM_4(9.0)","sub_path":"MPack_Core/Ks_Mstar_Estimate.py","file_name":"Ks_Mstar_Estimate.py","file_ext":"py","file_size_in_byte":6218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"318407677","text":"# coding=utf-8\nfrom keras.models import Sequential\nfrom keras.layers import Input, Embedding, Dropout, LSTM, Dense\nfrom src.preprocessing import data\nimport numpy as np\n\nEMBED_HIDDEN_SIZE = 50\n\n(x_pos, vocabulary_pos, vocabulary_inv_pos) = data.load_data('../../data/chinese/pos.txt')\n(x_neg, vocabulary_neg, vocabulary_inv_neg) = data.load_data('../../data/chinese/neg.txt')\ny_pos = np.ones(len(x_pos))\ny_neg = np.zeros(len(x_neg))\n\nvocab = set(vocabulary_pos + vocabulary_neg)\nprint(len(vocab))\n\nmodel = Sequential()\nmodel.add(Embedding(len(vocab), EMBED_HIDDEN_SIZE))\nmodel.add(LSTM(128, dropout=0.2, recurrent_dropout=0.2))\nmodel.add(Dense(1, activation='sigmoid'))\nmodel.compile(optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])\n# model.fit(x_pos, y_pos, batch_size=200, epochs=1)\n# model.fit(x_neg, y_neg, batch_size=200, epochs=1)\n\nx = model.predict_classes(x_pos)\nprint(len(x))","sub_path":"src/models/lstm.py","file_name":"lstm.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"256346901","text":"#!/usr/bin/python3\n\n# List XOR\n\n# Define a function named list_xor. Your function should take three parameters: n, list1 and list2.\n\n# Your function must return whether n is exclusively in list1 or list2.\n\n# In other words, if n is in both lists or in none of the lists, return False. If n is in only one of the lists, return True.\n\"\"\"\n# list_xor(1, [1, 2, 3], [4, 5, 6]) == True\n# list_xor(1, [0, 2, 3], [1, 5, 6]) == True\n# list_xor(1, [1, 2, 3], [1, 5, 6]) == False\n# list_xor(1, [0, 0, 0], [4, 5, 6]) == False\n\"\"\"\n\n\ndef list_xor(n, list1, list2):\n if (n in list1 and n in list2) or (n not in list1 and n not in list2):\n return False\n else:\n return True\n\n\nprint(list_xor(1, [0, 2, 3], [1, 5, 6]))\nprint(list_xor(1, [0, 0, 0], [4, 5, 6])) \n","sub_path":"Lists/list_xor.py","file_name":"list_xor.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"189125110","text":"import random\nimport string\nimport itertools\n \n \n#随机生成num位数的密码,密码里面包含a-z,A-Z,0-9\ndef getRandomNumKey(num):\n\ta=string.ascii_letters+string.digits#数据源是a-z,A-Z,0-9\n\tkey=random.sample(a,num)\n\tkeys=\"\".join(key)\n\treturn keys\n \n#产生所有的密码情况,其实就是全排列,全部列举出来\ndef generateNumKey(num):\n\t'''\n\t参数 num 是位数,返回值是一个列表\n\t'''\n\tkeys=[]\n\talist=list(string.ascii_letters+string.digits)#数据源是a-z,A-Z,0-9\n\tfor i in itertools.product(alist,repeat= num):\n\t\tprint(i)\n\t\tkeys.append(''.join(list(i))+'\\n')\n\treturn keys\ngetRandomNumKey(4)\ngenerateNumKey(4) #4为数的密码有 14776336个\n \n \n\n","sub_path":"密码生成器.py","file_name":"密码生成器.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"643167074","text":"# -*- coding: utf-8 -*-\nimport re\n\nf = open('sv.txt')\ndata = f.read()\n\n# counting\nwords = {}\nfor word in list(data):\n words[word] = words.get(word, 0) + 1\n\n# sort by count\nd = [(v,k) for k,v in words.items()]\nd.sort()\nd.reverse()\nre_hiragana = re.compile(r'[\\u3041-\\u3093]')\nfor count, word in d[:60]:\n print (count, word)\n","sub_path":"Blog/wordcount/count.py","file_name":"count.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"70986097","text":"#\n# Copyright 2022 Ocean Protocol Foundation\n# SPDX-License-Identifier: Apache-2.0\n#\n\nimport pytest\n\nfrom ocean_lib.web3_internal.constants import ZERO_ADDRESS\nfrom ocean_lib.web3_internal.contract_base import ContractBase\n\n\nclass MyFactory(ContractBase):\n CONTRACT_NAME = \"ERC721Factory\"\n\n\n@pytest.mark.unit\ndef test_name_is_None(config):\n with pytest.raises(Exception):\n # self.name will become None, triggering the error\n ContractBase(config, None)\n\n\n@pytest.mark.unit\ndef test_main(network, alice_wallet, alice_ocean, nft_factory_address, config):\n # test super-simple functionality of child\n factory = MyFactory(config, nft_factory_address)\n factory.deployERC721Contract(\n \"NFT\",\n \"NFTS\",\n 1,\n ZERO_ADDRESS,\n ZERO_ADDRESS,\n \"https://oceanprotocol.com/nft/\",\n True,\n alice_wallet.address,\n {\"from\": alice_wallet},\n )\n\n # test attributes\n assert factory.contract_name == \"ERC721Factory\"\n assert factory.contract is not None\n assert factory.contract.address == nft_factory_address\n assert ContractBase.to_checksum_address(nft_factory_address) == nft_factory_address\n\n # test methods\n assert factory.contract_name == \"ERC721Factory\"\n assert factory.address == nft_factory_address\n assert str(factory) == f\"{factory.contract_name} @ {factory.address}\"\n assert factory.contract.createToken\n assert factory.contract.getCurrentTokenCount\n assert factory.contract.getTokenTemplate\n","sub_path":"ocean_lib/web3_internal/test/test_contract_base.py","file_name":"test_contract_base.py","file_ext":"py","file_size_in_byte":1508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"339664767","text":"import sys, time\nfrom PyQt5 import QtWidgets, QtCore\nfrom PyQt5.QtCore import QThread\nfrom PyQt5.QtWidgets import QApplication, QDialog, QMessageBox, QComboBox\n\n\nclass TH_Zakaz(QThread):\n \"\"\"работает с файлом заказа после его прочтения\"\"\"\n catch_back=QtCore.pyqtSignal(object)\n\n def __init__(self,root,zash):\n super().__init__(root)\n self.main=root\n self._l1= [\"Получатель\",\n \"Заказ УИТ\",\n \"Сбыт.зак. SAP\",\n \"Толщина, мм\",\n \"Ширина, мм\",\n \"Длина, мм\",\n \"Марка стали\",\n \"Класс точности\",\n \"Остаток в прокат, т\",\n \"Шифр МВКС\",\n \"Толщина\",\n \"Ширина\",\n \"Длина\"]\n self._df=zash\n\n def run(self):\n self.main.CurStatus.addItem(\"Заказ приехал\")\n self.catch_back.emit(self._df[self._l1])\n\nclass TH_Statys(QThread):\n \"\"\"работает с файлом заказа после его прочтения\"\"\"\n catch_back=QtCore.pyqtSignal(object)\n\n def __init__(self,root,stat):\n super().__init__(root)\n self.main=root\n self._l1= ['Номер к��нтракта',\n \"SMI\",\n \"Обозначение района\",\n \"Внешний номер заказа\",\n \"№ заказа\",\n \"LSD комбината\",\n \"Дата окончания заказа\",\n \"Марка стали\",\n \"НД на марку\",\n \"НД на техтребования\",\n \"Дата поставки по контракту\",\n \"Состояние поставки\",\n \"Si мин\",\n \"Si макс\"]\n self._df=stat\n\n def run(self):\n self.main.CurStatus.addItem(\"Статус приехал\")\n self.catch_back.emit(self._df[self._l1])\n\n# if __name__=='__main__':\n# app = QApplication(sys.argv)\n# win = TH_Zakaz()\n# sys.exit(app.exec_())","sub_path":"Clean_proj/thread_excel_work.py","file_name":"thread_excel_work.py","file_ext":"py","file_size_in_byte":2330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"105468493","text":"import math\r\ndef balance(equation) :\r\n\r\n '''equation format: 'H2+O2=H2O' '''\r\n\r\n eqn=equation.split('=')\r\n \r\n lft=eqn[0].split('+')\r\n rt=eqn[1].split('+')\r\n lft_elmnts=rt_elmnts={}\r\n coeff_list=[]\r\n\r\n '''\r\n spliting the lists\r\n '''\r\n for g in lft :\r\n i=g.strip().upper()\r\n for k in range(len(i)) :\r\n coeff_list.append(1)\r\n coeff=1\r\n '''\r\n if required this can be used when given partially balanced equation\r\n '''\r\n if i[k].isdigit() :\r\n n,coeff=0,0\r\n try :\r\n while i[k+n].isdigit() :\r\n coeff=10*num+i[k+1]\r\n n+=1\r\n except IndexError :\r\n cooeff=1\r\n if i[k].isalpha():\r\n elmnt=i[k]\r\n n,num=1,0\r\n try :\r\n while i[k+n].isdigit() :\r\n num=10*num+int(i[k+1])\r\n n+=1\r\n except IndexError :\r\n num==1\r\n lft_elmnts[elmnt]=coeff*num\r\n '''same thing for the right list'''\r\n for g in rt :\r\n g=i.strip().upper()\r\n for k in range(len(i)) :\r\n coeff=1\r\n '''same here'''\r\n if i[k].isdigit() :\r\n n,coeff=0,0\r\n try :\r\n while i[k+n].isdigit() :\r\n coeff=10*num+int(i[k+1])\r\n n+=1\r\n except :\r\n coeff=1\r\n if i[k].isalpha():\r\n elmnt=i[k]\r\n n,num=1,0\r\n try :\r\n while i[k+n].isdigit() :\r\n num=10*num+int(i[k+1])\r\n n+=1\r\n except IndexError:\r\n num==1\r\n rt_elmnts[elmnt]=coeff*num\r\n\r\n while lft_elmnts != rt_elmnts :\r\n # lft_nos=lft_elmnts.values()\r\n # rt_nos=rt_elmnts.values()\r\n '''idk what is the problem here'''\r\n '''creating list of keys and values coz idk why the shit abv didnt work'''\r\n lft_nos=lft_val=[]\r\n for i in lft_elmnts :\r\n lft_nos.append(i[lft_elmnts])\r\n lft_val.append(i)\r\n\r\n\r\n '''checking for lowest left number'''\r\n low_no=lft_nos[0]\r\n for i in lft_nos :\r\n if i < low_no :\r\n low_no_lft=i\r\n low_elmnt_lft=lft_val[lft_nos.index(low_no_lft)]\r\n\r\n '''(for right)creating list of keys and values coz idk why the shit abv didnt work'''\r\n rt_nos=rt_val=[]\r\n for i in rt_elmnts :\r\n rt_nos.append(i[rt_elmnts])\r\n rt_val.append(i)\r\n low_no=rt_nos[0]\r\n\r\n '''checking for lowest right number'''\r\n for i in rt_nos :\r\n if i < low_no :\r\n low_no_rt=i\r\n low_elmnt_rt=rt_val[rt_nos.index(low_no_rt)]\r\n\r\n if low_no_lft != low_no_rt :\r\n gcd=math.gcd(low_elmnt_rt,low_elmnt_lft)\r\n lcm=low_elmnt_lft*low_elmnt_rt/gcd\r\n ###Need change\r\n lft_elmnts[low_elmnt_lft],rt_elemnts[low_elmnt_rt]=lcm,lcm\r\n print(lft_elmnts,rt_elemnts)\r\n \"\"\"printing the final result\"\"\"\r\n for i in lft_elmnts :\r\n ###Need Change\r\n print(i,lft_elmnts[i])\r\n\r\n\r\nequation=input('Enter your equation :')\r\nbalance(equation)","sub_path":"balancing_equations(errors removed).py","file_name":"balancing_equations(errors removed).py","file_ext":"py","file_size_in_byte":3474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"455683086","text":"'''\n- linear search: search for an element is O(n)\n- binary search: search for an element is O(log n) (★assume the list is sorted!)\n\nInformally sorting requires to see every element in the list at least once so the cost must be at least linear.\n\nAmortized Cost\n- why bother sorting first?\n- in some cases, may sort a list once then do many searches\n- AMORTIZE cost of the sort over many searches\n- SORT + K*O(log n) < K*O(n) ->> for large K, SORT time becomes irrelevant, if cost of sorting is small enough\n'''\n\n#################################\n### BUBBLE SORT - O(n^2)\n#################################\n\ndef bubble_srt(L):\n swap = False\n while not swap: #O(len(L))\n swap = True\n for j in range(1, len(L)): #O(len(L))\n if L[j-1] > L[j]:\n swap = False\n temp = L[j]\n L[j] = L[j-1]\n L[j-1] = temp\n \n#################################\n### SELECTION SORT - O(n^2)\n#################################\n'''\nFind the smallest element from unsorted list and put it at the front\nAs the list is being sorted, the first i elements are sorted and the largest\nof the SORTED part of list should be smaller than the smallest of the UNOSRTED PART of list.\n'''\n\ndef selection_sort(L):\n suffixSt = 0\n while suffixSt != len(L): #O(len(L))\n for i in range(suffixSt, len(L)): #O(len(L))\n if L[i] < L[suffixSt]:\n L[suffixSt], L[i] = L[i], L[suffixSt]\n suffixSt += 1\n\n\n#################################\n### MERGE SORT - O(nlog(n))\n#################################\n'''\nCutting the problem in half\ndivide and conquer \nOnly comparing the ends of the lists!! No inside comparisons going over every element\nO(len(left) + len(right)) copied elements\nO(len(longer list)) comparisons\n'''\ndef merge(left, right):\n result = []\n i,j = 0,0\n while i < len(left) and j < len(right):\n if left[i] < right[j]:\n result.append(left[i])\n i += 1\n else:\n result.append(right[j])\n j += 1\n \n while (i < len(left)):\n result.append(left[i])\n i += 1\n \n while (j < len(right)):\n result.append(right[j])\n j += 1\n \n return result\n\ndef merge_sort(L):\n if len(L) < 2: #base case\n return L[:]\n else:\n middle = len(L)//2\n left = merge_sort(L[:middle]) #divide\n right = merge_sort(L[middle:]) #divide\n return merge(left, right) #conquer with the merge step\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n ","sub_path":"lec12_search_sort.py","file_name":"lec12_search_sort.py","file_ext":"py","file_size_in_byte":2926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"624687720","text":"import os\nimport sys\nfrom PIL import Image\n\nimport xml.etree.ElementTree as ET\n\n\nclass check_data_before_tf():\n\t\n\tdef __init__(self, directory):\n\t\tself.directory = directory\n\n\tdef set_jpg_format_to_all_images(self):\n\t\tfiles_with_not_jpg_format = 0\n\t\tfor filename in os.listdir(os.path.join(self.directory, \"Images\")):\n\t\t\tif not filename.endswith(\".jpg\"): \n\t\t\t\tprint (filename)\n\t\t\t\tfilepath = (os.path.join(self.directory,\"Images\", filename))\n\t\t\t\timg = Image.open(filepath)\n\t\t\t\tprint (img.format)\n\t\t\t\tif (img.format) != 'JPEG':\n\t\t\t\t\tfiles_with_not_jpg_format += 1\n\t\t\t\t\tprint (\"not a JPEG image file: %s \" %filepath)\n\t\t\t\t\tname = filename.split(\".\")[0]\n\t\t\t\t\t#print (\"name %s\" %name)\n\t\t\t\t\t#print (\"filename %s\" %filename)\n\t\t\t\t\t#print (\"self.directory %s\" %self.directory)\n\t\t\t\t\tnew_file_path = (os.path.join(self.directory,\"Images\", name + \".jpg\"))\n\t\t\t\t\t#print (\"new_file_path \", new_file_path) \n\t\t\t\t\trgb_im = img.convert('RGB')\n\t\t\t\t\trgb_im.save(new_file_path)\n\t\t\t\t\tos.remove(filepath)\n\t\tprint (\"files_with_not_jpg_format %s\" %files_with_not_jpg_format)\n\n\tdef set_xml_filename_path(self):\n\t\tfiles_processed = 0\n\t\terrors_not_passed_2_jpg = 0\n\n\t\tfor subdir, dirs, files in os.walk(os.path.join(self.directory, \"Annotations\")):\n\t\t\tfor file_ in files:\n\t\t\t\tfilepath = os.path.join(subdir, file_)\n\t\t\t\t#print (filepath)\n\t\t\t\t#files_processed += 1\n\n\t\t\t\tif filepath.endswith(\".xml\") or filepath.endswith(\".XML\"):\n\t\t\t\t\t#i += 1\n\t\t\t\t\t#print (i)\n\t\t\t\t\tprint (filepath)\n\t\t\t\t\tfiles_processed += 1\n\t\t\t\t\ttry:\n\t\t\t\t\t\ttree = ET.parse(filepath)\n\t\t\t\t\t\troot = tree.getroot()\n\t\t\t\t\t\tname_jpg = \"\"\n\t\t\t\t\t\tfor filename in root.iter('filename'):\n\t\t\t\t\t\t\tfilename.text = filename.text.split(\".\")[0] + \".jpg\"\n\t\t\t\t\t\t\tname_jpg = filename.text \n\n\t\t\t\t\t\tfor path in root.iter('path'):\t\n\t\t\t\t\t\t\tif name_jpg == \"\":\n\t\t\t\t\t\t\t\terrors_not_passed_2_jpg += 1\t\t\t\t\t\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tpath.text = os.path.join(subdir, name_jpg)\t\t\t\n\n\t\t\t\t\t\t\t#print (path.text)\n\t\t\t\t\t\ttree.write(filepath)\n\t\t\t\t\texcept:\n\t\t\t\t\t\tos.remove(filepath)\t\n\n\n\n#\t\tprint (\"xml that do not have a jpg filename \", errors_not_passed_2_jpg)\n\t\tprint (\"files_processed \", files_processed)\n\n\tdef get_xml_jpg_paths_from_folder(self, rootdir):\n\t\t\txml_paths = []\n\t\t\tjpg_paths= []\n\t\t\tfor subdir, dirs, files in os.walk(rootdir):\n\t\t\t\tfor file in files:\n\t\t\t\t\t#print (file)\n\t\t\t\t\tif file.endswith(\".xml\") or file.endswith(\".XML\"): \n\t\t\t\t\t\tfile_xml = os.path.join(subdir, file)\n\t\t\t\t\t\txml_paths.append([file_xml, file])\n\t\t\t\t\telif file.endswith(\".jpg\"): \n\t\t\t\t\t\tfile_jpg = os.path.join(subdir, file)\n\t\t\t\t\t\tjpg_paths.append([file_jpg, file])\n\n\t\t\treturn xml_paths, jpg_paths\n\n\n\n\n\tdef attach_xml_jpg_files_from_folder(self, xml_paths, jpg_paths):\n\n\t\tpairs_xml_jpg = []\n\t\tnot_math_xml_files = []\n\n\t\tfor xml_file in xml_paths:\n\t\t\tname_xml = xml_file[1].split(\".\")[0]\n\t\t\tmatch_xml = False\n\t\t\tfor jpg_file in jpg_paths:\n\t\t\t\tname_jpg = jpg_file[1].split(\".\")[0]\n\n\t\t\t\tif name_jpg == name_xml:\n\t\t\t\t\tpairs_xml_jpg.append([xml_file[0], jpg_file[0]])\n\t\t\t\t\tmatch_xml = True\n\t\t\t\t\tbreak\n\t\t\tif match_xml == False: # the xml file did not match any jopg file\n\t\t\t\tnot_math_xml_files.append(xml_file[0])\n\t\t\n\t\treturn pairs_xml_jpg, not_math_xml_files\n\n\n\tdef delete_un_pair_xml_jpg_files(self):\n\t\txml_paths, jpg_paths = self.get_xml_jpg_paths_from_folder(self.directory)\n\t\tprint (\"len xml_paths before deleteing unpair xml files\", len(xml_paths))\n\t\tprint (\"len jpg_paths before deleteing unpair xml files\", len(jpg_paths))\n\n#\t\tprint (\"jpg_paths\", (jpg_paths[0][1]))\n#\t\tprint (\"xml_paths\", (xml_paths[0][1]))\n\t\t\t\n\t\tpairs_xml_jpg, not_math_xml_files = self.attach_xml_jpg_files_from_folder(xml_paths, jpg_paths)\n\t\tprint (\"len pairs_xml_jpg \", len(pairs_xml_jpg))\n\t\tprint (pairs_xml_jpg[0])\n\n\n\t\tprint (\"len deleted not_math_xml_files \", len(not_math_xml_files))\n#\t\tprint (not_math_xml_files[0])\n\n\t\tfor no_match_xml in not_math_xml_files:\n\t\t\tos.remove(no_match_xml)\n\n\t\tprint (\"all not_math_xml_files deleted\")\n\nif __name__ == \"__main__\":\n\n\tprint (sys.argv[1]) # prints python_script.py\n\tdirectory = str(sys.argv[1]) #\"/home/millenium/Juan_Camilo/flores/obj_api_2_flores/flores/training_process/automated_data_tagging/per_class/np-P5_1-P6-P7-P8/data/specific_classes\"\n\n\tchk_data = check_data_before_tf(directory)\n\tchk_data.set_jpg_format_to_all_images()\n\tchk_data.set_xml_filename_path()\n\tchk_data.delete_un_pair_xml_jpg_files()\n\n\n\n\n","sub_path":"get_scaled_split_data/check_image_format.py","file_name":"check_image_format.py","file_ext":"py","file_size_in_byte":4299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"439427002","text":"import logging\n\nfrom mopidy import backend\nfrom mopidy.models import Album, Artist, Ref, Track\n\nlogger = logging.getLogger(__name__)\n\n\nclass SubsonicLibraryProvider(backend.LibraryProvider):\n root_directory = Ref.directory(uri=\"subsonic:root\", name=\"Subsonic\")\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.client = self.backend.client\n\n self._root = [\n Ref.directory(uri=\"subsonic:directory\", name=\"Folders\"),\n Ref.directory(uri=\"subsonic:album\", name=\"Albums\"),\n Ref.directory(uri=\"subsonic:artist\", name=\"Artists\"),\n Ref.directory(uri=\"subsonic:track\", name=\"Tracks\"),\n Ref.directory(uri=\"subsonic:genre\", name=\"Genres\"),\n ]\n\n def _do(self, *args, **kwargs):\n return self.client.do(*args, **kwargs)\n\n def browse(self, uri):\n logger.debug(\"browse: %s\", str(uri))\n if not uri:\n return []\n\n if uri == self.root_directory.uri:\n return self._root\n\n if uri == \"subsonic:artist\":\n return self._browse_artists()\n\n parts = uri.split(\":\")\n\n if len(parts) == 3 and parts[1] == \"artist\":\n return self._browse_artist(parts[2])\n\n if len(parts) == 3 and parts[1] == \"album\":\n return self._browse_album(parts[2])\n\n logger.debug(\"Unknown uri for browse request: %s\", uri)\n return []\n\n def lookup(self, uri):\n if uri.startswith(\"subsonic:track:\"):\n return self._lookup_track(uri)\n elif uri.startswith(\"subsonic:album:\"):\n return self._lookup_album(uri)\n elif uri.startswith(\"subsonic:artist:\"):\n return self._lookup_artist(uri)\n else:\n return []\n\n def _lookup_track(self, uri):\n track = self._do(\"getSong\", {\"id\": uri.split(\":\")[2]})[\"song\"]\n return [Track(\n uri=\"subsonic:track:\" + track[\"id\"],\n name=track[\"title\"],\n artists=[Artist(uri=\"subsonic:artist:\" + track[\"artistId\"], name=track[\"artist\"])],\n album=Album(uri=\"subsonic:album:\" + track[\"album\"], name=track[\"album\"]),\n track_no=track[\"track\"],\n disc_no=track[\"discNumber\"],\n length=track[\"duration\"] * 1000,\n bitrate=track[\"bitRate\"]\n )]\n\n def _browse_artists(self):\n response = self._do(\"getArtists\")\n artists = [\n artist\n for index in response[\"artists\"][\"index\"]\n for artist in index[\"artist\"]\n ]\n return [\n Ref.directory(uri=\"subsonic:artist:\" + a[\"id\"], name=a[\"name\"])\n for a in artists\n ]\n\n def _browse_artist(self, id):\n response = self._do(\"getArtist\", params={\"id\": id})\n albums = [album for album in response[\"artist\"][\"album\"]]\n return [\n Ref.directory(uri=\"subsonic:album:\" + a[\"id\"], name=a[\"name\"])\n for a in albums\n ]\n\n def _browse_album(self, id):\n response = self._do(\"getAlbum\", params={\"id\": id})\n tracks = [track for track in response[\"album\"][\"song\"]]\n return [\n Ref.track(uri=\"subsonic:track:\" + a[\"id\"], name=a[\"title\"])\n for a in tracks\n ]\n","sub_path":"mopidy_subsonic/library.py","file_name":"library.py","file_ext":"py","file_size_in_byte":3251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"545849114","text":"import itertools\nfrom abc import abstractmethod\nfrom typing import List, Union, Dict, Any\n\nimport numpy as np\nimport pandas as pd\nfrom joblib import Parallel, delayed\nfrom sklearn.base import BaseEstimator\nfrom sklearn.utils.validation import check_array\nfrom sklearn.utils.validation import check_random_state\n\nimport cca_zoo.data\nimport cca_zoo.models.innerloop\nimport cca_zoo.utils.plot_utils\n\n\nclass _CCA_Base(BaseEstimator):\n \"\"\"\n A class used as the base for methods in the package. Allows methods to inherit fit_transform, predict_corr, and gridsearch_fit\n when only fit (and transform where it is different to the default) is provided.\n\n :param latent_dims: number of latent dimensions to fit\n :param scale: normalize variance in each column before fitting\n \"\"\"\n\n @abstractmethod\n def __init__(self, latent_dims: int = 1, scale=True, centre=True, copy_data=True, accept_sparse=True,\n random_state: Union[int, np.random.RandomState] = None):\n \"\"\"\n Constructor for _CCA_Base\n\n :param latent_dims: number of latent dimensions to fit\n :param scale: normalize variance in each column before fitting\n :param centre: demean data by column before fitting (and before transforming out of sample\n :param copy_data: If True, X will be copied; else, it may be overwritten\n :param accept_sparse: Whether model can take sparse data as input\n :param random_state: Pass for reproducible output across multiple function calls\n \"\"\"\n self.weights = None\n self.train_correlations = None\n self.latent_dims = latent_dims\n self.scale = scale\n self.centre = centre\n self.copy_data = copy_data\n self.accept_sparse = accept_sparse\n self.random_state = check_random_state(random_state)\n\n @abstractmethod\n def fit(self, *views: np.ndarray):\n \"\"\"\n Fits a given model\n\n :param views: numpy arrays with the same number of rows (samples) separated by commas\n \"\"\"\n pass\n return self\n\n def transform(self, *views: np.ndarray, view_indices: List[int] = None, **kwargs):\n \"\"\"\n Transforms data given a fit model\n\n :param views: numpy arrays with the same number of rows (samples) separated by commas\n :param view_indices:\n :param kwargs: any additional keyword arguments required by the given model\n \"\"\"\n transformed_views = []\n if view_indices is None:\n view_indices = np.arange(len(views))\n for i, (view, view_index) in enumerate(zip(views, view_indices)):\n view = check_array(view, copy=self.copy_data, accept_sparse=self.accept_sparse)\n if self.centre:\n view = view - self.view_means[view_index]\n if self.scale:\n view = view / self.view_stds[view_index]\n transformed_view = view @ self.weights[view_index]\n transformed_views.append(transformed_view)\n return transformed_views\n\n def fit_transform(self, *views: np.ndarray, view_indices: List[int] = None, **kwargs):\n \"\"\"\n Fits and then transforms the training data\n\n :param views: numpy arrays with the same number of rows (samples) separated by commas\n :param view_indices:\n :param kwargs: any additional keyword arguments required by the given model\n :rtype: np.ndarray\n \"\"\"\n return self.fit(*views).transform(*views, view_indices=view_indices, **kwargs)\n\n def predict_corr(self, *views: np.ndarray, view_indices: List[int] = None, **kwargs) -> np.ndarray:\n \"\"\"\n Predicts the correlation for the given data using the fit model\n\n :param views: numpy arrays with the same number of rows (samples) separated by commas\n :param kwargs: any additional keyword arguments required by the given model\n :param view_indices:\n :return: all_corrs: an array of the pairwise correlations (k,k,self.latent_dims) where k is the number of views\n :rtype: np.ndarray\n \"\"\"\n # Takes two views and predicts their out of sample correlation using trained model\n transformed_views = self.transform(*views, view_indices=view_indices, **kwargs)\n all_corrs = []\n for x, y in itertools.product(transformed_views, repeat=2):\n all_corrs.append(np.diag(np.corrcoef(x.T, y.T)[:self.latent_dims, self.latent_dims:]))\n all_corrs = np.array(all_corrs).reshape((len(views), len(views), self.latent_dims))\n return all_corrs\n\n def _centre_scale(self, *views: np.ndarray):\n \"\"\"\n Removes the mean of the training data for each view and stores it\n\n :param views: numpy arrays with the same number of rows (samples) separated by commas\n :return: train_views: the demeaned numpy arrays to be used to fit the model\n :rtype: np.ndarray\n \"\"\"\n\n train_views = []\n self.view_means = []\n self.view_stds = []\n self.accept_sparse = ['csr', 'csc', 'coo']\n for view in views:\n view = check_array(view, copy=self.copy_data, accept_sparse=self.accept_sparse)\n if self.centre:\n view_mean = view.mean(axis=0)\n self.view_means.append(view_mean)\n view = view - self.view_means[-1]\n if self.scale:\n view_std = view.std(axis=0, ddof=1)\n view_std[view_std == 0.0] = 1.0\n self.view_stds.append(view_std)\n view = view / self.view_stds[-1]\n train_views.append(view)\n return train_views\n\n def gridsearch_fit(self, *views: np.ndarray, K=None, param_candidates: Dict[str, List[Any]] = None,\n folds: int = 5,\n verbose: bool = False,\n jobs: int = 0,\n plot: bool = False):\n \"\"\"\n Implements a gridsearch over the parameters in param_candidates and returns a model fit with the optimal parameters\n in cross validation (measured by sum of correlations).\n\n :param views: numpy arrays with the same number of rows (samples) separated by commas\n :param K: observation matrix which can be used by GCCA\n :param param_candidates:\n :param folds: number of cross-validation folds\n :param verbose: print results of training folds\n :param jobs: number of jobs. If jobs>1 then the function can use parallelism\n :param plot: produce a hyperparameter surface plot\n \"\"\"\n if param_candidates is None:\n param_candidates = {}\n if verbose:\n print('cross validation', flush=True)\n print('number of folds: ', folds, flush=True)\n\n # Set up an array for each set of hyperparameters\n if len(param_candidates) == 0:\n raise ValueError('No param_candidates was supplied.')\n\n param_names = list(param_candidates.keys())\n param_values = list(param_candidates.values())\n param_combinations = list(itertools.product(*param_values))\n\n param_sets = []\n for param_set in param_combinations:\n param_dict = {}\n for i, param_name in enumerate(param_names):\n param_dict[param_name] = param_set[i]\n param_sets.append(param_dict)\n\n cv = _CrossValidate(self, folds=folds, verbose=verbose, random_state=self.random_state)\n\n if jobs > 0:\n out = Parallel(n_jobs=jobs)(delayed(cv.score)(*views, **param_set, K=K) for param_set in param_sets)\n else:\n out = [cv.score(*views, **param_set) for param_set in param_sets]\n cv_scores = np.array(out)\n max_index = np.argmax(cv_scores.mean(axis=1))\n\n if verbose:\n print('Best score : ', cv_scores[max_index].mean(), flush=True)\n print('Standard deviation : ', cv_scores[max_index].std(), flush=True)\n print(param_sets[max_index], flush=True)\n\n self.cv_results_table = pd.DataFrame(zip(param_sets), columns=['params'])\n self.cv_results_table[[f'fold_{f}' for f in range(folds)]] = cv_scores\n self.cv_results_table = self.cv_results_table.join(pd.json_normalize(self.cv_results_table.params))\n self.cv_results_table.drop(columns=['params'], inplace=True)\n\n if plot:\n cca_zoo.utils.plot_utils.cv_plot(cv_scores.mean(axis=1), param_sets, self.__class__.__name__)\n\n self.set_params(**param_sets[max_index])\n self.fit(*views)\n return self\n\n \"\"\"\n def bayes_fit(self, *views: np.ndarray, space=None, folds: int = 5, verbose=True):\n :param views: numpy arrays separated by comma e.g. fit(view_1,view_2,view_3)\n :param space:\n :param folds: number of folds used for cross validation\n :param verbose: whether to return scores for each set of parameters\n :return: fit model with best parameters\n trials = Trials()\n\n cv = CrossValidate(self, folds=folds, verbose=verbose)\n\n best_params = fmin(\n fn=cv.score(*views),\n space=space,\n algo=tpe.suggest,\n max_evals=100,\n trials=trials,\n )\n self.set_params(**param_sets[max_index])\n self.fit(*views)\n return self\n \"\"\"\n\n\nclass _CrossValidate:\n \"\"\"\n Base class used for cross validation\n \"\"\"\n\n def __init__(self, model, folds: int = 5, verbose: bool = True, random_state=None):\n self.folds = folds\n self.verbose = verbose\n self.model = model\n self.random_state = check_random_state(random_state)\n\n def score(self, *views: np.ndarray, K=None, **cvparams):\n scores = np.zeros(self.folds)\n inds = np.arange(views[0].shape[0])\n self.random_state.shuffle(inds)\n if self.folds == 1:\n # If 1 fold do an 80:20 split\n fold_inds = np.array_split(inds, 5)\n else:\n fold_inds = np.array_split(inds, self.folds)\n for fold in range(self.folds):\n train_sets = [np.delete(view, fold_inds[fold], axis=0) for view in views]\n val_sets = [view[fold_inds[fold], :] for view in views]\n if K is not None:\n train_obs = np.delete(K, fold_inds[fold], axis=1)\n val_obs = K[:, fold_inds[fold]]\n scores[fold] = self.model.set_params(**cvparams).fit(\n *train_sets, K=train_obs).predict_corr(\n *val_sets).sum(axis=-1)[np.triu_indices(len(views), 1)].sum()\n else:\n self.model.set_params(**cvparams).fit(\n *train_sets)\n scores[fold] = self.model.predict_corr(\n *val_sets).sum(axis=-1)[np.triu_indices(len(views), 1)].sum()\n scores[np.isnan(scores)] = 0\n std = scores.std(axis=0)\n if self.verbose:\n print(cvparams)\n print(scores.sum(axis=0) / self.folds)\n print(std)\n return scores\n","sub_path":"cca_zoo/models/cca_base.py","file_name":"cca_base.py","file_ext":"py","file_size_in_byte":11014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"55033013","text":"import itertools\nimport numpy as np\n\ndef grid_search(X_train, y_train, X_val, y_val, model_type, params_choices):\n '''\n Grid searches over best parameters for model given training and validation set.\n '''\n NUM_EPOCHS = 20\n num_features = X_train.shape[1]\n keys = list(params_choices.keys())\n grid = itertools.product(*(params_choices[key] for key in keys))\n best_accuracy = 0\n best_params = {}\n\n for values in grid:\n params = {}\n model = model_type(num_features)\n\n # assign parameters\n for i in range(len(keys)):\n params[keys[i]] = values[i]\n\n mistakes = model.train(X_train, y_train, params, NUM_EPOCHS)\n accuracy = model.test(X_val, y_val)\n\n if (accuracy > best_accuracy):\n best_accuracy = accuracy\n best_params = params\n\n return best_accuracy, best_params\n\ndef train_until_convergence(X, y, model_type, params, R = 1000):\n num_train = X.shape[0]\n num_features = X.shape[1]\n counter = 0\n model = model_type(num_features)\n mistake_count = 0\n i = 0\n\n while (counter < R):\n mistake = model.update(X[i, :], y[i], params)\n mistake_count += mistake\n\n if mistake == 1:\n counter = 0\n else:\n counter += 1\n\n i += 1\n if i >= num_train:\n i = 0\n\n return mistake_count\n","sub_path":"hw3-code/python/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"514784265","text":"from settings import settings\n\ncacheddns = {}\n\nwith open(settings['CachedDnsFile'],'r') as f:\n lines = f.readlines()\n for line in lines:\n ip , domain = line.strip().split(' ')\n ip = ip.strip()\n domain = domain.strip()\n cacheddns[domain] = ip\n\ndef local_lookup(domain):\n if domain in cacheddns.keys():\n ip = cacheddns[domain]\n return (True,ip)\n else:\n return (False,None)\n\n","sub_path":"localdns.py","file_name":"localdns.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"320925273","text":"#!/usr/bin/env python\nimport mincemeat\n\n\ndef mapfn(k, v):\n \"\"\"\n If the given node has less than 2 neighbors, it cannot be a part of a triangle,\n and thus yields the node with the value 0. Otherwise, yields all triplets\n composed of the node and two of its neighbors, with the value 1.\n \"\"\"\n d = v.split(\"->\")\n first = d[0].strip()\n neighbors = list(d[1].strip().split(\" \"))\n n_neighbors = len(neighbors)\n\n # If the node has less than 2 neighbors, yield the node with the value 0\n if n_neighbors < 2:\n yield first, 0\n\n # Yields all triplets composed of the node and two of its neighbors, with the value 1\n for i in range(n_neighbors-1):\n second = neighbors[i]\n for j in range(i+1, n_neighbors):\n third = neighbors[j]\n yield tuple(sorted([first, second, third])), 1\n\n\ndef reducefn(k, vs):\n \"\"\"\n Returns whether the given triplet is a triangle in the graph. This is done by\n counting the occurrences of the triplet - if there are 3 occurrences, that means\n that all its nodes are connected to each other, and hence it is a clique of size 3.\n \"\"\"\n if sum(vs) == 3:\n return True\n return False\n\n\nif __name__ == \"__main__\":\n # Load the graph\n with open(\"graph.txt\") as f:\n content = f.readlines()\n data = [x.strip() for x in content]\n datasource = dict(enumerate(data))\n\n # Run MapReduce on mincemeat\n s = mincemeat.Server()\n s.datasource = datasource\n s.mapfn = mapfn\n s.reducefn = reducefn\n\n results = s.run_server(password=\"pass\")\n\n # Print all the triangles in the graph\n for key in results:\n if results[key]:\n print(key)\n","sub_path":"find_triangles/find_triangles.py","file_name":"find_triangles.py","file_ext":"py","file_size_in_byte":1690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"15731952","text":"from PyQt5 import QtCore,QtGui\nfrom PyQt5.QtWidgets import *\n\nclass CameraViewerWidget(QWidget):\n def __init__(self, parent=None):\n super(CameraViewerWidget, self).__init__(parent)\n self.image = None\n\n def setImage(self, image):\n self.image = image\n sz = image.size()\n self.setMinimumSize(sz)\n self.update()\n\n def paintEvent(self, event):\n qp = QtGui.QPainter()\n qp.begin(self)\n if self.image:\n qp.drawImage(QtCore.QPoint(0, 0), self.image)\n qp.end()","sub_path":"EzLibrarianApplication/CameraViewerWidget.py","file_name":"CameraViewerWidget.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"144066807","text":"from django.db import models\n\nPOST_CHOICES = (\n (\"AA\",\"Alumini\"),\n (\"CC\",\"Convenor\"),\n (\"HC\",\"HeadCoordinator\"),\n (\"MM\",\"Manager\"),\n (\"CO\",\"Coordinator\"),\n)\n\nDOMAIN_CHOICE = (\n (\"WA\",\"WEB & APP\"),\n (\"PM\",\"PR MARKETING\"),\n (\"DD\",\"DOCUMENTATION\"),\n (\"SS\",\"SPONSERSHIP\"),\n (\"DD\",\"DESIGN\"),\n (\"TT\",\"TECHNICAL\"),\n)\n# Create your models here.\n\nclass Team(models.Model):\n photo = models.ImageField(upload_to='img/team',null=True)\n name = models.CharField(max_length=50,null=True)\n branch = models.CharField(max_length=50,null=True)\n domain_assign = models.CharField(choices=DOMAIN_CHOICE,max_length=2,null=True)\n post_assign = models.CharField(choices=POST_CHOICES,max_length=2,null=True)\n fb_id = models.URLField(null=True)\n email_id = models.EmailField(null=True)\n phone = models.CharField(max_length=100,null=True)\n joining = models.DateField(auto_created=False,null=True)\n","sub_path":"apps/about/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"70194916","text":"# 1 = collude\n# 0 = defect\nimport random\n\n\ndef name():\n return 'Generous_Credit_rate'\n\n\nopponentMove = 0\n\n\ndef play(opponentMove):\n if opponentMove == 'start':\n return 1\n\n\nopponentHistory = [opponentMove]\n\n\ndef play(myMove):\n if myMove: return 1\n myhistory = [myMove]\n average = sum(opponentHistory) + sum(myhistory)/(len(myhistory) + len(opponentHistory))\n val = random.randint(0, 100000)\n if average >= (val/100000):\n return 1\n else:\n return 0\n","sub_path":"PrisonersDilemmaSimulator-unknown_N/Generous_Credit_rate2.py","file_name":"Generous_Credit_rate2.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"523366870","text":"#! /usr/bin/env python3\n# -*- conding: utf-8 -*-\n\n\"\"\"\nUsage:\n count_words [options] ...\n\nOptions:\n -f, --file Read string from file.\n -h, --help Show this screen.\n --version Show version.\n\"\"\"\n\nfrom docopt import docopt\n\n\n# Counts the number of individual words in a string. For added complexity read\n# these strings in from a text file and generate a summary.\n\ndef count_words(string):\n word_list = string.split()\n return len(word_list)\n\n\nif __name__ == '__main__':\n args = docopt(__doc__, version='1.0')\n print(args)\n\n if args['--file']:\n word_num = 0\n for filename in args['']:\n with open(filename, encoding='utf-8') as reader:\n for line in reader:\n word_num += count_words(line)\n else:\n word_num = len(args[''])\n\n print(word_num)\n\n","sub_path":"Text/Count_Words/count_words.py","file_name":"count_words.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"196584353","text":"from django.contrib.auth.decorators import login_required\nimport json\n\nfrom django.core import serializers\nfrom django.http import HttpResponse, JsonResponse\nfrom django.shortcuts import render, redirect\nfrom django.contrib.auth import authenticate, login, logout\nfrom .forms import *\n\n\ndef index(request):\n title = 'TeeAsk'\n\n id = request.GET.get('id')\n posts = None\n if id is not None:\n post = Post.objects.get(id=id)\n posts = Post.objects.filter(author=post.author)\n #data = serializers.serialize('json', posts);\n #return HttpResponse(data, content_type=\"application/json\")\n return render(request, 'index.html', locals())\n else:\n posts = Post.objects.all()\n return render(request, 'index.html', locals())\n #likes = Like.objects.filter(user=request.user).values_list('post')\n #print(likes[0][0])\n\n\ndef login_page(request):\n title = 'TeeAsk'\n login_form = LoginForm(request.POST or None)\n reg_form = RegistrationForm(request.POST or None)\n\n if request.method == 'POST':\n if login_form.is_valid():\n user = authenticate(username=login_form.cleaned_data[\"username\"], password=login_form.cleaned_data[\"password\"])\n if user is not None:\n login(request, user)\n return redirect('/', user)\n\n if request.method == 'POST' and reg_form.is_valid():\n print(2)\n print(reg_form)\n new_form = reg_form.save()\n\n return render(request, 'login.html', locals())\n\ndef logout_page(request):\n logout(request)\n return redirect('/', {})\n\ndef new_question(request):\n ques_form = QuestionForm(request.POST or None)\n if request.user is None:\n return redirect('/', {})\n\n if request.method == 'POST':\n if ques_form.is_valid():\n data = ques_form.cleaned_data\n post = Post.objects.create(author=Profile.objects.get(username=request.user))\n post.title = data['title']\n post.text = data['text']\n post.save()\n\n\n return render(request, 'new_question.html', locals())\n\n\ndef like(request):\n id = request.GET['id']\n value = request.GET['value']\n new_like, created = Like.objects.get_or_create(user=request.user, post=Post.objects.get(id=id))\n post = Post.objects.get(id=id)\n if not created:\n like = Like.objects.get(user=request.user, post=post)\n if int(like.value) != int(value):\n if int(value) > 0:\n post.likes += 2\n else:\n post.likes -= 2\n like.value = value\n like.save()\n post.save()\n else:\n new_like.value = value\n new_like.save()\n post.likes += int(value)\n post.save()\n likes_count = post.likes\n ctx = {'id':id, 'like':likes_count}\n return HttpResponse(json.dumps(ctx), content_type='application/json')\n\n","sub_path":"TeeAsk/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"580306152","text":"from django.core.mail import send_mail\nfrom django.conf import settings\n\nfrom .models import Course, Report\n\n\ndef calculate_average_grade(course: Course):\n grades = course.grades.all()\n average = 0\n attendees = 0\n for grade in grades:\n attendees += grade.get_num_attendees()\n average += grade.average_grade * grade.get_num_attendees()\n if attendees == 0:\n return 0.0\n else:\n average /= attendees\n return average\n\n\ndef calculate_total_attendees(course: Course):\n grades = course.grades.all()\n attendees = 0\n for grade in grades:\n attendees += grade.attendee_count\n\n return attendees\n\n\ndef update_course_stats(course: Course):\n average = calculate_average_grade(course)\n course.average = average\n\n attendee_count = calculate_total_attendees(course)\n course.attendee_count = attendee_count\n\n course.save()\n\n\ndef send_report(report: Report):\n send_mail(\n subject=report.subject,\n message=report.email_description,\n from_email=settings.EMAIL_HOST_USER,\n recipient_list=[settings.REPORTS_EMAIL],\n fail_silently=False,\n )\n","sub_path":"grades/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"58081368","text":"from optparse import OptionParser\n\nimport os\nimport json\nimport codecs\n\nimport numpy as np\n\n\ndef main():\n usage = \"%prog input_file output_file\"\n parser = OptionParser(usage=usage)\n #parser.add_option('--keyword', dest='key', default=None,\n # help='Keyword argument: default=%default')\n #parser.add_option('--boolarg', action=\"store_true\", dest=\"boolarg\", default=False,\n # help='Keyword argument: default=%default')\n\n (options, args) = parser.parse_args()\n\n input_filename = args[0]\n output_filename = args[1]\n\n with codecs.open(input_filename, 'r', encoding='utf-8') as input_file:\n data = json.load(input_file)\n\n with codecs.open(output_filename, 'w', encoding='utf-8') as output_file:\n for sentence in data:\n tuples = []\n indices = []\n for token in sentence:\n index = int(token['index']) + 1\n edge_to_parent = token['edge_to_parent']\n if edge_to_parent is None:\n edge_to_parent = 'ROOT'\n word = token['word']\n parent_index = token['parent_index']\n if parent_index is None:\n parent_index = 0\n else:\n parent_index = int(parent_index) + 1\n value = int(token['value'])\n tuples.append((index, word, value, parent_index, edge_to_parent))\n indices.append(index)\n order = np.argsort(indices)\n for i in order.tolist():\n t = tuples[i]\n output_string = u'{0}\\t{1}\\t{2}\\t_\\t_\\t_\\t{3}\\t{4}\\t_\\t_\\n'.format(t[0], t[1], t[2], t[3], t[4])\n output_file.write(output_string)\n output_file.write('\\n')\n\nif __name__ == '__main__':\n main()\n","sub_path":"core/dataset_scripts/sst/convert_dependency_trees_to_conll.py","file_name":"convert_dependency_trees_to_conll.py","file_ext":"py","file_size_in_byte":1824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"558601797","text":"import webapp2\nimport json\nimport decimal\nfrom google.appengine.ext import ndb\nfrom google.appengine.api import users\nimport db_defs\n\nclass DecimalEncoder(json.JSONEncoder):\n def default(self, o):\n if isinstance(o, decimal.Decimal):\n if o % 1 > 0:\n return float(o)\n else:\n return int(o)\n return super(DecimalEncoder, self).default(o)\n\n#Add, modify patient records\nclass PatientRecord(webapp2.RequestHandler):\n\n #Delete a patient\n def delete(self, **kwargs):\n if 'application/json' not in self.request.accept:\n self.response.status = 406\n self.response.write(\"Not Acceptable, API only supports application/json requests.\\n\")\n return\n \n id = (kwargs['PatientId'])\n patient = ndb.Key('Patient', str(id)).get()\n if patient is None:\n message = \"Patient: \" + str(id) + \" not found in database. \\n\"\n self.response.write(message)\n return\n response = patient.key.delete()\n self.response.write(\"Deleted: \" + str(id) + \"\\n\" )\n \n #Returns error if patient doesn't exist. \n def put(self, **kwargs):\n if 'application/json' not in self.request.accept:\n self.response.status = 406\n self.response.write(\"Not Acceptable, API only supports application/json requests.\\n\")\n return\n \n id = (kwargs['PatientId'])\n if id is None:\n self.response.status = 406\n self.response.status_message = \"Invalid request. PatientId is required.\"\n patient = ndb.Key('Patient', str(id)).get()\n if patient is None:\n message = \"Patient: \" + str(id) + \" not found in database. \\n\"\n self.response.write(message)\n return\n\n#Associates an existing doctor with an existing patient \n# and writes to the database \nclass AssociateDoctor(webapp2.RequestHandler):\n def put(self, **kwargs):\n if 'application/json' not in self.request.accept:\n self.response.status = 406\n self.response.write(\"Not Acceptable, API only supports application/json requests.\\n\")\n return\n \n patientId = (kwargs['PatientId'])\n patient = ndb.Key('Patient', str(patientId)).get()\n if patient is None:\n message = \"Patient: \" + str(id) + \" not found in database. \\n\"\n self.response.write(message)\n return\n doctorId = (kwargs['Email'])\n doctor = ndb.Key('Doctor', str(doctorId)).get()\n if doctor is None:\n self.response.write (\"Email: \" + str(doctorId) + \" not found in database.\\n\")\n return\n patient.doctor = doctor.key\n key = patient.put()\n patient = key.get()\n out = patient.to_dict()\n message = \"Patient Record Updated\\n\"+ str(out) + \"\\n\"\n self.response.write(message)\n \n#Associates and existing patient with an existing \n# diagnosis and writes to database\nclass AssociateDiagnosis(webapp2.RequestHandler):\n def put(self, **kwargs):\n if 'application/json' not in self.request.accept:\n self.response.status = 406\n self.response.write(\"Not Acceptable, API only supports application/json requests.\\n\")\n return\n \n patientId = (kwargs['PatientId'])\n patient = ndb.Key('Patient', str(patientId)).get()\n if patient is None:\n message = \"Patient: \" + str(id) + \" not found in database. \\n\"\n self.response.write(message)\n return\n diagnosis = (kwargs['Diagnosis'])\n diagnosis = diagnosis.lower()\n diagnosisObj = ndb.Key('Diagnosis', str(diagnosis)).get()\n if diagnosisObj is None:\n self.response.write (\"Diagnosis: \" + str(diagnosis) + \" not found in database.\\n\")\n return\n patient.condition = diagnosisObj.key\n key = patient.put()\n patient = key.get()\n out = patient.to_dict()\n message = \"Patient Record Updated\\n\"+ str(out) + \"\\n\"\n self.response.write(message)\n\n#Associates an existing patient to an existing medication and writes to database. \nclass AssociateMedication(webapp2.RequestHandler):\n def put(self, **kwargs):\n if 'application/json' not in self.request.accept:\n self.response.status = 406\n self.response.status_message = \"Not Acceptable, API only supports application/json requests.\\n\"\n patientId = (kwargs['PatientId'])\n patient = ndb.Key('Patient', str(patientId)).get()\n if patient is None:\n message = \"Patient: \" + str(id) + \" not found in database. \\n\"\n self.response.write(message)\n return\n \n medication = (kwargs['Medication'])\n medication = medication.lower()\n medicationObj = ndb.Key('Medication', str(medication)).get()\n if medicationObj is None:\n self.response.write (\"Medication: \" + str(medication) + \" not found in database.\\n\")\n return\n \n patient.medication = medicationObj.key\n key = patient.put()\n patient = key.get()\n out = patient.to_dict()\n message = \"Patient Record Updated\\n\"+ str(out) + \"\\n\"\n #self.response.headers['Content-Type'] = 'application/json'\n #self.response.headers['Content-Length'] = len(message)\n self.response.write(message)\n\n#Updates Patient Doctor, Medication and Condition information and writes to database \nclass UpdatePatient(webapp2.RequestHandler):\n def post(self):\n if 'application/json' not in self.request.accept:\n self.response.status = 406\n self.response.write(\"Not Acceptable, API only supports application/json requests.\\n\")\n return\n \n new_Patient = db_defs.Patient()\n patientId = self.request.get('PatientId')\n \n patientFirstName = self.request.get('PatientFirstName')\n patientLastName = self.request.get('PatientLastName')\n patientPhoneNumber = self.request.get('PatientPhoneNumber')\n patientDoctor = self.request.get('PatientDoctor')\n patientMedication = self.request.get('PatientMedication')\n patientConditon = self.request.get('PatientCondition')\n \n if patientId != \"\":\n new_Patient.patientId = int(patientId)\n elif patientId == \"\":\n self.response.status == \"\"\n self.response.write (\"Invalid request. PatientId is required\\n\")\n return\n \n old_Patient = ndb.Key('Patient', str(patientId)).get()\n if old_Patient is None:\n message = \"Patient: \" + patientFirstName + \" \" + patientLastName + \" not in database.\\n\"\n message = message + \"Please use POST to /patient/ to add record.\\n\"\n \n if patientFirstName != \"\":\n new_Patient.firstName = patientFirstName\n else:\n new_Patient.firstName = old_Patient.firstName\n \n if patientLastName != \"\":\n new_Patient.lastName = patientLastName\n else:\n new_Patient.lastName = old_Patient.lastName\n \n if patientPhoneNumber != \"\":\n new_Patient.phoneNumber = int(patientPhoneNumber)\n else:\n new_Patient.phoneNumber = old_Patient.phoneNumber\n \n if patientDoctor != \"\":\n new_Patient.doctor = patientDoctor\n else:\n new_Patient.doctor = old_Patient.doctor\n \n if patientMedication != \"\":\n new_Patient.medication = patientMedication\n else:\n new_Patient.medication = old_Patient.medication\n \n if patientConditon != \"\":\n new_Patient.condition = patientConditon\n else:\n new_Patient.condition = old_Patient.condition\n \n new_Patient.key = ndb.Key('Patient', patientId)\n key = new_Patient.put()\n out = new_Patient.to_dict()\n message = \"Updated Record: \\n\" + str(out) + \"\\n\"\n self.response.write(message)\n \n \n \n \n \n","sub_path":"patient.py","file_name":"patient.py","file_ext":"py","file_size_in_byte":8020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"296793705","text":"from setuptools import setup, find_packages\n\nwith open('install-requirements.txt', 'r') as install_reqf:\n install_req = [req.strip() for req in install_reqf]\n\nsetup(\n name='heptapod',\n version='0.12.3',\n author='Georges Racinet',\n author_email='georges.racinet@octobus.net',\n url='https://foss.heptapod.net/heptapod/py-heptapod',\n description=\"Heptapod server-side Mercurial hooks, extension, etc.\",\n long_description=open('README.md').read(),\n long_description_content_type=\"text/markdown\",\n keywords='hg mercurial heptapod gitlab',\n license='GPLv2+',\n package_data=dict(heptapod=['*.hgrc']),\n packages=find_packages(),\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"License :: OSI Approved\"\n \" :: GNU General Public License v2 or later (GPLv2+)\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 2 :: Only\",\n \"Topic :: Software Development :: Version Control :: Mercurial\",\n ],\n install_requires=install_req,\n)\n","sub_path":"pypi_install_script/heptapod-0.12.3.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"489604956","text":"import logging\nimport os\nimport sys\n\nimport geopandas as gpd\nimport numpy as np\nimport rasterio\nimport shapely.wkt\nfrom django.conf import settings\nfrom django.contrib.gis.gdal import DataSource\nfrom django.contrib.gis.geos import GEOSGeometry\nfrom django.core.files import File\nfrom django.db import DatabaseError, connection, transaction\nfrom shapely.ops import unary_union\n\nfrom lomas_changes.models import CoverageMeasurement, Mask, Raster\nfrom scopes.models import Scope\n\n# Configure logger\nlogger = logging.getLogger(__name__)\nout_handler = logging.StreamHandler(sys.stdout)\nout_handler.setFormatter(logging.Formatter('%(asctime)s %(message)s'))\nout_handler.setLevel(logging.INFO)\nlogger.addHandler(out_handler)\nlogger.setLevel(logging.INFO)\n\nDATA_DIR = os.path.join(settings.BASE_DIR, 'data', 'lomas_changes')\nMASK_DIR = os.path.join(DATA_DIR, 'mask')\nRGB_DIR = os.path.join(DATA_DIR, 'rgb')\n\n\ndef load_data(period):\n #post_process(period)\n create_mask_rgb_raster(period)\n create_masks(period)\n generate_measurements(period)\n\n\ndef create_mask_rgb_raster(period):\n period_s = '{dfrom}_{dto}'.format(dfrom=period.date_from.strftime(\"%Y%m\"),\n dto=period.date_to.strftime(\"%Y%m\"))\n\n logger.info(\"Build RGB loss mask raster\")\n src_path = os.path.join(MASK_DIR, period_s, 'cover.tif')\n dst_path = os.path.join(RGB_DIR, period_s, 'loss.tif')\n os.makedirs(os.path.dirname(dst_path), exist_ok=True)\n write_loss_mask_rgb_raster(src_path=src_path, dst_path=dst_path)\n\n raster, _ = Raster.objects.update_or_create(\n period=period, slug=\"loss\", defaults=dict(name=\"Loss mask\"))\n with open(dst_path, 'rb') as f:\n if raster.file:\n raster.file.delete()\n raster.file.save(f'loss.tif', File(f))\n\n\ndef write_loss_mask_rgb_raster(src_path, dst_path):\n with rasterio.open(src_path) as src:\n img = src.read(1)\n profile = src.profile.copy()\n\n colormap = ['ff0000']\n new_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)\n for i in range(len(colormap)):\n new_img[img == i + 1] = hex_to_dec_string(colormap[i])\n\n os.makedirs(os.path.dirname(dst_path), exist_ok=True)\n profile.update(count=3, dtype=np.uint8, nodata=0)\n with rasterio.open(dst_path, 'w', **profile) as dst:\n for i in range(new_img.shape[2]):\n dst.write(new_img[:, :, i], i + 1)\n\n\ndef create_masks(period):\n period_s = '{dfrom}_{dto}'.format(dfrom=period.date_from.strftime(\"%Y%m\"),\n dto=period.date_to.strftime(\"%Y%m\"))\n\n logging.info(\"Reproject to epsg:4326\")\n src_path = os.path.join(MASK_DIR, period_s, 'cover.geojson')\n dst_path = os.path.join(MASK_DIR, period_s, 'cover_4326.geojson')\n data = gpd.read_file(src_path)\n data_proj = data.copy()\n data_proj['geometry'] = data_proj['geometry'].to_crs(epsg=4326)\n data_proj.to_file(dst_path)\n\n logger.info(\"Load mask to DB\")\n ds = DataSource(dst_path)\n polys = []\n for x in range(0, len(ds[0]) - 1):\n geom = shapely.wkt.loads(ds[0][x].geom.wkt)\n polys.append(geom)\n multipoly = unary_union(polys)\n Mask.objects.update_or_create(\n period=period,\n mask_type='loss',\n defaults=dict(geom=GEOSGeometry(multipoly.wkt)))\n\n\ndef generate_measurements(period):\n logger.info(\"Generate measurements for each scope\")\n\n for scope in Scope.objects.all():\n mask = Mask.objects.filter(period=period, mask_type='loss').first()\n\n # TODO Optimize: use JOINs with Scope and Mask instead of building the shape WKT\n query = \"\"\"\n SELECT ST_Area(a.int) AS area,\n ST_Area(ST_Transform(ST_GeomFromText('{wkt_scope}', 4326), {srid})) as scope_area\n FROM (\n SELECT ST_Intersection(\n ST_Transform(ST_GeomFromText('{wkt_scope}', 4326), {srid}),\n ST_Transform(ST_GeomFromText('{wkt_mask}', 4326), {srid})) AS int) a;\n \"\"\".format(wkt_scope=scope.geom.wkt,\n wkt_mask=mask.geom.wkt,\n srid=32718)\n\n try:\n with connection.cursor() as cursor:\n cursor.execute(query)\n res = cursor.fetchall()\n area, scope_area = res[0]\n\n measurement, created = CoverageMeasurement.objects.update_or_create(\n date_from=period.date_from,\n date_to=period.date_to,\n scope=scope,\n defaults=dict(area=area, perc_area=area / scope_area))\n if created:\n logger.info(f\"New measurement: {measurement}\")\n except DatabaseError as err:\n logger.error(err)\n logger.info(\n f\"An error occurred! Skipping measurement for scope {scope.id}...\"\n )\n\n\ndef hex_to_dec_string(value):\n return np.array([int(value[i:j], 16) for i, j in [(0, 2), (2, 4), (4, 6)]],\n np.uint8)\n","sub_path":"lomas_changes/tasks/load_data.py","file_name":"load_data.py","file_ext":"py","file_size_in_byte":4990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"540458861","text":"from django.shortcuts import render, redirect\nfrom django.http import HttpResponse\nfrom .forms import AddForm, AddItemForm\nfrom .models import receiveTable, item\n\n# Create your views here.\ndef index(request):\n items = item.objects.all()\n return render(request, \"baohanh/all_items.html\",{\"items\": items})\n\ndef addnote(request):\n if request.method == \"POST\":\n pass\n else:\n form = AddForm()\n return render(request, \"baohanh/addnote_page.html\", {\"form\": form})\n\ndef additem(request):\n if request.method == \"POST\":\n pass\n else:\n form = AddItemForm()\n return render(request, \"baohanh/additem_page.html\", {\"form\": form})\n\ndef notedetail(request, note_id):\n if request.method == \"POST\":\n form = AddItemForm(request.POST)\n if form.is_valid():\n add_item = item()\n noteNumber = receiveTable.objects.get(id=note_id)\n # item.noteNumber = receiveTable.objects.get(pk=int(form.cleaned_data[\"noteNumber\"]))\n add_item.noteNumber = noteNumber\n add_item.itemName = form.cleaned_data[\"itemName\"]\n add_item.quantity = form.cleaned_data[\"quantity\"]\n add_item.itemGroup = form.cleaned_data[\"itemGroup\"]\n add_item.status = form.cleaned_data[\"status\"]\n add_item.check = form.cleaned_data[\"check\"]\n add_item.conclude = form.cleaned_data[\"conclude\"]\n add_item.deadline = form.cleaned_data[\"deadline\"]\n add_item.note = form.cleaned_data[\"note\"]\n add_item.done = form.cleaned_data[\"done\"]\n add_item.save()\n return redirect(\"baohanh:notedetail\", note_id=note_id)\n else:\n print(form)\n return HttpResponse(\"Form is not valid\")\n else:\n try:\n noteNumber = receiveTable.objects.get(id=note_id)\n items = noteNumber.item.all()\n context = {\n \"noteNumber\": noteNumber,\n \"items\": items,\n }\n return render(request, \"baohanh/note_detail.html\", context)\n except:\n return HttpResponse(\"Khong co phieu tiep nhan nay\")\n\ndef add(request):\n if request.method == \"POST\":\n form = AddForm(request.POST)\n if form.is_valid():\n note = receiveTable()\n note.noteNumber = form.cleaned_data[\"noteNumber\"]\n note.customers = form.cleaned_data[\"customers\"]\n note.note = form.cleaned_data[\"note\"]\n note.save()\n return redirect(\"baohanh:notedetail\", note_id=note.pk)\n return render(request, \"baohanh/add.html\")","sub_path":"baohanh/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"647891909","text":"#!/usr/bin/env python\n# coding=utf-8\n\nimport re\nimport json\nimport requests\nimport time\nfrom bs4 import BeautifulSoup\n\n\n\ndef get_html(url):\n data = requests.get( url )\n data.encoding = 'utf-8'\n data = data.text\n return data\n\n\nurl=\"http://www.hngp.gov.cn/henan/content?infoId=1537356211779088&channelCode=H620101&bz=0\"\ndef get_url_1(url):\n print(url + \" start...\")\n\n html = get_html(url)\n soup = BeautifulSoup(html, \"html.parser\")\n div_tag = soup.find_all(\"div\", class_=\"BorderEEE BorderRedTop\")[0]\n\n [s.extract() for s in div_tag('style')]\n title = div_tag.find_all(\"h1\")[0].get_text().strip().replace(\"\\n\", \"\")\n\n date = div_tag.find_all(\"span\", class_=\"Blue\")[2].get_text().strip().replace(\"\\n\", \"\")\n text=\"\"\n script=soup.find_all(\"script\")\n for s in script:\n str=s.get_text()\n f=re.search(\"jQuery\\(document\\).ready\\(function\",str)\n if f:\n int=re.search('\\$\\.get\\(\"/webfile.*\\.htm\"',str).span()\n t_url=\"http://www.hngp.gov.cn\"+str[int[0]+7:int[1]-1]\n\n t_html=get_html(t_url)\n t_soup = BeautifulSoup(t_html, \"html.parser\")\n [s.extract() for s in t_soup('style')]\n text=t_soup.get_text().strip().replace(\"\\n\", \"\")\n print(title+\"##\"+date+\"##\"+text)\n\nget_url_1(url)\n\n'''\n\ndef get_url_1(url,file):\n print(\"请编写方法\")\n\n print(url+\" start...\")\n html = get_html(url)\n soup = BeautifulSoup(html, \"html.parser\")\n [s.extract() for s in soup('script')]\n [s.extract() for s in soup('style')]\n title =soup.find(\"h1\").get_text().strip().replace(\"\\n\", \"\")\n\n date_div=soup.find(\"div\",class_=\"detail_bz\")\n date=date_div.find(\"span\").get_text().strip().replace(\"\\n\", \"\")[6:]\n\n text= soup.find(\"div\", class_=\"TRS_Editor\").get_text().strip().replace(\"\\n\", \"\")\n print(title + date+text)\n\nf=\"\"\nget_url_1(url,f)\n\n\ndef middle_url_1(url):\n urls=[]\n print(\"middle_url_1 start ...\")\n html=get_html(url)\n soup=BeautifulSoup(html,\"html.parser\")\n num=re.search(\"/index\",url).span()[0]\n print(num)\n url=url[:48]\n for li_tag in soup.find_all(\"li\"):\n a_tag=li_tag.find(\"a\")[\"href\"]\n next_url=url+a_tag[1:]\n urls.append(next_url)\n\n return urls\nprint(middle_url_1(url))\n\n\ndef get_next_page_url(url,urls):\n html = get_html(url)\n soup = BeautifulSoup(html, \"html.parser\")\n div_tag = soup.find_all(\"div\", class_=\"fanye\")[0]\n script = div_tag.find_all(\"script\", language=\"JavaScript\")\n string = script[0].get_text().strip()\n num = re.search(',', string).span()[0]\n max_page = string[15:num]\n for i in range(int(max_page)):\n if i==0:\n urls.append(url+\"index.html\")\n continue\n urls.append(url+\"index_\"+str(i)+\".html\")\n print(url+\"index_\"+str(i)+\".html\")\n\nget_next_page_url(url,urls)\n\nurl = \"http://www.hngp.gov.cn/henan/content?infoId=1536140453418362&channelCode=H740601&bz=0\"\n\nhtml=get_html(url)\nsoup = BeautifulSoup(html, \"html.parser\")\nprint(url + \" start...\")\n\nhtml = get_html(url)\nsoup = BeautifulSoup(html, \"html.parser\")\ndiv_tag = soup.find_all(\"div\", class_=\"BorderEEE BorderRedTop\")[0]\nprint(div_tag)\n[s.extract() for s in div_tag('style')]\ntitle = div_tag.find_all(\"h1\")[0].get_text().strip().replace(\"\\n\", \"\")\nprint(title)\ndate = div_tag.find_all(\"span\", class_=\"Blue\")[2].get_text().strip().replace(\"\\n\", \"\")\ntext = div_tag.find_all(\"div\", id=\"content\")#[0].get_text().strip().replace(\"\\n\", \"\")\n\nprint(date)\nprint(text)\n\n\n\n\n\n\n\n\nurl = \"http://www.hnggzy.com/hnsggzy/infodetail/?infoid=91b808ef-79d6-4d45-858f-fe4bbbd49548&categoryNum=002001001\"\n\n\ndef get_url_1(url):\n print(url+\" start...\")\n html = get_html(url)\n soup = BeautifulSoup(html, \"html.parser\")\n table_tag =soup.find_all(\"table\", width=\"887\")[0]\n [s.extract() for s in table_tag('br')]\n title= table_tag.find_all(\"td\", height=\"76\")[0].get_text().strip().replace(\"\\n\", \"\")\n print(\"title \"+title)\n date=table_tag.find_all(\"td\", height=\"30\")[0].get_text().strip().replace(\"\\n\", \"\")[10:19]\n print(\"date \"+date)\n\n text= table_tag.find_all(\"td\", style=\"padding:26px 40px 10px;\")[0].get_text().strip()\n print(\"text \",text)\n #print(\"all:\"+url + \"##\" + date + \"##\" + title + \"##\" + text + \"\\n\")\n\n\ndef get_html(url):\n data = requests.get( url )\n data.encoding = 'gb2312'\n data = data.text\n return data\n\n\nget_url_1(url)\n\n'''\n\n\n","sub_path":"python/test/test_spider.py","file_name":"test_spider.py","file_ext":"py","file_size_in_byte":4388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"218721336","text":"\nfrom random import random\nfrom math import hypot\n\n\ndef monteCarloApprox(trials):\n \"\"\"\n Create random x and y coordinates in 1000000 trials\n For each time the euclidian distance between x and y is less than 1 (it means it is inside the circle) and we add 1\n PI is close to 4 times the result of the division between the nr of times the euclidian distance was inside the circle and the nr of total trials\n\n The Area of a circle is πr^2. If we set r to be less than 1 and positive, then we can more easily approximate pi\n \"\"\"\n inside = 0\n for i in range(trials):\n x = random()\n y = random()\n if hypot(x, y) < 1:\n inside += 1\n print('I think pi is: ', 4.0 * inside / trials, inside)\n\n\nif __name__ == '__main__':\n trials = 10 ** 6\n monteCarloApprox(trials)\n","sub_path":"MonteCarloPI/monteCarloPI.py","file_name":"monteCarloPI.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"574131023","text":"from maintain_frontend.services.validation.field_validator import FieldValidator\nfrom maintain_frontend.services.validation.validation_error_builder import ValidationErrorBuilder\n\n\nclass ApplicantInfoValidator(object):\n\n @staticmethod\n def validate(form):\n \"\"\"Extracts value from common fields from applicant info and address_fields_partial for validation.\n\n\n parameters:\n - form: The form which includes the address_fields partial\n\n returns:\n dict: An instance of ValidationErrorBuilder with a ValidationError dict and a heading summary message.\n \"\"\"\n\n applicant_name = form.get('applicant_name', '')\n address = form.get('address_line_1', '') + \\\n form.get('address_line_2', '') + \\\n form.get('address_line_3', '') + \\\n form.get('address_line_4', '') + \\\n form.get('address_line_5', '') + \\\n form.get('address_line_6', '')\n country = form.get('country', '')\n postcode = form.get('postcode', '')\n\n validation_error_builder = ValidationErrorBuilder()\n\n FieldValidator(applicant_name, 'applicant_name', 'Name', validation_error_builder) \\\n .is_required()\n\n FieldValidator(applicant_name, 'applicant_name', 'Name', validation_error_builder,\n \"Reduce your answer to 200 characters or less\", \"Answer is too long\") \\\n .is_length_less_than_or_equal_to(200)\n\n FieldValidator(address, 'address_line_1', 'Address', validation_error_builder) \\\n .is_required()\n\n if country == \"United Kingdom\":\n FieldValidator(postcode, 'postcode', 'Postcode', validation_error_builder) \\\n .is_required().is_postcode()\n\n return validation_error_builder.get()\n","sub_path":"maintain_frontend/add_lon/validation/applicant_info_validator.py","file_name":"applicant_info_validator.py","file_ext":"py","file_size_in_byte":1793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"550256143","text":"# coding: utf-8\nimport logging\nimport pickle\n\nfrom datetime import datetime, timedelta\n\nimport pytest\n\nfrom sentry_sdk import Hub, Client, add_breadcrumb, capture_message\nfrom sentry_sdk.transport import _parse_rate_limits\nfrom sentry_sdk.integrations.logging import LoggingIntegration\n\n\n@pytest.fixture\ndef make_client(request, httpserver):\n def inner(**kwargs):\n return Client(\n \"http://foobar@{}/132\".format(httpserver.url[len(\"http://\") :]), **kwargs\n )\n\n return inner\n\n\n@pytest.mark.forked\n@pytest.mark.parametrize(\"debug\", (True, False))\n@pytest.mark.parametrize(\"client_flush_method\", [\"close\", \"flush\"])\n@pytest.mark.parametrize(\"use_pickle\", (True, False))\ndef test_transport_works(\n httpserver,\n request,\n capsys,\n caplog,\n debug,\n make_client,\n client_flush_method,\n use_pickle,\n maybe_monkeypatched_threading,\n):\n httpserver.serve_content(\"ok\", 200)\n caplog.set_level(logging.DEBUG)\n client = make_client(debug=debug)\n\n if use_pickle:\n client = pickle.loads(pickle.dumps(client))\n\n Hub.current.bind_client(client)\n request.addfinalizer(lambda: Hub.current.bind_client(None))\n\n add_breadcrumb(level=\"info\", message=\"i like bread\", timestamp=datetime.utcnow())\n capture_message(\"löl\")\n\n getattr(client, client_flush_method)()\n\n out, err = capsys.readouterr()\n assert not err and not out\n assert httpserver.requests\n\n assert any(\"Sending event\" in record.msg for record in caplog.records) == debug\n\n\ndef test_transport_infinite_loop(httpserver, request, make_client):\n httpserver.serve_content(\"ok\", 200)\n\n client = make_client(\n debug=True,\n # Make sure we cannot create events from our own logging\n integrations=[LoggingIntegration(event_level=logging.DEBUG)],\n )\n\n with Hub(client):\n capture_message(\"hi\")\n client.flush()\n\n assert len(httpserver.requests) == 1\n\n\nNOW = datetime(2014, 6, 2)\n\n\n@pytest.mark.parametrize(\n \"input,expected\",\n [\n # Invalid rate limits\n (\"\", {}),\n (\"invalid\", {}),\n (\",,,\", {}),\n (\n \"42::organization, invalid, 4711:foobar;transaction;security:project\",\n {\n None: NOW + timedelta(seconds=42),\n \"transaction\": NOW + timedelta(seconds=4711),\n \"security\": NOW + timedelta(seconds=4711),\n # Unknown data categories\n \"foobar\": NOW + timedelta(seconds=4711),\n },\n ),\n (\n \"4711:foobar;;transaction:organization\",\n {\n \"transaction\": NOW + timedelta(seconds=4711),\n # Unknown data categories\n \"foobar\": NOW + timedelta(seconds=4711),\n \"\": NOW + timedelta(seconds=4711),\n },\n ),\n ],\n)\ndef test_parse_rate_limits(input, expected):\n assert dict(_parse_rate_limits(input, now=NOW)) == expected\n\n\ndef test_simple_rate_limits(httpserver, capsys, caplog, make_client):\n client = make_client()\n httpserver.serve_content(\"no\", 429, headers={\"Retry-After\": \"4\"})\n\n client.capture_event({\"type\": \"transaction\"})\n client.flush()\n\n assert len(httpserver.requests) == 1\n assert httpserver.requests[0].url.endswith(\"/api/132/envelope/\")\n del httpserver.requests[:]\n\n assert set(client.transport._disabled_until) == set([None])\n\n client.capture_event({\"type\": \"transaction\"})\n client.capture_event({\"type\": \"event\"})\n client.flush()\n\n assert not httpserver.requests\n\n\n@pytest.mark.parametrize(\"response_code\", [200, 429])\ndef test_data_category_limits(httpserver, capsys, caplog, response_code, make_client):\n client = make_client()\n httpserver.serve_content(\n \"hm\",\n response_code,\n headers={\"X-Sentry-Rate-Limits\": \"4711:transaction:organization\"},\n )\n\n client.capture_event({\"type\": \"transaction\"})\n client.flush()\n\n assert len(httpserver.requests) == 1\n assert httpserver.requests[0].url.endswith(\"/api/132/envelope/\")\n del httpserver.requests[:]\n\n assert set(client.transport._disabled_until) == set([\"transaction\"])\n\n client.capture_event({\"type\": \"transaction\"})\n client.capture_event({\"type\": \"transaction\"})\n client.flush()\n\n assert not httpserver.requests\n\n client.capture_event({\"type\": \"event\"})\n client.flush()\n\n assert len(httpserver.requests) == 1\n\n\n@pytest.mark.parametrize(\"response_code\", [200, 429])\ndef test_complex_limits_without_data_category(\n httpserver, capsys, caplog, response_code, make_client\n):\n client = make_client()\n httpserver.serve_content(\n \"hm\",\n response_code,\n headers={\"X-Sentry-Rate-Limits\": \"4711::organization\"},\n )\n\n client.capture_event({\"type\": \"transaction\"})\n client.flush()\n\n assert len(httpserver.requests) == 1\n assert httpserver.requests[0].url.endswith(\"/api/132/envelope/\")\n del httpserver.requests[:]\n\n assert set(client.transport._disabled_until) == set([None])\n\n client.capture_event({\"type\": \"transaction\"})\n client.capture_event({\"type\": \"transaction\"})\n client.capture_event({\"type\": \"event\"})\n client.flush()\n\n assert len(httpserver.requests) == 0\n","sub_path":"tests/test_transport.py","file_name":"test_transport.py","file_ext":"py","file_size_in_byte":5218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"29732539","text":"import pandas as pd \nimport numpy as np \nimport seaborn as sb \nimport matplotlib.pyplot as plt \nimport scipy.stats as sps \npath=\"/home/ashifer/code/datasets/auto/auto_edit_one.csv\"\ndf=pd.read_csv(path)\ndrive_wheel_count=df[\"drive-wheels\"].value_counts()\nprint(drive_wheel_count)\n\n#----------DATA VISUALIZATION---------\n\n\"\"\"\nbox plots->\nused to visualize distributions of data\nmiddle line->median\nbox upper end->75%\nbox lower end->25%\nline above box->upper extreme\nline below box->lower extreme\ndots->outliers of data\n\"\"\"\nsb.boxplot(x=\"drive-wheels\",y=\"price\",data=df)\nplt.show()\n\"\"\"\nscatter plots->\nused for visualization of continuous plots\nshows relation between two variables\npredictor(independent vars)->x-axis\ntarget(value we are trying to predict)->yaxis\n\"\"\"\ny=df[\"price\"]\nx=df[\"engine-size\"]\nplt.scatter(x,y)\nplt.title(\"scatter\")\nplt.xlabel(\"engine-size\")\nplt.ylabel(\"price\")\n#these can be used for anyplot\nplt.show()\ndf_tst=df[['drive-wheels','body-style','price']]\ndf_grp=df_tst.groupby(['drive-wheels','body-style']).mean()\nprint(df_grp)\n#we get avg cost of car by drive-wheel and bodytype\n#this is hard to read so we use pivot\ndf_pivot=df_grp.pivot_table(index=['drive-wheels'],columns=['body-style'])\ndf_pivot.replace(np.nan,0,inplace=True)\nprint(df_pivot)\nplt.pcolor(df_pivot,cmap='RdBu')\nplt.colorbar()\nplt.show()\n\n#----------CORRELATION----------\n\n#relation between variables is observed\nsb.regplot(x,y,df)\nplt.ylim(0,)\nplt.show()\n#regression plot can be seen\n#ylim(bottom,top) and xlim() are used to limit lower and upper bounds\n#last graph gave a positive slope showing engine and price are linearly dependent\nsb.regplot(x=\"highway-mpg\",y=\"price\",data=df)\nplt.ylim(0,)\nplt.show()\n#here we can see more mpg less price from graph\n\"\"\"\nwe use strongly correlated attributes for prediction\nstrength is measured by different methods\nPEARSON CORRELATION\nwe get coefficient and p-value of a data from this\ncoef is close to 1->large positive relation\ncoef is close to -1->large negative relation\ncoef is close to 0->no relation\npval < 0.001 strong coef certainity\npval < 0.05 moderate certainity\npval < 0.1 weak certainity\npval > 0.1 no certainity \nwe can calculate using scipy stats\n\"\"\" \ncoef,pval=sps.pearsonr(df['horsepower'],df['price'])\nprint(coef,pval)\nprint(df.corr()) #returns coef\nsb.heatmap(df.corr(), annot = True)\nplt.show()\n\n#---------ANALYSIS OF VARIABLES(ANOVA)--------------\n\"\"\"\nanova is used to find correlation between catogorical variables\nanova returns F-test_score and p-val\nf-test->variation between same group means divided by variation within same group\np-val is confidence degree\nif f is small corr is less\n\"\"\"\ndf_anova=df[[\"make\"],[\"price\"]]\ngrpd_anova=df_anova.groupby([\"make\"])\n","sub_path":"plots.py","file_name":"plots.py","file_ext":"py","file_size_in_byte":2714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"131545534","text":"# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Base config.\"\"\"\n\nfrom ml_collections import config_dict\n\n\ndef validate_keys(base_cfg, config, base_filename=\"base_config.py\"):\n \"\"\"Validates that the config \"inherits\" from a base config.\n\n Args:\n base_cfg (`ConfigDict`): base config object containing the required fields\n for each experiment config.\n config (`ConfigDict`): experiment config to be checked against base_cfg.\n base_filename (str): file used to generate base_cfg.\n\n Raises:\n ValueError: if base_cfg contains keys that are not present in config.\n \"\"\"\n\n for key in base_cfg.keys():\n if key not in config:\n raise ValueError(\"Key {!r} missing from config. This config is required \"\n \"to have keys: {}. See {} for details.\".format(\n key, list(base_cfg.keys()), base_filename))\n if (isinstance(base_cfg[key], config_dict.ConfigDict) and\n config[key] is not None):\n validate_keys(base_cfg[key], config[key])\n\n\ndef validate_config(config):\n validate_keys(get_base_config(), config)\n\n\ndef get_base_config():\n \"\"\"Returns base config object for an experiment.\"\"\"\n config = config_dict.ConfigDict()\n config.experiment_kwargs = config_dict.ConfigDict()\n\n config.training_steps = 10000 # Number of training steps.\n\n config.interval_type = \"secs\"\n config.save_checkpoint_interval = 300\n config.log_tensors_interval = 60\n config.log_train_data_interval = 120.0 # None to turn off\n\n # If True, asynchronously logs training data from every training step.\n config.log_all_train_data = False\n\n # If true, run evaluate() on the experiment once before you load a checkpoint.\n # This is useful for getting initial values of metrics at random weights, or\n # when debugging locally if you do not have any train job running.\n config.eval_initial_weights = False\n\n # When True, the eval job immediately loads a checkpoint runs evaluate()\n # once, then terminates.\n config.one_off_evaluate = False\n\n # Number of checkpoints to keep by default\n config.max_checkpoints_to_keep = 5\n\n # Settings for the RNGs used during training and evaluation.\n config.random_seed = 42\n config.random_mode_train = \"unique_host_unique_device\"\n config.random_mode_eval = \"same_host_same_device\"\n\n # The metric (returned by the step function) used as a fitness score.\n # It saves a separate series of checkpoints corresponding to\n # those which produce a better fitness score than previously seen.\n # By default it is assumed that higher is better, but this behaviour can be\n # changed to lower is better, i.e. behaving as a loss score, by setting\n # `best_model_eval_metric_higher_is_better = False`.\n # If `best_model_eval_metric` is empty (the default), best checkpointing is\n # disabled.\n config.best_model_eval_metric = \"\"\n config.best_model_eval_metric_higher_is_better = True\n\n return config\n","sub_path":"jaxline/base_config.py","file_name":"base_config.py","file_ext":"py","file_size_in_byte":3543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"107343589","text":"from __future__ import print_function\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.autograd import Variable\nfrom torchvision import datasets, transforms\n\nlearn_rate = 0.01\nsgd_momentum = 0.5\ncuda = True\nepochs = 10\nseed = 1\nbatch_size = 64\ntest_batch_size = 1000\nlog_intervall = 10\n\nTRAIN = False\nif TRAIN:\n # Training settings\n\n cuda = cuda and torch.cuda.is_available()\n\n torch.manual_seed(1)\n if cuda:\n torch.cuda.manual_seed(1)\n\n kwargs = {'num_workers': 1, 'pin_memory': True} if cuda else {}\n train_loader = torch.utils.data.DataLoader(\n datasets.MNIST('PT/data', train=True, download=True,\n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])),\n batch_size=batch_size, shuffle=True, **kwargs)\n\n test_loader = torch.utils.data.DataLoader(\n datasets.MNIST('PT/data', train=False, transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])),\n batch_size=test_batch_size, shuffle=True, **kwargs)\n\n\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.conv1 = nn.Conv2d(1, 10, kernel_size=5)\n self.conv2 = nn.Conv2d(10, 20, kernel_size=5)\n self.conv2_drop = nn.Dropout2d()\n self.fc1 = nn.Linear(320, 50)\n self.fc2 = nn.Linear(50, 10)\n\n def forward(self, x):\n x = F.relu(F.max_pool2d(self.conv1(x), 2))\n x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))\n x = x.view(-1, 320)\n x = F.relu(self.fc1(x))\n x = F.dropout(x, training=self.training)\n x = self.fc2(x)\n return F.log_softmax(x, dim=1)\n\n\ndef train(epoch):\n model.train()\n for batch_idx, (data, target) in enumerate(train_loader):\n if cuda:\n data, target = data.cuda(), target.cuda()\n data, target = Variable(data), Variable(target)\n optimizer.zero_grad()\n output = model(data)\n loss = F.nll_loss(output, target)\n loss.backward()\n optimizer.step()\n if batch_idx % log_intervall == 0:\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n epoch, batch_idx * len(data), len(train_loader.dataset),\n 100. * batch_idx / len(train_loader), loss.data[0]))\n\n\ndef test():\n model.eval()\n test_loss = 0\n correct = 0\n for data, target in test_loader:\n if cuda:\n data, target = data.cuda(), target.cuda()\n data, target = Variable(data, volatile=True), Variable(target)\n output = model(data)\n test_loss += F.nll_loss(output, target, size_average=False).data[0] # sum up batch loss\n pred = output.data.max(1, keepdim=True)[1] # get the index of the max log-probability\n correct += pred.eq(target.data.view_as(pred)).long().cpu().sum()\n\n test_loss /= len(test_loader.dataset)\n print('\\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\\n'.format(\n test_loss, correct, len(test_loader.dataset),\n 100. * correct / len(test_loader.dataset)))\n\n\nmodel = Net()\nmodel.cuda()\n\nif TRAIN:\n optimizer = optim.SGD(model.parameters(), lr=learn_rate, momentum=sgd_momentum)\n\nmodel.cuda().load_state_dict(torch.load(\"PT/netsave\"))\nmodel.eval()\n\nif TRAIN:\n for epoch in range(1, epochs + 1):\n train(epoch)\n if epoch % 10 is 0:\n test()\n if epoch % 100 is 0:\n torch.save(model.cuda().state_dict(), \"PT/netsave\")\n\n torch.save(model.cuda().state_dict(), \"PT/netsave\")\n\n# test()\n\n# print(model(image_loader(\"dddd.jpg\")).data.max(1, keepdim=True)[1])\n","sub_path":"pytorch.py","file_name":"pytorch.py","file_ext":"py","file_size_in_byte":3818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"578788984","text":"from case_utility import *\nfrom qrd_shared.case import *\nfrom test_case_base import TestCaseBase\nimport re,copy\nfrom test_suit_ui_launcher import *\n############################################\n#author:\n# huitingn@qualcomm.com.cn\n#function:\n# Launcher Stress-drag widget\n############################################\n\n\nclass test_suit_ui_launcher_case06(TestCaseBase):\n tag = 'ui_launcher_case06'\n def test_case_main(self,case_results):\n case_flag = False\n \n global preWorkFlag\n if preWorkFlag is False:\n [toolsLocation,deviceID]=pre_work()\n \n if can_continue():\n send_key(KEY_HOME)\n basic = 'java -jar %s%sXAgentClientRunner.jar -s %s '%(toolsLocation,os.sep,deviceID)\n \n command = basic+'UICommand.getViewPosition %s equals %s'%('text','\"Digital clock\"')\n result = copy.copy(os.popen(command).read())\n (leftX,leftY,rightX,rightY) = re.search('(?<=true:)(\\d+),(\\d+),(\\d+),(\\d+)', result).group(1,2,3,4)\n \n startX = (leftX+rightX)/2 # the position of widget\n startY = (leftY+rightY)/2\n endX = (startX-120) *int(480.0/getDisplayWidth()) # this is the 'Remove's position\n endY = (startY-150) *int(854.0/getDisplayHeight())\n command = basic+'UICommand.Drag %s %s %s %s'%(startX,startY,endX,endY)\n \n if can_continue():\n command = basic+'UICommand.getViewPosition %s equals %s'%('text','Search')\n result = copy.copy(os.popen(command).read())\n (leftX2,leftY2,rightX2,rightY2) = re.search('(?<=true:)(\\d+),(\\d+),(\\d+),(\\d+)', result).group(1,2,3,4)\n \n before = (leftX,leftY,rightX,rightY)\n after = (leftX2,leftY2,rightX2,rightY2)\n if cmp(before,after)==0:\n case_flag = True\n \n \n log_test_case(self.tag, 'case_flag = '+str(case_flag))\n exit_cur_case(self.tag)\n if case_flag:\n qsst_log_case_status(STATUS_SUCCESS, \"\" , SEVERITY_HIGH)\n else:\n qsst_log_case_status(STATUS_FAILED, \"some widgets are failed\", SEVERITY_HIGH)\n case_results.append((self.case_config_map[fs_wrapper.CASE_NAME_ATTR], can_continue()))\n","sub_path":"test_env/test_suit_ui_launcher/test_suit_ui_launcher_case06.py","file_name":"test_suit_ui_launcher_case06.py","file_ext":"py","file_size_in_byte":2276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"90201805","text":"import sys\nfrom PyQt5.QtWebEngineWidgets import QWebEngineView, QWebEngineSettings\nfrom PyQt5.QtCore import QUrl, pyqtSlot, pyqtSignal, Qt, QObject, QDateTime, QPointF\n\nimport numpy as np\nimport datetime\nimport collections\n\n# This will automatically load qrc\nimport core_rc\n\nclass GPSView(QWebEngineView):\n\n aboutToClose = pyqtSignal(QObject)\n cursorMoved = pyqtSignal(float)\n\n def __init__(self, parent):\n\n super(QWebEngineView, self).__init__(parent)\n self.path = []\n self.marker_position = []\n self.positions = collections.OrderedDict()\n\n #self.setFixedHeight(300)\n #self.setMinimumHeight(500)\n\n #self.reftime = datetime.datetime.now()\n self.pageReady = False\n\n # Settings\n self.settings().setAttribute(QWebEngineSettings.JavascriptEnabled, True)\n self.settings().setAttribute(QWebEngineSettings.LocalContentCanAccessRemoteUrls, True)\n\n self.loadFinished.connect(self.pageLoaded)\n\n # Load file from qrc\n self.setUrl(QUrl('qrc:/OpenIMU/html/map.html'))\n\n # 3IT = 45.3790193,-71.9430778\n # self.setCurrentPosition(45.3790193, -71.9430778)\n\n def closeEvent(self, QCloseEvent):\n self.aboutToClose.emit(self)\n\n def addPosition(self, timestamp, latitude, longitude):\n # if timestamp < self.reftime:\n # self.reftime = timestamp\n if type(timestamp) is datetime.datetime:\n timestamp = timestamp.timestamp()\n\n self.positions[timestamp] = QPointF(latitude, longitude)\n\n if self.pageReady is True:\n self.page().runJavaScript('addPosition(' + str(latitude) + ',' + str(longitude) + ');')\n else:\n # print('Cannot set position, page not ready, saving for later')\n self.path.append([latitude, longitude])\n\n def setCursorPositionFromTime(self, timestamp, emit_signal=False):\n\n # timestamp -= datetime.timedelta(microseconds=timestamp.microsecond)\n position = None\n\n try:\n position = self.positions[timestamp] # Right on the value!\n except KeyError:\n # Find the closest best position\n # if timestamp < self.reftime:\n # timestamp = self.reftime\n # start_timestamp = next(iter(self.positions))\n\n if type(timestamp) is datetime.datetime:\n timestamp = timestamp.timestamp()\n\n # Find nearest point\n idx = (np.abs(np.asarray(list(self.positions.keys())) - timestamp)).argmin()\n position = list(self.positions.values())[idx]\n\n \"\"\"if timestamp < start_timestamp:\n timestamp = start_timestamp\n\n while timestamp >= start_timestamp:\n timestamp = timestamp - datetime.timedelta(seconds=1)\n try:\n position = self.positions[timestamp]\n break\n except KeyError:\n continue\n \"\"\"\n if position is not None:\n self.marker_position = [position.x(), position.y()]\n if self.pageReady:\n self.page().runJavaScript('setMarkerPosition(' + str(position.x()) + ',' + str(position.y()) + ');')\n\n #if emit_signal:\n # self.cursorMoved.emit(timestamp)\n\n \"\"\"\n def setCurrentPosition(self, latitude, longitude):\n if self.pageReady is True:\n self.page().runJavaScript('setCurrentPosition(' + str(latitude) + ',' + str(longitude) + ');')\n else:\n # print('Cannot set position, page not ready, saving for later')\n self.path.append([latitude, longitude])\n \"\"\"\n def clearMap(self):\n if self.pageReady is True:\n self.page().runJavaScript('clearMap();')\n\n @pyqtSlot(bool)\n def pageLoaded(self, state):\n print('page loaded:', state)\n\n if state is True:\n self.pageReady = True\n for coords in self.path:\n self.page().runJavaScript('addPosition(' + str(coords[0]) + ',' + str(coords[1]) + ');')\n if self.marker_position != []:\n self.page().runJavaScript('setMarkerPosition(' + str(self.marker_position[0]) + ',' + str(self.marker_position[1]) + ');')\n\n\n\n# Testing app\nif __name__ == '__main__':\n\n from PyQt5.QtWidgets import QApplication\n from PyQt5.QtWidgets import QMainWindow, QPushButton\n from PyQt5.QtCore import Qt\n\n app = QApplication(sys.argv)\n\n window = QMainWindow()\n window.setWindowTitle('GPSView - Test')\n window.resize(640, 480)\n\n view = GPSView(window)\n # view.setCurrentPosition(latitude=0, longitude=0)\n window.setCentralWidget(view)\n window.show()\n\n sys.exit(app.exec_())\n\n","sub_path":"python/libopenimu/qt/GPSView.py","file_name":"GPSView.py","file_ext":"py","file_size_in_byte":4705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"474224587","text":"#!/usr/bin/env python3\n# Copyright © 2015 Andrew Wilcox and Elizabeth Myers.\n# All rights reserved.\n# This file is part of the PyIRC 3 project. See LICENSE in the root directory\n# for licensing information.\n\n\n\"\"\"Track IRC ban modes (+beIq)\n\nIn order to be taught about new types, this extension must know the numerics\nused for ban listing.\n\"\"\"\n\n\nfrom time import time\nfrom collections import namedtuple\nfrom logging import getLogger\n\nfrom PyIRC.extension import BaseExtension\nfrom PyIRC.hook import hook, PRIORITY_LAST\nfrom PyIRC.line import Hostmask\nfrom PyIRC.numerics import Numerics\n\n\nlogger = getLogger(__name__)\n\n\nBanEntry = namedtuple(\"BanEntry\", \"string setter timestamp\")\n\n\nclass BanTrack(BaseExtension):\n\n \"\"\"Track bans and other \"list\" modes.\n\n This supports +beI, Inspircd +g, and Charybdis-style +q. Others may be\n added for other IRC daemons in the future.\n\n .. note::\n Unless you are opped, your view of modes such as +eI may be limited\n and incomplete.\n \"\"\"\n\n requires = [\"ISupport\", \"ChannelTrack\", \"BasicRFC\"]\n\n def __init__(self, base, **kwargs):\n self.base = base\n\n @hook(\"commands\", \"JOIN\", PRIORITY_LAST)\n def join(self, event):\n params = event.line.params\n logger.debug(\"Creating ban modes for channel %s\",\n params[0])\n channeltrack = self.get_extension(\"ChannelTrack\")\n channel = channeltrack.get_channel(params[0])\n\n channel.synced_list = dict()\n\n isupport = self.get_extension(\"ISupport\")\n modes = isupport.get(\"CHANMODES\")[0]\n\n for mode in modes:\n channel.modes[mode] = list()\n channel.synced_list[mode] = False\n\n self.send(\"MODE\", [channel.name, modes])\n\n @hook(\"modes\", \"mode_list\")\n def mode_list(self, event):\n line = event.line\n\n if event.param is None:\n return\n\n channeltrack = self.get_extension(\"ChannelTrack\")\n channel = channeltrack.get_channel(event.target)\n if not channel:\n # Not a channel or we don't know about it.\n return\n\n modes = channel.modes[event.mode]\n\n entry = BanEntry(event.param, event.setter, event.timestamp)\n\n # Check for existing ban\n for i, (string, _, _) in enumerate(list(modes)):\n if self.casecmp(event.param, string):\n if event.adding:\n # Update timestamp and setter\n logger.debug(\"Replacing entry: %r -> %r\",\n modes[i], entry)\n modes[i] = entry\n else:\n # Delete ban\n logger.debug(\"Removing ban: %r\", modes[i])\n del modes[i]\n\n return\n\n logger.debug(\"Adding entry: %r\", entry)\n modes.append(entry)\n\n @hook(\"modes\", \"mode_prefix\")\n def mode_prefix(self, event):\n if event.mode == 'v':\n # Voice, don't care\n return\n\n basicrfc = self.get_extension(\"BasicRFC\")\n if not self.casecmp(event.param, basicrfc.nick):\n # Not us, don't care\n return\n\n channeltrack = self.get_extension(\"ChannelTrack\")\n channel = channeltrack.get_channel(event.target)\n if not channel:\n # Not a channel or we don't know about it.\n return\n\n if event.adding:\n check = ''\n for sync, value in channel.synced_list.items():\n if not value:\n check += sync\n\n if check:\n isupport = self.get_extension(\"ISupport\")\n self.send(\"MODE\", [event.target, check])\n\n @hook(\"commands\", Numerics.RPL_ENDOFBANLIST)\n def end_ban(self, event):\n self.set_synced(event, 'b')\n\n @hook(\"commands\", Numerics.RPL_ENDOFEXCEPTLIST)\n def end_except(self, event):\n self.set_synced(event, 'e')\n\n @hook(\"commands\", Numerics.RPL_ENDOFINVEXLIST)\n def end_invex(self, event):\n self.set_synced(event, 'I')\n\n @hook(\"commands\", Numerics.RPL_ENDOFQUIETLIST)\n def end_quiet(self, event):\n self.set_synced(event, 'q')\n\n @hook(\"commands\", Numerics.ERR_ENDOFSPAMFILTERLIST)\n def end_spamfilter(self, event):\n self.set_synced(event, 'g')\n\n @hook(\"commands\", Numerics.ERR_ENDOFEXEMPTCHANOPSLIST)\n def end_exemptchanops(self, event):\n self.set_synced(event, 'X')\n\n @hook(\"commands\", Numerics.RPL_ENDOFREOPLIST)\n def end_reop(self, event):\n self.set_synced(event, 'R')\n\n @hook(\"commands\", Numerics.RPL_ENDOFAUTOOPLIST)\n def end_autoop(self, event):\n self.set_synced(event, 'w')\n\n def set_synced(self, event, mode):\n channeltrack = self.get_extension(\"ChannelTrack\")\n channel = channeltrack.get_channel(event.line.params[1])\n if not channel:\n # Not a channel or we don't know about it.\n return\n\n if mode not in channel.synced_list:\n logger.warning(\"Got bogus/invalid end of list sync for mode %s\",\n mode)\n return\n\n channel.synced_list[mode] = True\n\n","sub_path":"PyIRC/extensions/bantrack.py","file_name":"bantrack.py","file_ext":"py","file_size_in_byte":5128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"61407687","text":"import random\nimport math\nimport time\n\nwa = {} # word attributes\n\nfile = open(\"port-wiki.txt\", \"r\")\ntest_string = file.read()\nfile.close()\n\nprint(test_string[0])\n\navg_delta_val = 0.60\n\nword_memory = 10\n\nchoice_pool = 120\n\nnum_par = 50\n\nmin_random = -1\nmax_random = 1\n\nmin_change = 0.85\nmax_change = 1.15\n\nord_ignore = [10, ord(','), ord('.')]\n\ndef random_list(size = num_par, min = min_random, max = max_random, int_only = False):\n r = []\n\n for i in range(size):\n if int_only:\n r.append(random.randint(min, max))\n else:\n r.append(random.uniform(min, max))\n\n return r\n\ndef random_change_list(size = num_par, min = min_change, max = max_change, int_only = False):\n r = random_list(size, min, max, int_only)\n\n for i in range(len(r)):\n if random.uniform(-1,1) <= 0:\n r[i] = -r[i]\n\n return r\n\ndef const_list(size = num_par, val = 0):\n r = [val]*size\n return r\n\nCONST_LIST_0_1 = const_list(val = 0.1)\nCONST_LIST_1 = const_list(val = 1)\nCONST_LIST_0_05 = const_list(val = 0.05)\nCONST_LIST_NEG1 = const_list(val = -1)\n\ndef apply_change(chg, lst):\n r = []\n for i in range(len(chg)):\n r.append(sigmoid(lst[i]*chg[i]))\n\n return r\n\ndef sum_lists(a, b):\n r = []\n for i in range(len(a)):\n r.append(a[i]+b[i])\n\n return r\n\ndef compare_lists(a, b):\n d = 0\n for i in range(len(a)):\n d += (a[i]-b[i])**2\n d = math.sqrt(d)\n return d\n\ndef inverse_list(a):\n r = []\n for i in a:\n r.append(1/i)\n\n return r\n\ndef sigmoid(x):\n return 1 / (1 + math.exp(-x))\n\ndef avg_delta():\n d = 0\n l = len(list(wa.items()))\n for i in range(l):\n i1 = random.choice(list(wa.items()))\n i2 = random.choice(list(wa.items()))\n d += compare_lists(i1[1], i2[1])\n\n d = d/l\n return d\n\nchange_list_1 = random_change_list()\n\ndef read_string(s):\n current_word = ''\n last_words = []\n\n last_delta = avg_delta_val\n\n n = 0\n\n while n < len(s):\n\n if s[n] == ' ':\n\n meta_change_list = random_change_list()\n change_list = change_list_1\n change_list = apply_change(meta_change_list, change_list)\n\n if current_word in wa:\n\n if len(last_words) >= 1:\n wa[current_word] = apply_change(sum_lists(apply_change(CONST_LIST_0_1, apply_change(change_list, wa[last_words[-1]])), CONST_LIST_1), wa[current_word])\n if len(last_words) >= 2:\n wa[current_word] = apply_change(sum_lists(apply_change(CONST_LIST_0_05, apply_change(change_list, wa[last_words[-2]])), CONST_LIST_1), wa[current_word])\n\n #wa[current_word] = apply_change(random_change_list(), wa[current_word])\n else:\n wa[current_word] = random_list()\n\n if len(last_words) >= 1:\n wa[current_word] = apply_change(sum_lists(apply_change(CONST_LIST_0_1, apply_change(change_list, wa[last_words[-1]])), CONST_LIST_1), wa[current_word])\n if len(last_words) >= 2:\n wa[current_word] = apply_change(sum_lists(apply_change(CONST_LIST_0_05, apply_change(change_list, wa[last_words[-2]])), CONST_LIST_1), wa[current_word])\n\n last_words.append(current_word)\n\n current_word = ''\n\n # learning\n\n if len(last_words) >= 2:\n curr_delta = compare_lists(wa[last_words[-1]], wa[last_words[-2]])\n if curr_delta > last_delta:\n meta_change_list = inverse_list(meta_change_list)\n change_list = apply_change(meta_change_list, change_list)\n change_list = apply_change(meta_change_list, change_list)\n\n last_delta = curr_delta\n\n elif ord(s[n]) in ord_ignore:\n pass\n else:\n current_word += s[n]\n\n if len(last_words) >= word_memory:\n del(last_words[0])\n\n\n\n n+=1\n\nread_string(test_string)\n\ndef generate_sentence(l = 10, pool = choice_pool):\n time_i = time.clock()\n\n s = ''\n\n prev_words = []\n prev_words.append(random.choice(list(wa.items())))\n s += (prev_words[0][0] + ' ')\n\n total_delta = 0\n\n for i in range(l):\n min_delta = math.inf\n min_word = ('',[0])\n\n for j in range(pool):\n next_word = random.choice(list(wa.items()))\n\n delta_iter = []\n delta_sum = 0\n\n for k in range(len(prev_words)):\n delta_iter.append(compare_lists(prev_words[k][1], next_word[1]))\n for k in range(len(delta_iter)):\n delta_sum += delta_iter[k]\n\n delta_mean = delta_sum/(len(delta_iter))\n\n if delta_mean < min_delta:\n min_word = next_word\n min_delta = delta_mean\n\n total_delta += min_delta\n\n if len(prev_words) >= word_memory:\n prev_words.pop(0)\n prev_words.append(min_word)\n\n s += (min_word[0] + ' ')\n\n total_delta = total_delta/l\n\n print('delta time: ', time.clock()-time_i)\n print('total delta: ', total_delta)\n\n return s\n\nprint(generate_sentence(l = 15))\n\n#print(str(wa))\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"359868484","text":"inputF = open(\"mindiff.in\", \"r\")\noutputF = open(\"mindiff.out\", \"w\")\n\nn, m = [int(x) for x in inputF.readline().split()]\nif n == 2 and m == 0:\n exit(2) \nprint(\"YES\\n0\", file=outputF);\n\ninputF.close();\noutputF.close();\n","sub_path":"labs/lab_MST/C/C.py","file_name":"C.py","file_ext":"py","file_size_in_byte":220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"17186062","text":"import graph_tool.all as gt\nfrom sklearn.cluster import KMeans\n\ndef most_common(lst):\n return max(set(lst), key = lst.count)\n\ng = gt.load_graph('simple_sbm.gt')\nblock = g.vertex_properties[\"block\"]\nblock_old = [block[v] for v in g.vertices()]\n\n'''Start the dynamics'''\nblock_new = g.new_vertex_property('int')\n\nfor t in range(20):\n for v in g.vertices():\n ''' Get neighbours of v'''\n nbrs = list(g.get_out_neighbours(v))\n nbrs.append(int(v))\n nbrs_blocks = [block[v] for v in nbrs]\n '''Find the most common block around the vertex and assign the vertex to it'''\n block_new[v] = most_common(nbrs_blocks)\n \n '''Update the blocks'''\n for v in g.vertices():\n if block[v] != block_new[v]:\n print(\"Working\", int(v))\n block[v] = block_new[v]\n\n# Save the graph\ng.vertex_properties['block'] = block\nblock_new = [block[v] for v in g.vertices()]\nprint(block_old == block_new)\ng.save('resolved_sbm.gt')\n \n\n\n\n \n","sub_path":"Lecture2/Figures/Loops/dynamics.py","file_name":"dynamics.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"52094546","text":"import CarDetect as cd\r\nfrom imutils import paths\r\nfrom sklearn.decomposition._pca import PCA\r\n\r\npos_im_path = 'images/CarData/TrainImages/positive'\r\nneg_im_path = 'images/CarData/TrainImages/negative'\r\ntest_im_path = 'images/CarData/TestImages'\r\npos_paths = list(paths.list_files(pos_im_path, validExts='.pgm')) # 提取目录中所有图片\r\nneg_paths = list(paths.list_files(neg_im_path, validExts='.pgm'))\r\ntest_paths = list(paths.list_files(test_im_path, validExts='.pgm'))\r\n\r\ndatas, labels = cd.get_train_datas(pos_paths, neg_paths)\r\nclf = cd.get_SVM_classifier(datas, labels, 0.5)\r\n\r\ntest_data = []\r\nfor test_path in test_paths:\r\n test_data.append(cd.get_HoG_ft(cd.get_images(test_path)))\r\ntest_data = PCA(n_components=2).fit_transform(test_data)\r\ntest_labels = clf.predict(test_data)\r\nprint(test_labels)\r\nprint(len(test_labels))\r\nflag = 0\r\nfor i in range(len(test_labels)):\r\n if(test_labels[i] == 1):\r\n flag = flag + 1\r\nprint(flag)\r\nprint(\"test score :{0:.2%}\".format(flag/170))\r\n","sub_path":"testScore.py","file_name":"testScore.py","file_ext":"py","file_size_in_byte":1002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"122518434","text":"#!/usr/bin/env python3\n# Sheikh Salim 1003367\n# Foundations of Cybersecurity\n# ECB plaintext extraction skeleton file for 50.042 FCS\n\nimport argparse\n\n\ndef getInfo(headerfile):\n with open(headerfile, 'rb') as header:\n content = header.read()\n # print('Header given by ', content, ' with length ', len(content))\n return content\n\n\ndef getMostCommonBytePattern(infile, decrypted_header):\n counts = {}\n with open(infile, 'rb') as scanner:\n # moves filepointer ahead to skip header + the trailing bytes in the block (15 +1 = 16 full block)\n blocks_to_skip = int(len(decrypted_header) / 8) + 1 # in this case its 2 blocks = 16 bytes\n scanner.read(blocks_to_skip * 8)\n while True:\n scan_byte = scanner.read(8)\n if not scan_byte:\n break\n counts[scan_byte] = counts.get(scan_byte, 0) + 1\n return max(counts, key=counts.get)\n\n\ndef extract(infile, outfile, headerfile):\n decrypted_header = getInfo(headerfile)\n most_common = getMostCommonBytePattern(infile, decrypted_header)\n with open(infile, 'rb') as source, open(outfile, 'wb') as dest:\n # moves filepointer ahead to skip header + the trailing bytes in the block (15 +1 = 16 full block)\n blocks_to_skip = int(len(decrypted_header) / 8) + 1 # in this case its 2 blocks = 16 bytes\n source.read(blocks_to_skip * 8)\n padder = 8 - len(decrypted_header) % 8\n dest.write((decrypted_header.decode() + padder * '\\n').encode())\n while True:\n byte = source.read(8)\n if not byte:\n break\n # int_format = int.from_bytes(byte, byteorder='big')\n if byte == most_common:\n write_byte = b'00000000'\n else:\n write_byte = b'11111111'\n dest.write(write_byte)\n source.close()\n dest.close()\n\n\nif __name__ == \"__main__\":\n parser=argparse.ArgumentParser(description='Extract PBM pattern.')\n parser.add_argument('-i', dest='infile',help='input file, PBM encrypted format')\n parser.add_argument('-o', dest='outfile',help='output PBM file')\n parser.add_argument('-hh', dest='headerfile',help='known header file')\n\n args=parser.parse_args()\n infile=args.infile\n outfile=args.outfile\n headerfile=args.headerfile\n\n print('Reading from: %s'%infile)\n print('Reading header file from: %s'%headerfile)\n print('Writing to: %s'%outfile)\n\n success=extract(infile,outfile,headerfile)\n\n\n","sub_path":"Lab 4/extract.py","file_name":"extract.py","file_ext":"py","file_size_in_byte":2508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"493681332","text":"import django_tables2 as tables\nfrom django.utils.translation import ugettext_lazy as _\nfrom django_tables2.utils import A\n\nfrom .table_utils import build_attrs\nfrom ..models import Course\n\n\nclass CoursesTable(tables.Table):\n code = tables.LinkColumn(\n 'apuntes:view_course',\n args=[A('code')],\n attrs=build_attrs(1, 0, text_center=True),\n verbose_name=_('Code'),\n orderable=False\n )\n name = tables.LinkColumn(\n 'apuntes:view_course',\n args=[A('code')],\n attrs=build_attrs(5, 2),\n verbose_name=_('Name'),\n orderable=False\n )\n file_count = tables.Column(\n attrs=build_attrs(2, 2, text_center=True, extraclass='hidden-xs hidden-sm'),\n verbose_name=_('File Count'),\n orderable=False\n )\n view = tables.LinkColumn(\n 'apuntes:view_course',\n args=[A('code')],\n text=_('View'),\n attrs=build_attrs(1, 0, text_center=True, a_class='btn btn-primary btn-xs btn-block hidden-xs'),\n orderable=False,\n verbose_name=''\n )\n\n class Meta:\n model = Course\n # attrs = {\"class\": \"table\"}\n fields = ['code', 'name', 'file_count', 'view']\n template = 'apuntes/table.html'\n","sub_path":"apuntes/tables/CoursesTable.py","file_name":"CoursesTable.py","file_ext":"py","file_size_in_byte":1236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"575630203","text":"import csv\nimport os\nimport logging\nfrom e04dao import Save\n'''\nsingleton裝飾器\n利用python decorator修飾db class\n達到singleton design pattern的效果\n'''\ndef singletonDecorator(cls,*args,**kwargs):\n\tinstance = {}\n\tdef wrapperSingleton(*args,**kwasrgs):\n\t\tif cls not in instance:\n\t\t\tinstance[cls] = cls(*args,**kwargs)\n\t\t\tlogging.info('new instance')\n\t\treturn instance[cls]\n\treturn wrapperSingleton\n\n@singletonDecorator\nclass database(Save):\n\tdef __init__(self):\n\t\tself.__path = \"../csvFile\"\n\tdef _Save__isExist(self,name):\n\t\tspCsv = [i for i in os.listdir(self.__path) if \".csv\" in i and name in i]\n\t\tif spCsv:\n\t\t\treturn True\n\t\treturn False\n\tdef _Save__createTable(self,name):\n\t\tobj = open(os.path.join(self.__path,name['category']+\".csv\"),\"a+\")\n\t\twriter = csv.writer(obj)\n\t\twriter.writerow(name.getField())\t\n\t\treturn writer\n\tdef insert(self,item):\n\t\tif not self._Save__isExist(item['category']):\n\t\t\twriter = self._Save__createTable(item)\n\t\telse:\n\t\t\twriter = csv.writer(open(os.path.join(self.__path,item['category']+\".csv\"),\"a+\"))\n\t\tif self._Save__deDuplicate(item):\n\t\t\treturn False\n\t\twriter.writerow(item.getList())\n\t\tlogging.info(\"csv : insert data success\")\n\t\treturn True\n\tdef _Save__deDuplicate(self,item):\n\t\twith open(os.path.join(self.__path,item['category']+\".csv\"),\"r\") as f:\n\t\t\tdata = [i.split(\",\") for i in f.readlines()]\n\t\tif [i[0] for i in data if item['name'] in i]:\n\t\t\treturn True\n\t\treturn False\n\n","sub_path":"csvStore.py","file_name":"csvStore.py","file_ext":"py","file_size_in_byte":1422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"377394844","text":"\"\"\"\n@author: MiloCollaris\nUtrecht University Bachelor research\n\"\"\"\n#This document calculates the damping constand of the damped oscillation of a droplet inside an \n#acoustic levitator. The data it uses is the length of the horizontal or vertical radius. The \n#first fitting method is a fit to the standard deviation of the radius and the second fit is an\n#fit to the data directly.\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.optimize\n\ndef fit1(t, A, tau, C):\n\treturn A*np.exp(-t/tau)+C\n\ndef fit2(p, t):\n\treturn p[0]+p[1]*np.cos(p[2]*t+p[3]/360*np.pi)*np.exp(-t/p[4])+p[5]*t\n\ndef errorfunc(p, t, r):\n\treturn fit2(p,t)-r\n\n# cutout(period number, radius, translation of data, extra data so the set contains 597 images)\ndef cutout(number, r, roll, off):\n r = np.roll(r,roll) \n r = r[int(border[0 + number]):int(border[1 + number] + off)]\n return r\n\ndef fitting(number, p0, r):\n #First fitting method with std.\n b=100\n dump=np.zeros(r.size-b)\n \n for i in range(dump.size):\n \tdump[i]=np.std(r[i:i+b])\n \n p1=[0.60,100,0.01]\n params, pvar = scipy.optimize.curve_fit(fit1, np.arange(dump.size), dump, p1, maxfev=100000)\n \t \n print('Fitting1')\n print(params[1]*0.001)\n print(0.001*np.sqrt(np.diag(pvar))[1])\n \n #Second fitting method to data directly\n pfit, pcov, infodict, errmsg, success = scipy.optimize.leastsq(errorfunc, p0, args=(t, r), Dfun=None, full_output=True, ftol=1e-9, xtol=1e-9, maxfev=10000000, epsfcn=1e-10, factor=0.1)\n \n print('Fitting2')\n print(0.001*pfit[4])\n print(0.001*np.sqrt(np.diag(pcov))[4]) \n \n #plotting\n f = plt.figure(figsize = (19,4))\n\n ax1 = f.add_subplot(131)\n ax1.plot(r, '.', markersize=3)\n ax1.set_title(\"Damped oscillation of water droplet\",fontsize=19)\n ax1.set_xlabel(\"time (ms)\",fontsize=19)\n ax1.set_ylabel(\"vertical radius (mm)\",fontsize=19)\n \n ax2 = f.add_subplot(132)\n ax2.scatter(np.arange(dump.size),dump, s=1, label = 'Data')\n ax2.plot(fit1(np.arange(dump.size), params[0], params[1], params[2]),color = 'red', label = 'Fitted function')\n ax2.legend(loc='best') \n ax2.set_title(\"Std fit to the radius\",fontsize=19)\n ax2.set_xlabel(\"time (ms)\",fontsize=19)\n ax2.set_ylabel(\"$\\sigma / r_0$\",fontsize=19)\n \n ax3 = f.add_subplot(133)\n ax3.plot(r, '.', markersize=3, label = 'Data')\n ax3.plot(fit2(pfit, t), color = 'red',label = 'Fitted function')\n ax3.legend(loc='best') \n ax3.set_title('Direct fit to the radius',fontsize=19)\n ax3.set_ylabel('vertical radius (mm)',fontsize=19)\n ax3.set_xlabel('time (ms)',fontsize=19)\n plt.show()\n \n \"\"\"\n name = \"/Users/MiloCollaris/Documents/UU/BONZ/Data/Water/Plot/Watvrfitting\" + str(number) + \".png\"\n f.savefig(name)\n \"\"\"\n \n#Code starts here \nh = np.loadtxt(\"/Users/MiloCollaris/Documents/UU/BONZ/Data/Water/hradii.txt\")\nv = np.loadtxt(\"/Users/MiloCollaris/Documents/UU/BONZ/Data/Water/vradii.txt\")\n\nhr = h[:4200]\nvr = v[:4200]\nt = np.linspace(0,596, num = 597)\n\n\"\"\"\nplt.plot(vr,'.', markersize=3)\nplt.title(\"Water droplet oscillation\",fontsize=19)\nplt.xlabel(\"time (ms)\",fontsize=19)\nplt.ylabel(\"vertical radius (mm)\",fontsize=19)\nplt.show()\n\"\"\" \n \n#Cutting the data in equal regions for each period\nTdamp = 7\nperiod = np.zeros([Tdamp,int(len(hr)/Tdamp)])\nborder = np.zeros([Tdamp])\n\nfor i in range(Tdamp):\n period[i] = hr[(i*int(len(hr)/Tdamp)):(i+1)*int(len(hr)/Tdamp)]\n border[i] = i*int(len(hr)/Tdamp) + np.argmax(period[i])\n \n#Cutting the data and alling the fittings\nr0 = cutout(0, vr, 37, -9)\nfitting(0, [0.85, 0.15, 0.01, 30, 11, -0.0008], r0)\n\nr1 = cutout(1, vr, 45, 20)\n#fitting(1, [0.85, 0.15, 0.01, 10, 10, -0.0008], r1)\n\nr2 = cutout(2, vr, 21, -9)\n#fitting(2, [0.8, 0.15, 0.01, 10, 5, -0.0008], r2)\n\nr3 = cutout(3, vr, 29, 9)\n#fitting(3, [0.76, 0.15, 0.01, 30, 11, -0.0008], r3)\n\nr4 = cutout(4, vr, 17, 410)\n#fitting(4, [0.75, 0.15, 0.01, 10, 20, -0.001], r4)\n\nr5 = cutout(5, vr, -395, -410)\n#fitting(5, [0.65, 0.1, 0.001, 10, 1, -0.001], r5)\n\n\n\n\n\n\nfrom Code2pdf.code2pdf import Code2pdf\nifile,ofile,size = \"Warstd-dump.py\", \"test.pdf\", \"A4\"\npdf = Code2pdf(ifile, ofile, size) # create the Code2pdf object\npdf.init_print() # call print method to print pdf\n\n\n\n\n\n","sub_path":"Watstd-dump.py","file_name":"Watstd-dump.py","file_ext":"py","file_size_in_byte":4261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"383474239","text":"import sqlite3\n\nSTARTING_ID = 1\n\n\ndef dict_factory(cursor, row):\n i = {}\n for idx, col in enumerate(cursor.description):\n i[col[0]] = row[idx]\n return i\n\n\ndef get_table_id(table, col_name=\"id\"):\n c.execute(\"SELECT MAX({}) + 1 FROM {}\".format(col_name, table))\n i = c.fetchone()[0]\n if i is None:\n return STARTING_ID\n return i\n\n\nconn = sqlite3.connect(\"StatementGame/databases/database.db\", check_same_thread=False)\nc = conn.cursor()\nconn.row_factory = dict_factory\nd = conn.cursor()\n","sub_path":"database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"9119923","text":"# Decision Trees and Random Forests\r\n# Part 4 Random Forest and Decision tree Model building and evaluation\r\n# using a larger datasets with a small set of variables\r\n\r\nimport numpy as np \r\nimport pandas as pd \r\nfrom pandas import Series,DataFrame\r\nimport matplotlib.pyplot as plt \r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.tree import DecisionTreeClassifier\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.metrics import classification_report,confusion_matrix\r\nfrom sklearn import preprocessing\r\n\r\n\r\n# importing the data\r\ndf=pd.read_csv('C:/Users/Client/Documents/Quant/MachineLearning/Notes-master/lending_club_new_data.csv')\r\n# print(df.head())\r\n# print(df.columns)\r\n# print(df.tail())\r\n# df =['loan_amnt', 'funded_amnt', 'funded_amnt_inv', 'int_rate','installment', 'annual_inc', 'dti', 'delinq_2yrs', 'inq_last_6mths','open_acc', 'total_pymnt', 'total_pymnt_inv', 'total_rec_prncp','total_rec_int', 'total_rec_late_fee', 'recoveries','collection_recovery_fee', 'last_pymnt_amnt', 'bad_loans', 'emp_length_num', 'grade_num', 'sub_grade_num', 'delinq_2yrs_zero' 'payment_inc_ratio']\r\n\r\n# Transforming variables\r\ndf['good_loans']=df['bad_loans'].apply(lambda y: 'yes' if y==0 else 'no')\r\nX=df.drop(['bad_loans','good_loans', 'annual_inc', 'dti', 'delinq_2yrs',\r\n'inq_last_6mths','open_acc', 'total_pymnt','total_pymnt_inv', 'total_rec_prncp','total_rec_int', 'total_rec_late_fee', 'recoveries','collection_recovery_fee'],axis=1)\r\ny=df['good_loans']\r\n# print(X.shape,y.shape)\r\n\r\n# Splitting the dataset into the training and the testing datasets\r\nX_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.3,random_state=124)\r\n# print(X_train.shape,X_test.shape)\r\n\r\n# Decision tree model \r\nmodel=DecisionTreeClassifier()\r\nmodel.fit(X_train,y_train)\r\npred=model.predict(X_test)\r\nconf_mat=confusion_matrix(y_test,pred)\r\nclass_rep=classification_report(y_test,pred)\r\n\r\n# Random Forest Model \r\nrf_model=RandomForestClassifier(n_estimators=150)\r\nrf_model.fit(X_train,y_train)\r\nrf_pred=rf_model.predict(X_test)\r\nrf_conf=confusion_matrix(y_test,rf_pred)\r\nrf_class=classification_report(y_test,rf_pred)\r\n\r\nprint('---------Confusion Matrix-------------------')\r\nprint(' 1: Decision tree ')\r\nprint(conf_mat)\r\nprint(' 2: Random Forest ')\r\nprint(rf_conf)\r\n\r\n\r\nprint('---------------Classification report---------------------')\r\nprint('1: Decision tree')\r\nprint(class_rep)\r\nprint('2: Random Forest')\r\nprint(rf_class)\r\n\r\n","sub_path":"Unsupervised-Learning/Part6.py","file_name":"Part6.py","file_ext":"py","file_size_in_byte":2458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"379962886","text":"from datetime import datetime\nimport os\nimport random\nimport sys\nimport threading\n\nimport numpy as np\nimport tensorflow as tf\n\ndef _int64_feature(value):\n \"\"\"Wrapper for inserting int64 features into Example proto.\"\"\"\n if not isinstance(value, list):\n value = [value]\n return tf.train.Feature(int64_list=tf.train.Int64List(value=value))\n\ndef _float64_feature(value):\n \"\"\"Wrapper for inserting int64 features into Example proto.\"\"\"\n if not isinstance(value, list):\n value = [value]\n return tf.train.Feature(float_list=tf.train.FloatList(value=value))\n\ndef _bytes_feature(value):\n \"\"\"Wrapper for inserting bytes features into Example proto.\"\"\"\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))\n\ndef _convert_to_example(filename, image_buffer, label, height, width):\n \"\"\"Build an Example proto for an example.\n\n Args:\n filename: string, path to an image file, e.g., '/path/to/example.JPG'\n image_buffer: string, JPEG encoding of RGB image\n label: integer, identifier for the ground truth for the network\n text: string, unique human-readable, e.g. 'dog'\n height: integer, image height in pixels\n width: integer, image width in pixels\n Returns:\n Example proto\n \"\"\"\n\n colorspace = 'RGB'\n channels = 3\n image_format = 'JPEG'\n\n example = tf.train.Example(features=tf.train.Features(feature={\n 'image/height': _int64_feature(height),\n 'image/width': _int64_feature(width),\n 'image/colorspace': _bytes_feature(tf.compat.as_bytes(colorspace)),\n 'image/channels': _int64_feature(channels),\n 'image/class/label': _float64_feature(label),\n 'image/class/p1': _float64_feature(label[0:2]),\n 'image/class/p2': _float64_feature(label[2:4]),\n 'image/class/p3': _float64_feature(label[4:6]),\n 'image/class/p4': _float64_feature(label[6:8]),\n 'image/format': _bytes_feature(tf.compat.as_bytes(image_format)),\n 'image/filename': _bytes_feature(tf.compat.as_bytes(os.path.basename(filename))),\n 'image/encoded': _bytes_feature(tf.compat.as_bytes(image_buffer))}))\n return example\n\nclass BoundingImageCoder(object):\n\n def __init__(self):\n # Create a single Session to run all image coding calls.\n self._sess = tf.Session()\n\n # Initializes function that converts PNG to JPEG data.\n self._png_data = tf.placeholder(dtype=tf.string)\n image = tf.image.decode_png(self._png_data, channels=3)\n self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100)\n\n # Initializes function that decodes RGB JPEG data.\n self._decode_jpeg_data = tf.placeholder(dtype=tf.string)\n self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)\n\n def png_to_jpeg(self, image_data):\n return self._sess.run(self._png_to_jpeg,\n feed_dict={self._png_data: image_data})\n\n def decode_jpeg(self, image_data):\n image = self._sess.run(self._decode_jpeg,\n feed_dict={self._decode_jpeg_data: image_data})\n assert len(image.shape) == 3\n assert image.shape[2] == 3\n return image\n\n\ndef _is_png(filename):\n \"\"\"Determine if a file contains a PNG format image.\n\n Args:\n filename: string, path of the image file.\n\n Returns:\n boolean indicating if the image is a PNG.\n \"\"\"\n return filename.endswith(b'.png')\n\n\ndef _process_image(filename, coder):\n \"\"\"Process a single image file.\n\n Args:\n filename: string, path to an image file e.g., '/path/to/example.JPG'.\n coder: instance of ImageCoder to provide TensorFlow image coding utils.\n Returns:\n image_buffer: string, JPEG encoding of RGB image.\n height: integer, image height in pixels.\n width: integer, image width in pixels.\n \"\"\"\n # Read the image file.\n with tf.gfile.FastGFile(filename, 'rb') as f:\n image_data = f.read()\n \n # Convert any PNG to JPEG's for consistency.\n if _is_png(filename):\n print('Converting PNG to JPEG for %s' % filename)\n image_data = coder.png_to_jpeg(image_data)\n # Decode the RGB JPEG.\n image = coder.decode_jpeg(image_data)\n\n # Check that image converted to RGB\n assert len(image.shape) == 3\n height = image.shape[0]\n width = image.shape[1]\n assert image.shape[2] == 3\n\n return image_data, height, width\n\n\ndef _process_image_files_batch(coder, thread_index, ranges, name, filenames,\n labels, num_shards, output_dir):\n \"\"\"Processes and saves list of images as TFRecord in 1 thread.\n\n Args:\n coder: instance of ImageCoder to provide TensorFlow image coding utils.\n thread_index: integer, unique batch to run index is within [0, len(ranges)).\n ranges: list of pairs of integers specifying ranges of each batches to\n analyze in parallel.\n name: string, unique identifier specifying the data set\n filenames: list of strings; each string is a path to an image file\n texts: list of strings; each string is human readable, e.g. 'dog'\n labels: list of integer; each integer identifies the ground truth\n num_shards: integer number of shards for this data set.\n \"\"\"\n # Each thread produces N shards where N = int(num_shards / num_threads).\n # For instance, if num_shards = 128, and the num_threads = 2, then the first\n # thread would produce shards [0, 64).\n num_threads = len(ranges)\n assert not num_shards % num_threads\n num_shards_per_batch = int(num_shards / num_threads)\n\n shard_ranges = np.linspace(ranges[thread_index][0],\n ranges[thread_index][1],\n num_shards_per_batch + 1).astype(int)\n num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0]\n\n counter = 0\n for s in range(num_shards_per_batch):\n # Generate a sharded version of the file name, e.g. 'train-00002-of-00010'\n shard = thread_index * num_shards_per_batch + s\n output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards)\n output_file = os.path.join(output_dir, output_filename)\n writer = tf.python_io.TFRecordWriter(output_file)\n\n shard_counter = 0\n files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)\n for i in files_in_shard:\n filename = filenames[i]\n label = labels[i]\n\n try:\n image_buffer, height, width = _process_image(filename, coder)\n except Exception as e:\n print(e)\n print('SKIPPED: Unexpected error while decoding %s.' % filename)\n continue\n\n example = _convert_to_example(filename, image_buffer, label,\n height, width)\n writer.write(example.SerializeToString())\n shard_counter += 1\n counter += 1\n\n if not counter % 1000:\n print('%s [thread %d]: Processed %d of %d images in thread batch.' %\n (datetime.now(), thread_index, counter, num_files_in_thread))\n sys.stdout.flush()\n\n writer.close()\n print('%s [thread %d]: Wrote %d images to %s' %\n (datetime.now(), thread_index, shard_counter, output_file))\n sys.stdout.flush()\n shard_counter = 0\n print('%s [thread %d]: Wrote %d images to %d shards.' %\n (datetime.now(), thread_index, counter, num_files_in_thread))\n sys.stdout.flush()\n\n\ndef process_bounded_image_files(name, filenames, labels, num_shards, num_threads, output_dir):\n \"\"\"Process and save list of images as TFRecord of Example protos.\n\n Args:\n name: string, unique identifier specifying the data set\n filenames: list of strings; each string is a path to an image file\n texts: list of strings; each string is human readable, e.g. 'dog'\n labels: list of integer; each integer identifies the ground truth\n num_shards: integer number of shards for this data set.\n \"\"\"\n assert len(filenames) == len(labels)\n\n # Break all images into batches with a [ranges[i][0], ranges[i][1]].\n spacing = np.linspace(0, len(filenames), num_threads + 1).astype(np.int)\n ranges = []\n for i in range(len(spacing) - 1):\n ranges.append([spacing[i], spacing[i + 1]])\n\n # Launch a thread for each batch.\n print('Launching %d threads for spacings: %s' % (num_threads, ranges))\n sys.stdout.flush()\n\n # Create a mechanism for monitoring when all threads are finished.\n coord = tf.train.Coordinator()\n\n # Create a generic TensorFlow-based utility for converting all image codings.\n coder = BoundingImageCoder()\n\n threads = []\n for thread_index in range(len(ranges)):\n args = (coder, thread_index, ranges, name, filenames,\n labels, num_shards, output_dir)\n t = threading.Thread(target=_process_image_files_batch, args=args)\n t.start()\n threads.append(t)\n\n # Wait for all the threads to terminate.\n coord.join(threads)\n print('%s: Finished writing all %d images in data set.' %\n (datetime.now(), len(filenames)))\n sys.stdout.flush()","sub_path":"Utilities/bounded_box_record_maker.py","file_name":"bounded_box_record_maker.py","file_ext":"py","file_size_in_byte":9005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"391445577","text":"import matplotlib as mpl\nmpl.rcdefaults()\nmpl.rcParams.update(mpl.rc_params_from_file('meine-matplotlibrc'))\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport scipy.constants as const\nimport uncertainties.unumpy as unp\nfrom uncertainties import ufloat\nfrom uncertainties.unumpy import (\n nominal_values as noms,\n std_devs as stds,\n)\n\nfrom curve_fit import ucurve_fit\nfrom table import (\n make_table,\n make_SI,\n write,\n)\nU1, SkalaU1, I1, SkalaI1 = np.genfromtxt('Messergebnisse/Reihe_b.txt', unpack = True)\nU1_err = SkalaU1 * 0.015 # Fehler von 1,5%\nI1_err = SkalaI1 * 0.03 # Fehler von 3%\n\nU1_c, Skala_U1_c, I1_c, Skala_I1_c = np.genfromtxt('Messergebnisse/Reihe_c.txt', unpack = True)\nU1_c_err = Skala_U1_c * 0.015 # Fehler von 1,5%\nI1_c_err = Skala_I1_c * 0.03 # Fehler von 3%\n\nU_sin, Skala_U_sin, I_sin, Skala_I_sin = np.genfromtxt('Messergebnisse/Reihe_b_sinus.txt', unpack = True)\nU_sin_err = Skala_U_sin * 0.015 # Fehler von 1,5%\nI_sin_err = Skala_I_sin * 0.03 # Fehler von 3%\n\nU_recht, Skala_U_recht, I_recht, Skala_I_recht = np.genfromtxt('Messergebnisse/Reihe_b_rechteck.txt', unpack = True)\nU_recht_err = Skala_U_recht * 0.015 # Fehler von 1,5%\nI_recht_err = Skala_I_recht * 0.03 # Fehler von 3%\n\ndef Skalierung(Param, Skala): # Skalierung der Messergebnisse\n i=0\n for skala in Skala:\n if skala*1000 % 3 == 0: # damit die Modulorechnung auch mit mA funktioniert\n Param[i] = (Param[i]/30) * skala\n else:\n Param[i] = (Param[i]/100) * skala\n i=i+1\n return Param\n\ndef f(x, m, b):\n return m * x + b\n\nU1_ges = unp.uarray(Skalierung(U1,SkalaU1), U1_err) # Bringen den Wert und sein Fehler zusammen\nI1_ges = unp.uarray(Skalierung(I1, SkalaI1), I1_err)\nwrite('build/Tabelle_Monozelle.tex', make_table([U1_ges , I1_ges*1e3],[2,1]))\n\nU1_c_ges = unp.uarray(Skalierung(U1_c, Skala_U1_c), U1_c_err)\nI1_c_ges = unp.uarray(Skalierung(I1_c, Skala_I1_c), I1_c_err)\nwrite('build/Tabelle_Monozelle_Gegenspannung.tex', make_table([U1_c_ges , I1_c_ges*1e3],[2,1]))\n\nU_sin_ges = unp.uarray(Skalierung(U_sin,Skala_U_sin),U_sin_err)\nI_sin_ges = unp.uarray(Skalierung(I_sin,Skala_I_sin),I_sin_err)\nwrite('build/Tabelle_Sinus.tex', make_table([U_sin_ges , I_sin_ges*1e3],[2,1]))\n\nU_recht_ges = unp.uarray(Skalierung(U_recht,Skala_U_recht),U_recht_err)\nI_recht_ges = unp.uarray(Skalierung(I_recht,Skala_I_recht),I_recht_err)\nwrite('build/Tabelle_Rechteck.tex', make_table([U_recht_ges , I_recht_ges*1e3],[2,1]))\n\nprint('hallo, nah alles klar bei dir ?')\n\n\n# plot für die Monozelle\nplt.errorbar(unp.nominal_values(I1_ges)*1e3, unp.nominal_values(U1_ges), xerr = unp.std_devs(I1_ges), yerr=unp.std_devs(U1_ges), fmt='r.')\nplt.ylabel(r'$U \\:/\\: \\si{\\volt}$')\nplt.xlabel(r'$I \\:/\\: \\si{\\milli\\ampere}$')\nplt.legend(loc='best')\nplt.tight_layout(pad=0, h_pad=1.08, w_pad=1.08)\n#plt.savefig('build/plot1.pdf')\n\nparams = ucurve_fit(f, I1, U1_ges)\nm1, b1 = params\nwrite('build/m1.tex', make_SI(m1, r'\\ohm', '', 1)) # 1 signifikante Stelle\nwrite('build/m1_fuer_Latex.tex', make_SI(m1*(-1), r'\\ohm', '', 1)) # 1 signifikante Stelle\nwrite('build/b1.tex', make_SI(b1, r'\\volt', '', 1)) # 1 signifikante Stelle\n\ni = np.linspace(0.02,0.11, 100)\nplt.plot(i*1e3,f(i, noms(m1), noms(b1) ), 'b-', label='Monozelle')\nplt.savefig('build/plot1.pdf')\nplt.clf()\n\nplt.errorbar(unp.nominal_values(I1_c_ges)*1e3, unp.nominal_values(U1_c_ges), xerr = unp.std_devs(I1_c_ges), yerr=unp.std_devs(U1_c_ges), fmt='r.')\nplt.ylabel(r'$U \\:/\\: \\si{\\volt}$')\nplt.xlabel(r'$I \\:/\\: \\si{\\milli\\ampere}$')\nplt.legend(loc='best')\nplt.tight_layout(pad=0, h_pad=1.08, w_pad=1.08)\n#plt.savefig('build/plot1.pdf')\n\nparams = ucurve_fit(f, I1_c, U1_c_ges)\nm1_c, b1_c = params\nwrite('build/m1_c.tex', make_SI(m1_c, r'\\ohm', '', 1)) # 1 signifikante Stelle\nwrite('build/b1_c.tex', make_SI(b1_c, r'\\volt', '', 1)) # 1 signifikante Stelle\n\ni = np.linspace(0.025,0.2, 100)\nplt.plot(i*1e3,f(i, noms(m1_c), noms(b1_c) ), 'b-', label='Monozelle_C')\nplt.savefig('build/plot5.pdf')\nplt.clf()\n\nplt.errorbar(unp.nominal_values(I_sin_ges)*1e3, unp.nominal_values(U_sin_ges), xerr = unp.std_devs(I_sin_ges), yerr=unp.std_devs(U_sin_ges), fmt='r.')\nplt.ylabel(r'$U \\:/\\: \\si{\\volt}$')\nplt.xlabel(r'$I \\:/\\: \\si{\\milli\\ampere}$')\nplt.legend(loc='best')\nplt.tight_layout(pad=0, h_pad=1.08, w_pad=1.08)\nparams = ucurve_fit(f, I_sin, U_sin_ges)\nm_sin, b_sin = params\nwrite('build/m_sinus.tex', make_SI(m_sin, r'\\ohm', '', 1)) # 1 signifikante Stelle\nwrite('build/m_sinus_fuer_Latex.tex', make_SI(m_sin*(-1), r'\\ohm', '', 1)) # 1 signifikante Stelle\nwrite('build/b_sinus.tex', make_SI(b_sin, r'\\volt', '', 1)) # 1 signifikante Stelle\ni = np.linspace(0,0.001, 2)\nplt.plot(i*1e3,f(i, noms(m_sin), noms(b_sin) ), 'b-', label='Sinus')\nplt.savefig('build/plot2.pdf')\nplt.clf()\n\nplt.errorbar(unp.nominal_values(I_recht_ges)*1e3, unp.nominal_values(U_recht_ges), xerr = unp.std_devs(I_recht_ges), yerr=unp.std_devs(U_recht_ges), fmt='r.')\nplt.ylabel(r'$U \\:/\\: \\si{\\volt}$')\nplt.xlabel(r'$I \\:/\\: \\si{\\milli\\ampere}$')\nplt.legend(loc='best')\nplt.tight_layout(pad=0, h_pad=1.08, w_pad=1.08)\nparams = ucurve_fit(f, I_recht, U_recht_ges)\nm_recht, b_recht = params\nwrite('build/m_recht.tex', make_SI(m_recht, r'\\ohm', '', 1)) # 1 signifikante Stelle\nwrite('build/m_recht_fuer_Latex.tex', make_SI(m_recht*(-1), r'\\ohm', '', 1)) # 1 signifikante Stelle\nwrite('build/b_recht.tex', make_SI(b_recht, r'\\volt', '', 1)) # 1 signifikante Stelle\ni = np.linspace(0.001,0.007, 2)\nplt.plot(i*1e3,f(i, noms(m_recht), noms(b_recht) ), 'b-', label='Rechteck')\nplt.savefig('build/plot3.pdf')\n\n# Aufgabenteil c)\nU_k = 1.65\nR_V = 10000000\nf = (U_k*m1*(-1))/R_V\nwrite('build/Fehler_c.tex', make_SI(f*1e6, r'\\micro\\volt', '',1)) # 1 signifikante Stelle\n\ndef N(R_a):\n return (R_a * (noms(b1))**2)/((R_a + (-1)*noms(m1))**2)\nP = U1_ges*I1_ges\nR = U1_ges/I1_ges\nwrite('build/Tabelle_Leistung_Widerstand.tex', make_table([P,R],[1,1]))\n\nwrite('build/Leistung.tex', str(P)) # 1 signifikante Stelle\n#write('build/Lastwiderstand.tex', make_SI(R, r'\\ohm', '',1)) # 1 signifikante Stelle\nwrite('build/Lastwiderstand.tex', str(R))\nplt.clf()\nplt.errorbar(unp.nominal_values(R), unp.nominal_values(P), xerr = unp.std_devs(R), yerr=unp.std_devs(P), fmt='r.')\nplt.ylabel(r'$P \\:/\\: \\si{\\W}$')\nplt.xlabel(r'$Ra \\:/\\: \\si{\\ohm}$')\nplt.legend(loc='best')\nplt.grid()\nplt.tight_layout(pad=0, h_pad=1.08, w_pad=1.08)\n\ni = np.linspace(0,60, 100)\nplt.plot(i,N(i), 'b-',)\n\nplt.savefig('build/plot4.pdf')\n# t, U, U_err = np.genfromtxt('data.txt', unpack=True)\n# t *= 1e-3\n# U = 1e3 * unp.uarray(U, U_err)\n#\n# def f(t, a, b, c, d):\n# return a * np.sin(b * t + c) + d\n#\n# params = ucurve_fit(f, t, U, p0=[1, 1e3, 0, 0])\n#\n# t_plot = np.linspace(-0.5, 2 * np.pi + 0.5, 1000) * 1e-3\n# plt.plot(t_plot * 1e3, f(t_plot, *noms(params)) * 1e-3, 'b-', label='Fit')\n# plt.errorbar(t * 1e3, noms(U) * 1e-3, yerr=stds(U) * 1e-3, fmt='r_', label='Daten')\n# plt.xlim(t_plot[0] * 1e3, t_plot[-1] * 1e3)\n# plt.xlabel(r'$t \\:/\\: \\si{\\milli\\second}$')\n# plt.ylabel(r'$U \\:/\\: \\si{\\kilo\\volt}$')\n# plt.legend(loc='best')\n# plt.tight_layout(pad=0, h_pad=1.08, w_pad=1.08)\n# plt.savefig('build/loesung-plot.pdf')\n#\n# t1, t2 = np.array_split(t * 1e3, 2)\n# U1, U2 = np.array_split(U * 1e-3, 2)\n# write('build/loesung-table.tex', make_table([t1, U1, t2, U2], [3, None, 3, None]))\n#\n# a, b, c, d = params\n# write('build/loesung-a.tex', make_SI(a * 1e-3, r'\\kilo\\volt'))\n# write('build/loesung-b.tex', make_SI(b * 1e-3, r'\\kilo\\hertz'))\n# write('build/loesung-c.tex', make_SI(c, r''))\n# write('build/loesung-d.tex', make_SI(d * 1e-3, r'\\kilo\\volt'))\n","sub_path":"WS15_16/301/PythonSkript.py","file_name":"PythonSkript.py","file_ext":"py","file_size_in_byte":7640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"58340035","text":"from BinPy.Gates import *\nfrom BinPy.tools import *\nfrom nose.tools import with_setup, nottest\n\n\ndef PowerSourceTest():\n POW = PowerSource()\n a = Connector()\n\n POW.connect(a)\n print((a.state))\n if a.state != 1:\n assert False\n\n POW.disconnect(a)\n print((a.state))\n if a.state is not None:\n assert False\n\n\ndef GroundTest():\n GND = Ground()\n a = Connector()\n\n GND.connect(a)\n print((a.state))\n if a.state != 0:\n assert False\n\n GND.disconnect(a)\n print((a.state))\n if a.state is not None:\n assert False\n","sub_path":"BinPy/tests/source_tests.py","file_name":"source_tests.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"239435890","text":"### Preparation\n\n# # Connect Drive\n# from google.colab import drive\n\nfrom pathlib import Path\n\n# gdrive_dir = Path('/content/gdrive')\n# drive.mount(str(gdrive_dir))\n\n# Define data directory and make it if necessary\n# base_dir = gdrive_dir / 'My Drive/Produvia/texttoimage'\n#!rm -r /content/texttoimage\nbase_dir = Path(\".\")\nbase_dir.mkdir(parents=True, exist_ok=True)\n\nmain_dir = base_dir / \"\"\ndata_dir = main_dir / \"data\"\nmodels_dir = main_dir / \"models\"\ndamsme_dir = main_dir / \"DAMSMencoders\"\n\ndata_dir.mkdir(parents=True, exist_ok=True)\nmodels_dir.mkdir(parents=True, exist_ok=True)\n\n# Download COCO dataset\nfrom google_drive_downloader import GoogleDriveDownloader as gdd\n\nfiles = [\n {\"id\": \"1O_LtUP9sch09QH3s_EBAgLEctBQ5JBSJ\", \"dir\": data_dir / \"birds.zip\"},\n {\"id\": \"1rSnbIGNDGZeHlsUlLdahj0RJ9oo6lgH9\", \"dir\": data_dir / \"coco.zip\"},\n {\"id\": \"1GNUKjVeyWYBJ8hEU-yrfYQpDOkxEyP3V\", \"dir\": damsme_dir / \"bird.zip\"},\n {\"id\": \"1zIrXCE9F6yfbEJIbNP5-YrEe2pZcPSGJ\", \"dir\": damsme_dir / \"coco.zip\"},\n {\n \"id\": \"1lqNG75suOuR_8gjoEPYNp8VyT_ufPPig\",\n \"dir\": models_dir / \"bird_AttnGAN2.pth\",\n },\n {\n \"id\": \"1i9Xkg9nU74RAvkcqKE-rJYhjvzKAMnCi\",\n \"dir\": models_dir / \"coco_AttnGAN2.pth\",\n },\n {\n \"id\": \"19TG0JUoXurxsmZLaJ82Yo6O0UJ6aDBpg\",\n \"dir\": models_dir / \"bird_AttnDCGAN2.pth\",\n },\n]\n\nfor file in files:\n if file[\"dir\"].exists():\n print(\"Skipped downloading %s as it already exists.\" % file[\"dir\"])\n else:\n gdd.download_file_from_google_drive(\n file_id=file[\"id\"],\n dest_path=file[\"dir\"],\n unzip=file[\"dir\"].suffix == \".zip\",\n )\n\n# Change to the AttnGAN code directory.\n# import os\n# os.chdir(main_dir / 'code')\n\n# Unzip files.\n#!cd data/coco/ && unzip -qq val2014-text.zip && unzip -qq train2014-text.zip && ls\n\n# Choose the dataset.\ncoco_dataset = \"train2014\"\nwhile True:\n try:\n (data_dir / \"coco\" / \"text\").symlink_to(\n data_dir / \"coco\" / coco_dataset, target_is_directory=True\n )\n break\n except FileExistsError:\n (data_dir / \"coco\" / \"text\").unlink()\n continue\n","sub_path":"prep.py","file_name":"prep.py","file_ext":"py","file_size_in_byte":2145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"119699626","text":"from PyQt5.QtWidgets import QDialog\nfrom Ui_MainDialog import Ui_MainDialog\nclass MainDialog(QDialog):\n def __init__(self, parent=None):\n super().__init__(parent)\n ui = Ui_MainDialog()\n ui.setupUi(self)\n self.ui = ui\n self.ui.button.clicked.connect(self.button_clicked)\n self.setFixedSize(self.frameSize())\n\n def button_clicked(self):\n text = self.ui.lineEdit.text()\n if text:\n self.ui.label.setText(text)","sub_path":"pyqt/MainDialog.py","file_name":"MainDialog.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"81217159","text":"#!/usr/bin/env python3\n#\n# Copyright (c) Bo Peng and the University of Texas MD Anderson Cancer Center\n# Distributed under the terms of the 3-clause BSD License.\n\nimport os\nimport subprocess\nimport unittest\n\nfrom sos.hosts import Host\nfrom sos.targets import file_target\nfrom sos.utils import env\n\nhas_docker = True\ntry:\n subprocess.check_output('docker ps | grep test_sos', shell=True).decode()\nexcept subprocess.CalledProcessError:\n subprocess.call('sh build_test_docker.sh', shell=True)\n try:\n subprocess.check_output(\n 'docker ps | grep test_sos', shell=True).decode()\n except subprocess.CalledProcessError:\n print('Failed to set up a docker machine with sos')\n has_docker = False\n\n# if sys.platform == 'win32':\n# with open('~/docker.yml', 'r') as d:\n# cfg = d.read()\n# with open('~/docker.yml', 'w') as d:\n# d.write(cfg.replace('/home/', 'c:\\\\Users\\\\'))\n\n\nclass TestRemote(unittest.TestCase):\n def setUp(self):\n env.reset()\n # self.resetDir('~/.sos')\n self.temp_files = []\n Host.reset()\n # remove .status file left by failed workflows.\n subprocess.call('sos purge', shell=True)\n\n def tearDown(self):\n for f in self.temp_files:\n file_target(f).unlink()\n\n @unittest.skipIf(not has_docker, \"Docker container not usable\")\n def testRemoteExecute(self):\n if os.path.isfile('result_remote.txt'):\n os.remove('result_remote.txt')\n if os.path.isfile('local.txt'):\n os.remove('local.txt')\n with open('local.txt', 'w') as w:\n w.write('something')\n self.assertEqual(subprocess.call(\n 'sos push local.txt -c ~/docker.yml --to docker', shell=True), 0)\n with open('test_remote.sos', 'w') as tr:\n tr.write('''\n[10]\ninput: 'local.txt'\noutput: 'result_remote.txt'\ntask:\n\nrun:\n cp local.txt result_remote.txt\n echo 'adf' >> 'result_remote.txt'\n\n''')\n self.assertEqual(subprocess.call(\n 'sos run test_remote.sos -c ~/docker.yml -r docker -s force', shell=True), 0)\n self.assertFalse(file_target('result_remote.txt').target_exists())\n #self.assertEqual(subprocess.call('sos preview result_remote.txt -c ~/docker.yml -r docker', shell=True), 0)\n #self.assertNotEqual(subprocess.call('sos preview result_remote.txt', shell=True), 0)\n self.assertEqual(subprocess.call(\n 'sos pull result_remote.txt -c ~/docker.yml --from docker', shell=True), 0)\n self.assertTrue(file_target('result_remote.txt').target_exists())\n #self.assertEqual(subprocess.call('sos preview result_remote.txt', shell=True), 0)\n with open('result_remote.txt') as w:\n content = w.read()\n self.assertTrue('something' in content, 'Got {}'.format(content))\n self.assertTrue('adf' in content, 'Got {}'.format(content))\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"test/test_remote.py","file_name":"test_remote.py","file_ext":"py","file_size_in_byte":2951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"7439339","text":"import numpy as np\nimport copy\nfrom gym.spaces import Box\nfrom gym import spaces\nimport warnings\nfrom pettingzoo.utils.wrappers import AgentIterWrapper as PettingzooWrap\n\n\nclass BaseWrapper(PettingzooWrap):\n\n metadata = {'render.modes': ['human']}\n\n def __init__(self, env):\n '''\n Creates a wrapper around `env`. Extend this class to create changes to the space.\n '''\n super().__init__(env)\n\n self._check_wrapper_params()\n\n self._modify_spaces()\n\n def _check_wrapper_params(self):\n raise NotImplementedError()\n\n def _modify_spaces(self):\n raise NotImplementedError()\n\n def _modify_action(self, agent, action):\n raise NotImplementedError()\n\n def _modify_observation(self, agent, observation):\n raise NotImplementedError()\n\n def _update_step(self, agent, observation):\n raise NotImplementedError()\n\n def reset(self, observe=True):\n observation = super().reset(observe)\n agent = self.env.agent_selection\n\n self._update_step(agent,observation)\n if observe:\n observation = self._modify_observation(agent,observation)\n return observation\n else:\n return None\n\n def observe(self, agent):\n obs = super().observe(agent)\n observation = self._modify_observation(agent, obs)\n return observation\n\n def step(self, action, observe=True):\n agent = self.env.agent_selection\n cur_act_space = self.action_spaces[agent]\n assert not isinstance(cur_act_space,Box) or cur_act_space.shape == action.shape, \"the shape of the action {} is not equal to the shape of the action space {}\".format(action.shape,cur_act_space.shape)\n action = self._modify_action(agent, action)\n next_obs = super().step(action, observe=observe)\n\n new_agent = self.env.agent_selection\n\n self._update_step(new_agent,next_obs)\n\n if observe:\n next_obs = self._modify_observation(new_agent,next_obs)\n return next_obs\n else:\n return None\n","sub_path":"supersuit/base_aec_wrapper.py","file_name":"base_aec_wrapper.py","file_ext":"py","file_size_in_byte":2074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"110866551","text":"\"\"\"\nTest that we can communicate with arbitrary hosts via the Kubernetes\ncluster. For --docker-run this requires the --proxy argument.\n\nWe do this by setting an alias to a service in /etc/hosts on telepresence-k8s\npod. That means that connecting to the alias in the local computer should only\nwork if we're being proxied.\n\"\"\"\n\nfrom subprocess import check_call\nimport os\nimport sys\nfrom urllib.request import urlopen\nfrom urllib.error import HTTPError\n\n\ndef main():\n env = os.environ.copy()\n # Don't want torsocks messing with kubectl:\n for name in [\"LD_PRELOAD\", \"DYLD_INSERT_LIBRARIES\"]:\n if name in env:\n del env[name]\n # Add alias analiaswedefine that points at nginx Service:\n check_call([\n \"kubectl\",\n \"exec\",\n \"--container=\" + os.environ[\"TELEPRESENCE_CONTAINER\"],\n os.environ[\"TELEPRESENCE_POD\"],\n \"--\",\n \"/bin/sh\",\n \"-c\",\n (\n r\"\"\"apk add --no-cache bind-tools; \"\"\" +\n r\"\"\"echo -e \"\\n$(host -t A {} | sed 's/.* \\([.0-9]*\\)/\\1/')\"\"\" +\n r''' analiaswedefine\\n\" >> /etc/hosts; tail /etc/hosts'''\n ).format(sys.argv[1]),\n ], env=env)\n\n try:\n result = str(\n urlopen(\"http://analiaswedefine:80/\", timeout=5).read(), \"utf-8\"\n )\n assert \"nginx\" in result\n # special code indicating success:\n raise SystemExit(113)\n except (HTTPError, AssertionError):\n raise SystemExit(3)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"tests/proxy.py","file_name":"proxy.py","file_ext":"py","file_size_in_byte":1503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"159435060","text":"import scipy.signal as scp\nimport numpy as np\n\n\"\"\"\nGiven an image and pooling range, returns an image where each \"pixel\" represents the sums of the pixel values within \nthe pooling range of the original pixel.\n\"\"\"\n\ndef sumFilter(imgIn, radius):\n\n # Size of 4 means it was a vector inputted\n if np.size(radius)== 4:\n imgOut = scp.convolve2d(imgIn,(radius(2)+radius(4)+1,radius(1)+radius(3)+1), mode='same')\n if np.size(radius)== 1:\n imgOut = scp.convolve2d(imgIn,np.ones(2*radius+1), 'same')","sub_path":"util/sumFilter.py","file_name":"sumFilter.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"205906604","text":"import sys\n\nimport pygame\n\ndef run_game():\n # Initialize game and create and empty screen\n pygame.init()\n screen = pygame.display.set_mode((1200, 800))\n pygame.display.set_caption(\"keys\")\n\n # Start the main loop for the game.\n while True:\n\n #watch for keyboard events and print attributes\n for event in pygame.event.get():\n if event.type ==pygame.QUIT:\n sys.exit()\n\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_LEFT:\n print(event.key)\n\n # make the most recently drawn screen visible\n pygame.display.flip()\n\nrun_game()\n","sub_path":"keys.py","file_name":"keys.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"195698212","text":"def find(num):\n if parent[num]==num:\n return num\n parent[num]=find(parent[num])\n return parent[num]\ndef union(num1, num2):\n a=find(num1)\n b=find(num2)\n if a!=b:\n parent[a]=b\n return 1\n else:\n return 0\nfor _ in range(int(input())):\n s, p=map(int, input().split())\n x_list=list()\n node=list()\n for i in range(p):\n a, b=map(float, input().split())\n x_list.append((a, b))\n for i in range(p-1):\n for j in range(i+1, p):\n node.append((i, j, (((x_list[i][0]-x_list[j][0])**2+(x_list[i][1]-x_list[j][1])**2))))\n parent=[i for i in range(p)]\n node.sort(key=lambda x : x[-1])\n count=0\n for go, to, cost in node:\n answer=cost**(1/2)\n if union(go, to):\n count+=1\n if count==p-s:\n break\n print(\"%.2f\" %answer)\n","sub_path":"BOJ/4343.py","file_name":"4343.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"528943167","text":"#!/usr/bin/python3\n## (C) COPYRIGHT Ingenic Limited.\n## ALL RIGHTS RESERVED\n##\n## File : areas_power.py\n## Authors : slwang@aries\n## Create Time: 2017-09-06:15:47:08\n## Description:\n## \n##\nimport numpy as np\nimport sys\nimport argparse\nfrom matplotlib import pyplot as pl\n\ndef parse_args():\n \"\"\"Parse input arguments\"\"\"\n parser = argparse.ArgumentParser(description=\"reciprocal areas approximation\")\n parser.add_argument(\"--start\", dest=\"start\", help = \"set start value, default 1\", default = 1, type=float)\n parser.add_argument(\"--end\", dest = \"end\", help = \"set end value, default 10\", default = 10, type = float)\n parser.add_argument(\"--offset\", dest = \"offset\", help = \"the range of approximated interval, default 0.5\", default = 0.5, type = float)\n parser.add_argument(\"--inside_offset\", dest = \"inside_offset\", help = \"the range of approximated interval, default 0.01\", default = 0.01, type = float)\n parser.add_argument(\"--power\", dest = \"power\", help = \"the value of power, default 0.5\", default = 0.5, type = float)\n args = parser.parse_args()\n return args\n\n\"\"\"x^(power) = coefficient * x + shift_ \"\"\"\ndef main(start, end, offset, inside_offset, power):\n assert (end >0), \"the value of start and end must be positive.\"\n inside_offset =inside_offset\n left_region = np.arange(start, end, offset)\n right_region = left_region + offset\n print(\"<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\")\n print(\" x^({})=scale * x + shift\".format(power))\n print(\">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\")\n\n print(\"[Info] left_region: \", left_region)\n print(\"[Info] right_region: \", right_region)\n\n coefficient = [0] * len(left_region)\n intercept = [0] * len(left_region)\n maxerror = [0] * len(left_region)\n \n for i in range(0, len(left_region), 1):\n min_max_error = sys.maxsize\n tmp_coefficient = 0\n tmp_intercept = 0\n inside_value = np.arange(left_region[i]+inside_offset, right_region[i], inside_offset)\n for inside_region in inside_value:\n \"\"\" calculate coefficient_value\"\"\"\n tmp_coefficient =(2 * ((inside_region - left_region[i]) * (right_region[i] **(power +1) - inside_region ** (power +1))\n - (right_region[i] - inside_region) * (inside_region ** (power +1) - left_region[i] ** (power +1)))\n / ((power + 1) * (right_region[i] -inside_region) * (inside_region - left_region[i]) * (right_region[i] - left_region[i]))) \n \n \"\"\"calculate intercept_value\"\"\"\n tmp_intercept = (((right_region[i] ** 2 - inside_region ** 2) * (inside_region ** (power +1) - left_region[i] ** (power +1)) \n - (inside_region ** 2 - left_region[i] ** 2) * (right_region[i] ** (power +1) - inside_region ** (power +1))) \n / ((power +1) * (right_region[i] -inside_region) * (inside_region - left_region[i]) * (right_region[i] - left_region[i]))) \n \n \n max_error = 0.0\n for point in np.arange(left_region[i] + 0.01, right_region[i], 0.01):\n inter_error =np.fabs(point ** (power) - (tmp_coefficient * point + tmp_intercept))\n #inter_error =np.fabs(point ** (power) - (tmp_coefficient * point + tmp_intercept))/(point ** (power))\n if max_error < inter_error:\n max_error = inter_error\n if min_max_error > max_error:\n min_max_error = max_error\n coefficient[i] = tmp_coefficient\n intercept[i] = tmp_intercept\n maxerror[i] = max_error\n for i in range(0, len(coefficient), 1):\n print(\"[Info] (\", left_region[i], \", \", right_region[i], \"], \", \"\\t\", \"scale: \", coefficient[i], \" \\t\", \", shift: \", intercept[i], \" \\t\", \", max_error: \", maxerror[i] )\n input_value = np.linspace(left_region[0], right_region[-1], 100000)\n real_value = input_value ** (power)\n approx_value = [0] * len(input_value)\n for i in range(len(input_value)):\n for j in range(len(right_region)):\n if input_value[i] < right_region[j]:\n approx_value[i] = coefficient[j] * input_value[i] + intercept[j]\n break\n pl.plot(input_value, real_value, 'r', label=\"origin\")\n pl.plot( input_value, approx_value, 'b', label=\"approx\")\n pl.legend(loc=\"upper left\")\n #pl.show()\n print(\"============= Summary ================\")\n print(\"right region: \")\n for i in range(0, len(coefficient), 1):\n print(right_region[i], end=\", \")\n if i%4 == 3:\n print(\" \")\n print(\" \")\n print(\"coefficient: \")\n for i in range(0, len(coefficient), 1):\n print(coefficient[i], end=\", \")\n if i%4 == 3:\n print(\" \")\n print(\" \")\n print(\"intercept: \")\n for i in range(0, len(coefficient), 1):\n print(intercept[i], end=\", \")\n if i%4 == 3:\n print(\" \")\n\nif __name__ == \"__main__\":\n \"\"\" parse argument\"\"\"\n args = parse_args()\n start = args.start\n end = args.end\n offset = args.offset\n inside_offset = args.inside_offset\n power = args.power\n main(start, end, offset, inside_offset, power)\n","sub_path":"python/areas/areas_power.py","file_name":"areas_power.py","file_ext":"py","file_size_in_byte":5279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"226826934","text":"# -*- coding: utf-8 -*-.\r\n\r\nfrom flask import Flask, request, abort, Response\r\nfrom datetime import datetime as dt\r\nimport dateutil.parser\r\nfrom pymongo import MongoClient\r\nimport pandas as pd\r\nfrom bson import json_util\r\nimport json\r\n\r\napp = Flask(__name__)\r\n\r\n\r\ndef _connect_mongo(db, host='localhost', port=27017, username=None, password=None):\r\n \"\"\" A util for making a connection to mongo \"\"\"\r\n\r\n if username and password:\r\n mongo_uri = 'mongodb://%s:%s@%s:%s/%s' % (\r\n username, password, host, port, db)\r\n conn = MongoClient(mongo_uri)\r\n else:\r\n conn = MongoClient(host, port)\r\n\r\n return conn[db]\r\n\r\n\r\ndef _read_mongo(db, collection, query={}, query_args=None, no_id=True, df=False):\r\n \"\"\" Read from Mongo and Store into DataFrame \"\"\"\r\n\r\n # Connect to MongoDB\r\n db = _connect_mongo(db=db)\r\n\r\n # Make a query to the specific DB and Collection\r\n cursor = db[collection].find(query, query_args)\r\n\r\n if df:\r\n # Expand the cursor and construct the DataFrame\r\n df = pd.DataFrame(list(cursor))\r\n\r\n # Delete the _id\r\n if no_id and '_id' in df.keys().tolist():\r\n del df['_id']\r\n\r\n return df\r\n return list(cursor)\r\n\r\n\r\ndef _process_channel_program(shows):\r\n today = dt.today()\r\n future_shows = []\r\n for show in shows:\r\n if dateutil.parser.parse(show['date']).date() == today.date():\r\n dailyShows = []\r\n for j in xrange(len(show['dailyShows'])):\r\n if dateutil.parser.parse(show['dailyShows'][j]['endTime']) >= today:\r\n dailyShows.append(show['dailyShows'][j])\r\n show.update({'dailyShows': dailyShows})\r\n future_shows.append(show)\r\n if dateutil.parser.parse(show['date']).date() > today.date():\r\n future_shows.append(show)\r\n return future_shows\r\n\r\n\r\ndef _process_today_shows(shows):\r\n today = dt.today()\r\n for show in shows:\r\n if dateutil.parser.parse(show['date']).date() == today.date():\r\n dailyShows = []\r\n for j in xrange(len(show['dailyShows'])):\r\n if dateutil.parser.parse(show['dailyShows'][j]['endTime']) >= today:\r\n dailyShows.append(show['dailyShows'][j])\r\n return dailyShows\r\n\r\n\r\ndef _process_current_shows(shows):\r\n today = dt.today()\r\n dailyShows = []\r\n for show in shows:\r\n if dateutil.parser.parse(show['date']).date() >= today.date():\r\n for dshow in show['dailyShows']:\r\n if dateutil.parser.parse(dshow['endTime']) >= today:\r\n dailyShows.append(dshow)\r\n return dailyShows[:3]\r\n\r\n\r\ndef _process_channel_content_of(shows, showName):\r\n channel_content = []\r\n for show in shows:\r\n dailyShows = []\r\n for dshow in show['dailyShows']:\r\n if dshow['showName'].encode('utf-8') == showName.encode('utf-8'):\r\n dailyShows.append(dshow)\r\n if dailyShows:\r\n show.update({'dailyShows': dailyShows})\r\n channel_content.append(show)\r\n return channel_content\r\n\r\n\r\ndef _process_channel_content_by(shows, genre):\r\n channel_content = []\r\n for show in shows:\r\n dailyShows = []\r\n for dshow in show['dailyShows']:\r\n if dshow['genre'].encode('utf-8') == genre.encode('utf-8'):\r\n dailyShows.append(dshow)\r\n if dailyShows:\r\n show.update({'dailyShows': dailyShows})\r\n channel_content.append(show)\r\n return channel_content\r\n\r\n\r\ndef _process_shows_content_of(shows):\r\n today = dt.today()\r\n tvguide = []\r\n for show in shows:\r\n if dateutil.parser.parse(show['endTime']) >= today:\r\n tvguide.append(show)\r\n return tvguide\r\n\r\n\r\n@app.route('/api/get_channels', methods=['GET'])\r\ndef getChannelsList():\r\n channels = _read_mongo('tv_guide', 'channels', query_args={\r\n '_id': False, 'channelName': True, 'channelLogo': True})\r\n return Response(response=json.dumps(dict(result=channels), default=json_util.default),\r\n headers={'Content-Type': 'application/json; charset=utf-8'},\r\n status=200)\r\n\r\n\r\n@app.route('/api/get_channel_program', methods=['GET'])\r\ndef getChannelProgram():\r\n channelName = request.args.get('name')\r\n if (not channelName):\r\n abort(500)\r\n channelProgram = _read_mongo('tv_guide', 'channels', {\r\n 'channelName': channelName}, {'_id': False}, df=True)\r\n channelProgram['shows'] = channelProgram['shows'].apply(\r\n _process_channel_program, 1)\r\n channelProgram = channelProgram.to_dict('records')\r\n return Response(response=json.dumps(dict(result=channelProgram), default=json_util.default),\r\n headers={'Content-Type': 'application/json; charset=utf-8'},\r\n status=200)\r\n\r\n\r\n@app.route('/api/get_today_shows', methods=['GET'])\r\ndef getTodayShows():\r\n channels = _read_mongo('tv_guide', 'channels', df=True)\r\n channels['shows'] = channels['shows'].apply(_process_today_shows, 1)\r\n channels = channels[channels['shows'].str.len() !=\r\n 0].reset_index(drop=True)\r\n channels = channels.to_dict('records')\r\n return Response(response=json.dumps(dict(result=channels), default=json_util.default),\r\n headers={'Content-Type': 'application/json; charset=utf-8'},\r\n status=200)\r\n\r\n\r\n@app.route('/api/get_current_shows', methods=['GET'])\r\ndef getCurrentShows():\r\n channels = _read_mongo('tv_guide', 'channels', df=True)\r\n channels['shows'] = channels['shows'].apply(_process_current_shows, 1)\r\n channels = channels.dropna(0, subset=['shows']).reset_index(drop=True)\r\n channels = channels.to_dict('records')\r\n return Response(response=json.dumps(dict(result=channels), default=json_util.default),\r\n headers={'Content-Type': 'application/json; charset=utf-8'},\r\n status=200)\r\n\r\n\r\n@app.route('/api/get_channel_content_of', methods=['GET'])\r\ndef getChannelContentOf():\r\n showName = request.args.get('name')\r\n if (not showName):\r\n abort(500)\r\n channels = _read_mongo('tv_guide', 'channels', df=True)\r\n channels['shows'] = channels['shows'].apply(\r\n lambda x: _process_channel_content_of(x, showName), 1)\r\n channels = channels[channels['shows'].str.len() !=\r\n 0].reset_index(drop=True)\r\n channels = channels.to_dict('records')\r\n return Response(response=json.dumps(dict(result=channels), default=json_util.default),\r\n headers={'Content-Type': 'application/json; charset=utf-8'},\r\n status=200)\r\n\r\n\r\n@app.route('/api/get_channel_content_by', methods=['GET'])\r\ndef getChannelContentBy():\r\n genre = request.args.get('genre')\r\n if (not genre):\r\n abort(500)\r\n channels = _read_mongo('tv_guide', 'channels', df=True)\r\n channels['shows'] = channels['shows'].apply(\r\n lambda x: _process_channel_content_by(x, genre), 1)\r\n channels = channels[channels['shows'].str.len() !=\r\n 0].reset_index(drop=True)\r\n channels = channels.to_dict('records')\r\n return Response(response=json.dumps(dict(result=channels), default=json_util.default),\r\n headers={'Content-Type': 'application/json; charset=utf-8'},\r\n status=200)\r\n\r\n\r\n@app.route('/api/get_show_content_of', methods=['GET'])\r\ndef getShowContentOf():\r\n showName = request.args.get('name')\r\n if (not showName):\r\n abort(500)\r\n show = _read_mongo('tv_guide', 'shows', {\r\n 'showName': showName}, {'_id': False}, df=True)\r\n show['tvguide'] = show['tvguide'].apply(_process_shows_content_of, 1)\r\n youtube_show = _read_mongo('tv_guide', 'youtube_shows', {\r\n 'showName': showName}, {'_id': False}, df=True)\r\n if(len(youtube_show.index) > 0):\r\n show = pd.merge(show,\r\n youtube_show,\r\n left_on=['showName'],\r\n right_on=['showName'],\r\n how='left',\r\n sort=False)\r\n show = show.to_dict('records')\r\n return Response(response=json.dumps(dict(result=show), default=json_util.default),\r\n headers={'Content-Type': 'application/json; charset=utf-8'},\r\n status=200)\r\n\r\n\r\n@app.route('/api/get_show_content_by', methods=['GET'])\r\ndef getShowContentBy():\r\n showType = request.args.get('type')\r\n if showType:\r\n show = _read_mongo('tv_guide', 'shows', {'showType': showType}, {\r\n '_id': False, 'showName': True, 'showImage': True})\r\n else:\r\n show = _read_mongo('tv_guide', 'shows', {}, {\r\n '_id': False, 'showName': True, 'showImage': True})\r\n return Response(response=json.dumps(dict(result=show), default=json_util.default),\r\n headers={'Content-Type': 'application/json; charset=utf-8'},\r\n status=200)\r\n\r\n\r\nif __name__ == '__main__':\r\n app.run('0.0.0.0')\r\n","sub_path":"run_app.py","file_name":"run_app.py","file_ext":"py","file_size_in_byte":9076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"564586252","text":"# # 예제 0~1000 정수내에서 (i * 2) + 263 / 18 적용해서 출력\n# for i in range (1000): \n# print((i*2)+263/18)\n\n# # 예제 0~100 정수내에서 2의 승수만 출력\nfor i in range (1,10):\n a = pow(2,i)\n if (a < 100):\n print (a)\n# # 예제 0~100 정수내에서 3의 승수만 출력\nfor i in range (1,10):\n a = pow(3,i)\n if (a < 100):\n print (a)\n# # 예제 24와 36의 최대공약수\nfrom math import gcd\nprint(gcd(24,36))\n\n# 예제 12과 9의 최소공배수\nprint(12*9//gcd(12,9))\n\n# 예제 구구단 만들기 (3 * 1 = 3 '\\n' 3 * 2 = 6 ...)\nfor i in range (1,10):\n for a in range (1,10):\n print (\"{} x {} = {}\\n\".format(i,a,i*a))\n","sub_path":"back_python_1_basic1/1_python_grammar1_min/8.py","file_name":"8.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"579916137","text":"\"\"\"\nScript for outputting the data from a simulation batch and the graphs.\nIf not running from a command prompt, change the value of:\n the \"settingsFilePath\" variable below to access different data directories,\n the value of \"batchID\" to access the data from a specific batch,\n the value of \"evaluateSim\" to output the graphs for a specific simulation in a batch.\n\"\"\"\n\n# Settings\nevaluateSim = None# Simulation number to display results for. Leave as None for best and worst simulations.\nbatchID = None# Batch to read data from\ndirectoryPath = \"./OliverLodge/OliverLodge\"# Include the name of the file in the path but not the \".properties\" extention!\n\n\n\n# External Imports\nfrom matplotlib import pyplot as plt\nimport numpy as np\nimport os\nimport sys\n\n# Project Imports\nfrom CustomDataTypes import *\nfrom CustomExeptions import *\nfrom GraphingClassFile import GraphingClass\n\n\n\n# Make the directory setting an absolute filepath\ndirectoryPath = os.path.abspath(directoryPath)\n\n\n\n# Read filepath from console arguments\nif len(sys.argv) > 1:\n if sys.argv[1] not in [\"None\", \"none\", \"Null\", \"null\"]:\n evaluateSim = int(sys.argv[1])\n\n if len(sys.argv) > 2:\n if sys.argv[2] not in [\"None\", \"none\", \"Null\", \"null\"]:\n batchID = sys.argv[2]\n\n if len(sys.argv) > 3:\n if sys.argv[3] not in [\"None\", \"none\", \"Null\", \"null\"]:\n if os.path.isfile(os.path.abspath(sys.argv[3]) + \".properties\"):\n directoryPath = sys.argv[3]\n\n else:\n raise NoPathExistsException(sys.argv[1] + \".properties\")\n\n\n\n# Output the location of the specified data directory to inform the user\nprint(\"Using data from directory: \\\"{}\\\"\".format(os.path.abspath(os.path.split(directoryPath)[0])))\n\n\n\n#- Load logs and graph data\nDirectoryManager.Initialise(directoryPath)\n\ndata, times, positions, properties = DirectoryManager.ReadLogs()\n\n\n\n#- Initialise classes reliant on data\nTickTimer.Initialise(properties.TotalTicks, properties.SecondsPerTick)\n\n\nif __name__ == \"__main__\":\n#- Output the raw data from the properties file\n print(\"Results:\")\n for index, item in enumerate(data):\n if index == 0:\n print(\" Batch ID = \" + str(item))\n else:\n print(\" \" + DirectoryManager.batchDataProperties[index - 1][:-1].replace(\"_\", \" \").capitalize() + \" = \" + str(item))\n\n\n\n#- Output graphs\n # General data\n simAverageData = [GraphingClass.averageWaitingTimes([data[0:2] for data in simulation], TickTimer.TimeUnit.Hours) for simulation in times]\n simAverageTimes, simTimeIntervals = [simulation[0] for simulation in simAverageData], [simulation[1] for simulation in simAverageData]\n \n averageData = []\n for i in range(len(simAverageTimes)):\n for j in range(len(simAverageTimes[i])):\n averageData.append([TickTimer.GetTicks(simTimeIntervals[i][j] * 3600, True), simAverageTimes[i][j]])\n\n GraphingClass.Distribution([record[1] for record in averageData], None, True)\n\n GraphingClass.waitingTimeBarChart(averageData, TickTimer.TimeUnit.Hours, True, 5)\n\n\n\n # Selected or deafult simulation data\n if evaluateSim != None:\n #- Selected sim\n # Lift Location - Disabled as not compatable with Juypter Notebooks. Uncomment and run file or .bat file from console to access graph\n #GraphingClass.LiftLocation(positions[evaluateSim], 0, \"Simulation \" + str(evaluateSim), properties.MinimumFloor, properties.MaximumFloor)#tick, lift, current, dest.\n \n # Average waiting time histogram\n GraphingClass.Distribution([record[1] for record in times[evaluateSim]])\n\n # Average waiting time each hour\n GraphingClass.waitingTimeBarChart([record[0:2] for record in times[evaluateSim]], TickTimer.TimeUnit.Hours, False, 5)\n\n\n\n else:# Do both best and worst sims\n #- Best\n # Lift Location - Disabled as not compatable with Juypter Notebooks. Uncomment and run file or .bat file from console to access graph\n #GraphingClass.LiftLocation(positions[data.BestSim], 0, \"Best Simulation\", properties.MinimumFloor, properties.MaximumFloor)\n\n # Average waiting time histogram\n GraphingClass.Distribution([record[1] for record in times[data.BestSim]])\n\n # Average waiting time each hour\n GraphingClass.waitingTimeBarChart([record[0:2] for record in times[data.BestSim]], TickTimer.TimeUnit.Hours, False, 5)\n\n #- Worst\n # Lift Location - Disabled as not compatable with Juypter Notebooks. Uncomment and run file or .bat file from console to access graph\n #GraphingClass.LiftLocation(positions[data.WorstSim], 0, \"Worst Simulation\", properties.MinimumFloor, properties.MaximumFloor)\n\n # Average waiting time histogram\n GraphingClass.Distribution([record[1] for record in times[data.WorstSim]])\n\n # Average waiting time each hour\n GraphingClass.waitingTimeBarChart([record[0:2] for record in times[data.WorstSim]], TickTimer.TimeUnit.Hours, False, 5)","sub_path":"LiftSim/LiftSim/Results.py","file_name":"Results.py","file_ext":"py","file_size_in_byte":5033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"168619638","text":"\"\"\"\nFractal triangles\n\"\"\"\nsize(800, 800)\nw, h = 700, 700\ntranslate(w/2, h/3)\nnoStroke()\n\nimport math\nimport random\n\nSIDE = 350.\nRAN = 0\n\nclass Tri(object):\n def __init__(self, level, scale_, scale_factor):\n print(\"Scale={}\".format(scale_)) \n self.a = (SIDE+random.randint(-RAN, RAN))*scale_\n self.b = (SIDE+random.randint(-RAN, RAN))*scale_\n self.c = (SIDE+random.randint(-RAN, RAN))*scale_\n self.color = color(random.randint(40, 168), random.randint(40, 168), random.randint(40, 168), )\n self.scale = scale_\n self.level = level\n self.children = []\n if level > 1:\n self.children = [Tri(level-1, scale_*scale_factor, scale_factor) for i in range(3)]\n \n \ndef draw_tri(x, y, tri):\n \"\"\"\n Draws triangle with centre of gravity at x, y\n \"\"\"\n \n a, b, c = tri.a, tri.b, tri.c\n \n # # Calculations\n x1, y1 = a, 0\n # find angle\n # http://www.teacherschoice.com.au/Maths_Library/Trigonometry/solve_trig_SSS.htm\n beta = math.acos((a**2+c**2-b**2)/(2*a*c))\n x2, y2 = b*cos(beta), b*sin(beta)\n\n\n # # center of gravity of triangle\n # http://www.wikihow.com/Calculate-the-Center-of-Gravity-of-a-Triangle\n xg, yg = (0+x1+x2)/3, (0+y1+y2)/3\n # Draws point to mark centre of gravity\n\n # Draws triangle\n fill(tri.color)\n\n pushMatrix()\n translate(x-xg, y-yg)\n triangle(0, 0, x1, y1, x2, y2)\n\n if tri.level > 1:\n draw_tri(0, 0, tri.children[0])\n draw_tri(x1, y1, tri.children[1])\n draw_tri(x2, y2, tri.children[2])\n \n popMatrix()\n\n\nnlevels=8\nscale_=1.\nscale_factor=0.5\ntri = Tri(nlevels, scale_, scale_factor)\ndraw_tri(0, 0, tri)","sub_path":"processing/triangulo/tri2/tri2.pyde","file_name":"tri2.pyde","file_ext":"pyde","file_size_in_byte":1714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"274832616","text":"N,K=[int(i)for i in input().split()]\n\ndef apply(use, S):\n trim = \"\"\n res = \"\"\n for i in range(N):\n if use[i]:\n trim+=S[i]\n trim = list(trim)\n trim.sort()\n cnt=0\n for i in range(N):\n if use[i]:\n res+=trim[cnt]\n cnt+=1\n else:\n res+=S[i]\n return res\n\n# input\nS=input()\nuse=[False for i in range(N)]\n\n# search\nfor i in range(N):\n if use[i]:\n continue\n if K==0:\n break\n now = S[i]\n idx=i\n for j in range(i+1, N):\n if not use[j] and now>S[j]:\n now=S[j]\n idx=j\n if K>=2 and i!=idx:\n use[i]=True\n use[idx]=True\n K-=2\n continue\n tres = apply(use, S)\n use[i] = True\n nres = apply(use, S)\n if K>=1 and tres > nres:\n K-=1\n else:\n use[i] = False\n\nprint(apply(use,S))\n","sub_path":"atcoder/abc/000/abc009c.py","file_name":"abc009c.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"101758786","text":"# encoding: utf-8\n\"\"\"\nannounce/vpn.py\n\nCreated by Thomas Mangin on 2017-07-05.\nCopyright (c) 2009-2017 Exa Networks. All rights reserved.\nLicense: 3-clause BSD. (See the COPYRIGHT file)\n\"\"\"\n\nfrom exabgp.protocol.ip import NoNextHop\n\nfrom exabgp.rib.change import Change\n\nfrom exabgp.bgp.message import OUT\n\nfrom exabgp.protocol.family import AFI\nfrom exabgp.protocol.family import SAFI\n\nfrom exabgp.bgp.message.update.nlri import IPVPN\nfrom exabgp.bgp.message.update.nlri.cidr import CIDR\nfrom exabgp.bgp.message.update.attribute import Attributes\n\nfrom exabgp.configuration.announce.label import ParseLabel\n\nfrom exabgp.configuration.static.parser import prefix\nfrom exabgp.configuration.static.mpls import route_distinguisher\n\n\nclass ParseVPN (ParseLabel):\n\t# put next-hop first as it is a requirement atm\n\tdefinition = [\n\t\t' (optional) rd 255.255.255.255:65535|65535:65536|65536:65535;\\n',\n\t] + ParseLabel.definition\n\n\tsyntax = \\\n\t\t' / { ' \\\n\t\t'\\n ' + ' ;\\n '.join(definition) + '\\n}'\n\n\tknown = dict(ParseLabel.known,**{\n\t\t'rd': route_distinguisher,\n\t})\n\n\taction = dict(ParseLabel.action,**{\n\t\t'rd': 'nlri-set',\n\t})\n\n\tassign = dict(ParseLabel.assign,**{\n\t\t'rd': 'rd',\n\t})\n\n\tname = 'vpn'\n\tafi = None\n\n\tdef __init__ (self, tokeniser, scope, error, logger):\n\t\tParseLabel.__init__(self,tokeniser,scope,error,logger)\n\n\tdef clear (self):\n\t\treturn True\n\n\tdef _check (self):\n\t\tif not self.check(self.scope.get(self.name),self.afi):\n\t\t\treturn self.error.set(self.syntax)\n\t\treturn True\n\n\t@staticmethod\n\tdef check (change,afi):\n\t\tif change.nlri.nexthop is NoNextHop \\\n\t\t\tand change.nlri.action == OUT.ANNOUNCE \\\n\t\t\tand change.nlri.afi == afi \\\n\t\t\tand change.nlri.safi in (SAFI.unicast,SAFI.multicast):\n\t\t\treturn False\n\t\treturn True\n\n\ndef ip_vpn (tokeniser,afi,safi):\n\tipmask = prefix(tokeniser)\n\n\tnlri = IPVPN(afi,safi,OUT.ANNOUNCE)\n\tnlri.cidr = CIDR(ipmask.pack(),ipmask.mask)\n\n\tchange = Change(\n\t\tnlri,\n\t\tAttributes()\n\t)\n\n\twhile True:\n\t\tcommand = tokeniser()\n\n\t\tif not command:\n\t\t\tbreak\n\n\t\taction = ParseVPN.action.get(command,'')\n\n\t\tif action == 'attribute-add':\n\t\t\tchange.attributes.add(ParseVPN.known[command](tokeniser))\n\t\telif action == 'nlri-set':\n\t\t\tchange.nlri.assign(ParseVPN.assign[command],ParseVPN.known[command](tokeniser))\n\t\telif action == 'nexthop-and-attribute':\n\t\t\tnexthop,attribute = ParseVPN.known[command](tokeniser)\n\t\t\tchange.nlri.nexthop = nexthop\n\t\t\tchange.attributes.add(attribute)\n\t\telse:\n\t\t\traise ValueError('route: unknown command \"%s\"' % command)\n\n\treturn [change]\n\n\nclass ParseIPv4VPN (ParseVPN):\n\tname = 'ipv4'\n\tafi = AFI.ipv4\n\n\n@ParseIPv4VPN.register('mpls-vpn','extend-name',True)\ndef mpls_vpn_v4 (tokeniser):\n\treturn ip_vpn(tokeniser,AFI.ipv4,SAFI.unicast)\n\n\nclass ParseIPv6VPN (ParseVPN):\n\tname = 'ipv6'\n\tafi = AFI.ipv6\n\n\n@ParseIPv6VPN.register('mpls-vpn','extend-name',True)\ndef mpls_vpn_v6 (tokeniser):\n\treturn ip_vpn(tokeniser,AFI.ipv6,SAFI.unicast)\n","sub_path":"lib/exabgp/configuration/announce/vpn.py","file_name":"vpn.py","file_ext":"py","file_size_in_byte":2957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"359081094","text":"###\n### Python Restful client\n### Author: Ojvar\n###\n\nfrom datetime import timedelta\nfrom khayyam import *\nfrom rest_client import loadTrades\n\n##\n## @brief Main loop\n##\ndef main ():\n print ('Operation started');\n\n # Calculate yesterday date value\n date = JalaliDatetime.now();\n date += - timedelta (1);\n date = date.strftime ('%Y%m%d');\n\n print ('Date : %s' % date);\n print ('Load Trades & Save into database');\n\n loadTrades (date);\n\n print ('Operation finished');\n\n\n\n## Start point\nif (__name__ == '__main__'):\n print (\"\"\"\n######################\n### Restful Client ###\n### Author: Ojvar ###\n######################\n\"\"\");\n\n main ();\n","sub_path":"pyClient/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"211569390","text":"#! /usr/bin/env python\nimport os\nimport math\nimport sys\nimport argparse\nimport gzip\nimport numpy as np\nfrom methylbed_utils import MethRead,SnifflesEntry,make_coord,read_bam\nimport pysam\nimport re\nimport multiprocessing as mp\nfrom multiprocess_utils import listener,init_mp,close_mp\nimport time\nstart_time = time.time()\n\ndef parseArgs() :\n # dir of source code\n srcpath=sys.argv[0]\n srcdir=os.path.dirname(os.path.abspath(srcpath))\n # parser\n parser = argparse.ArgumentParser(description='parse methylation around SVs')\n parser.add_argument('-t','--threads',type=int,required=False,default=2, \n help=\"number of parallel processes (default : 2 )\")\n parser.add_argument('-v','--verbose', action='store_true',default=False,\n help=\"verbose output\")\n parser.add_argument('-s','--sniffles',type=argparse.FileType('r'),required=False, \n default=sys.stdin,help=\"sniffles vcf output (default stdin)\")\n parser.add_argument('-b','--bam',type=os.path.abspath,required=True,\n help=\"bam file\")\n parser.add_argument('-c','--cpg',type=os.path.abspath,required=True,\n help=\"gpc methylation bed - sorted, bgzipped, and indexed\")\n parser.add_argument('-w','--window',type=int,required=False,\n default=200,help=\"window for methylation\")\n parser.add_argument('-o','--output',type=argparse.FileType('w'),required=False, \n default = sys.stdout,help=\"output path (default : stdout)\")\n # parse args\n args = parser.parse_args()\n args.srcdir=srcdir\n return args\n\ndef read_tabix(fpath,window) :\n with pysam.TabixFile(fpath) as tabix :\n entries = [x for x in tabix.fetch(window)]\n reads = [MethRead(x) for x in entries]\n rdict = dict()\n for read in reads :\n try :\n rdict[read.qname].append(read)\n except :\n rdict[read.qname] = [read]\n return rdict\n\ndef getRegMeth(read_list,start,end) :\n callarray = np.concatenate([ x.callarray for x in read_list])\n regidx = np.where(np.logical_and(callarray[:,0]>=start, callarray[:,0]<=end))\n callreg = callarray[regidx,1].flatten()\n sigidx = np.argwhere(callreg != -1)\n sigreg = callreg[sigidx]\n methcount = np.count_nonzero(sigreg)\n return len(sigreg),methcount\n \ndef parse_methylation(q,sv,cpg,gpc,start,end,tag) :\n qname = cpg[0].qname\n taglist = tag.split(\"_\")\n cpgcov,cpgmeth = getRegMeth(cpg,start,end)\n gpccov,gpcmeth = getRegMeth(gpc,start,end)\n if (gpccov == 0 or cpgcov == 0) : return\n line = '\\t'.join([str(x) for x in [ sv.chrom,sv.pos,sv.pos,\n sv.info[\"CHR2\"],sv.info[\"END\"],sv.info[\"END\"],\n qname,sv.id,\".\",\".\",taglist[1],taglist[0],cpgmeth,gpcmeth,cpgcov,gpccov]])\n q.put((qname+sv.id+tag,line))\n\ndef TRA_methylation(sv,bamfn,cpgfn,gpcfn,methwin,verbose,q) :\n # windows for fetching reads - \n win = 300\n win1 = make_coord(sv.chrom,sv.pos-win,sv.pos+win)\n win2 = make_coord(sv.info[\"CHR2\"],sv.info[\"END\"]-win,sv.info[\"END\"]+win)\n # fetch reads\n try : \n bam_dicts = [ read_bam(bamfn,w) for w in [win1,win2] ]\n except ValueError : \n return\n cpg_dicts = [ read_tabix(cpgfn,w) for w in [win1,win2] ]\n gpc_dicts = [ read_tabix(gpcfn,w) for w in [win1,win2] ]\n qnames = list(bam_dicts[0].keys()) + list(bam_dicts[1].keys())\n for qname in set(qnames) :\n if (qname in sv.rnames or \n ( qname in bam_dicts[0] \n and qname in bam_dicts[1] )):\n # this read is an SV and has both parts\n if qname in cpg_dicts[0].keys() and qname in gpc_dicts[0].keys() :\n cpg1 = cpg_dicts[0][qname]\n gpc1 = gpc_dicts[0][qname]\n parse_methylation(q,sv,cpg1,gpc1,sv.pos-methwin,sv.pos+methwin,\"destination_SV\")\n if qname in cpg_dicts[1].keys() and qname in gpc_dicts[1].keys() :\n cpg = cpg_dicts[1][qname]\n gpc = gpc_dicts[1][qname]\n start,end,tag = (sv.info[\"END\"]-methwin,sv.info[\"END\"]+methwin,\"origin_SV\")\n else :\n continue\n elif ( qname in bam_dicts[1] ) :\n # non-SV origin\n if qname in cpg_dicts[1].keys() and qname in gpc_dicts[1].keys() :\n cpg = cpg_dicts[1][qname]\n gpc = gpc_dicts[1][qname]\n else : \n continue\n coords = [ pos for x in bam_dicts[1][qname] for pos in [x.reference_start,x.reference_end] ]\n start,end = (sv.info[\"END\"]-methwin,sv.info[\"END\"]+methwin)\n tag = \"origin_nonSV\"\n elif ( qname in bam_dicts[0] ) :\n # non-SV destination\n try :\n cpg = cpg_dicts[0][qname]\n gpc = gpc_dicts[0][qname]\n except :\n continue\n coords = [ pos for x in bam_dicts[0][qname] for pos in [x.reference_start,x.reference_end] ]\n start,end = (sv.pos-methwin,sv.pos+methwin)\n bp = [ x for x in coords if x >= sv.pos-win and x <= sv.pos+win ]\n tag = \"destination_nonSV\"\n parse_methylation(q,sv,cpg,gpc,start,end,tag)\n\nif __name__==\"__main__\":\n args=parseArgs()\n svlines = [SnifflesEntry(x) for x in args.sniffles.readlines() if x[0]!=\"#\"]\n svreads = [ read for x in svlines for read in x.rnames]\n fetchwin = 2000\n if len(svreads) == 0 : \n windows = [ \"scaffold_16:15300006-15310006\" ]\n else : \n windows = [ make_coord(x.chrom,x.pos-fetchwin,x.pos+fetchwin) for x in svlines ]\n meth = read_tabix(args.cpg,windows[0])\n# bams = read_bam(args.bam,windows[0])\n svmeth = list()\n if len(svreads) >= 0 : # change this to >= to make it inclusive\n for qname in meth.keys() :\n svmeth.append(meth[qname])\n else : \n for qname in svreads :\n if qname in meth.keys() :\n svmeth.append(meth[qname])\n for readlist in svmeth :\n for read in readlist :\n print(\"\\t\".join(read.fields),file=args.output)\n \n if args.verbose : print(\"time elapsed : {} seconds\".format(time.time()-start_time),file=sys.stderr)\n","sub_path":"scripts/SVmethylation.py","file_name":"SVmethylation.py","file_ext":"py","file_size_in_byte":6144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"632910081","text":"import hashlib\nimport logging\nimport os\nimport tempfile\nimport threading\nfrom time import localtime\n\nfrom qtpy.QtWidgets import QApplication, QHBoxLayout\nfrom qtconsole.rich_jupyter_widget import RichJupyterWidget\nfrom qtconsole.manager import QtKernelManager\n\nfrom ..utils import TyphonBase, make_identifier\n\nlogger = logging.getLogger(__name__)\n\n\nclass TyphonConsole(TyphonBase):\n \"\"\"\n IPython Widget for Typhon Display\n\n This widget handles starting a ``JupyterKernel`` and connecting an IPython\n console in which the user can type Python commands. It is important to note\n that the kernel in which commands are executed is a completely separate\n process. This protects the user against locking themselves out of the GUI,\n but makes it difficult to pass the Device.\n\n To get around this caveat, this widget uses ``happi`` to pass the Device\n between the processes. This is not a strict requirement, but if ``happi``\n is not installed, users will need to create a custom ``add_device`` method\n if they want their devices loaded in both the GUI and console.\n \"\"\"\n def __init__(self, parent=None):\n super().__init__(parent=parent)\n # Setup widget\n self.kernel = RichJupyterWidget()\n self.setLayout(QHBoxLayout())\n self.layout().setContentsMargins(0, 0, 0, 0)\n self.layout().addWidget(self.kernel)\n # Create a Kernel\n logger.debug(\"Starting Jupyter Kernel ...\")\n kernel_manager = QtKernelManager(kernel_name='python3')\n kernel_manager.start_kernel()\n kernel_client = kernel_manager.client()\n kernel_client.start_channels()\n self.kernel.kernel_manager = kernel_manager\n self.kernel.kernel_client = kernel_client\n # Ensure we shutdown the kernel\n app = QApplication.instance()\n app.aboutToQuit.connect(self.shutdown)\n # Styling\n self.kernel.syntax_style = 'monokai'\n self.kernel.set_default_style(colors='Linux')\n # Ensure cleanup\n app = QApplication.instance()\n app.aboutToQuit.connect(self.shutdown)\n\n def sizeHint(self):\n default = super().sizeHint()\n default.setWidth(600)\n return default\n\n def shutdown(self):\n \"\"\"Shutdown the Jupyter Kernel\"\"\"\n client = self.kernel.kernel_client\n if client.channels_running:\n logger.debug(\"Stopping Jupyter Client\")\n # Stop channels in the background\n t = threading.Thread(target=client.stop_channels)\n t.start()\n self.kernel.kernel_manager.shutdown_kernel()\n else:\n logger.debug(\"Kernel is already shutdown.\")\n\n\ntry:\n import happi\n\n def add_device(obj, device):\n # Needs metadata\n if not hasattr(device, 'md'):\n logger.error(\"Device %r has no stored metadata. \"\n \"Unable to load in TyphonConsole\",\n device)\n return\n # Create a temporary file\n name = hashlib.md5(str(localtime()).encode('utf-8')).hexdigest()\n name = os.path.join(tempfile.gettempdir(), name)\n try:\n # Dump the device in the tempfile\n client = happi.Client(path=name, initialize=True)\n client.add_device(device.md)\n # Create a valid Python identifier\n python_name = make_identifier(device.md.name)\n # Create the script to load the device\n load_script = (\n f'import happi; '\n f'from happi.loader import from_container; '\n f'client = happi.Client(path=\"{name}\"); '\n f'md = client.find_device(name=\"{device.md.name}\"); '\n f'{python_name} = from_container(md)')\n # Execute the script\n obj.kernel.kernel_client.execute(load_script, silent=True)\n except Exception:\n logger.exception(\"Unable to add device %r to TyphonConsole.\",\n device.md.name)\n # Cleanup after ourselves\n if os.path.exists(name):\n os.remove(name)\n\n # Set the TyphonConsole up to load devices\n TyphonConsole.add_device = add_device\n\nexcept ImportError:\n logger.info(\"Unable to import ``happi``. Devices will not be added \"\n \"to the ``TyphonConsole`` unless ``TyphonConsole.add_device`` \"\n \"is implemented.\")\n\n # Dummy pass-through function\n def add_device(obj, x):\n pass\n\n TyphonConsole.add_device = add_device\n","sub_path":"typhon/tools/console.py","file_name":"console.py","file_ext":"py","file_size_in_byte":4561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"124225299","text":"import argparse\nimport requests\nimport json\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport datetime\nimport numpy\nimport timeit\nimport re\nimport csv\nimport ast\nimport os\nimport socket\nfrom random import shuffle\nimport redis\n\nfrom time import sleep\n\nfrom .stress_analyzer import *\nfrom .modify_resources import *\nfrom .weighting_conversions import *\nfrom .remote_execution import *\nfrom .run_experiment import *\nfrom .container_information import *\nfrom .present_results import *\nfrom .run_spark_streaming import *\n\n#Amount of time to allow commands to propagate through system\nCOMMAND_DELAY = 3\n\n### Throttle only a single resource at a time.\ndef throttle_cpu_quota(ssh_client, container_id, cpu_period, cpu_quota):\n # update_cpu_through_stress(ssh_client, number_of_stress)\n set_cpu_quota(ssh_client, container_id, cpu_period, cpu_quota)\n\ndef throttle_cpu_cores(ssh_client, container_id, cores):\n set_cpu_cores(ssh_client, container_id, cores)\n\ndef throttle_disk(ssh_client, container_id, disk_rate):\n print('Disk Throttle Rate: {}'.format(disk_rate))\n return change_container_blkio(ssh_client, container_id, disk_rate)\n # return create_dummy_disk_eater(ssh_client, disk_rate)\n\n# network_bandwidth is a map from interface->bandwidth\ndef throttle_network(ssh_client, container_id, network_bandwidth):\n print('Network Reduction Rate: {}'.format(network_bandwidth))\n set_egress_network_bandwidth(ssh_client, container_id, network_bandwidth)\n\n###Stop the throttling for a single resource\ndef stop_throttle_cpu(ssh_client, container_id, cores):\n print('RESETTING CPU THROTTLING')\n if cores:\n reset_cpu_cores(ssh_client, container_id)\n else:\n reset_cpu_quota(ssh_client, container_id)\n\ndef stop_throttle_network(ssh_client, container_id):\n print('RESETTING NETWORK THROTTLING')\n # reset_egress_network_bandwidth(ssh_client, container_id)\n container_to_network_capacity = get_container_network_capacity(ssh_client, container_id)\n network_bandwith = weighting_to_bandwidth(ssh_client, 0, container_to_network_capacity)\n throttle_network(ssh_client, container_id, network_bandwith)\n\ndef stop_throttle_disk(ssh_client, container_id):\n print('RESETTING DISK THROTTLING')\n change_container_blkio(ssh_client, container_id, 0)\n # remove_dummy_disk_eater(ssh_client, num_fail)\n\ndef reset_all_stresses(ssh_client, container_id, cpu_cores):\n print('RESETTING ALL STRESSES!')\n stop_throttle_cpu(ssh_client, container_id, cpu_cores)\n stop_throttle_disk(ssh_client, container_id)\n stop_throttle_network(ssh_client, container_id)\n sleep(COMMAND_DELAY)\n\ndef model_machine(ssh_clients, container_ids_dict, experiment_inc_args, experiment_iterations, experiment_type,\n stress_policy, resources, only_baseline, resume_bool, prev_results,\n experiment_iteration_count, redis_db):\n\n # RESUME FUNCTION SHELVED FOR LATER (ALSO OUTDATED)\n if not resume_bool:\n reduction_level_to_latency_network = {}\n reduction_level_to_latency_disk = {}\n reduction_level_to_latency_cpu = {}\n else:\n reduction_level_to_latency_cpu, reduction_level_to_latency_disk, reduction_level_to_latency_network = prev_results\n\n increment_values = experiment_inc_args[0]\n experiment_args = experiment_inc_args[1]\n\n # Declaring baseline outside of loop for ALL services\n baseline_runtime_array, baseline_utilization_diff = None, None\n\n for service, ip_container_tuples in container_ids_dict.items():\n print('STRESSING SERVICE {}'.format(service))\n\n if not resume_bool or (service not in reduction_level_to_latency_cpu):\n reduction_level_to_latency_network_service = {}\n reduction_level_to_latency_disk_service = {}\n reduction_level_to_latency_cpu_service = {}\n else:\n reduction_level_to_latency_network_service = reduction_level_to_latency_network[service]\n reduction_level_to_latency_disk_service = reduction_level_to_latency_disk[service]\n reduction_level_to_latency_cpu_service = reduction_level_to_latency_cpu[service]\n\n # Used for service aliasing (Later implementation)\n service_tag = service\n\n # OUTDATED\n if stress_policy == 'HALVING':\n container_id, resource = container_id\n resources = [resource]\n\n # initialize_machine(ssh_client) LEGACY\n print(\"CLEARING ALL STRESSES\")\n print(\"=====================================\")\n for service, ip_container_tuples in container_ids_dict.items():\n for vm_ip, container_id in ip_container_tuples:\n ssh_client = ssh_clients[vm_ip]\n reset_all_stresses(ssh_client, container_id, cpu_cores)\n print(\"FINISHED CLEARING ALL STRESSES\")\n print(\"=====================================\")\n print('\\n' * 4)\n\n shuffle(increment_values)\n\n BASELINE_ITERATIONS = 10\n # BASELINE_ITERATIONS = 3 # For fast Benchmarking\n if 0 in increment_values:\n # Checking if baseline has been calculated yet\n if baseline_runtime_array == None and baseline_utilization_diff == None:\n baseline_runtime_array = measure_runtime(None, experiment_args, BASELINE_ITERATIONS,\n experiment_type)\n\n reduction_level_to_latency_network_service[0] = baseline_runtime_array\n hash_name = '{},{},{}'.format(experiment_iteration_count, service_tag, 'NET')\n sorted_set_name = '{},{}'.format(service_tag, 'NET')\n print('HashName: {}'.format(hash_name))\n print('SortedSetName: {}'.format(sorted_set_name))\n for metric, data in baseline_runtime_array.items():\n key_name = '{},{}'.format(0, metric)\n sorted_key_name = '{},{}'.format(experiment_iteration_count, key_name)\n redis_db.hset(hash_name, key_name, '{}'.format(data))\n redis_db.zadd(sorted_set_name, {sorted_key_name: numpy.mean(data)})\n\n reduction_level_to_latency_disk_service[0] = baseline_runtime_array\n hash_name = '{},{},{}'.format(experiment_iteration_count, service_tag, 'DISK')\n sorted_set_name = '{},{}'.format(service_tag, 'DISK')\n print('HashName: {}'.format(hash_name))\n print('SortedSetName: {}'.format(sorted_set_name))\n for metric, data in baseline_runtime_array.items():\n key_name = '{},{}'.format(0, metric)\n sorted_key_name = '{},{}'.format(experiment_iteration_count, key_name)\n redis_db.hset(hash_name, key_name, '{}'.format(data))\n redis_db.zadd(sorted_set_name, {sorted_key_name: numpy.mean(data)})\n\n reduction_level_to_latency_cpu_service[0] = baseline_runtime_array\n hash_name = '{},{},{}'.format(experiment_iteration_count, service_tag, 'CPU')\n sorted_set_name = '{},{}'.format(service_tag, 'CPU')\n print('HashName: {}'.format(hash_name))\n print('SortedSetName: {}'.format(sorted_set_name))\n for metric, data in baseline_runtime_array.items():\n key_name = '{},{}'.format(0, metric)\n sorted_key_name = '{},{}'.format(experiment_iteration_count, key_name)\n redis_db.hset(hash_name, key_name, '{}'.format(data))\n redis_db.zadd(sorted_set_name, {sorted_key_name: numpy.mean(data)})\n\n if not only_baseline:\n for increment in increment_values:\n if increment == 0:\n continue\n\n print('Experiment with increment={}'.format(increment))\n\n if 'CPU' in resources:\n print('=====================================')\n print('INITIATING CPU Experiment')\n\n for vm_ip, container_id in ip_container_tuples:\n print('STRESSING VM_IP {} AND CONTAINER {}'.format(vm_ip, container_id))\n ssh_client = ssh_clients[vm_ip]\n if cpu_cores:\n num_cores = weighting_to_cpu_cores(ssh_client, increment)\n throttle_cpu_cores(ssh_client, container_id, num_cores)\n else:\n cpu_throttle_quota = weighting_to_cpu_quota(increment)\n throttle_cpu_quota(ssh_client, container_id, 1000000, cpu_throttle_quota)\n\n results_data_cpu = measure_runtime(container_id, experiment_args,\n experiment_iterations, experiment_type)\n\n for vm_ip, container_id in ip_container_tuples:\n ssh_client = ssh_clients[vm_ip]\n stop_throttle_cpu(ssh_client, container_id, cpu_cores)\n\n reduction_level_to_latency_cpu_service[increment] = results_data_cpu\n hash_name = '{},{},{}'.format(experiment_iteration_count, service_tag, 'CPU')\n sorted_set_name = '{},{}'.format(service_tag, 'CPU')\n print('HashName: {}'.format(hash_name))\n print('SortedSetName: {}'.format(sorted_set_name))\n for metric, data in results_data_cpu.items():\n key_name = '{},{}'.format(increment, metric)\n sorted_key_name = '{},{}'.format(experiment_iteration_count, key_name)\n redis_db.hset(hash_name, key_name, '{}'.format(data))\n redis_db.zadd(sorted_set_name, {sorted_key_name: numpy.mean(data)})\n\n if 'NET' in resources:\n print('======================================')\n print('INITIATING Network Experiment')\n try:\n for vm_ip, container_id in ip_container_tuples:\n print('STRESSING VM_IP {} AND CONTAINER {}'.format(vm_ip, container_id))\n ssh_client = ssh_clients[vm_ip]\n container_to_network_capacity = get_container_network_capacity(ssh_client, container_id)\n network_reduction_rate = weighting_to_bandwidth(ssh_client, increment,\n container_to_network_capacity)\n throttle_network(ssh_client, container_id, network_reduction_rate)\n\n results_data_network = measure_runtime(container_id, experiment_args, experiment_iterations,\n experiment_type)\n\n for vm_ip, container_id in ip_container_tuples:\n ssh_client = ssh_clients[vm_ip]\n stop_throttle_network(ssh_client, container_id)\n\n reduction_level_to_latency_network_service[increment] = results_data_network\n hash_name = '{},{},{}'.format(experiment_iteration_count, service_tag, 'NET')\n sorted_set_name = '{},{}'.format(service_tag, 'NET')\n print('HashName: {}'.format(hash_name))\n print('SortedSetName: {}'.format(sorted_set_name))\n for metric, data in results_data_network.items():\n key_name = '{},{}'.format(increment, metric)\n sorted_key_name = '{},{}'.format(experiment_iteration_count, key_name)\n redis_db.hset(hash_name, key_name, '{}'.format(data))\n redis_db.zadd(sorted_set_name, {sorted_key_name: numpy.mean(data)})\n except:\n print('Passed NET')\n reduction_level_to_latency_network_service[increment] = \\\n reduction_level_to_latency_network_service[0]\n if 'DISK' in resources:\n print('=======================================')\n print('INITIATING Disk Experiment ')\n disk_throttle_rate = weighting_to_disk_access_rate(increment)\n for vm_ip, container_id in ip_container_tuples:\n print('STRESSING VM_IP {} AND CONTAINER {}'.format(vm_ip, container_id))\n ssh_client = ssh_clients[vm_ip]\n throttle_disk(ssh_client, container_id, disk_throttle_rate)\n\n results_data_disk = measure_runtime(container_id, experiment_args, experiment_iterations,\n experiment_type)\n\n for vm_ip, container_id in ip_container_tuples:\n ssh_client = ssh_clients[vm_ip]\n stop_throttle_disk(ssh_client, container_id)\n\n reduction_level_to_latency_disk_service[increment] = results_data_disk\n hash_name = '{},{},{}'.format(experiment_iteration_count, service_tag, 'DISK')\n sorted_set_name = '{},{}'.format(service_tag, 'DISK')\n print('HashName: {}'.format(hash_name))\n print('SortedSetName: {}'.format(sorted_set_name))\n for metric, data in results_data_disk.items():\n key_name = '{},{}'.format(increment, metric)\n sorted_key_name = '{},{}'.format(experiment_iteration_count, key_name)\n redis_db.hset(hash_name, key_name, '{}'.format(data))\n redis_db.zadd(sorted_set_name, {sorted_key_name: numpy.mean(data)})\n\n # Saving results\n reduction_level_to_latency_network[service_tag] = reduction_level_to_latency_network_service\n reduction_level_to_latency_disk[service_tag] = reduction_level_to_latency_disk_service\n reduction_level_to_latency_cpu[service_tag] = reduction_level_to_latency_cpu_service\n\n # File Checkpoint\n file = append_results_to_file(reduction_level_to_latency_cpu, reduction_level_to_latency_disk,\n reduction_level_to_latency_network, resources, increments,\n experiment_type, experiment_iterations,\n experiment_iteration_count, False)\n print('Checkpoint file for increment {} is {}'.format(increment, file))\n\n return reduction_level_to_latency_cpu, reduction_level_to_latency_disk, reduction_level_to_latency_network\n\n'''\nExperiment arguements takes a list of arguments for the type of experiments\nExamples:\n\"REST\": Node TODO App: [public_vm_ip]\n\"spark-ml-matrix\": Spark ml-matrix: [public_vm_ip, private_vm_ip]\n\"nginx-single\": Single unreplicated nginx serving up static pages\n'''\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"website_ip\", help=\"List of public IP Addresses that the measurement module will hit with traffic\")\n parser.add_argument(\"victim_machine_public_ip\", help=\"List of IP Addresses of servers that are hit with stress\")\n parser.add_argument(\"experiment_type\", help=\"Options: spark-ml-matrix, nginx-single, REST\")\n parser.add_argument(\"--victim_machine_private_ip\", help=\"Private (10./) IP Address of server that is being hit with stress\")\n parser.add_argument(\"--traffic_generator_public_ip\", help=\"Public IP Address from where synthetic traffic is generated from\")\n parser.add_argument(\"--services_to_stress\", help=\"List of services to stress on machines\")\n parser.add_argument(\"--stress_all_services\", action=\"store_true\", help=\"Stress all services\")\n parser.add_argument(\"--resources_to_stress\", help=\"List of resources to throttle\")\n parser.add_argument(\"--stress_all_resources\", action=\"store_true\", help=\"Throttle all resources\")\n parser.add_argument(\"--cpu_cores\", action=\"store_true\", help=\"Use CPU core throttling\")\n parser.add_argument(\"--stress_search_policy\", help=\"Type of stress policy\")\n parser.add_argument(\"--iterations\", type=int, default=7, help=\"Number of HTTP requests to send the REST server per experiment\")\n parser.add_argument(\"--only_baseline\", action=\"store_true\", help=\"Only takes a measurement of the baseline without any stress\")\n parser.add_argument(\"--increments\", help=\"The increments of stressing\")\n parser.add_argument(\"--resume\", help=\"Resume experiment (need to specify increments)\")\n parser.add_argument(\"--redis_ms\", help=\"IP of Redis Machine\")\n parser.add_argument(\"--spark_stream\", help =\"IP of Spark Stream\")\n parser.add_argument(\"--kafka\", help=\"IP of Kafka machine\")\n parser.add_argument(\"--spark_ms\", help=\"IP of spark-ms \")\n parser.add_argument(\"--spark_wk\", help=\"IPs of spark workers\")\n parser.add_argument(\"--multiservice_stressing\", help=\"Lists of services to be stressed together\")\n\n\n args = parser.parse_args()\n print(args)\n\n # Initializing Redis DB\n redis_db = redis.StrictRedis(host='localhost', port=6379, db=0)\n\n # Accomodating for wildcards\n if args.stress_all_services:\n services = '*'\n elif args.services_to_stress:\n services = args.services_to_stress.split(',')\n else:\n print('Please state which services to stress')\n exit()\n if args.stress_all_resources:\n resources = ['CPU', 'DISK', 'NET']\n elif args.resources_to_stress:\n resources = args.resources_to_stress.split(',')\n else:\n print('Please state which resources to throttle')\n exit()\n\n if not args.stress_search_policy:\n print('Please state a stress search policy')\n exit()\n stress_policy = args.stress_search_policy\n\n # Checking if ip addresses are valid\n ip_addresses = args.victim_machine_public_ip.split(',')\n for ip in ip_addresses:\n try:\n socket.inet_aton(ip)\n except:\n print('IP {} is invalid'.format(ip))\n exit()\n\n website_addresses = args.website_ip.split(',')\n for website in website_addresses:\n try:\n socket.inet_aton(website)\n except:\n print('Website IP {} is invalid'.format(website))\n exit()\n\n if args.traffic_generator_public_ip:\n try:\n socket.inet_aton(args.traffic_generator_public_ip)\n except:\n print('Traffic Generator IP {} is invalid'.format(args.traffic_generator_public_ip))\n exit()\n # Installng dependencies on traffic generator client\n traffic_client = get_client(args.traffic_generator_public_ip)\n ssh_exec(traffic_client, 'sudo apt-get install -y apache2-utils')\n\n # Creating dictionary of SSH CLIENTS\n victim_ips = args.victim_machine_public_ip.split(',')\n ssh_clients = {}\n for victim_ip in victim_ips:\n ssh_clients[victim_ip] = get_client(victim_ip)\n\n # MESSY TODO: Write an abstract class for the experiment type and implement elsewhere\n if args.experiment_type == 'REST':\n experiment_args = [args.website_ip, ip_addresses]\n elif args.experiment_type == \"spark-ml-matrix\":\n #website_ip in this case is the spark master public ip\n experiment_args = [args.website_ip, args.victim_machine_private_ip]\n elif args.experiment_type == \"nginx-single\":\n experiment_args = [args.website_ip, args.traffic_generator_public_ip]\n elif args.experiment_type == \"todo-app\":\n experiment_args = [args.website_ip, traffic_client]\n elif args.experiment_type == \"basic-get\":\n experiment_args = [args.website_ip, traffic_client]\n elif args.experiment_type == \"spark-streaming\":\n if not args.redis_ms:\n print('Please enter a redis IP')\n exit()\n else:\n redis_ip = args.redis_ms\n if not args.spark_stream:\n print('Please enter a spark-stream IP')\n exit()\n else:\n spark_stream_ip = args.spark_stream\n if not args.kafka:\n print('Please enter a kafka IP')\n exit()\n else:\n kafka_ip = args.kafka\n if not args.spark_ms:\n print('Please enter a spark-ms IP')\n exit()\n else:\n spark_ms_ip = args.spark_ms\n if not args.spark_wk:\n print('Please enter the spark worker IP')\n exit()\n else:\n spark_wk_ip_list = args.spark_wk.split(',')\n redis_dict = get_container_ids_all([redis_ip], \"hantaowang/redis\")\n spark_stream_dict = get_container_ids_all([spark_stream_ip], \"mchang6137/spark_streaming\")\n kafka_dict = get_container_ids_all([kafka_ip], \"mchang6137/kafka\")\n spark_ms_dict = get_container_ids_all([spark_ms_ip], \"mchang6137/spark-yahoo\")\n spark_wk_dict = {\"spark-wk\": []}\n for spark_wk_ip in spark_wk_ip_list:\n spark_wk_sub_dict = get_container_ids_all([spark_wk_ip], \"mchang6137/spark-yahoo\")\n spark_wk_dict[\"spark-wk\"].append(spark_wk_sub_dict[\"mchang6137/spark-yahoo\"])\n ssh_clients[spark_wk_ip] = get_client(spark_wk_ip)\n # Adding spark worker ips to ip_addresses\n ip_addresses.append(spark_wk_ip)\n\n experiment_args_dict = {}\n experiment_args_dict.update(redis_dict)\n experiment_args_dict.update(spark_stream_dict)\n experiment_args_dict.update(kafka_dict)\n experiment_args_dict.update(spark_ms_dict)\n experiment_args_dict.update(spark_wk_dict)\n experiment_args = [experiment_args_dict]\n\n #Initialize Spark Master\n initialize_spark_experiment(experiment_args[0])\n else:\n print('INVALID EXPERIMENT TYPE: {}'.format(args.experiment_type))\n exit()\n\n # Notifying User CPU throttling type\n cpu_cores = args.cpu_cores\n if cpu_cores:\n print('Using CPU Core Throttling')\n else:\n print('Using CPU Quota Throttling')\n\n # Getting increments\n if not args.increments:\n if args.resume:\n print('If resuming from a previous experiment, please specify increments')\n exit()\n increments = [0, 20, 40, 60, 80]\n else:\n if args.only_baseline:\n print('Cannot specify increments when only_baseline is true')\n exit()\n string_increments = args.increments.split(',')\n try:\n increments = list(map(int, string_increments))\n except:\n print('ERROR: Increments must be integers')\n exit()\n experiment_inc_args = [increments, experiment_args]\n\n if args.resume:\n print('RESUME FUNCTIONALITY HAS BEEN SHELVED FOR LATER (AND IT IS ALSO OUTDATED)')\n exit()\n try:\n previous_results = read_from_file(args.resume, True)\n except:\n print('File not found')\n exit()\n resume_boolean = True\n else:\n resume_boolean = False\n previous_results = None\n\n # Retrieving dictionary of container_ids with service names as keys\n container_ids_dict = get_container_ids(ip_addresses, services, resources, stress_policy)\n\n # Accounting for multi-service stressing (Currently only works for stress_policy=ALL)\n if args.multiservice_stressing:\n multi_service_lists = args.multiservice_stressing.split('|')\n for multi_service_str in multi_service_lists:\n multi_service_list = ast.literal_eval(multi_service_str)\n new_multi_service_tuple_list = []\n new_multi_service_name = None\n for multi_service in multi_service_list:\n old_container_tuple_list = container_ids_dict.pop(multi_service, None)\n if not old_container_tuple_list:\n print('Service {} not found'.format(multi_service))\n exit()\n new_multi_service_tuple_list += old_container_tuple_list\n if not new_multi_service_name:\n new_multi_service_name = multi_service\n else:\n new_multi_service_name += '+{}'.format(multi_service)\n container_ids_dict.update({new_multi_service_name: new_multi_service_tuple_list})\n\n # Checking for stress search type\n if stress_policy == 'HALVING' or stress_policy == 'BINARY':\n container_ids_dict1, container_ids_dict2 = container_ids_dict\n\n results_disk = {}\n results_cpu = {}\n results_network = {}\n\n continue_stressing = True\n\n experiment_iteration_count = 0\n\n while continue_stressing:\n # Reset results dictionary for each iteration\n results_disk = {}\n results_cpu = {}\n results_network = {}\n\n if stress_policy == 'BINARY' or stress_policy == 'HALVING':\n results1 = model_machine(ssh_clients, container_ids_dict1, experiment_inc_args, args.iterations,\n args.experiment_type, stress_policy, resources,\n args.only_baseline, resume_boolean, previous_results, experiment_iteration_count,\n redis_db)\n results2 = model_machine(ssh_clients, container_ids_dict2, experiment_inc_args, args.iterations,\n args.experiment_type, stress_policy, resources,\n args.only_baseline, resume_boolean, previous_results, experiment_iteration_count,\n redis_db)\n else: # More will be added as more search policies are implemented\n results = model_machine(ssh_clients, container_ids_dict, experiment_inc_args, args.iterations,\n args.experiment_type, stress_policy, resources,\n args.only_baseline, resume_boolean, previous_results, experiment_iteration_count,\n redis_db)\n\n if args.experiment_type == 'REST':\n for service, (vm_ip, container_id) in container_ids_dict:\n reset_experiment(vm_ip, container_id)\n\n results_in_milli = True\n if args.experiment_type == 'spark-ml-matrix' or args.experiment_type == 'nginx-single':\n results_in_milli = False\n\n # Revert container_ids_dict if necessary (Allows for modular update function)\n if stress_policy == 'BINARY' or stress_policy == 'HALVING':\n results = (results1, results2)\n\n # Update container dictionary based on type\n container_ids_dict = get_updated_container_ids(container_ids_dict, results, stress_policy)\n\n # Checking and updating loop condition if necessarily (based on type)\n if container_ids_dict == None:\n continue_stressing = False\n if stress_policy == 'BINARY' or stress_policy == 'HALVING':\n results, _ = results\n results_cpu, results_disk, results_network = results\n\n experiment_iteration_count += 1\n\n output_file_name = append_results_to_file(results_cpu, results_disk, results_network, resources, increments, args.experiment_type, args.iterations, '-',True)\n #plot_results(output_file_name, resources, args.experiment_type, args.iterations, 'save', convertToMilli=results_in_milli)\n","sub_path":"src-python3/stress_scheduler.py","file_name":"stress_scheduler.py","file_ext":"py","file_size_in_byte":27356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"207295311","text":"import sys, os, shutil, time\nimport __main__\n\ndef get_input():\n basename = __main__.__file__.replace(\".py\", \".txt\")\n input_fn = os.path.join(\n os.path.dirname(__file__),\n \"input\",\n basename\n )\n if not os.path.exists(input_fn):\n print(f\"\\n{basename} not found, have you downloaded it?\")\n exit(1)\n return open(input_fn)\n\ndef header(subtitle : str):\n day = __main__.__file__.replace(\"day\", \"\").replace(\".py\", \"\")\n title = f\"Advent of Code 2019 – Day {int(day)}\"\n width = min(shutil.get_terminal_size().columns, 55)\n\n print(f\" ╭{'─' * (width - 3)}╮\")\n print(f\" │ {{:^{width - 5}}} │\".format(title))\n print(f\" │ {{:^{width - 5}}} │\".format(subtitle))\n print(f\" ╰{'─' * (width - 3)}╯\")\n print()\n\ndef output(part : int, func, post=None, output=None, comment=None, args=[], kwargs={}):\n print(f\"⧖ Part {part}\", end=\"\", flush=True)\n t0 = time.perf_counter()\n result = func(*args, **kwargs)\n t1 = time.perf_counter()\n\n print(f\"\\r✓ Part {part}\", flush=True)\n print(f\" Elapsed: {(t1-t0)*1000:>10.3f} ms\")\n\n if comment is not None: print(f\" {comment}\")\n if output is None: output = lambda r: print(f\" {r}\")\n if post is not None:\n output(post(result))\n else:\n output(result)\n \n print()\n return result\n\ndef run_tests():\n try:\n t0 = time.perf_counter()\n __main__.test()\n t1 = time.perf_counter()\n print(\"✓ All tests passed!\")\n print(f\" Elapsed: {(t1-t0)*1000:>10.3f} ms\")\n except AssertionError as e:\n print(f\"✗ Tests failed!\\n\")\n raise e\n print()\n\ndef __find_next_day():\n days = map(\n lambda fn: int(os.path.basename(fn).replace(\"day\",\"\").replace(\".py\",\"\")),\n filter(\n lambda fn: fn.startswith(\"day\"), \n os.listdir(os.path.dirname(__file__))\n )\n )\n return max(days) + 1\n\n\nif __name__ == \"__main__\":\n import argparse\n parser = argparse.ArgumentParser()\n subparser = parser.add_subparsers(dest=\"command\")\n\n parser_new = subparser.add_parser(\"new\", help=\"Create a file for a new day\")\n parser_new.add_argument(\"day\", type=int, nargs=\"?\", default=__find_next_day(), help=\"Day to create (defaults to next day that doesn't exist)\")\n\n args = parser.parse_args()\n if args.command == \"new\":\n filename = os.path.join(os.path.dirname(__file__), f\"day{args.day:02}.py\")\n if os.path.exists(filename):\n print(f\"day{args.day:02}.py exists, overwrite? (y/N)\")\n if input().lower() != \"y\":\n exit()\n with open(filename, 'w') as fd:\n fd.write(\n\"\"\"import aoc\n\ndef main():\n aoc.header(\"Your title here\")\n aoc.run_tests()\n\n # aoc.output(1, part1)\n # aoc.output(2, part2)\n\ndef test():\n pass\n\ndef part1():\n pass\n\ndef part2():\n pass\n\nif __name__ == \"__main__\":\n main()\n\"\"\")\n print(f\"Created day{args.day:02}.py\")\n\n \n\n","sub_path":"aoc.py","file_name":"aoc.py","file_ext":"py","file_size_in_byte":2979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"420577491","text":"import socket, datetime\nfrom flask import Flask\n\napplication = Flask(__name__)\n\n@application.route(\"/\")\ndef hello():\n\ttry:\n\t\tf = open(\"/mnt/access.log\", \"a+\")\n\t\tf.write(\"

Hello World! Greetings from \"+socket.gethostname()+\" @ \"+str(datetime.datetime.now())+\"\\n

\")\n\t\tf.close()\n\texcept Exception as e:\n\t\treturn str(e)\n\n\ttry:\n\t\twith open(\"/mnt/access.log\", \"r\") as myfile:\n\t\t\tdata = myfile.read()\n\texcept Exception as e:\n\t\treturn str(e)\n\n\treturn \"\"+data+\"\"\n\n\nif __name__ == \"__main__\":\n\tapplication.run()\n","sub_path":"wsgi.py","file_name":"wsgi.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"335573574","text":"from typing import List\nfrom core import main_code_parser\nfrom core.base_handler import BaseHandler\nfrom core.variable import *\nfrom core.function_context import FunctionContext\n\n\nclass BufferOverflowHandler(BaseHandler):\n\n vulnerability_name = 'Переполнение буфера'\n\n \"\"\"\n - char * strcpy ( char * destination, const char * source ); - Копирует строку в буфер .\n \n - int printf ( const char * format, ... ); - Выводит строку указанную в формате в стандартный поток вывода.\n \n - char * strcat ( char * destination, const char * source ); - Добавляет к строке копию \n \n - void * memcpy ( void * destination, const void * source, size_t num ); - копирует байт из в \n \n \n - char * gets ( char * str ); - читает символы из потока ввода и записывает их в \n \n - int sprintf ( char * str, const char * format, ... ); - аналогично , но вывод происходит в буфер \n \n - int vsprintf (char * s, const char * format, va_list arg ); - аналогично , но принимает только один \n \n - char * strncpy ( char * destination, const char * source, size_t num ); - , но копирует только байт\n \"\"\"\n\n def __init__(self):\n self.pattern = r\"(strcpy|printf|strcat|memcpy|gets|sprintf|vsprintf|strncpy|scanf)\" \\\n r\"\\(.*\\)\"\n self.output = []\n\n def parse(self, contexts: List[FunctionContext]):\n total_errors = 0\n for context in contexts:\n declared_variables = context.variables\n for line_number, line in context.source_code.items():\n matches = re.finditer(self.pattern, line)\n for match in matches:\n used_variables = main_code_parser.get_parameters(match.group(0), declared_variables)\n for used_variable in used_variables:\n declaration = used_variable.full_declaration\n if is_pointer(declaration) or is_array(declaration):\n total_errors += 1\n self.output.append(\n f\"{total_errors}) Предупреждение в методе <{context.name}>!\\n\"\n f\"Использование буфера <{declaration[:-1]}> (строка {used_variable.line_appeared}) \"\n f\"в небезопасной функции <{match.group(1)}> (строка {line_number}).\\n\"\n f\"Это может стать причиной переполнения буфера. \"\n f\"Убедитесь в наличии проверки этой угрозы!\\n\")\n self.output.append(self.vulnerability_name + \": \" + str(total_errors))\n return self.output\n","sub_path":"handlers/buffer_overflow_handler.py","file_name":"buffer_overflow_handler.py","file_ext":"py","file_size_in_byte":3135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"473619730","text":"\"\"\"add total_satoshis_received\n\nRevision ID: 3115f1d92acb\nRevises: d5149c4e2ffc\nCreate Date: 2019-03-05 21:30:30.164464\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '3115f1d92acb'\ndown_revision = 'd5149c4e2ffc'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('etl_open_channels', sa.Column('total_satoshis_received', sa.BIGINT(), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('etl_open_channels', 'total_satoshis_received')\n # ### end Alembic commands ###\n","sub_path":"lnd_sql/migrations/versions/3115f1d92acb_add_total_satoshis_received.py","file_name":"3115f1d92acb_add_total_satoshis_received.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"387244838","text":"import sys\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import *\n\n\nclass Window(QWidget):\n def __init__(self, parent=None):\n super(Window, self).__init__(parent, flags=Qt.Window)\n # 创建父控件为 Window 的分割窗口:\n self.splitter_0 = QSplitter(self)\n self.lineEdit = QLineEdit()\n self.lineEdit.setPlaceholderText(\"请输入待检词汇\")\n self.pushButton = QPushButton(self)\n self.pushButton.setText(\"开始检测\")\n self.pushButton_2 = QPushButton()\n self.pushButton_2.setText(\"查看检测出的文件\")\n self.splitter_0.setOrientation(Qt.Vertical)\n # 设置分割窗口下的控件不可折叠:\n self.splitter_0.setChildrenCollapsible(False)\n # 设置分割条的宽度:\n self.splitter_0.setHandleWidth(10)\n # 设置分割窗口大小:\n self.splitter_0.resize(900, 650)\n self.splitter_0.addWidget(self.lineEdit)\n self.splitter_0.addWidget(self.pushButton)\n self.splitter_0.addWidget(self.pushButton_2)\n\n # 创建父控件为 splitter_0 的分割窗口:\n self.splitter_1 = QSplitter(self.splitter_0)\n # 设置分割窗口的方向:\n self.splitter_1.setOrientation(Qt.Horizontal)\n\n # 为分割窗口添加控件:\n self.tree = QTreeView()\n self.tree.setModel(QDirModel())\n self.tree.setColumnWidth(0, 300)\n self.splitter_1.addWidget(self.tree)\n self.splitter_1.addWidget(QTextEdit('横向排列'))\n self.splitter_1.setSizes([180, 60])\n # 设置分割窗口下的控件的宽度,如果是垂直排列的则是高度:\n # splitter_1.setSizes([10, 80, 60])\n # 设置分隔条是否跟随鼠标移动:\n self.splitter_1.setOpaqueResize(False)\n\n def start(self):\n index = self.tree.currentIndex()\n file_path = self.model.filePath(index)\n print(file_path)\n\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n window = Window()\n window.show()\n sys.exit(app.exec_())\n","sub_path":"duoxianc.py","file_name":"duoxianc.py","file_ext":"py","file_size_in_byte":2074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"311962581","text":"import pickle\nimport numpy as np\nimport re\n\ndef isInt(s):\n try: \n int(s)\n return True\n except ValueError:\n return False\n\ndata_load = pickle.load( open(\"../output/testData_mod_new4.pkl\", \"rb\") )\n\nlen_data = len(data_load)/2\ncount = 0\n\nfor dic_values in data_load:\n if(count < len_data):\n dic_values['label']=\"buggy\"\n else:\n dic_values['label']=\"clean\"\n \n dic_values.pop('date_tz')\n dic_values.pop('rev')\n dic_values.pop('repository_id')\n dic_values.pop('file_id')\n dic_values.pop('author_date')\n dic_values.pop('author_date_tz')\n dic_values.pop('file_path')\n \n ###Refining Values\n ###----------------------------------------------\n dat = dic_values['date']\n day_of_week = dat.weekday()\n hour_of_day = dat.hour \n \n dic_values['day_of_week']=day_of_week\n dic_values['hour_of_day']=hour_of_day\n \n message = dic_values['message']\n message_len = len(message)\n dic_values['message_len'] = message_len\n \n ###Extracting commit_message\n ###----------------------------------------------\n tempStr = message\n tempStr = re.sub(r'\\W+', ' ', message) ##Replacing non alphanumeric with space\n tempStr = tempStr.strip().lower()\n tempStr = tempStr.split()\n fix_count=tempStr.count('fix')+tempStr.count('fixed')\n update_count=tempStr.count('update')\n bug_count=tempStr.count('bug')\n #new_count=tempStr.count('new')\n feature_count=tempStr.count('feature')\n \n dic_values['fix_count']=fix_count\n dic_values['update_count']=update_count\n dic_values['bug_count']=bug_count\n dic_values['feature_count']=feature_count\n \n ###Popping unwanted values\n ###----------------------------------------------\n dic_values.pop('message')\n dic_values.pop('date')\n \n ###fixing datatypes\n ###----------------------------------------------\n if(isInt(dic_values['num_lines_added'])):\n dic_values['num_lines_added'] = int(dic_values['num_lines_added'])\n else:\n dic_values['num_lines_added'] = np.NaN\n if(isInt(dic_values['num_lines_deleted'])):\n dic_values['num_lines_deleted'] = int(dic_values['num_lines_deleted'])\n else:\n dic_values['num_lines_deleted'] = np.NaN\n print(count)\n count = count + 1 \n\npickle.dump(data_load, open(\"../output/testData_mod_new_test.pkl\", \"wb\"))\nprint(len(data_load))\nprint(\"DONE\")","sub_path":"V_3_0/DatasetGeneration/testDb10.py","file_name":"testDb10.py","file_ext":"py","file_size_in_byte":2404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"67379120","text":"# These are all the modules we'll be using later. Make sure you can import them\n# before proceeding further.\nfrom __future__ import print_function\nimport numpy as np\nimport tensorflow as tf\nfrom six.moves import cPickle as pickle\nfrom six.moves import range\nimport numpy as np\n\npickle_file = 'notMNIST.pickle'\nimage_size = 28\nnum_labels = 10\nnum_channels = 1 # grayscale\nnum_hidden = 64\nnum_steps = 10001\n\n\ndef reformat(dataset, labels):\n dataset = dataset.reshape(\n (-1, image_size, image_size, num_channels)).astype(np.float32)\n labels = (np.arange(num_labels) == labels[:,None]).astype(np.float32)\n return dataset, labels\n\ndef accuracy(predictions, labels):\n return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1))\n / predictions.shape[0])\n\ndef run_session():\n\twith tf.Session(graph=graph) as session:\n\t\ttf.initialize_all_variables().run()\n\t\tprint('Initialized')\n\t\tfor step in range(num_steps):\n\t\t\toffset = (step * batch_size) % (train_labels.shape[0] - batch_size)\n\t\t\tbatch_data = train_dataset[offset:(offset + batch_size), :, :, :]\n\t\t\tbatch_labels = train_labels[offset:(offset + batch_size), :]\n\t\t\tfeed_dict = {\n\t\t\t\ttf_train_dataset : batch_data,\n\t\t\t\ttf_train_labels : batch_labels\n\t\t\t}\n\t\t\t_, l, predictions = \\\n\t\t\tsession.run([optimizer, loss, train_prediction], feed_dict=feed_dict)\n\t\t\tif (step % 50 == 0):\n\t\t\t\tprint('Minibatch loss at step %d: %f' % (step, l))\n\t\t\t\tprint('Minibatch accuracy: %.1f%%' % accuracy(predictions, batch_labels))\n\t\t\t\tprint('Validation accuracy: %.1f%%' % accuracy(valid_prediction.eval(), valid_labels))\n\t\tprint('Test accuracy: %.1f%%' % accuracy(test_prediction.eval(), test_labels))\n\n\n\n### Start\nwith open(pickle_file, 'rb') as f:\n\tsave = pickle.load(f)\n\ttrain_dataset = save['train_dataset']\n\ttrain_labels = save['train_labels']\n\tvalid_dataset = save['valid_dataset']\n\tvalid_labels = save['valid_labels']\n\ttest_dataset = save['test_dataset']\n\ttest_labels = save['test_labels']\n\tdel save # hint to help gc free up memory\n\tprint('Training set', train_dataset.shape, train_labels.shape)\n\tprint('Validation set', valid_dataset.shape, valid_labels.shape)\n\tprint('Test set', test_dataset.shape, test_labels.shape)\n\n\n###\nprint(type(train_dataset), type(train_labels))\nprint(train_dataset.shape, train_labels.shape)\ntrain_dataset, train_labels = reformat(train_dataset, train_labels)\nvalid_dataset, valid_labels = reformat(valid_dataset, valid_labels)\ntest_dataset, test_labels = reformat(test_dataset, test_labels)\nprint('Training set', train_dataset.shape, train_labels.shape)\nprint('Validation set', valid_dataset.shape, valid_labels.shape)\nprint('Test set', test_dataset.shape, test_labels.shape)\n###\n\n\n# define our computational graph\n###\nbatch_size = 32\npatch_size = 5\t# filter size\ndepth = 16\nconv1_depth = depth\nconv2_depth = depth\t# just for semantic clarity\nlast_conv_depth = conv2_depth\npooling_stride = 2\n\n\ngraph = tf.Graph()\n\nwith graph.as_default():\n\t# Input data.\n\ttf_train_dataset = tf.placeholder(tf.float32, shape=(batch_size, image_size, image_size, num_channels))\n\ttf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))\n\ttf_valid_dataset = tf.constant(valid_dataset)\n\ttf_test_dataset = tf.constant(test_dataset)\n\n\t# Architure\n\t# conv1 -> relu -> conv2 -> relu -> pool ->\n\t# conv3 -> relu -> pool ->\n\t# fully connected\n\n\t# Variables.\n\t# conv1 layer 1\n\t# \"layer1_weights\" is a terrible naming, better to name it \"conv1_filter\"\n\tconv1_filter = tf.Variable(\n\t\ttf.truncated_normal([patch_size, patch_size, num_channels, conv1_depth], stddev=0.1))\n\tconv1_biases = tf.Variable(tf.zeros([conv1_depth]))\n\n\t# conv layer 2\n\tconv2_filter = tf.Variable(\n\t\ttf.truncated_normal([patch_size, patch_size, conv1_depth, conv2_depth], stddev=0.1))\n\tconv2_biases = tf.Variable(tf.constant(1.0, shape=[conv2_depth]))\n\n\t# layer 3, fully connected\n\tdown_scale = pooling_stride ** 2\t# because we do 2 times pooling of stride 2\n\tlayer3_weights = tf.Variable(\n\t\ttf.truncated_normal(\n\t\t\t[image_size // down_scale * image_size // down_scale * last_conv_depth, num_hidden],\n\t\t\tstddev=0.1))\n\tlayer3_biases = tf.Variable(tf.constant(1.0, shape=[num_hidden]))\n\n\t# layer 4\n\tlayer4_weights = tf.Variable(\n\t\ttf.truncated_normal([num_hidden, num_labels], stddev=0.1))\n\tlayer4_biases = tf.Variable(tf.constant(1.0, shape=[num_labels]))\n\n\t# Model.\n\tdef model(data, isTrain=False):\n\t\t# conv layer 1\n\t\tconv1 = tf.nn.conv2d(data, conv1_filter, [1, 1, 1, 1], padding='SAME')\n\t\thidden = tf.nn.relu(conv1 + conv1_biases)\n\t\thidden = tf.nn.max_pool(\n\t\t\thidden,\n\t\t\t[1,pooling_stride,pooling_stride,1],\n\t\t\t[1,pooling_stride,pooling_stride,1],\n\t\t\tpadding='SAME')\n\n\t\t# conv layer 2\n\t\tconv2 = tf.nn.conv2d(hidden, conv2_filter, [1, 1, 1, 1], padding='SAME')\n\t\thidden = tf.nn.relu(conv2 + conv2_biases)\n\t\thidden = tf.nn.max_pool(\n\t\t\thidden,\n\t\t\t[1,pooling_stride,pooling_stride,1],\n\t\t\t[1,pooling_stride,pooling_stride,1],\n\t\t\tpadding='SAME')\n\n\t\t# layer 3?\n\t\tshape = hidden.get_shape().as_list()\n\t\treshape = tf.reshape(hidden, [shape[0], shape[1] * shape[2] * shape[3]])\n\t\thidden = tf.nn.relu(tf.matmul(reshape, layer3_weights) + layer3_biases)\n\n\t\t# layer 4 / output layer?\n\t\t# Add a 50% dropout during training only. Dropout also scales\n\t\t# activations such that no rescaling is needed at evaluation time.\n\t\tif isTrain:\n\t\t\thidden = tf.nn.dropout(hidden, 0.5, seed=1234)\n\n\t\treturn tf.matmul(hidden, layer4_weights) + layer4_biases\n\n\t# Training computation.\n\tlogits = model(tf_train_dataset, True)\n\tloss = tf.reduce_mean(\n\t\ttf.nn.softmax_cross_entropy_with_logits(logits, tf_train_labels))\n\n\t# L2 regularization for the fully connected parameters\n\tregularization = tf.nn.l2_loss(layer3_weights) + tf.nn.l2_loss(layer3_biases) + \\\n\t tf.nn.l2_loss(layer4_weights) + tf.nn.l2_loss(layer4_biases)\n\tloss += 5e-4 * regularization\n\n # learning rate decay\n\tglobal_step = tf.Variable(0)\n\tlearning_rate = tf.train.exponential_decay(\n\t\t0.25,\n\t\tglobal_step * batch_size,\n\t\ttrain_labels.shape[0],\n\t\t0.95,\n\t\tstaircase=True\n\t)\n\n\t# Optimizer.\n\toptimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)\n\n\t# Predictions for the training, validation, and test data.\n\ttrain_prediction = tf.nn.softmax(logits)\n\tvalid_prediction = tf.nn.softmax(model(tf_valid_dataset))\n\ttest_prediction = tf.nn.softmax(model(tf_test_dataset))\n\nif __name__ == '__main__':\n\trun_session()\n","sub_path":"a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":6361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"600853987","text":"import os\nimport sys\nimport requests\nimport csv\nimport urllib.request\nimport datetime\nimport time\nimport pyperclip\nimport imghdr\nimport eel\nimport urllib3\nimport re\nimport json\nfrom subprocess import PIPE, Popen\nimport os\n\nimport common.desktop as desktop\n\nDATE_FORMAT = '%Y-%m-%d-%H-%M-%S'\n\n\ndef get_current_window_name():\n for i in Popen(['xprop', '-root'], stdout=PIPE).stdout:\n if '_NET_ACTIVE_WINDOW(WINDOW):' in i:\n for j in Popen(['xprop', '-id', i.split()[4]], stdout=PIPE).stdout:\n if 'WM_ICON_NAME(STRING)' in j:\n return j.split()[2][1:][:-1]\n\ndef toggle_foreground(app_name):\n app = get_current_window_name()\n if not app or not 0 is app.find(app_name):\n os.system('wmctrl -a ' + app_name)\n else:\n os.system('xwit -iconify -names ' + app_name)\n\ndef getAbsPath(): # 作業パス取得\n if getattr(sys, 'frozen', False):\n application_path = os.path.dirname(sys.executable)\n running_mode = 'Frozen/executable'\n else:\n try:\n app_full_path = os.path.realpath(__file__)\n application_path = os.path.dirname(app_full_path)\n running_mode = \"Non-interactive (e.g. 'python myapp.py')\"\n except NameError:\n application_path = os.getcwd()\n running_mode = 'Interactive'\n return application_path + os.sep\n\ndef now_string():\n return datetime.datetime.now().strftime(\"%Y/%m/%d %H:%M:%S\")\n\ndef log(txt):\n now = datetime.datetime.now()\n logStr = '[%s: %s] %s' % ('log', now.strftime(DATE_FORMAT), txt)\n # ログ出力\n with open(absPath + logFile, 'a', encoding='utf-8') as f:\n f.write(logStr + '\\n')\n print(logStr)\n\ndef printDate(text):\n print(datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S.%f') + \" \" + text) \n\ndef log(txt):\n now = datetime.datetime.now()\n logStr = '[%s: %s] %s' % ('log', now.strftime(DATE_FORMAT), txt)\n # ログ出力\n with open(absPath + logFile, 'a', encoding='utf-8') as f:\n f.write(logStr + '\\n')\n print(logStr)\n\ndef get_list(list,index=0):\n if len(list)>=1:\n return list[index]\n else:\n return \"\"\n\ndef readCSV(path,delimiter,skipHeader=True,encoding=\"utf-8\"):\n # 入力ファイルの読み込み\n try:\n with open(path , \"r\", encoding=encoding) as f:\n if skipHeader==True:\n h = next(csv.reader(f)) # ヘッダ行を読み飛ばし\n \n reader= csv.reader(f,delimiter=delimiter)\n #temp = [row for row in reader ]\n inputData=[]\n for d in reader:\n inputData.append(d)\n return inputData\n except FileNotFoundError as e: # FileNotFoundErrorは例外クラス名\n print(\"ファイルが見つかりません\", e)\n return None\n except Exception as e: # Exceptionは、それ以外の例外が発生した場合\n print(e)\n return None\n\ndef write_csv(filepath,data,delimiter=\",\",encoding=\"utf-8\",date_mode=True):\n date=\"\"\n if date_mode:\n date=now.strftime('%Y%m%d') \n # 現状のファイルの行数を取得\n file_data=readCSV(os.getcwd() + filepath + date + \".csv\",delimiter,encoding=encoding)\n # 出力項目リストを作成\n fieldnames=[]\n for key in data.keys():\n fieldnames.append(key)\n # 結果をCSV出力\n try:\n with open(os.getcwd() + filepath + date + \".csv\", \"a\",encoding=encoding,newline=\"\",errors=\"ignore\") as f:\n writer = csv.DictWriter(f, fieldnames=fieldnames,delimiter=delimiter)\n if file_data==None: # 0行ならヘッダを付与\n writer.writeheader()\n writer.writerow(data)\n except Exception as e:\n log(\"CSV書き込みエラー\")\n import traceback\n log(traceback.format_exc())\n pass\n\ndef write_text(filepath,data,mode,encoding=\"utf-8\"):\n with open(os.getcwd() + filepath ,mode=mode,encoding=encoding) as f:\n f.write(data + \"\\n\")\n\ndef read_text(filepath):\n with open(os.getcwd() + filepath,encoding=\"utf-8\") as f:\n return f.read() # ファイル終端まで全て読んだデータを返す\n \ndef write_csv_array(filepath,data,mode=\"w\",delimiter=\",\",encoding=\"utf-8\"):\n with open(os.getcwd() + \"\\\\\" + filepath , mode=mode) as file:\n writer = csv.writer(file, lineterminator='\\n',delimiter=delimiter)\n writer.writerows(data)\n\n\ndef read_csv_dic(path,delimiter,encoding):\n # 入力ファイルの読み込み\n try:\n with open(os.getcwd() + \"\\\\\" + path, \"r\", encoding=encoding) as f:\n csv_header=next(csv.reader(f,delimiter=delimiter)) # ヘッダ行の設定\n reader = csv.DictReader(f, csv_header,delimiter=delimiter)\n data = [row for row in reader]\n return data\n except FileNotFoundError as e: # FileNotFoundErrorは例外クラス名\n log(\"エラー:ファ��ルが見つかりません\")\n sys.exit\n except Exception as e: # Exceptionは、それ以外の例外が発生した場合\n print(e)\n sys.exit\n\ndef write_json(path,json_list):\n # JSON ファイルへの書き込み\n with open(path, 'w') as f:\n json.dump(json_list, f)\n\ndef randomSleep(begin,end):\n if begin==\"\":\n begin=0\n if end==\"\":\n end=0\n time.sleep(random.randint(int(begin),int(end))) \n\ndef getDateDelta(delta):\n now=datetime.datetime.now()\n return now+datetime.timedelta(days=int(delta))\n\ndef img_download(img_src,img_save_path):\n # ダウンロード\n request_methods = urllib3.PoolManager()\n responce = request_methods.request('GET', img_src)\n # エラーの場合は終了\n if not responce.status==200:\n log(\"画像ファイルダウンロードエラー\")\n log(img_src)\n return \"\"\n # 拡張子取得\n ext=imghdr.what(None,h=responce.data)\n # 取得できないタイプの場合、別の方法で取得\n if ext is None:\n ext = responce.headers['Content-Type'].split('/')[1]\n # 拡張子変換\n if ext==\"jpeg\":\n ext=\"jpg\"\n elif ext==\"svg+xml\":\n ext=\"svg\"\n # 画像保存\n result_path=img_save_path + \".\" + ext\n with open(os.getcwd() + result_path, \"wb\") as f: \n f.write(responce.data)\n\n return result_path\n\ndef trim(text):\n return text.replace(\"\\n\",\"\").replace(\" \",\"\").replace(\" \",\"\")\n\ndef get_html(url, headers=\"\",referer='',apiFlg=False): # GET処理\n #headers = getHeader(referer, apiFlg)\n html = requests.get(url, headers=headers)\n html.encoding = html.apparent_encoding # 文字化け対応\n return html\n\ndef get_doller_yen_rate():\n json_data=get_json(\"https://api.exchangeratesapi.io/latest?base=USD\")\n return float(json.dumps(json_data[\"rates\"][\"JPY\"]))\n\ndef get_yen_krw_rate():\n json_data=get_json(\"https://api.exchangeratesapi.io/latest?base=JPY\")\n return float(json.dumps(json_data[\"rates\"][\"KRW\"]))\n \ndef get_json(url, referer=''): # GET処理\n headers= {\"content-type\": \"application/json\"}\n respons = requests.get(url, headers=headers)\n return respons.json()\n\n# 共通変数\nnow = datetime.datetime.now()\nfileDate = now.strftime(DATE_FORMAT)\nlogFile = \"\\\\log\\\\log_\" + fileDate + '.txt'\nabsPath = getAbsPath()","sub_path":"common/my_util.py","file_name":"my_util.py","file_ext":"py","file_size_in_byte":6976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"399235387","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jan 19 15:40:02 2018\n\nCode to look at results from convolution\n\n@author: ppxee\n\"\"\"\n\n\n### Import required libraries ###\nimport matplotlib.pyplot as plt #for plotting\nfrom astropy.io import fits #for handling fits\n#from astropy.table import Table #for handling tables\nimport numpy as np #for handling arrays\n#import math\nfrom astropy.stats import median_absolute_deviation\nimport vari_funcs #my module to help run code neatly\nfrom matplotlib.colors import LogNorm\nplt.close('all') #close any open plots\n\ndef singlesigmasq(flux, baseerr):\n ''' Function that calculates the excess varience value for each row in an \n array \n Inputs:\n flux = array of fluxes from objects in a number of epochs \n baseerr = array of errors that the mean error should be calculated from\n Output:\n sig = array of excess variance values for every object '''\n avgflux = np.nanmean(flux)\n N = np.size(flux)\n sig = [((flux - avgflux)**2 - (baseerr)**2)]# \n sigsum = np.nansum(sig)\n normsig = sigsum/(N)\n return normsig\n\nalldata = fits.open('mag_flux_tables/stars_mag_flux_table.fits')[1].data\nalldataconv = fits.open('mag_flux_tables/stars_mag_flux_table_1519match.fits')[1].data\ncolname = 'FWHM_WORLD_'\nsemesters = ['05B', '06B', '07B', '08B', '09B', '10B', '11B', '12B']\ngoodstarIDs = np.load('PSF_IDs_original.npy')\n\n#Extract the flux radii and remove negative values\navgfwhm = np.zeros(8)\navgfwhmconv = np.zeros(8)\n\n#mask = np.isin(alldata['DR11_IDs'], goodstarIDs)\n#alldata = alldata[mask]\n#mask = np.isin(alldataconv['DR11_IDs'], goodstarIDs)\n#alldataconv = alldataconv[mask]\n\nfor n, sem in enumerate(semesters):\n colnames = colname+sem\n \n # remove stars in old table\n mag = alldata['MAG_APER_'+sem][:,4]\n mask1 = mag > 15 #removes saturated\n mask2 = mag < 19 #removes very faint stars\n mask = mask1 * mask2\n alldata = alldata[mask]\n avgfwhm[n] = np.median(alldata[colnames]) * 3600\n \n # remove stars in new table\n mag = alldataconv['MAG_APER_'+sem][:,4]\n mask1 = mag > 15 #removes saturated\n mask2 = mag < 19 #removes very faint stars\n mask = mask1 * mask2\n alldataconv = alldataconv[mask]\n avgfwhmconv[n] = np.median(alldataconv[colnames]) * 3600\n \n# Create flux stack\nallflux = vari_funcs.flux5_stacks(alldata)\nallfluxconv = vari_funcs.flux5_stacks(alldataconv)\n\n#allflux, alldata = vari_funcs.semfluxlim(allflux, alldata)\n#allfluxconv, alldataconv = vari_funcs.semfluxlim(allfluxconv, alldataconv)\n\nallflux, alldata = vari_funcs.noneg(allflux, alldata)\nallfluxconv, alldataconv = vari_funcs.noneg(allfluxconv, alldataconv)\n\nallfluxerr = vari_funcs.fluxerr1_stacks(alldata)\nallfluxconverr = vari_funcs.fluxerr1_stacks(alldataconv)\n\n#depths = np.load('fluxdepths.npy')\n#allfluxerr = np.zeros(np.shape(allflux)) + depths[None,:]\n#depthsconv = np.load('fluxdepthsconv_PSF.npy')\n#allfluxconverr = np.zeros(np.shape(allfluxconv)) + depthsconv[None,:]\n\n\n# Normalise\nallflux, allfluxerr = vari_funcs.normalise_flux_and_errors(allflux, allfluxerr)\nallfluxconv, allfluxconverr = vari_funcs.normalise_flux_and_errors(allfluxconv, allfluxconverr)\n\n## Find FWHM values\n#avgfwhm = np.array([np.median(alldata['FWHM_WORLD_05B']), \n# np.median(alldata['FWHM_WORLD_06B']), \n# np.median(alldata['FWHM_WORLD_07B']), \n# np.median(alldata['FWHM_WORLD_08B']), \n# np.median(alldata['FWHM_WORLD_09B']), \n# np.median(alldata['FWHM_WORLD_10B']), \n# np.median(alldata['FWHM_WORLD_11B']), \n# np.median(alldata['FWHM_WORLD_12B'])]) *3600\n#\n#avgfwhmconv = np.array([np.median(alldataconv['FWHM_WORLD_05B']), \n# np.median(alldataconv['FWHM_WORLD_06B']), \n# np.median(alldataconv['FWHM_WORLD_07B']), \n# np.median(alldataconv['FWHM_WORLD_08B']), \n# np.median(alldataconv['FWHM_WORLD_09B']), \n# np.median(alldataconv['FWHM_WORLD_10B']), \n# np.median(alldataconv['FWHM_WORLD_11B']), \n# np.median(alldataconv['FWHM_WORLD_12B'])]) *3600\n\n### find and plot averages ###\n#plot FWHM curve before\nvari_funcs.avg_lightcurve(avgfwhm, shape='s', size=9)\n#plt.title('Median FWHM of stars before convolution')\n#plt.ylim(0.73, 0.9)\nplt.ylabel('FWHM (arcsec)')\n#plt.savefig('plots/Lightcurves/FWHMbefore')\n\n#plot FWHM curve after\nvari_funcs.avg_lightcurve(avgfwhmconv)\n#plt.title('Median FWHM')\n#plt.ylim(0.74, 1.5)\nplt.ylabel('FWHM (arcsec)')\nplt.xlabel('Semester')\nplt.title('')\n#plt.savefig('plots/Lightcurves/FWHMafter')\n\n#fwhmmadbefore = median_absolute_deviation(avgfwhm)\n#plt.text(0.7, 0.75, 'MAD before = '+str(round(fwhmmadbefore,4)))\n#fwhmmadafter = median_absolute_deviation(avgfwhmconv)\n#plt.text(0.7, 0.74, 'MAD after = '+str(round(fwhmmadafter,4)))\n\nplt.figure()\n#plot flux curve before\n#plt.figure()\navgflux = np.nanmedian(allflux, axis=0)\navgerr = np.nanmedian(allfluxerr, axis=0)\nvari_funcs.avg_lightcurve(avgflux, avgerr)\nplt.title('Normalised Flux of Stars')\n#plt.ylim(0.95, 1.06)\nplt.ylabel('Normalised Flux')\n\n#plot flux curve after\navgfluxconv = np.median(allfluxconv, axis=0)\navgerrconv = np.median(allfluxconverr, axis=0)\nvari_funcs.avg_lightcurve(avgfluxconv, avgerrconv)\nplt.title('Normalised Flux of Stars')\nplt.ylabel('Normalised Flux')\n#plt.ylim(0.95, 1.06)\n\n\n#flaxvarbefore = singlesigmasq(avgflux, avgerr)\n#plt.text(5, 0.989, 'Var before = %.2E' % flaxvarbefore)\n#fluxvarafter = singlesigmasq(avgfluxconv, avgerrconv)\n#plt.text(5, 0.986, 'Var after = %.2E' % fluxvarafter)","sub_path":"invextconv.py","file_name":"invextconv.py","file_ext":"py","file_size_in_byte":5672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"276335081","text":"#Print anagrams together in Python using List and Dictionary \n\ndef Anagrams(inp): \n\t\n\tdict = {} \n\tfor i in inp: \n\t\tkey = ''.join(sorted(i)) \n\t\n\t\tif key in dict.keys(): \n\t\t\tdict[key].append(i) \n\t\telse: \n\t\t\tdict[key] = [] \n\t\t\tdict[key].append(i) \n\tana= \"\" \n\tfor key,value in dict.items(): \n\t\tana = ana + ' '.join(value) + ' '\n\n\treturn ana \ninp = [x for x in input(\"Enter the list items : \").split()] \nprint (Anagrams(inp)) ","sub_path":"Personel/Tejaswini/Assessment/16-March/Prog6.py","file_name":"Prog6.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"332276266","text":"import json\nimport requests\nimport boto3\nfrom requests_aws4auth import AWS4Auth\n\ndef lambda_handler(event, context):\n lex_client = boto3.client('lex-runtime')\n lex_response = lex_client.post_text(\n botName='photobot',\n botAlias='photobot',\n userId='user1',\n inputText=event[\"queryStringParameters\"][\"q\"],\n )\n\n print(lex_response)\n\n slots = lex_response[\"slots\"]\n k1 = slots[\"keya\"]\n k2 = slots[\"keyb\"]\n print(slots)\n\n es_host = 'search-search-photos-2lt6xlp5nykp6766sn77hsktxu.us-east-1.es.amazonaws.com'\n index = 'photos'\n url = 'https://' + es_host + '/' + index + '/_search/'\n\n region = 'us-east-1' # For example, us-west-1\n service = 'es'\n credentials = boto3.Session().get_credentials()\n awsauth = AWS4Auth(credentials.access_key, credentials.secret_key, region, service, session_token=credentials.token)\n\n headers = {\"Content-Type\": \"application/json\"}\n\n if k1 is not None:\n label_value = k1\n\n if k2 is not None:\n label_value += \" AND \" + k2\n\n query = {\n \"query\": {\n \"match\": {\"labels\": label_value}\n }\n }\n req = requests.get(url, auth=awsauth, headers=headers, data=json.dumps(query))\n r_dict = json.loads(req.text)\n print(r_dict)\n result_list = r_dict[\"hits\"][\"hits\"]\n image_url_list = []\n\n response = {}\n response[\"results\"] = []\n if result_list is not None:\n for result in result_list:\n response_object = {}\n s3_url = \"https://\" + result[\"_source\"][\"bucket\"] + \".s3.amazonaws.com/\" + result[\"_source\"][\"objectKey\"]\n response_object[\"url\"] = s3_url\n response_object[\"labels\"] = result[\"_source\"][\"labels\"]\n response[\"results\"].append(response_object)\n\n print(response)\n\n return {\n 'statusCode': 200,\n 'headers': {\n \"Access-Control-Allow-Headers\": \"*\",\n \"Access-Control-Allow-Origin\": \"*\",\n \"Access-Control-Allow-Methods\": \"OPTIONS,POST,GET\"\n },\n 'body': json.dumps(response)\n }\n","sub_path":"old-lambda/search-photos-old.py","file_name":"search-photos-old.py","file_ext":"py","file_size_in_byte":2073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"110426110","text":"from tensorflow.keras.preprocessing.image import ImageDataGenerator\nimport tensorflow as tf\nfrom tensorflow.keras import layers, models, backend\nfrom keras.utils import plot_model\nfrom tensorflow.keras.applications.vgg16 import VGG16\nfrom tensorflow.keras.applications.resnet_v2 import ResNet50V2\nfrom tensorflow.keras.applications.inception_v3 import InceptionV3\nfrom sklearn.metrics import confusion_matrix\nimport numpy as np\nfrom numba import cuda\nfrom imgSrc import learnDir, __main as main, covid_classes\nimport os\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport utils\n\nphysical_devices = tf.config.list_physical_devices()\ntf.config.set_visible_devices([], 'GPU')\n\nclass convModel:\n def __init__(self, trainPath='proxy'):\n self.__name = trainPath\n self.__model = models.Sequential()\n self.__trainPath = os.path.join(learnDir, 'train', trainPath)\n self.__testPath = os.path.join(learnDir, 'test')\n self.__modelPath = os.path.join(main, 'models')\n self.__modelType = ''\n\n self.__history = None\n self.__train_gen = ImageDataGenerator()\n self.__val_gen = ImageDataGenerator()\n self.__gpu = cuda.get_current_device()\n self.__batchSize = 64\n self.__seed = 101\n self.__xSize = 224\n self.__ySize = 224\n\n def getModelType(self):\n return self.__modelType\n\n def getPaths(self):\n return 'train: ' + self.__trainPath + ', test: ' + self.__testPath + ', save: ' + self.__modelPath\n\n def __compileModel(self, optimizer='adam', loss='categorical_crossentropy', metrics=None):\n print('compiling')\n if metrics is None:\n metrics = ['accuracy']\n\n self.__model.compile(optimizer=optimizer,\n loss=loss,\n metrics=metrics)\n\n def addLayers(self, Layers, optimizer='adam', loss='categorical_crossentropy', metrics=None):\n backend.clear_session()\n\n for layer in Layers:\n self.__model.add(layer)\n self.__compileModel()\n\n def getAccuracy(self):\n valid_datagen = ImageDataGenerator()\n valid_generator = valid_datagen.flow_from_directory(\n directory=self.__testPath,\n target_size=(self.__xSize, self.__ySize),\n color_mode=\"rgb\",\n batch_size=self.__batchSize,\n class_mode=\"categorical\",\n shuffle=True,\n seed=42\n )\n return self.__model.evaluate_generator(valid_generator)\n\n def getConfusionMatrix(self, savePath=None):\n valid_datagen = ImageDataGenerator()\n valid_generator = valid_datagen.flow_from_directory(\n directory=self.__testPath,\n target_size=(self.__xSize, self.__ySize),\n color_mode=\"rgb\",\n batch_size=self.__batchSize,\n class_mode=\"categorical\",\n shuffle=True,\n seed=42\n )\n Y_pred = self.__model.predict_generator(valid_generator)\n y_pred = np.argmax(Y_pred, axis=1)\n\n matrix = confusion_matrix(valid_generator.classes, y_pred)\n classes = []\n for c in covid_classes:\n classes.append(c[1:])\n\n ax = plt.subplot()\n sns.heatmap(matrix, annot=True, ax=ax, cmap='Oranges', annot_kws={\"size\": 16})\n ax.set_xlabel('Predicted labels')\n ax.set_ylabel('True labels')\n ax.set_title(self.__name + ' confusion matrix')\n ax.xaxis.set_ticklabels(classes)\n ax.yaxis.set_ticklabels(classes)\n ax.tick_params(axis='y', rotation=45)\n\n if savePath is None:\n plt.show()\n else:\n savePath = os.path.join(savePath, self.__name)\n if not os.path.exists(savePath):\n os.mkdir(savePath)\n plt.tight_layout()\n # print(savePath)\n plt.savefig(os.path.join(savePath, self.__name + \"_\" + self.__modelType + '_confusion_matrix.png'), dpi=100)\n plt.close()\n\n def trainModel(self, trainPath=None, validationPath=None, epohs=10):\n\n if trainPath is None:\n trainPath = self.__trainPath\n if validationPath is None:\n validationPath = self.__testPath\n\n train_datagen = ImageDataGenerator(horizontal_flip=True, rotation_range=90, brightness_range=[0.2, 1.0])\n valid_datagen = ImageDataGenerator(horizontal_flip=True, rotation_range=90, brightness_range=[0.2, 1.0])\n\n train_generator = train_datagen.flow_from_directory(\n directory=trainPath,\n target_size=(self.__xSize, self.__ySize),\n color_mode=\"rgb\",\n batch_size=self.__batchSize,\n class_mode=\"categorical\",\n shuffle=True,\n seed=42\n )\n\n valid_generator = valid_datagen.flow_from_directory(\n directory=validationPath,\n target_size=(self.__xSize, self.__ySize),\n color_mode=\"rgb\",\n batch_size=self.__batchSize,\n class_mode=\"categorical\",\n shuffle=True,\n seed=42\n )\n\n STEP_SIZE_TRAIN = train_generator.n // train_generator.batch_size\n STEP_SIZE_VALID = valid_generator.n // valid_generator.batch_size\n self.__history = self.__model.fit_generator(generator=train_generator,\n steps_per_epoch=STEP_SIZE_TRAIN,\n validation_data=valid_generator,\n validation_steps=STEP_SIZE_VALID,\n epochs=epohs\n )\n\n def __addTop(self, x, nclasses=3):\n\n x = layers.GlobalAveragePooling2D()(x)\n x = layers.Dense(1000, activation='relu')(x)\n x = layers.Dropout(0.3)(x)\n x = layers.Dense(500, activation='relu')(x)\n x = layers.Dropout(0.3)(x)\n x = layers.Dense(200, activation='relu')(x)\n return layers.Dense(nclasses, activation='softmax')(x)\n\n def vggNet(self, nclasses=3, summary=True):\n\n vgg = VGG16(weights='imagenet', include_top=False, input_shape=(self.__xSize, self.__ySize, 3))\n vgg.trainable = False\n input_l = layers.Input(shape=(self.__xSize, self.__ySize, 3))\n x = vgg(input_l, training=False)\n x = layers.GlobalAveragePooling2D()(x)\n x = layers.Dense(1000, activation='relu')(x)\n x = layers.Dropout(0.3)(x)\n x = layers.Dense(500, activation='relu')(x)\n x = layers.Dropout(0.3)(x)\n x = layers.Dense(200, activation='relu')(x)\n predict = layers.Dense(nclasses, activation='softmax')(x)\n self.__model = tf.keras.Model(input_l, predict)\n\n if summary:\n self.__model.summary()\n\n self.__model.compile(optimizer='adam',\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n self.__modelType = 'vggNet'\n\n def resNet(self, nclasses=3, summary=True):\n res = ResNet50V2(weights='imagenet', include_top=False, input_shape=(self.__xSize, self.__ySize, 3))\n res.trainable = False\n input_l = layers.Input(shape=(self.__xSize, self.__ySize, 3))\n x = res(input_l, training=False)\n predict = self.__addTop(x, nclasses)\n self.__model = tf.keras.Model(input_l, predict)\n if summary:\n self.__model.summary()\n\n self.__compileModel()\n self.__modelType = 'ResNet'\n\n def inception(self, nclasses=3, summary=True):\n inc = InceptionV3(weights='imagenet', include_top=False, input_shape=(self.__xSize, self.__ySize, 3))\n inc.trainable = False\n input_l = layers.Input(shape=(self.__xSize, self.__ySize, 3))\n x = inc(input_l, training=False)\n predict = self.__addTop(x, nclasses)\n self.__model = tf.keras.Model(input_l, predict)\n if summary:\n self.__model.summary()\n\n self.__compileModel()\n self.__modelType = 'InceptionNet'\n\n def saveModelToFile(self, name=''):\n self.__model.save(os.path.join(self.__modelPath, name), overwrite=True, save_format='tf')\n\n def loadModelFromFile(self, name='', summary=True):\n print('Loading model')\n self.__model = tf.keras.models.load_model(os.path.join(self.__modelPath, name))\n self.__unsetTrainable()\n if summary:\n self.__model.summary()\n self.__compileModel()\n self.__modelType=name\n\n def __unsetTrainable(self):\n self.__model.layers[1].trainable = False\n\n def getJSON(self):\n return self.__model.to_json()\n\n def getWeights(self):\n return self.__model.get_weights()\n\n def getTrainableWeights(self):\n return self.__model.trainable_weights\n\n def setJSON(self, json):\n \"\"\"\n\n @:param json str: JSON string with layer structure of class neural net\n\n \"\"\"\n self.__model = models.model_from_json(json)\n\n def setWeights(self, weights):\n \"\"\"\n\n @:param weights np.array: array o new weights to be applied to model.\n \"\"\"\n self.__model.set_weights(weights)\n self.__compileModel()\n\n def setTrainableWeights(self, weights):\n \"\"\"@setTrainableWeights\n @:param weights np.array: numpy array or list of tensor variables. These are only trainable weights.\n \"\"\"\n non_trainable = self.__model.non_trainable_weights\n all_weights = []\n for i in non_trainable:\n all_weights.append(i.numpy())\n if type(weights).__module__ == np.__name__:\n for i in weights:\n all_weights.append(i)\n else:\n for i in weights:\n all_weights.append(i.numpy())\n\n self.__model.set_weights(np.array(all_weights))\n\n def learningCurves(self, savePath=None):\n \"\"\"@learningCurves\n\n @:param savePath str: learning curves save path. If save path is None then plot is displayed.\n \"\"\"\n plt.title(self.__name + ' learning curves')\n plt.xlabel('Epoch')\n plt.ylabel('Cross Entropy')\n plt.plot(self.__history.history['loss'], label='train')\n plt.plot(self.__history.history['val_loss'], label='val')\n plt.legend()\n if savePath is None:\n plt.show()\n elif savePath is not None:\n plt.savefig(os.path.join(savePath, self.__name, self.__name + \"_\" + self.__modelType + '_learning_curves.png'))\n plt.close()\n\n def setNet(self, netType, summary=True):\n \"\"\"@setNet\n\n Method that create transfer learning net structure for class instance neural Net.\n\n @:param netType str: neural net type.\n Currently supported:\n 'vgg' - VGG Net,\n 'res' - ResNet,\n 'inc' - Inception Net\n :param summary:\n :return:\n \"\"\"\n if netType == 'vgg':\n self.vggNet(summary=summary)\n elif netType == 'inc':\n self.inception(summary=summary)\n elif netType == 'res':\n self.resNet(summary=summary)\n\n def getModelGraph(self, savepath='', filename=None):\n \"\"\"\n\n :param savepath: string - path to location where you want to save models graph without filename.\n :param filename: string - name of result image. If None then model name specified in constructor is used.\n :return:\n \"\"\"\n\n utils.validateType(savepath, str)\n if filename is not None:\n utils.validateType(filename, str)\n if filename[-4:] != '.png':\n filename += '.png'\n else:\n filename = self.__name + '.png'\n\n plot_model(self.__model, os.path.join(savepath, filename), show_dtype=True, show_layer_names=True)\n\n def getLayersInfo(self, savepath='', filename=None):\n \"\"\"\n\n :param savepath: string - path to location where you want to save models graph without filename.\n :param filename: string - name of result image. If None then model name specified in constructor is used.\n :return:\n \"\"\"\n\n ext = '.txt'\n utils.validateType(savepath, str)\n if filename is not None:\n filename = self.__addExtension(filename, ext)\n else:\n filename = self.__modelType + '_structure' + ext\n\n\n if savepath != '':\n self.__checkPath(savepath)\n with open(os.path.join(savepath, filename), mode='w') as file:\n # for layer in self.__model.layers:\n # file.write(layer.name + ' input shape: ' + str(layer.input_shape) + \" output shape: \" + str(layer.output_shape) + '\\n')\n def printFn(line):\n file.write(line + '\\n')\n self.__model.summary(print_fn=printFn)\n file.close()\n\n def __checkPath(self, savePath):\n if not os.path.exists(savePath):\n os.mkdir(savePath)\n\n def __addExtension(self, filename, ext):\n \"\"\"\n\n :param filename: string - name of file\n :param ext: string extension to be added\n :return: string filename with extension\n \"\"\"\n\n utils.validateType(filename, str)\n if filename[-4:] != ext:\n filename += ext\n return filename\n\n def predict(self, image):\n \"\"\"\n\n @param PIL image: PIL image to predict\n :return: predicted Class\n \"\"\"\n image = image.convert('RGB')\n w, h = image.size\n\n if w != self.__xSize or h != self.__ySize:\n image = image.resize((self.__xSize, self.__ySize))\n\n image = np.array(image)\n image = image[None, ...]\n\n prediction = self.__model.predict(image)\n prediction = np.argmax(prediction)\n\n return covid_classes[prediction][1:]\n","sub_path":"convNet1.py","file_name":"convNet1.py","file_ext":"py","file_size_in_byte":13820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"529831166","text":"#18.Cloning or copying a list\n#a\nl=[\"abc\", \"pqr\", \"rst\", 12, 14]\nl2=l.copy()\nprint(l2)\n\n#b\nl3=[]\nfor i in range(len(l2)):\n l3.append(l2[i])\nprint(l3)\n\n#c\nl4=list(l3)\nprint(l4)","sub_path":"18.py","file_name":"18.py","file_ext":"py","file_size_in_byte":178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"57702428","text":"# 定义类、创建对象\n\n# **要求:**\n\n# 1. python中如何定义一个类\n\n# 2. 类(class)由哪三个部分构成\n# 3. 类名的命名规则是什么\n# 4. python中如何通过类创建对象,请用代码进行说明\n# 5. 如何在类中定义一个方法,请用代码进行说明\n# 6. 定义一个People类,使用People类,创建一个mayun对象后,添加company属性,值是\"阿里巴巴\";创建一个wangjianlin对象,添加company属性,值是\"万达集团\"\nclass People:\n def addcompany(self,company):\n self.company = company\n\n\nmayun = People()\nmayun.addcompany('阿里巴巴')\nwangjianlin = People()\nwangjianlin.addcompany('万达集团')\n# 7. 定义一个水果类,然后通过水果类,创建苹果对象、橘子对象、西瓜对象并分别添加上颜色属性\nclass Fruit:\n def setcolor(self,color):\n self.color = color\napple = Fruit()\napple.setcolor('red')\norange = Fruit()\norange.setcolor('Orange')\nwatermelon = Fruit()\nwatermelon.setcolor('Green')\n# 8. 定义一个汽车类,并在类中定义一个move方法,然后分别创建BMW_X9、AUDI_A9对象,并添加颜色、马力、型号等属性,然后分别打印出属性值、调用move方法\n\n#\n# **提示:**\n#\n# 1. 可以在通过类创建出对象后,再为对象添加属性:对象名.属性名 = 值 的方式添加\n# 2. 调用对象的方法和调用普通函数的一样,使用()来实现调用,只不过调用方法需要使用:对象名.方法名()来进行调用","sub_path":"week3/day2/exercise.py","file_name":"exercise.py","file_ext":"py","file_size_in_byte":1506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"516924384","text":"import pprint\n\nfrom saml2 import BINDING_HTTP_REDIRECT, BINDING_HTTP_POST\nfrom saml2.client import Saml2Client\nfrom saml2.metadata import entity_descriptor\nfrom saml2.response import LogoutResponse\nfrom saml2.saml import AuthnContextClassRef\nfrom saml2.samlp import RequestedAuthnContext\n\n\nfrom pyramid.httpexceptions import (HTTPFound, HTTPBadRequest, HTTPNotFound,\n HTTPUnauthorized, HTTPInternalServerError,\n HTTPOk)\nfrom pyramid.response import Response\nfrom pyramid.renderers import render_to_response, render\nfrom pyramid.security import authenticated_userid\nfrom pyramid.view import view_config, forbidden_view_config\n\nfrom eduiddashboard.saml2.utils import get_saml2_config, get_location\nfrom eduiddashboard.saml2.auth import authenticate, login, logout\nfrom eduiddashboard.saml2.cache import (IdentityCache, OutstandingQueriesCache,\n StateCache, )\nfrom eduiddashboard.saml2.acs_actions import (acs_action,\n schedule_action,\n get_action)\n\nfrom eduiddashboard import log\n\n\nclass HTTPXRelocate(HTTPOk):\n\n empty_body = True\n\n def __init__(self, new_location, **kwargs):\n super(HTTPXRelocate, self).__init__('', headers=[\n ('X-Relocate', new_location),\n ('Content-Type', 'text/html; charset=UTF-8'),\n ])\n\n\ndef _set_name_id(session, name_id):\n \"\"\"\n Store SAML2 name id info.\n\n :param session: The current session object\n :param name_id: saml2.saml.NameID object\n :return: None\n\n :type name_id: saml2.saml.NameID\n \"\"\"\n session['_saml2_session_name_id'] = name_id\n\n\ndef _get_name_id(session):\n \"\"\"\n Get the SAML2 NameID of the currently logged in user.\n\n :param session: The current session object\n :return: NameID\n :rtype: saml2.saml.NameID | None\n \"\"\"\n try:\n return session['_saml2_session_name_id']\n except KeyError:\n return None\n\n\n@forbidden_view_config()\n@view_config(route_name='saml2-forbidden-view')\ndef forbidden_view(context, request):\n \"\"\"\n View to trap all Forbidden errors and redirect any not logged in users to the login page.\n\n For logged in users, a template is rendered - this template probably won't be seen\n by the user though since there is Javascript handling 401 errors from form posts\n showing a small pop-up error message instead.\n :param context: Some object like HTTPForbidden()\n :param request: Request() object\n :return:\n \"\"\"\n user = authenticated_userid(request)\n if user:\n # Return a plain forbbiden page\n try:\n reason = context.explanation\n except AttributeError:\n reason = 'unknown'\n log.debug(\"User {!r} tripped Forbidden view, request {!r}, reason {!r}\".format(\n user, request, reason))\n response = Response(render('templates/forbidden.jinja2', {}))\n response.status_int = 401\n return response\n\n loginurl = request.route_url('saml2-login',\n _query=(('next', request.path),))\n if not request.is_xhr:\n return HTTPFound(location=loginurl)\n else:\n return HTTPXRelocate(loginurl)\n\n\n@acs_action('login-action')\ndef login_action(request, session_info, user):\n\n headers = login(request, session_info, user)\n _set_name_id(request.session, session_info['name_id'])\n\n # redirect the user to the view where he came from\n relay_state = request.POST.get('RelayState', '/')\n log.debug('Redirecting to the RelayState: ' + relay_state)\n return HTTPFound(location=relay_state, headers=headers)\n\n\n@view_config(route_name='saml2-login')\ndef login_view(request):\n login_redirect_url = request.registry.settings.get(\n 'saml2.login_redirect_url', '/')\n\n came_from = request.GET.get('next', login_redirect_url)\n\n if authenticated_userid(request):\n return HTTPFound(location=came_from)\n\n selected_idp = request.GET.get('idp', None)\n if selected_idp is not None:\n request.session['selected_idp'] = selected_idp\n\n idps = request.saml2_config.getattr('idp')\n if selected_idp is None and len(idps) > 1:\n log.debug('A discovery process is needed')\n\n return render_to_response('templates/wayf.jinja2', {\n 'available_idps': idps.items(),\n 'came_from': came_from,\n 'login_url': request.route_url('saml2-login'),\n })\n\n result = get_authn_request(request, came_from, selected_idp)\n\n schedule_action(request.session, 'login-action')\n\n log.debug('Redirecting the user to the IdP')\n if not request.is_xhr:\n return HTTPFound(location=get_location(result))\n else:\n loginurl = request.route_url('saml2-login',\n _query=(('next', request.path),))\n return HTTPXRelocate(loginurl)\n\n\n@view_config(route_name='saml2-acs', request_method='POST')\ndef assertion_consumer_service(request):\n ''' '''\n action = get_action(request.session)\n\n if 'SAMLResponse' not in request.POST:\n raise HTTPBadRequest(\"Couldn't find 'SAMLResponse' in POST data.\")\n xmlstr = request.POST['SAMLResponse']\n client = Saml2Client(request.saml2_config,\n identity_cache=IdentityCache(request.session))\n\n oq_cache = OutstandingQueriesCache(request.session)\n outstanding_queries = oq_cache.outstanding_queries()\n\n try:\n # process the authentication response\n response = client.parse_authn_request_response(xmlstr, BINDING_HTTP_POST,\n outstanding_queries)\n except AssertionError:\n log.error('SAML response is not verified')\n raise HTTPBadRequest(\n \"\"\"SAML response is not verified. May be caused by the response\n was not issued at a reasonable time or the SAML status is not ok.\n Check the IDP datetime setup\"\"\")\n\n if response is None:\n log.error('SAML response is None')\n raise HTTPBadRequest(\n \"SAML response has errors. Please check the logs\")\n\n session_id = response.session_id()\n oq_cache.delete(session_id)\n\n # authenticate the remote user\n session_info = response.session_info()\n\n log.debug('Trying to locate the user authenticated by the IdP')\n log.debug('Session info:\\n{!s}\\n\\n'.format(pprint.pformat(session_info)))\n\n user = authenticate(request, session_info)\n if user is None:\n log.error('Could not find the user identified by the IdP')\n raise HTTPUnauthorized(\"Access not authorized\")\n\n return action(request, session_info, user)\n\n\n@view_config(route_name='saml2-echo-attributes')\ndef echo_attributes(request):\n raise NotImplementedError\n\n\n@view_config(route_name='saml2-logout')\ndef logout_view(request):\n \"\"\"SAML Logout Request initiator\n\n This view initiates the SAML2 Logout request\n using the pysaml2 library to create the LogoutRequest.\n \"\"\"\n log.debug('Logout process started')\n state = StateCache(request.session)\n\n client = Saml2Client(request.saml2_config, state_cache=state,\n identity_cache=IdentityCache(request.session))\n subject_id = _get_name_id(request.session)\n if subject_id is None:\n log.warning(\n 'The session does not contains the subject id for user ')\n location = request.registry.settings.get('saml2.logout_redirect_url')\n\n else:\n logouts = client.global_logout(subject_id)\n loresponse = logouts.values()[0]\n # loresponse is a dict for REDIRECT binding, and LogoutResponse for SOAP binding\n if isinstance(loresponse, LogoutResponse):\n if loresponse.status_ok():\n log.debug('Performing local logout of {!r}'.format(authenticated_userid(request)))\n headers = logout(request)\n location = request.registry.settings.get('saml2.logout_redirect_url')\n return HTTPFound(location=location, headers=headers)\n else:\n return HTTPInternalServerError('Logout failed')\n headers_tuple = loresponse[1]['headers']\n location = headers_tuple[0][1]\n\n state.sync()\n log.debug('Redirecting to {!r} to continue the logout process'.format(location))\n return HTTPFound(location=location)\n\n\n@view_config(route_name='saml2-logout-service',\n renderer='templates/saml2-logout.jinja2')\ndef logout_service(request):\n \"\"\"SAML Logout Response endpoint\n\n The IdP will send the logout response to this view,\n which will process it with pysaml2 help and log the user\n out.\n Note that the IdP can request a logout even when\n we didn't initiate the process as a single logout\n request started by another SP.\n \"\"\"\n log.debug('Logout service started')\n\n state = StateCache(request.session)\n client = Saml2Client(request.saml2_config, state_cache=state,\n identity_cache=IdentityCache(request.session))\n settings = request.registry.settings\n\n logout_redirect_url = settings.get('saml2.logout_redirect_url')\n next_page = request.session.get('next_page', logout_redirect_url)\n next_page = request.GET.get('next_page', next_page)\n\n if 'SAMLResponse' in request.GET: # we started the logout\n log.debug('Receiving a logout response from the IdP')\n response = client.parse_logout_request_response(\n request.GET['SAMLResponse'],\n BINDING_HTTP_REDIRECT\n )\n state.sync()\n if response and response.status_ok():\n headers = logout(request)\n return HTTPFound(next_page, headers=headers)\n else:\n log.error('Unknown error during the logout')\n return HTTPBadRequest('Error during logout')\n\n elif 'SAMLRequest' in request.GET: # logout started by the IdP\n log.debug('Receiving a logout request from the IdP')\n subject_id = _get_name_id(request.session)\n if subject_id is None:\n log.warning(\n 'The session does not contain the subject id for user {0} '\n 'Performing local logout'.format(\n authenticated_userid(request)\n )\n )\n headers = logout(request)\n return HTTPFound(location=next_page, headers=headers)\n else:\n http_info = client.handle_logout_request(\n request.GET['SAMLRequest'],\n subject_id,\n BINDING_HTTP_REDIRECT,\n relay_state=request.GET['RelayState']\n )\n state.sync()\n location = get_location(http_info)\n headers = logout(request)\n return HTTPFound(location=location, headers=headers)\n else:\n log.error('No SAMLResponse or SAMLRequest parameter found')\n raise HTTPNotFound('No SAMLResponse or SAMLRequest parameter found')\n\n\n@view_config(route_name='saml2-metadata')\ndef metadata(request):\n \"\"\"Returns an XML with the SAML 2.0 metadata for this\n SP as configured in the settings.py file.\n \"\"\"\n conf = get_saml2_config(\n request.registry.settings.get('saml2.settings_module'))\n metadata = entity_descriptor(conf)\n return Response(body=str(metadata), content_type=\"text/xml; charset=utf8\")\n\n\n@view_config(route_name='saml2-wayf-demo',\n renderer='templates/wayf.jinja2')\ndef wayf_demo(request):\n return {\n 'available_idps': (\n ('http://idp1.example.com', 'IDP from Organization 1'),\n ('http://idp2.example.com', 'IDP from Organization 2'),\n ),\n 'came_from': '/',\n 'login_url': request.route_url('saml2-login'),\n }\n\n\ndef get_authn_request(request, came_from, selected_idp,\n required_loa=None, force_authn=False):\n # Request the right AuthnContext for workmode\n # (AL1 for 'personal', AL2 for 'helpdesk' and AL3 for 'admin' by default)\n if required_loa is None:\n required_loa = request.registry.settings.get('required_loa', {})\n workmode = request.registry.settings.get('workmode')\n required_loa = required_loa.get(workmode, '')\n log.debug('Requesting AuthnContext {!r}'.format(required_loa))\n kwargs = {\n \"requested_authn_context\": RequestedAuthnContext(\n authn_context_class_ref=AuthnContextClassRef(\n text=required_loa\n )\n ),\n \"force_authn\": str(force_authn).lower(),\n }\n\n client = Saml2Client(request.saml2_config)\n try:\n (session_id, info) = client.prepare_for_authenticate(\n entityid=selected_idp, relay_state=came_from,\n binding=BINDING_HTTP_REDIRECT,\n **kwargs\n )\n except TypeError:\n log.error('Unable to know which IdP to use')\n raise\n\n oq_cache = OutstandingQueriesCache(request.session)\n oq_cache.set(session_id, came_from)\n return info\n","sub_path":"eduiddashboard/saml2/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":12984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"625007894","text":"import argparse\nimport os\nimport os.path\nfrom itertools import groupby\nfrom os import listdir\nfrom os.path import isfile, join, isdir, relpath\nfrom time import strftime, localtime\n\nimport Documentor\n\nOUTPUT_ROOT = str()\n\ndef parsePackageInfo(abspath, absoutput):\n\tinput = str()\n\twith open(abspath, 'r') as fp:\n\t\tinput = fp.read()\n\tresultingHTML = '''\n\tThere should be package doc but there is only content:\n\t%s\n\t''' % input\n\twith open(join(absoutput, 'package-info.html'), 'w') as fp:\n\t\tfp.write(resultingHTML)\n\n\ndef htmlHeader(mainPagePath: str, contentsPath: str, abcPath: str):\n\treturn '''\n\n\n\n\t\t\n\t\t\n\t\t\n\t\tHello, world!\n\n\n\t\n\t'''.format(mainPagePath=mainPagePath, contentsPath=contentsPath, abcPath=abcPath)\n\n\ndef generateHomepage(absoutput, projectName, singlefilemode, elementsCount):\n\tsingleFileWarn = '
GENERATED IN SINGLE-FILE-MODE
' if singlefilemode else ''\n\tresultingHTML = \"\"\"\n{navbar}\n
\n\t
\n\t {projectName}\n\t
\n\t
\n\t {generationDate}\n\t
\n\t
\n\t KotlinPyDoc\n\t
\n\t
\n\t {elementsCount} elements\n\t
\n {singlefilemode}\n
\n\n\n\t\"\"\".format(projectName=projectName, generationDate=strftime(\"%d.%m.%Y %H:%M:%S\", localtime()),\n\t\t\t singlefilemode=singleFileWarn, navbar=htmlHeader('./index.html', './dir_doc.html', './abc.html'), elementsCount=elementsCount)\n\twith open(join(absoutput, 'index.html'), 'w') as fp:\n\t\tfp.write(resultingHTML)\n\n\ndef generateDirDoc(pathToOutput: str, files: list, dirs:list=None):\n\thtmlResult = '''\n\t{header}\n\t'''.format(header=htmlHeader(join(pathToOutput,'index.html'),join(pathToOutput,'dir_doc.html'),join(pathToOutput,'abc.html')))\n\tif pathToOutput!='.':\n\t\thtmlResult+=''\n\tif (dirs):\n\t\tfor dir in dirs:\n\t\t\thtmlResult+=''.format(dirName=dir)\n\tfor file in files:\n\t\thtmlResult+=''.format(fileName=file)\n\thtmlResult+='''\n\t\n\t\n\t'''\n\treturn htmlResult\n\n\ndef generateAbc(listElements: list, abcPath: str):\n\tlistElements.sort(key=lambda k: k.name.lower())\n\thtmlResult = \"\"\"\n\t{navbar}\n\t
Abc
\n\t\"\"\".format(navbar=htmlHeader('./index.html', './dir_doc.html', './abc.html')) # add html head\n\tfor letter, words in groupby(listElements, key=lambda k: k.name[0].lower()):\n\t\thtmlResult += \"\"\"\n\t\t
\n\t\t
{letter}
\n\t\t
\n\t\t\"\"\".format(letter=letter.upper()) # open card with letter\n\t\tfor word, elems in groupby(words, key=lambda k: k.name):\n\t\t\thtmlResult += \"\"\"\n \t
\n
{word}
\n \"\"\".format(word=word) # open card with word\n\t\t\tfor elem in elems:\n\t\t\t\thtmlResult += '''\n\t\t\t\t\n\t\t\t\t'''.format(elem=Documentor.htmlfy(\n\t\t\t\t\telem.title.replace(elem.name, elem.parent.name + '.' + elem.name) if hasattr(elem, 'parent') else elem.title),\n\t\t\t\t\t\t pathToFile=os.path.relpath(elem.filePath, abcPath),\n\t\t\t\t\t\t elemId=elem.id) # add card with elem\n\t\t\thtmlResult += \"
\" # close card with word\n\t\thtmlResult += \"
\" # close card with letter\n\thtmlResult += \"\" # add html bottom\n\twith open(join(abcPath, 'abc.html'), 'w') as fp:\n\t\tfp.write(htmlResult)\n\n\ndef parsedir(abspath, absoutput, recursive: bool = False):\n\tonlyKtFiles = [f for f in listdir(abspath) if isfile(join(abspath, f)) and f.endswith('.kt')]\n\tlistElements = list()\n\tfor file in onlyKtFiles:\n\t\toneListElements = parsefile(join(abspath, file), absoutput)\n\t\tlistElements.extend(oneListElements)\n\tif os.path.exists(join(abspath, 'package-info.java')):\n\t\tparsePackageInfo(join(abspath, 'package-info.java'), absoutput)\n\tonlyDirs=list()\n\tif recursive:\n\t\tonlyDirs = [d for d in listdir(abspath) if isdir(join(abspath, d))]\n\t\tfor dir in onlyDirs:\n\t\t\tlistElements.extend(parsedir(join(abspath, dir), join(absoutput, dir),recursive))\n\tdirDocPath = join(absoutput, 'dir_doc.html')\n\tos.makedirs(os.path.dirname(dirDocPath), exist_ok=True)\n\twith open(dirDocPath, 'w') as fp:\n\t\tfp.write(generateDirDoc(relpath(OUTPUT_ROOT, absoutput), onlyKtFiles, dirs=onlyDirs))\n\treturn listElements\n\n\ndef parsefile(abspath, absoutput):\n\tpathToOutput = relpath(OUTPUT_ROOT, absoutput)\n\tresultingHTML, listElements = Documentor.parsefile(abspath, htmlHeader(join(pathToOutput,'index.html'),join(pathToOutput,'dir_doc.html'),join(pathToOutput,'abc.html')))\n\t_, filename = os.path.split(abspath)\n\tabsoutput = join(absoutput, filename + '_doc.html')\n\tos.makedirs(os.path.dirname(absoutput), exist_ok=True)\n\twith open(absoutput, 'w') as fp:\n\t\tfp.write(resultingHTML)\n\tfor element in listElements:\n\t\telement.filePath = absoutput\n\treturn listElements\n\n\ndef parse(abspath, absoutput, recursive: bool):\n\tobjectPath, objectName = os.path.split(abspath)\n\tif absoutput == None:\n\t\tabsoutput = os.path.join(objectPath, objectName + '_docs')\n\tglobal OUTPUT_ROOT\n\tOUTPUT_ROOT=absoutput\n\tlistElements = list()\n\tsinglefilemode = False\n\tif (os.path.isfile(abspath)):\n\t\tlistElements = parsefile(abspath, absoutput)\n\t\tsinglefilemode = True\n\t\tdirDocPath = join(absoutput, 'dir_doc.html')\n\t\tos.makedirs(os.path.dirname(dirDocPath), exist_ok=True)\n\t\twith open(dirDocPath, 'w') as fp:\n\t\t\tfp.write(generateDirDoc(relpath(OUTPUT_ROOT, absoutput), [objectName]))\n\tif (os.path.isdir(abspath)):\n\t\tlistElements = parsedir(abspath, absoutput, recursive)\n\tgenerateHomepage(absoutput, objectName, singlefilemode, len(listElements))\n\tgenerateAbc(listElements, absoutput)\n\n\nif __name__ == '__main__':\n\tparser = argparse.ArgumentParser(prog='KotlinPyDoc', description='Generate documentation for Kotlin project.')\n\tparser.add_argument(\"path\", help=\"Path to the folder or Kotlin file\")\n\tparser.add_argument(\"-r\", \"--recursive\", action=\"store_true\", help=\"enable recursive mode(folders only)\",\n\t\t\t\t\t\tdefault=False)\n\tparser.add_argument(\"--output\", help=\"output path\")\n\targs = parser.parse_args()\n\n\tabspath = os.path.abspath(os.path.expanduser(os.path.expandvars(args.path)))\n\n\tabsoutput = args.output\n\tif (absoutput != None):\n\t\tabsoutput = os.path.abspath(os.path.expanduser(os.path.expandvars(absoutput)))\n\n\tparse(abspath, absoutput, args.recursive)\n","sub_path":"KotlinPyDoc.py","file_name":"KotlinPyDoc.py","file_ext":"py","file_size_in_byte":7811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"274496144","text":"# Mohidul Abedin; William Cao; Team Mo Goes Cow\n\n# SoftDev1 pd2\n\n# K17 -- No Trouble\n\n# 2019-10-10\n\nimport sqlite3 #enable control of an sqlite database\nimport csv #facilitate CSV I/O\n\n\nDB_FILE=\"discobandit.db\"\n\ndb = sqlite3.connect(DB_FILE) #open if file exists, otherwise create\nc = db.cursor() #facilitate db ops\n\n\ndef put_data_in(file_path: str, table_name):\n \"\"\"\n Reads a csv file with three columns and enters into a table. Table must exist already\n\n :param file_path: Path to csv file to enter data in. The csv should only have 3 columns\n :param table_name: Name of table\n \"\"\"\n with open(file_path) as csv_file:\n reader = csv.DictReader(csv_file)\n for row in reader:\n values = list(row.values())\n db.execute(\"INSERT INTO {} VALUES ('{}', {}, {});\".format(table_name, *values))\n\n\ndb.execute(\"CREATE TABLE IF NOT EXISTS students (name STRING, age INTERGER, id INTERGER PRIMARY KEY);\")\nput_data_in(\"./data/students.csv\", \"students\")\ndb.execute(\"CREATE TABLE IF NOT EXISTS courses (code STRING, mark INTERGER, id INTERGER);\")\nput_data_in(\"./data/courses.csv\", \"courses\")\n\ndb.commit() #save changes\ndb.close() #close database\n","sub_path":"fall/17_csv2db/db_builder.py","file_name":"db_builder.py","file_ext":"py","file_size_in_byte":1205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"327270698","text":"import time\r\nimport random\r\nimport msgpack\r\nimport regex\r\nimport chatcommunicate\r\nfrom globalvars import GlobalVars\r\nfrom helpers import log\r\nfrom tasks import Tasks\r\n\r\n\r\nclass SocketScience:\r\n _incomplete_messages = {}\r\n _pings = []\r\n _switch_task = None\r\n\r\n @staticmethod\r\n def send(payload):\r\n encoded = msgpack.dumps(payload)\r\n\r\n # Messages can be 500 chars, but we need to leave space for control and message ident\r\n chunks = [encoded[i:i + 485] for i in range(0, len(encoded), 485)]\r\n message_id = random.randint(1000, 9999)\r\n\r\n chunks[0] = \"\\u0002\" + str(message_id) + chunks[0]\r\n chunks[-1] = chunks[-1] + str(message_id) + \"\\u0003\"\r\n for n in range(1, len(chunks) - 1):\r\n chunks[n] = \"\\u0016\" + str(message_id) + chunks[n]\r\n\r\n for chunk in chunks:\r\n chatcommunicate.tell_rooms_with(\"direct\", chunk)\r\n\r\n @staticmethod\r\n def receive(content):\r\n global _incomplete_messages\r\n\r\n content = content.strip()\r\n\r\n # U+0002 STX START OF TEXT; U+0003 ETX END OF TEXT; U+0016 SYN SYNCHRONOUS IDLE\r\n if content.startswith(\"\\u0002\") and content.endswith(\"\\u0003\"):\r\n decoded = msgpack.loads(regex.sub(r\"\\d{4}\\u0003\", \"\", regex.sub(r\"^\\u0002\\d{4}\", \"\", content)))\r\n SocketScience.handle(decoded)\r\n\r\n # STX indicates probably valid, but incomplete - wait for another message with content and ETX.\r\n elif content.startswith(\"\\u0002\") and not content.endswith(\"\\u0003\"):\r\n message_id = regex.match(r\"^\\u0002(\\d{4})\", content)[1]\r\n _incomplete_messages[message_id] = content\r\n\r\n # No STX but ends with ETX, so probably a completion of a previous message.\r\n elif not content.startswith(\"\\u0002\") and content.endswith(\"\\u0003\"):\r\n message_id = regex.match(r\"(\\d{4})\\u0003$\", content)[1]\r\n complete = _incomplete_messages[message_id] + content\r\n decoded = msgpack.loads(regex.sub(r\"\\d{4}\\u0003\", \"\", regex.sub(r\"^\\u0002\\d{4}\", \"\", complete)))\r\n SocketScience.handle(decoded)\r\n\r\n # Starts with SYN and message ID - continuation but not completion of previous message.\r\n elif content.startswith(\"\\u0016\"):\r\n message_id = regex.match(r\"^\\u0016(\\d{4})\", content)[1]\r\n _incomplete_messages[message_id] += regex.sub(r\"^\\u0016\\d{4}\", \"\", content)\r\n\r\n else:\r\n log('debug', 'SocketScience received malformed direct message')\r\n log('debug', content)\r\n\r\n @staticmethod\r\n def handle(content):\r\n global _pings\r\n global _switch_task\r\n\r\n if \"metasmoke_state\" in content:\r\n if content[\"metasmoke_state\"] == \"down\":\r\n log('info', \"{} says metasmoke is down, switching to active ping monitoring.\"\r\n .format(content[\"location\"]))\r\n GlobalVars.metasmoke_down = True\r\n Tasks.later(SocketScience.check_recent_pings, after=90)\r\n\r\n if content[\"metasmoke_state\"] == \"up\":\r\n log('info', '{} says metasmoke is up, disabling ping monitoring.'.format(content[\"location\"]))\r\n GlobalVars.metasmoke_down = False\r\n\r\n if \"ping\" in content:\r\n _pings.append({\"timestamp\": content[\"ping\"], \"location\": content[\"location\"]})\r\n if _switch_task is not None:\r\n _switch_task.cancel()\r\n\r\n @staticmethod\r\n def check_recent_pings():\r\n global _pings\r\n global _switch_task\r\n\r\n recent = _pings.sort(key=lambda p: p[\"timestamp\"]).reverse()\r\n if len(recent) >= 1:\r\n most_recent = recent[0][\"timestamp\"]\r\n now = time.time()\r\n\r\n if now - most_recent >= 90 or len(recent) == 0:\r\n # No active Smokeys. Wait a random number of seconds, then switch to active.\r\n sleep = random.randint(0, 30)\r\n _switch_task = Tasks.later(SocketScience.switch_to_active, after=sleep)\r\n\r\n @staticmethod\r\n def switch_to_active():\r\n GlobalVars.standby_mode = False\r\n chatcommunicate.tell_rooms_with(\"debug\", GlobalVars.location + \" entering autonomous failover.\",\r\n notify_site=\"/failover\")\r\n","sub_path":"socketscience.py","file_name":"socketscience.py","file_ext":"py","file_size_in_byte":4260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"5448670","text":"import lp_colors, scripts\nfrom time import sleep\n\nPATH = None\nLAYOUT_EXT = \".LPHKlayout\"\nLAYOUT_PATH = \"/user_layouts/\"\nSCRIPT_EXT = \".LPHKscript\"\nSCRIPT_PATH = \"/user_scripts/\"\n\nBUTTON_SEPERATOR = \":LPHK_BUTTON_SEP:\"\nENTRY_SEPERATOR = \":LPHK_ENTRY_SEP:\"\nNEWLINE_REPLACE = \":LPHK_NEWLINE_REP:\"\n\nimport window\n\ncurr_layout = None\nin_error = False\nlayout_changed_since_load = False\n\ndef init(path_in):\n global PATH\n PATH = path_in\n\ndef save_layout(name, add_path=True):\n final_path = None\n if add_path:\n final_path = PATH + LAYOUT_PATH + name + LAYOUT_EXT\n else:\n final_path = name\n with open(final_path, \"w+\") as f:\n for x in range(9):\n for y in range(9):\n color = lp_colors.curr_colors[x][y]\n if type(color) == list:\n f.write(str(color[0]))\n f.write(\",\")\n f.write(str(color[1]))\n f.write(\",\")\n f.write(str(color[2]))\n else:\n f.write(str(color))\n\n f.write(ENTRY_SEPERATOR)\n\n script_text = scripts.text[x][y].replace(\"\\n\", NEWLINE_REPLACE)\n f.write(script_text)\n\n if y < 8:\n f.write(BUTTON_SEPERATOR)\n f.write(\"\\n\")\n print(\"[files] Saved layout as \" + final_path)\n\ndef load_layout(name, add_path=True):\n global curr_layout\n global in_error\n global layout_changed_since_load\n \n scripts.unbind_all()\n window.app.draw_canvas()\n \n final_path = None\n if add_path:\n final_path = PATH + LAYOUT_PATH + name + LAYOUT_EXT\n else:\n final_path = name\n with open(final_path, \"r\") as f:\n l = f.readlines()\n\n for x in range(9):\n line = l[x][:-1].split(BUTTON_SEPERATOR)\n for y in range(9):\n info = line[y].split(ENTRY_SEPERATOR)\n color = info[0]\n if not color.isdigit():\n split = color.split(\",\")\n color = []\n color.append(int(split[0]))\n color.append(int(split[1]))\n color.append(int(split[2]))\n else:\n color = int(info[0])\n script_text = info[1].replace(NEWLINE_REPLACE, \"\\n\")\n\n if script_text != \"\":\n script_validation = scripts.validate_script(script_text)\n if script_validation != True:\n lp_colors.update_all()\n window.app.draw_canvas()\n in_error = True\n window.app.save_script(window.app, x, y, script_text, open_editor = True, color = color)\n in_error = False\n else:\n scripts.bind(x, y, script_text, color)\n else:\n lp_colors.setXY(x, y, color)\n lp_colors.update_all()\n window.app.draw_canvas()\n curr_layout = final_path\n layout_changed_since_load = False\n print(\"[files] Loaded layout \" + final_path)\n\ndef import_script(name, add_path=True):\n final_path = None\n if add_path:\n final_path = PATH + LAYOUT_PATH + name + LAYOUT_EXT\n else:\n final_path = name\n with open(final_path, \"r\") as f:\n text = f.read()\n print(\"[files] Imported script as \" + final_path)\n return text\n\ndef export_script(name, script, add_path=True):\n final_path = None\n if add_path:\n final_path = PATH + LAYOUT_PATH + name + LAYOUT_EXT\n else:\n final_path = name\n with open(final_path, \"w+\") as f:\n f.write(script)\n print(\"[files] Exported script as \" + final_path)\n\ndef strip_lines(text):\n return \"\\n\".join([line.strip() for line in text.split(\"\\n\")])\n","sub_path":"files.py","file_name":"files.py","file_ext":"py","file_size_in_byte":3840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"261176360","text":"from sdv import create_app\nimport unittest\n\n\nclass SDVTestCase(unittest.TestCase):\n\n def setUp(self):\n self.app = create_app('testing')\n\n self.ctx = self.app.app_context()\n self.ctx.push()\n\n self.client = self.app.test_client()\n\n def tearDown(self):\n self.ctx.pop()\n","sub_path":"tests/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"387311105","text":"from pyquery import PyQuery as pq\nimport re\nimport pandas as pd\n\n\n# Analysis\ndef get_analysis(html):\n doc = pq(html)\n tables = doc('#Main table').items()\n t = [i.text().split('\\n') for i in tables]\n t_df = [[tt[i:i + 4] for i in range(0, len(tt), 5)] for tt in t]\n columns = [i.pop(0) for i in t_df]\n dfs = [pd.DataFrame(i) for i in t_df]\n for i in range(len(dfs)):\n dfs[i].columns = columns[i]\n return dfs\n\n\n# Financial\ndef get_stats(html):\n # parser for statistics in yahoo finance\n doc = pq(html)\n tables = doc('#YDC-Col1 table').items() # parsing rule\n # formatting to the clean format\n t = [i.text().split('\\n') for i in tables]\n t = [t[0]] + [[i[j] for i in t[1:6] for j in range(len(i))]] + [[i[j] for i in t[7:9] for j in range(len(i))]]\n assert (len(t) == 3)\n t = [[i[j:j + 2] for j in range(0, len(i), 2)] for i in t]\n for i in t: # formatting\n for j in i:\n j[0] = re.compile(' [0-9]$').sub('', j[0])\n assert (len(t) == 3)\n dfs = [pd.DataFrame(i) for i in t]\n return dfs\n\n\ndef get_statements(html):\n # Not done yet, needs to format and pd\n doc = pq(html)\n items = doc('#mrt-node-Col1-1-Financials table tr').items() # Parser, will be seperated\n result = [i.text().split('\\n') for i in items if len(i.text().split('\\n')) > 1]\n df = pd.DataFrame(result)\n return df\n\n\ndef get_reports(html): # parser for income, cash_flow, and balance sheets\n doc = pq(html)\n items = doc('#mrt-node-Col1-1-Financials table tr').items() # Parsing rule\n result = [i.text().split('\\n') for i in items if len(i.text().split('\\n')) > 1] # formatting\n columns = result.pop(0)\n columns[0] = 'Statements' # eliminate unnecessary info\n df = pd.DataFrame(result)\n df.columns = columns\n return df\n\n\n# Holders\ndef get_major_holders(html):\n \"\"\"Getter for holders from yahoo finance\"\"\"\n doc = pq(html)\n tables = doc('#mrt-node-Col1-1-Holders table') # parsing rule\n t1 = pq(tables.pop(0)).text().split('\\n')\n t2 = [[t1[i + 1], t1[i]] for i in range(0, len(t1), 2)] # formatting\n df = pd.DataFrame(t2) # convert to pd dataframe\n df.columns = ['Category', 'Percentage']\n return df\n\n\ndef get_top_institutional_and_mutual_fund_holders(html):\n \"\"\"Getter for other holders in yahoo finance\"\"\"\n doc = pq(html)\n tables = doc('#mrt-node-Col1-1-Holders table') # parsing rule\n # formatting\n t = [pq(i).text().split('\\n') for i in tables[1:]]\n t2 = [[t1[i:i + 4] for i in range(0, len(t1), 5)] for t1 in t]\n columns = [t.pop(0) for t in t2]\n df1, df2 = pd.DataFrame(t2[0]), pd.DataFrame(t2[1]) # convert to pd DataFrame\n df1.columns = columns[0]\n df2.columns = columns[1]\n return df1, df2\n\n\n# Profile\ndef get_executives(html):\n doc = pq(html)\n text = doc('#Main table').text().split('\\n')\n t = [text[i:i + 5] for i in range(0, len(text), 5)]\n columns = t.pop(0)\n df = pd.DataFrame(t)\n df.columns = columns\n return df\n\n\ndef get_description(html):\n doc = pq(html)\n text = doc('.quote-sub-section p').text()\n df = pd.DataFrame([text])\n df.columns = ['Description']\n return df\n\n\n# Summary\ndef get_summary(html, stock):\n doc = pq(html)\n tables = doc('#quote-summary table').items()\n t = [i.text().split('\\n') for i in tables]\n t = [i[j] for i in t for j in range(len(i))]\n columns = [t[i] for i in range(0, len(t), 2)]\n data = [[t[i] for i in range(1, len(t), 2)]]\n data[0].insert(0, stock)\n columns.insert(0, 'Stock')\n del t\n df = pd.DataFrame(data)\n df.columns = columns\n return df\n\n\n# Update page\ndef get_update(html):\n try:\n updates = [i.text() for i in pq(html)('.simpTblRow a').items()]\n df = pd.DataFrame(updates)\n df.to_csv('dates_temp.csv', mode='a', header=False) # csv of firms\n except TypeError:\n pass\n","sub_path":"jaqk/Spyder/parsers.py","file_name":"parsers.py","file_ext":"py","file_size_in_byte":3877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"605930321","text":"from django.contrib import messages\nfrom django.conf import settings as django_settings\nfrom django.core.urlresolvers import reverse\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.shortcuts import render_to_response, get_object_or_404\nfrom django.template import RequestContext\nfrom cccheckout import decorators\nfrom cccheckout.importlib import import_thing\nfrom cccheckout import settings as c_settings\nfrom cccheckout.forms import DiscountCodeForm, PostageForm\n\n\ndef complete(request):\n # add checkout to context\n checkout = request.session['cccheckout']\n # if checkout is not complete redirect to payment\n if not checkout.paid:\n return HttpResponseRedirect(reverse('cccheckout:payment'))\n return render_to_response('cccheckout/complete.html', {},\n context_instance=RequestContext(request))\n\n\n@decorators.process_discount\n@decorators.allow_guest\n@decorators.prepare_checkout\ndef payment(request, *args, **kwargs):\n \"\"\"Confirms to customer and then begins the payment process\"\"\"\n checkout = request.session['cccheckout']\n # ensure that we have a customer and postage if required\n if c_settings.CCCHECKOUT_CUSTOMER_FORM is not None and \\\n checkout.customer is None:\n return HttpResponseRedirect(reverse('cccheckout:customer'))\n # if there is postage models but checkout doesnt have one then redirect\n if len(c_settings.CCCHECKOUT_POSTAGE_MODELS) > 0 and \\\n checkout.postage is None:\n return HttpResponseRedirect(reverse('cccheckout:postage'))\n discountform = kwargs.get('discountform', None)\n # now import the forms\n forms = []\n for form in c_settings.CCCHECKOUT_PAYMENT_FORMS:\n form_mod, form_cls = import_thing(form)\n form = form_cls(checkout=checkout)\n forms.append(form)\n return render_to_response('cccheckout/payment.html', {\n 'forms': forms,\n 'checkout': checkout,\n 'discountform': discountform},\n context_instance=RequestContext(request))\n\n\n@decorators.process_discount\n@decorators.allow_guest\n@decorators.prepare_checkout\ndef postage(request, *args, **kwargs):\n \"\"\"Handles the processing of the postage\"\"\"\n checkout = kwargs['checkout'] \n # ensure that we have a customer if that's required\n if c_settings.CCCHECKOUT_CUSTOMER_FORM is not None and \\\n checkout.customer is None:\n return HttpResponseRedirect(reverse('cccheckout:customer'))\n # if postage is not required redirect to payment\n if len(c_settings.CCCHECKOUT_POSTAGE_MODELS) == 0:\n return HttpResponseRedirect(reverse('cccheckout:payment'))\n\n # if there is a postage method on the checkout use that as the initial\n initial = {'available_methods': checkout.postage_form_value}\n # make the postage form\n form = PostageForm(initial=initial, checkout=checkout)\n # create a discount form\n discountform = kwargs.get('discountform', None)\n # process the post\n if request.method == 'POST':\n form = PostageForm(request.POST, initial=initial, checkout=checkout)\n if form.is_valid():\n postage, postage_tier, postage_form_value = form.save()\n checkout.postage = postage\n checkout.postage_tier = postage_tier\n checkout.postage_form_value = postage_form_value\n checkout.save()\n messages.success(request, 'Your postage option has been saved')\n return HttpResponseRedirect(reverse('cccheckout:payment'))\n messages.error(request, 'You must choose a postage method')\n return render_to_response('cccheckout/postage.html', {\n 'form': form,\n 'discountform': discountform,\n 'checkout': checkout},\n context_instance=RequestContext(request))\n\n\n@decorators.process_discount\n@decorators.allow_guest\n@decorators.prepare_checkout\ndef customer(request, *args, **kwargs):\n \"\"\"Handles the processing of the customer model and form\"\"\"\n checkout = kwargs['checkout'] \n # if there is no form in the settings for customer redirect to postage\n if c_settings.CCCHECKOUT_CUSTOMER_FORM is None:\n return HttpResponseRedirect(reverse('cccheckout:postage'))\n # get the form\n module, form_cls = import_thing(c_settings.CCCHECKOUT_CUSTOMER_FORM)\n model_cls = form_cls._meta.model\n # make the form\n form = form_cls(instance=checkout.customer)\n # make discount form\n discountform = kwargs.get('discountform', None)\n if request.method == 'POST':\n form = form_cls(request.POST, instance=checkout.customer)\n if form.is_valid():\n customer = form.save()\n checkout.customer = customer\n checkout.save()\n messages.success(request, 'Your details have been saved')\n return HttpResponseRedirect(reverse('cccheckout:postage'))\n messages.error(request, 'There were problems with the information'\n ' you\\'ve entered')\n return render_to_response('cccheckout/customer.html', {\n 'form': form,\n 'discountform': discountform,\n 'checkout': checkout},\n context_instance=RequestContext(request))\n","sub_path":"cccheckout/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"98517538","text":"#!/bin/python\n\n#------------------- Description & Notes --------------------#\n\n'''\nDescription:\n Given a list of FASTA/FASTQ or SAM/BAM files, output (to file) the\n frequencies for each oligonucleotide sequence of length K in a sequence/s.\n Frequencies are calculated either:\n * Across all sequence records (combined).\n * For each sequence record (split).\n\nArgs:\n fastXFiles (filepath):\n List containing the filepath of each file. Files can be\n compressed (.gz) and should contain at least one\n sequence record (FASTA/FASTQ or SAM/BAM). Our processing limit\n seems to be around 3.5 million (3,500,000) sequence records.\n\n kmerLength (int):\n Length of oligonucleotide sequences. Must be a positive integer.\n Ideally, this should be <= 13 since the total number of possible\n oligonucleotide sequences exponentially increases (4^K).\n * 4^13 Kmers = 67,108,864 ## Possible\n * 4^14 Kmers = 268,435,456 ## Sometimes possible\n * 4^15 Kmers = 1,073,741,824 ## Probably not possible\n\nReturns:\n oFile (dir):\n Directory containing a list of files. Each file is compressed\n in Parquet format and contains the frequencies for each\n oligonucleotide sequence of length K in a sequence/s.\n'''\n\n#------------------- Dependencies ---------------------------#\n\n# Standard library imports\nimport argparse\nimport sys\nfrom pathlib import Path\n\n# External imports\n\n# Internal imports\nTHIS_DIR = Path(__file__).resolve().parent\nsys.path.append(str(THIS_DIR.parent)) ## Allow us to import from SRC\nfrom src.kmer import count as kmercount\nfrom src.util import spark\nfrom src import io\n\n#------------------- Constants ------------------------------#\n\n#------------------- Public Classes & Functions -------------#\n\ndef combineKmerFrequencies(iFiles, kmerLength, oFile, ignoreNs, countExp):\n print(\"Combining counts\")\n params = get_spark_params()\n with spark.getSparkSession(params) as ss:\n with ss.sparkContext as sc:\n ## Read FASTX files\n seqRecRdd = sc.parallelize(iFiles)\n seqRecRdd = seqRecRdd.flatMap(io.fastx.read)\n seqRecRdd = kmercount.combined.setup(seqRecRdd)\n\n ## Get a table containing the kmer counts across all records\n kmerDf = kmercount.combined.getCounts(seqRecRdd, kmerLength,\n ignoreNs, countExp)\n kmerDf = kmercount.combined.cleanup(kmerDf, kmerLength)\n\n ## Write the table to disk\n print(\"Writing output\")\n io.kmer.write(oFile, kmerDf)\n ## Instead of outputing, we could extend the pipeline here\n ## so that it goes directly to analysing Kmer frequencies\n\ndef splitKmerFrequencies(iFiles, kmerLength, oFile, ignoreNs, countExp):\n print(\"Splitting counts\")\n params = get_spark_params()\n with spark.getSparkSession(params) as ss:\n with ss.sparkContext as sc:\n ## Read FASTX files\n seqRecRdd = sc.parallelize(iFiles)\n seqRecRdd = seqRecRdd.flatMap(io.fastx.read)\n seqRecRdd = kmercount.split.setup(seqRecRdd, kmerLength)\n\n ## Get a table containing the kmer counts across for each record\n kmerDf = kmercount.split.getCounts(seqRecRdd, kmerLength,\n ignoreNs, countExp)\n kmerDf = kmercount.split.cleanup(kmerDf, kmerLength)\n\n ## Write the table to disk\n print(\"Writing output\")\n io.kmer.write(oFile, kmerDf)\n ## Instead of outputing, we could extend the pipeline here\n ## so that it goes directly to analysing Kmer frequencies\n\n#------------------- Private Classes & Functions ------------#\n\ndef get_spark_params():\n # import os ## For HPC\n params = [\n ## Driver\n # ('spark.driver.cores', '5'), ## Same as executor\n ('spark.driver.memory', '27G'), ## Same as executor\n ('spark.driver.maxResultSize', '0'),\n\n ## Executor\n # ('spark.executor.cores', '5'), ## Didn't seem to have any effect...\n # ('spark.executor.instances', '99'), ## Didn't seem to have any effect...\n ('spark.executor.memory', '27G'),\n ('spark.executor.heartbeatInterval', '60s'),\n\n ## SQL\n ('spark.sql.broadcastTimeout', '600s'),\n ('spark.sql.execution.arrow.pyspark.enabled', 'true'),\n ('spark.sql.shuffle.partitions', '200'),\n\n ## Misc\n ('spark.local.dir', THIS_DIR),\n # ('spark.local.dir', os.environ['MEMDIR']), ## For HPC\n ('spark.network.timeout', '600s'),\n # ('spark.default.parallelism', '8')\n ]\n return params\n\ndef make_parser():\n def _initArgs(p):\n p.add_argument(\"-f\", help=\"FASTA/FASTQ files (.fa/.fq)\",\n nargs='+', type=str, required=True)\n p.add_argument(\"-k\", help=\"Kmer length\",\n type=int, required=True)\n p.add_argument(\"-o\", help=\"Output file (.snappy.parquet)\",\n type=str, required=True)\n p.add_argument(\"-n\", help=\"Ignore Kmers containing ambiguous bases \\\n (i.e., N's)\", action='store_true')\n p.add_argument(\"-e\", help=\"Calculate expected Kmer counts instead \\\n of observed Kmer counts. Expected counts are based on the (k-2) \\\n Markov Chain Model\", action='store_true')\n\n parser = argparse.ArgumentParser(description='Compute \\\n oligonucleotide frequencies of sequence/s')\n subparser = parser.add_subparsers(dest='command')\n combined = subparser.add_parser('combined')\n split = subparser.add_parser('split')\n _initArgs(combined)\n _initArgs(split)\n return parser\n\ndef main(parser):\n args = parser.parse_args()\n if (args.command is None):\n parser.print_help()\n sys.exit(1)\n\n else:\n if (args.k < 0):\n parser.print_help()\n parser.error('Invalid value. K > 0.')\n sys.exit(1)\n\n if (args.e and args.k < 3):\n parser.print_help()\n parser.error('Expected counts can only be calculated for K > 3.')\n sys.exit(1)\n\n if (args.command == 'combined'):\n print(args)\n combineKmerFrequencies(args.f, args.k, args.o, args.n, args.e)\n\n elif (args.command == 'split'):\n print(args)\n splitKmerFrequencies(args.f, args.k, args.o, args.n, args.e)\n\n print(\"DONE\")\n\n#------------------- Main -----------------------------------#\n\nif (__name__ == \"__main__\"):\n parser = make_parser()\n main(parser)\n\n#------------------------------------------------------------------------------\n","sub_path":"bin/calculate_kmer_frequencies.py","file_name":"calculate_kmer_frequencies.py","file_ext":"py","file_size_in_byte":6726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"217526054","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n'''\n@File : ccxt_simple.py\n@Author: flyhawk\n@Date : 2019/5/18 15:53\n@Desc : \n'''\n\n# 引入pandas框架\nimport pandas as pd\nimport time\nimport pprint\n\n# 引入ccxt框架, 通过pip install ccxt 可以进行安装\n# ccxt 的github地址为: https://github.com/ccxt/ccxt\nimport ccxt\n\n\ndef bitstamp_test():\n \n # 初始化bitme交易所对象\n bitmex = ccxt.bitstamp()\n \n # 请求的candles个数\n limit = 500\n \n # 当前时间\n current_time = int(time.time() // 60 * 60 * 1000) # 毫秒\n print(current_time)\n \n # 获取请求开始的时间\n since_time = current_time - limit * 60 * 1000\n \n # 'BTC/USD' 比特币对美元的交易对,或者ETH/USD 以太坊对美元的交易对.\n data = bitmex.fetch_ohlcv(symbol='BTC/USD', limit=500, since=since_time)\n df = pd.DataFrame(data)\n df = df.rename(columns={0: 'open_time', 1: 'open', 2: 'high', 3: 'low', 4: 'close', 5: 'volume'})\n \n # 时间转换成北京时间\n df['open_time'] = pd.to_datetime(df['open_time'], unit='ms') + pd.Timedelta(hours=8)\n \n # 设置index\n df = df.set_index('open_time', drop=True)\n \n # 保存成csv文件\n df.to_csv('bitmex_data.csv') # comma seperate Value\n print(df)\n\n\ndef simple_test(exchange):\n # 实例化市场\n # exchange = ccxt.bitstamp()\n # 交易对\n symbol = 'BTC/USD'\n \n # 获取ticker信息\n ticker = exchange.fetch_ticker(symbol)\n # 获取depth信息\n depth = exchange.fetch_order_book(symbol)\n \n print('ticker:%s, depth:%s' % (ticker, depth))\n \n \nif __name__ == '__main__':\n simple_test()\n # bitstamp_test()\n ","sub_path":"ccxt_simple.py","file_name":"ccxt_simple.py","file_ext":"py","file_size_in_byte":1689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"391998762","text":"class Solution:\n def countNegatives(self, grid: List[List[int]]) -> int:\n \"\"\"\n Time Complexity: O(m + n)\n Space Complexity: O(1)\n \"\"\"\n res = 0\n n = len(grid)\n m = len(grid[0])\n i = 0\n j = m-1\n while i < n:\n while j >= 0 and 0 > grid[i][j]:\n res += n - i\n j -= 1\n i += 1\n return res","sub_path":"LeetCodeLearn/String_Array/1351.py","file_name":"1351.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"621443956","text":"# Brendan McGlynn\r\n# CSEC 472 - Lab 3\r\n# Group 2\r\n# Method 4\r\n\r\nimport skimage\r\n\r\nimport numpy as np # computations in the mse command\r\nfrom PIL import Image, ImageChops, ImageStat # Image Library\r\n\r\nimport glob # Import file paths\r\nimport mahotas\r\nimport pylab\r\nimport random # shuffle array\r\nimport math\r\n\r\ntest = []\r\ntrain = []\r\nimageDetails_list = []\r\nfinalTestCheck = []\r\n\r\n\r\n\r\ndef difference(imageA, imageB):\r\n h = ImageChops.difference(imageA, imageB).histogram()\r\n val = math.sqrt(sum(h*(i**2) for i, h in enumerate(h)) / (float(imageA.size[0]) * imageB.size[1]))\r\n return val/100\r\n\r\ndef main():\r\n # Add images to testing\r\n for finger in glob.glob(\r\n 'C:/Users/brend/Desktop/RIT/Senior Year/Fall Semester/Auth/mod 4/lab/groupStuff/jake/test/*'):\r\n if finger.endswith('.txt'):\r\n imageDetails_list.append(finger[-12:])\r\n else:\r\n im = Image.open(finger)\r\n finalTestCheck.append(finger[-12:])\r\n test.append(im)\r\n\r\n # Add images to training\r\n for finger2 in glob.glob(\r\n 'C:/Users/brend/Desktop/RIT/Senior Year/Fall Semester/Auth/mod 4/lab/groupStuff/jake/train/*'):\r\n im2 = Image.open(finger2)\r\n train.append(im2)\r\n\r\n trainlength = len(train)\r\n print(\"Number of train images:\", trainlength)\r\n\r\n testlength = len(test)\r\n print(\"Number of test images:\", testlength)\r\n\r\n #print(test[0])\r\n\r\n #Image._show(train[0])\r\n #Image._show(train[1])\r\n #Image._show(train[2])\r\n\r\n # img = Image._show(image_list[0])\r\n # img.show()\r\n\r\n # print(imageDetails_list[0])\r\n # print(imageDetails_list[1])\r\n\r\n for everyFinger in train:\r\n test.append(everyFinger)\r\n #print()\r\n\r\n # print(\"new test Length - \", newLength)\r\n\r\n trainNum = 0\r\n random.shuffle(test)\r\n newLength = len(test)\r\n\r\n while trainNum < len(train): # go thru each train item\r\n testNum = 0\r\n while testNum < len(test): # go thru each test item\r\n image1 = train[trainNum]\r\n image2 = test[testNum]\r\n valid = difference(image1,image2)\r\n #print(\"counter - \", testNum)\r\n if valid < 0.40: # 0 is more similar\r\n #Image._show(test[testNum])\r\n test.pop(testNum)\r\n print(\"train num: \", trainNum)\r\n print(\"test num: \", testNum)\r\n testNum=0\r\n trainNum+=1\r\n if trainNum >= len(train):\r\n break\r\n else:\r\n #do next iteration\r\n testNum += 1\r\n if(testNum >= len(test)):\r\n testNum = 0\r\n trainNum += 1\r\n break\r\n\r\n\r\n print(\"final length of test - \", len(test))\r\n numCorrect = 0\r\n for item in test:\r\n print(\"File Name:\", item.filename[-12:])\r\n fingerPrintNum = int(item.filename[-11:-7])\r\n print(\"Finger Print Number:\", fingerPrintNum)\r\n if(fingerPrintNum > 1500):\r\n numCorrect += 1\r\n print(\"Number of correct fingerprints: \" + str(numCorrect) + \"/500\")\r\n\r\nif __name__ == '__main__':\r\n main()","sub_path":"histogram mean squared error 60 percent.py","file_name":"histogram mean squared error 60 percent.py","file_ext":"py","file_size_in_byte":3166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"208215142","text":"from collections import defaultdict\r\nfrom prettytable import PrettyTable\r\nimport os\r\nimport unittest\r\n\r\n\r\ndef file_reader(file_name, fields_per_line, separator=',', header=False):\r\n \"\"\"this generator returns all the values of a line on each call to next()\"\"\"\r\n try:\r\n fp = open(file_name, 'r') # Do the risky action of attempting to open a file\r\n except FileNotFoundError:\r\n print(\"can't open\", file_name) # If file not found, raise exception\r\n else: # If the file is found\r\n with fp:\r\n line_number = 1 # Start line counter to identify line that raises ValueError\r\n next_line = [] # Initialize next_line variable to store the line in question\r\n \r\n for line in fp:\r\n line = line.rstrip('\\n\\r').split(separator) # Strips the \\n and/or \\r from the end of the line and Separates the line into values using the separator\r\n if len(line) != fields_per_line:\r\n raise ValueError(file_name, \"has\", len(line), \"fields in\", line_number, \"but expected\", fields_per_line)\r\n for value in line: \r\n next_line.append(value)\r\n \r\n line = next_line # Transfer the values into line, so we can empty and reuse next_line\r\n next_line = [] # Before yielding, we must empty next_line for future use\r\n line_number += 1 # Increase the line counter by 1\r\n if header == True: # If there is a header, skip that line. \r\n header = False # Set header=False so later lines don't get skipped\r\n continue\r\n yield tuple(line)\r\n\r\n \r\nclass University:\r\n \"\"\" Class University imports data from .txt files, organizes such data into \r\n dictionaries with classes, and prints them in prettytable format \"\"\"\r\n def __init__(self, dir_path):\r\n self.dir_path = dir_path\r\n self.students = dict() # self.students[cwid] = instance of class Student\r\n self.instructors = dict() # self.instructors[cwid] = instance of class Instructor\r\n self._majors = dict() # self.majors[major] = instance of class major\r\n\r\n # Calls functions that import university data from files\r\n self.import_majors(dir_path)\r\n self.import_students(dir_path)\r\n self.import_instructors(dir_path)\r\n self.import_grades(dir_path)\r\n\r\n # Methods that import data from .txt files, and create instances of classes as values in dicitonaries\r\n def import_students(self, dir_path):\r\n \"\"\" Pulls student data from .txt file and organizes it into the students dictionary \"\"\"\r\n students_file = os.path.join(dir_path, \"students.txt\")\r\n try:\r\n for cwid, name, major_name in file_reader(students_file, 3, '\\t'):\r\n self.students[cwid] = Student(cwid, name, major_name, self._majors[major_name])\r\n except ValueError as e:\r\n print(e)\r\n\r\n def import_instructors(self, dir_path):\r\n \"\"\" Pulls instructor data from .txt file and organizes it into the instructors dictionary \"\"\"\r\n instructors_file = os.path.join(dir_path, \"instructors.txt\")\r\n try:\r\n for cwid, name, department in file_reader(instructors_file, 3, '\\t'):\r\n self.instructors[cwid] = Instructor(cwid, name, department)\r\n except ValueError as e:\r\n print(e) \r\n\r\n def import_grades(self, dir_path):\r\n \"\"\" read the grades file, update the student to note the course and grade, update instructor to \r\n note an additional student \r\n \"\"\"\r\n grades_file = os.path.join(dir_path, \"grades.txt\")\r\n try:\r\n for student_cwid, course, grade, instructor_cwid in file_reader(grades_file, 4, '\\t'):\r\n self.students[student_cwid].add_course(course, grade) # adds dictionary entry pair. See def in class Student\r\n self.instructors[instructor_cwid].add_course(course) # adds a student to #students in course. See def in Instructor class.\r\n except ValueError as e:\r\n print(e) \r\n \r\n def import_majors(self, dir_path):\r\n \"\"\" reads majors from file in dir_path and adds them to a dictionary self._majors \"\"\"\r\n majors_file = os.path.join(dir_path, \"majors.txt\")\r\n try:\r\n for major, flag, course in file_reader(majors_file, 3, separator='\\t', header=False):\r\n if major not in self._majors:\r\n self._majors[major] = Major(major)\r\n\r\n self._majors[major].add_course(flag, course)\r\n except ValueError as e:\r\n print(e)\r\n\r\n # Print summary information as tables\r\n def student_prettytable(self):\r\n \"\"\" create a student pretty table with info the student and courses \"\"\"\r\n student_prettytable = PrettyTable() # initialize pt\r\n student_prettytable.field_names = Student.pt_header(self) #set headers as defined in function inside Student class\r\n for student in self.students.values():\r\n student_prettytable.add_row(student.pt_row()) # add rows using the output of pt_row defined in Student class\r\n return student_prettytable\r\n\r\n def instructor_prettytable(self):\r\n \"\"\" create an instructor pretty table with info the instructor and courses \"\"\"\r\n instructor_prettytable = PrettyTable()\r\n instructor_prettytable.field_names = Instructor.pt_header(self)\r\n for ins in self.instructors.values():\r\n for row in ins.pt_row():\r\n instructor_prettytable.add_row(row)\r\n return instructor_prettytable\r\n\r\n def major_prettytable(self):\r\n \"\"\" create a pretty table containing information of courses associated with majors \"\"\"\r\n major_prettytable = PrettyTable() # initialize pt\r\n major_prettytable.field_names = Major.pt_header(self) #set headers as defined in function inside Student class\r\n for major in self._majors.values():\r\n major_prettytable.add_row(major.pt_row()) # add rows using the output of pt_row defined in Student class\r\n return major_prettytable\r\n\r\n\r\nclass Student:\r\n \"\"\" Keeps track of all information concerning students, \r\n including what happens when a student takes a new course \"\"\"\r\n def __init__(self, cwid, name, major_name, major):\r\n self.cwid = cwid\r\n self.name = name\r\n self.major_name = major_name\r\n self.major = major\r\n\r\n self.courses = dict() # self.courses[course] = grade\r\n\r\n def add_course(self, course, grade):\r\n \"\"\" note that the student took a course and earned a grade \"\"\"\r\n self.courses[course] = grade\r\n \r\n def pt_header(self):\r\n \"\"\" return a list of the fields in the prettytable \"\"\"\r\n return ['CWID', 'Name', 'Major', 'Completed Courses', 'Remaining Required', 'Remaining Electives']\r\n\r\n def pt_row(self):\r\n \"\"\" return the values for the students pretty table for self \"\"\"\r\n completed_courses, remaining_required, remaining_electives = self.major.remaining(self.courses)\r\n return [self.cwid, self.name, self.major_name, completed_courses, remaining_required, remaining_electives]\r\n \r\n \r\nclass Instructor:\r\n \"\"\" Keeps track of all information concerning Instructors, \r\n including what happens when a student takes a new course \"\"\"\r\n def __init__(self, cwid, name, department):\r\n self.cwid = cwid\r\n self.department = department\r\n self.name = name\r\n self.courses = defaultdict(int) # self.courses[course] = number of students\r\n\r\n def add_course(self, course):\r\n \"\"\" tell the instructor that she taught a student in a course \"\"\"\r\n self.courses[course] += 1\r\n\r\n def pt_header(self):\r\n return ['CWID', 'Name', 'Department', 'Course', '#Students']\r\n\r\n def pt_row(self):\r\n \"\"\" a generator to return the rows with course and number of students \"\"\"\r\n for course, students in self.courses.items():\r\n yield [self.cwid, self.name, self.department, course, students]\r\n\r\n\r\nclass Major:\r\n \"\"\" Track all the information regarding the major, inlcuding its required and elective courses \"\"\"\r\n def __init__(self, department, passing=None):\r\n self._department = department\r\n self._required = set()\r\n self._electives = set()\r\n if passing is None:\r\n self.passing_grades = {'A', 'A-', 'B+', 'B', 'B-', 'C+', 'C'}\r\n else:\r\n self.passing_grades = passing\r\n\r\n def add_course(self, flag, course):\r\n \"\"\" notes another required course or elective \"\"\"\r\n if flag.upper() == 'E':\r\n self._electives.add(course)\r\n elif flag.upper() == 'R':\r\n self._required.add(course)\r\n else:\r\n raise ValueError(f\"Flag {flag} is invalid for course {course}\")\r\n\r\n def pt_header(self):\r\n \"\"\" return a list of the fields in the prettytable \"\"\"\r\n return ['Major', 'Required Courses', 'Elective Courses']\r\n\r\n def pt_row(self):\r\n \"\"\" returns the list of values that populate the prettytable for a specific major \"\"\"\r\n return [self._department, self._required, self._electives]\r\n\r\n def remaining(self, courses):\r\n \"\"\" Calculate completed_courses, remaining_required, remaining_electives from \r\n a dictionary of course=grade for a single student \"\"\"\r\n completed_courses = {course for course, grade in courses.items() if grade in self.passing_grades}\r\n remaining_required = self._required - completed_courses\r\n if self._electives.intersection(completed_courses):\r\n remaining_electives = None\r\n else:\r\n remaining_electives = self._electives\r\n return completed_courses, remaining_required, remaining_electives\r\n\r\n\r\ndef main():\r\n stevens = University('G:\\My Drive\\F18\\SSW-810\\Week 10')\r\n print(\"Student Summary\")\r\n student_summary = print(stevens.student_prettytable())\r\n print(\"Instructor Summary\")\r\n instructor_summary = print(stevens.instructor_prettytable())\r\n print(\"Major Summary\")\r\n major_summary = print(stevens.major_prettytable())\r\n\r\n\r\nclass UniversityTest(unittest.TestCase):\r\n def test_student_instance(self):\r\n \"\"\"Tests several student instances by comparing the values in the instances to the correct values\"\"\"\r\n stevens = University('G:\\My Drive\\F18\\SSW-810\\Week 10')\r\n self.assertEqual(stevens.students['10175'].name, \"Erickson, D\")\r\n self.assertEqual(stevens.students['11461'].name, \"Wright, U\")\r\n self.assertEqual(stevens.students['11461'].courses, {'SYS 800': 'A', 'SYS 750': 'A-', 'SYS 611': 'A'})\r\n\r\n def test_instructor_instance(self):\r\n \"\"\"Tests several instructor instances by comparing the values in the instances to the correct values\"\"\"\r\n stevens = University('G:\\My Drive\\F18\\SSW-810\\Week 10')\r\n self.assertEqual(stevens.instructors['98764'].name, \"Feynman, R\")\r\n self.assertEqual(stevens.instructors['98765'].name, \"Einstein, A\")\r\n self.assertEqual(stevens.instructors['98760'].courses, {'SYS 800': 1, 'SYS 750': 1, 'SYS 611': 2, 'SYS 645': 1})\r\n\r\n def test_major_instance(self):\r\n \"\"\" Tests Major instances to compare to the correct values \"\"\"\r\n stevens = University('G:\\My Drive\\F18\\SSW-810\\Week 10')\r\n self.assertEqual(stevens._majors['SFEN']._required, {'SSW 540', 'SSW 555', 'SSW 564', 'SSW 567'})\r\n self.assertEqual(stevens._majors['SFEN']._electives, {'CS 501', 'CS 545', 'CS 513'})\r\n\r\n\r\nif __name__ == '__main__':\r\n unittest.main(exit = False, verbosity = 2)\r\n main()\r\n \r\n","sub_path":"HW10_V2_Sarita_Hedaya.py","file_name":"HW10_V2_Sarita_Hedaya.py","file_ext":"py","file_size_in_byte":11677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"350564815","text":"\"\"\"\nThe MIT License (MIT)\n\nCopyright (c) 2020 AbstractUmbra\n\nPermission is hereby granted, free of charge, to any person obtaining a\ncopy of this software and associated documentation files (the \"Software\"),\nto deal in the Software without restriction, including without limitation\nthe rights to use, copy, modify, merge, publish, distribute, sublicense,\nand/or sell copies of the Software, and to permit persons to whom the\nSoftware is furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\nOR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\nFROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\nDEALINGS IN THE SOFTWARE.\n\"\"\"\n\nimport argparse\nimport platform\nimport sys\n\nimport aiohttp\nimport pkg_resources\n\ntry:\n import requests\nexcept ImportError:\n requests = None\n\nimport mystbin\n\n\ndef show_version():\n entries = []\n\n entries.append(\n \"- Python v{0.major}.{0.minor}.{0.micro}-{0.releaselevel}\".format(\n sys.version_info\n )\n )\n version_info = mystbin.version_info\n entries.append(\n \"- mystbin.py v{0.major}.{0.minor}.{0.micro}-{0.releaselevel}\".format(\n version_info\n )\n )\n if version_info.releaselevel != \"final\":\n pkg = pkg_resources.get_distribution(\"mystbin.py\")\n if pkg:\n entries.append(\" - mystbin.py pkg_resources: v{0}\".format(pkg.version))\n\n entries.append(\"- aiohttp v{0.__version__}\".format(aiohttp))\n if requests is not None:\n entries.append(\" - [requests] v{0.__version__}\".format(requests))\n uname = platform.uname()\n entries.append(\"- system info: {0.system} {0.release} {0.version}\".format(uname))\n print(\"\\n\".join(entries))\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\n prog=\"mystbin\", description=\"Tools for helping with mystbin.py\"\n )\n parser.add_argument(\n \"-v\", \"--version\", action=\"store_true\", help=\"shows the wrapper version\"\n )\n parser.set_defaults(func=core)\n\n return parser, parser.parse_args()\n\n\ndef core(parser, args):\n if args.version:\n show_version()\n\n\ndef main():\n parser, args = parse_args()\n args.func(parser, args)\n\n\nmain()\n","sub_path":"mystbin/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":2609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"109081637","text":"# Copyright 2020 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.7+\n\nfrom core.src.bootstrap.Bootstrapper import Bootstrapper\nfrom core.src.bootstrap.Constants import Constants\n\n\nclass CoreMain(object):\n def __init__(self, argv):\n \"\"\"The main entry point of patch operation execution\"\"\"\n # Level 1 bootstrapping - bare minimum components to allow for diagnostics in further bootstrapping\n bootstrapper = Bootstrapper(argv)\n file_logger = bootstrapper.file_logger\n composite_logger = bootstrapper.composite_logger\n stdout_file_mirror = bootstrapper.stdout_file_mirror\n lifecycle_manager = telemetry_writer = status_handler = None\n\n # Init operation statuses\n patch_operation_requested = Constants.UNKNOWN\n patch_assessment_successful = False\n patch_installation_successful = False\n\n try:\n # Level 2 bootstrapping\n composite_logger.log_debug(\"Building out full container...\")\n container = bootstrapper.build_out_container()\n lifecycle_manager, telemetry_writer, status_handler = bootstrapper.build_core_components(container)\n composite_logger.log_debug(\"Completed building out full container.\\n\\n\")\n\n # Basic environment check\n bootstrapper.bootstrap_splash_text()\n bootstrapper.basic_environment_health_check()\n lifecycle_manager.execution_start_check() # terminates if this instance shouldn't be running (redundant)\n\n # Execution config retrieval\n composite_logger.log_debug(\"Obtaining execution configuration...\")\n execution_config = container.get('execution_config')\n patch_operation_requested = execution_config.operation.lower()\n patch_assessor = container.get('patch_assessor')\n package_manager = container.get('package_manager')\n\n # if this is an auto patching installation request, log and disable (if enabled by default) the current auto OS update status. NOTE: log status in a separate file in config settings\n if execution_config.maintenance_run_id is not None and patch_operation_requested == Constants.INSTALLATION.lower():\n package_manager.disable_auto_os_update()\n\n # Assessment happens no matter what\n patch_assessment_successful = patch_assessor.start_assessment()\n\n # Patching + additional assessment occurs if the operation is 'Installation'\n if patch_operation_requested == Constants.INSTALLATION.lower():\n patch_installer = container.get('patch_installer')\n patch_installation_successful = patch_installer.start_installation()\n patch_assessment_successful = patch_assessor.start_assessment()\n\n except Exception as error:\n # Privileged operation handling for non-production use\n if Constants.EnvLayer.PRIVILEGED_OP_MARKER in repr(error):\n composite_logger.log_debug('\\nPrivileged operation request intercepted: ' + repr(error))\n raise\n\n # General handling\n composite_logger.log_error('\\nEXCEPTION during patch operation: ' + repr(error))\n composite_logger.log_error('TO TROUBLESHOOT, please save this file before the next invocation: ' + bootstrapper.log_file_path)\n\n composite_logger.log_debug(\"Safely completing required operations after exception...\")\n if telemetry_writer is not None:\n telemetry_writer.send_error_info(\"EXCEPTION: \" + repr(error))\n if status_handler is not None:\n composite_logger.log_debug(' - Status handler pending writes flags [I=' + str(patch_installation_successful) + ', A=' + str(patch_assessment_successful) + ']')\n if patch_operation_requested == Constants.INSTALLATION.lower() and not patch_installation_successful:\n status_handler.set_installation_substatus_json(status=Constants.STATUS_ERROR)\n composite_logger.log_debug(' -- Persisted failed installation substatus.')\n if not patch_assessment_successful:\n status_handler.set_assessment_substatus_json(status=Constants.STATUS_ERROR)\n composite_logger.log_debug(' -- Persisted failed assessment substatus.')\n\n if Constants.ERROR_ADDED_TO_STATUS not in repr(error):\n status_handler.add_error_to_status(\"Terminal exception {0}\".format(repr(error)), Constants.PatchOperationErrorCodes.OPERATION_FAILED)\n else:\n status_handler.add_error_to_status(\"Execution terminated due to last reported error.\", Constants.PatchOperationErrorCodes.OPERATION_FAILED)\n\n else:\n composite_logger.log_error(' - Status handler is not initialized, and status data cannot be written.')\n composite_logger.log_debug(\"Completed exception handling.\\n\")\n\n finally:\n if lifecycle_manager is not None:\n lifecycle_manager.update_core_sequence(completed=True)\n\n telemetry_writer.send_runbook_state_info(\"Succeeded.\")\n telemetry_writer.close_transports()\n\n stdout_file_mirror.stop()\n file_logger.close(message_at_close=\"\")\n","sub_path":"src/core/src/CoreMain.py","file_name":"CoreMain.py","file_ext":"py","file_size_in_byte":5874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"417514105","text":"def start():\r\n loop() \r\n\r\ndef victory():\r\n print(\"Victorius!\")\r\n print(\"Exit? (y / n)\")\r\n\r\n exit = input(\"> \")\r\n\r\n if \"y\" in exit or \"Y\" in exit:\r\n exit(0)\r\n else:\r\n start()\r\n\r\ndef death():\r\n print(\"You have lost your mind and your body, you are dead.\")\r\n exit_inp = input(\"Do you want to continue? Y/N \")\r\n exit_inp = exit_inp.lower()\r\n\t\t\r\n if exit_inp == \"n\":\r\n exit(0)\r\n elif exit_inp == \"y\":\r\n start()\r\n else:\r\n print(\"Incorrect command, try again\")\r\n death()\r\n\r\ndef loop():\r\n print(\"\"\"\r\n ---------------------------------------------------------\r\n You are standing in an open field west of a white house, with a boarded front door.\r\n (A secret path leads southwest into the forest.)\r\n There is a Small Mailbox.\r\n From here, you can go north, south, east or west.\r\n ---------------------------------------------------------\r\n \"\"\")\r\n action = input(\"What do you do? > \")\r\n \r\n while True:\r\n if action.lower() == (\"take mailbox\"):\r\n print(\"---------------------------------------------------------\")\r\n print(\"It is securely anchored.\")\r\n elif action.lower() == (\"open mailbox\"):\r\n print(\"---------------------------------------------------------\")\r\n print(\"Opening the small mailbox reveals a leaflet.\")\r\n elif action.lower() == (\"open door\"):\r\n print(\"---------------------------------------------------------\")\r\n print(\"The door cannot be opened.\")\r\n elif action.lower() == (\"take boards\"):\r\n print(\"---------------------------------------------------------\")\r\n print(\"The boards are securely fastened.\")\r\n elif action.lower() == (\"look at house\"):\r\n print(\"---------------------------------------------------------\")\r\n print(\"The house is a beautiful colonial house which is painted white. It is clear that the owners must have been extremely wealthy.\")\r\n elif action.lower() == (\"read leaflet\"):\r\n print(\"---------------------------------------------------------\")\r\n print(\"To whomever this may concern: keep going north.\")\r\n elif \"north\" in action.lower():\r\n cthulhu()\r\n elif \"south\" in action.lower():\r\n prince()\r\n elif \"east\" in action.lower():\r\n princess()\r\n elif \"west\" in action.lower():\r\n lion()\r\n else:\r\n print(\"I got no idea what you're saying. Is it that hard picking a cardinal direction?\")","sub_path":"exercises/ex31/drills/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":2584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"576525271","text":"from collections import defaultdict, namedtuple, OrderedDict\n\nsentence = \"The red for jumped over the fence and ran to the zoo for food\"\nwords = sentence.split(' ')\n\nd = defaultdict(int)\nfor word in words:\n d[word] += 1\n\nprint(d)\n\n\nParts = namedtuple('Parts', 'id_num desc cost amount')\nauto_parts = Parts(\n id_num='1234',\n desc='Ford Engine',\n cost=1200.00,\n amount=10\n)\n\nprint(auto_parts.id_num)\n\n\nd = {'banana': 3, 'apple': 4, 'pear': 1, 'orange': 2}\nnew_d = OrderedDict(sorted(d.items()))\n\nprint(new_d)\n\n\nfor key in new_d:\n print(key, new_d[key])\nprint('-------------------------------')\nfor key in reversed(new_d):\n print(key, new_d[key])\n","sub_path":"#features/default_dist.py","file_name":"default_dist.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"147895454","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu May 9 22:19:25 2019\n\n@author: Vijay\n\"\"\"\n\n#Code Challenge\n # Name: \n # Intersection\n #Filename: \n # Intersection.py\n #Problem Statement:\n # With two given lists [1,3,6,78,35,55] and [12,24,35,24,88,120,155]\n \n \n # Write a program to make a list whose elements are intersection of the above given lists. \nlist1=[]\nlist2=[]\nx=input(\"enter the element with comma:\").split(\",\") \nfor i in x:\n list1.append(i)\ny=input(\"enter the element with comma:\").split(\",\")\nfor j in y:\n list2.append(j)\nlist3 =set(list1).intersection(set(list2))\nprint(list3)","sub_path":"Day 3/intersection.py","file_name":"intersection.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"337667521","text":"import re\n\n\n\ntext_to_morse_symbols = {\n 'A': '·−', 'B': '−···', 'C': '−·−·',\n 'D': '−··', 'E': '·', 'F': '··−·',\n 'G': '−−·', 'H': '····', 'I': '··',\n 'J': '·−−−', 'K': '−·−', 'L': '·−··',\n 'M': '−−', 'N': '−·', 'O': '−−−',\n 'P': '·−−·', 'Q': '−−·−', 'R': '·−·',\n 'S': '···', 'T': '−', 'U': '··−',\n 'V': '···−', 'W': '·−−', 'X': '−··−',\n 'Y': '−·−−', 'Z': '−−··',\n \n '0': '−−−−−', '1': '·−−−−', '2': '··−−−',\n '3': '···−−', '4': '····−', '5': '·····',\n '6': '−····', '7': '−−···', '8': '−−−··',\n '9': '−−−−·',\n\n #'.': '·−·−·−', '?': '··−−··', '@': '·−−·−·',\n #',': '−−··−−', '/': '−··−·', ':': '−−−···',\n\n}\n\n# Inflate the morse letters above, so that there are spaces in them.\n# For example: −−− becaomes − − −\nfor k, v in text_to_morse_symbols.items():\n text_to_morse_symbols[k] = \" \".join(v)\n\n\n# Invert the dict above, by swapping its keys and values.\nmorse_to_text_symbols = dict([ (v, k) for k, v in text_to_morse_symbols.items() ])\n\n\n\ndef text_to_morse(text):\n \"\"\"Returns a morse code string if conversion from text is possible. Otherwise raises ValueError.\n \n Text must be understood by the \"International Morse Code\" format.\n \"\"\"\n\n if not isinstance(text, str): raise ValueError(\"Did not receive a string\")\n\n if text == \"\": return \"\"\n\n morse = \"\"\n\n for text_word in text.split():\n\n for text_letter in text_word:\n\n text_letter = text_letter.upper()\n if text_letter in text_to_morse_symbols:\n morse = morse + \" \" + text_to_morse_symbols[text_letter]\n else:\n raise ValueError(\"Can not convert this from text to morse: \" + text_letter)\n\n morse = morse + \" \"\n\n return morse.strip() # strip() removes any leading and trailing spaces\n\n\n\n\n\ndef morse_to_text(morse_code):\n \"\"\"Returns a text string if conversion from morse code is possible. Otherwise raises ValueError.\n\n Morse code must be in \"International Morse Code\" format.\n \"\"\"\n\n if not isinstance(morse_code, str): raise ValueError(\"Did not receive a string\")\n\n if morse_code == \"\": return \"\"\n\n text = \"\"\n\n morse_words = re.split(r' {7}', morse_code) # regex: \"match 7 spaces\"\n\n for morse_word in morse_words:\n\n morse_letters = re.split(r' {3}', morse_word) # regex: \"match 3 spaces\"\n\n for morse_letter in morse_letters:\n\n if morse_letter in morse_to_text_symbols:\n text = text + morse_to_text_symbols[morse_letter]\n else:\n raise ValueError(\"Can not convert this from morse to text: \" + morse_letter)\n\n text = text + \" \"\n\n return text.strip() # strip() removes any leading and trailing spaces","sub_path":"samu/samu.py","file_name":"samu.py","file_ext":"py","file_size_in_byte":3032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"570916366","text":"from streamz_ext import Stream\n\n\ndef test_star_sink():\n L = []\n\n def add(x, y):\n L.append(x + y)\n\n source = Stream()\n source.starsink(add)\n\n source.emit((1, 10))\n\n assert L[0] == 11\n\n\ndef test_unique_dict():\n source = Stream()\n L = source.unique(history=1).sink_to_list()\n\n source.emit({'a': 1})\n source.emit({'a': 1})\n source.emit({'a': 1})\n\n assert L == [{'a': 1}]\n\n\ndef test_execution_order():\n L = []\n for i in range(5):\n s = Stream()\n b = s.pluck(1)\n a = s.pluck(0)\n l = a.combine_latest(b, emit_on=a).sink_to_list()\n z = [(1, 'red'), (2, 'blue'), (3, 'green')]\n for zz in z:\n s.emit(zz)\n L.append((l, ))\n for ll in L:\n assert ll == L[0]\n\n L2 = []\n for i in range(5):\n s = Stream()\n a = s.pluck(0)\n b = s.pluck(1)\n l = a.combine_latest(b, emit_on=a).sink_to_list()\n z = [(1, 'red'), (2, 'blue'), (3, 'green')]\n for zz in z:\n s.emit(zz)\n L2.append((l,))\n for ll, ll2 in zip(L, L2):\n assert ll2 == L2[0]\n assert ll != ll2\n\n\ndef test_starmap():\n def add(x=0, y=0):\n return x + y\n\n source = Stream()\n L = source.starmap(add).sink_to_list()\n\n source.emit((1, 10))\n\n assert L[0] == 11\n","sub_path":"test/test_core.py","file_name":"test_core.py","file_ext":"py","file_size_in_byte":1308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"644011705","text":"import csv\nimport os\nimport openpyxl\n\nbase_repository_name = 'Bigdata_Repository'\ntype_folder_1 = 'Type_A'\ntype_folder_2 = 'Type_B'\nfile_name = '시뮬레이션_남해군_관광지별_방문객'\ndir_delimeter = '/'\nfile_format = 'csv'\nsimulation_count = 100\nfile_size_limit = 10000\nsimulation_data = ['1111', '상주면', '남해군', '보리암', '1', '14137', '43677']\nfile_size = 0\nnumber = 1\nis_header = False\nis_first = True\n\n\ndef getTourPoint_csv(filewriter):\n filewriter.writerow(simulation_data)\n return\n\n# def getTourPoint_exel(show_exel):\n# show_exel.append(simulation_data)\n# return\n\ndef get_dest_file_name(file_index, base_repository_name, file_name, file_format, file_size_limit):\n global is_header\n dest_file_name = f'{file_name}{str(file_index)}.{file_format}'\n\n try:\n file_size = os.path.getsize(dest_file_name)\n print(f\"'{dest_file_name}' file size: {file_size}\")\n print(f\"파일당 size 제한: {file_size_limit}\")\n\n if file_size > file_size_limit:\n dest_file_name = f'{file_name}{str(file_index+1)}.{file_format}'\n is_header = True\n file_size = 0\n else:\n is_header = False\n except:\n pass\n return dest_file_name\n\ndef save_file(file_index, base_repository_name, file_name, file_format, file_size_limit):\n dest_file_name = get_dest_file_name(file_index, base_repository_name, file_name, file_format, file_size_limit)\n global is_header\n global is_first\n\n if file_format == 'csv':\n csv_out_file = open(dest_file_name, 'a', newline='')\n filewriter = csv.writer(csv_out_file)\n if is_header == True or is_first == True:\n header_list = ['addrCd', 'gungu', 'sido', 'resNm', 'rnum', 'csForCnt', 'csNatCnt']\n filewriter.writerow(header_list)\n is_first = False\n is_header = False\n\n for index in range(simulation_count):\n getTourPoint_csv(filewriter)\n csv_out_file.close()\n\n elif file_format == 'xls':\n exel_output_file = openpyxl.Workbook()\n if is_header == True or is_first == True:\n header_list = ['addrCd', 'gungu', 'sido', 'resNm', 'rnum', 'csForCnt', 'csNatCnt']\n show_exel = exel_output_file.active\n show_exel.append(header_list)\n exel_output_file.save(dest_file_name)\n is_first = False\n is_header = False\n show_exel = exel_output_file.active\n for index in range(simulation_count):\n show_exel.append(simulation_data)\n\n exel_output_file.save(dest_file_name)\n\ndef file_count():\n sub_name = f'C:\\Python_Workspace\\\\03_Bigdata\\97_190918_test\\{base_repository_name}\\{type_folder_1}'\n if file_name == 'csv':\n index = len(os.listdir(sub_name))\n return index\n elif file_name == 'xls':\n index = len(os.listdir(sub_name))\n return index\n\nwhile True:\n print(\"1.환경설정(디렉토리명, 저장 방식...)\")\n print(\"2.작업수행\")\n print(\"3.종료\")\n menu = input(\"메뉴를 선택하세요:\")\n if menu == '1':\n dest_file_name = f'{base_repository_name}{dir_delimeter}{file_name}1.{file_format}'\n while True:\n print(\"1.디렉토리명 초기값: \", base_repository_name)\n print(\"2.파일명 초기값: \", file_name)\n print(\"3.포멧 초기값(1.csv 2.xls): \", file_format)\n print(\"4.데이터 용량 제한(byte) 초기값: \", file_size_limit)\n print(\"5.이전메뉴\")\n sub_menu = input(\"환경설정 메뉴를 선택하세요: \")\n if sub_menu == '1':\n base_repository_name = input(\"디렉토리명 설정: \")\n elif sub_menu == '2':\n file_name = input(\"파일명 설정: \")\n elif sub_menu == '3':\n file_format = input(\"포멧 설정: \")\n elif sub_menu == '4':\n file_size_limit = int(input(\"용량 설정: \"))\n elif sub_menu == '5':\n break\n else:\n print(\"잘못 입력하셨습니다. 다시 입력하세요.\")\n continue\n\n elif menu == '2':\n dest_file_name = f'{base_repository_name}{dir_delimeter}{file_name}1.{file_format}'\n if file_format == 'csv':\n if number == 1:\n if not os.path.exists(base_repository_name):\n os.mkdir(base_repository_name)\n os.chdir(base_repository_name)\n os.mkdir(type_folder_1)\n os.chdir(type_folder_1)\n number += 1\n if not os.path.exists(dest_file_name):\n save_file(1, base_repository_name, file_name, file_format, file_size_limit)\n else:\n save_file(file_count(), base_repository_name, file_name, file_format, file_size_limit)\n elif number != 1:\n dest_file_name = f'{file_name}1.{file_format}'\n if not os.path.exists(dest_file_name):\n save_file(1, base_repository_name, file_name, file_format, file_size_limit)\n else:\n save_file(file_count(), base_repository_name, file_name, file_format, file_size_limit)\n\n if file_format == 'xls':\n if number == 1:\n if not os.path.exists(base_repository_name):\n os.mkdir(base_repository_name)\n os.chdir(base_repository_name)\n os.mkdir(type_folder_2)\n os.chdir(type_folder_2)\n number += 1\n if not os.path.exists(dest_file_name):\n save_file(1, base_repository_name, file_name, file_format, file_size_limit)\n else:\n save_file(file_count(), base_repository_name, file_name, file_format, file_size_limit)\n elif number != 1:\n dest_file_name = f'{file_name}1.{file_format}'\n if not os.path.exists(dest_file_name):\n save_file(1, base_repository_name, file_name, file_format, file_size_limit)\n else:\n save_file(file_count(), base_repository_name, file_name, file_format, file_size_limit)\n elif menu == '3':\n break\nelse:\n print(\"잘못 입력하셨습니다. 다시 입력하세요.\")\n\n\n","sub_path":"03_Bigdata/97_190918_test/정수현_빅데이터저장관리_프로그램1.py","file_name":"정수현_빅데이터저장관리_프로그램1.py","file_ext":"py","file_size_in_byte":6412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"434844676","text":"\"\"\"\nSomewhat hacky solution to create conda lock files.\n\"\"\"\n\nimport atexit\nimport datetime\nimport json\nimport logging\nimport os\nimport pathlib\nimport re\nimport shlex\nimport shutil\nimport subprocess\nimport sys\nimport tempfile\n\nfrom contextlib import contextmanager\nfrom functools import partial\nfrom itertools import chain\nfrom typing import (\n AbstractSet,\n Dict,\n Iterator,\n List,\n MutableSequence,\n Optional,\n Sequence,\n Set,\n Tuple,\n Union,\n)\n\nimport click\nimport ensureconda\nimport pkg_resources\n\nfrom click_default_group import DefaultGroup\n\nfrom conda_lock.common import read_file, read_json, write_file\nfrom conda_lock.errors import PlatformValidationError\nfrom conda_lock.src_parser import LockSpecification\nfrom conda_lock.src_parser.environment_yaml import parse_environment_file\nfrom conda_lock.src_parser.meta_yaml import parse_meta_yaml_file\nfrom conda_lock.src_parser.pyproject_toml import parse_pyproject_toml\nfrom conda_lock.virtual_package import (\n FakeRepoData,\n default_virtual_package_repodata,\n virtual_package_repo_from_specification,\n)\n\n\nlogger = logging.getLogger(__name__)\nDEFAULT_FILES = [pathlib.Path(\"environment.yml\")]\nPathLike = Union[str, pathlib.Path]\n\n# Captures basic auth credentials, if they exists, in the second capture group.\nAUTH_PATTERN = re.compile(r\"^(https?:\\/\\/)(.*:.*@)?(.*)\")\n\n# Captures the domain in the second group.\nDOMAIN_PATTERN = re.compile(r\"^(https?:\\/\\/)?([^\\/]+)(.*)\")\n\n# Captures the platform in the first group.\nPLATFORM_PATTERN = re.compile(r\"^# platform: (.*)$\")\nINPUT_HASH_PATTERN = re.compile(r\"^# input_hash: (.*)$\")\n\n\nif not (sys.version_info.major >= 3 and sys.version_info.minor >= 6):\n print(\"conda_lock needs to run under python >=3.6\")\n sys.exit(1)\n\n\nCONDA_PKGS_DIRS = None\nDEFAULT_PLATFORMS = [\"osx-64\", \"linux-64\", \"win-64\"]\nDEFAULT_KINDS = [\"explicit\"]\nKIND_FILE_EXT = {\n \"explicit\": \"\",\n \"env\": \".yml\",\n}\nKIND_USE_TEXT = {\n \"explicit\": \"conda create --name YOURENV --file {lockfile}\",\n \"env\": \"conda env create --name YOURENV --file {lockfile}\",\n}\n\n\ndef _extract_platform(line: str) -> Optional[str]:\n search = PLATFORM_PATTERN.search(line)\n if search:\n return search.group(1)\n return None\n\n\ndef _extract_spec_hash(line: str) -> Optional[str]:\n search = INPUT_HASH_PATTERN.search(line)\n if search:\n return search.group(1)\n return None\n\n\ndef extract_platform(lockfile: str) -> str:\n for line in lockfile.strip().split(\"\\n\"):\n platform = _extract_platform(line)\n if platform:\n return platform\n raise RuntimeError(\"Cannot find platform in lockfile.\")\n\n\ndef extract_input_hash(lockfile_contents: str) -> Optional[str]:\n for line in lockfile_contents.strip().split(\"\\n\"):\n platform = _extract_spec_hash(line)\n if platform:\n return platform\n return None\n\n\ndef _do_validate_platform(platform: str) -> Tuple[bool, str]:\n from ensureconda.resolve import platform_subdir\n\n determined_subdir = platform_subdir()\n return platform == determined_subdir, platform\n\n\ndef do_validate_platform(lockfile: str):\n platform_lockfile = extract_platform(lockfile)\n try:\n success, platform_sys = _do_validate_platform(platform_lockfile)\n except KeyError:\n raise RuntimeError(f\"Unknown platform type in lockfile '{platform_lockfile}'.\")\n if not success:\n raise PlatformValidationError(\n f\"Platform in lockfile '{platform_lockfile}' is not compatible with system platform '{platform_sys}'.\"\n )\n\n\ndef conda_pkgs_dir():\n global CONDA_PKGS_DIRS\n if CONDA_PKGS_DIRS is None:\n temp_dir = tempfile.TemporaryDirectory()\n CONDA_PKGS_DIRS = temp_dir.name\n atexit.register(temp_dir.cleanup)\n return CONDA_PKGS_DIRS\n else:\n return CONDA_PKGS_DIRS\n\n\ndef conda_env_override(platform) -> Dict[str, str]:\n env = dict(os.environ)\n env.update(\n {\n \"CONDA_SUBDIR\": platform,\n \"CONDA_PKGS_DIRS\": conda_pkgs_dir(),\n \"CONDA_UNSATISFIABLE_HINTS_CHECK_DEPTH\": \"0\",\n \"CONDA_ADD_PIP_AS_PYTHON_DEPENDENCY\": \"False\",\n }\n )\n return env\n\n\ndef solve_specs_for_arch(\n conda: PathLike,\n channels: Sequence[str],\n specs: List[str],\n platform: str,\n) -> dict:\n args: MutableSequence[PathLike] = [\n conda,\n \"create\",\n \"--prefix\",\n os.path.join(conda_pkgs_dir(), \"prefix\"),\n \"--dry-run\",\n \"--json\",\n ]\n conda_flags = os.environ.get(\"CONDA_FLAGS\")\n if conda_flags:\n args.extend(shlex.split(conda_flags))\n if channels:\n args.append(\"--override-channels\")\n\n for channel in channels:\n args.extend([\"--channel\", channel])\n if channel == \"defaults\" and platform in {\"win-64\", \"win-32\"}:\n # msys2 is a windows-only channel that conda automatically\n # injects if the host platform is Windows. If our host\n # platform is not Windows, we need to add it manually\n args.extend([\"--channel\", \"msys2\"])\n args.extend(specs)\n\n proc = subprocess.run(\n args,\n env=conda_env_override(platform),\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n encoding=\"utf8\",\n )\n\n def print_proc(proc):\n import shlex\n\n print(f\" Command: {' '.join(shlex.quote(x) for x in proc.args)}\")\n if proc.stdout:\n print(f\" STDOUT:\\n{proc.stdout}\")\n if proc.stderr:\n print(f\" STDERR:\\n{proc.stderr}\")\n\n try:\n proc.check_returncode()\n except subprocess.CalledProcessError:\n try:\n err_json = json.loads(proc.stdout)\n message = err_json[\"message\"]\n except json.JSONDecodeError as e:\n print(f\"Failed to parse json, {e}\")\n message = \"\"\n except KeyError:\n print(\"Message key not found in json! returning the full json text\")\n message = err_json\n\n print(f\"Could not lock the environment for platform {platform}\")\n if message:\n print(message)\n print_proc(proc)\n\n sys.exit(1)\n\n try:\n return json.loads(proc.stdout)\n except json.JSONDecodeError:\n print(\"Could not solve for lock\")\n print_proc(proc)\n sys.exit(1)\n\n\ndef _process_stdout(stdout):\n cache = set()\n extracting_packages = False\n leading_empty = True\n for logline in stdout:\n logline = logline.rstrip()\n if logline:\n leading_empty = False\n if logline == \"Downloading and Extracting Packages\":\n extracting_packages = True\n if not logline and (extracting_packages or leading_empty):\n continue\n if \"%\" in logline:\n logline = logline.split()[0]\n if logline not in cache:\n yield logline\n cache.add(logline)\n else:\n yield logline\n\n\ndef do_conda_install(conda: PathLike, prefix: str, name: str, file: str) -> None:\n\n if prefix and name:\n raise ValueError(\"Provide either prefix, or name, but not both.\")\n\n kind = \"env\" if file.endswith(\".yml\") else \"explicit\"\n\n args: MutableSequence[PathLike] = [\n str(conda),\n *([\"env\"] if kind == \"env\" else []),\n \"create\",\n \"--file\",\n file,\n *([] if kind == \"env\" else [\"--yes\"]),\n ]\n\n if prefix:\n args.append(\"--prefix\")\n args.append(prefix)\n if name:\n args.append(\"--name\")\n args.append(name)\n conda_flags = os.environ.get(\"CONDA_FLAGS\")\n if conda_flags:\n args.extend(shlex.split(conda_flags))\n\n logging.debug(\"$MAMBA_ROOT_PREFIX: %s\", os.environ.get(\"MAMBA_ROOT_PREFIX\"))\n\n with subprocess.Popen(\n args,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n bufsize=1,\n universal_newlines=True,\n ) as p:\n if p.stdout:\n for line in _process_stdout(p.stdout):\n logging.info(line)\n\n if p.stderr:\n for line in p.stderr:\n logging.error(line.rstrip())\n\n if p.returncode != 0:\n print(\n f\"Could not perform conda install using {file} lock file into {name or prefix}\"\n )\n sys.exit(1)\n\n\ndef search_for_md5s(\n conda: PathLike, package_specs: List[dict], platform: str, channels: Sequence[str]\n):\n \"\"\"Use conda-search to determine the md5 metadata that we need.\n\n This is only needed if pkgs_dirs is set in condarc.\n Sadly this is going to be slow since we need to fetch each result individually\n due to the cli of conda search\n\n \"\"\"\n\n def matchspec(spec):\n return (\n f\"{spec['name']}[\"\n f\"version={spec['version']},\"\n f\"subdir={spec['platform']},\"\n f\"channel={spec['channel']},\"\n f\"build={spec['build_string']}\"\n \"]\"\n )\n\n found: Set[str] = set()\n logging.debug(\"Searching for package specs: \\n%s\", package_specs)\n packages: List[Tuple[str, str]] = [\n *[(d[\"name\"], matchspec(d)) for d in package_specs],\n *[(d[\"name\"], f\"{d['name']}[url='{d['url_conda']}']\") for d in package_specs],\n *[(d[\"name\"], f\"{d['name']}[url='{d['url']}']\") for d in package_specs],\n ]\n\n for name, spec in packages:\n if name in found:\n continue\n channel_args = []\n for c in channels:\n channel_args += [\"-c\", c]\n cmd = [str(conda), \"search\", *channel_args, \"--json\", spec]\n logging.debug(\"seaching: %s\", cmd)\n out = subprocess.run(\n cmd,\n encoding=\"utf8\",\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n env=conda_env_override(platform),\n )\n content = json.loads(out.stdout)\n logging.debug(\"search output for %s\\n%s\", spec, content)\n if name in content:\n assert len(content[name]) == 1\n logging.debug(\"Found %s\", name)\n yield content[name][0]\n found.add(name)\n\n\ndef fn_to_dist_name(fn: str) -> str:\n if fn.endswith(\".conda\"):\n fn, _, _ = fn.partition(\".conda\")\n elif fn.endswith(\".tar.bz2\"):\n fn, _, _ = fn.partition(\".tar.bz2\")\n else:\n raise RuntimeError(f\"unexpected file type {fn}\", fn)\n return fn\n\n\ndef make_lock_specs(\n *,\n platforms: List[str],\n src_files: List[pathlib.Path],\n include_dev_dependencies: bool = True,\n channel_overrides: Optional[Sequence[str]] = None,\n extras: Optional[AbstractSet[str]] = None,\n virtual_package_repo: FakeRepoData,\n) -> Dict[str, LockSpecification]:\n \"\"\"Generate the lockfile specs from a set of input src_files\"\"\"\n res = {}\n for plat in platforms:\n lock_specs = parse_source_files(\n src_files=src_files,\n platform=plat,\n include_dev_dependencies=include_dev_dependencies,\n extras=extras,\n )\n\n lock_spec = aggregate_lock_specs(lock_specs)\n if channel_overrides:\n channels = list(channel_overrides)\n else:\n channels = lock_spec.channels\n lock_spec.virtual_package_repo = virtual_package_repo\n lock_spec.channels = channels\n res[plat] = lock_spec\n return res\n\n\ndef make_lock_files(\n conda: PathLike,\n platforms: List[str],\n kinds: List[str],\n src_files: List[pathlib.Path],\n include_dev_dependencies: bool = True,\n channel_overrides: Optional[Sequence[str]] = None,\n filename_template: Optional[str] = None,\n check_spec_hash: bool = False,\n extras: Optional[AbstractSet[str]] = None,\n virtual_package_spec: Optional[pathlib.Path] = None,\n):\n \"\"\"Generate the lock files for the given platforms from the src file provided\n\n Parameters\n ----------\n conda :\n The path to a conda or mamba executable\n platforms :\n List of platforms to generate the lock for\n src_files :\n Paths to a supported source file types\n include_dev_dependencies :\n For source types that separate out dev dependencies from regular ones,include those, default True\n channel_overrides :\n Forced list of channels to use.\n filename_template :\n Format for the lock file names. Must include {platform}.\n check_spec_hash :\n Validate that the existing spec hash has not already been generated for.\n extras :\n For src files that support extras use the extras defined in there.\n\n \"\"\"\n if filename_template:\n if \"{platform}\" not in filename_template and len(platforms) > 1:\n print(\n \"{platform} must be in filename template when locking\"\n f\" more than one platform: {', '.join(platforms)}\",\n file=sys.stderr,\n )\n sys.exit(1)\n for kind, file_ext in KIND_FILE_EXT.items():\n if file_ext and filename_template.endswith(file_ext):\n print(\n f\"Filename template must not end with '{file_ext}', as this \"\n f\"is reserved for '{kind}' lock files, in which case it is \"\n f\"automatically added.\"\n )\n sys.exit(1)\n\n # initialize virtual package fake\n if virtual_package_spec and virtual_package_spec.exists():\n virtual_package_repo = virtual_package_repo_from_specification(\n virtual_package_spec\n )\n else:\n virtual_package_repo = default_virtual_package_repodata()\n\n with virtual_package_repo:\n lock_specs = make_lock_specs(\n platforms=platforms,\n src_files=src_files,\n include_dev_dependencies=include_dev_dependencies,\n channel_overrides=channel_overrides,\n extras=extras,\n virtual_package_repo=virtual_package_repo,\n )\n\n for plat, lock_spec in lock_specs.items():\n for kind in kinds:\n if filename_template:\n context = {\n \"platform\": lock_spec.platform,\n \"dev-dependencies\": str(include_dev_dependencies).lower(),\n # legacy key\n \"spec-hash\": lock_spec.input_hash(),\n \"input-hash\": lock_spec.input_hash(),\n \"version\": pkg_resources.get_distribution(\"conda_lock\").version,\n \"timestamp\": datetime.datetime.utcnow().strftime(\n \"%Y%m%dT%H%M%SZ\"\n ),\n }\n\n filename = filename_template.format(**context)\n else:\n filename = f\"conda-{lock_spec.platform}.lock\"\n\n lockfile = pathlib.Path(filename)\n if lockfile.exists() and check_spec_hash:\n existing_spec_hash = extract_input_hash(lockfile.read_text())\n if existing_spec_hash == lock_spec.input_hash():\n print(\n f\"Spec hash already locked for {plat}. Skipping\",\n file=sys.stderr,\n )\n continue\n\n print(f\"Generating lockfile(s) for {plat}...\", file=sys.stderr)\n lockfile_contents = create_lockfile_from_spec(\n conda=conda,\n spec=lock_spec,\n kind=kind,\n )\n\n filename += KIND_FILE_EXT[kind]\n with open(filename, \"w\") as fo:\n fo.write(\"\\n\".join(lockfile_contents) + \"\\n\")\n\n print(\n f\" - Install lock using {'(see warning below)' if kind == 'env' else ''}:\",\n KIND_USE_TEXT[kind].format(lockfile=filename),\n file=sys.stderr,\n )\n\n if \"env\" in kinds:\n print(\n \"\\nWARNING: Using environment lock files (*.yml) does NOT guarantee \"\n \"that generated environments will be identical over time, since the \"\n \"dependency resolver is re-run every time and changes in repository \"\n \"metadata or resolver logic may cause variation. Conversely, since \"\n \"the resolver is run every time, the resulting packages ARE \"\n \"guaranteed to be seen by conda as being in a consistent state. This \"\n \"makes them useful when updating existing environments.\",\n file=sys.stderr,\n )\n\n\ndef is_micromamba(conda: PathLike) -> bool:\n return str(conda).endswith(\"micromamba\") or str(conda).lower().endswith(\n \"micromamba.exe\"\n )\n\n\ndef create_lockfile_from_spec(\n *,\n conda: PathLike,\n spec: LockSpecification,\n kind: str,\n) -> List[str]:\n assert spec.virtual_package_repo is not None\n virtual_package_channel = spec.virtual_package_repo.channel_url\n dry_run_install = solve_specs_for_arch(\n conda=conda,\n platform=spec.platform,\n channels=[*spec.channels, virtual_package_channel],\n specs=spec.specs,\n )\n logging.debug(\"dry_run_install:\\n%s\", dry_run_install)\n\n lockfile_contents = [\n \"# Generated by conda-lock.\",\n f\"# platform: {spec.platform}\",\n f\"# input_hash: {spec.input_hash()}\\n\",\n ]\n\n if kind == \"env\":\n link_actions = dry_run_install[\"actions\"][\"LINK\"]\n lockfile_contents.extend(\n [\n \"channels:\",\n *(f\" - {channel}\" for channel in spec.channels),\n \"dependencies:\",\n *(\n f' - {pkg[\"name\"]}={pkg[\"version\"]}={pkg[\"build_string\"]}'\n for pkg in link_actions\n # exclude virtual packages\n if not pkg[\"name\"].startswith(\"__\")\n ),\n ]\n )\n elif kind == \"explicit\":\n lockfile_contents.append(\"@EXPLICIT\\n\")\n\n link_actions = dry_run_install[\"actions\"][\"LINK\"]\n for link in link_actions:\n if is_micromamba(conda):\n link[\"url_base\"] = fn_to_dist_name(link[\"url\"])\n link[\"dist_name\"] = fn_to_dist_name(link[\"fn\"])\n else:\n link[\n \"url_base\"\n ] = f\"{link['base_url']}/{link['platform']}/{link['dist_name']}\"\n link[\"url\"] = f\"{link['url_base']}.tar.bz2\"\n link[\"url_conda\"] = f\"{link['url_base']}.conda\"\n link_dists = {link[\"dist_name\"] for link in link_actions}\n\n fetch_actions = dry_run_install[\"actions\"][\"FETCH\"]\n\n fetch_by_dist_name = {fn_to_dist_name(pkg[\"fn\"]): pkg for pkg in fetch_actions}\n\n non_fetch_packages = link_dists - set(fetch_by_dist_name)\n if len(non_fetch_packages) > 0:\n for search_res in search_for_md5s(\n conda=conda,\n package_specs=[\n x for x in link_actions if x[\"dist_name\"] in non_fetch_packages\n ],\n platform=spec.platform,\n channels=spec.channels,\n ):\n dist_name = fn_to_dist_name(search_res[\"fn\"])\n fetch_by_dist_name[dist_name] = search_res\n\n for pkg in link_actions:\n dist_name = (\n fn_to_dist_name(pkg[\"fn\"]) if is_micromamba(conda) else pkg[\"dist_name\"]\n )\n url = fetch_by_dist_name[dist_name][\"url\"]\n if url.startswith(virtual_package_channel):\n continue\n if url.startswith(spec.virtual_package_repo.channel_url_posix):\n continue\n try:\n md5 = fetch_by_dist_name[dist_name][\"md5\"]\n except KeyError:\n logger.error(\"failed to determine md5 for %s\", url)\n raise\n lockfile_contents.append(f\"{url}#{md5}\")\n\n def sanitize_lockfile_line(line):\n line = line.strip()\n if line == \"\":\n return \"#\"\n else:\n return line\n\n lockfile_contents = [sanitize_lockfile_line(line) for line in lockfile_contents]\n else:\n raise ValueError(f\"Unrecognised lock kind {kind}.\")\n\n logging.debug(\"lockfile_contents:\\n%s\\n\", lockfile_contents)\n return lockfile_contents\n\n\ndef main_on_docker(env_file, platforms):\n env_path = pathlib.Path(env_file)\n platform_arg = []\n for p in platforms:\n platform_arg.extend([\"--platform\", p])\n\n subprocess.check_output(\n [\n \"docker\",\n \"run\",\n \"--rm\",\n \"-v\",\n f\"{str(env_path.parent)}:/work:rwZ\",\n \"--workdir\",\n \"/work\",\n \"conda-lock:latest\",\n \"--file\",\n env_path.name,\n *platform_arg,\n ]\n )\n\n\ndef parse_source_files(\n src_files: List[pathlib.Path],\n platform: str,\n include_dev_dependencies: bool,\n extras: Optional[AbstractSet[str]] = None,\n) -> List[LockSpecification]:\n desired_envs = []\n for src_file in src_files:\n if src_file.name == \"meta.yaml\":\n desired_envs.append(\n parse_meta_yaml_file(src_file, platform, include_dev_dependencies)\n )\n elif src_file.name == \"pyproject.toml\":\n desired_envs.append(\n parse_pyproject_toml(\n src_file, platform, include_dev_dependencies, extras\n )\n )\n else:\n desired_envs.append(parse_environment_file(src_file, platform))\n return desired_envs\n\n\ndef aggregate_lock_specs(lock_specs: List[LockSpecification]) -> LockSpecification:\n # union the dependencies\n specs = list(\n set(chain.from_iterable([lock_spec.specs for lock_spec in lock_specs]))\n )\n\n # pick the first non-empty channel\n channels: List[str] = next(\n (lock_spec.channels for lock_spec in lock_specs if lock_spec.channels), []\n )\n\n # pick the first non-empty platform\n platform = next(\n (lock_spec.platform for lock_spec in lock_specs if lock_spec.platform), \"\"\n )\n\n return LockSpecification(specs=specs, channels=channels, platform=platform)\n\n\ndef _ensureconda(\n mamba: bool = False,\n micromamba: bool = False,\n conda: bool = False,\n conda_exe: bool = False,\n):\n _conda_exe = ensureconda.ensureconda(\n mamba=mamba,\n micromamba=micromamba,\n conda=conda,\n conda_exe=conda_exe,\n )\n\n return _conda_exe\n\n\ndef _determine_conda_executable(\n conda_executable: Optional[str], mamba: bool, micromamba: bool\n):\n if conda_executable:\n if pathlib.Path(conda_executable).exists():\n yield conda_executable\n yield shutil.which(conda_executable)\n\n yield _ensureconda(mamba=mamba, micromamba=micromamba, conda=True, conda_exe=True)\n\n\ndef determine_conda_executable(\n conda_executable: Optional[str], mamba: bool, micromamba: bool\n):\n for candidate in _determine_conda_executable(conda_executable, mamba, micromamba):\n if candidate is not None:\n if is_micromamba(candidate) and \"MAMBA_ROOT_PREFIX\" not in os.environ:\n mamba_root_prefix = pathlib.Path(candidate).parent / \"mamba_root\"\n mamba_root_prefix.mkdir(exist_ok=True, parents=True)\n os.environ[\"MAMBA_ROOT_PREFIX\"] = str(mamba_root_prefix)\n\n return candidate\n raise RuntimeError(\"Could not find conda (or compatible) executable\")\n\n\ndef _add_auth_to_line(line: str, auth: Dict[str, str]):\n search = DOMAIN_PATTERN.search(line)\n if search and search.group(2) in auth:\n return f\"{search.group(1)}{auth[search.group(2)]}@{search.group(2)}{search.group(3)}\"\n return line\n\n\ndef _add_auth_to_lockfile(lockfile: str, auth: Dict[str, str]) -> str:\n lockfile_with_auth = \"\\n\".join(\n _add_auth_to_line(line, auth) if line[0] not in (\"#\", \"@\") else line\n for line in lockfile.strip().split(\"\\n\")\n )\n if lockfile.endswith(\"\\n\"):\n return lockfile_with_auth + \"\\n\"\n return lockfile_with_auth\n\n\n@contextmanager\ndef _add_auth(lockfile: str, auth: Dict[str, str]) -> Iterator[str]:\n with tempfile.NamedTemporaryFile() as tf:\n lockfile_with_auth = _add_auth_to_lockfile(lockfile, auth)\n write_file(lockfile_with_auth, tf.name)\n yield tf.name\n\n\ndef _strip_auth_from_line(line: str) -> str:\n return AUTH_PATTERN.sub(r\"\\1\\3\", line)\n\n\ndef _extract_domain(line: str) -> str:\n return DOMAIN_PATTERN.sub(r\"\\2\", line)\n\n\ndef _strip_auth_from_lockfile(lockfile: str) -> str:\n lockfile_lines = lockfile.strip().split(\"\\n\")\n stripped_lockfile_lines = tuple(\n _strip_auth_from_line(line) if line[0] not in (\"#\", \"@\") else line\n for line in lockfile_lines\n )\n stripped_domains = sorted(\n {\n _extract_domain(stripped_line)\n for line, stripped_line in zip(lockfile_lines, stripped_lockfile_lines)\n if line != stripped_line\n }\n )\n stripped_lockfile = \"\\n\".join(stripped_lockfile_lines)\n if lockfile.endswith(\"\\n\"):\n stripped_lockfile += \"\\n\"\n if stripped_domains:\n stripped_domains_doc = \"\\n\".join(f\"# - {domain}\" for domain in stripped_domains)\n return f\"# The following domains require authentication:\\n{stripped_domains_doc}\\n{stripped_lockfile}\"\n return stripped_lockfile\n\n\ndef run_lock(\n environment_files: List[pathlib.Path],\n conda_exe: Optional[str],\n platforms: Optional[List[str]] = None,\n mamba: bool = False,\n micromamba: bool = False,\n include_dev_dependencies: bool = True,\n channel_overrides: Optional[Sequence[str]] = None,\n filename_template: Optional[str] = None,\n kinds: Optional[List[str]] = None,\n check_input_hash: bool = False,\n extras: Optional[AbstractSet[str]] = None,\n virtual_package_spec: Optional[pathlib.Path] = None,\n) -> None:\n if environment_files == DEFAULT_FILES:\n long_ext_file = pathlib.Path(\"environment.yaml\")\n if long_ext_file.exists() and not environment_files[0].exists():\n environment_files = [long_ext_file]\n\n _conda_exe = determine_conda_executable(\n conda_exe, mamba=mamba, micromamba=micromamba\n )\n make_lock_files(\n conda=_conda_exe,\n src_files=environment_files,\n platforms=platforms or DEFAULT_PLATFORMS,\n include_dev_dependencies=include_dev_dependencies,\n channel_overrides=channel_overrides,\n filename_template=filename_template,\n kinds=kinds or DEFAULT_KINDS,\n check_spec_hash=check_input_hash,\n extras=extras,\n virtual_package_spec=virtual_package_spec,\n )\n\n\n@click.group(cls=DefaultGroup, default=\"lock\", default_if_no_args=True)\ndef main():\n \"\"\"To get help for subcommands, use the conda-lock --help\"\"\"\n pass\n\n\n@main.command(\"lock\")\n@click.option(\n \"--conda\", default=None, help=\"path (or name) of the conda/mamba executable to use.\"\n)\n@click.option(\n \"--mamba/--no-mamba\", default=False, help=\"don't attempt to use or install mamba.\"\n)\n@click.option(\n \"--micromamba/--no-micromamba\",\n default=False,\n help=\"don't attempt to use or install micromamba.\",\n)\n@click.option(\n \"-p\",\n \"--platform\",\n multiple=True,\n help=\"generate lock files for the following platforms\",\n)\n@click.option(\n \"-c\",\n \"--channel\",\n \"channel_overrides\",\n multiple=True,\n help=\"\"\"Override the channels to use when solving the environment. These will replace the channels as listed in the various source files.\"\"\",\n)\n@click.option(\n \"--dev-dependencies/--no-dev-dependencies\",\n is_flag=True,\n default=True,\n help=\"include dev dependencies in the lockfile (where applicable)\",\n)\n@click.option(\n \"-f\",\n \"--file\",\n \"files\",\n default=DEFAULT_FILES,\n type=click.Path(),\n multiple=True,\n help=\"path to a conda environment specification(s)\",\n)\n@click.option(\n \"-k\",\n \"--kind\",\n default=[\"explicit\"],\n type=str,\n multiple=True,\n help=\"Kind of lock file(s) to generate [should be one of 'explicit' or 'env'].\",\n)\n@click.option(\n \"--filename-template\",\n default=\"conda-{platform}.lock\",\n help=\"Template for the lock file names. Filename must include {platform} token, and must not end in '.yml'. For a full list and description of available tokens, see the command help text.\",\n)\n@click.option(\n \"--strip-auth\",\n is_flag=True,\n default=False,\n help=\"Strip the basic auth credentials from the lockfile.\",\n)\n@click.option(\n \"-e\",\n \"--extras\",\n default=[],\n type=str,\n multiple=True,\n help=\"When used in conjunction with input sources that support extras (pyproject.toml) will add the deps from those extras to the input specification\",\n)\n@click.option(\n \"--check-input-hash\",\n is_flag=True,\n default=False,\n help=\"Check existing input hashes in lockfiles before regenerating lock files. If no files were updated exit with exit code 4. Incompatible with --strip-auth\",\n)\n@click.option(\n \"--log-level\",\n help=\"Log level.\",\n default=\"INFO\",\n type=click.Choice([\"DEBUG\", \"INFO\", \"WARNING\", \"ERROR\", \"CRITICAL\"]),\n)\n@click.option(\n \"--pdb\", is_flag=True, help=\"Drop into a postmortem debugger if conda-lock crashes\"\n)\n@click.option(\n \"--virtual-package-spec\",\n type=click.Path(),\n help=\"Specify a set of virtual packages to use.\",\n)\ndef lock(\n conda,\n mamba,\n micromamba,\n platform,\n channel_overrides,\n dev_dependencies,\n files,\n kind,\n filename_template,\n strip_auth,\n extras,\n check_input_hash: bool,\n log_level,\n pdb,\n virtual_package_spec,\n):\n \"\"\"Generate fully reproducible lock files for conda environments.\n\n By default, the lock files are written to conda-{platform}.lock. These filenames can be customized using the\n --filename-template argument. The following tokens are available:\n\n \\b\n platform: The platform this lock file was generated for (conda subdir).\n dev-dependencies: Whether or not dev dependencies are included in this lock file.\n input-hash: A sha256 hash of the lock file input specification.\n version: The version of conda-lock used to generate this lock file.\n timestamp: The approximate timestamp of the output file in ISO8601 basic format.\n \"\"\"\n logging.basicConfig(level=log_level)\n\n if pdb:\n\n def handle_exception(exc_type, exc_value, exc_traceback):\n import pdb\n\n pdb.post_mortem(exc_traceback)\n\n sys.excepthook = handle_exception\n\n if not virtual_package_spec:\n candidates = [\n pathlib.Path(\"virtual-packages.yml\"),\n pathlib.Path(\"virtual-packages.yaml\"),\n ]\n for c in candidates:\n if c.exists():\n logger.info(\"Using virtual packages from %s\", c)\n virtual_package_spec = c\n break\n else:\n virtual_package_spec = pathlib.Path(virtual_package_spec)\n\n files = [pathlib.Path(file) for file in files]\n extras = set(extras)\n lock_func = partial(\n run_lock,\n environment_files=files,\n conda_exe=conda,\n platforms=platform,\n mamba=mamba,\n micromamba=micromamba,\n include_dev_dependencies=dev_dependencies,\n channel_overrides=channel_overrides,\n kinds=kind,\n extras=extras,\n virtual_package_spec=virtual_package_spec,\n )\n if strip_auth:\n with tempfile.TemporaryDirectory() as tempdir:\n filename_template_temp = f\"{tempdir}/{filename_template.split('/')[-1]}\"\n lock_func(filename_template=filename_template_temp)\n filename_template_dir = \"/\".join(filename_template.split(\"/\")[:-1])\n for file in os.listdir(tempdir):\n lockfile = read_file(os.path.join(tempdir, file))\n lockfile = _strip_auth_from_lockfile(lockfile)\n write_file(lockfile, os.path.join(filename_template_dir, file))\n else:\n lock_func(\n filename_template=filename_template, check_input_hash=check_input_hash\n )\n\n\n@main.command(\"install\")\n@click.option(\n \"--conda\", default=None, help=\"path (or name) of the conda/mamba executable to use.\"\n)\n@click.option(\n \"--mamba/--no-mamba\", default=False, help=\"don't attempt to use or install mamba.\"\n)\n@click.option(\n \"--micromamba/--no-micromamba\",\n default=False,\n help=\"don't attempt to use or install micromamba.\",\n)\n@click.option(\"-p\", \"--prefix\", help=\"Full path to environment location (i.e. prefix).\")\n@click.option(\"-n\", \"--name\", help=\"Name of environment.\")\n@click.option(\n \"--auth\",\n help=\"The auth file provided as string. Has precedence over `--auth-file`.\",\n default=\"\",\n)\n@click.option(\"--auth-file\", help=\"Path to the authentication file.\", default=\"\")\n@click.option(\n \"--validate-platform\",\n is_flag=True,\n default=True,\n help=\"Whether the platform compatibility between your lockfile and the host system should be validated.\",\n)\n@click.option(\n \"--log-level\",\n help=\"Log level.\",\n default=\"INFO\",\n type=click.Choice([\"DEBUG\", \"INFO\", \"WARNING\", \"ERROR\", \"CRITICAL\"]),\n)\n@click.argument(\"lock-file\")\ndef install(\n conda,\n mamba,\n micromamba,\n prefix,\n name,\n lock_file,\n auth,\n auth_file,\n validate_platform,\n log_level,\n):\n \"\"\"Perform a conda install\"\"\"\n logging.basicConfig(level=log_level)\n auth = json.loads(auth) if auth else read_json(auth_file) if auth_file else None\n _conda_exe = determine_conda_executable(conda, mamba=mamba, micromamba=micromamba)\n install_func = partial(do_conda_install, conda=_conda_exe, prefix=prefix, name=name)\n if validate_platform:\n lockfile = read_file(lock_file)\n try:\n do_validate_platform(lockfile)\n except PlatformValidationError as error:\n raise PlatformValidationError(\n error.args[0] + \" Disable validation with `--validate-platform=False`.\"\n )\n if auth:\n lockfile = read_file(lock_file)\n with _add_auth(lockfile, auth) as lockfile_with_auth:\n install_func(file=lockfile_with_auth)\n else:\n install_func(file=lock_file)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"conda_lock/conda_lock.py","file_name":"conda_lock.py","file_ext":"py","file_size_in_byte":34213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"405100001","text":"# Copyright 2016 VMware, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\"\"\"NSXv add DHCP MTU to subnets\n\nRevision ID: dbe29d208ac6\nRevises: 081af0e396d7\nCreate Date: 2016-07-21 05:03:35.369938\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = 'dbe29d208ac6'\ndown_revision = '081af0e396d7'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n # Add a new column and make the previous column nullable,\n # because it is enough that one of them is non-null\n op.add_column('nsxv_subnet_ext_attributes',\n sa.Column('dhcp_mtu', sa.Integer, nullable=True))\n op.alter_column('nsxv_subnet_ext_attributes', 'dns_search_domain',\n nullable=True, existing_type=sa.String(length=255),\n existing_nullable=False)\n","sub_path":"vmware_nsx/db/migration/alembic_migrations/versions/newton/contract/dbe29d208ac6_nsxv_add_dhcp_mtu_to_subnets.py","file_name":"dbe29d208ac6_nsxv_add_dhcp_mtu_to_subnets.py","file_ext":"py","file_size_in_byte":1317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"540714832","text":"import io\n\nfrom django.core import management\n\nfrom parkings.factories import ParkingFactory, RegionFactory\n\n\ndef call_mgmt_cmd_with_output(command_cls, *args, **kwargs):\n assert issubclass(command_cls, management.BaseCommand)\n stdout = io.StringIO()\n stderr = io.StringIO()\n cmd = command_cls(stdout=stdout, stderr=stderr)\n assert isinstance(cmd, management.BaseCommand)\n result = management.call_command(cmd, *args, **kwargs)\n return (result, stdout.getvalue(), stderr.getvalue())\n\n\ndef create_parkings_and_regions(parking_count=100, region_count=20):\n regions = RegionFactory.create_batch(region_count)\n parkings = ParkingFactory.create_batch(parking_count)\n\n centroids = [region.geom.centroid for region in regions]\n touching_points = [p for p in centroids if intersects_with_any(p, regions)]\n\n # Make sure that some of the parkings are inside the regions\n for (point, parking) in zip(touching_points, parkings):\n parking.location = point\n parking.save()\n\n for parking in parkings: # pragma: no cover\n if intersects_with_any(parking.location, regions):\n assert parking.region\n assert intersects(parking.location, parking.region)\n else:\n assert parking.region is None\n\n return (parkings, regions)\n\n\ndef intersects_with_any(point, regions):\n assert regions\n p = point.transform(regions[0].geom.srid, clone=True)\n assert all(x.geom.srid == p.srid for x in regions)\n return any(p.intersects(x.geom) for x in regions)\n\n\ndef intersects(point, region):\n geom = region.geom\n return point.transform(geom.srid, clone=True).intersects(geom)\n","sub_path":"parkings/tests/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"31105924","text":"import argparse\nimport tensorflow as tf\nfrom agents.gamma import GammaAgent\nfrom agents.solver import Solver\nimport deictic.models as models\nimport symbolic_models.bridges_1 as symbolic_model\nfrom envs.bridges import BridgesEnv\n\nNUM_PUCKS = 2\nNUM_BRIDGES = 1\nNUM_BLOCKS = 16\nHORIZONTAL_ONLY = True\nLEARNING_STARTS = 60\nBUFFER_SIZE = 10000\nTRAIN_FREQ = 1\nPRINT_FREQ = 1\n\n\ndef main(args):\n\n # setup environment\n env = BridgesEnv(num_pucks=NUM_PUCKS, num_bridges=NUM_BRIDGES, num_blocks=NUM_BLOCKS,\n only_horizontal=HORIZONTAL_ONLY)\n env.initStride = args.init_env_stride # stride for initial puck placement\n env.stride = args.env_stride # stride for action specification\n\n # deictic descriptor parameters\n descriptor_shape = (env.block_size * 5, env.block_size * 5, 2)\n descriptor_shape_small = (30, 30, 2)\n num_states = 3\n num_patches = env.num_positions\n num_actions = 2 * num_patches\n\n # network settings\n convs = [(num_filters, 3, 1) for num_filters in args.convs]\n if len(args.convs) > 0:\n abstract_embedding_size = args.convs[-1]\n else:\n abstract_embedding_size = 16\n\n # create neural network\n if args.multiplex:\n gamma_func = models.cnn_to_mlp_symbolic_multiplex(\n convs=convs,\n hiddens=args.hiddens\n )\n else:\n gamma_func = models.cnn_to_mlp_symbolic(\n convs=convs,\n hiddens=args.hiddens\n )\n\n # setup the agent\n agent = GammaAgent(env, symbolic_model, num_states, num_actions, gamma_func,\n tf.train.AdamOptimizer(learning_rate=args.learning_rate), descriptor_shape,\n descriptor_shape_small, args.exploration_fraction, 1.0, args.max_time_steps, args.final_epsilon,\n args.batch_size, abstract_input=True, num_abstract_actions=len(symbolic_model.ACTIONS),\n abstract_embedding_size=abstract_embedding_size, buffer_size=BUFFER_SIZE)\n\n agent.start_session(args.num_cpu, args.gpu_memory_fraction)\n\n # initialize a solver\n solver = Solver(env, agent, args.max_time_steps, learning_start=LEARNING_STARTS, train_freq=TRAIN_FREQ,\n max_episodes=args.max_episodes, rewards_file=args.rewards_file,\n abstract_actions_file=args.abstract_actions_file, animate=args.animate,\n animate_from=args.animate_from, gif_save_path=args.save_gifs_path, gif_save_limit=args.save_limit,\n gif_save_only_successful=args.save_only_successful, max_depth_value=NUM_PUCKS)\n\n # solve the environment\n solver.run()\n\n # stop session\n agent.stop_session()\n\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser(\"Solve build 2 bridges with the gamma agent.\")\n\n parser.add_argument(\"--learning-rate\", type=float, default=0.0001, help=\"learning rate for gamma neural networks\")\n parser.add_argument(\"--batch-size\", type=int, default=30, help=\"batch size for the learning of gamma\")\n parser.add_argument(\"--exploration-fraction\", type=float, default=1.0,\n help=\"how many time steps to explore expressed as the faction of the maximum number of \"\n \"time steps\")\n parser.add_argument(\"--final-epsilon\", type=float, default=0.1,\n help=\"value of epsilon after the end of exploration\")\n parser.add_argument(\"--multiplex\", default=False, action=\"store_true\", help=\"multiplex the abstract action input\")\n parser.add_argument(\"--convs\", nargs=\"+\", type=int, default=[16],\n help=\"list of numbers of filters for convolutions\")\n parser.add_argument(\"--hiddens\", nargs=\"+\", type=int, default=[32],\n help=\"list of numbers of neurons for hidden layers\")\n\n parser.add_argument(\"--init-env-stride\", type=int, default=28,\n help=\"stride for the placement of objects in the environment\")\n parser.add_argument(\"--env-stride\", type=int, default=28, help=\"stride for the actions\")\n parser.add_argument(\"--max-time-steps\", type=int, default=2000, help=\"maximum number of time steps to run for\")\n parser.add_argument(\"--max-episodes\", type=int, default=None, help=\"maximum number of episodes to run for\")\n parser.add_argument(\"--rewards-file\", default=None, help=\"where to save the per-episode rewards\")\n parser.add_argument(\"--abstract-actions-file\", help=\"where to save the per-step abstract actions\")\n\n parser.add_argument(\"--animate\", default=False, action=\"store_true\", help=\"show an animation of the environment\")\n parser.add_argument(\"--animate-from\", type=int, default=0, help=\"from which episode to start the animation\")\n\n parser.add_argument(\"--save-gifs-path\", help=\"save path for gifs of episodes\")\n parser.add_argument(\"--save-only-successful\", default=False, action=\"store_true\",\n help=\"save only the successful episodes\")\n parser.add_argument(\"--save-limit\", type=int, help=\"maximum number of episodes to save\")\n\n parser.add_argument(\"--gpu-memory-fraction\", type=float, default=0.01,\n help=\"a fraction of GPU memory to use; None for all\")\n parser.add_argument(\"--num-cpu\", type=int, default=1, help=\"number of CPUs to use\")\n\n parsed = parser.parse_args()\n main(parsed)\n","sub_path":"scripts/solve/bridges_1_big/gamma.py","file_name":"gamma.py","file_ext":"py","file_size_in_byte":5338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"166261973","text":"from __future__ import division\n\nimport torch\n\n\nclass RandomScale(object):\n def __init__(self, scale):\n self.max = max(abs(scale), 1)\n\n def __call__(self, data):\n mean = data.pos.mean(dim=0)\n pos = data.pos - mean\n\n scale = 2 * torch.rand(pos.size(1))\n upscale_mask = scale > 1\n downscale_mask = scale < 1\n\n scale[upscale_mask] -= 1\n scale[upscale_mask] *= (self.max - 1)\n scale[upscale_mask] += 1\n\n inv = 1 / self.max\n scale[downscale_mask] *= (1 - inv)\n scale[downscale_mask] += inv\n\n pos *= scale.type_as(pos)\n\n pos += mean\n data.pos = pos\n return data\n","sub_path":"torch_geometric/transform/random_scale.py","file_name":"random_scale.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"555952273","text":"from pwn import * \n\ncontext.log_level = 'debug'\nsh = process('./the_pwn_inn')\ngdb.attach(sh, 'b * 0x401319')\n\n\none = [0x45226, 0x4527a, 0xf0364, 0xf1207]\n\nexit_got = 0x404058\nstart_addr = 0x4010C0\n\nsh.recvuntil('your name? \\n')\npayload = '%' + str(0xc0) + 'c' + '%9$hhn%21$p%25$p'\nL = len(payload)\npayload += 'a' * (8 - L % 8)\npayload += p64(exit_got)\nsh.sendline(payload)\n\n\nsh.recvuntil('0x')\nText = '0x' + sh.recv(12)\nlog.success('Text:' + Text)\nlibcbase = int(Text, 16) - 0x78c0f\nlog.success('libcbase: ' + hex(libcbase))\nsh.recvuntil('0x')\nStack_base = int('0x' + sh.recv(12), 16)\nlog.success('Stack_base: ' + hex(Stack_base))\n\n\none_gadget = hex(one[1] + libcbase)\nlog.success(one_gadget)\n\nexit_hook = 0x5f0f50 + libcbase\n\nfor i in range(6, 0, -1):\n log.success(one_gadget[2*i:2*i+2])\n sh.recvuntil('your name? \\n')\n payload = '%' + str(int('0x' + one_gadget[2*i:2*i+2], 16)) + 'c' + '%8$hhn'\n L = len(payload)\n payload += 'a' * (8 - L % 8)\n payload += p64(exit_hook + 6 - i)\n sh.sendline(payload)\n\n\nsh.recvuntil('your name? \\n')\npayload = '%' + str(0x30) + 'c' + '%8$hhn'\nL = len(payload)\npayload += 'a' * (8 - L % 8)\npayload += p64(exit_got)\nsh.sendline(payload)\n\n\n\n\nsh.interactive()\n","sub_path":"context/0x41CTF/thepwninn/exp.py","file_name":"exp.py","file_ext":"py","file_size_in_byte":1211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"138338548","text":"import numpy as np\n#from hmesuresspinon import ecrituretab2Dspinon\nfrom matplotlib import pyplot\n\n#n/N=1/(2L**2)sum_k 1/(2-cos(kx)-cos(ky)+mu_0)\n#muz,kappa,gz=sim.Couplages.Muz,sim.Couplages.Kappa,sim.Couplages.Gz\nlx,ly,lz=47,47,1#int(sim.Lattice.lx),int(sim.Lattice.ly),int(sim.Lattice.lz)\n#Dir=sim.Dir\n#kappa=0.0\n#gz=0.3\n\ndef lapl(i,j):\n return 2-np.cos(2.*np.pi*i/lx)-np.cos(2.*np.pi*j/ly);\ndef lapl2(i,j):\n x=5./4.-8.*np.cos(2.*np.pi*i/lx)/6.+np.cos(4.*np.pi*i/lx)/12.\n x+=5./4.-8.*np.cos(2.*np.pi*j/ly)/6.+np.cos(4.*np.pi*j/ly)/12.\n return x;\ndef lapl3(i,j):\n x=49./36.-3.*np.cos(2.*np.pi*i/lx)/2.+3.*np.cos(4.*np.pi*i/lx)/20.-np.cos(6.*np.pi*i/lx)/90.\n x+=49./36.-3.*np.cos(2.*np.pi*j/ly)/2.+3.*np.cos(4.*np.pi*j/ly)/20.-np.cos(6.*np.pi*j/ly)/90.\n return x;\n\ndef grad(i,j):\n return np.linalg.norm([np.sin(2.*np.pi*i/lx),np.sin(2.*np.pi*j/ly)]);\ndef grad2(i,j):\n y=4.*np.sin(2.*np.pi*i/lx)/3.-np.sin(4.*np.pi*i/lx)/6.\n z=4.*np.sin(2.*np.pi*j/ly)/3.-np.sin(4.*np.pi*j/ly)/6.\n x=np.linalg.norm([y,z])\n return x;\ndef grad3(i,j):\n y=3.*np.sin(2.*np.pi*i/lx)/2.-3.*np.sin(4.*np.pi*i/lx)/10.+np.sin(6.*np.pi*i/lx)/30.\n z=3.*np.sin(2.*np.pi*j/ly)/2.-3.*np.sin(4.*np.pi*j/ly)/10.+np.sin(6.*np.pi*j/ly)/30.\n x=np.linalg.norm([y,z])\n return x;\n\n\nmuz,kappa,gz=-3.5,0.3,0.3\ntab=[]\n\ndef enemoins(i,j,nn,muz):\n return 0.5*kappa*kappa+lapl3(i,j)+muz-kappa*grad3(i,j)+2.*gz*nn\ndef eneplus(i,j,nn,muz):\n return 0.5*kappa*kappa+lapl3(i,j)+muz+kappa*grad3(i,j)+2.*gz*nn\n \nfichier = open(\"dat2.dat\",\"a\")\n\nnn=1.5\niterations=5000\nprint(lx,ly,lz)\n\nfor mmuz in range(0,2000):\n muz-=0.5#-mmuz/100.\n print(\"muz\",muz)\n conver=[]\n temp=-1\n\n while(temp<0):\n nn+=0.1\n temp=0.\n for i in range(0,lx):\n for j in range(0,ly):\n temp+=max(1./enemoins(i,j,nn,muz),0)\n temp+=max(1./eneplus(i,j,nn,muz),0)\n \n for k in range (iterations):\n if(k%(iterations/10)==0):\n print(\"iteration\",10-k/np.double(iterations/10))\n \n temp=0.\n for i in range(0,lx):\n for j in range(0,ly):\n temp+=max(1./enemoins(i,j,nn,muz),0)\n temp+=max(1./eneplus(i,j,nn,muz),0)\n nn=nn/10.+9.*temp/(10.*lx*ly)\n conver.append(nn/2.+temp/(2.*lx*ly))\n\n mean=np.mean(conver)\n moyenne=[]\n for i in range(len(conver)-100):\n if np.abs(conver[100+i])<2.*mean:\n moyenne.append(conver[100+i])\n nn=np.mean(moyenne)\n print(len(moyenne))\n\n print(\"densite\",nn)\n #pyplot.plot(conver)\n #pyplot.show()\n\n fichier.write(str(muz)+\"\\t\"+str(nn)+\"\\t\"+str((8./enemoins(2,1,nn,muz)+8./eneplus(2,1,nn,muz))/(lx*ly*nn))+\"\\n\")\n\nfichier.close()\n\n#pyplot.plot(conver)\n#pyplot.show()\n\n\"\"\"\nnmoins=0.\nfor i in range(0,lx):\n for j in range(0,ly):\n temp=1./(0.5*kappa*kappa+lapl3(i,j)+muz-kappa*grad3(i,j))\n nmoins+=temp\nnmoins/=(lx*ly)\n\nnplus=0.\nfor i in range(0,lx):\n for j in range(0,ly):\n temp=1./(0.5*kappa*kappa+lapl3(i,j)+muz+kappa*grad3(i,j))\n nplus+=temp\nnplus/=(lx*ly)\n\"\"\"\n","sub_path":"meanfield.py","file_name":"meanfield.py","file_ext":"py","file_size_in_byte":3089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"547291087","text":"\n# coding: utf-8\n\n# In[7]:\n\n\nsize=int(input(\"enter the no of nos:\"))\ni=0\nnumbers=list()\nwhile i=0:\n powers=numbers[size]**powers\n size=size-1\nprint(powers)\nans=str(powers)\nprint(ans[len(ans)-1])\n\n\n# In[11]:\n\n\n# get the size of the Arithmetic Progression\nsize=int(input(\"Enter the size of the Arithmetic Progression:\"))\ni=0\n# creating an empty list\nap=list() #syntax error: use list()\n# getting the elements of the Arithmetic Progression\nwhile iabs(diff2): \n missing=ap[0]+abs(diff2)\n # if yes then inserting it the second position\n ap.insert(1,missing)\n#checking if the missing element should be in the second last position\nelif abs(diff2)>abs(diff1): \n missing=ap[last]-diff1\n #if yes then inserting in the second last position\n ap.insert(last,missing)\n# if the missing element is not in the second and second last element then finding the position\nelse:\n #checking the common differences of all consecutive numbers taking 2 at a time\n cd=diff1\n j=1\n while j=0 & j-1>=0 & count<=len(search) & mat[i-1][j-1]==search[count+1]:\n boggle(i-1,j-1,count+1)\n elif i-1>=0 & count<=len(search) & mat[i-1][j]==search[count+1]:\n boggle(i-1,j,count+1)\n elif i-1>=0 & j+1<=rows & count<=len(search) & mat[i-1][j+1]==search[count+1]:\n boggle(i-1,j+1,count+1)\n elif j-1>=0 & count<=len(search) & mat[i][j-1]==search[count+1]:\n boggle(i,j-1,count+1)\n elif j+1<=rows & count<=len(search) & mat[i][j+1]==search[count+1]:\n boggle(i,j+1,count+1)\n elif i+1<=rows & j-1>=0 & count<=len(search) & mat[i+1][j-1]==search[count+1]:\n boggle(i+1,j-1,count+1)\n elif i+1<=rows & j<=rows & count<=len(search) & mat[i+1][j]==search[count+1]:\n boggle(i+1,j,count+1)\n elif i+1<=rows & j+1<=rows & count<=len(search) & mat[i+1][j+1]==search[count+1]:\n boggle(i+1,j+1,count+1)\n else:\n flag=0\nrows=int(input(\"rows:\"))\ni=0\nmat=[]\nwhile i 1:\n table.append(n)\n return table\n\n\nn, m = map(int, read().split())\nmod = 10 ** 9 + 7\ncomb = Combination(m + 30)\nans = 1\nfor v in Counter(prime_decomposition(abs(n))).values():\n ans *= comb.calc(m + v - 1, v)\n ans %= mod\nprint(ans * pow(2, m - 1, mod) % mod)\n","sub_path":"submissions/arc004/d.py","file_name":"d.py","file_ext":"py","file_size_in_byte":1098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"188100153","text":"#!/usr/bin/python\n\n####################################\n# Your Name\n# Your A#\n####################################\n\nfrom var import var\nfrom const import const\nfrom pwr import pwr\nfrom prod import prod\nfrom plus import plus\nfrom maker import make_const, make_pwr, make_pwr_expr\nimport math\n\ndef deriv(expr):\n if isinstance(expr, const):\n return const_deriv(expr)\n elif isinstance(expr, pwr):\n return pwr_deriv(expr)\n elif isinstance(expr, prod):\n return prod_deriv(expr)\n elif isinstance(expr, plus):\n return plus_deriv(expr)\n else:\n raise Exception('deriv:' + repr(expr))\n\n# the derivative of a consant is 0.\ndef const_deriv(c):\n assert isinstance(c, const)\n return const(val=0.0)\n\ndef plus_deriv(s):\n # your code here\n assert isinstance(s, plus)\n return plus(elt1=deriv(s.get_elt1()), elt2=deriv(s.get_elt2()))\n # my codes ends here\n\ndef pwr_deriv(p):\n assert isinstance(p, pwr)\n b = p.get_base()\n d = p.get_deg()\n if isinstance(b, var):\n if isinstance(d, const):\n # your code here\n left = const(val=d.get_val())\n right = pwr(base=var(name=b.get_name()) , deg=const(d.get_val()-1))\n return prod(mult1=left, mult2=right)\n # my code ends here\n else:\n raise Exception('pwr_deriv: case 1: ' + str(p))\n if isinstance(b, pwr):\n if isinstance(d, const):\n # your code here\n left = prod(mult1=const(val=d.get_val()) ,mult2=pwr(base=b, deg=const(val=d.get_val()-1))) \n right = deriv(b)\n return prod(mult1=left, mult2=right)\n # my code ends here\n pass\n else:\n raise Exception('pwr_deriv: case 2: ' + str(p))\n elif isinstance(b, plus):\n if isinstance(d, const):\n # your code here\n left = prod(mult1=const(d.get_val()) , mult2=pwr(base=b, deg=const(d.get_val()-1)))\n right = deriv(b)\n return prod(mult1=left, mult2=right)\n # my code ends here\n # pass\n else:\n raise Exception('pwr_deriv: case 3: ' + str(p))\n elif isinstance(b, prod):\n if isinstance(d, const):\n # your code here\n left = prod(mult1=const(d.get_val()) , mult2=pwr(base=b, deg=const(d.get_val()-1)))\n right = deriv(b)\n return prod(mult1=left, mult2=right)\n # my code ends here\n # pass\n else:\n raise Exception('pwr_deriv: case 4: ' + str(p))\n else:\n raise Exception('power_deriv: case 5: ' + str(p))\n\ndef prod_deriv(p):\n assert isinstance(p, prod)\n m1 = p.get_mult1()\n m2 = p.get_mult2()\n if isinstance(m1, const):\n if isinstance(m2, const):\n # your code here\n return const(val=0)\n # my code ends here\n # pass\n elif isinstance(m2, pwr):\n # your code here\n return prod(mult1=const(val=m1.get_val()), \n mult2=deriv(m2))\n # my code ends here\n # pass\n elif isinstance(m2, plus):\n # your code here\n return deriv(plus(elt1=prod(mult1=const(val=m1.get_val()), mult2=m2.get_elt1()), \n elt2=prod(mult1=const(m1.get_val()), mult2=m2.get_elt2())))\n # my code ends here\n #pass\n elif isinstance(m2, prod):\n # your code here\n return prod(mult1=const(m1.get_val()),\n mult2=deriv(m2))\n # my code ends here\n # pass\n else:\n raise Exception('prod_deriv: case 0' + str(p))\n elif isinstance(m1, plus):\n if isinstance(m2, const):\n # your code here\n # test 8\n return deriv(plus(elt1=prod(mult1=const(val=m2.get_val()), mult2=m1.get_elt1()), \n elt2=prod(mult1=const(m2.get_val()), mult2=m1.get_elt2())))\n # my code ends here\n # pass\n else:\n raise Exception('prod_deriv: case 1:' + str(p))\n elif isinstance(m1, pwr):\n if isinstance(m2, const):\n # your code here\n # test 9\n return prod(mult1=deriv(m1), mult2=const(val=m2.get_val()))\n # my code ends here\n # pass\n else:\n raise Exception('prod_deriv: case 2:' + str(p))\n elif isinstance(m1, prod):\n if isinstance(m2, const):\n # your code here\n # test 10\n return prod(mult1=deriv(m1), mult2=const(m2.get_val()))\n # my code ends here\n # pass\n else:\n raise Exception('prod_deriv: case 3:' + str(p))\n else:\n raise Exception('prod_deriv: case 4:' + str(p))\n\n\n\n","sub_path":"assn/1/hw01/deriv.py","file_name":"deriv.py","file_ext":"py","file_size_in_byte":4781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"73002381","text":"from eval.evaluation import run_experiment\nfrom grm import preprocessing, GRM\nfrom grm.util import get_activities\nfrom pm4py.algo.filtering.log.attributes import attributes_filter\nfrom pm4py.objects.log.log import EventLog\nfrom pm4py.objects.conversion.log import factory as conversion_factory\nfrom pm4py.objects.log.exporter.csv import factory as csv_exporter\nimport operator\n\nlogfile = \"sp2020.csv\"\nname_of_case_id = \"CASE_ID\"\nname_of_activity = \"ACTIVITY\"\nname_of_timestamp = \"TIMESTAMP\"\nname_of_label = \"REPAIR_IN_TIME_5D\"\nhyper_params = {'num_epochs': 1000}\nk = 10\n\nlog = preprocessing.import_data(\"../data\", logfile, separator=\";\", quote='\"', case_id=name_of_case_id,\n activity=name_of_activity,\n time_stamp=name_of_timestamp, target=name_of_label)\n\n# filter out most relevant\nmodel_path = '../best_models/sp2020/2020-05-05-14-59_best_model.pickle'\nactivities = get_activities(log)\ngrm_model = GRM.GRM(log, activities, restore_file=model_path)\n\nfiltered_log = EventLog()\nfor trace in log:\n case_id, pred, rel_scores = grm_model.predict(trace)\n if len(rel_scores) > 1:\n least_relevant = min(rel_scores.items(), key=operator.itemgetter(1))[0]\n log_trace = attributes_filter.apply_events(log, [case_id], parameters={\n attributes_filter.PARAMETER_CONSTANT_ATTRIBUTE_KEY: name_of_case_id, \"positive\": True})\n\n trace_without_most = attributes_filter.apply_events(log_trace, [least_relevant], parameters={\n attributes_filter.PARAMETER_CONSTANT_ATTRIBUTE_KEY: \"concept:name\", \"positive\": False})\n\n trace_without_most = trace_without_most[0]\n\n filtered_log._list.append(trace_without_most)\n\nlog = conversion_factory.apply(filtered_log)\ncsv_exporter.export(log, \"sp2020_without_least_relevant.csv\")\n\nrun_experiment(log, hyper_params=hyper_params, k=k, ml_flow_run_name_prefix=logfile)","sub_path":"eval/predictive_quality/most_least/evaluation_without_least_relevant_2.py","file_name":"evaluation_without_least_relevant_2.py","file_ext":"py","file_size_in_byte":1905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"649535022","text":"from pylons import app_globals as g\n\nfrom r2.models import PromoCampaign\n\nclass PromoCampaignByFlightIdCache():\n @classmethod\n def _cachekey(cls, flight_id):\n return \"promo.flight.%d\" % flight_id\n\n @classmethod\n def add(cls, campaign):\n cachekey = cls._cachekey(campaign.external_flight_id)\n g.cache.set(cachekey, campaign._fullname, time=60*60*24)\n\n @classmethod\n def get(cls, flight_id):\n fullname = g.cache.get(cls._cachekey(flight_id))\n\n if not fullname:\n q = PromoCampaign._query(\n PromoCampaign.c.external_flight_id == flight_id,\n data=True,\n )\n q._limit = 1\n campaigns = list(q)\n if campaigns:\n campaign = campaigns[0]\n\n cls.add(campaign)\n\n return campaign._fullname\n else:\n return None\n else:\n return fullname\n","sub_path":"reddit_adzerk/lib/cache.py","file_name":"cache.py","file_ext":"py","file_size_in_byte":945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"7804213","text":"from mako import runtime, filters, cache\nUNDEFINED = runtime.UNDEFINED\n_magic_number = 2\n_modified_time = 1380863803.16\n_template_filename='F:\\\\work\\\\bgc\\\\www/admin_lock_handicaps.mako'\n_template_uri='/admin_lock_handicaps.mako'\n_template_cache=cache.Cache(__name__, _modified_time)\n_source_encoding=None\n_exports = []\n\n\ndef _mako_get_namespace(context, name):\n try:\n return context.namespaces[(__name__, name)]\n except KeyError:\n _mako_generate_namespaces(context)\n return context.namespaces[(__name__, name)]\ndef _mako_generate_namespaces(context):\n pass\ndef _mako_inherit(template, context):\n _mako_generate_namespaces(context)\n return runtime._inherit_from(context, u'inc/base_fancybox.mako', _template_uri)\ndef render_body(context,**pageargs):\n context.caller_stack.push_frame()\n try:\n __M_locals = dict(pageargs=pageargs)\n calc_year = context.get('calc_year', UNDEFINED)\n handicap_val = context.get('handicap_val', UNDEFINED)\n calc_month = context.get('calc_month', UNDEFINED)\n member_id = context.get('member_id', UNDEFINED)\n member_name = context.get('member_name', UNDEFINED)\n handicap_updated = context.get('handicap_updated', UNDEFINED)\n member_current_handicap = context.get('member_current_handicap', UNDEFINED)\n # SOURCE LINE 1\n context.write(u'\\n\\n\\n\\n')\n # SOURCE LINE 68\n MONTHS_LIST = ['Dummy','Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec'] \n \n __M_locals.update(dict([(__M_key, locals()[__M_key]) for __M_key in ['MONTHS_LIST'] if __M_key in locals()]))\n context.write(u'\\n\\n\\n

LOCK HANDICAPS FOR ')\n # SOURCE LINE 71\n context.write(filters.html_escape(unicode(member_name.upper() or '')))\n context.write(u'

\\n')\n # SOURCE LINE 72\n if handicap_updated:\n # SOURCE LINE 73\n context.write(u' \\n
Handicap updated successfully
\\n\\n')\n # SOURCE LINE 76\n else:\n # SOURCE LINE 77\n context.write(u'\\n \\n \\n
\\n \\n
\\n \\n')\n # SOURCE LINE 84\n if member_current_handicap:\n # SOURCE LINE 85\n context.write(u' \\n \\n \\n \\n
Current System handicap :\\n ')\n # SOURCE LINE 87\n context.write(filters.html_escape(unicode(member_current_handicap.get('handicap') or '-')))\n context.write(u'\\n')\n # SOURCE LINE 89\n context.write(u'
Enter modified handicap :\\n \\n \\n
\\n
Lock handicap for :\\n month\\n
\\n \\n\\n
\\n
Handicaps will be locked from ')\n # SOURCE LINE 107\n context.write(filters.html_escape(unicode(MONTHS_LIST[calc_month])))\n context.write(u' ')\n context.write(filters.html_escape(unicode(calc_year)))\n context.write(u'
\\n
\\n UPDATE HANDICAP\\n
\\n\\n\\n')\n # SOURCE LINE 114\n context.write(u'\\n
\\n
')\n return ''\n finally:\n context.caller_stack.pop_frame()\n\n\n","sub_path":"bgc/bgc/tmp-mako/admin_lock_handicaps.mako.py","file_name":"admin_lock_handicaps.mako.py","file_ext":"py","file_size_in_byte":6859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"639134947","text":"# Filename : cv2ex2.py\r\n\r\nimport numpy as np\r\nimport cv2\r\nimg = cv2.imread('ci.png',cv2.IMREAD_GRAYSCALE)\r\ncv2.imshow('image',img)\r\nk = cv2.waitKey(0)\r\n\r\nif k == 27: \r\n cv2.destroyAllWindows()\r\nelif k == ord('s'): \r\n cv2.imwrite('ci_gray.jpg', img)\r\n img2 = cv2.imread('ci_gray.jpg', cv2.IMREAD_UNCHANGED)\r\n cv2.imshow('image_gray', img2)\r\n cv2.waitKey(0)\r\n cv2.destroyAllWindows()\r\nq\r\n","sub_path":"cv2ex2.py","file_name":"cv2ex2.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"191936843","text":"import FWCore.ParameterSet.Config as cms\n\n# Select MC truth\nfrom L1TriggerOffline.L1Analyzer.GenSelection_cff import *\n# Select L1\nfrom L1TriggerOffline.L1Analyzer.L1Selection_cff import *\n# Histogram limits\nfrom L1TriggerOffline.L1Analyzer.HistoLimits_cfi import *\n# Root output file\nfrom L1TriggerOffline.L1Analyzer.TFile_cfi import *\n# Match generator and L1 jets \nMatchForJetsMc = cms.EDFilter(\"TrivialDeltaRMatcher\",\n src = cms.InputTag(\"SelectL1ForJets\"),\n distMin = cms.double(0.5),\n matched = cms.InputTag(\"SelectGenForJets\")\n)\n\n# Match L1 and generator jets\nMatchMcForJets = cms.EDFilter(\"TrivialDeltaRMatcher\",\n src = cms.InputTag(\"SelectGenForJets\"),\n distMin = cms.double(0.5),\n matched = cms.InputTag(\"SelectL1ForJets\")\n)\n\n# Analyzer\nL1AnalyzerForJetsMC = cms.EDAnalyzer(\"L1Analyzer\",\n histoLimits,\n EffMatchMapSource = cms.untracked.InputTag(\"MatchMcForJets\"),\n ReferenceSource = cms.untracked.InputTag(\"SelectGenForJets\"),\n CandidateSource = cms.untracked.InputTag(\"SelectL1ForJets\"),\n ResMatchMapSource = cms.untracked.InputTag(\"MatchForJetsMc\")\n)\n\n# Define analysis sequence\nL1ForJetMCAnalysis = cms.Sequence(L1ForJetSelection+GenForJetSelection*MatchForJetsMc+MatchMcForJets*L1AnalyzerForJetsMC)\n\n","sub_path":"L1TriggerOffline/L1Analyzer/python/L1ForJetMCAnalysis_cff.py","file_name":"L1ForJetMCAnalysis_cff.py","file_ext":"py","file_size_in_byte":1248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"611287713","text":"print(\"This program takes 3 integers as an input and outputs\",\r\n \"the sum, the smaller, the bigger, the multiplication and the\",\r\n \"mean value of these numbers\")\r\n\r\nnumber1 = int(input(\"Enter the first integer: \"))\r\nnumber2 = int(input(\"Enter the second integer: \"))\r\nnumber3 = int(input(\"Enter the third integer: \"))\r\n\r\nsumm = number1 + number2 + number3\r\nprint(\"The sum of these numbers is equal to\", summ)\r\n\r\nmean = (summ) / 3\r\nprint(\"The mean value of these numbers is equal to\", mean)\r\n\r\nmul = number1 * number2 * number3\r\nprint(\"The multiplication of these numbers is equal to\",mul)\r\n\r\nminimum = min(number1,number2,number3)\r\nprint(\"The smaller of these numbers is\", minimum)\r\n\r\nmaximum = max(number1,number2,number3)\r\nprint(\"The maximum of these numbers is\", maximum)\r\n\r\n\r\n","sub_path":"exercise210.py","file_name":"exercise210.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"287508622","text":"from django.apps import apps\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.comments import signals, get_model\nfrom django.contrib.comments.views.comments import CommentPostBadRequest\nfrom django.core.exceptions import ObjectDoesNotExist, ValidationError\nfrom django.forms import model_to_dict\nfrom django.http import JsonResponse\nfrom django.shortcuts import render_to_response, render, redirect, get_object_or_404\nfrom django.template import RequestContext\nfrom django.template.loader import render_to_string\nfrom django.utils.html import escape\nfrom django.views.decorators.csrf import csrf_protect\nfrom django.views.decorators.http import require_POST\nfrom django_comments import get_form\n\n# Create your views here.\n@csrf_protect\n@require_POST\ndef post_comment(request, next=None, using=None):\n \"\"\"\n Post a comment.\n\n HTTP POST is required. If ``POST['submit'] == \"preview\"`` or if there are\n errors a preview template, ``comments/preview.html``, will be rendered.\n \"\"\"\n # Fill out some initial data fields from an authenticated user, if present\n data = request.POST.copy()\n if request.user.is_authenticated():\n if not data.get('name', ''):\n data[\"name\"] = request.user.get_full_name() or request.user.get_username()\n if not data.get('email', ''):\n data[\"email\"] = request.user.email\n\n # Look up the object we're trying to comment about\n ctype = data.get(\"content_type\")\n object_pk = data.get(\"object_pk\")\n parent = data.get(\"parent\")\n if ctype is None or object_pk is None:\n return CommentPostBadRequest(\"Missing content_type or object_pk field.\")\n try:\n model = apps.get_model(ctype)\n target = model._default_manager.using(using).get(pk=object_pk)\n except TypeError:\n return CommentPostBadRequest(\n \"Invalid content_type value: %r\" % escape(ctype))\n except LookupError:\n return CommentPostBadRequest(\n \"The given content-type %r does not resolve to a valid model.\" % \\\n escape(ctype))\n except ObjectDoesNotExist:\n return CommentPostBadRequest(\n \"No object matching content-type %r and object PK %r exists.\" % \\\n (escape(ctype), escape(object_pk)))\n except (ValueError, ValidationError) as e:\n return CommentPostBadRequest(\n \"Attempting go get content-type %r and object PK %r exists raised %s\" % \\\n (escape(ctype), escape(object_pk), e.__class__.__name__))\n\n # Do we want to preview the comment?\n preview = \"preview\" in data\n\n # Construct the comment form\n form = get_form()(target, data=data, parent=parent)\n\n # Check security information\n if form.security_errors():\n return CommentPostBadRequest(\n \"The comment form failed security verification: %s\" % \\\n escape(str(form.security_errors())))\n\n # If there are errors or if we requested a preview show the comment\n if form.errors or preview:\n template_list = [\n # These first two exist for purely historical reasons.\n # Django v1.0 and v1.1 allowed the underscore format for\n # preview templates, so we have to preserve that format.\n \"comments/%s_%s_preview.html\" % (model._meta.app_label, model._meta.model_name),\n \"comments/%s_preview.html\" % model._meta.app_label,\n # Now the usual directory based template hierarchy.\n \"comments/%s/%s/preview.html\" % (model._meta.app_label, model._meta.model_name),\n \"comments/%s/preview.html\" % model._meta.app_label,\n \"comments/preview.html\",\n ]\n\n dictionary = {\n \"comment\": form.data.get(\"comment\", \"\"),\n \"form\": form,\n \"next\": data.get(\"next\", next),\n }\n\n if request.is_ajax():\n return JsonResponse({\n 'form': render_to_string(template_list, dictionary, RequestContext(request, {})),\n })\n else:\n return render_to_response(template_list, dictionary, RequestContext(request, {}))\n\n # Otherwise create the comment\n comment = form.get_comment_object()\n comment.ip_address = request.META.get(\"REMOTE_ADDR\", None)\n if request.user.is_authenticated():\n comment.user = request.user\n\n # Signal that the comment is about to be saved\n responses = signals.comment_will_be_posted.send(\n sender=comment.__class__,\n comment=comment,\n request=request\n )\n\n for (receiver, response) in responses:\n if response == False:\n return CommentPostBadRequest(\n \"comment_will_be_posted receiver %r killed the comment\" % receiver.__name__)\n\n # Save the comment and signal that it was saved\n comment.save()\n signals.comment_was_posted.send(\n sender=comment.__class__,\n comment=comment,\n request=request\n )\n\n if request.is_ajax():\n success_template = 'comments/reply_success_ajax.html' if comment.parent else 'comments/post_success_ajax.html'\n\n return JsonResponse({\n 'form': render_to_string(success_template, {\"comment\": comment, \"object\": target}, RequestContext(request)),\n 'comment': render_to_string('comments/comment_item_ajax.html', {\"comment\": comment, \"object\": target}, RequestContext(request)),\n })\n\n redirect_target = comment.content_object\n if next:\n redirect_target = next\n\n return redirect(redirect_target)\n\n\n# Create your views here.\n@csrf_protect\n@login_required\ndef change_comment(request, pk):\n\n comment = get_object_or_404(get_model(), pk=pk)\n parent = comment.parent_id\n\n if request.method == 'GET':\n form = get_form()(comment.content_object, initial=model_to_dict(get_model().objects.get(id=comment.pk)), parent=parent)\n if request.is_ajax():\n return JsonResponse({\n 'form': render_to_string('comments/change_ajax.html', {\"comment\": comment, \"form\": form}, RequestContext(request)),\n })\n\n elif request.method == 'POST':\n target = comment.content_object\n author = comment.user\n data = request.POST.copy()\n\n if author.is_authenticated():\n if not data.get('name', ''):\n data[\"name\"] = author.get_full_name() or author.get_username()\n if not data.get('email', ''):\n data[\"email\"] = author.email\n\n form = get_form()(target, data=data, parent=parent)\n\n if form.is_valid():\n\n comment = form.get_comment_object()\n comment.user = author\n comment.pk = pk\n comment.save()\n\n if request.is_ajax():\n return JsonResponse({\n 'comment': render_to_string('comments/comment_item_ajax.html', {\"comment\": comment}, RequestContext(request)),\n })\n\n else:\n\n if request.is_ajax():\n return JsonResponse({\n 'form': render_to_string('comments/change_ajax.html', {\"comment\": comment, \"form\": form}, RequestContext(request)),\n })\n\n return render(request, 'comments/change.html', {\"comment\": comment, \"form\": form})\n\n@csrf_protect\ndef reply_form(request, pk):\n comment = get_object_or_404(get_model(), pk=pk)\n\n if request.is_ajax():\n return JsonResponse({\n 'form': render_to_string('comments/reply_ajax.html', {\"comment\": comment}, RequestContext(request))\n })\n\n return render(request, 'comments/reply.html', {\"comment\": comment})","sub_path":"pimped_comments/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"511825591","text":"from dcs.helicopters import *\nfrom dcs.planes import *\nfrom dcs.ships import *\nfrom dcs.vehicles import *\n\nNorthKorea_2000 = {\n \"country\": \"North Korea\",\n \"side\": \"red\",\n \"units\":[\n MiG_29A,\n Su_25,\n MiG_15bis,\n MiG_21Bis,\n MiG_23MLD,\n MiG_19P,\n\n IL_76MD,\n IL_78M,\n An_26B,\n An_30M,\n Yak_40,\n\n A_50,\n\n Mi_8MT,\n Mi_24V,\n\n Armor.MBT_T_55,\n Armor.MBT_T_72B,\n Armor.MBT_T_80U,\n Armor.IFV_BMP_1,\n Armor.APC_BTR_80,\n Armor.ARV_BRDM_2,\n\n Unarmed.Transport_M818,\n Infantry.Soldier_AK,\n\n AirDefence.SAM_SA_2_LN_SM_90,\n AirDefence.SAM_SA_3_S_125_LN_5P73,\n\n CV_1143_5_Admiral_Kuznetsov,\n Bulk_cargo_ship_Yakushev,\n Dry_cargo_ship_Ivanov,\n Tanker_Elnya_160\n ],\n \"shorad\":[\n AirDefence.AAA_ZU_23_Emplacement,\n AirDefence.SPAAA_ZSU_23_4_Shilka\n ],\n \"boat\": [\n \"GrishaGroupGenerator\", \"MolniyaGroupGenerator\"\n ]\n}","sub_path":"game/factions/north_korea_2000.py","file_name":"north_korea_2000.py","file_ext":"py","file_size_in_byte":1039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"511941639","text":"import re\nfrom core import begins\nfrom core import multi\nfrom core import method\nfrom collections import namedtuple\n\nis_aqua = lambda a: None\nis_terra = lambda a: begins(a, 'MYD')\nis_modis = lambda a: any([is_aqua(a) or is_terra(a)])\n\nis_olitirs = lambda a: re.match(r'^lc8\\d{3}\\d{3}\\d{4}\\d{3}\\w{3}.{2}$', lower(a))\nis_oli = lambda a: re.match(r'^lo8\\d{3}\\d{3}\\d{4}\\d{3}\\w{3}.{2}$', lower(a))\nis_etm = lambda a: re.match(r'^le7\\d{3}\\d{3}\\d{4}\\d{3}\\w{3}.{2}$', lower(a))\nis_tm = lambda a: re.match(r'^lt[4|5]\\d{3}\\d{3}\\d{4}\\d{3}\\w{3}.{2}$',\n lower(a))\nis_landsat_ondemand = lambda a: any([is_etm(a),\n is_tm(a),\n is_oli(a),\n is_olitirs(a)])\nis_landsat_collection = lambda a: False\nis_landsat = lambda a: any([is_landsat_collection(a), is_landsat_ondemand(a)])\nis_supported = lambda a: any([is_landsat(a), is_modis(a)])\n\nlandsat_ondemand_horizontal = lambda a: a[3:6].lstrip('0')\nlandsat_ondemand_vertical = lambda a: a[6:9].lstrip('0')\nlandsat_ondemand_year = lambda a: a[9:13]\nlandsat_ondemand_doy = lambda a: a[13:16]\nlandsat_ondemand_gs = lambda a: a[16:19]\nlandsat_ondemand_version = lambda a: a[19:21]\n\nlandsat_collection_horizontal = lambda a: a[3:6].lstrip('0')\nlandsat_collection_vertical = lambda a: a[6:9].lstrip('0')\nlandsat_collection_year = lambda a: a[9:13]\nlandsat_collection_doy = lambda a: a[13:16]\nlandsat_collection_gs = lambda a: a[16:19]\nlandsat_collection_version = lambda a: a[19:21]\n\ninput_info = namedtuple('InputInfo', ['observation', 'dataset'])\n\nobservation_info = namedtuple('ObservationInfo', ['horizontal', 'vertical',\n 'year', 'doy', 'name'])\n\ndataset_info = namedtuple('DatasetInfo', ['mission', 'satellite',\n 'sensor', 'pixel_size', 'campaign'])\n\npixels = namedtuple('DefaultPixelSize', ['dd', 'meters'])\n\ndef dataset(product_name):\n datasets = {\n r'^lt4\\d{13}[a-z]{3}[a-z0-9]{2}$':\n dataset_info(mission='landsat', satellite='4',\n sensors=('tm'),\n default_pixel_size=pixels(dd=30, meters=30),\n campaign='landsat_ondemand'),\n\n r'^lt5\\d{13}[a-z]{3}[a-z0-9]{2}$':\n dataset_info(mission='landsat', satellite='5',\n sensors=('tm'), pixel_size=30,\n campaign='landsat_ondemand'),\n\n r'^le7\\d{13}[a-z]{3}[a-z0-9]{2}$':\n dataset_info(mission='landsat', satellite='7',\n sensors=('etm'), pixel_size=30,\n campaign='landsat_ondemand'),\n\n r'^lc8\\d{13}[a-z]{3}[a-z0-9]{2}$':\n dataset_info(mission='landsat', satellite='8',\n sensors=('oli','tirs'), pixel_size=30,\n campaign='landsat_ondemand'),\n\n r'^lo8\\d{13}[a-z]{3}[a-z0-9]{2}$':\n dataset_info(mission='landsat', satellite='8',\n sensors=('oli'), pixel_size=30,\n campaign='landsat_ondemand'),\n\n r'^mod09a1\\.a\\d{7}\\.h\\d{2}v\\d{2}\\.005\\.\\d{13}$':\n dataset_info(mission='modis', satellite='terra',\n sensors=('oli'), pixel_size=1000,\n campaign='modis'),\n\n r'^mod09ga\\.a\\d{7}\\.h\\d{2}v\\d{2}\\.005\\.\\d{13}$':\n dataset_info(mission='modis', satellite='terra',\n sensors=('oli'), pixel_size=1000,\n campaign='modis'),\n\n r'^mod09gq\\.a\\d{7}\\.h\\d{2}v\\d{2}\\.005\\.\\d{13}$':\n dataset_info(mission='modis', satellite='terra',\n sensors=('oli'), pixel_size=1000,\n campaign='modis'),\n\n r'^mod09q1\\.a\\d{7}\\.h\\d{2}v\\d{2}\\.005\\.\\d{13}$':\n dataset_info(mission='modis', satellite='terra',\n sensors=('oli'), pixel_size=1000,\n campaign='modis'),\n\n r'^mod13a1\\.a\\d{7}\\.h\\d{2}v\\d{2}\\.005\\.\\d{13}$':\n dataset_info(mission='modis', satellite='terra',\n sensors=('oli'), pixel_size=1000,\n campaign='modis'),\n\n r'^mod13a2\\.a\\d{7}\\.h\\d{2}v\\d{2}\\.005\\.\\d{13}$':\n dataset_info(mission='modis', satellite='terra',\n sensors=('oli'), pixel_size=1000,\n campaign='modis'),\n\n r'^mod13a3\\.a\\d{7}\\.h\\d{2}v\\d{2}\\.005\\.\\d{13}$':\n dataset_info(mission='modis', satellite='terra',\n sensors=('oli'), pixel_size=1000,\n campaign='modis'),\n\n r'^mod13q1\\.a\\d{7}\\.h\\d{2}v\\d{2}\\.005\\.\\d{13}$':\n dataset_info(mission='modis', satellite='terra',\n sensors=('oli'), pixel_size=1000,\n campaign='modis'),\n\n r'^myd09a1\\.a\\d{7}\\.h\\d{2}v\\d{2}\\.005\\.\\d{13}$':\n dataset_info(mission='modis', satellite='aqua',\n sensors=('oli'), pixel_size=1000,\n campaign='modis'),\n\n r'^myd09ga\\.a\\d{7}\\.h\\d{2}v\\d{2}\\.005\\.\\d{13}$':\n dataset_info(mission='modis', satellite='aqua',\n sensors=('oli'), pixel_size=1000,\n campaign='modis'),\n\n r'^myd09gq\\.a\\d{7}\\.h\\d{2}v\\d{2}\\.005\\.\\d{13}$':\n dataset_info(mission='modis', satellite='aqua',\n sensors=('oli'), pixel_size=1000,\n campaign='modis'),\n\n r'^myd09q1\\.a\\d{7}\\.h\\d{2}v\\d{2}\\.005\\.\\d{13}$':\n dataset_info(mission='modis', satellite='aqua',\n sensors=('oli'), pixel_size=1000,\n campaign='modis'),\n\n r'^myd13a1\\.a\\d{7}\\.h\\d{2}v\\d{2}\\.005\\.\\d{13}$':\n dataset_info(mission='modis', satellite='aqua',\n sensors=('oli'), pixel_size=1000,\n campaign='modis'),\n\n r'^myd13a2\\.a\\d{7}\\.h\\d{2}v\\d{2}\\.005\\.\\d{13}$':\n dataset_info(mission='modis', satellite='aqua',\n sensors=('oli'), pixel_size=1000,\n campaign='modis'),\n\n r'^myd13a3\\.a\\d{7}\\.h\\d{2}v\\d{2}\\.005\\.\\d{13}$':\n dataset_info(mission='modis', satellite='aqua',\n sensors=('oli'), pixel_size=1000,\n campaign='modis'),\n\n r'^myd13q1\\.a\\d{7}\\.h\\d{2}v\\d{2}\\.005\\.\\d{13}$':\n dataset_info(mission='modis', satellite='aqua',\n sensors=('oli'), pixel_size=1000,\n campaign='modis')\n }\n\n for key,value in datasets.iteritems():\n if re.matches(key, product_name):\n return value\n else:\n return None\n\n\n@multi\ndef info(product_name):\n return dataset(product_name).campaign\n\n\n@method(info, 'landsat_collection')\ndef info(product_name):\n horizontal = None\n vertical = None\n year = None\n doy = None\n return None\n\n\n@method(info, 'landsat_ondemand')\ndef info(product_name):\n horizontal = None\n vertical = None\n year = None\n doy = None\n return a[3:6].lstrip('0')\n\n\n@method(info, 'modis')\ndef info(product_name):\n horizontal = None\n vertical = None\n year = None\n doy = None\n return a[3:6].lstrip('0')\n","sub_path":"cascade/inputs.py","file_name":"inputs.py","file_ext":"py","file_size_in_byte":7368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"343105989","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('poll', '0002_auto_20150326_1728'),\n ('users', '0007_auto_20150326_1450'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Comment',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('commentBody', models.CharField(max_length=500)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Post',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('postTitle', models.CharField(max_length=100)),\n ('postBody', models.CharField(max_length=500)),\n ('stick', models.CharField(default=b'N', max_length=1, choices=[(b'Y', b'Yes'), (b'N', b'No')])),\n ('lock', models.CharField(default=b'N', max_length=1, choices=[(b'Y', b'Yes'), (b'N', b'No')])),\n ('forum', models.ForeignKey(to='poll.forum')),\n ('user', models.ForeignKey(to='users.User')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.AddField(\n model_name='comment',\n name='post',\n field=models.ForeignKey(to='posts.Post'),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='comment',\n name='user',\n field=models.ForeignKey(to='users.User'),\n preserve_default=True,\n ),\n ]\n","sub_path":"projBlog/Forum/posts/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"595606299","text":"import maya.cmds as cmds\nimport maya.utils as utils\nimport os.path\nimport glob\nimport re\nimport sys, os\nimport subprocess\nimport threading\nimport mtoa.callbacks as callbacks\nimport maya.OpenMaya as om\nimport mtoa.utils as mutils\n\ndefaultFolder = \"\"\n\nclass MtoARenderToTexture(object):\n window = None\n def __new__(cls, *args, **kwargs):\n if not '_instance' in vars(cls):\n cls._instance = super(MtoARenderToTexture, cls).__new__(cls, *args, **kwargs)\n return cls._instance\n\n def __init__(self):\n if self.window is None:\n self.window = 'MtoARenderToTexture'\n self.listElements = []\n self.column = ''\n\n\n\n def doExport(self):\n outFolder = cmds.textFieldButtonGrp('outputFolder', q=True, tx=True)\n\n if (outFolder == ''):\n cmds.confirmDialog( title='Render To Texture', message='An Output folder must be selected', button=['Ok'], defaultButton='Ok', cancelButton='Ok', dismissString='Ok' )\n return False\n\n\n resolution = cmds.intFieldGrp('resolution', q=True, v1=True)\n aa_sampling = cmds.intFieldGrp('aa_samples', q=True, v1=True)\n\n filter_type = cmds.optionMenuGrp('filter', q=True, v=True)\n all_udims = cmds.checkBox('all_udims', q=True, v=True)\n filter_width = cmds.floatFieldGrp('filterWidth', q=True, v1=True)\n shader = cmds.textFieldGrp('shader', q=True, tx=True)\n udims = cmds.textFieldGrp('udims', q=True, tx=True)\n\n selList = cmds.ls(sl=1)\n\n if (len(selList) == 0):\n cmds.confirmDialog( title='Render To Texture', message='No Geometry Selected', button=['Ok'], defaultButton='Ok', cancelButton='Ok', dismissString='Ok' )\n return False\n\n cmds.arnoldRenderToTexture(folder=outFolder, shader=shader, resolution=resolution, aa_samples=aa_sampling, filter=filter_type, filter_width=filter_width, all_udims=all_udims, udims=udims )\n\n cmds.deleteUI(self.window)\n return True\n\n def doCancel(self):\n cmds.deleteUI(self.window)\n return True\n\n def browseObjFilename(self):\n\n global defaultFolder\n if defaultFolder == \"\":\n defaultFolder = cmds.workspace(q=True,rd=True, fn=True)\n ret = cmds.fileDialog2(cap='Select Folder',okc='Select',fm=3,dir=defaultFolder)\n if ret is not None and len(ret):\n defaultFolder = ret[0]\n cmds.textFieldButtonGrp('outputFolder', e=True, text=defaultFolder)\n\n\n return True\n\n\n def create(self):\n\n if cmds.window(self.window, exists=True):\n cmds.deleteUI(self.window)\n\n winTitle = \"Render To Texture\"\n\n self.window = cmds.window(self.window, widthHeight=(460, 170), title=winTitle)\n self.createUI()\n\n\n cmds.setParent(menu=True)\n cmds.showWindow(self.window)\n\n try:\n initPos = cmds.windowPref( self.window, query=True, topLeftCorner=True )\n if initPos[0] < 0:\n initPos[0] = 0\n if initPos[1] < 0:\n initPos[1] = 0\n cmds.windowPref( self.window, edit=True, topLeftCorner=initPos )\n except :\n pass\n\n\n\n def createUI(self):\n cmds.scrollLayout(childResizable=True,)\n cmds.columnLayout(adjustableColumn=True)\n #cmds.setParent(\"..\")\n cmds.rowLayout(numberOfColumns=1, columnAlign1='left')\n global defaultFolder\n cmds.textFieldButtonGrp('outputFolder', label='Output Folder', cw3=(90,320, 50), text=defaultFolder, buttonLabel='...', buttonCommand=lambda *args: self.browseObjFilename())\n\n cmds.setParent(\"..\")\n cmds.rowLayout(numberOfColumns=2, columnAlign2=('left', 'right'))\n cmds.intFieldGrp('resolution', label='Resolution', value1=512, ct2=('left', 'left'), cw2=(90,110), w=230)\n cmds.intFieldGrp('aa_samples', label='Camera Samples (AA)', cw2=(150,60), value1=3, w=200)\n cmds.setParent(\"..\")\n\n cmds.rowLayout(numberOfColumns=2, columnAlign2=('left', 'right'))\n cmds.optionMenuGrp('filter', label='Filter ')\n cmds.menuItem( label='blackman_harris' )\n cmds.menuItem( label='box' )\n cmds.menuItem( label='catrom' )\n cmds.menuItem( label='catrom2d' )\n cmds.menuItem( label='closest' )\n cmds.menuItem( label='cone' )\n cmds.menuItem( label='cook' )\n cmds.menuItem( label='cubic' )\n cmds.menuItem( label='disk' )\n cmds.menuItem( label='farthest' )\n cmds.menuItem( label='gaussian' )\n cmds.menuItem( label='heatmap' )\n cmds.menuItem( label='mitnet' )\n cmds.menuItem( label='sync' )\n cmds.menuItem( label='triangle' )\n cmds.menuItem( label='variance' )\n cmds.menuItem( label='video' )\n\n cmds.optionMenuGrp('filter', e=True, w=230, ct2=('left', 'left'), cw2=(90,110), v='gaussian')\n\n cmds.floatFieldGrp('filterWidth', label='Filter Width', w=200, ct2=('left', 'left'), cw2=(150,60), value1=2.0)\n cmds.setParent(\"..\")\n\n cmds.rowLayout(numberOfColumns=1, columnAlign1='both')\n cmds.textFieldGrp('shader', label='Shader Override', ct2=('left', 'left'), cw2=(90,110), text=\"\", w=380)\n cmds.setParent(\"..\")\n\n cmds.rowLayout(numberOfColumns=2, columnAlign2=('left', 'right'))\n cmds.textFieldGrp('udims', label='Udims', ct2=('left', 'left'), cw2=(90,110), text=\"\", w=280)\n cmds.checkBox( 'all_udims',label='All Udims', value=False )\n\n cmds.setParent(\"..\")\n\n cmds.rowLayout(numberOfColumns=4, columnAlign4=('left', 'left', 'left', 'right'))\n cmds.text( ' ')\n\n cmds.button(label='Render', al='right', w=85, h=25, command=lambda *args: self.doExport())\n cmds.text( ' ')\n cmds.button(label='Cancel', al='right', w=85, h=25, command=lambda *args: self.doCancel())\n cmds.setParent(\"..\")","sub_path":"maya/plug-ins/mtoa_1.4.2.2_maya2016/scripts/mtoa/renderToTexture.py","file_name":"renderToTexture.py","file_ext":"py","file_size_in_byte":5920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"233399630","text":"#tri insertion\r\ndef tri_insertion(tab):\r\n\r\n for i in range(1, len(tab)):\r\n k = tab[i]\r\n j = i-1\r\n while j >= 0 and k < tab[j] :\r\n tab[j + 1] = tab[j]\r\n j -= 1\r\n tab[j + 1] = k\r\n\r\n\r\ntab = [77,0,0,98,75,5543,67,76,6]\r\ntri_insertion(tab)\r\nprint (\"Le tableau trié est:\")\r\nfor i in range(len(tab)):\r\n print (\"% d\" % tab[i])","sub_path":"ex6c.py","file_name":"ex6c.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"63984758","text":"import math\nimport os\nimport random\nimport shutil\nfrom getmotionvector import MotionVector\nimport cv2\nimport numpy as np\nfrom contextlib import suppress\nimport io\nimport sys\n\n\ndef vigenere_encrypt(message, key) :\n\treturn(message)\n\ndef video_to_image(path, temp_folder) :\n\ttry :\n\t\tos.mkdir(temp_folder)\n\texcept OSError:\n\t\tremove(temp_folder)\n\t\tos.mkdir(temp_folder)\n\n\tcount = 0\n\n\tsuccess = True\n\n\tvidcap = cv2.VideoCapture(path)\n\tinfo_image = {}\n\tinfo_image['width'] = int(vidcap.get(3))\n\tinfo_image['height'] = int(vidcap.get(4))\n\t#ret = vidcap.set(5, 10)\n\tinfo_image['fps'] = int(vidcap.get(5))\n\tprint (\"FPS = \", info_image['fps'])\n\tprint (\"Frame width = \", info_image['width'])\n\tprint (\"Frame height = \", info_image['height'])\n\t#info_image['fps'] = 5 \n\tinfo_image['fourcc'] = int(vidcap.get(6))\n\twhile success:\n\t\tsuccess,image = vidcap.read()\n\t\tif (success) :\n\t\t\tcv2.imwrite(os.path.join(temp_folder, \"{:d}.png\".format(count)), image)\n\t\t\tcount += 1\n\n\tinfo_image['total_image'] = count\n\tprint (\"Total frames = \", info_image['total_image'])\t\n\treturn(info_image)\n\ndef remove(path):\n if os.path.isfile(path):\n os.remove(path)\n elif os.path.isdir(path):\n shutil.rmtree(path)\n else:\n raise ValueError(\"file {} is not a file or dir.\".format(path))\n\nwith suppress(Exception):\n\tdef generate_random_order_pixel(video_name, image_width, image_height, seed, need_pixel, need_frame, frame_sequencial, pixel_sequencial, pixel_range, frame_range, pixel_per_image,is_motion) :\n\t\t# create a text trap and redirect stdout\n\t\ttext_trap = io.StringIO()\n\t\tsys.stdout = text_trap\n\t\t\n\t\trandom.seed(seed)\n\t\t#pixel_order = np.array([])\n\t\tpixel_order = []\n\t\tif(is_motion):\n\t\t\ti = 0\n\t\t\tcounter = 0\n\t\t\ttry:\n\t\t\t\tarray = MotionVector(video_name)\n\t\t\t#print (array)\n\t\t\texcept Exception:\n\t\t\t\tpass\n\t\t\twhile (counter <= need_pixel):\n\t\t\n\t\t\t\tcurrent_frame = array[i][0]\n\t\t\t\t#print(current_frame)\n\t\t\t\txcor = array[i][1]\n\t\t\t\tycor = array[i][2]\n\t\t\t\twlim = array[i][3]\n\t\t\t\thlim = array[i][4]\n\t\t\t\tx = xcor\n\t\t\t\ty = ycor\n\t\t\t\twhile(1) :\n\t\t\t\t\tval = x*image_width + y + current_frame * pixel_per_image \n\t\t\t\t\tpixel_order.append(val)\n\t\t\t\t\tcounter = counter + 1\n\t\t\t\t\tif (counter > need_pixel):\n\t\t\t\t\t\tbreak\n\t\t\t\t\tif(y < ycor + wlim):\n\t\t\t\t\t\ty = y + 1\n\t\t\t\t\telif(x < xcor + hlim):\n\t\t\t\t\t\ty = ycor\n\t\t\t\t\t\tx = x + 1\n\t\t\t\t\telse:\n\t\t\t\t\t\ti = i + 1\n\t\t\t\t\t\tbreak\n\t\telif ((not frame_sequencial) and (not pixel_sequencial)) :\n\t\t\tpixel_order = random.sample(range(pixel_range[0],pixel_range[1]), need_pixel)\n\t\telif (pixel_sequencial and frame_sequencial) :\n\t\t\tpixel_order = list(range (0, need_pixel))\n\t\telif (pixel_sequencial and (not frame_sequencial)) :\n\t\t\tframe_order = random.sample(range(frame_range[0],frame_range[1]), need_frame)\n\t\t\tfor frame_idx in frame_order :\n\t\t\t\tpixel_in_frame = list(range (frame_idx * pixel_per_image, (frame_idx+1) * pixel_per_image))\n\t\t\t\tpixel_order = np.append(pixel_order, pixel_in_frame)\n\t\t\tpixel_order = pixel_order[:need_pixel]\n\t\telif (not(pixel_sequencial) and frame_sequencial) :\n\t\t\tframe_order = list(range (0,need_frame))\n\t\t\tfor frame_idx in frame_order :\n\t\t\t\t# print(frame_idx)\n\t\t\t\tpixel_in_frame = random.sample(range(frame_idx * pixel_per_image, (frame_idx+1) * pixel_per_image), pixel_per_image)\n\t\t\t\tpixel_order = np.append(pixel_order, pixel_in_frame)\n\t\t\tpixel_order = pixel_order[:need_pixel]\n\t\n\t\t#pixel_order = np.array([])\n\t\n\t\t#flag_motion_vector = 1\n\t\t#print (need_pixel)\n\t\tsys.stdout = sys.__stdout__\n\t\t#print (pixel_order)\n\t\treturn (pixel_order)\n","sub_path":"utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":3449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"522916367","text":"import re\r\n\r\nimport requests\r\n\r\nurl = 'https://www.douyu.com/g_LOL'\r\nregular1 = '([\\s\\S]*?)'\r\nregular2 = '([\\s\\S]*?)'\r\ns = '

([\\s\\S]*?)

'\r\nx = '([\\s\\S]*?)'\r\nxx = '([\\s\\S]*?)'\r\nr = requests.get(url)\r\nr.encoding = 'utf-8'\r\nhtml = r.text\r\nf = re.findall(s, html)\r\nanchors = []\r\nfor i in f:\r\n name = re.findall(x, i)\r\n name = \"\".join(name)\r\n number = re.findall(xx, i)\r\n number = \"\".join(number)\r\n anchor = {'name':name, 'number':number}\r\n anchors.append(anchor)\r\nnewlist = sorted(anchors, key=lambda k: k['number'], reverse=True)\r\n#anchors2 = sorted(anchors)\r\n#anchors1 = sorted(anchors, key=anchors2)\r\n#anchors2 = sorted(anchors, key=anchors1)\r\n#u = lambda k: k['number']\r\n#r = re.findall('\\d*',)\r\nfor i in newlist:\r\n print(i['name'] + '--------' + i['number'])\r\n#print(anchors[0])\r\n#print(newlist)","sub_path":"(taishi)python visual studio code/12/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"61907441","text":"# Read Email With NTLM Hash\n\nimport sys\nfrom optparse import OptionParser\nfrom exchangelib import Account, Credentials, Configuration, FileAttachment\n\nAutoDownload = False\n\ndef EmailAccountAuthByNtlmHash(username, ntlmhash, email, server=None, flag=True):\n try:\n if flag:\n hash = ntlmhash if ntlmhash.find(\":\") > 0 else \"00000000000000000000000000000000:%s\"%str(ntlmhash)\n print(\"[+] 账号:%s 认证口令NTLM-Hash:%s\"%(str(username), str(hash)))\n else:\n hash = ntlmhash\n except Exception as hashexception:\n print(\"[-] NTLM-Hash值输入错误!\")\n raise hashexception\n try:\n credentials = Credentials(username, hash)\n except Exception as credexception:\n print(\"[-] 凭据生成错误!\")\n raise credexception\n try:\n if server == None:\n email = Account(email, credentials=credentials, autodiscover=True)\n print(\"[+] 邮箱账号认证登录成功\")\n return email\n else:\n config = Configuration(server, credentials)\n email = Account(primary_smtp_address=email, config=config, autodiscover=False)\n return email\n except Exception as error:\n print(error)\n print(\"[+] 账号连接或认证失败\")\n return None\n\ndef GetInboxFolder(email):\n try:\n inbox = email.inbox\n print(\"[+] 成功获取收件箱!\")\n return inbox\n except Exception as error:\n print(\"[-] 获取收件箱失败!\")\n return None\n\ndef SearchKeyword(keyword, email):\n try:\n if str(email.sender.email_address).find(keyword) >= 0:\n return True\n except Exception as reason:\n pass\n try:\n if email.subject.find(keyword) >= 0:\n return True\n except Exception as reason:\n pass\n try:\n if email.text_body.find(keyword) >= 0:\n return True\n except Exception as reason:\n pass\n try:\n for attachment in email.attachments:\n if attachment.name.find(keyword) >= 0:\n return True\n return False\n except Exception as reason:\n pass\n\ndef ListAllFloder(email):\n print(\"[+] 列出所有文件夹\")\n print(email.root.tree())\n\ndef GetOtherFloder(email, floder):\n target = email.root.glob(floder+\"*\")\n print(\"[+] 成功获取文件夹:%s\"%str(floder))\n if target.folders == []:\n target = email.root.glob(\"*/\"+floder)\n if target.folders == []:\n target = email.root.glob(\"**/\"+floder)\n if target.folders == []:\n return None\n return target\n\n\ndef GetEmail(floder, count):\n emails = floder.all().order_by(\"-datetime_received\")[:count]\n return emails\n\ndef GetEmailByPage(floder, size, page):\n start = 0\n pages = []\n total = 0\n if floder.name == \"收件箱\":\n total = floder.total_count\n else:\n total = floder.folders[0].total_count\n for index in range(page):\n if start + size < total:\n pages.append(floder.all().order_by(\"-datetime_received\")[start:start+size])\n start += size\n else:\n pages.append(floder.all().order_by(\"-datetime_received\")[start:floder.total_count])\n break\n return pages\n\ndef DownloadAttachment(attachment, filename):\n with open(filename, \"wb\") as fw:\n fw.write(attachment.content)\n print(\"附件: %s下载完成, 保存名字: %s\"%(attachment.name, filename))\n\ndef DisplayEmail(emails, keyword=None):\n for item in emails:\n if keyword != None and keyword not in [\"\",\" \"]:\n if not SearchKeyword(keyword, item):\n continue\n print(\"***************************************************************\")\n #print(dir(item.id_from_xml))\n print(\"邮件ID: %s\"%str(item.id))\n print(\"发件人: %s(%s)\"%(str(item.sender.name), str(item.sender.email_address)))\n if item.cc_recipients != None:\n ccp = \"抄送:\"\n for person in item.cc_recipients:\n ccp += \" %s(%s);\"%(str(person.name), str(person.email_address))\n print(ccp)\n if item.bcc_recipients != None:\n bccp = \"密送:\"\n for person in item.bcc_recipients:\n bccp += \" %s(%s);\"%(str(person.name), str(person.email_address))\n print(bccp)\n print(\"主题: %s\"%str(item.subject))\n print(\"时间: %s\"%str(item.datetime_received))\n print(\"邮件内容:\\n%s\"%str(item.text_body))\n for attachment in item.attachments:\n if isinstance(attachment, FileAttachment):\n filename = str(item.id) + attachment.name\n print(\"附件文件: %s\"%str(attachment.name))\n if AutoDownload:\n DownloadAttachment(attachment, filename)\n print(\"***************************************************************\")\n\n\n\nif __name__ == \"__main__\":\n parser = OptionParser()\n parser.add_option(\"-u\", \"--user\", dest=\"user\", help=\"Please Input Username: Domain\\\\DomainUserName!\")\n parser.add_option(\"-H\", \"--hash\", dest=\"hash\", help=\"Please Input Ntlmhash: xx:xx Or xxxx!\")\n parser.add_option(\"-p\", \"--pswd\", dest=\"pswd\", help=\"Please Input Password!\")\n parser.add_option(\"-e\", \"--email\", dest=\"email\", help=\"Please Input Email Address!\")\n parser.add_option(\"-c\", \"--count\", dest=\"count\", help=\"Please Input How Many Emails You Want To Read!\")\n parser.add_option(\"-s\", \"--server\", dest=\"server\", help=\"Please Input Email Server Address!\")\n parser.add_option(\"-k\", \"--keyword\", dest=\"keyword\", help=\"Please Input keyword To Search!\")\n parser.add_option(\"-L\", \"--List\", dest=\"List\", action=\"store_true\", default=False, help=\"List All Email Floders!\")\n parser.add_option(\"-D\", \"--download\", dest=\"download\", action=\"store_true\", default=False, help=\"Whether Download Attachment Files Or Not!\")\n parser.add_option(\"-d\", \"--display\", dest=\"display\", action=\"store_true\",help=\"Show All Email Floders!\")\n parser.add_option(\"-l\", \"--list\", dest=\"list\", action=\"store_true\",help=\"List All Email Floders!\")\n parser.add_option(\"-f\", \"--folder\", dest=\"floder\", help=\"Please Input Email Floder Name!\")\n (options, args) = parser.parse_args()\n #print(repr(options.keyword))\n if options.download:\n AutoDownload = True\n if options.server == None:\n if options.pswd != None and options.hash == None:\n email = EmailAccountAuthByNtlmHash(options.user, options.pswd, options.email, flag=False)\n else:\n email = EmailAccountAuthByNtlmHash(options.user, options.hash, options.email)\n if email == None:\n exit(0)\n else:\n if options.pswd != None and options.hash == None:\n email = EmailAccountAuthByNtlmHash(options.user, options.pswd, options.email, options.server, flag=False)\n else:\n email = EmailAccountAuthByNtlmHash(options.user, options.hash, options.email, options.server)\n if email == None:\n exit(0)\n if options.List == True:\n ListAllFloder(email)\n sys.exit(0)\n if options.floder != None:\n floder = GetOtherFloder(email, options.floder)\n else:\n floder = GetInboxFolder(email)\n if int(options.count) > 20:\n size = 10\n pagecount = int(options.count)/size + 1 if int(options.count)%size != 0 else int(options.count)/size + 1\n pages = GetEmailByPage(floder, size, int(pagecount))\n for page in pages:\n DisplayEmail(page, options.keyword)\n else:\n emails = GetEmail(floder, int(options.count))\n DisplayEmail(emails, options.keyword)\n","sub_path":"getmail.py","file_name":"getmail.py","file_ext":"py","file_size_in_byte":7663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"105646050","text":"\nfrom typing import Optional\n\nfrom paramak import ExtrudeMixedShape\n\n\nclass ExtrudeSplineShape(ExtrudeMixedShape):\n \"\"\"Extrudes a 3d CadQuery solid from points connected with spline\n connections.\n\n Args:\n distance: the extrusion distance to use (cm units if used for\n neutronics).\n stp_filename: Defaults to \"ExtrudeSplineShape.stp\".\n stl_filename: Defaults to \"ExtrudeSplineShape.stl\".\n \"\"\"\n\n def __init__(\n self,\n distance: float,\n stp_filename: Optional[str] = \"ExtrudeSplineShape.stp\",\n stl_filename: Optional[str] = \"ExtrudeSplineShape.stl\",\n **kwargs\n ):\n\n super().__init__(\n distance=distance,\n stp_filename=stp_filename,\n stl_filename=stl_filename,\n connection_type=\"spline\",\n **kwargs\n )\n","sub_path":"paramak/parametric_shapes/extruded_spline_shape.py","file_name":"extruded_spline_shape.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"398375129","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport os\n\nSCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))\n\nimport subprocess\nimport time\nimport signal\n\n\nclass CheckerTimeoutException(Exception):\n def __init__(self, *args, **kwargs):\n super(CheckerTimeoutException, self).__init__(*args, **kwargs)\n\n\ndef call(args, timeout, term_signal=signal.SIGINT, poll_interval=0.2):\n start_time = time.time()\n ret = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n while time.time() - start_time < timeout and ret.poll() is None:\n time.sleep(poll_interval)\n if ret.poll() is None:\n ret.send_signal(term_signal)\n raise CheckerTimeoutException(ret)\n return ret\n","sub_path":"python-modules/health_agent/checker.py","file_name":"checker.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"539131467","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jun 13 11:35:06 2015\n\n@author: Niklas\n\"\"\"\n\nfrom random import randint\n\nboard = []\n\nfor x in range(0, 10):\n board.append([\"O\"] * 10)\n\ndef print_board(board):\n for row in board:\n print(\" \".join(row))\n\ndef random_row(board):\n return(randint(0, len(board) - 1))\n\ndef random_col(board):\n return(randint(0, len(board[0]) - 1))\n\nship_row = random_row(board)\nship_col = random_col(board)\n\ndef battleship():\n print_board(board)\n guess_row = int(input(\"Guess Row:\"))\n guess_col = int(input(\"Guess Col:\"))\n\n if guess_row == ship_row and guess_col == ship_col:\n board[guess_col - 1][guess_row - 1] = \"!\"\n print_board(board)\n print(\"Congratulations! You sank my battleship!\")\n print(\" \")\n else:\n print(\"You missed my battleship!\")\n print(\" \")\n board[guess_col - 1][guess_row - 1] = \"X\"\n battleship()\n\nbattleship()","sub_path":"battleship.py","file_name":"battleship.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"355693371","text":"class TrieNode:\n def __init__(self, char):\n self.char = char\n self.children = []\n self.word_finished = False # to check if its a last characxter of the word\n self.counter = 1 # how many times this character apperared\n\n def get_char(self):\n print(self.char)\n\n def get_node(self):\n for i in self.children:\n print(i)\n\n\ndef add(root, word):\n node = root\n for char in word:\n # print(node.counter)\n found_in_child = False\n for child in node.children:\n # print(\"child+++++++++++++++++++char\", child.char)\n # print(\"-------\")\n # print(\"heregvggvgvgvgvgvgvgvgvgvgvgvg\", char)\n if child.char == char:\n # print(\"gvgvgvgvgv\")\n child.counter = child.counter + 1\n node = child\n found_in_child = True\n break\n #\n # print(found_in_child)\n # we did not find the word so add the new child\n if found_in_child == False:\n # print(\"chratacter\", char)\n new_node = TrieNode(char)\n node.children.append(new_node)\n for i in node.children:\n i.get_char()\n # point the node to the new child\n node = new_node\n # Everything finished. Mark it as the end of a word.\n node.word_finished = True\n\n\ndef search(root, str):\n node = root\n for char in str:\n char_not_found = True\n for child in node.children:\n if child.char == char:\n char_not_found = False\n node = child\n break\n if char_not_found:\n return False\n\n return True\n\n\nroot = TrieNode('*')\nadd(root, \"hackathon\")\nadd(root, 'hack')\nadd(root, 'htck')\n\nprint(search(root, 'hacka'))\n","sub_path":"Trie/trie implementation.py","file_name":"trie implementation.py","file_ext":"py","file_size_in_byte":1818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"176207841","text":"# Import the dependencies\n\nimport sqlalchemy\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine, func, inspect\n\nfrom flask import Flask, jsonify, request, render_template\nimport datetime as dt\nimport numpy as np\nfrom flask import Flask, jsonify\nimport pandas as pd\nimport os\n\n# Database Setup\n\nengine = create_engine(\n \"sqlite:///Resources/hawaii.sqlite\",\n connect_args={\"check_same_thread\": False},\n echo=True,\n)\n\n# Reflect an existing database into a new model\nBase = automap_base()\n# Reflect the tables\nBase.prepare(engine, reflect=True)\n\n# Save reference to the table\nMeasurements = Base.classes.measurement\nStations = Base.classes.station\n\n# Create our session (link) from Python to the DB\nsession = Session(engine)\n\n#################################################\n# Flask Setup\n#################################################\napp = Flask(__name__)\n\n#################################################\n# List all the flask Routes\n#################################################\n@app.route(\"/\")\ndef Welcome():\n return (\n f\"

Welcome to the Climate App for Honululu!

\"\n f\"
\"\n f'\"Honululu\"'\n f\"
\"\n f\"

If you've decided to treat yourself with a holiday vacation in Honolulu,\"\n f\" Hawaii then this App will help with your trip planning\"\n f\"
\"\n f\" This app will provide you with climate analysis in the area

\"\n f\"
\"\n f\"All the routes are listed below:
\"\n f\"
\"\n f\"/api/v1.0/precipitation (To list prior year rain totals from all stations)
\"\n f\" Click here for Precipitaion info\"\n f\"
\"\n f\"
\"\n f\"/api/v1.0/stations (To list the Stations Info)
\"\n f\" Click here for Stations details\"\n f\"
\"\n f\"
\"\n f\"/api/v1.0/tobs (To list prior year temperatures from all stations)
\"\n f\" Click here for Temperature details\"\n f\"
\"\n f\"
\"\n f\"/api/v1.0/start (When given the start date, calculates the min/avg/max temperature for all dates greater than and equal to the start date)
\"\n f\" Click here and enter start date in fomrat YYYY-MM-DD\"\n f\"
\"\n f\"Please replace start in url with the start_date you want to enter\"\n f\"
\"\n f\"
\"\n f\"/api/v1.0/start/end (When given the start and the end date (YYYY-MM-DD), calculate the min/avg/max temperature for dates between the start and end date inclusive)
\"\n f\" Click here and enter start and end date in format YYYY-MM-DD\"\n f\"
\"\n f\"Please replace start_date/end_date in the url in format yyyy-mm-dd/yyyy-mm-dd\"\n f\"
\"\n f\"
\"\n f\"
\"\n f\"
\"\n )\n\n\n#########################################################################################\n# Route #1(/api/v1.0/precipitation)\n#########################################################################################\n\n\n@app.route(\"/api/v1.0/precipitation\")\ndef precipitation():\n # Return a list of precipitaion for prior year\n # # Query for the dates and precipitation observations from the last year.\n # # Convert the query results to a Dictionary and return the json representation of the dictionary.\n max_date = (\n session.query(Measurements.date).order_by(Measurements.date.desc()).first()\n )\n max_date = max_date[0]\n\n last_year = dt.datetime.strptime(max_date, \"%Y-%m-%d\") - dt.timedelta(days=365)\n precipitation_date = (\n session.query(Measurements.date, Measurements.prcp)\n .filter(Measurements.date >= last_year)\n .all()\n )\n # Convert list of tuples into normal list\n precipitation_dict = dict(precipitation_date)\n\n return jsonify(precipitation_dict)\n\n\n@app.route(\"/rainfall\")\ndef rainfall():\n max_date = (\n session.query(Measurements.date).order_by(Measurements.date.desc()).first()\n )\n max_date = max_date[0]\n\n last_year = dt.datetime.strptime(max_date, \"%Y-%m-%d\") - dt.timedelta(days=365)\n precipitation_date = (\n session.query(Measurements.date, Measurements.prcp)\n .filter(Measurements.date >= last_year)\n .all()\n )\n # Convert list of tuples into a dictionary\n precipitation_dict = dict(precipitation_date)\n\n # return render_template(\"rainfall.html\", target=precipitation_dict)\n return jsonify(precipitation_dict)\n\n\n#########################################################################################\n# Route #2(/api/v1.0/stations)\n#########################################################################################\n@app.route(\"/api/v1.0/stations\")\ndef get_stations():\n active_stations = session.query(Stations.station, Stations.name).all()\n # active_stations = session.query(Measurements.station, func.count(Measurements.station)).group_by(Measurements.station).order_by(func.count(Measurements.station).desc()).all()\n active_stations_list = [(i, j) for i, j in active_stations]\n stations_dict = dict(active_stations_list)\n return jsonify(stations_dict)\n # return render_template(\"station.html\", target=stations_dict)\n\n\n#########################################################################################\n# Route #3(/api/v1.0/tobs)\n#########################################################################################\n\n\n@app.route(\"/api/v1.0/tobs\")\ndef tobs():\n \"\"\"Return a list of temperatures for prior year\"\"\"\n # * Query for the dates and temperature observations from the last year.\n # * Convert the query results to a Dictionary using `date` as the key and `tobs` as the value.\n # * Return the json representation of your dictionary.\n max_date = (\n session.query(Measurements.date).order_by(Measurements.date.desc()).first()\n )\n max_date = max_date[0]\n last_year = dt.datetime.strptime(max_date, \"%Y-%m-%d\") - dt.timedelta(days=365)\n # last_year = dt.date(2017, 8, 23) - dt.timedelta(days=365)\n temperature = (\n session.query(Measurements.date, Measurements.tobs, Measurements.station)\n .filter(Measurements.date >= last_year)\n .order_by(Measurements.date)\n .all()\n )\n\n # Create a list of dicts with `date` and `tobs` as the keys and values\n temperature_totals = []\n for result in temperature:\n row = {}\n row[\"date\"] = result[0]\n row[\"tobs\"] = result[1]\n row[\"station\"] = result[2]\n temperature_totals.append(row)\n return jsonify(temperature_totals)\n\n\n#########################################################################################\n# Route #2(/api/v1.0/start)\n#########################################################################################\n\n\n@app.route(\"/api/v1.0/\")\ndef temp_stats_start_date(start):\n\n results = (\n session.query(\n func.min(Measurements.tobs).label(\"min\"),\n func.avg(Measurements.tobs).label(\"avg\"),\n func.max(Measurements.tobs).label(\"max\"),\n )\n .filter(Measurements.date >= start)\n .group_by(Measurements.date)\n .all()\n )\n\n stats_data = []\n\n for r in results:\n stats_dict = {}\n # start_stats_dict['Start Date'] = start\n stats_dict[\"Min Temp\"] = r.min\n stats_dict[\"Avg Temp\"] = r.max\n stats_dict[\"Max Temp\"] = r.avg\n stats_data.append(stats_dict)\n\n return jsonify(stats_data)\n\n\n#########################################################################################\n# Route #2(/api/v1.0/start/end)\n#########################################################################################\n@app.route(\"/api/v1.0//\")\ndef temp_stats_start_end_date(start, end):\n\n results = (\n session.query(\n Measurements.date,\n func.min(Measurements.tobs).label(\"min\"),\n func.avg(Measurements.tobs).label(\"avg\"),\n func.max(Measurements.tobs).label(\"max\"),\n )\n .filter(Measurements.date >= start)\n .filter(Measurements.date <= end)\n .group_by(Measurements.date)\n .all()\n )\n\n start_end_stats_data = []\n for r in results:\n start_end_stats_dict = {}\n start_end_stats_dict[\"Start Date\"] = start\n start_end_stats_dict[\"End Date\"] = end\n start_end_stats_dict[\"Date\"] = r.date\n start_end_stats_dict[\"Min Temp\"] = r.min\n start_end_stats_dict[\"Avg Temp\"] = r.avg\n start_end_stats_dict[\"Max Temp\"] = r.max\n start_end_stats_data.append(start_end_stats_dict)\n\n return jsonify(start_end_stats_data)\n\n\n#########################################################################################\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","sub_path":"solution1/.ipynb_checkpoints/climate_app-checkpoint.py","file_name":"climate_app-checkpoint.py","file_ext":"py","file_size_in_byte":9254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"103223814","text":"from b_trainer.file import training_file_creator as tfc\r\nfrom e_database import question_and_answer as qna\r\nfrom e_database import training_config as db_training_config\r\nfrom flask import jsonify\r\nfrom a_builder.util import bucket_util\r\n\r\ndef search_question_and_bucket_id(request):\r\n req_dict = eval(request.data.decode('utf8'))\r\n user = req_dict['user']\r\n project = req_dict['project']\r\n partner_id = req_dict['partner_id']\r\n admin_yn = req_dict['admin_yn']\r\n readonly_yn = req_dict['readonly_yn']\r\n question_nm = req_dict['question_nm']\r\n answer_num = req_dict['answer_num']\r\n bucket_id = req_dict['bucket_id']\r\n res = qna.search_question_by_question_nm_and_answer_num('', answer_num, user, project, partner_id, admin_yn, readonly_yn)\r\n buckets = db_training_config.get_bucket(user, project).split(\",\")\r\n filtered_answer_num_arr = []\r\n for i in range(len(res)):\r\n question = res[i]['question']\r\n if question_nm in question:\r\n filtered_answer_num_arr.append(res[i]['answer_num'])\r\n res[i]['bucket_id'] = bucket_util.get_bucket_id_by_sentence(buckets, question)\r\n \r\n new_res = []\r\n for i in range(len(res)):\r\n if res[i]['answer_num'] in filtered_answer_num_arr:\r\n new_res.append(res[i])\r\n \r\n res = sorted(new_res, key = lambda x : (x[\"bucket_id\"], x[\"answer_num\"]))\r\n \r\n if bucket_id != '':\r\n new_res = []\r\n for i in range(len(res)):\r\n if res[i]['bucket_id'] == bucket_id:\r\n new_res.append(res[i])\r\n return jsonify(results = new_res)\r\n \r\n return jsonify(results = res)\r\n\r\ndef search_bucket_id(request):\r\n req_dict = eval(request.data.decode('utf8'))\r\n user = req_dict['user']\r\n project = req_dict['project']\r\n buckets = db_training_config.get_bucket(user, project)\r\n exception = str(tfc.exception)\r\n res = {'buckets' : buckets, 'exception' : exception}\r\n \r\n return jsonify(results = res)\r\n","sub_path":"d_service/bucket.py","file_name":"bucket.py","file_ext":"py","file_size_in_byte":1989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"346879724","text":"import bonobo\nimport bonobo_sqlalchemy\nimport os\n\nfrom bonobo.config import Service, use, use_no_input, use_context\nfrom bonobo.config.functools import transformation_factory\nfrom bonobo.constants import NOT_MODIFIED\n\nfrom dateutil import parser as dateparser\n\nimport re\n\nfrom lxml import etree\n\nimport untangle\n\nimport pprint\nimport fs\n\nMAX_DESCRIPTION_LENGTH = 8\n\n\n@transformation_factory\ndef GetOrderXML(glob=[], prefix=\"/etl/ivm\"):\n @use_context\n @use_no_input\n @use('brickftp')\n def _GetOrderXML(context, brickftp):\n\n for file in brickftp.filterdir(prefix, files=glob):\n if file.is_file:\n with brickftp.open(os.path.join(prefix, file.name)) as fp:\n file = untangle.parse(fp)\n\n for transaction in file.NewDataSet.transaction:\n emit = {}\n for element in transaction.get_elements():\n emit[element._name.title()] = element.cdata\n\n yield emit\n\n return _GetOrderXML\n\n\n@transformation_factory\ndef ParseDates(fields):\n fields = list(fields)\n\n def _ParseDates(row):\n modified = False\n for key in fields:\n if key in row:\n date = dateparser.parse(row[key])\n if date:\n row[key] = date.date()\n modified = True\n\n if modified:\n yield row\n else:\n yield NOT_MODIFIED\n\n return _ParseDates\n\n\ndef truncate_description(row):\n if len(row['Vendingmachines_Descr']) > MAX_DESCRIPTION_LENGTH:\n row['Vendingmachines_Descr'] = row[\n 'Vendingmachines_Descr'][:MAX_DESCRIPTION_LENGTH]\n return row\n else:\n return NOT_MODIFIED\n\n\ndef get_graph(**options):\n \"\"\"\n This function builds the graph that needs to be executed.\n\n :return: bonobo.Graph\n\n \"\"\"\n graph = bonobo.Graph()\n\n split_dbs = bonobo.noop\n\n graph.add_chain(\n GetOrderXML(\n prefix=\"/etl/ivm\",\n glob=[\n 'Mozilla_Corporation{timestamp:%Y_%m_%d}*.xml'.format(\n timestamp=options['now'])\n ]),\n ParseDates(['Transactionlog_Tranenddatetime']),\n truncate_description,\n bonobo.UnpackItems(0),\n bonobo.Rename(\n transaction_date='Transactionlog_Tranenddatetime',\n item_number='Transactionlog_Itemnumber',\n transaction_id='Transactionlog_Tlid',\n item_description='Transactionlog_Itemdesc'),\n bonobo.Rename(\n user_id='Transactionlog_User',\n quantity='Transactionlog_Qty',\n transaction_code='Transactionlog_Transcode',\n description='Vendingmachines_Descr',\n ),\n split_dbs,\n _name=\"main\")\n\n #insert into ivm (description, transaction_id, item_number, item_description, user_id, quantity, transaction_date, transaction_code) values\n\n for engine in list(set(options['engine'])):\n graph.add_chain(\n bonobo_sqlalchemy.InsertOrUpdate(\n table_name=options['table_name'] + options['table_suffix'],\n discriminant=('transaction_id', ),\n engine=engine),\n _input=split_dbs)\n\n return graph\n\n\ndef get_services(**options):\n \"\"\"\n This function builds the services dictionary, which is a simple dict of names-to-implementation used by bonobo\n for runtime injection.\n\n It will be used on top of the defaults provided by bonobo (fs, http, ...). You can override those defaults, or just\n let the framework define them. You can also define your own services and naming is up to you.\n\n :return: dict\n \"\"\"\n\n return {}\n\n\n# The __main__ block actually execute the graph.\nif __name__ == '__main__':\n if not __package__:\n from os import sys, path\n top = path.dirname(\n path.dirname(path.dirname(path.dirname(path.abspath(__file__)))))\n sys.path.append(top)\n\n me = []\n me.append(path.split(path.dirname(path.abspath(__file__)))[1])\n me.insert(\n 0,\n path.split(path.dirname(path.dirname(path.abspath(__file__))))[1])\n me.insert(\n 0,\n path.split(\n path.dirname(\n path.dirname(path.dirname(path.abspath(__file__)))))[1])\n\n __package__ = '.'.join(me)\n\n from ... import add_default_arguments, add_default_services\n\n parser = bonobo.get_argument_parser()\n\n add_default_arguments(parser)\n\n parser.add_argument(\n '--table-name', type=str, default=os.getenv('BOOMI_TABLE', 'ivm'))\n\n with bonobo.parse_args(parser) as options:\n services = get_services(**options)\n add_default_services(services, options)\n bonobo.run(get_graph(**options), services=services)\n","sub_path":"mozilla_etl/boomi/ivm/ftp/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":4834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"83998994","text":"from __future__ import print_function\nimport numpy as np\nimport argparse\nimport os\n\nfrom nipy.core.api import Image\nfrom nipy import load_image, save_image\nfrom nipype.interfaces.ants import Registration, ApplyTransforms\nfrom nipype.algorithms.metrics import Similarity\nfrom nipype.interfaces import dcmstack\n\nimport threading\n\n\"\"\"\nGiven an rs-fMR image, calculate the rigid transformations between each pair ofsequential frames. Save the calculated transformations to the specified directory. If the directory doesn't exist, make it.\n\"\"\"\n\nclass pairwiseRegistrationThread(threading.Thread):\n def __init__(self, threadId, frame1, frame2, transformDir):\n threading.Thread.__init__(self)\n self.threadId = threadId\n self.frame1 = frame1\n self.frame2 = frame2\n self.transformDir = transformDir\n\n def run(self):\n print(\"Starting the registration for\", self.frame1, \"and\", self.frame2)\n calculateRigidTransforms(self.frame1, self.frame2, self.transformDir)\n\ndef expandFrames(imgFn, saveDir):\n \"\"\"\n Expand a timeseries image into a set of individual frames in the\n specified directory\n\n Inputs:\n - imgFn: the timeseries image's filename\n - saveDir: the directory in which the frames will be stored\n\n Returns:\n - frameFns: the list of filenames\n \"\"\"\n # Load the image\n img = load_image(imgFn)\n coord = img.coordmap\n frameFns = []\n\n # Make the save directory\n framesDir = saveDir+'/frames/' # need to check for //\n # check for duplicate //\n framesDir = framesDir.replace(\"//\", '/')\n if not os.path.exists(framesDir):\n os.mkdir(framesDir)\n\n for i in xrange(img.get_data().shape[3]):\n frame = img[:,:,:,i].get_data()[:,:,:,None]\n frameImg = Image(frame, coord)\n outFn = framesDir+str(i).zfill(3)+\".nii.gz\"\n save_image(frameImg, outFn)\n frameFns.append(outFn)\n\n return frameFns\n\n\ndef calculateRigidTransforms(frame1, frame2, saveFn):\n \"\"\"\n Given the pair of images, calculate the rigid transformation from frame2\n to frame1 and save it using the saveFn prefix.\n\n Inputs:\n - frame1: image at timepoint n\n - frame2: image at timepoint n+1\n - saveFn: the prefix filename where the transform will be saved\n \"\"\"\n # set up the registration\n reg = Registration()\n reg.inputs.fixed_image = frame1\n reg.inputs.moving_image = frame2\n reg.inputs.output_transform_prefix = saveFn\n reg.inputs.interpolation = 'NearestNeighbor'\n\n reg.inputs.transforms = ['Rigid']\n reg.inputs.transform_parameters = [(0.1,)]\n reg.inputs.number_of_iterations = [[100, 20]]\n reg.inputs.dimension = 3\n reg.inputs.write_composite_transform = False\n reg.inputs.collapse_output_transforms = True\n reg.inputs.initialize_transforms_per_stage = False\n reg.inputs.metric = ['CC']\n reg.inputs.metric_weight = [1]\n reg.inputs.radius_or_number_of_bins = [5]\n reg.inputs.sampling_strategy = ['Random']\n reg.inputs.sampling_percentage = [0.05]\n reg.inputs.convergence_threshold = [1.e-2]\n reg.inputs.convergence_window_size = [20]\n reg.inputs.smoothing_sigmas = [[2,1]]\n reg.inputs.sigma_units = ['vox']\n reg.inputs.shrink_factors = [[2,1]]\n\n reg.inputs.use_estimate_learning_rate_once = [True]\n reg.inputs.use_histogram_matching = [True]\n reg.inputs.output_warped_image = False\n reg.inputs.num_threads = 50\n\n # run the registration\n reg.run()\n\ndef main():\n # Set up the argparser\n parser = argparse.ArgumentParser(description=\"Calculate the rigid transformation between all pairs of frames in the specified image.\")\n parser.add_argument('-i', '--image', type=str, help='Full path to the name of the file to calculate registrations for')\n parser.add_argument('-d', '--savedir', type=str, help='Path of the directory where the registered frames and the transforms will be saved')\n\n args = parser.parse_args()\n\n # Parse the image\n imgFn = args.image \n # Parse the directory\n saveDir = args.savedir\n # check that the directory exists and make it if it doesn't exist\n if not os.path.exists(saveDir):\n os.mkdir(saveDir)\n \n # Expand the image into a set of frames\n frames = expandFrames(imgFn, saveDir)\n\n # Make the directory to store the transforms\n transformsDir = saveDir+'/transforms/'\n transformsDir = transformsDir.replace('//', '/')\n if not os.path.exists(transformsDir):\n os.mkdir(transformsDir)\n\n # Make a list of threads\n threads = []\n\n for i in xrange(len(frames)-1):\n # make the prefix for the transform\n prefix = transformsDir+'rigidTransform_'+str(i).zfill(3)+\"_\"+str(i+1).zfill(3)+\"_\"\n # start a thread to register the images\n t = pairwiseRegistrationThread(i, frames[i], frames[i+1], prefix)\n threads.append(t)\n t.start()\n \n # Make sure the threads are finished running\n for t in threads:\n t.join()\n \n # print a \"finished\" message\n print(\"Finished calculating all rigid transformations for image.\")\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"evaluation_code/calculateRigidTransforms.py","file_name":"calculateRigidTransforms.py","file_ext":"py","file_size_in_byte":5102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"226959469","text":"import os\n\n\nimport numpy as np\nimport pandas as pd\n\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.metrics import roc_auc_score\n\nfrom pepnet import Predictor, SequenceInput, Output\n\ndef make_predictors():\n return {\n \"pool\": Predictor(\n inputs=SequenceInput(name=\"peptide\", length=22, variable_length=True, global_pooling=True),\n outputs=Output(1, activation=\"sigmoid\")),\n \"rnn\": Predictor(\n inputs=SequenceInput(\n name=\"peptide\",\n length=22,\n variable_length=True,\n rnn_layer_sizes=[32]),\n outputs=Output(1, activation=\"sigmoid\")),\n \"rnn2\": Predictor(\n inputs=SequenceInput(\n name=\"peptide\",\n length=22,\n variable_length=True,\n rnn_layer_sizes=[32, 32]),\n outputs=Output(1, activation=\"sigmoid\")),\n \"conv-pool\": Predictor(\n inputs=SequenceInput(\n name=\"peptide\",\n length=22,\n variable_length=True,\n conv_filter_sizes=[9],\n conv_output_dim=16,\n conv_dropout=0.1,\n global_pooling=True),\n outputs=Output(1, activation=\"sigmoid\")),\n \"conv2-pool\": Predictor(\n inputs=SequenceInput(\n name=\"peptide\",\n length=22,\n variable_length=True,\n conv_filter_sizes=[9],\n n_conv_layers=2,\n conv_output_dim=16,\n conv_dropout=0.1,\n global_pooling=True),\n outputs=Output(1, activation=\"sigmoid\")),\n \"conv-rnn\": Predictor(\n inputs=SequenceInput(\n name=\"peptide\",\n length=22,\n variable_length=True,\n conv_filter_sizes=[9],\n conv_output_dim=16,\n conv_dropout=0.1,\n rnn_layer_sizes=[32]),\n outputs=Output(1, activation=\"sigmoid\")),\n \"multiconv-pool\": Predictor(\n inputs=SequenceInput(\n name=\"peptide\",\n length=22,\n variable_length=True,\n conv_filter_sizes=[3, 9],\n conv_output_dim=16,\n conv_dropout=0.1,\n global_pooling=True),\n outputs=Output(1, activation=\"sigmoid\")),\n \"multiconv2-pool\": Predictor(\n inputs=SequenceInput(\n name=\"peptide\",\n length=22,\n variable_length=True,\n conv_filter_sizes=[3, 9],\n n_conv_layers=2,\n conv_output_dim=16,\n conv_dropout=0.1,\n global_pooling=True),\n outputs=Output(1, activation=\"sigmoid\")),\n \"multiconv-rnn\": Predictor(\n inputs=SequenceInput(\n name=\"peptide\",\n length=22,\n variable_length=True,\n conv_filter_sizes=[3, 9],\n conv_output_dim=16,\n conv_dropout=0.1,\n rnn_layer_sizes=[32]),\n outputs=Output(1, activation=\"sigmoid\")),\n \"multiconv2-rnn\": Predictor(\n inputs=SequenceInput(\n name=\"peptide\",\n length=22,\n variable_length=True,\n conv_filter_sizes=[3, 9],\n n_conv_layers=2,\n conv_output_dim=16,\n conv_dropout=0.1,\n rnn_layer_sizes=[32]),\n outputs=Output(1, activation=\"sigmoid\")),\n \"multiconv2-rnn2\": Predictor(\n inputs=SequenceInput(\n name=\"peptide\",\n length=22,\n variable_length=True,\n conv_filter_sizes=[3, 9],\n n_conv_layers=2,\n conv_output_dim=16,\n conv_dropout=0.1,\n rnn_layer_sizes=[32, 32]),\n outputs=Output(1, activation=\"sigmoid\"))\n }\n\n\ndef make_decoy_set(hits, multiple=10):\n from collections import Counter\n import pyensembl\n proteins_dict = pyensembl.ensembl_grch38.protein_sequences.fasta_dictionary\n protein_list = list(proteins_dict.values())\n lengths = Counter()\n for hit in hits:\n lengths[len(hit)] += 1\n\n decoys = set([])\n n_proteins = len(protein_list)\n for length, count in lengths.items():\n for protein_idx in np.random.randint(low=0, high=n_proteins, size=count * multiple):\n protein = protein_list[protein_idx]\n if len(protein) < length:\n continue\n\n i = np.random.randint(low=0, high=len(protein) - length + 1, size=1)[0]\n peptide = protein[i:i + length]\n if \"X\" in peptide or \"U\" in peptide or \"*\" in peptide:\n continue\n decoys.add(peptide)\n return decoys\n\n\nif __name__ == \"__main__\":\n df = pd.read_excel(os.environ[\"CLASS_II_DATA\"])\n hits = {}\n for col in df.columns:\n hits[col] = [s.upper() for s in df[col] if isinstance(s, str) and len(s) > 0 and \"X\" not in s]\n print(col, len(hits[col]))\n n_splits = 3\n epochs = 30\n cv = StratifiedKFold(n_splits=n_splits, shuffle=True)\n\n with open('scores.csv', 'w') as f:\n f.write(\"model,allele,fold,auc\\n\")\n for allele in df.columns:\n print(allele)\n curr_hits = hits[allele]\n # stratify by short/medium/long\n peptide_length_groups = [0 if len(s) < 14 else (2 if len(s) > 17 else 1) for s in curr_hits]\n for fold_idx, (train_idx, test_idx) in enumerate(cv.split(X=curr_hits, y=peptide_length_groups)):\n train_hits = [curr_hits[i] for i in train_idx]\n train_decoys = make_decoy_set(train_hits)\n train = list(train_hits) + list(train_decoys)\n y_train = [True] * len(train_hits) + [False] * len(train_decoys)\n\n test_hits = [curr_hits[i] for i in test_idx]\n test_decoys = make_decoy_set(test_hits)\n test = list(test_hits) + list(test_decoys)\n y_test = [True] * len(test_hits) + [False] * len(test_decoys)\n predictor_dict = make_predictors()\n for model_name in sorted(predictor_dict.keys()):\n model = predictor_dict[model_name]\n print(\"==> Training %s\" % model_name)\n model.fit(train, y_train, epochs=epochs)\n pred = model.predict(test)\n auc = roc_auc_score(y_true=y_test, y_score=pred)\n print(\"==> %s %d/%d %s: %0.4f\" % (\n allele, fold_idx + 1, n_splits, model_name, auc))\n f.write(\"%s,%s,%d,%0.4f\\n\" % (model_name, allele, fold_idx, auc))\n","sub_path":"experiments/eval_architectures_class2_mhc_binding.py","file_name":"eval_architectures_class2_mhc_binding.py","file_ext":"py","file_size_in_byte":6830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"356219779","text":"import socket\r\n\r\nHOST = '192.168.0.13'\r\nPORT = 9999\r\n\r\n# 주소 체계 IPv4, 소켓 타입 TCP 소켓 생성\r\nserver_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n# 포트 사용중이라 연결할 수 없다는 WinError 10048 에러 해결를 위해 필요\r\nserver_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\r\n\r\n# bind 함수에 (ip, port) 튜플 전달\r\n# HOST는 hostname, ip address, 빈 문자열 ''이 될 수 있다.\r\n# 빈 문자열이면 모든 네트워�� 인터페이스로부터의 접속을 허용\r\n# PORT는 1-65535 사이의 숫자를 사용할 수 있다.\r\nserver_socket.bind((HOST, PORT))\r\n\r\n# 총 1개의 동시접속을 허용. 입력하지 않을 시 파이썬이 자의적으로 판단\r\nserver_socket.listen(1)\r\n\r\n# accept 함수에서 대기하다가 클라이언트가 접속하면 새로운 소켓과 클라이언트의 AF를 반환\r\nconnection_socket, addr = server_socket.accept()\r\nprint('Connected by', addr)\r\n\r\ni=10\r\n# 무한루프를 돌면서\r\nwhile True:\r\n\r\n # 클라이언트가 보낸 메시지를 수신하기 위해 대기합니다.\r\n recv_msg = connection_socket.recv(1024) #1024바이트\r\n print('Received from: ', addr, recv_msg.decode('utf-8'))\r\n\r\n # 메시지 전송\r\n send_msg = input(\"Sending: \")\r\n connection_socket.send(send_msg.encode('utf-8'))\r\n\r\n i = i-1\r\n if i == 0:\r\n break\r\n\r\n# 소켓을 닫습니다.\r\nconnection_socket.close()\r\nserver_socket.close()\r\n","sub_path":"echo-Server.py","file_name":"echo-Server.py","file_ext":"py","file_size_in_byte":1467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"299208034","text":"import torch\n\ndef str_to_labelvec(string, max_str_len):\n '''\n Truncates the string if str_len > max_str_len to max_str_len.\n Returns a vector with the corresponding alphabet character in each position\n of the string.\n '''\n string = string.lower()\n alphabet = \"abcdefghijklmnopqrstuvwxyz0123456789-,;.!?:'\\\"/\\\\|_@#$%^&*~`+-=<>()[]{} \"\n alpha_to_num = {k:v+1 for k,v in zip(alphabet, range(len(alphabet)))}\n labels = torch.zeros(max_str_len).long()\n max_i = min(max_str_len, len(string))\n for i in range(max_i):\n labels[i] = alpha_to_num.get(string[i], alpha_to_num[' '])\n\n return labels\n\ndef labelvec_to_onehot(labels):\n '''\n Returns the one hot encoding of the character labels\n '''\n labels = torch.LongTensor(labels).unsqueeze(1)\n \n one_hot = torch.zeros(labels.size(0), 71).scatter_(1, labels, 1.)\n # ignore zeros in one-hot mask (position 0 = empty one-hot)\n one_hot = one_hot[:, 1:]\n one_hot = one_hot.permute(1,0)\n return one_hot\n\ndef prepare_text(string, max_str_len=201):\n '''\n Converts a text description from string format to one-hot tensor format.\n '''\n labels = str_to_labelvec(string, max_str_len)\n one_hot = labelvec_to_onehot(labels)\n return one_hot","sub_path":"utils/text_utils.py","file_name":"text_utils.py","file_ext":"py","file_size_in_byte":1254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"450117016","text":"import os\nfrom discord.ext import commands\nfrom module import twitter\n\n\nclass Twitter(commands.Cog):\n def __init__(self, client):\n self.client = client\n pass\n\n @commands.command()\n @commands.is_owner()\n async def tweet(self, ctx, *, context):\n \"\"\"Tweets a status update on Twitter [Format: %tweet (status)]\"\"\"\n twitter.update_status(context)\n f = open(\"twitterlink.txt\", \"r\")\n final_url = f.read()\n f.close()\n await ctx.send(\"> Your Tweet has been successfully uploaded to {}\".format(final_url))\n os.remove(\"twitterlink.txt\")\n\n @commands.command()\n @commands.is_owner()\n async def deletetweet(self, ctx, *, context):\n \"\"\"Delete a Tweet by it's ID [Format: %deletetweet (id)]\"\"\"\n twitter.delete_status(context)\n await ctx.send(\"> The Tweet ID **{}** has been successfully deleted.\".format(context))\n\n @commands.command()\n @commands.is_owner()\n async def recenttweets(self, ctx, *, context=20):\n \"\"\"Show Most Recents Tweets[Format: %recenttweets (amount)]\"\"\"\n twitter.recent_tweets(int(context))\n f = open(\"recent_tweets.txt\", \"r\")\n list = f.read()\n f.close()\n await ctx.send(\"> Here are the past **{}** tweets:\\n{}\".format(context, list))\n os.remove(\"recent_tweets.txt\")\n","sub_path":"module/Twitter2.py","file_name":"Twitter2.py","file_ext":"py","file_size_in_byte":1333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"476159014","text":"# -*- coding: utf-8 -*-\n__author__ = 'saf'\n\nimport logging\n\nfrom flask import render_template, redirect, url_for, request, flash, jsonify\nfrom flask.views import View\nfrom sqlalchemy.exc import IntegrityError\n\nfrom arps_old.models import Repository, Release, TaskUpdateRepoMetadataMetaStore, CeleryResult\nfrom arps_old.restserver import app, db\nfrom .taskresult import TaskResultView\nfrom ..forms import RepositoryForm\n\nlog = logging.getLogger(__name__)\n\n\nclass RepositoryView(View):\n methods = ['GET', 'POST']\n\n endpoint_edit = 'endpoint_repository_edit'\n endpoint_list = 'endpoint_repository_list'\n endpoint_delete = 'endpoint_repository_delete'\n endpoint_show = 'endpoint_repository_show'\n endpoint_createrepo = 'endpoint_repository_createrepo'\n endpoint_ajax_results = 'endpoint_repository_ajax_results'\n\n template_edit = 'repository/repository_edit.html'\n template_list = 'repository/repository_list.html'\n template_show = 'repository/repository_show.html'\n template_delete = None\n\n def dispatch_request(self, *args, **kwargs):\n release_id = kwargs.get('release_id', None)\n repository_id = kwargs.get('repository_id', None)\n args = (release_id, repository_id)\n if request.endpoint == self.endpoint_ajax_results:\n return self.ajax_results(*args)\n if request.endpoint == self.endpoint_createrepo:\n return self.create_repo(*args)\n elif request.endpoint == self.endpoint_list:\n return self.list()\n elif request.endpoint == self.endpoint_delete:\n return self.delete_object(*args)\n elif request.endpoint == self.endpoint_show:\n return self.show_object(*args)\n elif request.endpoint == self.endpoint_edit:\n if release_id is None and repository_id is None:\n if request.method == 'GET':\n return self.new_form()\n elif request.method == 'POST':\n return self.create_object()\n else:\n if request.method == 'POST':\n return self.update_object(*args)\n elif request.method == 'GET':\n return self.edit_form(*args)\n else:\n self.return_404(*args)\n\n def ajax_results(self, release_id, repository_id):\n T = TaskUpdateRepoMetadataMetaStore\n results_for_repo = T.query.join(T.result).filter(T.release_id == release_id, T.repository_id == repository_id).order_by(CeleryResult.start.desc()).all()\n results_for_repo = [r.result for r in results_for_repo]\n results = []\n for result in results_for_repo:\n results.append(result.json)\n results[-1]['detail_url'] = url_for(TaskResultView.endpoint, id=result.id)\n\n return jsonify({'data': results})\n\n def create_repo(self, release_id, repository_id):\n repository = Repository.query.get_or_404((release_id, repository_id))\n result = repository.createrepo()\n return redirect(url_for(self.endpoint_show, release_id=release_id, repository_id=repository_id))\n\n def list(self):\n repositories = Repository.query.all()\n return render_template(self.template_list, repositories=repositories)\n\n def show_object(self, release_id, repository_id):\n repository = Repository.query.get_or_404((release_id, repository_id))\n T = TaskUpdateRepoMetadataMetaStore\n results_for_repo = T.query.join(T.result).filter(T.release_id == release_id, T.repository_id == repository_id).order_by(CeleryResult.start.desc()).all()\n results = [r.result for r in results_for_repo]\n return render_template(self.template_show, repository=repository, results=results)\n\n\n def edit_form(self, release_id, repository_id):\n repository = Repository.query.get_or_404((release_id, repository_id))\n form = RepositoryForm(obj=repository)\n form.users.data = [user.id for user in repository.users]\n if repository.origin is None:\n form.origin.data = '---'\n else:\n form.origin.data = '{}-{}'.format(repository.origin_release_id, repository.origin_repository_id)\n return render_template(self.template_edit, form=form, repository=repository)\n\n def handle_valid_form(self, form, repository):\n #\n # Handle users\n #\n repository.set_users(form.users.data)\n\n #\n # Handle Origin Repository\n #\n if form.origin.data == '---':\n repository.origin_release_id = None\n repository.origin_repository_id = None\n else:\n release_id, repository_id = form.origin.data.split('-')\n release_id = int(release_id)\n repository_id = int(repository_id)\n repository.origin_release_id = release_id\n repository.origin_repository_id = repository_id\n\n form.populate_obj(repository)\n\n with db.session.no_autoflush:\n release_name = Release.query.get(repository.release_id).name\n repository_name = repository.name\n\n db.session.add(repository)\n\n try:\n db.session.commit()\n flash(\"Repository {} updated.\".format(repository.name))\n except IntegrityError as e:\n if 'duplicate key value violates unique constraint \"repositories_release_id_name_key\"' in str(e):\n flash_message = \"An repository named {} already exists in release {}\".format(repository_name, release_name)\n form.release_id.errors.append(flash_message)\n else:\n flash_message = str(e)\n db.session.rollback()\n flash(flash_message, category='danger')\n return render_template(self.template_edit, form=form, repository=repository)\n return redirect(url_for(self.endpoint_show, release_id=repository.release_id, repository_id=repository.repository_id))\n\n def update_object(self, release_id, repository_id):\n repository = Repository.query.get_or_404((release_id, repository_id))\n form = RepositoryForm(obj=repository)\n if form.validate_on_submit():\n return self.handle_valid_form(form, repository)\n form.users.data = [user.id for user in repository.users]\n return render_template(self.endpoint_edit, form=form, repository=repository)\n\n def new_form(self):\n form = RepositoryForm()\n return render_template(self.template_edit, form=form)\n\n def create_object(self):\n form = RepositoryForm()\n if form.validate_on_submit():\n repository = Repository()\n return self.handle_valid_form(form, repository)\n return render_template(self.template_edit, form=form)\n\n def delete_object(self, release_id, repository_id):\n repository = Repository.query.get_or_404((release_id, repository_id))\n db.session.delete(repository)\n db.session.commit()\n flash('Repository {} deleted'.format(repository.name))\n return redirect(url_for(self.endpoint_list))\n\n\nrepository_view = RepositoryView.as_view(RepositoryView.endpoint_edit)\napp.add_url_rule('/repository', view_func=repository_view, defaults={'release_id': None, 'repository_id': None}, endpoint=RepositoryView.endpoint_edit)\napp.add_url_rule('/repository//', view_func=repository_view, endpoint=RepositoryView.endpoint_show)\napp.add_url_rule('/repository///edit', view_func=repository_view, endpoint=RepositoryView.endpoint_edit)\napp.add_url_rule('/repository///delete', view_func=repository_view, endpoint=RepositoryView.endpoint_delete)\napp.add_url_rule('/repository///createrepo', view_func=repository_view, endpoint=RepositoryView.endpoint_createrepo)\napp.add_url_rule('/repository/ajax///results', view_func=repository_view, endpoint=RepositoryView.endpoint_ajax_results)\napp.add_url_rule('/repositories', view_func=repository_view, endpoint=RepositoryView.endpoint_list)\n","sub_path":"arps_old/restserver/views/repository.py","file_name":"repository.py","file_ext":"py","file_size_in_byte":8079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"518522914","text":"#!/usr/bin/python\n\nimport pybeam\nimport socket\nimport sys\n\nhost = \"10.16.188.11\"\nport = 5007\n\nsoc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nsoc.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\nprint('socket created', str(soc))\n\ntry:\n soc.bind((host, port))\n print('Socket bind complete', flush=True)\n soc.listen(5)\n print('Server started and listening', flush=True)\nexcept socket.error as err:\n import sys\n print('Bind failed.. Error: {}'.format(str(err)), flush=True)\n sys.exit(1)\n\nwhile True:\n conn, addr = soc.accept()\n ip, port = str(addr[0]), str(addr[1])\n try:\n print('Accepting connection from {}:{}'.format(ip, port), flush=True)\n while True:\n data = conn.recv(1024).decode()\n print('Data received: {}'.format(data), flush=True)\n if data == \"PLAYBACK\":\n conn.send('RECORD'.encode())\n print('Playing back!', flush=True)\n pybeam.playback_wav_dir(\"bform_out6PM\")\n sys.exit(0)\n except:\n import traceback\n traceback.print_exc()\n\n\n","sub_path":"measureSoundServer.py","file_name":"measureSoundServer.py","file_ext":"py","file_size_in_byte":1102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"302968532","text":"\nimport tensorflow as tf\nimport numpy as np\nimport os\nimport random\nimport matplotlib.pyplot as plt\nfrom datetime import datetime\nfrom models_gan import *\nfrom models_zero_shot import Composite_model32\nfrom utils.batch_making import *\nfrom utils.zero_shot_loss_functions import *\nfrom utils.gan import *\nfrom utils.training import *\n\nselected_labels = pickle.load(open('pickle_files/selected_labels.pickle', 'rb'))\ndata_train = pickle.load(open('pickle_files/all_data_train.pickle', 'rb'))\ndata_selected = [x for x in data_train if x[1] not in selected_labels]\n\n# Change here if necessary\ncheckpoint_path = 'fixed_size_GAN_minibatch_checkpoints/'\n\nlr = 0.0005\nbeta1 = 0.5\nbeta2 = 0.999\ngen_dims = 100\nnoise_den = 1.0\n\nepochs = 20\nbatch_size = 128\nIMAGE_SIZE = 32\nWORD2VEC_SIZE = 200\n\nprint('Creating reprs')\nlabels_reprs = [(word, find_word_vec(normalize_label(word))) for word in selected_labels]\nlabels_reprs *= 2\nprint('Done')\n\nfake_label_batch = [find_word_vec('fake')] * batch_size\n\nsamples = []\n\nif not os.path.isdir(checkpoint_path): os.mkdir(checkpoint_path)\n\n\ndef build_loss(model_output, target_labels):\n \"\"\"Change here which loss function you wish to use\"\"\"\n multiplying_term = 10.0\n return tf.sigmoid(multiplying_term * build_tendency_loss(model_output, target_labels, selected_labels))\n\n\ntf.reset_default_graph()\n\nreal_input = tf.placeholder(tf.float32,\n shape=[batch_size, 32, 32, 3],\n name='x-images')\nreal_label = tf.placeholder(tf.float32, shape=[batch_size, WORD2VEC_SIZE], name='image-labels')\nfake_label = tf.placeholder(tf.float32, shape=[batch_size, WORD2VEC_SIZE], name='fake-label')\ngen_input = tf.placeholder(tf.float32, shape=[batch_size, gen_dims], name='z-noise')\n\nreal_label_test = tf.placeholder(tf.float32, shape=[None, WORD2VEC_SIZE], name='image-labels')\ngen_input_test = tf.placeholder(tf.float32, shape=[None, gen_dims], name='z-noise')\n\ngenerator = DCGAN_generator_conditional(gen_input, real_label)\ngen_images = generator.out\n\nwith tf.name_scope('zero_shot_discriminator'):\n zs_model_real = Composite_model32(real_input, WORD2VEC_SIZE, reuse=False)\n zs_model_output_real = zs_model_real.projection_layer\n zs_model_image_repr_real = zs_model_real.image_repr\n\n zs_model_fake = Composite_model32(gen_images, WORD2VEC_SIZE, reuse=True)\n zs_model_output_fake = zs_model_fake.projection_layer\n zs_model_image_repr_fake = zs_model_fake.image_repr\n\nwith tf.name_scope('minibatch_discriminator'):\n discriminator_minibatch_real = Minibatch_discriminator(zs_model_image_repr_real, reuse=False,\n num_kernels=1)\n feats_mb_real = discriminator_minibatch_real.minibatch_features\n real_minibatch_output = discriminator_minibatch_real.logits\n\n discriminator_minibatch_fake = Minibatch_discriminator(zs_model_image_repr_fake, reuse=True,\n num_kernels=1)\n feats_mb_fake = discriminator_minibatch_fake.minibatch_features\n fake_minibatch_output = discriminator_minibatch_fake.logits\n\nrelevance_fat = 0.30\n# generator loss\ngen_loss = (1 - relevance_fat)*build_loss(zs_model_output_fake, real_label)\ngen_loss_minibatch = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=fake_minibatch_output,\n labels=tf.ones_like(fake_minibatch_output)))\ngen_loss += relevance_fat * gen_loss_minibatch\n\n# discriminator loss\ndisc_loss_real_images = build_loss(zs_model_output_real, real_label)\ndisc_loss_gen_images = build_loss(zs_model_output_fake, fake_label)\n\ndisc_minibatch_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=fake_minibatch_output,\n labels=tf.zeros_like(\n fake_minibatch_output)))\ndisc_minibatch_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=real_minibatch_output,\n labels=tf.ones_like(\n real_minibatch_output)))\n\nfake_importance = 0.6\ndisc_loss = disc_loss_real_images + disc_loss_gen_images\ndisc_loss_minibatch = (1.0 - fake_importance)*disc_minibatch_loss_real + fake_importance*disc_minibatch_loss_fake\n\nprint('TRAINABLE VARS', tf.trainable_variables())\n# get the variables for the generator and discriminator\ngenerator_variables = [var for var in tf.trainable_variables() if var.name.startswith('generator')]\ndiscriminator_variables = [var for var in tf.trainable_variables() if\n not var.name.startswith('generator') and not 'minibatch' in var.name]\nminibatch_discriminator_variables = [var for var in tf.trainable_variables() if 'minibatch' in var.name]\n\nprint('---------------------------------')\nprint('Generator variables', generator_variables)\nprint('---------------------------------')\nprint('Discriminator variables', discriminator_variables)\nprint('---------------------------------')\nprint('Minibatch variables', minibatch_discriminator_variables)\nprint('---------------------------------')\n\n# setup the optimizers\n# comtrol for the global sample mean and variance\nwith tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):\n gradients = tf.gradients(gen_loss, generator_variables)\n\n generator_optimizer = tf.train.AdamOptimizer(learning_rate=lr, beta1=beta1, beta2=beta2).minimize(gen_loss,\n var_list=generator_variables)\n discriminator_optimizer = tf.train.AdamOptimizer(learning_rate=lr, beta1=beta1, beta2=beta2).minimize(disc_loss,\n var_list=discriminator_variables)\n discriminator_minibatch_optimizer = tf.train.AdamOptimizer(learning_rate=lr, beta1=beta1, beta2=beta2).minimize(\n disc_loss_minibatch, var_list=minibatch_discriminator_variables)\nsaver = tf.train.Saver()\n\nwith tf.Session() as sess:\n # initialize the variables\n sess.run(tf.global_variables_initializer())\n\n # train the network\n for epoch in range(epochs):\n print('Current epoch', epoch)\n train_generator = get_batches(data_selected, batch_size, IMAGE_SIZE, word2vec=True)\n\n counter = 0\n\n for batch_xs, batch_ys in train_generator:\n normalized_batch = normalize_batch(batch_xs)\n # generate the noise\n noise = np.random.uniform(low=-1.0 / noise_den, high=1.0 / noise_den, size=(batch_size, gen_dims))\n\n # feed the noise through the generator\n sess.run(generator_optimizer, feed_dict={gen_input: noise, real_input: normalized_batch,\n real_label: batch_ys, fake_label: fake_label_batch})\n\n # feed the channel and the noise to the discriminator\n sess.run(discriminator_optimizer, feed_dict={gen_input: noise, real_input: normalized_batch,\n real_label: batch_ys, fake_label: fake_label_batch})\n\n sess.run(discriminator_minibatch_optimizer, feed_dict={gen_input: noise, real_input: normalized_batch,\n real_label: batch_ys, fake_label: fake_label_batch})\n\n # sample more noise\n sample_noise = np.random.uniform(low=-1.0 / noise_den, high=1.0 / noise_den,\n size=(len(labels_reprs), gen_dims))\n\n repr_vecs = [x[1] for x in labels_reprs]\n repr_texts = [x[0] for x in labels_reprs]\n\n # generate images\n generator_test = DCGAN_generator_conditional(gen_input_test, real_label_test, reuse=True, training=False)\n gen_images_test = generator_test.out\n gen_samples = sess.run(gen_images_test, feed_dict={gen_input_test: sample_noise,\n real_label_test: repr_vecs})\n\n plt.close(\"all\")\n view_samples(gen_samples, repr_texts, 6, figsize=(10, 5))\n plt.savefig(os.path.join(checkpoint_path, \"epoch%d.svg\" % (epoch)), format='svg')\n print('Sample figure generated')\n\n checkpoint_name = os.path.join(checkpoint_path, 'model_epoch' + str(epoch) + '.ckpt')\n save_path = saver.save(sess, checkpoint_name)\n\n print(\"{} Model checkpoint saved at {}\".format(datetime.now(), checkpoint_name))\n","sub_path":"train_fixed_size_gan_minibatch.py","file_name":"train_fixed_size_gan_minibatch.py","file_ext":"py","file_size_in_byte":8730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"199956835","text":"# Definition for a binary tree node\nclass TreeNode:\n\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\nclass Solution:\n\n # @param root, a tree node\n # @return an integer\n\n def maxPathSum(self, root):\n stack, pathSum, mpSum = [root], {}, None\n while stack:\n left, right = stack[-1].left, stack[-1].right\n if left and left not in pathSum:\n stack.append(left)\n elif right and right not in pathSum:\n stack.append(right)\n else:\n node = stack.pop()\n if left is None and right is None:\n pathSum[node] = node.val\n if mpSum is None or mpSum < pathSum[node]:\n mpSum = pathSum[node]\n elif left and right:\n pathSum[node] = max(pathSum[left], pathSum[right], 0) + \\\n node.val\n mpSum = max(pathSum[node],\n pathSum[left] + pathSum[right] + node.val,\n mpSum)\n elif left:\n pathSum[node] = node.val + max(pathSum[left], 0)\n mpSum = max(pathSum[node], mpSum)\n else:\n pathSum[node] = node.val + max(pathSum[right], 0)\n mpSum = max(pathSum[node], mpSum)\n return mpSum\n","sub_path":"leetcode/python/maxPathSum.py","file_name":"maxPathSum.py","file_ext":"py","file_size_in_byte":1438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"143953518","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Filename: xelog.py\n# Author: chenkun\n# CreateDate: 2013-12-26\n\nimport logging\nimport logging.handlers\nimport inspect\n\n(XELOG_2_CONSOLE, XELOG_2_FILE, XELOG_2_NETWORK) = range(3)\n(CRITICAL, ERROR, WARNING, INFO, DEBUG) = (logging.CRITICAL,\n logging.ERROR, \n logging.WARNING, \n logging.INFO, \n logging.DEBUG\n )\n\n#mode = XELOG_2_CONSOLE, args is None\n#mode = XELOG_2_FILE, args[0] is file name\n#mode = XELOG_2_NETWORK, args[0] is target ip, args[1] is target port\ndef xeLOG_init(name, tostd, mode, level, *args):\n logger = logging.getLogger(name)\n logger.setLevel(level)\n formatter = logging.Formatter( \n '%(asctime)s;<%(levelname)s>%(name)s(%(filename)s:%(lineno)d:%(threadName)s):%(message)s', \n '%Y-%m-%d %H:%M:%S'\n )\n if mode == XELOG_2_CONSOLE or tostd:\n stream_handler = logging.StreamHandler()\n stream_handler.setFormatter(formatter)\n logger.addHandler(stream_handler)\n if mode == XELOG_2_FILE:\n file_handler = logging.FileHandler(args[0])\n file_handler.setFormatter(formatter)\n logger.addHandler(file_handler)\n if mode == XELOG_2_NETWORK:\n net_handler = logging.handlers.DatagramHandler(args[0], args[1])\n net_handler.setFormatter(formatter)\n logger.addHandler(net_handler)\n return logger\n\ndef xeLOG_get_object(name):\n if name == None:\n return None\n return logging.getLogger(name)\n\n#xelog test.\nif __name__ == \"__main__\":\n logger1 = xeLOG_init(\"test\", True, XELOG_2_FILE, \n INFO, 'test.txt'\n )\n logger1.info(\"abccddd %d\", 1)\n logger2 = xeLOG_get_object(\"test.tt\")\n logger2.error(\"ccd\")\n","sub_path":"projects/xelog.py","file_name":"xelog.py","file_ext":"py","file_size_in_byte":1769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"650632246","text":"from operator import itemgetter\r\nimport json\r\n\r\nclass HostSite():\r\n\t\r\n\tdef __init__(self):\r\n\t\tself.id = None\r\n\t\tself.name = None\r\n\t\tself.address = None\r\n\t\tself.city = None\r\n\t\tself.province = None\r\n\t\tself.postal_code = None\r\n\t\tself.phone = None\r\n\t\tself.email = None\r\n\t\tself.coordinatorIDs = None\r\n\t\tself.hours_of_operation = {}\r\n\r\n\r\n\tdef __init__(self, name, address, city, province, postal_code, coordinatorIDs, hours_of_operation):\r\n\t\t\r\n\t\tvalues = [None, '']\r\n\r\n\t\tself.phone = None\r\n\t\tself.email = None\r\n\r\n\t\tif name in values:\r\n\t\t\tself.name = None\r\n\t\telse:\r\n\t\t\tself.name = name\r\n\r\n\t\tif address in values:\r\n\t\t\tself.address = None\r\n\t\telse:\r\n\t\t\tself.address = address\r\n\r\n\t\tif city in values:\r\n\t\t\tself.city = None\r\n\t\telse:\r\n\t\t\tself.city = city\r\n\r\n\t\tif province in values:\r\n\t\t\tself.province = None\r\n\t\telse:\r\n\t\t\tself.province = province\r\n\r\n\t\tif postal_code in values:\r\n\t\t\tself.postal_code = None\r\n\t\telse:\r\n\t\t\tself.postal_code = postal_code\r\n\r\n\t\tif coordinatorIDs in values:\r\n\t\t\tself.coordinatorIDs = None\r\n\t\telse:\r\n\t\t\tself.coordinatorIDs = coordinatorIDs\r\n\r\n\t\tif hours_of_operation in values:\r\n\t\t\tself.hours_of_operation = None\r\n\t\telse:\r\n\t\t\tself.hours_of_operation = hours_of_operation","sub_path":"build/lib/gardenfreshbox/model/hostsite.py","file_name":"hostsite.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"340919780","text":"\"\"\"\n앞뒤를 뒤집어도 똑같은 문자열을 팰린드롬(palindrome)이라고 합니다.\n문자열 s가 주어질 때, s의 부분문자열(Substring)중 가장 긴 팰린드롬의 길이를 return 하는 solution 함수를 완성해 주세요.\n\n예를들면, 문자열 s가 abcdcba이면 7을 return하고 abacde이면 3을 return합니다.\n\"\"\"\ndef reverse(s):\n '''\n Extended Slices\n >>> L = range(10)\n >>> L[::2]\n [0, 2, 4, 6, 8]\n >>> L[::-1]\n [9, 8, 7, 6, 5, 4, 3, 2, 1, 0]\n '''\n return s[::-1]\n\ndef ispalindrome(substring, len_substring):\n \"\"\"\n 먼저 모든 단어는 길이가 짝수 아니면 홀수겠지요? \n 따라서 앞의 N/2와 그에 상대되는 후반부의 element와 비교하여 끝까지 같음을 확인하면 그 단어나 문장은 Palindrome이 되겠지요.\n \"\"\"\n for k in range((len_substring//2)):\n if substring[k] != substring[-k-1]:\n return False\n return True\n\ndef solution(s):\n \"\"\"\n 효율성 테스트 실패\n 어떻게 개선할 수 있을까?\n \"\"\"\n longest_palindrome = 0\n for i in range(len(s)):\n for j in range(i, len(s)):\n substring = s[i:j+1]\n len_substring = j+1-i\n\n # check palidrome\n substring_palindrome = ispalindrome(substring, len_substring)\n\n # update longest length\n if substring_palindrome and len_substring > longest_palindrome:\n longest_palindrome = len_substring\n return longest_palindrome\n\n\ns = \"abcdcba\"\nprint(solution(s))\n","sub_path":"python/algorithm/find_longest_palindrome.py","file_name":"find_longest_palindrome.py","file_ext":"py","file_size_in_byte":1555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"458454521","text":"import sys\nimport sqlite3\n\nfrom PyQt4.QtSql import *\nfrom PyQt4.QtCore import *\nfrom PyQt4.QtGui import *\n\nclass DisplayTable(QWidget):\n \"\"\"this class will be used to display tables from the database\"\"\"\n \n def __init__(self):\n super().__init__()\n self.stacked_layout = QStackedLayout()\n self.setLayout(self.stacked_layout)\n self.db = None\n self.model = None\n self.open_database()\n\n\n def display_results_layout(self):\n self.results_table = QTableView()\n self.results_layout = QVBoxLayout()\n self.results_layout.addWidget(self.results_table)\n self.results_widget = QWidget()\n self.results_widget.setLayout(self.results_layout)\n self.stacked_layout.addWidget(self.results_widget)\n\n def open_database(self):\n if self.db:\n self.close_database()\n self.db = QSqlDatabase.addDatabase(\"QSQLITE\")\n self.db.setDatabaseName(\"restaurant.db\")\n opened_ok = self.db.open()\n return opened_ok\n\n def show_results(self,query):\n self.display_results_layout()\n if not self.model or not isinstance(self.model,QSqlQueryModel):\n self.model = QSqlQueryModel()\n self.model.setQuery(query)\n self.results_table.setModel(self.model)\n self.results_table.show()\n\n def show_table(self,tableName):\n self.display_results_layout()\n if not self.model or not isinstance(self.model,QSqlTableModel):\n self.model = QSqlTableModel()\n self.model.setTable(tableName)\n self.model.select()\n self.results_table.setModel(self.model)\n self.results_table.show()\n\n def refresh(self):\n \n self.results_table.setModel(self.model)\n self.model.select()\n \n\n \nif __name__ == \"__main__\":\n application = QApplication(sys.argv)\n window = DisplayTable()\n window.show()\n window.raise_()\n application.exec()\n","sub_path":"Implementation/GUI/table_display.py","file_name":"table_display.py","file_ext":"py","file_size_in_byte":1947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"14259939","text":"import json\nfrom asgiref.sync import async_to_sync\nfrom channels.generic.websocket import WebsocketConsumer\nfrom .models import *\n\n\nclass GameConsumer(WebsocketConsumer):\n\n http_user = True\n\n def connect(self):\n self.room_name = self.scope['url_route']['kwargs']['game_name']\n self.room_group_name = 'game_%s' % self.room_name\n\n # Join room group\n async_to_sync(self.channel_layer.group_add)(\n self.room_group_name,\n self.channel_name\n )\n\n self.accept()\n\n def disconnect(self, close_code):\n # Leave room group\n\n async_to_sync(self.channel_layer.group_discard)(\n self.room_group_name,\n self.channel_name\n )\n\n # Receive message from WebSocket\n def receive(self, text_data):\n text_data_json = json.loads(text_data)\n message = text_data_json['message']\n cell = text_data_json['cell']\n turn = text_data_json['turn']\n # Send message to room group\n async_to_sync(self.channel_layer.group_send)(\n self.room_group_name,\n {\n 'type': 'game_message',\n 'message': message,\n 'cell': cell,\n 'turn': turn,\n }\n )\n\n # Receive message from room group\n def game_message(self, event):\n message = event['message']\n cell = event['cell']\n turn = event['turn']\n # Send message to WebSocket\n self.send(text_data=json.dumps({\n 'message': message,\n 'cell': cell,\n 'turn': turn,\n }))\n","sub_path":"main/consumers.py","file_name":"consumers.py","file_ext":"py","file_size_in_byte":1600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"600742414","text":"#!/usr/bin/env python\n# coding:utf-8\n\nimport os\n\nsettings = {\n \"debug\": True,\n \"template_path\": os.path.join(os.path.dirname(__file__), 'html'),\n \"cookie_secret\": \"61oETzKXQAGaYdkL5gEmGeJJFuYh7EQnp2XdTP1o/Vo=\",\n \"login_url\": \"/login\",\n}\n\nport = 8088\n\nNAME = \"OnlySync\"\n","sub_path":"options.py","file_name":"options.py","file_ext":"py","file_size_in_byte":281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"206294751","text":"# -*- coding: utf-8 -*-\n# @Author: longfengpili\n# @Date: 2023-08-14 11:22:47\n# @Last Modified by: longfengpili\n# @Last Modified time: 2023-08-16 11:05:20\n# @github: https://github.com/longfengpili\n\nimport cloudscraper\nfrom lxml.etree import Element as elem\n\nfrom .base import PhoneBase\n\nimport logging\nglogger = logging.getLogger(__name__)\n\n\nclass Kimovil(PhoneBase):\n KIND_PATH = '//div[@class=\"item-wrap\"]'\n PKIND_SELECTOR = 'all'\n KIND_MPATHS = (\n ('mname', ('.//div[@class=\"title\"]/text()', )),\n ('purl', ('.//a[@class=\"device-link\"]/@href', )),\n ('prelease', ('.//div[@class=\"status available\"]/text()', )),\n )\n\n def __init__(self):\n super(Kimovil, self).__init__(self.KIND_PATH, self.PKIND_SELECTOR, self.KIND_MPATHS)\n\n @property\n def base_url(self):\n base_url = 'https://www.kimovil.com/en/compare-smartphones'\n return base_url\n\n @property\n def scraper(self):\n scraper = cloudscraper.create_scraper()\n return scraper\n\n def base_request(self, url: str):\n # print(url)\n res = self.scraper.get(url)\n res = res.text\n return res\n\n def request(self, pname: str = None, page: int = None):\n param = f'name.{pname}' if pname else f'page.{page}'\n url = f\"{self.base_url}/{param}\"\n res = self.base_request(url)\n return url, res\n \n def parse_phone(self, phone: elem):\n mpaths = (\n ('screen', ('.//li[@class=\"item item-screen k-rowspan-2\"]//span/text()',)),\n ('size', ('.//li[@class=\"item item-size k-rowspan-2\"]//span/text()',)),\n ('soc', ('.//li[@class=\"item item-soc k-rowspan-2\"]//span/text()',)),\n ('antutu', ('.//li[@class=\"item item-antutu\"]//span/text()',)),\n ('battery', ('.//li[@class=\"item item-battery\"]//span/text()',)),\n ('os', ('.//li[@class=\"item item-os k-rowspan-2\"]//span/text()',)),\n )\n\n phone_xpath = './/ul[@class=\"kiui-grid k-main k-auto-column device-mini-datasheet device-mini-datasheet-sheet\"]'\n phone = self.get_elem(phone, phone_xpath)[0]\n phone_info = self.get_elem_mpath(phone, *mpaths)\n\n return phone_info\n","sub_path":"device_info/devices/device/kimovil.py","file_name":"kimovil.py","file_ext":"py","file_size_in_byte":2214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"305927755","text":"class Solution:\n def findDuplicates(self, nums: List[int]) -> List[int]:\n \"\"\"\n Time complexity:\n The time complexity of the above algorithm is O(n).\n\n Space complexity:\n Ignoring the space required for storing the duplicates, the\n algorithm runs in constant space O(1).\n \"\"\"\n\n i = 0\n duplicate_numbers = []\n\n while i < len(nums):\n j = nums[i]-1\n # if the value is not equal to its index, swap\n if nums[i] != nums[j]:\n nums[i], nums[j] = nums[j], nums[i]\n else:\n i += 1\n\n for i in range(len(nums)):\n if nums[i] != i+1:\n duplicate_numbers.append(nums[i])\n\n return duplicate_numbers\n","sub_path":"Problems/Leetcode/442_FindAllDuplicatesInAnArray.py","file_name":"442_FindAllDuplicatesInAnArray.py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"275665114","text":"import slack_utility\r\nimport time\r\n\r\nfile = open('info.txt', 'w') \r\norder = 0\r\norder_num = 1\r\n\r\n#Пока что бот может работать только по определенному алгоритму:\r\n#Для начала диалога необходимо поздароваться с ботом или сообщить о готовности сделать заказ \r\n\r\ndef handle_command(slack_api, command, channel):\r\n\r\n\tfile = open('info.txt', 'a') \r\n\tglobal order\r\n\tglobal order_num\r\n\r\n\tif order == 0:\r\n\t\tif command.lower().startswith('hi') or command.lower().startswith('hey') or command.lower().endswith('order') or command.lower().startswith('pizza'):\r\n\t\t\tslack_api.rtm_send_message(channel, 'Hi, do you want to place an order?')\r\n\t\t\torder = 1\r\n\t\telse:\r\n\t\t\tprint ('Invalid Command: Not Understood')\r\n\t\t\tslack_api.rtm_send_message(channel, 'Invalid Command: Not Understood')\r\n\telif order == 1:\r\n\t\tif command.lower().startswith('yes') or command.lower().startswith('sure'):\r\n\t\t\tslack_api.rtm_send_message(channel, 'I\\'m writing down')\r\n\t\t\torder = 2\r\n\t\telse:\r\n\t\t\tprint ('Invalid Command: Not Understood')\r\n\t\t\tslack_api.rtm_send_message(channel, 'Invalid Command: Not Understood')\r\n\telif order == 2:\r\n\t\tfile.write('Заказ № ' + str(order_num) + ' ' + command + '\\n')\r\n\t\tslack_api.rtm_send_message(channel, 'Thank! Your order is accepted')\r\n\t\torder_num = order_num + 1\r\n\t\torder = 0\r\n\r\n\t\r\n\r\n\tfile.close()\r\n\r\n\r\n\t\r\n\t\r\ndef main():\r\n\r\n\tREAD_WEBSOCKET_DELAY = 1 \r\n\tslack_api = slack_utility.connect()\r\n\tif slack_api.rtm_connect():\r\n\t\tprint ('SLACK_BOT connected and running')\r\n\t\twhile True:\r\n\t\t\tcommand, channel = slack_utility.parse_slack_response(slack_api.rtm_read())\r\n\t\t\tif command and channel:\r\n\t\t\t\thandle_command(slack_api, command, channel)\r\n\t\t\ttime.sleep(READ_WEBSOCKET_DELAY)\r\n\telse:\r\n\t\tprint ('Connection failed. Invalid Slack token or bot ID?') \r\n\r\n\r\nif __name__ == '__main__':\r\n\tmain()\r\n\r\nfile.close()","sub_path":"mainbot.py","file_name":"mainbot.py","file_ext":"py","file_size_in_byte":1935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"28527752","text":"import re\nfrom bs4 import BeautifulSoup\nfrom .util import get_soup, text_normalize\n\nbasic_url_form = 'http://movie.naver.com/movie/bi/mi/basic.nhn?code={}' # movie_id\n\n\ndef get_movie_data(movie_id):\n url = basic_url_form.format(movie_id)\n soup = get_soup(url)\n infomation = {\n 'movie_id': movie_id,\n 'title': get_title(soup),\n 'genres': get_genres(soup),\n 'story': get_story(soup)\n }\n return infomation\n\n\ndef get_title(soup):\n a = soup.select('div[class=mv_info] h3[class=h_movie] a')\n if not a:\n return ''\n return text_normalize(a[0].text)\n\n\ndef get_genres(soup):\n genres = soup.select('a[href^=\"/movie/sdb/browsing/bmovie.nhn?genre=\"]')\n return list({genre.text for genre in genres})\n\n\ndef get_story(soup):\n try:\n story_soup = BeautifulSoup(str(soup.select(\"div[class=story_area]\")[\n 0]).replace('
', '\\n').replace('\\xa0', '\\n'), 'lxml')\n sentences = story_soup.text.split('\\n')\n sentences = [text_normalize(sentence)\n for sentence in sentences if sentence]\n sentences = [sentence for sentence in sentences if sentence != '줄거리']\n return '\\n'.join(sentences)\n except:\n return 'error occurred while fetching data'\n","sub_path":"naver_movie/movie_scrap.py","file_name":"movie_scrap.py","file_ext":"py","file_size_in_byte":1291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"195860484","text":"from flask import Flask, redirect, url_for, render_template, request, session\nfrom datetime import timedelta\n\napp = Flask(__name__)\napp.secret_key = \"test\"\napp.permanent_session_lifetime = timedelta(minutes = 1)\n\n@app.route(\"/\")\ndef index():\n # return \"

Hello WOrld

\"\n return render_template(\"index.html\", content = ['ricky', 'rhea', 'wifey'])\n\n@app.route(\"/home\")\ndef home():\n # return \"

Hello WOrld

\"\n return render_template(\"home.html\")\n\n@app.route(\"/admin/\")\ndef admin():\n return redirect(url_for(\"user\",name=\"Admin!\"))\n\n@app.route(\"/login/\", methods=[\"GET\",\"POST\"])\ndef login():\n if(request.method == \"POST\"):\n session.permanent = True\n user = request.form[\"name\"]\n session['user'] = user\n return redirect(url_for(\"user\"))\n else:\n if \"user\" in session:\n return redirect(url_for(\"user\"))\n return render_template(\"login.html\")\n\n@app.route(\"/logout\")\ndef logout():\n session.pop(\"user\", None)\n return redirect(url_for(\"login\"))\n\n@app.route(\"/user\")\ndef user():\n if \"user\" in session:\n user = session['user']\n return f\"Hello {user}!\"\n else:\n return redirect(url_for(\"login\"))\n \n # return render_template(\"index.html\", content=name, r=2)\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)","sub_path":"tutorial.py","file_name":"tutorial.py","file_ext":"py","file_size_in_byte":1331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"308044666","text":"from Pages.ContentPages.BasePage import Page\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import Select\nfrom magic_box.find_elements import find_element\n\nimport time\nimport pytest\n\n\nclass BusinessCase(Page):\n def __init__(self, driver):\n self.driver = driver\n super().__init__(driver)\n self.locators = {\n 'picto_tab':{'by': By.XPATH, 'value': '//summary[contains(text(),\"Picto in Hero banner\")]'},\n 'categories_tab': {'by': By.XPATH, 'value': '//summary[contains(text(),\"Categories for list filters\")]'},\n 'zone_select': {'by': By.XPATH, 'value': '//select[@id=\"edit-field-zone\"]'},\n 'contentaside_paragraph': {'by': By.XPATH, 'value': '//div[@id=\"edit-field-paragraphs-wrapper\"]'},\n 'latitude': {'by': By.XPATH, 'value': '//input[contains(@class,\"geofield-lat form-text\")]'},\n 'longitude': {'by': By.XPATH, 'value': '//input[contains(@class,\"geofield-lon form-text\")]'},\n 'picto_banner_tab': {'by': By.XPATH, 'value': '//summary[contains(text(),\"Picto in Hero banner\")]'},\n 'picto_banner': {\n 'picto_banner_title': {'by': By.XPATH, 'value': '//input[contains(@id,\"business-case-text\")]'},\n 'picto_banner_color': {'by': By.XPATH,\n 'value': '//select[contains(@id,\"business-case-color\")]'},\n 'picto_icon_iframe': {'by': By.XPATH,\n 'value': '//div[contains(@id,\"business-case-icon\")]//iframe[contains(@id,\"iframe\")]'},\n 'picto_image_iframe': {'by': By.XPATH,\n 'value': '//div[contains(@id,\"business-case-image\")]//iframe[contains(@id,\"iframe\")]'},\n }\n\n }\n microtime = int(round(time.time() * 1000))\n self.bc_test_data = {\n 'title': 'Business case title {}'.format(microtime),\n 'zone_select': 1,\n 'latitude': '54.23',\n 'longitude': '-0.12',\n 'picto_title': 'picto title',\n 'picto_color': 'Green (Waste)',\n }\n self.bc_ct_paragraphs = {\n 'text',\n 'quote',\n 'media',\n 'content_with_aside',\n 'grid',\n 'carousel',\n 'accordion',\n }\n\n def get_categories_tab(self):\n return find_element(self.driver, **self.locators['categories_tab'])\n\n def get_picto_tab(self):\n return find_element(self.driver, **self.locators['picto_tab'])\n\n def get_zone_select(self):\n return Select(find_element(self.driver, **self.locators['zone_select']))\n\n def get_paragraph(self):\n return find_element(self.driver, **self.locators['contentaside_paragraph'])\n\n def get_latitude(self):\n return find_element(self.driver, **self.locators['latitude'])\n\n def get_longitude(self):\n return find_element(self.driver, **self.locators['longitude'])\n\n def get_picto_title(self):\n return find_element(self.driver, **self.locators['picto_banner']['picto_banner_title'])\n\n def get_picto_color(self):\n return Select(find_element(self.driver, **self.locators['picto_banner']['picto_banner_color']))\n\n @pytest.allure.step('Fill mandatory fields for Business Case CT')\n def fill_business_case_mandatory(self, title=None):\n if title:\n self.get_title().send_keys(title)\n else:\n self.get_title().send_keys(self.bc_test_data['title'])\n #self.get_categories_tab().click()\n #self.get_zone_select().select_by_index(self.bc_test_data['zone_select'])\n self.get_latitude().send_keys(self.bc_test_data['latitude'])\n self.get_longitude().send_keys(self.bc_test_data['longitude'])\n\n @pytest.allure.step('Add new paragraphs to the BC page')\n def add_paragraphs(self):\n for item in self.bc_ct_paragraphs:\n self.add_paragraph(str(item))\n time.sleep(2)\n\n @pytest.allure.step('Fill page\\'s picto in hero banner')\n def fill_picto_banner(self):\n picto_banner = find_element(self.driver, **self.locators['picto_banner_tab'])\n picto_banner.click()\n icon_iframe = find_element(self.driver, **self.locators['picto_banner']['picto_icon_iframe'])\n self.media_browser.choose_image(icon_iframe)\n image_iframe = find_element(self.driver, **self.locators['picto_banner']['picto_image_iframe'])\n self.media_browser.choose_image(image_iframe)\n time.sleep(1)\n self.get_picto_title().send_keys(self.bc_test_data['picto_title'])\n self.get_picto_color().select_by_visible_text(self.bc_test_data['picto_color'])","sub_path":"Pages/ContentPages/BusinessCase.py","file_name":"BusinessCase.py","file_ext":"py","file_size_in_byte":4728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"192816806","text":"#!usr/bin/env python\n#! -*-coding:utf-8 -*-\n\ndef numToLetter(lists, maxs=0):\n '''\n 使用迭代的方法去搜索一个list中的最大数\n 然后以对应的字母输出\n '''\n try:\n if len(lists) <= 0:\n final = ''\n tables = {'1':'One', '2':' Two','3':' Three',\n '4':' Four', '5':' Five','6':' Six', '7':' Seven',\n '8':' Eight','9':' Nine', '0':' Zero', }\n for word in str(maxs):\n for k, v in tables.iteritems():\n if k == word:final += v\n return final.lstrip()\n a = lists.pop()\n maxs = a if a > maxs else maxs\n return numToLetter(lists, maxs)\n except TypeError:\n return 'Please input a list.'","sub_path":"numToLetter.py","file_name":"numToLetter.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"185538256","text":"from django.urls import path\nfrom .views import ClassCreateAPIView, CreateSubjectAPIView, NoticeBoardUploadView,TeacherRegisterModelViewSet, StudentRegisterModelViewSet , RoleforTeacherModelView,DirectMessageModelView\nfrom rest_framework.routers import DefaultRouter\nfrom rest_framework.authtoken.views import obtain_auth_token\n\n\nr= DefaultRouter()\nr.register('studentregister', StudentRegisterModelViewSet,basename=\"studentregister\")\nr.register('subjectregister', CreateSubjectAPIView,basename=\"subjectregister\")\nr.register('noticeboard', NoticeBoardUploadView,basename=\"notice\")\nr.register('roleforteacher', RoleforTeacherModelView,basename=\"role\")\nr.register('directmessage', DirectMessageModelView,basename=\"message\")\nr.register('teacherregister',TeacherRegisterModelViewSet,basename=\"teacherregister\")\n# r.register(\"admindatas\", AdminLoginAPIVeiw)\n\napp_name=\"adminsite\"\nurlpatterns=[\n path('add-classnumber/',ClassCreateAPIView.as_view()),\n #path('admin-datas/', AdminLoginAPIVeiw.as_view()),\n path('login/', obtain_auth_token)\n]+r.urls\n","sub_path":"e_portal_system/adminsite/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"334777027","text":"#!/usr/bin/env python\n\n\"\"\"\nThis is a module contains scripts for finding domains in HiC interaction data.\n\nConcepts\n--------\n\nThese functions rely on the :class:`HiC` class in conjunction with the :class:`Fend` and :class:`HiCData` classes.\n\n\nAPI Documentation\n-----------------\n\"\"\"\n\nimport os\nimport sys\n\nimport numpy\nimport h5py\ntry:\n from mpi4py import MPI\nexcept:\n pass\n\nimport libraries._hic_tads as _hic_tads\nimport hic_binning\nimport plotting\n\nclass TAD( object ):\n \"\"\"\n \"\"\"\n\n def __init__(self, hic, silent=False):\n self.hic = hic\n self.silent = silent\n\n def __getitem__(self, key):\n \"\"\"Dictionary-like lookup.\"\"\"\n if key in self.__dict__:\n return self.__dict__[key]\n else:\n return None\n\n def __setitem__(self, key, value):\n \"\"\"Dictionary-like value setting.\"\"\"\n self.__dict__[key] = value\n return None\n\n \"\"\"\n def learn_TADs(self, maxsize=2000000, maxtreesize=25, p=3, q=12, gamma=0.5, chroms=[], binsize=10000,\n minsize=50000):\n if isinstance(chroms, list) and len(chroms) == 0:\n chroms = list(self.hic.fends['chromosomes'][...])\n elif isinstance(chroms, str):\n chroms = [chroms]\n if maxsize % binsize != 0:\n print >> sys.stderr, (\"Maximum TAD size must be a multiple of bin size.\\n\"),\n return\n self.parameters = {\n 'maxsize': int(maxsize),\n 'minsize': int(minsize),\n 'maxtreesize': int(maxtreesize),\n 'p': int(p),\n 'q': int(q),\n 'gamma': float(gamma),\n 'binsize': int(binsize),\n }\n self.chromosomes = numpy.array(chroms)\n for chrom in chroms:\n self[chrom] = self.find_TAD(chrom)\n\n def find_TAD(self, chrom):\n p, q, gamma = self.parameters['p'], self.parameters['q'], self.parameters['gamma']\n maxdist = max(max(p, q) * self.parameters['binsize'], self.parameters['maxsize'])\n data = self.hic.cis_heatmap(chrom, binsize=self.parameters['binsize'], arraytype='compact',\n datatype='fend', include_diagonal=False, maxdistance=maxdist)\n #where = numpy.where(data[:, :, 0] == 0)\n #data[where[0], where[1], 1] = 0\n #where = numpy.where(data[:, :, 0] > 0)\n #data[where[0], where[1], 0] = numpy.log(data[where[0], where[1], 0] / data[where[0], where[1], 1])\n #data[where[0], where[1], 1] = 1\n maxbins = self.parameters['maxsize'] / self.parameters['binsize']\n minbins = self.parameters['minsize'] / self.parameters['binsize']\n n = data.shape[0]\n m = data.shape[1]\n print >> sys.stderr, (\"\\rFinding BI scores for chromosome %s...\") % (chrom),\n BIs = numpy.zeros((n, m, 2), dtype=numpy.float32)\n BIs.fill(-numpy.inf)\n _hic_tads.find_BIs(data, BIs, p, minbins)\n where = numpy.where(BIs[:, :, 1] == 0)\n print numpy.amin(BIs[where[0], where[1], 0]), numpy.mean(BIs[where[0], where[1], 0]), numpy.amax(BIs[where[0], where[1], 0]) \n where = numpy.where(BIs[:, :, 1] == 1)\n print numpy.amin(BIs[where[0], where[1], 0]), numpy.mean(BIs[where[0], where[1], 0]), numpy.amax(BIs[where[0], where[1], 0])\n BI_scores = numpy.zeros((n, maxbins - minbins + 1), dtype=numpy.float32)\n for i in range(minbins, maxbins + 1):\n BI_scores[:(n - i + 1), i - minbins] = BIs[:(n - i + 1), 0] * BIs[(i - 1):, 1]\n where = numpy.where(BI_scores <= 0.0)\n BI_scores[where] = -numpy.inf\n where = numpy.where(BI_scores > -numpy.inf)\n BI_scores[where] = BI_scores[where] ** gamma\n where = numpy.where(data[:, :, 0] == 0)\n data[where[0], where[1], 1] = 0\n where = numpy.where(data[:, :, 0] > 0)\n data[where[0], where[1], 0] = numpy.log(data[where[0], where[1], 0] / data[where[0], where[1], 1])\n data[where[0], where[1], 1] = 1\n print >> sys.stderr, (\"\\rFinding TAD parameters for chromosome %s...\") % (chrom),\n scores = numpy.zeros((n, maxbins + 1), numpy.float32)\n scores.fill(numpy.inf)\n std_params = numpy.zeros((n, maxbins + 1, 3), dtype=numpy.float32)\n _hic_tads.find_initial_TAD_std_params(data, BIs, scores, std_params, maxbins, minbins, gamma)\n paths = numpy.zeros((n, maxbins - minbins + 2), dtype=numpy.int32)\n paths.fill(-1)\n path_scores = numpy.zeros((n + 1, maxbins - minbins + 2, 2), dtype=numpy.float32)\n #path_scores[:, :, 0].fill(numpy.inf)\n final_path = numpy.zeros(n, dtype=numpy.int32)\n _hic_tads.find_TAD_path(scores, paths, path_scores, final_path, minbins, maxbins)\n print list(final_path)\n for i in range(10):\n print list(path_scores[i, :, 0])\n where = numpy.where(numpy.abs(scores) < numpy.inf)\n print numpy.amin(scores[where]), numpy.mean(scores[where]), numpy.amax(scores[where])\n where = numpy.where(numpy.abs(BI_scores) < numpy.inf)\n print numpy.amin(BI_scores[where]), numpy.mean(BI_scores[where]), numpy.amax(BI_scores[where])\n #numpy.savetxt('temp.txt',paths, fmt=\"%i\", delimiter='\\t')\n #subTAD_scores = numpy.zeros((n, maxbins - minbins + 1, self.parameters['maxtreesize']), dtype=numpy.float32)\n #subTAD_params = numpy.zeros((n, maxbins - minbins + 1, self.parameters['maxtreesize'], 3), dtype=numpy.float32)\n #_hic_tads.find_TAD_subparts(subTAD_scores, subTAD_params, BIs, std_params, minbins, gamma)\n #where = numpy.where(std_params[:, :, 0] >= 3)\n #errors = numpy.zeros((std_params.shape[0], std_params.shape[1], 2), dtype=numpy.float32)\n ##errors[where[0], where[1], 0] = (std_params[where[0], where[1], 2] / std_params[where[0], where[1], 0]) - (std_params[where[0], where[1], 1] / std_params[where[0], where[1], 0]) ** 2 - numpy.maximum(0, BIs[where[0], 0] * BIs[where[0] + where[1] + minbins - 1, 1])**gamma\n #print numpy.amax(where[0] + where[1] + minbins), BIs.shape\n #where1 = numpy.where(errors[where[0], where[1], 0] < 0)\n ##errors[where[0][where1], where[1][where1], 1] = 1\n #print numpy.mean(errors), numpy.amax(errors)\n #print numpy.amax(BIs), numpy.mean(BIs)\n #_hic_tads.find_betadeltas(data, betas, deltas, fits, errors, maxbins)\n #where = numpy.where(data[:, :, 0] == 0)\n #data[where[0], where[1], 1] = 0\n #where = numpy.where(data[:, :, 0] > 0)\n #data[where[0], where[1], 0] = numpy.log(data[where[0], where[1], 0] / data[where[0], where[1], 1])\n #data[where[0], where[1], 1] = 1\n for i in range(data.shape[0]):\n for j in range(data.shape[1]):\n if BIs[i] < 0 or BIs[j] < 0:\n fits[i,j] = numpy.inf\n else:\n fits[i,j] -= gamma * (BIs[i] + BIs[j])\n print >> sys.stderr, (\"\\rFinding TAD trees for chromosome %s...\") % (chrom),\n scores = numpy.zeros((n, n, self.parameters['maxtreesize']), dtype=numpy.float32)\n _hic_tads.build_TAD_trees(data, fits, deltas, betas, errors, scores, maxbins)\n alldata = numpy.zeros((n, n, 2), dtype=numpy.float32)\n for i in range(data.shape[0] - 1):\n alldata[(i + 1):min(i + data.shape[1] + 1, n), i, :] = data[i, :min(data.shape[1], n - i - 1), :]\n for i in range(scores.shape[0] - 1 - minbins):\n where = numpy.where(numpy.abs(scores[i, :min(scores.shape[1], n - i - minbins - 1)]) < numpy.inf)[0]\n alldata[i, where + i + minbins, 0] = scores[i, where]\n alldata[i, where + i + minbins, 1] = 1\n for i in range(BIs.shape[0] - 1):\n alldata[i, (i + 1):min(i + data.shape[1] + 1, n), :] = BIs[i, :min(data.shape[1], n - i - 1), :]\n #where = numpy.where(numpy.abs(BIs[i, :min(BIs.shape[1], n - i - 1), 1]) < numpy.inf)[0]\n #alldata[i, where + i, 0] = BIs[i, where, 1]\n #alldata[i, where + i, 1] = 1\n indices = numpy.triu_indices(n, 1)\n where = numpy.where(alldata[indices[0], indices[1], 1] > 0)[0]\n alldata[indices[0][where], indices[1][where], 0] -= numpy.amin(alldata[indices[0][where], indices[1][where], 0])\n alldata[indices[0][where], indices[1][where], 0] /= numpy.amax(alldata[indices[0][where], indices[1][where], 0])\n where = numpy.where(alldata[indices[1], indices[0], 1] > 0)[0]\n alldata[indices[1][where], indices[0][where], 0] -= numpy.amin(alldata[indices[1][where], indices[0][where], 0])\n alldata[indices[1][where], indices[0][where], 0] /= numpy.amax(alldata[indices[1][where], indices[0][where], 0])\n for i in range(n):\n if final_path[i] != 0:\n indices = numpy.triu_indices(final_path[i], 1)\n temp = alldata[i + final_path[i] - 1, i, 0]\n alldata[indices[1] + i, indices[0] + i, :] = 1.0\n alldata[i + final_path[i] - 1, i, 0] = temp\n img = plotting.plot_full_array(alldata, symmetricscaling=False, logged=False)\n img.save('BIs_%s.png' % chrom)\n \"\"\"\n\n def find_arrowhead_TADs(self, binsize, minbins, maxbins, chroms=[]):\n if isinstance(chroms, str):\n chroms = [chroms]\n if len(chroms) == 0:\n chroms = list(self.hic.fends['chromosomes'][...])\n self.binsize = int(binsize)\n self.minsize = self.binsize * max(1, int(minbins))\n self.maxsize = self.binsize * (int(maxbins) + 1)\n self.TADs = {}\n for chrom in chroms:\n self.TADs[chrom] = []\n if not self.silent:\n print >> sys.stderr, (\"\\r%s\\rFinding heatmap for chromosome %s...\") % (' ' * 80, chrom),\n temp = self.hic.cis_heatmap(chrom, binsize=binsize * 16, datatype='fend', arraytype='full', returnmapping=True)\n if temp is None:\n continue\n temp_data, mapping = temp\n heatmap = numpy.zeros((temp_data.shape[0] * 16, temp_data.shape[0] * 16, 2), dtype=numpy.float32)\n temp = numpy.zeros(heatmap.shape, dtype=numpy.float32)\n for i in range(16):\n for j in range(16):\n heatmap[i::16, j::16, :] += temp_data\n temp_data = self.hic.cis_heatmap(chrom, binsize=binsize * 4, datatype='fend', arraytype='full', start=mapping[0, 0], stop=mapping[-1, 1])\n for i in range(4):\n for j in range(4):\n temp[i::4, j::4, :] += temp_data\n where = numpy.where(temp[:, :, 0] > 0)\n heatmap[where[0], where[1], :] = temp[where[0], where[1], :]\n temp_data, mapping = self.hic.cis_heatmap(chrom, binsize=binsize, datatype='fend', arraytype='full', start=mapping[0, 0], stop=mapping[-1, 1], returnmapping=True)\n where = numpy.where(temp_data[:, :, 0] > 0)\n heatmap[where[0], where[1], :] = temp_data[where[0], where[1], :]\n #temp = self.hic.cis_heatmap(chrom, binsize=binsize, maxdistance=self.maxsize, datatype='fend',\n # arraytype='compact', returnmapping=True, start=mapping[0, 0], stop=mapping[-1, 1])\n #if temp is None:\n # continue\n #data, mapping = temp[:2]\n data = numpy.zeros((heatmap.shape[0], maxbins - 1, 2), dtype=numpy.float32)\n for i in range(heatmap.shape[0] - 1):\n data[i, :min(data.shape[1], data.shape[0] - i - 1), :] = heatmap[i, (i + 1):min(data.shape[1] + i + 1, data.shape[0]), :]\n #heatmap = plotting.plot_compact_array(data, logged=True, symmetricscaling=False, silent=True)\n where = numpy.where(data[:, :, 1] > 0)\n data[where[0], where[1], 0] /= data[where[0], where[1], 1]\n scores = numpy.zeros((data.shape[0], data.shape[1]), dtype=numpy.float32)\n if not self.silent:\n print >> sys.stderr, (\"\\r%s\\rFinding arrowhead transformation for chromosome %s...\") % (' ' * 80, chrom),\n _hic_tads.find_arrowhead_transformation(data, scores, maxbins)\n sums = numpy.zeros(data.shape, dtype=numpy.float32)\n signs = numpy.zeros(data.shape, dtype=numpy.float32)\n variances = numpy.zeros((data.shape[0], data.shape[1], 2, 2), dtype=numpy.float32)\n domain_scores = numpy.zeros(scores.shape, dtype=numpy.float32)\n if not self.silent:\n print >> sys.stderr, (\"\\r%s\\rFinding arrowhead scoring for chromosome %s...\") % (' ' * 80, chrom),\n _hic_tads.find_arrowhead_scores(scores, sums, signs, variances, domain_scores, minbins)\n \"\"\"\n where = numpy.where(heatmap[:, :, 0] == 0)\n heatmap[where[0], where[1], 1] = 0\n where = numpy.where(heatmap[:, :, 0] > 0)\n heatmap[where[0], where[1], 0] = numpy.log(heatmap[where[0], where[1], 0] / heatmap[where[0], where[1], 1])\n heatmap[where[0], where[1], 0] -= numpy.amin(heatmap[where[0], where[1], 0])\n heatmap[where[0], where[1], 0] /= numpy.amax(heatmap[where[0], where[1], 0])\n heatmap[where[0], where[1], 1] = 1\n scores = numpy.zeros((domain_scores.shape[0], domain_scores.shape[1], 2), dtype=numpy.float32)\n scores[:, :, 0] = domain_scores\n scores[:, :, 0] -= numpy.amin(domain_scores)\n scores[:, :, 0] /= numpy.amax(scores[:, :, 0])\n scores[:, :, 1] = 1\n temp = hic_binning._compact_to_upper(scores)\n indices = numpy.triu_indices(heatmap.shape[0], 1)\n heatmap[indices[1], indices[0], :] = temp\n img = plotting.plot_full_array(heatmap, symmetricscaling = False, logged=False)\n img.save(\"arrowhead_scoring_%s.png\" % chrom)\n \"\"\"\n\n path = numpy.zeros(data.shape[0], dtype=numpy.int32)\n path_scores = numpy.zeros(data.shape[0], dtype=numpy.float64)\n if not self.silent:\n print >> sys.stderr, (\"\\r%s\\rFinding optimal domains for chromosome %s...\") % (' ' * 80, chrom),\n _hic_tads.find_arrowhead_path(domain_scores, path, path_scores, minbins, maxbins)\n i = path.shape[0] - 1\n domains = []\n while i > 0:\n if path[i] != 1:\n domains.append([i - path[i], i])\n self.TADs[chrom].append([mapping[domains[-1][0], 0], mapping[domains[-1][1], 1]])\n i -= path[i]\n\n \"\"\"\n c = pyx.canvas.canvas()\n c.insert(pyx.bitmap.bitmap(0, 0, img, width=20))\n span = float(mapping[-1, 1] - mapping[0, 0])\n for i in range(len(self.TADs[chrom])):\n start = (self.TADs[chrom][i][0] - mapping[0, 0]) / span * 20.0\n stop = (self.TADs[chrom][i][1] - mapping[0, 0]) / span * 20.0\n c.stroke(pyx.path.rect(start, 20 - start, stop-start, start-stop), [pyx.style.linewidth.THIN])\n c.writePDFfile('arrowhead_scoring_%s.pdf' % chrom)\n\n c = pyx.canvas.canvas()\n c0 = pyx.canvas.canvas([pyx.canvas.clip(pyx.path.path(\n pyx.path.moveto(0, 20),\n pyx.path.lineto(20, 20),\n pyx.path.lineto(20, 0),\n pyx.path.closepath()))])\n c1 = pyx.canvas.canvas([pyx.canvas.clip(pyx.path.path(\n pyx.path.moveto(0, 20),\n pyx.path.lineto(0, 0),\n pyx.path.lineto(20, 0),\n pyx.path.closepath()))])\n width = 20.0 * (2.0 ** 0.5)\n offset = data.shape[1] / float(data.shape[0]) * width / 2.0\n c = pyx.canvas.canvas([pyx.canvas.clip(pyx.path.rect(0, -offset, width, 2.0 * offset))])\n c0.insert(pyx.bitmap.bitmap(0, 0, heatmap, width=20))\n c.insert(c0, [pyx.trafo.translate(0, -20), pyx.trafo.rotate(45)])\n c1.insert(pyx.bitmap.bitmap(0, 0, domain_img, width=20))\n c.insert(c1, [pyx.trafo.translate(0, -20), pyx.trafo.rotate(45)])\n for d in domains:\n x0 = d[0] / float(data.shape[0]) * width\n x1 = d[1] / float(data.shape[0]) * width\n y = (x1 - x0) / 2.0\n c.stroke(pyx.path.path(\n pyx.path.moveto(x0, 0),\n pyx.path.lineto(x0 + y, y),\n pyx.path.lineto(x1, 0),\n pyx.path.lineto(x0 + y, -y),\n pyx.path.closepath()),\n [pyx.style.linewidth.Thin])\n c.writePDFfile('arrowhead_heatmap_%s.pdf' % chrom)\n \"\"\"\n if not self.silent:\n print >> sys.stderr, (\"\\r%s\\rFinished finding TADs\\n\") % (' ' * 80),\n\n def write_TADs(self, fname):\n output = open(fname, 'w')\n chroms = self.TADs.keys()\n chroms.sort()\n for chrom in chroms:\n self.TADs[chrom].sort()\n for domain in self.TADs[chrom]:\n print >> output, \"%s\\t%i\\t%i\" % (chrom, domain[0], domain[1])\n output.close()\n\n\n\n\n\n\n\n","sub_path":"hifive/tads.py","file_name":"tads.py","file_ext":"py","file_size_in_byte":17043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"423032067","text":"from flask import current_app\nfrom ipaddress import ip_network\nimport random\nimport string\nfrom app.models import ScopeItem\n\n\ndef get_target_tags(target):\n\ttargetnet = ip_network(target)\n\ttags = []\n\tfor scope in current_app.ScopeManager.get_scope():\n\t\tif scope.overlaps(targetnet):\n\t\t\tscopetags = ScopeItem.query.filter_by(target=str(scope)).first().tags.all()\n\t\t\tfor tag in scopetags:\n\t\t\t\ttags.append(tag.name)\n\treturn list(set(tags)) # make it a set for only uniques, then make it a list to serialize to JSON\n\n\ndef get_unique_scan_id():\n\tscan_id = ''\n\twhile scan_id == '':\n\t\trand = ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(16))\n\t\tcount, context = current_app.elastic.get_host_by_scan_id(rand)\n\t\tif count == 0:\n\t\t\tscan_id = rand\n\treturn scan_id\n\n\ndef prepare_work(work):\n\twork['tags'] = get_target_tags(work['target'])\n\twork['type'] = 'nmap'\n\twork['agent_config'] = current_app.agentConfig\n\twork['agent_config']['scripts'] = current_app.agentScriptStr\n\twork[\"services_hash\"] = current_app.current_services[\"sha256\"]\n\twork['scan_id'] = get_unique_scan_id()\n\twork['status'] = 200\n\twork['message'] = \"Target: \" + str(work['target'])\n\treturn work\n","sub_path":"natlas-server/app/api/prepare_work.py","file_name":"prepare_work.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"249517922","text":"import heapq\n\n\nclass Solution:\n def minCost_dijkstra(self, grid) -> int:\n dir = [[0, 1], [0, -1], [1, 0], [-1, 0]]\n m, n = len(grid), len(grid[0])\n dp = [[float(\"inf\") for j in range(n)] for i in range(m)]\n dp[0][0] = 0\n q = [[0, 0, 0]]\n while q:\n c, x, y = heapq.heappop(q)\n if dp[x][y] < c:\n continue\n for k, d in enumerate(dir):\n dx, dy = d\n i, j = x + dx, y + dy\n if 0 <= i < m and 0 <= j < n:\n cost = c if k == grid[x][y] - 1 else c + 1\n if dp[i][j] > cost:\n dp[i][j] = cost\n heapq.heappush(q, [cost, i, j])\n return dp[m - 1][n - 1]\n\n def minCost(self, grid) -> int:\n dir = [[0, 1], [0, -1], [1, 0], [-1, 0]]\n m, n = len(grid), len(grid[0])\n visited = set()\n\n def dfs(x, y):\n visited.add((x, y))\n ans = [(x, y)]\n for k, d in enumerate(dir):\n dx, dy = d\n i, j = x + dx, y + dy\n if 0 <= i < m and 0 <= j < n and k == grid[x][y] - 1 and (i, j) not in visited:\n ans += dfs(i, j)\n return ans\n\n res = 0\n q = dfs(0, 0)\n while q:\n newQ = []\n for x, y in q:\n if x == m - 1 and y == n - 1:\n return res\n for dx, dy in dir:\n i, j = x + dx, y + dy\n if 0 <= i < m and 0 <= j < n and (i, j) not in visited:\n newQ += dfs(i, j)\n q = newQ\n res += 1\n return -1\n\n\ns = Solution()\nprint(s.minCost([[1, 1, 1, 1], [2, 2, 2, 2], [1, 1, 1, 1], [2, 2, 2, 2]]))\n","sub_path":"leetcode/2021/minimum-cost-to-make-at-least-one-valid-path-in-a-grid.py","file_name":"minimum-cost-to-make-at-least-one-valid-path-in-a-grid.py","file_ext":"py","file_size_in_byte":1789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"528694950","text":"from flask import Flask,render_template,request\r\nfrom chatterbot import ChatBot\r\nfrom chatterbot.trainers import ChatterBotCorpusTrainer\r\n\r\napp = Flask(__name__) \r\nportuguese_bot = ChatBot(\"Chatterbot\",storage_adapter=\"chatterbot.storage.SQLStorageAdapter\")\r\ntrainer = ChatterBotCorpusTrainer(portuguese_bot)\r\ntrainer.train(\"chatterbot.corpus.portuguese\")\r\ntrainer.train(\"data/data.yml\")\r\n\r\n\r\n@app.route(\"/\")\r\ndef index():\r\n return render_template(\"index.html\")\r\n\r\n@app.route(\"/get\")\r\ndef get_bot_response():\r\n userText = request.args.get(\"msg\")\r\n return str(portuguese_bot.get_response(userText))\r\n\r\n\r\nif __name__ == \"__main__\":\r\n app.run(debug = True)\r\n\r\n\r\n","sub_path":"sample.py","file_name":"sample.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"532698863","text":"from pathlib import Path\nimport modules\nimport os\n\nfile_path = Path(r\"c:\\Users\\ehom\\Documents\\IdeaProjects\\Python\\Projects\\trackChangesQuote\\sample\\\\\")\n\nfor file in file_path.iterdir():\n tcq_folder = str(file.parent)+\"\\\\\"+file.name+\"_tcq\"\n os.mkdir(tcq_folder)\n acceptPath = modules.accept_all(str(file), tcq_folder)\n rejectPath = modules.reject_all(str(file), tcq_folder)\n modules.convert_to_txlf(tcq_folder, \"en-us\")\n modules.segment_and_pseudo(tcq_folder)\n tm_path = tcq_folder+\"\\\\tm\"\n os.mkdir(tm_path)\n modules.create_tm_and_update(tm_path, rejectPath)\n modules.analyze_accepted(tm_path, acceptPath, tcq_folder)\n\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"88181773","text":"\"\"\"\nGiven an array S of n integers, are there elements a, b, c in S such that a + b + c = 0? Find all unique triplets in the array which gives the sum of zero.\n\nNote: The solution set must not contain duplicate triplets.\n\nFor example, given array S = [-1, 0, 1, 2, -1, -4],\n\nA solution set is:\n[\n [-1, 0, 1],\n [-1, -1, 2]\n]\n\"\"\"\nclass Solution(object):\n def threeSum(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[List[int]]\n \"\"\"\n nums = sorted(nums)\n \n ans = []\n for i in range(0,len(nums)-1):\n if i==0 or (nums[i-1]!=nums[i]):\n l = i + 1\n r = len(nums) - 1\n while r > l:\n s = nums[i] + nums[l] + nums[r]\n if s == 0:\n ans.append([nums[i], nums[l], nums[r]])\n l += 1\n r -= 1\n\n # Skip duplicates\n while l nums[r] --> we need to increase l (in order to decrease |nums[i] + nums[l]| \n # since nums[r] would only get smaller if we were to decrease r)\n l += 1\n else:\n r -= 1\n return ans\n\ndef main():\n solution = Solution()\n print(solution.threeSum([-1,0,1,2,-1,-4]))\n print(solution.threeSum([0,0,0,0]))\n\nif __name__ == \"__main__\": main()","sub_path":"python/3sum.py","file_name":"3sum.py","file_ext":"py","file_size_in_byte":1656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"119633468","text":"# python\n\n# 1.\na = 5\nb = 12\na, b = b, a\n\n# 2.\n\n\ndef invers(string=\"Hello World\"):\n new_string = \"\"\n for i, elem in enumerate(string):\n new_string += string[-(i+1)].upper()\n return new_string\n\ninvers()\n\n# 3.\n\n\ndef wordandsent(text_string):\n text_string_1 = text_string.split(\".\")\n text_string_2 = text_string.split(\" \")\n return len(text_string_1) - 1, len(text_string_2)\n\ntext_string = \"Управляющие и клиентские аккаунты подходят для организации иерархии, но как насчет тестирования \" \\\n \"экспериментальных изменений или вызовов API без оказания влияния на рабочую среду. Именно для этого и \" \\\n \"предназначены тестовые аккаунты.\"\"\"\nl1, l2 = wordandsent(text_string)\n\n# 4.\n\n\ndef sandp(number):\n number = str(number)\n sum = 0\n p = 1\n for elem in number:\n sum += int(elem)\n p *= int(elem)\n return sum, p\n\nsandp(73)\n\n# Django\n# 1\n# используем кастомную модель юзер.\n\nfrom django.contrib.auth.models import User\nfrom django.db import models\n\n\nclass Divice_list(models.Model):\n name = models.CharField(max_length=100, unique=True)\n # .\n # .\n # .\n # дальше добавляются поля, характеризующие девайс(система, модель...)\n\n\nclass Device(models.Model):\n user = models.ForeignKey(User, unique=True, primary_key=True, verbose_name=\"Пользователь\")\n device = models.ForeignKey(Divice_list ,verbose_name=\"Имя устройства\", primary_key=True)\n\n\n# 2\n\nclass Faculty(models.Model):\n title = models.CharField(max_length=255, verbose_name='Faculty name')\n description = models.TextField(verbose_name='Faculty description', default=\"\", blank=True)\n\n\nclass Student(models.Model):\n first_name = models.CharField(max_length=255, verbose_name='First name')\n last_name = models.CharField(max_length=255, verbose_name='Last name')\n birthday = models.DateField(verbose_name='User birthday')\n faculty = models.ForeignKey(Faculty, verbose_name='Faculty')\n\n\na = len(Student.objects.all())\nb = len(Student.objects.filter(faculty=Faculty.objects.get(title='faculty1')))\nb = len(Student.objects.filter(faculty__title='faculty1')) # вариант 2\nc = []\nfor elem in a:\n x = {\n 'имя студента': elem.first_name,\n 'фамилия студента': elem.last_name,\n 'наименование факультета': Faculty.objects.get(id=elem.faculty),\n }\n c.append(x)\n\nfac_list = Faculty.objects.all()\nd = {}\nfor elem in fac_list:\n d[elem.title] = len(Student.objects.filter(faculty=elem))\n\n\n# Python; - 2\n# Django или другие Python-фреймворки; - 2\n# PostgreSQL или другой SQL-сервер; - 1\n# NoSQL базы данных; - 0\n# ОС *nix, стандартные средства shell; - 1\n# алгоритмы и структуры данных; - 2\n# Git, SVN, другие VCS. - 1\n# HTML / JS (верстка и прочее не требуется, просто общие знания) - 2\n# практика разработки ПО - 1\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"451014402","text":"import sqlite3\nimport pickle\nimport pandas\nimport datetime\nimport os\n\ndefault_database_name = 'bdaybot.db'\n\n# File used to create all the tables in `bdaybot-data.db`\n\n# SQL Command to create TABLE guilds\ncreate_guilds_table = \"\"\"CREATE TABLE guilds(\n guild_id INT PRIMARY KEY,\n announcements_id INT,\n role_id INT,\n today_names_cycle BLOB,\n nickname_notice BOOLEAN DEFAULT 1\n )\"\"\"\n\n# SQL Command to create TABLE student_data\ncreate_student_data_table = \"\"\"CREATE TABLE student_data(\n StuID INT PRIMARY KEY,\n LastName TEXT,\n FirstName TEXT,\n Grd INT\n )\"\"\"\n\n# SQL Command to create TABLE discord_users\ncreate_discord_users_table = \"\"\"CREATE TABLE discord_users(\n discord_user_id INT PRIMARY KEY,\n student_id INT UNIQUE,\n FOREIGN KEY(student_id) REFERENCES student_data(StuID) ON DELETE CASCADE\n )\"\"\"\n\nconfirm = False\nif __name__ == '__main__':\n if os.path.isfile(default_database_name):\n yes_or_no = input((f\"Running this WILL OVERWRITE the existing database '{default_database_name}' \"\n \"Are you sure you want to continue? Type YES to confirm: \")).lower()\n if 'yes' in yes_or_no:\n confirm = True\n os.remove(default_database_name)\n else:\n confirm = True\n\nconnection = None\n\nif __name__ == '__main__' and confirm:\n # connection = sqlite3.connect(':memory:')\n connection = sqlite3.connect(default_database_name)\n\n # DEBUG: **MUST** include this line in order to use\n # FOREIGN KEYS, by default they are **DISABLED**\n connection.execute(\"PRAGMA foreign_keys = 1\")\n\n cursor = connection.cursor()\n\n # Creating TABLE guilds\n cursor.execute(create_guilds_table)\n\n # Creating TABLE student_data and adding data\n cursor.execute(create_student_data_table)\n official_student_df = pandas.concat([pandas.read_csv('Student Locator Spring 2020.csv',\n usecols=['StuID', 'LastName', 'FirstName', 'Grd']),\n pandas.DataFrame({'StuID': [123456], 'LastName': ['Neat'], 'FirstName': ['Dr.'], 'Grd': [-1]})])\n official_student_df.to_sql('student_data', connection, index=False, if_exists='append')\n\n # Creating TABLE discord_users\n\n cursor.execute(create_discord_users_table)\n\n # --- Transfering data from .pickle files to SQL database ---\n\n # Writing to TABLE guilds\n with open('announcements.pickle', mode='rb') as file:\n announcements = pickle.load(file)\n with open('guilds_info.pickle', mode='rb') as file:\n guilds_info = pickle.load(file)\n\n for (guild_id, (cycler, nickname_notice, role_id)), (_, announcements_id) in zip(guilds_info.items(), announcements.items()):\n cursor.execute(\"INSERT INTO guilds VALUES(?, ?, ?, ?, ?)\",\n (guild_id, announcements_id, role_id, pickle.dumps(cycler), not nickname_notice))\n\n # Writing to TABLE discord_users\n with open('bday_dict.pickle', mode='rb') as file:\n bday_dict = pickle.load(file)\n with open('temp_id_storage.pickle', mode='rb') as file:\n temp_id_storage = pickle.load(file)\n\n for wishee_id, wishers in bday_dict.items():\n # print(f\"wishee_id: {wishee_id}\")\n # Writing to TABLE ?\n # WARNING: Below is NOT A GOOD idea due to the possibility of an SQL injection attack\n # If you will be accepting input from users you **MUST** find a way to prevent\n # this type of attack\n create_id_table = \"\"\"CREATE TABLE {}(\n discord_user_id INT,\n year INT,\n PRIMARY KEY(discord_user_id, year),\n FOREIGN KEY(discord_user_id) REFERENCES discord_users(discord_user_id)\n ON DELETE CASCADE\n )\"\"\".format(f\"id_{wishee_id}\")\n # cursor.execute(create_id_table, (f\"id_{wishee_id}\",))\n cursor.execute(create_id_table)\n for discord_id, student_id in wishers.items():\n # WARNING: Line 113 is also a **BAD** idea for the reasons mentioned above\n # print(f\"studentID: {student_id}\")\n try:\n cursor.execute(\"INSERT INTO discord_users VALUES(?, ?)\", (discord_id, student_id))\n except sqlite3.IntegrityError as error:\n if \"UNIQUE constraint failed\" not in str(error):\n raise error\n cursor.execute(\"INSERT INTO id_{} VALUES(?, ?)\".format(wishee_id), (discord_id, datetime.date.today().year))\n\n for discord_id, student_id in temp_id_storage.items():\n cursor.execute(\"INSERT INTO discord_users VALUES(?, ?)\", (discord_id, student_id))\n print(f\"Succesfully created '{default_database_name}'\")\nelif __name__ == '__main__':\n print(\"Cancelling database creation!\")\nelse:\n from argparser import args\n if not os.path.isfile(args.database):\n connection = sqlite3.connect(args.database)\n cursor = connection.cursor()\n cursor.execute(create_guilds_table)\n cursor.execute(create_student_data_table)\n cursor.execute(create_discord_users_table)\n official_student_df = pandas.concat([pandas.read_csv('Student Locator Spring 2020.csv',\n usecols=['StuID', 'LastName', 'FirstName', 'Grd']),\n pandas.DataFrame({'StuID': [123456], 'LastName': ['Neat'], 'FirstName': ['Dr.'], 'Grd': [-1]})])\n official_student_df.to_sql('student_data', connection, index=False, if_exists='append')\n\n\n\nif connection is not None:\n # Add all the data to the database\n connection.commit()\n\n # Close the connection when finished\n connection.close()\n","sub_path":"create_database.py","file_name":"create_database.py","file_ext":"py","file_size_in_byte":6092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"21422833","text":"#-------------------------------------------------------------------------------\n# Name: module1\n# Purpose:\n#\n# Author: vich1119\n#\n# Created: 31/08/2013\n# Copyright: (c) vich1119 2013\n# Licence: \n#-------------------------------------------------------------------------------\nimport sys\nimport os\nimport wx\n\nfrom lipids_search import *\nfrom calcMass import *\n\nclass MainWindow(wx.Frame):\n def __init__(self, parent, title):\n wx.Frame.__init__(self, parent, title=title, size=(700,1000))\n\n # self.control = wx.TextCtrl(self, style=wx.TE_MULTILINE)\n# self.control = MyCanvas(self)\n\n\n self.CreateStatusBar() # A StatusBar in the bottom of the window\n\n # Setting up the menu.\n\n\n####################################\n\n panel = wx.Panel(self)\n\n font = wx.SystemSettings_GetFont(wx.SYS_SYSTEM_FONT)\n font.SetPointSize(9)\n\n\n############### Search masses ##################################################\n wx.StaticText(panel, label='Mass to search', pos=(220,10))\n self.tc_mz = wx.TextCtrl(panel, pos=(220, 25), size=(50,20))\n wx.StaticText(panel, label='m/z tolerance', pos=(300,10))\n self.tc_mzTol = wx.TextCtrl(panel, pos=(300,25), size=(50,20))\n\n wx.StaticText(panel, label='RT to search', pos=(220,55))\n self.tc_rt = wx.TextCtrl(panel, pos=(220, 70), size=(50,20))\n wx.StaticText(panel, label='RT tolerance', pos=(300,55))\n self.tc_rtTol = wx.TextCtrl(panel, pos=(300,70), size=(50,20))\n\n wx.StaticText(panel, label='Ions to search', pos=(380,10))\n cb_h = wx.CheckBox(panel, label='H', pos=(380, 30))\n cb_h.SetValue(False)\n cb_na = wx.CheckBox(panel, label='Na', pos=(410, 30))\n cb_na.SetValue(False)\n\n st2 = wx.StaticText(panel, label='Search result', pos=(220, 110))\n self.tc_output = wx.TextCtrl(panel, pos=(220, 125), size=(200,20))\n btn_output = wx.Button(panel, label='Output', pos=(425,125), size=(60, 20))\n\n btn_search = wx.Button(panel, label='Search', pos=(220, 170), size=(60, 20))\n\n\n\n############### Formula to mass ################################################\n wx.StaticText(panel, label='Calc mass of the lipid (HG CN:DB)', pos=(10,10))\n self.tc_formula = wx.TextCtrl(panel, pos=(10,25))\n\n wx.StaticText(panel, label='H adduct', pos=(10,50))\n self.tc_HAdduct = wx.TextCtrl(panel, -1,pos=(10,65), size=(100, -1))\n\n wx.StaticText(panel, label='Na adduct', pos=(10,90))\n self.tc_NaAdduct = wx.TextCtrl(panel, -1,pos=(10,105), size=(100, -1))\n\n wx.StaticText(panel, label='K adduct', pos=(10,130))\n self.tc_KAdduct = wx.TextCtrl(panel, -1,pos=(10,145), size=(100, -1))\n\n wx.StaticText(panel, label='Ag adduct', pos=(10,170))\n self.tc_AgAdduct = wx.TextCtrl(panel, -1,pos=(10,185), size=(100, -1))\n\n btn_formToMass = wx.Button(panel, label='Calc Mass', pos=(120,25), size=(60, 20))\n\n################################################################################\n\n\n\n\n\n\n self.ions = []\n cb_h.Bind(wx.EVT_CHECKBOX, self.AddRemoveH)\n cb_na.Bind(wx.EVT_CHECKBOX, self.AddRemoveNa)\n##\n##\n self.headGroups = ['PC', 'PE', 'PI', 'PS']\n## cb1.Bind(wx.EVT_CHECKBOX, self.AddRemovePC)\n## cb2.Bind(wx.EVT_CHECKBOX, self.AddRemovePE)\n## cb3.Bind(wx.EVT_CHECKBOX, self.AddRemovePI)\n## cb4.Bind(wx.EVT_CHECKBOX, self.AddRemovePS)\n\n\n\n# panel.SetSizer(vbox)\n\n# self.Bind(wx.EVT_BUTTON, self.OnOpenInput, btn_input)\n self.Bind(wx.EVT_BUTTON, self.OnOpenOutput, btn_output)\n self.Bind(wx.EVT_BUTTON, self.OnSearch, btn_search)\n self.Bind(wx.EVT_BUTTON, self.OnFormToMass, btn_formToMass)\n\n###########################\n\n\n self.Show(True)\n\n def AddRemovePC(self, e):\n\n sender = e.GetEventObject()\n isChecked = sender.GetValue()\n\n if isChecked:\n self.headGroups.append('PC')\n else:\n self.headGroups = [x for x in self.headGroups if x != 'PC']\n\n def AddRemovePE(self, e):\n\n sender = e.GetEventObject()\n isChecked = sender.GetValue()\n\n if isChecked:\n self.headGroups.append('PE')\n else:\n self.headGroups = [x for x in self.headGroups if x != 'PE']\n\n def AddRemovePI(self, e):\n\n sender = e.GetEventObject()\n isChecked = sender.GetValue()\n\n if isChecked:\n self.headGroups.append('PI')\n else:\n self.headGroups = [x for x in self.headGroups if x != 'PI']\n\n def AddRemovePS(self, e):\n\n sender = e.GetEventObject()\n isChecked = sender.GetValue()\n\n if isChecked:\n self.headGroups.append('PS')\n else:\n self.headGroups = [x for x in self.headGroups if x != 'PS']\n\n\n def AddRemoveH(self, e):\n\n sender = e.GetEventObject()\n isChecked = sender.GetValue()\n\n if isChecked:\n self.ions.append('H')\n else:\n self.ions = [x for x in self.ions if x != 'H']\n\n\n\n def AddRemoveNa(self, e):\n\n sender = e.GetEventObject()\n isChecked = sender.GetValue()\n\n if isChecked:\n self.ions.append('Na')\n else:\n self.ions = [x for x in self.ions if x != 'Na']\n\n\n\n def OnOpenInput(self,e):\n wildcard = \"Masses to search file (*.txt)|*.txt\"\n\n dlg = wx.FileDialog(\n self, message=\"Choose a file with masses to search\",\n defaultDir=os.getcwd(),\n defaultFile=\"\",\n wildcard=wildcard,\n style=wx.OPEN | wx.CHANGE_DIR\n )\n\n # Show the dialog and retrieve the user response. If it is the OK response,\n # process the data.\n if dlg.ShowModal() == wx.ID_OK:\n # This returns a Python list of files that were selected.\n paths = dlg.GetPaths()\n\n for path in paths:\n self.tc1.SetValue(path)\n\n\n # Destroy the dialog. Don't do this until you are done with it!\n # BAD things can happen otherwise!\n dlg.Destroy()\n\n def OnOpenOutput(self,e):\n wildcard = \"Results file (*.txt)|*.txt\"\n\n dlg = wx.FileDialog(\n self, message=\"Choose a file for results\",\n defaultDir=os.getcwd(),\n defaultFile=\"\",\n wildcard=wildcard,\n style=wx.OPEN | wx.CHANGE_DIR\n )\n\n # Show the dialog and retrieve the user response. If it is the OK response,\n # process the data.\n if dlg.ShowModal() == wx.ID_OK:\n # This returns a Python list of files that were selected.\n paths = dlg.GetPaths()\n for path in paths:\n self.tc_output.SetValue(path)\n\n\n\n # Destroy the dialog. Don't do this until you are done with it!\n # BAD things can happen otherwise!\n dlg.Destroy()\n\n def OnSearch(self,e):\n self.lipids = LipidsID()\n outPath = self.tc_output.GetValue()\n self.lipids.SearchMass1(float(self.tc_mz.GetValue()), self.headGroups,\n self.ions, float(self.tc_mzTol.GetValue()), 14,50,0,10)\n self.lipids.SaveOutput(outPath)\n if self.tc_rt != '':\n self.lipids.FilterRT(float(self.tc_rt.GetValue()), float(self.tc_rtTol.GetValue()))\n\n def OnFormToMass(self,e):\n a = FormToMass()\n formula = self.tc_formula.GetValue()\n resultMasses = a.ParceFormula(formula)\n\n self.tc_HAdduct.ChangeValue(str(resultMasses[0]))\n self.tc_NaAdduct.ChangeValue(str(resultMasses[1]))\n self.tc_KAdduct.ChangeValue(str(resultMasses[2]))\n self.tc_AgAdduct.ChangeValue(str(resultMasses[3]))\n\n\n\n # myFile.write(\"%s %s %s \\n\" % d)\napp = wx.App(False)\nframe = MainWindow(None, \"Lipids identification\")\napp.MainLoop()\n","sub_path":"2015-01-23/lSearch.py","file_name":"lSearch.py","file_ext":"py","file_size_in_byte":7834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"296467548","text":"\"\"\"\ntimethis.py\n\nAuthor : David Beazley\n http://www.dabeaz.com\n Copyright (C) 2010\n\ntimethis is a utility library for making simple timing benchmarks. A\nsingle function timethis() is provided. The function operates as\neither a context manager or a decorator. Here are some examples.\n\nIf you want to time a block of code, do this:\n\nwith timethis(\"Counting to a million\"):\n n = 0\n while n < 1000000:\n n += 1\n\nThe string in quotes is a description that describes the code block\nin question. It will be printed in the output.\n\nIf you want to time a function, you can use a decorator:\n\n@timethis\ndef count_to_a_million():\n n = 0\n while n < 1000000:\n n += 1\n\ncount_to_a_million()\n\nAll timing output is collected and not printed until a program\nexits. If any code block or function marked with timethis() is\nexecuted more than once, timing measurements are collected \nand used to calculate a mean and standard deviation.\n\"\"\"\nimport sys\n\nif sys.version_info < (2,6):\n if sys.version_info < (2,5):\n raise ImportWarning('with statement not available for Python %s' % sys.version)\n from __future__ import with_statement\n\nimport atexit\nimport time\nimport math\nfrom contextlib import contextmanager\nfrom collections import defaultdict\n\n# Dictionary holding timing measurements\n_stats = defaultdict(list)\n\n# Exit processing to print performance results\ndef _printstats():\n if not _stats:\n return\n maxwidth = max(len(str(key)) for key in _stats)\n for key,times in sorted(_stats.items(),key=lambda x: str(x[0])):\n # Compute average and standard deviation\n mean = sum(times)/float(len(times))\n stddev = math.sqrt(sum((x-mean)**2 for x in times)/len(times))\n print(\"{0:<{maxwidth}s} : {1:0.5f}s : N={2:5d} : stddev={3:0.5f}\".format(\n key,mean,len(times),stddev,maxwidth=maxwidth))\n\natexit.register(_printstats)\n\n# This utility function is used to perform timing benchmarks\ndef timethis(what):\n @contextmanager\n def benchmark():\n start = time.time()\n yield\n end = time.time()\n _stats[what].append(end-start)\n if hasattr(what,\"__call__\"):\n def timed(*args,**kwargs):\n with benchmark():\n return what(*args,**kwargs)\n return timed\n else:\n return benchmark()\n\n# Example\nif __name__ == '__main__':\n # A single measurement\n with timethis(\"count to ten million\"):\n n = 0\n while n < 10000000:\n n += 1\n\n # Repeated measurements\n for i in range(10):\n with timethis(\"count to one million\"):\n n = 0\n while n < 1000000:\n n += 1\n\n # A function call\n @timethis\n def count_to_a_million():\n n = 0\n while n < 1000000:\n n += 1\n\n count_to_a_million()\n count_to_a_million()\n count_to_a_million()\n","sub_path":"stdnet/utils/timethis.py","file_name":"timethis.py","file_ext":"py","file_size_in_byte":2898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"154873648","text":"from settings import HOST, PORT\nimport sys\nsys.path.append('collections')\nfrom synopsis import synopsis\nfrom flask import Flask, jsonify, request\n\napp = Flask(__name__)\n\n@app.route('/')\ndef up():\n\treturn 'PaniniApi Up'\n\n@app.route('/synopsis', methods=['GET'])\ndef _get_synopsis():\n response = synopsis()\n return jsonify(response)\n\n@app.route('/synopsis', methods=['POST'])\ndef _post_synopsis():\n response = synopsis(request)\n return jsonify(response)\n\napp.run(host=HOST, port=PORT, debug=True)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"250421595","text":"import boto\nimport functools\nimport sys\nimport pyinotify\nimport argparse\nimport os\nfrom boto.s3.key import Key\n\nAWS_ACCESS_KEY_ID = ''\nAWS_SECRET_ACCESS_KEY = ''\nBUCKET_NAME = 'hwatcher-bucket'\n\ndef setup_arg_parser():\n parser = argparse.ArgumentParser(description='Create a daemon that watches the given directory and the stores the pid of the newly created subprocess in the given pid file')\n parser.add_argument('-pid', help='The file to store the pid, this file should NOT exist before calling.')\n parser.add_argument('-dir', help='The directory to watch.')\n return parser\n\ndef upload_to_s3(filepath):\n conn = boto.connect_s3(AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY)\n bucket = conn.get_bucket(BUCKET_NAME)\n bucket_key = Key(bucket)\n bucket_key.key = os.path.basename(filepath)\n bucket_key.set_contents_from_filename(filepath)\n\nclass EventHandler(pyinotify.ProcessEvent):\n def process_IN_CLOSE_WRITE(self, event): \n upload_to_s3(event.pathname)\n\nif __name__ == '__main__':\n ## parse cmd arguments\n parser = setup_arg_parser()\n args = parser.parse_args()\n pid_file_path = args.pid\n watch_dir = args.dir \n\n ## create watcher and notifier\n wm = pyinotify.WatchManager()\n notifier = pyinotify.Notifier(wm, EventHandler())\n wm.add_watch(watch_dir, pyinotify.IN_CLOSE_WRITE)\n \n ## start notifier\n notifier.loop(daemonize=True, pid_file=pid_file_path)\n\n","sub_path":"daemon.py","file_name":"daemon.py","file_ext":"py","file_size_in_byte":1433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"116452653","text":"# -*- coding: utf-8 -*-\n# author : anthony\n# version : python 3.6\n# ssh传输命令和命令写入交互\n\n# Copyright (C) 2003-2007 Robey Pointer \n#\n# This file is part of paramiko.\n#\n# Paramiko is free software; you can redistribute it and/or modify it under the\n# terms of the GNU Lesser General Public License as published by the Free\n# Software Foundation; either version 2.1 of the License, or (at your option)\n# any later version.\n#\n# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY\n# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR\n# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more\n# details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with Paramiko; if not, write to the Free Software Foundation, Inc.,\n# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.\n\n\nimport socket\nimport sys\nfrom paramiko.py3compat import u\nfrom models import models\n# from modules.views import log_recording\nimport datetime\nimport redis\nimport time\n\n# windows does not have termios...\ntry:\n import termios\n import tty\n\n has_termios = True\nexcept ImportError:\n has_termios = False\n\n\ndef interactive_shell(chan, user_obj, bind_host_obj, cmd_caches, log_recording):\n '''\n :param chan:\n :param user_obj:\n :param bind_host_obj: 主机\n :param cmd_caches: 命令列表\n :param log_recording: 日志记录\n :return:\n '''\n # 判断是否是windows shell\n if has_termios:\n posix_shell (chan, user_obj, bind_host_obj, cmd_caches, log_recording)\n else:\n windows_shell (chan)\n\n\ndef posix_shell(chan, user_obj, bind_host_obj, cmd_caches, log_recording):\n '''\n\n :param chan:\n :param user_obj:\n :param bind_host_obj:\n :param cmd_caches:\n :param log_recording:\n :return:\n '''\n import select\n\n oldtty = termios.tcgetattr (sys.stdin)\n try:\n tty.setraw (sys.stdin.fileno ())\n tty.setcbreak (sys.stdin.fileno ())\n chan.settimeout (0.0)\n cmd = ''\n tab_key = False\n while True:\n r, w, e = select.select ([chan, sys.stdin], [], [])\n if chan in r:\n try:\n x = u (chan.recv (1024))\n if tab_key:\n if x not in ('\\x07', '\\r\\n'):\n # print('tab:',x)\n cmd += x\n tab_key = False\n if len (x) == 0:\n sys.stdout.write ('\\r\\n*** EOF\\r\\n')\n # test for redis to mysql\n break\n sys.stdout.write (x)\n sys.stdout.flush ()\n except socket.timeout:\n pass\n if sys.stdin in r:\n x = sys.stdin.read (1)\n if '\\r' != x:\n cmd += x\n else:\n user_record_cmd = user_obj.username + '_user_record'\n pool = redis.ConnectionPool (host='192.168.21.128', port=6379)\n user_record = [user_obj.id, bind_host_obj.id, 'cmd', cmd,\n time.strftime (\"%Y-%m-%d %H:%M:%S\", time.localtime ())]\n r = redis.Redis (connection_pool=pool)\n r.lpush (user_record_cmd, user_record)\n cmd = ''\n # 最后用户退出的时候取出来log_item 列表循环写入数据库\n if '\\t' == x:\n tab_key = True\n if len (x) == 0:\n break\n chan.send (x)\n\n finally:\n termios.tcsetattr (sys.stdin, termios.TCSADRAIN, oldtty)\n\n\n# thanks to Mike Looijmans for this code\ndef windows_shell(chan):\n '''\n\n :param chan:\n :return:\n '''\n import threading\n\n sys.stdout.write (\"Line-buffered terminal emulation. Press F6 or ^Z to send EOF.\\r\\n\\r\\n\")\n\n def writeall(sock):\n while True:\n data = sock.recv (256)\n if not data:\n sys.stdout.write ('\\r\\n*** EOF ***\\r\\n\\r\\n')\n sys.stdout.flush ()\n break\n sys.stdout.write (data.decode ())\n sys.stdout.flush ()\n\n writer = threading.Thread (target=writeall, args=(chan,))\n writer.start ()\n\n try:\n while True:\n d = sys.stdin.read (1)\n if not d:\n break\n chan.send (d)\n except EOFError:\n # user hit ^Z or F6\n pass\n\n\n\n\n\n","sub_path":"python_py_project/myjumpserver/MyJumpServer/modules/interactive.py","file_name":"interactive.py","file_ext":"py","file_size_in_byte":4589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"107887422","text":"import matplotlib.patches as patches\nimport matplotlib.pyplot as plt\nimport scipy.constants\nfrom Vector import Vector\n\n\nclass Body:\n def __init__(self, position, velocity, mass):\n self._position = position\n self._velocity = velocity\n self._mass = mass\n\n def move(self, force, dt): # dt = time elapsed\n acceleration = force / self._mass\n # self._velocity = self._velocity + acceleration * dt\n self._velocity += acceleration * dt\n self._position += self._velocity * dt\n return self\n\n def force_from(self, other):\n G = scipy.constants.G\n delta = other._position - self._position\n dist = abs(delta)\n magnitude = (G * self._mass * other._mass) / (dist ** 2)\n return magnitude * delta.direction()\n\n def get_patch(self, radius=None):\n if radius is None:\n radius = self._mass/22\n x, y = self._position\n return patches.Circle((x, y), radius, facecolor='black')\n\n\ndef main():\n pos = Vector([3, 4])\n velocity = Vector([.5, .1])\n body = Body(pos, velocity, 50)\n body.move(Vector([.2, .1]), 2)\n fig, ax = plt.subplots()\n ax.axis('equal')\n ax.add_patch(body.get_patch())\n ax.set_xlim(-10, 10)\n ax.set_ylim(-10, 10)\n plt.savefig('tes.png')\n plt.show()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"Body.py","file_name":"Body.py","file_ext":"py","file_size_in_byte":1346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"256683556","text":"import sqlite3\nfrom flask_restful import Resource, reqparse\nfrom flask_jwt import jwt_required\nfrom flask import jsonify\nfrom models.item import ItemModel\n\nclass Item(Resource):\n parser = reqparse.RequestParser()\n parser.add_argument('price', type=str, required=True, help='This field cannot be left blank.')\n\n @jwt_required()\n def get(self, name):\n item = ItemModel.find_by_name(name)\n if item is not None:\n return {'Status' : 200, 'Message' : 'Item Found', 'Item' : item.json()}, 200\n return {'Status' : 404, 'Message' : 'Item Not Found'}, 404\n\n def post(self, name):\n if ItemModel.find_by_name(name) is not None:\n return {'Status' : 400, 'Message' : 'Bad Request', 'Details' : f'{name} Item already present in database'}, 400\n request_data = Item.parser.parse_args()\n new_item = ItemModel(name, request_data['price'])\n try:\n new_item.save_to_db()\n return {'Status' : 201, 'Message' : 'Item Added Successfully', 'Item Details' : new_item.json()}, 201\n except:\n return {'Status' : 500, 'Message' : 'Error: Failed to insert into the database.'}, 500\n\n def delete(self, name):\n item = ItemModel.find_by_name(name)\n if item is None:\n return {'Status' : 400, 'Message' : 'Bad Request', 'Details' : f'{name} Item not present in database'}, 400\n item.delete_from_db()\n return {'Status' : 200, 'Message' : 'Item Deleted Successfully'}, 200\n\n def put(self, name):\n data = Item.parser.parse_args()\n item = ItemModel.find_by_name(name)\n if item is None:\n item = ItemModel(name, data['price'])\n else:\n item.price = data['price']\n try:\n item.save_to_db()\n return {'Status' : 200, 'Message' : 'Item Updated Successfully', 'Item Details' : updated_item.json()}, 200\n except:\n return {'Status' : 500, 'Message' : f'Error: Failed to update {name} price in the database.'}, 500\n\nclass ItemList(Resource):\n def get(self):\n rows = ItemModel.get_all()\n data = list(map(lambda x: {'name': x.name, 'price': x.price}, rows))\n return jsonify({'Items' : data})\n","sub_path":"resources/item.py","file_name":"item.py","file_ext":"py","file_size_in_byte":2229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"48787009","text":"#!/usr/bin/python\n\nimport sys\nprint(\"Select something from the menu: \")\nprint(\"\")\nprint(\"Press 1 to convert from decimal\")\nprint(\"Press 2 to convert from binary\")\nprint(\"\")\nin_str = input(str(\"Select a number or press 000 to exit: \"))\n\nexitcode = 0 #keeps code in loop indefinitely\n\n#Convert from dec to bin\nif (in_str == 1):\n while exitcode < 1:\n inpt = raw_input(\"Enter a decimal number: \")\n out = int(inpt, 10)\n #print (bin(out))\n\n #Check if user entered \"killcode\"\n if (inpt == \"000\" or inpt == \"000\"):\n break\n else:\n print (bin(out))\n\n#Convert from bin to dec\nif (in_str == 2):\n while exitcode < 1:\n inpt = raw_input(\"Enter a binary number: \")\n out = int(inpt, 2)\n\n #Check if user entered \"killcode\"\n if (inpt == \"000\" or inpt == \"000\"):\n break\n else:\n print (out)\n \n \n","sub_path":"gpio-test/multi-conv.py","file_name":"multi-conv.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"51794584","text":"from unittest import TestCase\nfrom packages.store_tools import Array\n\n\nclass TestArray(TestCase):\n def test_clear(self):\n ar_int = Array(10)\n ar_int.clear(3)\n for i in ar_int:\n self.assertEqual(i, ar_int[i])\n","sub_path":"tests/test_array.py","file_name":"test_array.py","file_ext":"py","file_size_in_byte":243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"225513518","text":"import urllib.parse\nfrom datetime import datetime, date\n\nimport dash\nimport dash_bootstrap_components as dbc\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport plotly.express as px\nimport plotly.graph_objects as go\nfrom btc_analysis.mongo_func import query_mongo\nfrom dash.dependencies import Input, Output\nfrom btc_analysis.market_data import yesterday_str\nfrom btc_analysis.dashboard_func import (\n date_elements, btc_price_min, btc_yearly_perf)\n\n# start app\n\napp = dash.Dash(__name__, external_stylesheets=[dbc.themes.CYBORG],\n meta_tags=[{'name': 'viewport',\n 'content': 'width=device-width, initial-scale=1.0'}])\n\n# app.css.append_css(\n# {\"external_url\": \"https://codepen.io/chriddyp/pen/bWLwgP.css\"})\n\nserver = app.server\n\n# ----------\n# Date variables\n\nS2F_list = [\"S2F price 365d average\", \"S2F price\"]\n# ----------------\n# app layout: bootstrap\n\napp.layout = dbc.Container([\n\n # create as much rows and columns as needed foe the dashboard\n dbc.Row([\n dbc.Col(html.H1(\"Bitcoin & Blockchain Statistics\",\n className='text-center text-primary, mb-4'),\n width=12)\n ]),\n\n dbc.Row([\n dbc.Col([\n\n dbc.Card(\n [\n dbc.CardBody(\n [\n dbc.Row(\n [\n dbc.Col([\n\n dcc.Graph(id=\"price_indicator\", figure={},\n config={'displayModeBar': False}),\n\n html.Hr(),\n\n html.Label(['Date Range:']),\n\n html.Br(),\n\n dcc.DatePickerRange(\n id='date_range_price',\n min_date_allowed=date(\n 2011, 2, 1),\n start_date=date(2011, 2, 1),\n ),\n\n ])\n ]),\n\n html.Hr(),\n\n dbc.Row(\n [\n dbc.Col([\n\n\n dcc.Graph(id=\"btc_price\", figure={},\n config={'displayModeBar': False}),\n\n html.A(\n 'Download Data',\n id='download-link_price',\n download=\"btc_price.csv\",\n href='',\n target=\"_blank\"\n ),\n\n ], width=7),\n\n dbc.Col([\n\n dcc.Graph(\n id='btc_perf', figure={}),\n\n ], width=5)\n\n ], no_gutters=True),\n\n ]),\n ],\n style={\"width\": \"70rem\"},\n className=\"mt-3\"\n )\n\n ]),\n\n ], justify='center'),\n\n dbc.Row([\n dbc.Col([\n\n dbc.Card(\n [\n dbc.CardBody(\n [\n\n html.Hr(),\n\n dbc.Row(\n [\n dbc.Col([\n\n dcc.Graph(id=\"btc_price_log\", figure={},\n config={'displayModeBar': False}),\n\n\n ], width=8),\n\n dbc.Col([\n\n dcc.Graph(\n id='btc_log_perf', figure={}),\n\n ], width=4)\n ], no_gutters=True),\n\n ]),\n ],\n style={\"width\": \"70rem\"},\n className=\"mt-3\"\n )\n\n ]),\n\n ], justify='center'),\n\n\n dbc.Row([\n dbc.Col([\n\n dbc.Card(\n [\n dbc.CardBody(\n [\n dbc.Row(\n [\n dbc.Col([\n\n\n dcc.Graph(id=\"supply\", figure={},\n config={'displayModeBar': True}),\n\n\n ])\n ]),\n\n ]),\n ],\n style={\"width\": \"70rem\"},\n className=\"mt-3\"\n )\n\n ]),\n\n ], justify='center'),\n\n dbc.Row([\n dbc.Col([\n\n dbc.Card(\n [\n dbc.CardBody(\n [\n dbc.Row(\n [\n dbc.Col([\n\n html.Hr(),\n\n html.Label(['Date Range:']),\n\n html.Br(),\n\n dcc.DatePickerRange(\n id='date_range_hash',\n min_date_allowed=date(\n 2011, 2, 1),\n start_date=date(2020, 1, 1),\n ),\n\n\n\n dcc.Graph(id=\"hash_rate\", figure={},\n config={'displayModeBar': True}),\n\n\n ])\n ]),\n\n ]),\n ],\n style={\"width\": \"70rem\"},\n className=\"mt-3\"\n )\n\n ]),\n\n ], justify='center'),\n\n\n dcc.Interval(id='update', n_intervals=0, interval=1000 * 5),\n\n dcc.Interval(id='df-update', interval=100000, n_intervals=0)\n\n])\n\n# --------------------------\n# Callbacks part\n\n# bitcoin price\n\n\n@app.callback(\n Output(component_id=\"date_range_price\",\n component_property=\"initial_visible_month\"),\n Input(component_id=\"df-update\", component_property=\"n_intervals\")\n)\ndef set_initial_date(n):\n\n max_y, max_m, _ = date_elements()\n\n initial_visible_month_ = date(max_y, max_m, 1)\n\n return initial_visible_month_\n\n\n@app.callback(\n Output(component_id=\"date_range_price\",\n component_property=\"max_date_allowed\"),\n Input(component_id=\"df-update\", component_property=\"n_intervals\")\n)\ndef set_max_date(n):\n\n max_y, max_m, max_d = date_elements()\n\n max_date = date(max_y, max_m, max_d)\n\n return max_date\n\n\n@app.callback(\n Output(component_id=\"date_range_price\",\n component_property=\"end_date\"),\n Input(component_id=\"df-update\", component_property=\"n_intervals\")\n)\ndef set_end_date(n):\n\n max_y, max_m, max_d = date_elements()\n\n end_date_ = date(max_y, max_m, max_d)\n\n return end_date_\n\n\n@app.callback(\n [\n Output(component_id='btc_price', component_property='figure'),\n Output(component_id='btc_perf', component_property='figure'),\n Output(component_id='download-link_price', component_property='href')\n ],\n [\n Input(component_id='date_range_price',\n component_property='start_date'),\n Input(component_id='date_range_price', component_property='end_date'),\n Input(component_id='df-update', component_property='n_intervals'),\n ]\n)\ndef update_index_df(start, stop, n):\n\n df_price = query_mongo(\"btc_analysis\", \"btc_price\")\n df_price[\"Datetime\"] = [datetime.strptime(\n d, \"%d-%m-%Y\") for d in df_price[\"Date\"]]\n\n dff = df_price.copy()\n dff_for_perf = df_price.copy()\n df_to_download = df_price.copy()\n\n dff_range = dff.loc[dff.Datetime.between(\n start, stop, inclusive=True)]\n dff_range.reset_index(drop=True, inplace=True)\n\n price_ = go.Figure()\n\n price_.add_trace(\n go.Scatter(\n x=dff_range[\"Datetime\"],\n y=dff_range[\"BTC Price\"],\n name=\"BTC Price\",\n mode='lines',\n line_color=\"#FEAF16\",\n ))\n\n price_.update_layout(\n title_text=\"Bitcoin Price\",\n template='plotly_dark'\n )\n\n price_.update_layout(legend=dict(\n orientation=\"h\",\n yanchor=\"bottom\",\n y=1.02,\n xanchor=\"right\",\n x=1,\n ),\n height=600\n )\n\n price_.update_yaxes(\n tickprefix=\"$\",\n title_text=\"BTC Price (USD)\",\n fixedrange=True\n )\n\n price_.update_xaxes(\n title_text=\"Date\",\n )\n\n # table\n\n perf_df = btc_yearly_perf(dff_for_perf)\n\n table_perf = go.Figure(data=[go.Table(\n columnwidth=[100, 80, 100],\n header=dict(values=list(perf_df.columns),\n line_color='white',\n fill_color='black',\n align='center',\n font=dict(color='white', size=12),\n height=35),\n cells=dict(values=[perf_df.Date, perf_df.Price, perf_df[\"Yearly Performance\"]],\n line_color='white',\n fill_color='black',\n align=['center', 'right', 'right'],\n font=dict(color='white', size=11),\n format=[None, \",.2f\", \",.2f%\"],\n suffix=[None, '$', '%'],\n height=25)\n )\n ])\n\n table_perf.update_layout(\n title_text=\"Bitcoin Yearly Performances\",\n template='plotly_dark',\n height=600,\n )\n\n csv_string_price = df_to_download.to_csv(index=False, encoding='utf-8')\n csv_string_price = \"data:text/csv;charset=utf-8,\" + \\\n urllib.parse.quote(csv_string_price)\n\n return price_, table_perf, csv_string_price\n\n\n@ app.callback(\n Output('price_indicator', 'figure'),\n Input('update', 'n_intervals')\n)\ndef update_indicator(timer):\n\n df_price = query_mongo(\"btc_analysis\", \"btc_price\")\n df_price[\"Datetime\"] = [datetime.strptime(\n d, \"%d-%m-%Y\") for d in df_price[\"Date\"]]\n dff_p = df_price.copy()\n\n dff_last_p = dff_p.tail(2)\n dff_ind_y = dff_last_p[dff_last_p['Datetime']\n == dff_last_p['Datetime'].min()]['BTC Price'].values[0]\n dff_ind_t = dff_last_p[dff_last_p['Datetime']\n == dff_last_p['Datetime'].max()]['BTC Price'].values[0]\n\n fig_indicator = go.Figure(go.Indicator(\n mode=\"delta\",\n value=dff_ind_t,\n delta={'reference': dff_ind_y, 'relative': True, 'valueformat': '.2%'}))\n\n fig_indicator.update_traces(delta_font={'size': 18})\n\n fig_indicator.update_layout(height=50, width=100)\n\n if dff_ind_t >= dff_ind_y:\n fig_indicator.update_traces(delta_increasing_color='green')\n elif dff_ind_t < dff_ind_y:\n fig_indicator.update_traces(delta_decreasing_color='red')\n\n return fig_indicator\n\n\n# btc price log scale\n\n@ app.callback(\n [Output(component_id='btc_price_log', component_property='figure'),\n Output(component_id='btc_log_perf', component_property='figure'),\n ],\n Input(component_id='df-update', component_property='n_intervals')\n\n)\ndef update_log_price(n):\n\n df_price = query_mongo(\"btc_analysis\", \"btc_price\")\n df_price[\"Datetime\"] = [datetime.strptime(\n d, \"%d-%m-%Y\") for d in df_price[\"Date\"]]\n\n dff = df_price.copy()\n\n min_point = btc_price_min(dff)\n\n model_cap = go.Figure()\n\n model_cap.add_trace(\n go.Scatter(\n x=dff[\"Datetime\"],\n y=dff[\"BTC Price\"],\n name=\"BTC Price Log Scale\",\n mode='lines',\n line_color=\"#FEAF16\",\n ))\n\n model_cap.add_trace(\n go.Scatter(\n x=min_point[\"Datetime\"],\n y=min_point[\"BTC Price\"],\n name=\"BTC Price Minimum\",\n text=min_point[\"BTC Price\"],\n textposition=\"bottom center\",\n # mode='markers+text',\n mode='markers',\n marker=dict(color=\"#C0C0C0\",\n size=10\n ),\n ))\n\n model_cap.update_layout(\n title_text=\"Bitcoin Price Log Scale\",\n template='plotly_dark'\n )\n\n # model_cap.add_annotation(x=min_point[\"Datetime\"],\n # y=min_point[\"BTC Price\"],\n # text=min_point[\"BTC Price\"],\n # showarrow=True,\n # arrowhead=1)\n\n model_cap.update_layout(legend=dict(\n orientation=\"h\",\n yanchor=\"bottom\",\n y=1.02,\n xanchor=\"right\",\n x=1\n ),\n height=500,\n )\n\n model_cap.update_yaxes(\n tickvals=[1, 10, 100, 1000, 10000, 100000, 1000000],\n tickprefix=\"$\",\n title_text=\"BTC Price (USD)\",\n type=\"log\",\n fixedrange=True\n )\n\n model_cap.update_xaxes(\n title_text=\"Date\",\n )\n\n # table\n\n min_point_dff = min_point.copy()\n min_point_dff[\"Year\"] = [int(d.year) for d in min_point_dff[\"Datetime\"]]\n min_point_dff = min_point_dff.drop(columns=[\"Datetime\", \"Date\"])\n\n table_log_perf = go.Figure(data=[go.Table(\n columnwidth=[60, 150],\n header=dict(values=[\"Year\", \"Min Price\"],\n line_color='white',\n fill_color='black',\n align='center',\n font=dict(color='white', size=12),\n height=35),\n cells=dict(values=[min_point_dff[\"Year\"], min_point_dff[\"BTC Price\"]],\n line_color='white',\n fill_color='black',\n format=[None, \",.2f\"],\n suffix=[None, '$'],\n align=['center', 'right'],\n font=dict(color='white', size=11),\n height=25)\n )\n ])\n\n table_log_perf.update_layout(\n title_text=\"Bitcoin Minimum Prices\",\n template='plotly_dark',\n height=500,\n )\n\n return model_cap, table_log_perf\n\n# bitcoin supply\n\n\n@ app.callback(\n Output('supply', 'figure'),\n Input('df-update', 'n_intervals')\n\n\n)\ndef update_supply(n):\n\n supply_df = query_mongo(\"btc_analysis\", \"btc_total_supply\")\n supply_dff = supply_df.copy()\n\n try:\n\n supply_dff[\"Date\"] = [datetime.strptime(\n date, \"%Y-%m-%d\") for date in supply_dff[\"Date\"]]\n\n except TypeError:\n pass\n\n supply_graph = go.Figure()\n\n supply_graph.add_trace(\n go.Scatter(\n x=supply_dff[\"Date\"],\n y=supply_dff[\"Supply\"],\n name=\"BTC Effective Supply\",\n mode='lines',\n line_color='#FFFFFF',\n ))\n\n supply_graph.add_trace(\n go.Scatter(\n x=supply_dff[\"Date\"],\n y=supply_dff[\"Theoretical Supply\"],\n name=\"BTC Theoretical Supply\",\n mode='lines',\n line_color='#028A0F',\n ))\n\n supply_graph.update_layout(\n title_text=\"Bitcoin Supply\",\n template='plotly_dark'\n )\n\n supply_graph.update_layout(legend=dict(\n orientation=\"h\",\n yanchor=\"bottom\",\n y=1.02,\n xanchor=\"right\",\n x=1\n ))\n\n supply_graph.update_yaxes(\n title_text=\"Number of Bitcoin\",\n )\n supply_graph.update_xaxes(nticks=20,\n title_text=\"Date\"\n )\n\n return supply_graph\n\n\n# hash rate\n\n@app.callback(\n Output(component_id=\"date_range_hash\",\n component_property=\"initial_visible_month\"),\n Input(component_id=\"df-update\", component_property=\"n_intervals\")\n)\ndef set_initial_date_hash(n):\n\n max_y, max_m, _ = date_elements()\n\n initial_visible_month_ = date(max_y, max_m, 1)\n\n return initial_visible_month_\n\n\n@app.callback(\n Output(component_id=\"date_range_hash\",\n component_property=\"max_date_allowed\"),\n Input(component_id=\"df-update\", component_property=\"n_intervals\")\n)\ndef set_max_date_hash(n):\n\n max_y, max_m, max_d = date_elements()\n\n max_date = date(max_y, max_m, max_d)\n\n return max_date\n\n\n@app.callback(\n Output(component_id=\"date_range_hash\",\n component_property=\"end_date\"),\n Input(component_id=\"df-update\", component_property=\"n_intervals\")\n)\ndef set_end_date_hash(n):\n\n max_y, max_m, max_d = date_elements()\n\n end_date_ = date(max_y, max_m, max_d)\n\n return end_date_\n\n\n@ app.callback(\n Output(component_id='hash_rate', component_property='figure'),\n [\n Input(component_id='date_range_hash',\n component_property='start_date'),\n Input(component_id=\"date_range_hash\",\n component_property=\"end_date\"),\n Input(component_id='df-update', component_property='n_intervals')]\n)\ndef update_hash_rate(start, stop, n):\n\n hr_df = query_mongo(\"btc_analysis\", \"hash_rate\")\n hr_dff = hr_df.copy()\n\n hr_dff[\"Datetime\"] = [datetime.strptime(\n date, \"%Y-%m-%d\") for date in hr_dff[\"Date\"]]\n\n hr_dff_range = hr_dff.loc[hr_dff.Datetime.between(\n start, stop, inclusive=True)]\n hr_dff_range.reset_index(drop=True, inplace=True)\n\n hr_graph = go.Figure()\n\n hr_graph.add_trace(\n go.Scatter(\n x=hr_dff_range[\"Datetime\"],\n y=hr_dff_range[\"Hash Rate\"],\n name=\"BTC Hash Rate\",\n mode='lines',\n line_color='#FFFFFF',\n ))\n\n hr_graph.update_layout(\n title_text=\"Bitcoin Hash Rate\",\n template='plotly_dark'\n )\n\n hr_graph.update_layout(legend=dict(\n orientation=\"h\",\n yanchor=\"bottom\",\n y=1.02,\n xanchor=\"right\",\n x=1\n ))\n\n hr_graph.update_yaxes(\n title_text=\"Hash\",\n )\n hr_graph.update_xaxes(nticks=20,\n title_text=\"Date\"\n )\n\n return hr_graph\n\n\nprint(\"Done\")\n# --------------------\nif __name__ == '__main__':\n app.run_server(debug=True, port=3500, host='0.0.0.0')\n","sub_path":"dashboard/btc_stats_dashboard.py","file_name":"btc_stats_dashboard.py","file_ext":"py","file_size_in_byte":18702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"51946965","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass CnnTextClassifier2(nn.Module):\n def __init__(self, weights, num_classes,num_filters,drop_out=0.5,window_sizes=(3, 4, 5)):\n\n super(CnnTextClassifier2, self).__init__()\n\n self.embedding = nn.Embedding.from_pretrained(weights)\n self.n_layers=2\n self.hidden_dim=256\n\n self.convs = nn.ModuleList([\n nn.Conv2d(1, num_filters, [window_size, 300], padding=(window_size - 1, 0))\n for window_size in window_sizes\n ])\n self.drop_out=nn.Dropout(drop_out)\n self.fc = nn.Linear(num_filters * len(window_sizes), num_classes)\n self.cosine = nn.CosineSimilarity(dim=0)\n self.fc_out=nn.Linear(num_classes,2)\n self.soft_max = nn.Softmax()\n\n def forward(self, x):\n batch_size=x.size(0)\n x = self.embedding(x) # [batch_size, T, embeding_dim]\n\n # Apply a convolution + max pool layer for each window size\n x = torch.unsqueeze(x, 1) # [batch_size, C, T, embeding_dim] Add a channel dim.\n xs = []\n for conv in self.convs:\n x2=conv(x)\n x2 = F.relu(x2) # [batch_size, F, T, 1]\n x2 = torch.squeeze(x2, -1) # [batch_size, F, T]\n x2 = F.max_pool1d(x2, x2.size(2)) # [batch_size, F, 1]\n xs.append(x2)\n x = torch.cat(xs, 2) # [batch_size, F, window]\n x=self.drop_out(x)\n # FC\n x = x.view(x.size(0), -1) # [batch_size, F * window]\n logits = self.fc(x) # [batch_size, class]\n out_put=self.fc_out(logits)\n out_put=self.soft_max(out_put)\n # logits=logits.double()\n # list_out_test=[logits[x,:,] for x in range(batch_size)]\n #\n #\n # array=[]\n #\n # for index,v in enumerate(list_out_test):\n # x=list_out_test[index]\n # for i,value in enumerate(list_out_test):\n # y = self.cosine(x, value)\n # array.append(y)\n #\n # test = torch.stack(array,0)\n # test=test.view(batch_size,batch_size)\n\n # Prediction\n # probs = F.softmax(logits) # [batch_size, class]\n\n\n return out_put\n","sub_path":"caitien/model_cnn_2.py","file_name":"model_cnn_2.py","file_ext":"py","file_size_in_byte":2259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"621688945","text":"import re\nfrom resolving import Challenge\n\nthis_challenge = Challenge(file='equality')\nhtml = this_challenge.access_website()\n\nthis_challenge.print_action('Identifying the characters')\nresult = \"\".join(re.findall(\"[^A-Z]+[A-Z]{3}([a-z])[A-Z]{3}[^A-Z]+\", html))\nprint(result)\n\nthis_challenge.get_next_challenge(result)\n","sub_path":"03.py","file_name":"03.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"558432474","text":"from pymongo import MongoClient\nfrom pymongo import DESCENDING\nfrom pymongo import ASCENDING\nfrom matplotlib import pyplot\n\nclass MongoFindSample(object):\n\n def __init__(self, dbName, collectionName):\n self.client = MongoClient()\n self.db = self.client[dbName]\n self.collection = self.db.get_collection(collectionName)\n\n def find_one(self, projection=None,filter=None, sort=None):\n return self.collection.find_one(projection=projection,filter=filter,sort=sort)\n\n def find(self, projection=None,filter=None, sort=None):\n return self.collection.find(projection=projection,filter=filter,sort=sort)\n\n def count_documents(self, filter=None):\n return self.collection.count_documents(filter)\n\n\nmongo = MongoFindSample('test', 'salary')\nfindOne = mongo.find_one()\nprint('-----------------find_One-----------------')\nprint(type(findOne))\nprint(findOne)\n\nfind = mongo.find()\nprint('-------------------find-------------------')\n# print(type(find))\n# for i in range(find.count()):\n# print(find[i]) \ntry:\n doc = find.next()\n while doc != None:\n print(doc)\n doc = find.next()\nexcept StopIteration:\n pass\n\nprint('-------------------前方一致-------------------')\nresult1 = mongo.find(filter={'depId':{'$regex':'^A'}})\nprint(type(result1))\nfor doc in result1:\n print(doc)\n\nprint('-------------------後方一致-------------------')\nresult1 = mongo.find(filter={'depId':{'$regex':'2$'}})\nprint(type(result1))\nfor doc in result1:\n print(doc)\n\nprint('-------------------含む-------------------')\nresult1 = mongo.find(filter={'name':{'$regex':'田'}})\nprint(type(result1))\nfor doc in result1:\n print(doc)\n\nprint('-------------------以上-------------------')\nresult1 = mongo.find(filter={'salary':{'$gte':300000,'$lte':400000}})\nprint(type(result1))\nfor doc in result1:\n print(doc)\n\nprint('-------------------かつ-------------------')\nresult1 = mongo.find(filter={'$and':[{'name':'山田'},{'depId':'C0002'}]})\nprint(type(result1))\nfor doc in result1:\n print(doc)\n\nprint('-------------------または-------------------')\nresult1 = mongo.find(filter={'$or':[{'name':'山田'},{'depId':'B0001'}]})\nprint(type(result1))\nfor doc in result1:\n print(doc)\n\nresult1 = mongo.find(filter={'$and':[{'salary':{'$gte':300000}},{'salary':{'$lte':400000}}]})\nprint(type(result1))\nfor doc in result1:\n print(doc)\n# print('-------------------find-------------------')\n# #find = mongo.find(filter={'name':'山田','salary':{'$gte':400000}})\n# #find = mongo.find(projection={'_id':0, 'name':1, 'salary':1},sort=[('salary',DESCENDING),('name',ASCENDING)])\n# find = mongo.find()\n# y = []\n# data = []\n# for doc in find.sort([('salary',DESCENDING),('name',ASCENDING)]):\n# print(doc)\n# y += [doc['salary']]\n# data += [doc['name']]\n\n# x = range(len(y))\n# print(y)\n# print(data)\n# pyplot.bar(x, y, tick_label=data)\n# pyplot.show()\n","sub_path":"MongoFindSample.py","file_name":"MongoFindSample.py","file_ext":"py","file_size_in_byte":3207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"613040492","text":"def changeletter(pattern):\n \"\"\"\n mutete pattern by one letter\n \"\"\"\n newmismatch = list()\n nucleobases = ['A', 'T', 'C', 'G']\n for position, character in enumerate(pattern):\n for letter in nucleobases:\n if position == 0:\n new_pattern = letter + pattern[1:]\n else:\n new_pattern = pattern[:position] + letter + pattern[position + 1:]\n \n newmismatch.append(new_pattern)\n \n corrected = correct_list(newmismatch)\n return(corrected)\n \ndef modifylist(mismatch):\n \"\"\"\n mutate a list\n \"\"\"\n newmismatch = list()\n for word in mismatch:\n newmismatch = newmismatch + changeletter(word)\n\n corrected = correct_list(newmismatch)\n return(corrected)\n\ndef correct_list(mismatch):\n \"\"\"\n get rid of repeting instances in a list\n \"\"\"\n \n corrected_mismatch = list()\n for pos in mismatch:\n if pos in corrected_mismatch:\n pass\n else:\n corrected_mismatch.append(pos)\n return(corrected_mismatch)\n\ndef aproximate_match(candidate_pattern, num_of_mismatches, the_whole_string):\n\n \"\"\" returns indeces of aprox matching words \"\"\"\n\n pattern = candidate_pattern\n n = num_of_mismatches\n string = the_whole_string\n\n mustmatch = len(pattern) - n # this many char must match\n indeces = list()\n\n string_for_counting = string[:-len(pattern) + 1]\n for pos, val in enumerate(string_for_counting):\n word = string[pos:pos + len(pattern)]\n similarity = 0\n for ind, char in enumerate(word):\n if char == pattern[ind]:\n similarity += 1\n if similarity >= mustmatch:\n indeces.append(pos)\n \n return(indeces) \n\n","sub_path":"biolib.py","file_name":"biolib.py","file_ext":"py","file_size_in_byte":1768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"617912316","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nfrom keras.models import load_model\nimport numpy as np\nmodel = load_model('model.h5')\n\n\n# In[23]:\n\n\nheight=float(input(\"please enter your height in meter:\"))\n\nmass=float(input(\"please enter your mass:\"))\nbmi=mass/(height*height)\n\nprint(\"your bmi:\",format(bmi, '.2f'))\n\n\n# In[26]:\n\n\ngender=input(\"please enter your gender:\")\nif gender == \"male\" :\n a=1\nelif gender == \"female\" :\n a=0\nelif gender == \"Female\" :\n a=0\n \nelif gender == \"Male\" :\n a=1\n \n\n\n# In[27]:\n\n\nab=model.predict([[[mass,height,bmi,a]]])\nm=np.max(model.predict([[[ mass,height,bmi,a]]]))\nif ab[0,0] == m:\n print('ideal')\nelif ab[0,1] == m:\n print('obesitas')\nelif ab[0,2] == m:\n print('over weight')\nelif ab[0,3] == m:\n print('under weight')\n\n","sub_path":"ibm/Pt_lim.py","file_name":"Pt_lim.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"537719647","text":"import os\r\nimport yaml\r\nfrom xlrd import open_workbook\r\n\r\nclass YamlReader(object):\r\n '''实现读取YML文件内容'''\r\n def __init__(self,yaml):\r\n if os.path.exists(yaml):\r\n self.yaml = yaml\r\n else:\r\n raise FileNotFoundError(\"请检查Config模块,是不是缺少了yml文件!\")\r\n self._data = None\r\n\r\n @property\r\n def data(self):\r\n if not self._data:\r\n with open(self.yaml,'rb') as f:\r\n self._data = list(yaml.safe_load_all(f))\r\n return self._data\r\n\r\nclass ExcelNotFoundError(Exception):\r\n pass\r\nclass SheetTypeError(Exception):\r\n pass\r\n\r\nclass ExcelReader(object):\r\n '''实现读取Excel功能'''\r\n def __init__(self,excel,sheet=0,title_line=True):\r\n if os.path.exists(excel):\r\n self.excel = excel\r\n else:\r\n raise ExcelNotFoundError(\"请检查data中的Excel文件,是不是没有你设置的那个Excel文件!\")\r\n self.sheet = sheet\r\n self.title = title_line\r\n self._data = list()\r\n\r\n @property\r\n def data(self):\r\n '''是否存在title栏,一般第一行为title;如果有title栏,返回dict,否则返回list'''\r\n if not self._data:\r\n workbook = open_workbook(self.excel)\r\n if type(self.sheet) not in [int,str]:\r\n raise SheetTypeError('Sheet的类型不正确,请修正')\r\n elif type(self.sheet) == int:\r\n sheet = workbook.sheet_by_index(self.sheet)\r\n elif type(self.sheet) == str:\r\n sheet = workbook.sheet_by_name(self.sheet)\r\n\r\n if self.title:\r\n title = sheet.row_values(0)\r\n for col in range(1,sheet.nrows):\r\n self._data.append(dict(zip(title,sheet.row_values(col))))\r\n\r\n else:\r\n for col in range(0,sheet.nrows):\r\n self._data.append(sheet.row_values(col))\r\n return self._data","sub_path":"UIFramework/utils/file_reader.py","file_name":"file_reader.py","file_ext":"py","file_size_in_byte":1986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"90034703","text":"#!/usr/bin/python\n#coding:utf-8\nfrom spidertool import searchTask\nfrom spidertool import dealTask\nfrom spidertool import SQLTool\nimport datetime\nif __name__ == \"__main__\":\n\tlinks = [ 'http://www.bunz.edu.cn','http://www.baidu.com','http://www.hao123.com','http://www.cctv.com','http://www.vip.com']\n\tS_produce= searchTask.searchTask()#表示创建的是线程\n\tS_produce.set_deal_num(10)\n\tS_produce.add_work(links)\n\n\tS_produce.start_task()\n\n\tsearchResultSQL=SQLTool.DBmanager()\n\tsearchResultSQL.connectdb()\n\tF_consume=dealTask.dealTask(0)#参数0表示创建的是进程\n\tF_consume.set_deal_num(10)\n\t\n\twhile S_produce.has_work_left():\n\t\tv,b=S_produce.get_finish_work()\n\n\t\tsearchResultSQL.inserttableinfo_byparams('webdata', [\"address\",\"content\",\"meettime\"], [(v,b,str(datetime.datetime.now()))])\t\t\n\t\tF_consume.add_work(b)\n\twhile True:\n\t\tpass\n\n\n\n\n","sub_path":"src/control.py","file_name":"control.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"638131096","text":"#!/usr/bin/env python\nimport os\nimport sys\nimport argparse\nimport shutil\nfrom glob import glob\n\nfrom mdkit.utility import mol2\nfrom mdkit.amber import ambertools\n\nparser = argparse.ArgumentParser(description=\"Prepare rescoring of MD frames\")\n\nparser.add_argument('-l',\n type=str,\n dest='files_l',\n nargs='+',\n required=True,\n help='Files ligands')\n\nparser.add_argument('-r',\n type=str,\n dest='files_r',\n nargs='+',\n required=True,\n help='Files receptor')\n\nparser.add_argument('-rref',\n type=str,\n dest='file_r_ref',\n required=True,\n help='File receptor ref')\n\nparser.add_argument('-lref',\n type=str,\n dest='file_l_ref',\n required=True,\n help='File ligand ref')\n\nparser.add_argument('-ligname',\n type=str,\n dest='ligname',\n default='LIG',\n help='Ligand name (default: LIG)')\n\nargs = parser.parse_args()\n\nvalues = ambertools.compute_rmsd(args.files_r, args.files_l, args.file_r_ref, args.file_l_ref, rmsddir='rmsd', ligname=args.ligname, cleanup=False)\nshutil.copyfile('rmsd/rmsd.dat', 'rmsd.dat')\nshutil.rmtree('rmsd', ignore_errors=True)\n","sub_path":"bin/compute_rmsd.py","file_name":"compute_rmsd.py","file_ext":"py","file_size_in_byte":1104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"285453456","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('bids', '0019_auto_20150410_1845'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='message',\n name='recipient_deleted_at',\n field=models.DateTimeField(null=True, verbose_name=b'Recipient deleted at', blank=True),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='message',\n name='replied_at',\n field=models.DateTimeField(null=True, verbose_name=b'replied at', blank=True),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='message',\n name='sender_deleted_at',\n field=models.DateTimeField(null=True, verbose_name=b'Sender deleted at', blank=True),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='message',\n name='subject',\n field=models.CharField(default='subject', max_length=120, verbose_name=b'Subject'),\n preserve_default=False,\n ),\n ]\n","sub_path":"bids/migrations/0020_auto_20150413_1802.py","file_name":"0020_auto_20150413_1802.py","file_ext":"py","file_size_in_byte":1207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"453832997","text":"#! python3\r\n\r\n# This program moves mp3 files from my download folder into a specific music folder\r\n# It also creates a folder with my preferred syntax if it's not already there\r\n\r\nimport os\r\nimport shutil\r\nimport datetime\r\nimport calendar\r\n\r\n\r\n# Get current date and set newFolderName to the month/year ex: 'June 2019'\r\ncurrentDate = datetime.datetime.now()\r\nnewFolderName = calendar.month_name[currentDate.month] + ' ' + str(currentDate.year)\r\n\r\n\r\n# Check music directory if there is a current month/year folder\r\nbeatsFolderPath = 'C:\\\\Users\\\\Paul\\\\Desktop\\\\Beats\\\\Beats'\r\nnewFolderPath = beatsFolderPath + '\\\\' + newFolderName\r\nlistOfFolders = []\r\n\r\nfor folderName, subFolders, fileNames in os.walk('C:\\\\Users\\\\Paul\\\\Desktop\\\\Beats\\\\Beats'):\r\n listOfFolders.append(folderName)\r\n\r\nprint('\\nSearching for ' + newFolderName + ' folder...')\r\n\r\nfor folder in listOfFolders:\r\n if folder.endswith(newFolderName):\r\n print('found')\r\n break\r\nelse:\r\n print('not found... ' + newFolderName + ' folder created\\n')\r\n # create a folder with the newFolderName\r\n os.makedirs(newFolderPath)\r\n\r\n\r\n# Iterate through the files in the download folder\r\n# Move them if they end in .mp3\r\ndownloadFolderPath = 'C:\\\\Users\\\\Paul\\\\Downloads'\r\n\r\nprint('The following files have been moved to ' + newFolderPath + '...\\n')\r\n\r\nfor folderName, subFolders, fileNames in os.walk(downloadFolderPath):\r\n for file in fileNames:\r\n if file.endswith('.mp3'):\r\n print(file)\r\n # move file from Downloads folder to beats folder\r\n # if there is a file with the same filename there already, add (copy) to the end\r\n try:\r\n shutil.move(downloadFolderPath + '\\\\' + file, newFolderPath)\r\n except shutil.Error:\r\n print('copy')\r\n shutil.move(downloadFolderPath + '\\\\' + file, newFolderPath + '\\\\' + file + ' (copy)')\r\n","sub_path":"musicMover.py","file_name":"musicMover.py","file_ext":"py","file_size_in_byte":1906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"71326748","text":"FoodCourt1item = {(\"Food\",\"Ma La Xiang Guo\"): 5.0, \n (\"Food\",\"Ramen\") : 2.50, \n (\"Food\",\"Cai Fan\") : 4.0, \n (\"Beverage\",\"Teh Beng\") : 1.2, \n (\"Beverage\",\"Kopi C Beng\") : 1.2}\nFoodCourt2item = {(\"Food\",\"Chicken Rice\"): 3.5, \n (\"Food\",\"Waffle\") : 1.3, \n (\"Food\",\"Penyet Chickem\") : 4.8, \n (\"Food\",\"Cai Fan\") : 4.0, \n (\"Food\",\"Chicken Chpp\") : 5.5, \n (\"Beverage\",\"Kopi Beng\") : 1.2, \n (\"Food\",\"Takoyaki\") : 2.0, \n (\"Food\",\"Bimbimbap\") : 6.0}\nFoodCourt9item = {(\"Food\",\"Mala la Xiang Guo \"): 5.5, \n (\"Food\",\"Japanese Curry Rice\") : 4.8, \n (\"Food\",\"Tze Char\") : 5.5, \n (\"Beverage\",\"Pepsi\") : 1.0, \n (\"Beverage\",\"Cola\") : 1.0}\nFoodCourt11item = {(\"Food\",\"Cai Fan\"): 4.0, \n (\"Food\",\"Chinese Cuisine\") : 7.0, \n (\"Food\",\"Fruit\") : 1.0, \n (\"Food\",\"Japanese Cuisine\") : 5.0, \n (\"Beverage\",\"Teh C Beng\"): 1.3, \n (\"Beverage\",\"Fruit Juice\") : 4.0}\nFoodCourt13item = {(\"Food\",\"Salted Egg Chicken Chop\"): 6.0, \n (\"Food\",\"Ramen\") : 6.0, \n (\"Food\",\"Fried Rice\") : 5.0, \n (\"Food\",\"Chicken Cutlet Rice\") : 5.0, \n (\"Beverage\",\"Teh O Beng\") : 1.1, \n (\"Beverage\",\"Plain Water\") : 0.5}\nFoodCourt14item = {(\"Food\",\"Dumpling Noodle\") : 5.0, \n (\"Food\",\"Western Cuisine\") : 6.0, \n (\"Food\",\"Cai Fan\") : 4.0, \n (\"Beverage\",\"Bandung\") : 1.5, \n (\"Food\",\"Soup Rice\") : 3.7}\nFoodCourt16item = {(\"Food\",\"Ma La Xiang Guo\") : 6.0, \n (\"Food\",\"Chicken Noodle\") : 3.5, \n (\"Food\",\"Duck Rice\") : 3.0, \n (\"Food\",\"Salad\") : 3.0, \n (\"Beverage\",\"Milk Tea\") : 3.5}\nAnandaKitchenitem = {(\"Food\",\"Prata\"): 1.0, \n (\"Food\",\"Chicken Rice\") : 3.5, \n (\"Food\",\"Pasta\") : 5.1, \n (\"Beverage\",\"Read Bean\") : 1.0, \n (\"Beverage\",\"Soya Bean\") : 1.4}\nFoodgleFoodCourtitem = {(\"Food\",\"Fish and Chips\"): 6.0, \n (\"Food\",\"Pasta\") : 4.0, \n (\"Food\",\"Korean Cuisine\") : 6.0, \n (\"Beverage\",\"Kopi O Beng\") : 0.9, \n (\"Beverage\",\"Ice Lemon Tea\") : 1.5,\n (\"Beverage\",\"Coconut\") : 2.5}\nNorthHillFoodCourtitem = {(\"Food\",\"Vegetable Rice\"): 3.0, \n (\"Food\",\"Lamb Chop\") : 9.0, \n (\"Food\",\"Ice Cream\") : 2.0, \n (\"Beverage\",\"Ice Peach Tea\") : 1.5, \n (\"Beverage\",\"Ice Lemon Tea\") : 1.3, \n (\"Beverage\",\"Kickapoo\") : 1.0}\n\n\n\nFoodCourt1={\"name\" : \"Foodcourt 1\" ,\n \"itemlist\" : FoodCourt1item , \n \"rank\" : 1 , \n 'location' : (556,514) }\nFoodCourt2={\"name\" : \"Foodcourt 2\" ,\n \"itemlist\" : FoodCourt2item , \n \"rank\" : 2 , \n 'location' : (613,419) }\nFoodCourt9={\"name\" : \"Foodcourt 9\" ,\n \"itemlist\" : FoodCourt9item , \n \"rank\" : 3 , \n 'location' : (792,241) }\nFoodCourt11={\"name\" : \"Foodcourt 11\" ,\n \"itemlist\" : FoodCourt11item , \n \"rank\" : 4 , \n 'location' : (948,712) }\nFoodCourt13={\"name\" : \"Foodcourt 13\" ,\n \"itemlist\" : FoodCourt13item , \n \"rank\" : 5 , \n 'location' : (560,74) }\nFoodCourt14={\"name\" : \"Foodcourt 14\" ,\n \"itemlist\" : FoodCourt14item , \n \"rank\" : 6 , \n 'location' : (668,82) }\nFoodCourt16={\"name\" : \"Foodcourt 16\" ,\n \"itemlist\" : FoodCourt16item , \n \"rank\" : 7 , \n 'location' : (494,412) }\nAnandaKitchen={\"name\" : \"Ananda Kitchen\" ,\n \"itemlist\" : AnandaKitchenitem , \n \"rank\" : 8 , \n 'location' : (931,274) }\nFoodgleFoodCourt={\"name\" : \"Foodgle Foodcourt\" ,\n \"itemlist\" : FoodgleFoodCourtitem , \n \"rank\" : 9 , \n 'location' : (861,101) }\nNorthHillFoodCourt={\"name\" : \"Northhill Foodcourt\" ,\n \"itemlist\" : NorthHillFoodCourtitem , \n \"rank\" : 10 , \n 'location' : (961,257) }\n\n\n\n\n\n\n\nCanteen = [FoodCourt1,\n FoodCourt2,\n FoodCourt9,\n FoodCourt11,\n FoodCourt13,\n FoodCourt14,\n FoodCourt16,\n AnandaKitchen,\n FoodgleFoodCourt,\n NorthHillFoodCourt]","sub_path":"Main/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":4752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"436126541","text":"#coding=utf-8\nimport tensorflow as tf\nimport cv2\nimport os\nimport matplotlib.pyplot as plt\nfrom skimage import io, transform\n\n#将数据打包,转换成tfrecords格式,以便后续高效读取\ndef encode_to_tfrecords(file_dir,tfrecord_name,resize=None):\n '''\n 此处我加载的数据目录如下:\n bluesky.0.jpg\n bluesky.0.jpg\n bluesky.0.jpg\n ....\n bluesky.1.jpg\n bluesky.1.jpg\n bluesky.1.jpg\n bluesky.1.jpg\n ...\n ''' \n writer=tf.python_io.TFRecordWriter(tfrecord_name)\n num_example=0\n for file in os.listdir(file_dir):\n f = file_dir + file\n image = io.imread(f)\n# plt.imshow(image)\n# plt.show()\n if resize is not None:\n image=cv2.resize(image,resize)\n height,width,nchannel=image.shape\n label=int(1 if(file.split('.')[0].find('bluesky')>0) else 0)\n# print f,label\n\n example=tf.train.Example(features=tf.train.Features(feature={\n 'height':tf.train.Feature(int64_list=tf.train.Int64List(value=[height])),\n 'width':tf.train.Feature(int64_list=tf.train.Int64List(value=[width])),\n 'nchannel':tf.train.Feature(int64_list=tf.train.Int64List(value=[nchannel])),\n 'image':tf.train.Feature(bytes_list=tf.train.BytesList(value=[image.tobytes()])),\n 'label':tf.train.Feature(int64_list=tf.train.Int64List(value=[label]))\n }))\n serialized=example.SerializeToString()\n writer.write(serialized)\n num_example+=1\n print (\"样本数据量:\",num_example)\n writer.close()\n\n#读取tfrecords文件\ndef decode_from_tfrecords(filename,num_epoch=None):\n filename_queue=tf.train.string_input_producer([filename],num_epochs=num_epoch)#因为有的训练数据过于庞大,被分成了很多个文件,所以第一个参数就是文件列表名参数\n reader=tf.TFRecordReader()\n _,serialized=reader.read(filename_queue)\n example=tf.parse_single_example(serialized,features={\n 'height':tf.FixedLenFeature([],tf.int64),\n 'width':tf.FixedLenFeature([],tf.int64),\n 'nchannel':tf.FixedLenFeature([],tf.int64),\n 'image':tf.FixedLenFeature([],tf.string),\n 'label':tf.FixedLenFeature([],tf.int64)\n })\n label=tf.cast(example['label'], tf.int32)\n image=tf.decode_raw(example['image'],tf.uint8)\n# Normalize the values of the image from the range [0, 255] to [-0.5, 0.5]\n# image = tf.cast(img, tf.float32) / 255 - 0.5\n image = tf.cast(image, tf.float32) * (1. / 255) - 0.5\n image=tf.reshape(image,tf.stack([\n tf.cast(example['height'], tf.int32),\n tf.cast(example['width'], tf.int32),\n tf.cast(example['nchannel'], tf.int32)]))\n \n print('decode_from_tfrecords: ',image) \n print('decode_from_tfrecords: ',label)\n #label=example['label']\n return image,label\n\n#根据队列流数据格式,解压出一张图片后,输入一张图片,对其做预处理、及样本随机扩充\ndef get_batch(image, label, batch_size,crop_size = 224):\n #数据扩充变换\n print('get_batch: ',image) \n print('get_batch: ',label)\n# distorted_image = tf.reshape(image,[224,224,3])\n distorted_image = tf.image.resize_image_with_crop_or_pad(image, 228,228)#随机裁剪**************\n distorted_image = tf.random_crop(distorted_image, [crop_size, crop_size, 3])#随机裁剪**************\n distorted_image = tf.image.random_flip_up_down(distorted_image)#上下随机翻转***************s\n# distorted_image = tf.image.random_brightness(distorted_image,max_delta=63)#亮度变化\n# distorted_image = tf.image.random_contrast(distorted_image,lower=0.2, upper=1.8)#对比度变化\n distorted_image = tf.image.per_image_standardization(distorted_image)\n #生成batch\n #shuffle_batch的参数:capacity用于定义shuttle的范围,如果是对整个训练数据集,获取batch,那么capacity就应该够大\n #保证数据打的足够乱\n# distorted_image = tf.cast(distorted_image, tf.float32)\n images, label_batch = tf.train.shuffle_batch([distorted_image, label],batch_size=batch_size,\n num_threads=16,capacity=1500,min_after_dequeue=1000)\n #images, label_batch=tf.train.batch([distorted_image, label],batch_size=batch_size)\n\n # 调试显示\n #tf.image_summary('images', images)\n \n return images, tf.reshape(label_batch, [batch_size])\n\n#这个是用于测试阶段,使用的get_batch函数\ndef get_test_batch(image, label, batch_size,crop_size):\n #数据扩充变换\n distorted_image=tf.image.central_crop(image,39./45.)\n distorted_image = tf.random_crop(distorted_image, [crop_size, crop_size, 3])#随机裁剪\n images, label_batch=tf.train.batch([distorted_image, label],batch_size=batch_size)\n return images, tf.reshape(label_batch, [batch_size])\n\n\n#测试上面的压缩、解压代码\ndef test():\n# encode_to_tfrecords(file_dir=\"picture/train/\",tfrecord_name=\"picture/data/bluesky_train.tfrecords\")\n image,label=decode_from_tfrecords('picture/data/bluesky_train.tfrecords')\n batch_image,batch_label=get_batch(image,label,5)#batch 生成测试\n init=tf.global_variables_initializer()\n with tf.Session() as session:\n session.run(init)\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(coord=coord)\n for l in range(5):#每run一次,就会指向下一个样本,一直循环\n image_np,label_np=session.run([image,label])#每调用run一次,那么\n plt.imshow(image_np)\n plt.show()\n print(label_np)\n \n batch_image_np,batch_label_np=session.run([batch_image,batch_label])\n plt.imshow(batch_image_np[l,:,:,:])\n plt.show()\n print(batch_label_np[l])\n# print batch_image_np.shape\n# print batch_label_np.shape\n \n coord.request_stop()#queue需要关闭,否则报错\n coord.join(threads)\n# test()\n","sub_path":"example_v2/tfrecord_create_from_folder.py","file_name":"tfrecord_create_from_folder.py","file_ext":"py","file_size_in_byte":6037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"493118399","text":"# RNA-ATAC multi-modal model specification\nimport os\nfrom pathlib import Path\nfrom tempfile import mkdtemp\n\nimport torch\nimport torch.distributions as dist\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nfrom numpy import sqrt, prod\nfrom torch.utils.data import DataLoader\nfrom torchnet.dataset import TensorDataset, ResampleDataset\nfrom torchvision.utils import save_image, make_grid\n\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import ListedColormap\nimport matplotlib.cm as cm\nimport matplotlib.colors\nfrom scipy.stats import pearsonr\nfrom sklearn.manifold import TSNE\nfrom sklearn.decomposition import PCA\nimport pandas as pd\nfrom scipy.io import mmwrite, mmread\nfrom scipy.sparse import csr_matrix\n\nfrom .vis import plot_embeddings, plot_kls_df, embed_umap\nfrom .mmvae import MMVAE\nfrom .vae_rna import RNA\nfrom .vae_atac import ATAC\n\nscale_factor = 10000\nmodal = [\"r\", \"m\"]\n\n\nclass RNA_ATAC(MMVAE):\n def __init__(self, params):\n prior = dist.Laplace\n super(RNA_ATAC, self).__init__(prior, params, RNA, ATAC)\n grad = {\"requires_grad\": params.learn_prior}\n self._pz_params = nn.ParameterList(\n [\n nn.Parameter(\n torch.zeros(1, params.latent_dim), requires_grad=False\n ), # mu\n nn.Parameter(torch.zeros(1, params.latent_dim), **grad), # logvar\n ]\n )\n self.vaes[0].llik_scaling = (\n prod(self.vaes[1].dataSize) / prod(self.vaes[0].dataSize)\n if params.llik_scaling == 0\n else params.llik_scaling\n )\n self.modelName = \"rna-atac\"\n\n @property\n def pz_params(self):\n return self._pz_params[0], F.softmax(\n self._pz_params[1], dim=1\n ) * self._pz_params[1].size(-1)\n\n def getDataLoaders(self, datasets, batch_size, shuffle, drop_last, device=\"cuda\"):\n datasets_rna_atac = TensorDataset(datasets)\n\n kwargs = {\"num_workers\": 2, \"pin_memory\": True} if device == \"cuda\" else {}\n dataloader = DataLoader(\n datasets_rna_atac,\n batch_size=batch_size,\n shuffle=shuffle,\n drop_last=drop_last,\n **kwargs\n ) # Shuffle here\n\n return dataloader\n\n def forward(self, x):\n qz_xs, zss = [], []\n read_counts = []\n # initialise cross-modal matrix\n px_zs = [[None for _ in range(len(self.vaes))] for _ in range(len(self.vaes))]\n for m, vae in enumerate(self.vaes):\n read_counts.append(vae.enc.read_count(x[m]))\n qz_x, px_z, zs = vae(x[m])\n qz_xs.append(qz_x)\n zss.append(zs)\n px_zs[m][m] = px_z # fill-in diagonal\n for e, zs in enumerate(zss):\n for d, vae in enumerate(self.vaes):\n if e != d: # fill-in off-diagonal\n if d == 0:\n r, p = vae.dec(zs)\n r = r / scale_factor * read_counts[d]\n px_zs[e][d] = vae.px_z(r, p)\n else:\n r, p, g = vae.dec(zs)\n r = r / scale_factor * read_counts[d]\n px_zs[e][d] = vae.px_z(r, p, g)\n\n return qz_xs, px_zs, zss\n\n def reconstruct(self, data, train_test, runPath, sampling=False, N=1):\n if not sampling:\n recons_mat = super(RNA_ATAC, self).reconstruct(data)\n for r, recons_list in enumerate(recons_mat):\n for o, recon in enumerate(recons_list):\n _data = data[r].cpu()\n recon = recon.squeeze(0).cpu().detach().numpy()\n recon = csr_matrix(recon)\n mmwrite(\n \"{}/{}_recon_{}x{}.mtx\".format(\n runPath, train_test, modal[r], modal[o]\n ),\n recon,\n )\n else:\n for n in range(N):\n recons_mat = super(RNA_ATAC, self).reconstruct_sample(data)\n for r, recons_list in enumerate(recons_mat):\n for o, recon in enumerate(recons_list):\n _data = data[r].cpu()\n recon = recon.squeeze(0).cpu().detach().numpy()\n recon = csr_matrix(recon)\n mmwrite(\n \"{}/{}_recon_{}x{}.mtx\".format(\n runPath, train_test, modal[r], modal[o]\n ),\n recon,\n )\n\n def predict(self, data, sampling=False, N=1):\n if not sampling:\n recons_mat = super(RNA_ATAC, self).reconstruct(data)\n else:\n recons_mat = super(RNA_ATAC, self).reconstruct_sample(data)\n return recons_mat\n\n def analyse(self, data, runPath, epoch, K=1):\n zemb, zsl, kls_df = super(RNA_ATAC, self).analyse(data, K=K)\n labels = [\"Prior\", *[vae.modelName.lower() for vae in self.vaes]]\n plot_embeddings(\n zemb, zsl, labels, \"{}/emb_umap_{:03d}.png\".format(runPath, epoch)\n )\n plot_kls_df(\n kls_df, \"{}/kl_distance_{:03d}.png\".format(runPath, epoch), yscale=\"log\"\n )\n\n def get_latent(self, data, train_test, runPath, sampling=False):\n lats = super(RNA_ATAC, self).latents(data, sampling)\n for m, lat in enumerate(lats):\n lat = lat.cpu().detach().numpy()\n lat = pd.DataFrame(lat)\n lat.to_csv(\"{}/lat_{}_{}.csv\".format(runPath, train_test, modal[m]))\n\n mean_lats = sum(lats) / len(lats)\n mean_lats = mean_lats.cpu().detach().numpy()\n mean_lats = pd.DataFrame(mean_lats)\n mean_lats.to_csv(\"{}/lat_{}_mean.csv\".format(runPath, train_test))\n\n def plot_klds(self, data, runPath):\n kls_df = super(RNA_ATAC, self).kls_df(data)\n plot_kls_df(kls_df, \"{}/kl_distance.png\".format(runPath), yscale=\"linear\")\n\n def traverse(self, runPath):\n traverse_path = runPath + \"/traverse\"\n traverse_dir = Path(traverse_path)\n traverse_dir.mkdir(parents=True, exist_ok=True)\n\n mu = self._pz_params[0].cpu().detach().numpy()\n var = torch.exp(self._pz_params[1]).cpu().detach().numpy()\n sd = np.sqrt(var)\n strt = -10\n stp = 10\n for i in range(strt, stp):\n adj_mu = mu + sd * 0.5 * i\n adj = adj_mu if i == -10 else np.vstack([adj, adj_mu])\n\n mu_ = np.tile(mu, (len(range(strt, stp)), 1))\n\n # traverse_list = []\n for i in range(self.params.latent_dim):\n adj_dim = adj[:, i]\n traverse = np.copy(mu_)\n traverse[:, i] = np.copy(adj_dim)\n\n adj_dim = pd.DataFrame(adj_dim)\n adj_dim.to_csv(\n traverse_path + \"/traverse_dim{}.csv\".format(i + 1)\n ) # from python to R index\n\n zs = torch.from_numpy(traverse).to(device)\n px_zs = []\n for m, vae in enumerate(self.vaes):\n px_z = vae.px_z(*vae.dec(zs))\n px_zs.append(px_z)\n r_traverse = px_zs[0].mean.cpu().detach().numpy()\n p_traverse = px_zs[1].mean.cpu().detach().numpy()\n\n r_traverse = pd.DataFrame(r_traverse.numpy())\n r_traverse.to_csv(traverse_path + \"/rna_traverse_dim{}.csv\".format(i + 1))\n\n p_traverse = pd.DataFrame(p_traverse.numpy())\n p_traverse.to_csv(traverse_path + \"/atac_traverse_dim{}.csv\".format(i + 1))\n","sub_path":"task01_predictmodality/method/scmm/vaes/mmvae_rna_atac.py","file_name":"mmvae_rna_atac.py","file_ext":"py","file_size_in_byte":7589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"356178930","text":"\"\"\"\r\nAuthor: Stephen Krewson\r\n\r\nFirst checks in the given directory for files of the form:\r\n\r\n_saved_paths.pkl\r\n_saved_distances.npy\r\n_saved_indices.npy\r\n\r\nif these don't exist (with the same date for each), it exits. Normal usage is:\r\n\r\n\tpython ht_show_neighbors.py \r\n\r\nIf no argument is supplied, a random index is chosen.\r\n\r\nUses this package; run from \"tensorflow\" environment in Windows\r\nhttps://anaconda.org/htrc/pysolr\r\n\r\n\"\"\"\r\n\r\n\r\nimport cv2\r\nimport glob\r\nfrom htrc import metadata as bib\r\nfrom imutils import build_montages\r\nfrom mpl_toolkits.axes_grid1 import ImageGrid\r\nimport numpy as np\r\nimport os\r\nimport pickle\r\nimport random\r\nimport re\r\nimport sys\r\n\r\n\r\n# Make sure we have three files of format _saved_*\r\nsaved = glob.glob(\"*_saved*\")\r\nif len(saved) is not 3:\r\n\texit(\"Saved files for distances, indices, and paths must exist. Run \\\r\n\t\tht_calc_neighbors.py to generate these three files.\")\r\n\r\n# Make sure the dates are the same for all three\r\ndates = [x.split('_')[0] for x in saved]\r\nif len(set(dates)) is not 1:\r\n\texit(\"The dates for the saved distance matrix mappings must be identical.\")\r\ndate = dates[0]\r\n\r\n# handles for the three saved mappings (maybe refactor this, but OK for now)\r\nsaved_paths = date + \"_saved_paths.pkl\"\r\nsaved_distances = date + \"_saved_distances.npy\"\r\nsaved_indices = date + \"_saved_indices.npy\"\r\n\r\n# Now go ahead and open everything\r\nwith open(saved_paths, 'rb') as f:\r\n\tpaths = pickle.load(f)\r\n\r\n\t# load the numpy arrays\r\n\tdistances = np.load(saved_distances)\r\n\tindices = np.load(saved_indices)\r\n\r\n\t# ensure the total number of images is the same (number of rows)\r\n\tif not len(paths) == distances.shape[0] == indices.shape[0]:\r\n\t\texit(\"Paths, distances, and indices must have same number of rows.\")\r\n\tnum_rows = len(paths)\r\n\r\n\t# ensure the number of neighbors is the same (the \"K\" in KNN)\r\n\tif not distances.shape == indices.shape:\r\n\t\texit(\"Distances and indices must have same shape.\")\r\n\tnum_nbrs = distances.shape[1]\r\n\r\n\t# pick a random row from the matrix (avoid ) 3721 is great\r\n\t# 1509 is cover photo snakes and such\r\n\t# 376 Aunt Sue Puzzle Bureau headers\r\n\t# 2109 is parley's cabinet: image of children spinning globe by pyramids\r\n\t# this also occurs in universal history i think\r\n\t# 3231 is pretty great\r\n\t# idea: will ML pick up reverse traced images?? test out on UH to see what\r\n\t# their vector similarities are\r\n\tidx = 149 #1509 #random.randint(0, num_rows)\r\n\tprint(\"idx =\", idx)\r\n\r\n\t# loop over the neighbors of this image and open them in OpenCV\r\n\t# first image is image itself; we will send array to montage \r\n\timages = []\r\n\timages_full_page = []\r\n\tnearest_nbrs = [y for (x,y) in sorted(zip(distances[idx],indices[idx]),\\\r\n\t\tkey=lambda pair: pair[0])]\r\n\r\n\t# try to see how many of the images are coming from the same volume\r\n\thtids = {}\r\n\thtidx = 0\r\n\r\n\tfor i, nbr in enumerate(nearest_nbrs):\r\n\r\n\t\t# HACK: paths should really have another column with just the HTID\r\n\t\t# this will break if other people not using my exact directory and OS\r\n\t\t# setup try to use it!!\r\n\t\tvolume = paths[nbr].split('\\\\')[5]\r\n\t\t\r\n\t\t# keep track of which volumes we've seen\r\n\t\tif not volume in htids.keys():\r\n\t\t\thtids[volume] = htidx\r\n\t\t\thtidx = htidx+1\r\n\r\n\t\t# See: https://htrc.github.io/HTRC-WorksetToolkit/sdk.html\r\n\t\t# docs are still being written, so be safe since Solr API might not\r\n\t\t# always work or could require higher credentials\r\n\t\t#metadata = bib.volume_solr_metadata(volume)\r\n\t\t\r\n\t\t# NOTE: 11/12: API change made solr call not work\r\n\t\tmetadata = bib.get_volume_metadata(volume)\r\n\t\tsafe_metadata = bib.safe_volume_metadata(volume)\r\n\r\n\t\tannotation = \"{0:2}: {1:25}\\t{2})\".format(\r\n\t\t\thtids[volume],\r\n\t\t\tsafe_metadata['titles'][0][:25],\r\n\t\t\t','.join(safe_metadata['publishDates'])\r\n\t\t)\r\n\t\ttag = \"{},{}\".format(i,htids[volume])\r\n\t\t\r\n\t\t'''\r\n\t\telse:\r\n\t\t\tannotation = \"{2:25.25}. {3:13.13}: {4:20.20}\\t{5}\\t{0:2}:{1:2}:{6}\".format(\r\n\t\t\t\ti,\r\n\t\t\t\thtids[volume],\r\n\t\t\t\tmetadata['title_top'][0],\r\n\t\t\t\t','.join(metadata['publication_place']),\r\n\t\t\t\t','.join(metadata['publisher']),\r\n\t\t\t\t','.join(metadata['publishDate']),\r\n\t\t\t\tvolume\r\n\t\t\t)\r\n\t\t\ttag = \"{},{}\".format(i,htids[volume])\r\n\t\t'''\t\t\r\n\t\t# can look at this alongside the montage (eventually will be REACT UI)\r\n\t\tprint(annotation,paths[nbr])\r\n\t\t#exit(\"ok what are fields\")\r\n\r\n\r\n\t\timg = cv2.imread(paths[nbr])\r\n\t\tif img.all() == None:\r\n\t\t\tcontinue\r\n\t\th,w = img.shape[:2]\r\n\t\t\r\n\t\t# Move away from this approach: not very elegant\r\n\t\t# Write some text on the image before appending it to array\r\n\t\tfont = cv2.FONT_HERSHEY_SIMPLEX \r\n\t\tcv2.putText(\r\n\t\t\timg,\t\t\t# target image\r\n\t\t\ttag,\t\t\t# text that will be drawn\r\n\t\t\t(10,h//2),\t\t# pixel coordinates of bottom left corner\r\n\t\t\tfont, \t\t\t# typeface\r\n\t\t\t5,\t\t\t\t# scale factor (e.g. 2 would be double base size)\r\n\t\t\t(0,0,255),\t\t# BGR color (full red)\r\n\t\t\t4,\t\t\t\t# thickness of line\r\n\t\t\tTrue\t\t\t# \"bottomLeftOrigin\" is (0,0) (otherwise top left)\r\n\t\t)\r\n\t\t\r\n\t\timages.append(img)\r\n\r\n\t\t# full page image is a directory up, outside of /extracted subfolder\r\n\t\t# N.B. this slash is different for Windows vs. Unix!\r\n\t\tfull_page_path = paths[nbr].replace(\"\\extracted\", \"\")\r\n\r\n\t\t# strip out the _ex_[0-9]* tag\r\n\t\textracted_tag = re.search(\"(_ex_[0-9]+)\", full_page_path)\r\n\t\tif extracted_tag:\r\n\t\t\textracted_tag = extracted_tag.group(1)\r\n\t\t\tfull_page_path = full_page_path.replace(extracted_tag, \"\")\r\n\r\n\t\t\t# TODO: this is a hack, since extractor has jpg hardcoded into it\r\n\t\t\t# either insist on one format or re-run the extractor\r\n\t\t\tif not os.path.exists(full_page_path):\r\n\t\t\t\tfull_page_path = full_page_path.replace(\"jpg\", \"png\")\r\n\r\n\r\n\t\t\timages_full_page.append(cv2.imread(full_page_path))\r\n\r\n\t\t\t# https://github.com/htrc/htrc-feature-reader\r\n\t\t\t# http://www.porganized.com/\r\n\t\t\t# https://github.com/htrc/HTRC-WorksetToolkit\r\n\t\t\t# https://joshpeng.github.io/post/wsl/\r\n\r\n\r\n\r\n\t# concat the full pages onto the extracts; montage will page through the\r\n\t# extracts first and then the full page images\r\n\timages = images + images_full_page\r\n\r\n\t# construct montage: first tuple (width, height) then (columns, rows)\r\n\t# http://www.pyimagesearch.com/2017/05/29/montages-with-opencv/\r\n\tmontages = build_montages(images, (166, 234), (5, num_nbrs//10))\r\n\tfor montage in montages:\r\n\t\tcv2.imshow(\"Montage\", montage)\r\n\t\tcv2.waitKey(0)\r\n","sub_path":"hathi-trust/ht_show_neighbors.py","file_name":"ht_show_neighbors.py","file_ext":"py","file_size_in_byte":6272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"256269531","text":"# hw5.py\n# ​Cheng-Han Hsieh,​ 104061153\n\nimport threading\nimport time\nimport random\n\n\ndef makeParkingLot(N):\n global sem\n global spots\n # global spotsSync\n\n sem = threading.BoundedSemaphore(N)\n spots = [None for i in range(N)]\n\n\n\ndef makeCars(C):\n cars = []\n for i in range(C):\n t = threading.Thread(target=park, args=(i, ))\n cars.append(t)\n\n return cars\n\n\ndef park(car):\n global sem, spots, spotsSync\n\n sem.acquire()\n # The semaphore blocks other threads, the threads\n # in the critical section will only modify its own\n # spot. We don't have to block the codes\n for i in range(len(spots)):\n if spots[i] is None:\n spots[i] = car\n break\n snapshot = spots[:]\n print('Car %d go spot: %s' % (car, snapshot))\n\n st = random.randrange(1, 10)\n time.sleep(st)\n\n for i in range(len(spots)):\n if spots[i] is car:\n spots[i] = None\n break\n myCopySpots = spots[:]\n sem.release()\n\n print('Car %d left after %d sec, %s' %\n (car, st, myCopySpots))\n\n\nif __name__ == '__main__':\n makeParkingLot(5)\n cars = makeCars(15)\n for i in range(15):\n cars[i].start()\n","sub_path":"hw5/hw5.py","file_name":"hw5.py","file_ext":"py","file_size_in_byte":1206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"39828637","text":"\"\"\"\nDecorators, higher-order functionals, functools module extensions.\n\"\"\"\nimport dis\nimport functools\nimport inspect\nimport sys\n\nfrom ml_utils.common import (is_sequence, pack_args,\n is_variable_name, include_keys, exclude_keys)\nfrom ml_utils.meta.inspect_utils import (is_class, hasattrs,\n deepcopy_cls, get_arg_names,\n has_kwargs)\n\ndef meta_wrap(decor):\n \"\"\"\n a decorator decorator, allowing the wrapped decorator to be used as:\n @decorator(*args, **kwargs)\n def callable()\n -- or --\n @decorator # without parenthesis, args and kwargs will use default\n def callable()\n \n Args:\n decor: a decorator whose first argument is a callable (function or class\n to be decorated), and the rest of the arguments can be omitted as default.\n decor(f, ... the other arguments must have default values)\n\n Warning:\n decor can NOT be a function that receives a single, callable argument. \n See stackoverflow: http://goo.gl/UEYbDB\n \"\"\"\n single_callable = (lambda args, kwargs: \n len(args) == 1 and len(kwargs) == 0 and callable(args[0]))\n @functools.wraps(decor)\n def new_decor(*args, **kwargs):\n if single_callable(args, kwargs):\n # this is the double-decorated f. \n # It should not run on a single callable.\n return decor(args[0])\n else:\n # decorator arguments\n return lambda real_f: decor(real_f, *args, **kwargs)\n\n return new_decor\n\n\n@meta_wrap\ndef noop_decorator(func, *args, **kwargs):\n \"\"\"\n For debugging purposes. \n A decorator that does nothing. Compatible with `meta_wrap` style. \n \"\"\"\n return func\n\n\n@meta_wrap\ndef deprecated(func, msg='', action='warning'):\n \"\"\"\n Function/class decorator: designate deprecation.\n \n Args:\n msg: string message. \n action: string mode\n - 'warning': (default) prints `msg` to stderr\n - 'noop': do nothing\n - 'raise': raise DeprecatedError(`msg`)\n \"\"\"\n action = action.lower()\n if action not in ['warning', 'noop', 'raise']:\n raise ValueError('unknown action type {}'.format(action))\n if not msg:\n msg = 'This is a deprecated feature.'\n\n # only does the deprecation when being called\n @functools.wraps(func)\n def _deprecated(*args, **kwargs):\n if action == 'warning':\n print(msg, file=sys.stderr)\n elif action == 'raise':\n raise DeprecationWarning(msg)\n return func(*args, **kwargs)\n return _deprecated\n\n\n@meta_wrap\ndef experimental(func, msg='', action='warning'):\n \"\"\"\n Function/class decorator: warn user of experimental feature.\n \n Args:\n msg: string message\n action: string mode\n - 'warning': (default) prints `msg` to stderr\n - 'noop': do nothing\n \"\"\"\n action = action.lower()\n if action not in ['warning', 'noop']:\n raise ValueError('unknown action type {}'.format(action))\n msg = 'experimental feature: ' + msg\n\n # only issues warning when being called\n @functools.wraps(func)\n def _experimental(*args, **kwargs):\n if action == 'warning':\n print(msg, file=sys.stderr)\n return func(*args, **kwargs)\n return _experimental\n\n\ndef next_iterable(cls):\n \"\"\"\n Decorator to make a class iterable with `next()` given `__iter__()`\n Compatible with both python 2 and 3.\n \n Args:\n cls: must define __iter__() (returns a generator or has `yield` statement)\n \"\"\"\n assert is_class(cls)\n assert hasattr(cls, '__iter__'), '__iter__ needs to return a generator'\n assert not hasattrs(cls, '_next_iterator')\n old_init = cls.__init__\n @functools.wraps(old_init)\n def _init(self, *args, **kwargs):\n self._next_iterator = self.__iter__()\n old_init(self, *args, **kwargs)\n # override the subclass' constructor\n cls.__init__ = _init\n \n def _next(self):\n return next(self._next_iterator)\n \n cls.next = cls.__next__ = _next\n return cls\n\n\n@meta_wrap\ndef circular_iterable(cls, cycles=0):\n \"\"\"\n Decorator to make a class cyclic iterable with for-loop.\n Compatible with both python 2 and 3. \n Meta-wrap style.\n \n Args:\n cls: must define __iter__() (returns a generator or has `yield` statement)\n cycles: \n - 0: infinitely iterable\n - N: repeat a specific number of times\n \"\"\"\n assert is_class(cls)\n assert hasattr(cls, '__iter__'), '__iter__ needs to return a generator'\n assert not hasattrs(cls, '_circular_iterator')\n \n # must make a deep copy of the class so that the old class is not modified\n cls = deepcopy_cls(cls)\n old_iter = cls.__iter__\n @functools.wraps(old_iter)\n def _new_iter(self):\n it = old_iter(self)\n is_inf = cycles == 0 # infinitely iterable\n c = cycles\n while is_inf or c > 0:\n try:\n yield next(it)\n except StopIteration:\n it = old_iter(self)\n c -= 1\n cls.__iter__ = _new_iter\n return cls\n\n\nclass noop_context:\n \"\"\"\n Placeholder context manager that does nothing.\n We could have written simply as:\n\n @contextmanager\n def noop_context(*args, **kwargs):\n yield\n\n but the returned context manager cannot be called twice, i.e.\n my_noop = noop_context()\n with my_noop:\n do1()\n with my_noop: # trigger generator error\n do2()\n \"\"\"\n def __init__(self, *args, **kwargs):\n self.args = args\n self.kwargs = kwargs\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n pass\n\n\n# =================== function tools ===================\ndef compose(*functions):\n \"\"\"\n Compose multiple functions\n compose(f, g, h)(x) <=> h(g(f(x)))\n \"\"\"\n functions = pack_args(functions)\n return functools.reduce(lambda f, g: \n lambda *args, **kwargs: g(f(*args, **kwargs)),\n functions)\n\n\ndef chain_and(*predicates):\n \"\"\"\n Args:\n *predicates: a list of lambdas that take one arg and return bool\n \n Returns:\n a lambda predicate that is true iff all predicates are true.\n employs short-circuit logic. Internally, use a generator instead of \n list comprehension to enable short-circuit.\n \"\"\"\n predicates = pack_args(predicates)\n return lambda obj: all((pred(obj) for pred in predicates))\n\n\ndef chain_or(*predicates):\n \"\"\"\n Args:\n *predicates: a list of lambdas that take one arg and return bool\n\n Returns:\n a lambda predicate that is true if at least one of the predicates are true\n \"\"\"\n predicates = pack_args(predicates)\n return lambda obj: any((pred(obj) for pred in predicates))\n\n\ndef apply_map(func, seq):\n assert is_sequence(seq)\n for i in range(len(seq)):\n seq[i] = func(seq[i])\n\n\n@meta_wrap\ndef lru_cache(func, maxsize=128, typed=False):\n \"\"\"\n Can be used with or without parenthesis. See `meta_wrap`'s effect. \n \"\"\"\n return functools.lru_cache(maxsize, typed)(func)\n\n\n# ======================== Type conversion ========================\nclass TypeConverter:\n \"\"\"\n Convert N types to each other\n \"\"\"\n def __init__(self, names, get_type, convert_matrix):\n \"\"\"\n Args:\n names: type names of N types\n get_type: a function that takes an object and outputs its type.\n output must be a string in `names`\n convert_matrix: dict of converter functions, should have size N*(N-1)\n each dict entry can be one of 3 forms:\n 1. {(type_A, type_B): function_A_to_B}\n `function_A_to_B`=None means the conversion is not available\n 2. {(A, B): [C1, D, C2]}\n means compose functions A->C1, C1->D, D->C2, C2->B to get A->B\n every part of the chain must be well-defined\n 3. {(A, B): \"C->B\"}\n a string of the form \"type1->type2\" to reuse another converter\n \"\"\"\n self.names = names\n self.get_type = get_type\n self.convert_matrix = convert_matrix\n all_combos = [(i, j) for i in names for j in names]\n # sanity check and fill in missing methods (conversion unavailable)\n for n1, n2 in all_combos:\n if (n1, n2) in convert_matrix:\n converter = convert_matrix[n1, n2]\n if is_sequence(converter):\n # chain of existing converters\n assert is_sequence(converter) and len(converter) > 0\n assert n1 not in converter and n2 not in converter, \\\n 'Intermediate conversion chain must not have {} and {}'.format(n1, n2)\n convert_matrix[n1, n2] = list(converter)\n elif isinstance(converter, str):\n assert '->' in converter, 'string must have `->`'\n spec = converter.split('->')\n assert len(spec) == 2, 'must be of form `A->B`'\n m1, m2 = map(str.strip, spec)\n assert m1 in names, 'type {} does not exist'.format(m1)\n assert m2 in names, 'type {} does not exist'.format(m2)\n assert (m1, m2) in convert_matrix\n m12 = convert_matrix[m1, m2]\n assert not isinstance(m12, str), 'cannot reference another string spec'\n convert_matrix[n1, n2] = m12\n else:\n if n1 == n2:\n # sometimes we need self-convert, like numpy float32 -> float64\n # if no self-converter exists, fall back to identity\n convert_matrix[n1, n1] = lambda x: x\n else:\n convert_matrix[n1, n2] = None\n\n convert_matrix_copy = convert_matrix.copy()\n for n1, n2 in all_combos:\n converter = convert_matrix[n1, n2]\n if converter is None or callable(converter):\n continue\n # expand converter function chains\n chain = [n1] + converter + [n2]\n convert_matrix[n1, n2] = self._resolve_chain(convert_matrix_copy, chain)\n self._generate_converters()\n self._generate_type_ids()\n\n\n def _resolve_chain(self, matrix, chain):\n # recursively resolve A -> B -> D -> C converter chain\n # Args: chain [a list of intermediate types]\n funcs = []\n while len(chain) >= 2:\n n1 = chain[0]\n n2 = chain[1]\n c12 = matrix[n1, n2]\n del chain[0]\n if callable(c12):\n funcs.append(c12)\n elif c12 is None:\n raise ValueError('Converter {}->{} is None'.format(n1, n2))\n else:\n # a nested chain, needs to expand\n chain = [n1] + c12 + chain\n if len(chain) != len(set(chain)):\n raise ValueError('Loop detected in converter chain: '+str(chain))\n return compose(funcs)\n\n def _generate_converters(self):\n # generate `to_typeA()`, `to_typeB()` methods\n for name in self.names:\n method_name = 'to_{}'.format(name)\n method = functools.partial(self.__call__, target=name)\n if is_variable_name(method_name):\n setattr(self, method_name, method)\n\n def _generate_type_ids(self):\n # generate `is_typeA()`, `is_typeB()` methods\n for name in self.names:\n method_name = 'is_{}'.format(name)\n method = lambda x, name=name: self.get_type(x) == name\n if is_variable_name(method_name):\n setattr(self, method_name, method)\n\n def __call__(self, obj, target, **kwargs):\n \"\"\"\n Args:\n obj: to be converted\n target (str): target type\n **kwargs: additional args passed to the converter\n\n Returns:\n converted object\n \"\"\"\n assert target in self.names, 'unknown target type: {}'.format(target)\n typ = self.get_type(obj)\n assert typ in self.names, 'unknown obj type: {}'.format(typ)\n converter = self.convert_matrix[typ, target]\n if converter is None:\n raise TypeError('No conversion method exists for {} -> {}'\n .format(typ, target))\n return converter(obj, **kwargs)\n\n @property\n def convert_return(self):\n \"\"\"\n Decorator that adds `target=` and `**target_kwargs` to the function\n\n @MyConverter.convert_return\n def f(x, y, z):\n return obj\n\n f(x, y, z, target=TypeA, target_kwargs=dict(flag1=1, flag2=2))\n ... is equivalent to\n MyConverter(f(x,y,z), target=typeA, flag1=1, flag2=2)\n\n if `target=None`, return the output unaltered.\n \"\"\"\n def _convert_type(f):\n @functools.wraps(f)\n def new_f(*args, target=None, target_kwargs=None, **kwargs):\n obj = f(*args, **kwargs)\n if target is None:\n return obj\n else:\n if target_kwargs is None:\n target_kwargs = {}\n return self.__call__(obj, target=target, **target_kwargs)\n return new_f\n return _convert_type\n\n\n def convert_factory(self, func, target, *, unpack_target_kwargs=True):\n \"\"\"\n Makes a new function such that the return type is `target`\n\n Args:\n unpack_target_kwargs: if False, the new function will take an extra\n keyword `target_kwargs` for the target type. Otherwise unpack the\n target kwargs as regular kwargs to `func`. Make sure the kwargs don't\n have name conflicts! If `func` has **kwargs, all kwargs will go to\n func instead of target converter\n\n \"\"\"\n assert target in self.names, 'unknown target type: {}'.format(target)\n if unpack_target_kwargs:\n _has_kwargs = has_kwargs(func)\n _arg_names = get_arg_names(func)\n @functools.wraps(func)\n def new_f(*args, **kwargs):\n if _has_kwargs:\n func_kwargs, target_kwargs = kwargs, {}\n else:\n func_kwargs = include_keys(_arg_names, kwargs)\n target_kwargs = exclude_keys(_arg_names, kwargs)\n obj = func(*args, **func_kwargs)\n return self.__call__(obj, target=target, **target_kwargs)\n else:\n @functools.wraps(func)\n def new_f(*args, target_kwargs=None, **kwargs):\n obj = func(*args, **kwargs)\n if target_kwargs is None:\n target_kwargs = {}\n return self.__call__(obj, target=target, **target_kwargs)\n return new_f\n","sub_path":"ml_utils/meta/functional.py","file_name":"functional.py","file_ext":"py","file_size_in_byte":15009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"195095875","text":"import utils\n\n\ndef ChaCha20Encrypt(key, counter, nonce, plaintext):\n cipher = b''\n for j in range(len(plaintext) // 64):\n keyStream = ChaCha20Block(key, counter+j, nonce)\n block = plaintext[j*64:(j+1)*64]\n cipher += utils.byte_xor(block, keyStream)\n if ((len(plaintext) % 64) != 0):\n j = len(plaintext) // 64\n keyStream = ChaCha20Block(key, counter+j, nonce)\n block = plaintext[j*64:]\n cipher += utils.byte_xor(block,\n keyStream)[:(len(plaintext) % 64)+1]\n\n return cipher\n\n\ndef ChaCha20Block(key, counter, nonce):\n state = [0x61707865, 0x3320646e,\n 0x79622d32, 0x6b206574]\n state += key\n state += [counter]\n state += nonce\n initialState = state.copy()\n for i in range(10):\n Round(state)\n state = [(b[0] + b[1]) & 0xFFFFFFFF for b in zip(state, initialState)]\n return serialize(state)\n\n\ndef QuarterRoundAux(a, b, c, d):\n a = (a + b) & 0xFFFFFFFF\n d ^= a\n d = utils.left_rotate(d, 16)\n c = (c + d) & 0xFFFFFFFF\n b ^= c\n b = utils.left_rotate(b, 12)\n a = (a + b) & 0xFFFFFFFF\n d ^= a\n d = utils.left_rotate(d, 8)\n c = (c + d) & 0xFFFFFFFF\n b ^= c\n b = utils.left_rotate(b, 7)\n return (a, b, c, d)\n\n\ndef QuarterRound(state, x, y, z, w):\n a = state[x]\n b = state[y]\n c = state[z]\n d = state[w]\n (a, b, c, d) = QuarterRoundAux(a, b, c, d)\n state[x] = a\n state[y] = b\n state[z] = c\n state[w] = d\n\n\ndef Round(state):\n # column round\n QuarterRound(state, 0, 4, 8, 12)\n QuarterRound(state, 1, 5, 9, 13)\n QuarterRound(state, 2, 6, 10, 14)\n QuarterRound(state, 3, 7, 11, 15)\n # diagonal round\n QuarterRound(state, 0, 5, 10, 15)\n QuarterRound(state, 1, 6, 11, 12)\n QuarterRound(state, 2, 7, 8, 13)\n QuarterRound(state, 3, 4, 9, 14)\n\n\ndef serialize(state):\n \"\"\"\n Used to serialize a state into bytes\n \"\"\"\n serializedBlock = b''\n for i in state:\n serializedBlock += i.to_bytes(4, \"little\")\n return serializedBlock\n","sub_path":"ChaCha20.py","file_name":"ChaCha20.py","file_ext":"py","file_size_in_byte":2060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"621017152","text":"from .car_company import *\n\n\ndef exit():\n return\ncompany=Car_company()\nMENU = [\n ['Add car', company.add_car_company],\n ['Add driver', company.add_driver_company],\n ['Edit', company.edit],\n ['Delete', company.delete],\n ['Print all', company.print_all],\n ['Print in file', company.wrt_file],\n ['Read from file', company.rd_file],\n ['Clear file', company.clr_file],\n ['Exit', exit]\n ]\n\n\ndef main():\n print(\"------------------------------\")\n i = 0\n for item in MENU:\n print(\"{0:2}. {1}\".format(i, item[0]))\n i += 1\n print(\"------------------------------\")\n o = int(input())\n MENU[o][1]()\n if o != 8:\n main()\n \nif __name__ == \"__main__\":\n try:\n main()\n except:\n print(\"error\")\n\n","sub_path":"st12/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"215615150","text":"# By submitting this assignment, I agree to the following:\n# \"Aggies do not lie, cheat, or steal, or tolerate those who do.\"\n# \"I have not given or received any unauthorized aid on this assignment.\"\n#\n# Name: Nick Cheng\n# Section: 509\n# Assignment: Lab Assignment 10b Act 1\n# Date: 31 SPOOKtober 2019\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nxTup = []\nyTup = []\na = np.array([1,0])\nb = np.array([[1.00583, -0.087156] , [0.087156, 1.00583]])\n\n#run the matrix multiplication and add x and y values\nfor i in range(100):\n c = a.dot(b)\n xTup.append(c[0])\n yTup.append(c[1])\n a = c\n \n\n#plot format plot(x,y)\nplt.plot(xTup,yTup)\nplt.xlabel('x values of resultant matrix')\nplt.ylabel('y values of resultant matrix')\nplt.title('Spooky Halloween Spiral, Curves Inward')\nplt.show()\n\n\n\n\n","sub_path":"Week 10/Lab10b_Act1.py","file_name":"Lab10b_Act1.py","file_ext":"py","file_size_in_byte":829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"202786751","text":"import tornado\nimport json\n\nclass APIHandler(tornado.web.RequestHandler):\n\n\n def set_default_headers(self):\n print( \"setting headers!!!\" )\n self.set_header(\"Access-Control-Allow-Origin\", \"*\")\n self.set_header(\"Access-Control-Allow-Headers\", \"x-requested-with\")\n self.set_header(\"Access-Control-Allow-Headers\", \"content-type\")\n self.set_header('Access-Control-Allow-Methods', 'POST, GET, OPTIONS')\n\n def initialize(self):\n \"\"\"\n - Set Content-type for JSON\n \"\"\"\n self.set_header(\"Content-Type\", \"application/json\")\n self.db = self.settings['db']\n self.colony = self.settings['colony']\n\n def api_response(self, data):\n \"\"\"return an api response in the proper output format with status_code == 200\"\"\"\n self.set_header(\"Content-Type\", \"application/javascript; charset=UTF-8\")\n data = json.dumps(data)\n self.finish(data)\n\n def get_current_user(self):\n user_json = self.get_secure_cookie(\"user\")\n if not user_json: return None\n return tornado.escape.json_decode(user_json)\n\n def get_arguments(self, name, default = None):\n try:\n data = tornado.escape.json_decode(self.request.body)\n except:\n self.error(\"Bad request\")\n raise HTTPError(304)\n return None\n\n if name not in data:\n if default is None:\n self.error(\"%s is required\" % (name))\n raise HTTPError(304)\n return None\n else:\n return default\n else:\n return data[name]\n\n\n","sub_path":"utils/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"100899015","text":"#! bin/python3 \n\nimport pandas as pd\nimport numpy as np\nimport pickle\nimport argparse\nimport os\nfrom joblib import Parallel, delayed, parallel_backend\nfrom collections import ChainMap\nfrom numpy import genfromtxt\n\ndef parse_args():\n parser = argparse.ArgumentParser(description=\"Making gene-centric dictionary\")\n## parser.add_argument('--dataframe', help=\"Dataframe with scores\")\n parser.add_argument('--weight', help=\"weight for promoter embeddings\")\n parser.add_argument('--embeddings', help=\"Embeddings\")\n parser.add_argument('--outdir', help=\"Outdir\")\n parser.add_argument('--type', help=\"Type links\")\n args = parser.parse_args()\n return args\n\n# generator_function \ndef gen_genes(genes):\n for gene in genes:\n yield gene\n\ndef generate_constant_promoter_weights(constantpromoterweight_df, weight):\n constantpromoterweight_df.iloc[:24672, 4] = weight\n return constantpromoterweight_df\n\ndef do_calc(dict_key, gene, jaspar_embeddings, distance):\n print(\"Gene: \", str(gene))\n indices = distance.loc[distance[3]==gene].index.astype(int)\n print(indices)\n total_dist_scores = [jaspar_embeddings[i] for i in indices]\n dict_key[gene] = np.sum(total_dist_scores, axis=0)\n return dict_key\n\ndef process(numpy_list_total, distance):\n dict_key = {}\n n_jobs=5\n genes = distance.iloc[:,3]\n# scores = np.array((distance.iloc[:,4]).ravel())\n with parallel_backend(\"loky\", inner_max_num_threads=2):\n dict_key = Parallel(n_jobs=n_jobs)(delayed(do_calc)(dict_key, gene, numpy_list_total, distance) for gene in genes)\n return dict_key\n\ndef main(args, embeddings):\n print(\"Making directory\")\n if not os.path.exists(args.outdir):\n os.mkdir(args.outdir)\n print(\"Making constant promoter dataframe\")\n constantpromoterweight_df = pd.read_csv(\"/mnt/lab_data2/kmualim/enh-gene-linking/datasets/embeddings/new_version/constant_promoter_weights/constantweightedpromoters_abc.csv\", sep=\"\\t\", header=None)\n print(\"Adjusting weights for constant promoter weights dataframe\")\n constantpromoterweight_df.iloc[:24672, 4] = args.weight\n print(\"Grabbing genes\")\n genes = constantpromoterweight_df.iloc[:,3]\n print(\"Grabbing embeddings...\")\n if embeddings.lower().find(\"basset\") != -1: \n embeddingfile = np.load(embeddings, mmap_mode='r')['embeddings']\n else:\n embeddingfile = np.load(embeddings, mmap_mode='r')['arr_0']\n print(\"Types of links\")\n if args.type=='Random' or args.type=='all':\n print(\"Grabbing random links...\")\n random_scores = np.random.rand(13995659, 1)\n calc_embeddings = [i*j for i,j in zip(random_scores, embeddingfile)]\n dict_key = process(calc_embeddings, constantpromoterweight_df)\n total_dict = dict(ChainMap(*dict_key))\n pickle.dump(total_dict, open(os.path.join(args.outdir, \"RandomDictionary.p\"), \"wb\"))\n del calc_embeddings\n del total_dict\n elif args.type=='Uniform' or args.type=='all':\n print(\"Grabbing uniform links...\")\n calc_embedding = embeddingsfile\n dict_key = process(calc_embeddings, constantpromoterweight_df)\n total_dict = dict(ChainMap(*dict_key))\n pickle.dump(total_dict, open(os.path.join(args.outdir, \"UniformDictionary.p\"), \"wb\"))\n del calc_embeddings\n del total_dict\n elif args.type=='distance' or args.type=='all':\n print(\"Grabbing distance links...\")\n df = pd.read_csv(\"/mnt/lab_data2/kmualim/enh-gene-linking/datasets/embeddings/new_version/constant_promoter_weights/Bassetdistanceweightedpromoters_abc.csv\", sep=\"\\t\", header=None)\n random_scores = np.array((df.iloc[:,4]).ravel())\n calc_embeddings = [i*j for i,j in zip(random_scores, embeddingfile)]\n dict_key = process(calc_embeddings, df)\n total_dict = dict(ChainMap(*dict_key))\n pickle.dump(total_dict, open(os.path.join(args.outdir, \"DistanceDictionary.p\"), \"wb\"))\n del calc_embeddings\n del total_dict\n elif args.type=='abc' or args.type=='all':\n print(\"Grabbing abc links...\")\n random_scores = np.array((constantpromoterweight_df.iloc[:,4]).ravel())\n calc_embeddings = [i*j for i,j in zip(random_scores, embeddingfile)]\n dict_key = process(calc_embeddings, df)\n total_dict = dict(ChainMap(*dict_key))\n pickle.dump(total_dict, open(os.path.join(args.outdir, \"ABCDictionary.p\"), \"wb\"))\n del calc_embeddings\n del total_dict\nif __name__==\"__main__\":\n args = parse_args()\n main(args, args.embeddings)\n","sub_path":"scripts/grabRandomLinks.py","file_name":"grabRandomLinks.py","file_ext":"py","file_size_in_byte":4546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"227815018","text":"import numpy as np\nimport os\nimport torch\nimport torch.utils.data as data\nimport json\nimport cv2\n\nclass ShapeNetDataset(data.Dataset):\n def __init__(self,\n datafile,\n image_transform,\n npoints=2500,\n classification=False,\n class_choice=None,\n split='train',\n data_augmentation=False,\n seg_part = 0):\n self.npoints = npoints\n self.datafile = datafile\n self.catfile = './datasets/synsetoffset2category.txt'\n self.cat = {}\n self.data_augmentation = data_augmentation\n self.classification = classification\n self.seg_classes = {}\n\n with open(self.catfile, 'r') as f:\n for line in f:\n ls = line.strip().split()\n self.cat[ls[0]] = ls[1]\n\n if not class_choice is None:\n self.cat = {k: v for k, v in self.cat.items() if k in class_choice}\n\n self.id2cat = {v: k for k, v in self.cat.items()}\n\n self.meta = {}\n splitfile = os.path.join(self.datafile, 'train_test_split', 'shuffled_{}_file_list.json'.format(split))\n filelist = json.load(open(splitfile, 'r'))\n for item in self.cat:\n self.meta[item] = []\n\n for file in filelist:\n _, category, uuid = file.split('/')\n if category in self.cat.values():\n self.meta[self.id2cat[category]].append((os.path.join(self.datafile, category, 'points', uuid + '.pts'),\n os.path.join(self.datafile, category, 'points_label',uuid + '.seg'),\n os.path.join(self.datafile, category, 'image_renders', uuid, 'rendering')))#, '{02d}.png'.format(i)) for i in range(24)))\n\n self.datapath = []\n for item in self.cat:\n for fn in self.meta[item]:\n for j in range(24):\n f = os.path.join(fn[2], '{:02d}.png'.format(j))\n self.datapath.append((item, fn[0], fn[1], f))\n\n self.classes = dict(zip(sorted(self.cat), range(len(self.cat))))\n with open('./datasets/num_seg_classes.txt', 'r') as f:\n for line in f:\n ls = line.strip().split()\n self.seg_classes[ls[0]] = int(ls[1])\n self.num_seg_classes = self.seg_classes[list(self.cat.keys())[0]]\n #print(self.seg_classes, self.num_seg_classes)\n self.image_transform = image_transform\n def __getitem__(self, index):\n fn = self.datapath[index]\n cls = self.classes[self.datapath[index][0]]\n point_set = np.loadtxt(fn[1]).astype(np.float32)\n seg = np.loadtxt(fn[2]).astype(np.int64)\n #image = np.load(fn[3])\n image = np.transpose(np.array(cv2.imread(fn[3]), dtype = np.uint8), (2, 0, 1))\n if self.image_transform is not None:\n image = self.image_transform(image)\n\n choice = np.random.choice(len(seg), self.npoints, replace=True)\n # resample\n point_set = point_set[choice, :]\n\n point_set = point_set - np.expand_dims(np.mean(point_set, axis=0), 0) # center\n dist = np.max(np.sqrt(np.sum(point_set ** 2, axis=1)), 0)\n point_set = point_set / dist # scale\n\n\n if self.data_augmentation:\n theta = np.random.uniform(0, np.pi * 2)\n rotation_matrix = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]])\n point_set[:, [0, 2]] = point_set[:, [0, 2]].dot(rotation_matrix) # random rotation\n point_set += np.random.normal(0, 0.02, size=point_set.shape) # random jitter\n seg = seg[choice]\n point_set = torch.from_numpy(point_set)\n seg = torch.from_numpy(seg)\n cls = torch.from_numpy(np.array([cls]).astype(np.int64))\n image = torch.from_numpy(image)\n\n return point_set, seg, image, self.num_seg_classes\n\n def __len__(self):\n return len(self.datapath)\n\n\nif __name__=='__main__':\n datapath='/Users/lmy/Dataset/Shapenet/shapenetcore_partanno_segmentation_benchmark_v0/'\n d = ShapeNetDataset(datafile=datapath, class_choice=['Chair'])\n print(len(d))\n ps, seg, image = d[0]\n print(ps.size(), ps.type(), seg.size(), seg.type())\n print(image.size(), image.type())\n\n\n\n\n\n\n","sub_path":"datasets/get_segs.py","file_name":"get_segs.py","file_ext":"py","file_size_in_byte":4288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"129366087","text":"import tensorflow as tf\r\nimport os\r\nimport xlrd\r\nimport time\r\nfrom six.moves import xrange\r\nimport numpy as np\r\nin_vector_size=2\r\nbrand_vector_size=2\r\nlabels_vector_size=1\r\nbatch_size=64\r\ntext_size=300\r\npic_size=25088\r\ntext_layer1_size=300\r\ntext_layer4_size=512\r\npic_layer1_size=4096\r\npic_layer4_size=512\r\nmodel_path='G:\\\\aaa\\\\weighted_mean\\\\model3'\r\nzu_size=3\r\nzzh=0.001\r\ndef influencer_vectors_inputs():\r\n influencers_text_placeholder = tf.placeholder(tf.float32, shape=(None,\r\n text_size))\r\n influencers_pic_placeholder = tf.placeholder(tf.float32, shape=(None,\r\n pic_size))\r\n \r\n return influencers_text_placeholder,influencers_pic_placeholder\r\n\r\ndef label_vector_inputs():\r\n \r\n labels_placeholder = tf.placeholder(tf.float32, shape=(None))\r\n return labels_placeholder\r\ndef brand_vector_inputs():\r\n brand_text_placeholder = tf.placeholder(tf.float32, shape=(None,\r\n text_size))\r\n brand_pic_placeholder = tf.placeholder(tf.float32, shape=(None,\r\n pic_size))\r\n return brand_text_placeholder,brand_pic_placeholder\r\n\r\ndef get_batch(brand_text_,brand_pic_,in_text_,in_pic_,labels_,step):\r\n if((step+1)*batch_size*zu_size>len(brand_text_)):\r\n brand_text=brand_text_[step*batch_size*zu_size:]\r\n brand_pic=brand_pic_[step*batch_size*zu_size:]\r\n in_text=in_text_[step*batch_size*zu_size:]\r\n in_pic=in_pic_[step*batch_size*zu_size:]\r\n label_=labels_[step*batch_size*zu_size:]\r\n label_ = label_.reshape([batch_size*zu_size])\r\n else:\r\n brand_text=brand_text_[step*batch_size*zu_size:(step+1)*batch_size*zu_size]\r\n brand_pic=brand_pic_[step*batch_size*zu_size:(step+1)*batch_size*zu_size]\r\n in_text=in_text_[step*batch_size*zu_size:(step+1)*batch_size*zu_size]\r\n in_pic=in_pic_[step*batch_size*zu_size:(step+1)*batch_size*zu_size]\r\n label_=labels_[step*batch_size*zu_size:(step+1)*batch_size*zu_size]\r\n label_ = label_.reshape([batch_size*zu_size])\r\n #print('label:',label_)\r\n return brand_text,brand_pic,in_text,in_pic,label_\r\n\r\ndef fill_feed_dict_train(brand_text_train,brand_pic_train,in_text_train,in_pic_train,labels_train, brand_text_pl,brand_pic_pl, in_text_pl,in_pic_pl, labels_pl,step,keep_prob):\r\n # Create the feed_dict for the placeholders filled with the next\r\n # `batch size` examples.\r\n brand_text_feed,brand_pic_feed,in_text_feed,in_pic_feed,labels_feed = get_batch(brand_text_train,brand_pic_train,in_text_train,in_pic_train,labels_train,step)\r\n feed_dict = {\r\n brands_text: brand_text_feed,\r\n brands_pic:brand_pic_feed,\r\n influencers_text: in_text_feed,\r\n influencers_pic:in_pic_feed,\r\n labels: labels_feed,\r\n keep_prob:0.5,\r\n }\r\n return feed_dict\r\n\r\n\r\ndef fill_feed_dict_test(brand_text_train,brand_pic_train,in_text_train,in_pic_train,labels_train, brand_text_pl,brand_pic_pl, in_text_pl,in_pic_pl, labels_pl,step,keep_prob):\r\n # Create the feed_dict for the placeholders filled with the next\r\n # `batch size` examples.\r\n brand_text_feed,brand_pic_feed,in_text_feed,in_pic_feed,labels_feed = get_batch(brand_text_train,brand_pic_train,in_text_train,in_pic_train,labels_train,step)\r\n feed_dict = {\r\n brands_text: brand_text_feed,\r\n brands_pic:brand_pic_feed,\r\n influencers_text: in_text_feed,\r\n influencers_pic:in_pic_feed,\r\n labels: labels_feed,\r\n keep_prob:1,\r\n }\r\n return feed_dict\r\n\r\ndef get_weights(shape, lambd):\r\n var = tf.Variable(tf.random_normal(shape,stddev=0.1))\r\n tf.add_to_collection('losses', tf.contrib.layers.l1_regularizer(lambd)(var))\r\n return var\r\n\r\ndef metrics(l_user,l_in,l_ist,l_score):#MedR,recall@10,recall@50\r\n all_positive=790\r\n part=len(l_user)/all_positive\r\n part=int(part)\r\n index=0\r\n index2=0\r\n lll=[]\r\n lll1=[]\r\n lll2=[]\r\n \r\n lis=[]\r\n for k in range(0,all_positive):\r\n lis.append(k+1)\r\n for j in range(0,part):\r\n print(j)\r\n l=[]\r\n for i in range(0,all_positive):\r\n a=l_score[index]\r\n l.append(a)\r\n index+=1\r\n if((i+1)%all_positive ==0 or index == len(l_user)):\r\n break\r\n \r\n l.sort()\r\n ll=[]\r\n #shu=0\r\n for i in range(0,all_positive):\r\n \r\n user=l_user[index2]\r\n in_=l_in[index2]\r\n ist=l_ist[index2]\r\n score=l_score[index2]\r\n paiming=l.index(score)\r\n \r\n ist=int(ist)\r\n \r\n \r\n if(ist==1):\r\n \r\n ll.append(all_positive-paiming)\r\n \r\n if((i+1)%all_positive==0 or index2 == len(l_user)):\r\n \r\n print(ll)\r\n y=min(ll)\r\n lll2.append(y)\r\n index1=0\r\n index3=0\r\n for a in ll:\r\n if(a<11):\r\n index1+=1\r\n if(a<51):\r\n index3+=1\r\n p1=index1/len(ll)\r\n p2=index3/len(ll)\r\n \r\n lll.append(p1)\r\n lll1.append(p2)\r\n \r\n break\r\n index2+=1\r\n \r\n\r\n\r\n al=0.0\r\n lll2.sort()\r\n med=0\r\n if(len(lll2)%2==0):\r\n med=len(lll2)/2\r\n else:\r\n med=int(len(lll2)/2)+1\r\n for xy in range(0,len(lll2)):\r\n if(xy==(med-1)):\r\n al=lll2[xy]\r\n #MedR\r\n print(al)\r\n \r\n al2=0.0\r\n for xy1 in lll:\r\n \r\n al2+=xy1\r\n #recall@10\r\n print(al2/len(lll))\r\n \r\n al3=0.0\r\n for xy2 in lll1:\r\n \r\n al3+=xy2\r\n #recall@50\r\n print(al3/len(lll1))\r\n \r\n \r\ndef auc(l_user,l_in,l_ist,l_score):\r\n ExcelFile1=xlrd.open_workbook('G:\\\\aaa\\\\test_set_auc_v3.xlsx')\r\n sheet1=ExcelFile1.sheet_by_index(0)\r\n err=0\r\n AUC=0.0\r\n AUC_all=0.0\r\n cAUC=0.0\r\n cAUC_all=0.0\r\n \r\n dict_s={}\r\n \r\n for i in range(0,len(l_user)):\r\n a_=l_user[i]\r\n b_=l_in[i]\r\n score1=l_score[i]\r\n a=a_+b_\r\n dict_s[a]=score1\r\n for i in range(0,sheet1.nrows):\r\n if(i%10000==0):\r\n print(i)\r\n AUC_all+=1\r\n a=sheet1.cell(i,0).value.encode('utf-8').decode('utf-8-sig')\r\n b=sheet1.cell(i,1).value.encode('utf-8').decode('utf-8-sig')\r\n c=sheet1.cell(i,3).value.encode('utf-8').decode('utf-8-sig')\r\n d=sheet1.cell(i,4).value.encode('utf-8').decode('utf-8-sig')\r\n e=sheet1.cell(i,5).value\r\n score1=0.0\r\n score2=0.0\r\n if a+b in dict_s.keys():\r\n score1=dict_s[a+b]\r\n if c+d in dict_s.keys():\r\n score2=dict_s[c+d]\r\n \r\n \r\n if(score1==0.0 or score2==0.0):\r\n err+=1\r\n if(e!=0):\r\n cAUC_all+=1\r\n if(score1>score2):\r\n AUC+=1\r\n if(e!=0):\r\n #print(score1,score2)\r\n cAUC+=1\r\n \r\n print('AUC is:',AUC/AUC_all,AUC)\r\n print('cAUC is:',cAUC/cAUC_all,cAUC)\r\n print(err)\r\ngraph1 = tf.Graph()\r\nwith graph1.as_default():\r\n \r\n keep_prob = tf.placeholder(tf.float32)\r\n\r\n influencers_text,influencers_pic=influencer_vectors_inputs()#pic_size=25088,text_size=300\r\n brands_text,brands_pic=brand_vector_inputs()\r\n labels=label_vector_inputs()\r\n brands_pic_=tf.reshape(brands_pic,[batch_size*zu_size,7,7,512])\r\n influencers_pic_=tf.reshape(influencers_pic,[batch_size*zu_size,7,7,512])\r\n normal_brands_pic=tf.nn.local_response_normalization(brands_pic_,2,0.1,1,1)\r\n normal_influencers_pic=tf.nn.local_response_normalization(influencers_pic_,2,0.1,1,1)\r\n\r\n normal_brands_pic=tf.reshape(normal_brands_pic,[batch_size*zu_size,25088])\r\n normal_influencers_pic=tf.reshape(normal_influencers_pic,[batch_size*zu_size,25088])\r\n\r\n\r\n\r\n w_brand_text1=get_weights([text_size,text_layer1_size],zzh)\r\n dropout1=tf.nn.dropout(w_brand_text1,keep_prob)\r\n b_brand_text1=tf.Variable(tf.zeros([text_layer1_size],name=\"bias_brands_text_1\"))\r\n w_in_text1=get_weights([text_size,text_layer1_size],zzh)\r\n dropout2=tf.nn.dropout(w_in_text1,keep_prob)\r\n b_in_text1=tf.Variable(tf.zeros([text_layer1_size],name=\"bias_influencers_text_1\"))\r\n\r\n brand_text_embed_v1=tf.nn.leaky_relu(tf.matmul(brands_text,dropout1)+b_brand_text1,0.01)\r\n in_text_embed_v1=tf.nn.leaky_relu(tf.matmul(influencers_text,dropout2)+b_in_text1,0.01)\r\n\r\n w_brand_text4=get_weights([text_layer1_size,text_layer4_size],zzh)\r\n w_in_text4=get_weights([text_layer1_size,text_layer4_size],zzh)\r\n\r\n brand_text_embed_v4=tf.matmul(brand_text_embed_v1,w_brand_text4)\r\n in_text_embed_v4=tf.matmul(in_text_embed_v1,w_in_text4)\r\n\r\n\r\n\r\n w_brand_pic1=get_weights([pic_size,pic_layer1_size],zzh)\r\n dropout3=tf.nn.dropout(w_brand_pic1,keep_prob)\r\n b_brand_pic1=tf.Variable(tf.zeros([pic_layer1_size],name=\"bias_brands_pic_1\"))\r\n w_in_pic1=get_weights([pic_size,pic_layer1_size],zzh)\r\n dropout4=tf.nn.dropout(w_in_pic1,keep_prob)\r\n \r\n b_in_pic1=tf.Variable(tf.zeros([pic_layer1_size],name=\"bias_influencers_pic_1\"))\r\n brand_pic_embed_v1=tf.nn.leaky_relu(tf.matmul(normal_brands_pic,dropout3)+b_brand_pic1,0.01)\r\n in_pic_embed_v1=tf.nn.leaky_relu(tf.matmul(normal_influencers_pic, dropout4)+b_in_pic1,0.01)\r\n \r\n w_brand_pic4=get_weights([pic_layer1_size,pic_layer4_size],zzh)\r\n w_in_pic4=get_weights([pic_layer1_size,pic_layer4_size],zzh)\r\n \r\n brand_pic_embed_v4=tf.matmul(brand_pic_embed_v1,w_brand_pic4)\r\n in_pic_embed_v4=tf.matmul(in_pic_embed_v1,w_in_pic4)\r\n \r\n\r\n\r\n brand_embed = tf.multiply(brand_text_embed_v4,brand_pic_embed_v4)\r\n in_embed=tf.multiply(in_text_embed_v4,in_pic_embed_v4)\r\n \r\n \r\n \r\n product_1=tf.multiply(brand_embed,in_embed)\r\n x=tf.reduce_mean(product_1,axis=1)\r\n y=tf.reshape(x,[batch_size,zu_size])\r\n y_1=tf.nn.softmax(y)\r\n y_2=tf.reshape(y_1,[batch_size*zu_size])\r\n y_2=y_2+1e-8\r\n cross_entropy2=-tf.reduce_mean(tf.reduce_sum(labels*tf.log(y_2)))\r\n \r\n \r\n \r\n LEARNING_RATE_BASE = 0.002\r\n LEARNING_RATE_DECAY = 0.99\r\n LEARNING_RATE_STEP = 400\r\n gloabl_steps = tf.Variable(0, trainable=False)\r\n learning_rate = tf.train.exponential_decay(LEARNING_RATE_BASE, gloabl_steps,LEARNING_RATE_STEP,LEARNING_RATE_DECAY,staircase=True)\r\n tf.add_to_collection('losses', cross_entropy2)\r\n loss = tf.add_n(tf.get_collection('losses'))\r\n train_op=tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)\r\n\r\n\r\n\r\n\r\ndef main():\r\n Epoch_=30\r\n part_=15\r\n Step_=0\r\n Epoch=0\r\n \r\n config = tf.ConfigProto()\r\n config.gpu_options.per_process_gpu_memory_fraction = 0.75\r\n config.gpu_options.visible_device_list = \"0\"\r\n #先引入dataset\r\n \r\n with tf.Session(graph=graph1) as sess:\r\n \r\n saver = tf.train.Saver()\r\n \r\n \r\n init=tf.global_variables_initializer()\r\n sess.run(init)\r\n ckpt = tf.train.get_checkpoint_state(model_path)\r\n if ckpt and ckpt.all_model_checkpoint_paths:\r\n path_=''\r\n \r\n for path in ckpt.all_model_checkpoint_paths:\r\n path_=path\r\n \r\n print(path_)\r\n saver.restore(sess, path)\r\n else:\r\n init=tf.global_variables_initializer()\r\n sess.run(init)\r\n \r\n\r\n\r\n for j in range(0,Epoch_):\r\n \r\n for i in range(0,part_):\r\n part=i\r\n #print('part:',i)\r\n print('loading...train')\r\n path1='G:\\\\aaa\\\\weighted_mean\\\\dataset_k3\\\\brand_text_train_'+str(i)+'.npy'\r\n brand_text_train=np.load(path1)\r\n print('loading...train')\r\n in_text_train=np.load('G:\\\\aaa\\\\weighted_mean\\\\dataset_k3\\\\in_text_train_'+str(i)+'.npy')\r\n print('loading...train')\r\n brand_pic_train=np.load('G:\\\\aaa\\\\dataset_k3\\\\brand_pic_train_'+str(i)+'.npy')\r\n print('loading...train')\r\n in_pic_train=np.load('G:\\\\aaa\\\\dataset_k3\\\\in_pic_train_'+str(i)+'.npy')\r\n print('loading...train')\r\n labels_train=np.load('G:\\\\aaa\\\\weighted_mean\\\\dataset_k3\\\\label_train_'+str(i)+'.npy')\r\n if(len(brand_text_train)%(zu_size*batch_size)==0):\r\n Step_=len(brand_text_train)/(zu_size*batch_size)\r\n else:\r\n Step_=int(len(brand_text_train)/(zu_size*batch_size))\r\n Step_=int(Step_)\r\n \r\n \r\n print('Epoch %d, Part %d'%(Epoch,part))\r\n mean_loss=0\r\n for step in xrange(Step_):\r\n start_time = time.time()\r\n feed_dict = fill_feed_dict_train(brand_text_train,brand_pic_train,in_text_train,in_pic_train,labels_train,brands_text,brands_pic,influencers_text,influencers_pic,labels,step,keep_prob)\r\n \r\n _brand_pic,_brand_embed, _in_embed_v4, _labels, _x,_y=sess.run([y_2,brand_text_embed_v4, x, labels,normal_brands_pic, brands_text],feed_dict=feed_dict)\r\n \r\n _, loss_value = sess.run([train_op, cross_entropy2],feed_dict=feed_dict)\r\n mean_loss+=loss_value\r\n \r\n duration = time.time() - start_time\r\n \r\n if (step % 10 == 0 and step!=0):\r\n \r\n print('Step %d: loss = %.2f (%.3f sec)' % (step, mean_loss/step, duration))\r\n \r\n globalstep=part+(15*Epoch)+1\r\n if ((globalstep%15==8 or globalstep%15==0 ) and (step==Step_-1) and globalstep!=0):\r\n checkpoint_file = os.path.join(model_path, 'model.ckpt')\r\n saver.save(sess, checkpoint_file, global_step=globalstep)\r\n \r\n if ((part+1)==8 or (part+1)==15):\r\n print('-----test-----')\r\n ExcelFile1=xlrd.open_workbook('G:\\\\aaa\\\\test_set.xlsx')\r\n sheet1=ExcelFile1.sheet_by_index(0)\r\n l_user=[]\r\n l_in=[]\r\n l_ist=[]\r\n l_score=[]\r\n index=0\r\n for k in range(0,6):\r\n print('loading...test')\r\n path1='F:\\\\dataset_k_3_test\\\\brand_text_test_'+str(k)+'.npy'\r\n brand_text_test=np.load(path1)\r\n print('loading...test')\r\n in_text_test=np.load('F:\\\\dataset_k_3_test\\\\in_text_test_'+str(k)+'.npy')\r\n print('loading...test')\r\n brand_pic_test=np.load('F:\\\\dataset_k_3_test\\\\brand_pic_test_'+str(k)+'.npy')\r\n print('loading...test')\r\n in_pic_test=np.load('F:\\\\dataset_k_3_test\\\\in_pic_test_'+str(k)+'.npy')\r\n print('loading...test')\r\n labels_test=np.load('F:\\\\dataset_k_3_test\\\\label_test_'+str(k)+'.npy')\r\n if(len(brand_text_test)%(zu_size*batch_size)==0):\r\n tStep_=len(brand_text_test)/(zu_size*batch_size)\r\n else:\r\n tStep_=int(len(brand_text_test)/(zu_size*batch_size))\r\n tStep_=int(tStep_)\r\n test_mean_loss=0.0\r\n \r\n for t in xrange(tStep_):\r\n \r\n feed_dict = fill_feed_dict_test(brand_text_test,brand_pic_test,in_text_test,in_pic_test,labels_test,brands_text,brands_pic,influencers_text,influencers_pic,labels,t,keep_prob)\r\n test_labels, test_x,test_loss=sess.run([labels, x,cross_entropy2],feed_dict=feed_dict)\r\n test_mean_loss+=test_loss\r\n if(t % 10 == 0 and t!=0):\r\n print('Step %d: loss = %.2f ' % (t, test_mean_loss/t))\r\n for xx in test_x:\r\n \r\n user=sheet1.cell(index,0).value.encode('utf-8').decode('utf-8-sig')\r\n influencer=sheet1.cell(index,1).value.encode('utf-8').decode('utf-8-sig')\r\n ist=sheet1.cell(index,2).value\r\n l_user.append(user)\r\n l_in.append(influencer)\r\n l_ist.append(ist)\r\n l_score.append(xx)\r\n index+=1\r\n metrics(l_user,l_in,l_ist,l_score)\r\n auc(l_user,l_in,l_ist,l_score)\r\n \r\n Epoch+=1\r\nif __name__ == '__main__':\r\n main()\r\n \r\n \r\n \r\n \r\n \r\n \r\n ","sub_path":"code/mir_k3.py","file_name":"mir_k3.py","file_ext":"py","file_size_in_byte":17180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"637208537","text":"# coding:utf-8\n\n\ndef compatable(defType, value):\n \"\"\"\n 检查值和类型是否兼容\n \"\"\"\n if defType == 'Int':\n return type(value) == int\n elif defType == 'Float':\n return type(value) == float\n elif defType == 'Bool':\n return type(value) == bool\n elif defType == 'Dict':\n return type(value) == dict\n elif defType == 'String':\n return type(value) == str\n elif defType == 'List':\n return type(value) == list\n elif defType == 'Vec3':\n if type(value) not in [str, str]:\n return False\n from util import Vec3\n val = Vec3.valueFromString(value)\n return val != None\n elif defType == 'Any':\n return type(value) in [int, float, bool, str, list, object]\n elif defType == 'Array':\n return type(value) == list\n\n\n# 根据nodes的meta文件,检查图数据的类型是否有不兼容的地方\ndef checkCompat(defData, graphData):\n \"\"\"\n 将图数据和节点定义文件的类型信息进行比对,查看有没有不兼容的地方。\n 如果有,输出兼容性错误信息\n \"\"\"\n nodes = graphData['nodes']\n fastDef = {}\n for defNode in defData:\n fastDef[defNode['name'][-1]] = defNode\n\n compatErrors = []\n\n for node in nodes:\n typeId = node['type']\n nodeDef = fastDef[typeId]\n\n args = node['args']\n for argKey, argVal in list(args.items()):\n # argKey就是SubItem的TypeId\n for argDef in nodeDef['args']:\n if argDef['name'][-1] != argKey:\n continue\n\n if not compatable(argDef['type'], argVal):\n compatErrors.append({\n 'id': node['id'],\n 'subItemTypeId': argKey,\n 'type': argDef['type'],\n 'value': argVal\n })\n\n return compatErrors\n\n\ndef convert(graphData, compatErrors, convertRule):\n \"\"\"\n 将图数据中,不兼容的类型,根据compatErrors信息和给定的转换规则convertRule\n 转换为新的图数据,此后才能打开\n \"\"\"\n nodes = graphData['nodes']\n fastCompatErrors = {}\n for compatError in compatErrors:\n fastCompatErrors[compatError['id']] = compatError\n\n newNodes = []\n\n for node in nodes:\n if node['id'] not in fastCompatErrors:\n continue\n\n compatError = fastCompatErrors[node['id']]\n\n args = node['args']\n\n newArgs = {}\n\n for argKey, argVal in list(args.items()):\n if argKey != compatError['subItemTypeId']:\n newArgs[argKey] = argVal\n continue\n\n convertType = compatError['type']\n newArgs[argKey] = convertRule[convertType](argVal)\n\n node['args'] = newArgs\n\n\ndef convertFile(filename):\n \"\"\"\n 根据compat_error给出的错误信息,利用rules\n 将filename中的图文件内容进行转换\n \"\"\"\n import json\n import os, sys\n import traceback\n from ConvertRules import convertRule\n compatErrors = []\n with open('compat_error.json') as f:\n compatErrors = json.load(f)\n\n graphData = None\n with open(filename) as f:\n graphData = json.load(f)\n\n dataString = json.dumps(graphData)\n newGraphData = json.loads(dataString)\n\n failed = False\n\n try:\n convert(newGraphData, compatErrors, convertRule)\n except Exception as e:\n failed = True\n traceback.print_exc(file=sys.stderr)\n\n if failed:\n # 转换失败,不写文件\n return False\n\n # 原有的图形数据写入到一个old文件中\n parts = os.path.splitext(filename)\n oldFilename = parts[0] + '_old' + parts[1]\n with open(oldFilename, 'w') as f:\n json.dump(graphData, f, indent=4)\n\n # 新的图形数据覆盖原来的文件\n with open(filename, 'w') as f:\n json.dump(newGraphData, f, indent=4)\n\n return True\n\n\nif __name__ == '__main__':\n import sys\n if len(sys.argv) != 2:\n print('usage: python backcompat.py [filename]')\n sys.exit(1)\n\n print('converting', sys.argv[1])\n\n if convertFile(sys.argv[1]):\n print('convert success')\n else:\n print('convert failed')\n\n\n","sub_path":"start/editor/backcompat.py","file_name":"backcompat.py","file_ext":"py","file_size_in_byte":4255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"438575603","text":"import simplejson\nimport models\nfrom django.http import HttpResponse\nfrom django.shortcuts import render_to_response\nfrom django.template import RequestContext\nfrom django.contrib.auth.decorators import login_required, permission_required\nfrom constants import constants\n\nvalid_sorts = [\"date\", \"title\", \"tag\", \"owner\"]\nvalid_limits = [\"upcoming\", \"all\"]\n\n\n#debug function\ndef update_calendars():\n account = models.GoogleAccount.objects.get()\n account.update_calendars()\n\n\n@login_required\ndef register_api_view(request, workshop_id):\n return HttpResponse(request.user.profile.register_event(workshop_id), mimetype=\"application/json\")\n\n\n@login_required\ndef account_api_view(request):\n email = request.REQUEST.get(\"email\")\n return HttpResponse(request.user.profile.update_account(email), mimetype=\"application/json\")\n\n\ndef workshop_api_view(request, workshop_id):\n return HttpResponse(\"not implemented\")\n\n\n@login_required\ndef workshops_api_view(request, workshop_id=\"-1\", action=\"\"):\n response = {\"status\": \"ok\"}\n sort = request.REQUEST.get(\"sort\")\n limit = request.REQUEST.get(\"limit\")\n format = request.REQUEST.get(\"format\")\n if sort is None or not sort in valid_sorts:\n response[\"status\"] = \"fail\"\n response[\"error\"] = \"Invalid sort param\"\n if limit is None or not limit in valid_limits:\n response[\"status\"] = \"fail\"\n response[\"error\"] = \"Invalid limit param\"\n\n if response[\"status\"] != \"fail\":\n events = models.Event.objects.query_events(sort, limit)\n if format is None:\n workshops_html = []\n for event in events:\n workshops_html.append(event.output_to_html(request.user))\n return render_to_response('workshops_query.html', {\"workshops\": workshops_html}, context_instance=RequestContext(request))\n elif format == \"json\":\n response[\"data\"] = simplejson.dumps(events)\n return HttpResponse(simplejson.dumps(events))\n else:\n if format is None:\n return HttpResponse(\"not implemented\")\n elif format == \"json\":\n return HttpResponse(simplejson.dumps(response), mimetype=\"application/json\")\n\n\n@login_required\ndef workshops_view(request, workshop_id=\"-1\", action=\"\"):\n #update_calendars()\n if workshop_id == \"-1\":\n view_variables = get_base_variables()\n return render_to_response('workshops.html', view_variables, context_instance=RequestContext(request))\n else:\n return HttpResponse(\"not implemented\")\n\n\n@login_required\ndef account_view(request):\n view_variables = get_base_variables()\n view_variables[\"email\"] = request.user.profile.receive_reminders\n return render_to_response('account.html', view_variables, context_instance=RequestContext(request))\n\n\n@login_required\ndef visual_schedule_view(request):\n view_variables = get_base_variables()\n view_variables[\"active_calendars\"] = models.Calendar.objects.get_active()\n return render_to_response('visual_schedule.html', view_variables, context_instance=RequestContext(request))\n\n\n@login_required\n@permission_required('UserProfile.use_admin_panel')\ndef admin_view(request):\n return HttpResponse(\"not implemented\")\n\n\ndef get_base_variables():\n return {\"constants\": constants}\n","sub_path":"calman/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"500685630","text":"import os\n\nimport pytest\nfrom mock import patch\n\nimport runez\n\n\ndef custom_serializer(value):\n if value == \"invalid\":\n return None\n\n return {\"value\": value}\n\n\ndef test_no_tty():\n v = runez.ask_once(\"test\", \"Please enter value: \", fatal=False)\n assert v is None\n\n with pytest.raises(runez.system.AbortException):\n runez.ask_once(\"test\", \"Please enter value: \")\n\n with pytest.raises(Exception):\n # pytest should raise an exception if trying to call input() from a test case\n runez.prompt.interactive_prompt(\"test\")\n\n\n@patch(\"sys.stdin.isatty\", return_value=True)\n@patch(\"runez.prompt.interactive_prompt\", side_effect=str)\ndef test_with_tty(*_):\n with runez.TempFolder() as tmp:\n v = runez.ask_once(\"test\", \"foo\", serializer=custom_serializer, fatal=False, base=tmp)\n assert v == {\"value\": \"foo\"}\n\n # Verify that file was indeed stored\n path = os.path.join(tmp, \"test.json\")\n assert runez.read_json(path) == {\"value\": \"foo\"}\n\n # Verify that returned value is the 1st one stored\n v = runez.ask_once(\"test\", \"bar\", fatal=False, base=tmp)\n assert v == {\"value\": \"foo\"}\n\n # Verify that if `serializer` returns None, value is not returned/stored\n v = runez.ask_once(\"test-invalid\", \"invalid\", serializer=custom_serializer, fatal=False, base=tmp)\n assert v is None\n\n # Simulate CTRL+C\n with patch(\"runez.prompt.interactive_prompt\", side_effect=KeyboardInterrupt):\n v = runez.ask_once(\"test2\", \"test2\", serializer=custom_serializer, fatal=False, base=tmp)\n assert v is None\n","sub_path":"tests/test_prompt.py","file_name":"test_prompt.py","file_ext":"py","file_size_in_byte":1626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"262904797","text":"# _*_ coding:utf-8 _*_\n# It's no need.\n\nimport sqlite3\n\ndef main():\n conn = sqlite3.connect('noterecord.db')\n cursor = conn.cursor()\n cursor.execute('create table record (time text, record varchar)')\n cursor.execute(\"insert into record (time, record) values ('2015-12-07 00:01:00', 'test')\")\n cursor.close()\n conn.commit()\n conn.close()\n\nif __name__ == '__main__':\n main()","sub_path":"_src/om2py7w/7wex0/createdata.py","file_name":"createdata.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"560777837","text":"# \n# Copyright 2017 , UT-Battelle, LLC\n# All rights reserved\n# [Home Assistant- VOLTTRON Integration, Version 1.0]\n# OPEN SOURCE LICENSE (Permissive)\n# \n# Subject to the conditions of this License, UT-Battelle, LLC (the “Licensor”)\n# hereby grants, free of charge, to any person (the “Licensee”) obtaining a copy\n# of this software and associated documentation files (the \"Software\"), a perpetual,\n# worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license \n# to use, copy, modify, merge, publish, distribute, and/or sublicense copies of the\n# Software.\n# \n# 1. Redistributions of Software must retain the above open source license grant, \n# copyright and license notices, this list of conditions, and the disclaimer listed\n# below. Changes or modifications to, or derivative works of the Software must be\n# noted with comments and the contributor and organization’s name.\n# \n# 2. Neither the names of Licensor, the Department of Energy, or their employees may\n# be used to endorse or promote products derived from this Software without their\n# specific prior written permission.\n# \n# 3. If the Software is protected by a proprietary trademark owned by Licensor or the\n# Department of Energy, then derivative works of the Software may not be distributed\n# using the trademark without the prior written approval of the trademark owner. \n# \n# \n# \n# ****************************************************************************************************************\n# DISCLAIMER\n# \n# UT-Battelle, LLC AND THE GOVERNMENT MAKE NO REPRESENTATIONS AND DISCLAIM ALL WARRANTIES,\n# BOTH EXPRESSED AND IMPLIED. THERE ARE NO EXPRESS OR IMPLIED WARRANTIES OF MERCHANTABILITY\n# OR FITNESS FOR A PARTICULAR PURPOSE, OR THAT THE USE OF THE SOFTWARE WILL NOT INFRINGE ANY\n# PATENT, COPYRIGHT, TRADEMARK, OR OTHER PROPRIETARY RIGHTS, OR THAT THE SOFTWARE WILL \n# ACCOMPLISH THE INTENDED RESULTS OR THAT THE SOFTWARE OR ITS USE WILL NOT RESULT IN INJURY\n# OR DAMAGE. The user assumes responsibility for all liabilities, penalties, fines, claims,\n# causes of action, and costs and expenses, caused by, resulting from or arising out of, in\n# whole or in part the use, storage or disposal of the SOFTWARE.\n# \n# ****************************************************************************************************************\n#\n\nimport logging\nimport sys\nimport json\n\nfrom volttron.platform.vip.agent import Agent, Core, PubSub\nfrom volttron.platform.agent import utils\nfrom . import settings\n\nimport requests\n\nutils.setup_logging()\n_log = logging.getLogger(__name__)\n__version__ = '3.0'\nrecord_topic = 'record/'\n\nclass HASSLockAgent(Agent):\n \n \n def __init__(self, config_path, **kwargs):\n '''\n Initializes the HASS Lock Agent for communicating with HASS API\n regarding lock components\n '''\n \n super(HASSLockAgent, self).__init__(**kwargs)\n \n self.config = utils.load_config(config_path)\n self.agentId = self.config['agentId']\n self.hassConfig = self.config['hassConfigPath']\n self.url = self.config['url']\n self.urlPass = self.config['urlPass'] \n self.data = []\n \n self.GetData()\n \n \n \n @PubSub.subscribe('pubsub', record_topic + 'hass/lock/')\n def on_match(self, peer, sender, bus, topic, headers, message):\n '''\n subscribes to the messages received from HASS Agent about the lock components loaded on HASS API\n '''\n \n #For testing purposes only print the messages for now\n print('Peer: {0}, Sender: {1}:, Bus: {2}, Topic: {3}, Headers: {4}, Message: {5}'.format\n (peer, sender, bus, topic, headers, message)) \n \n \n \n def on_publish_topic(self):\n '''\n Publishes the information about lock components loaded on HASS API\n '''\n \n msg = []\n \n self.GetData()\n \n try:\n \n if(self.data == []):\n \n msg = \"No data was received from HASS API, Please check the connection to the API and the Agent configuration file\"\n \n self.vip.pubsub.publish(peer = 'pubsub',\n topic = record_topic + 'hass/error',\n message = msg,\n headers = {'AgentId':self.agentId}).get(timeout=10)\n \n else: \n \n msg = []\n \n for entry in self.data:\n \n entityId = entry['entity_id']\n \n if(entityId.startswith(\"lock.\")):\n '''\n publishes data about lock device\n '''\n msg = entry['attributes']\n \n self.vip.pubsub.publish(peer = 'pubsub',\n topic = record_topic + 'hass/lock/' + entityId,\n message = msg,\n headers = {'AgentId':self.agentId}).get(timeout=10) \n \n except requests.exceptions.RequestException as e:\n print(e)\n\n\n\n def GetData(self):\n '''\n Get the current state for loaded components\n from Home Assistant API\n '''\n urlStates = self.url+'states'\n \n try:\n \n self.data = requests.get(urlStates).json()\n \n except requests.exceptions.RequestException as e:\n print(e)\n \n \n \n def Lock(self, entityId, lockCode):\n '''\n Locks the lock.entityId device\n '''\n \n if lockCode is None:\n return\n \n urlServices = self.url+'services/lock/lock'\n \n try:\n \n jsonMsg = json.dumps({\"entity_id\" : entityId, \"code\": lockCode})\n \n header = {'Content-Type': 'application/json'}\n \n requests.post(urlServices, data = jsonMsg, headers = header)\n \n self.on_publish_topic()\n \n except requests.exceptions.RequestException as e:\n print(e)\n \n \n \n def Unlock(self, entityId, unlockCode):\n '''\n Unlocks the lock.entityId device\n '''\n \n if lockCode is None:\n return\n \n urlServices = self.url+'services/lock/unlock'\n \n try:\n \n jsonMsg = json.dumps({\"entity_id\" : entityId, \"code\": unlockCode})\n \n header = {'Content-Type': 'application/json'}\n \n requests.post(urlServices, data = jsonMsg, headers = header)\n \n self.on_publish_topic()\n \n except requests.exceptions.RequestException as e:\n print(e) \n \n \n \ndef main(argv=sys.argv):\n '''Main method called by the platform.'''\n utils.vip_main(HASSClimateAgent,version=__version__)\n\n\n\nif __name__ == '__main__':\n # Entry point for script\n try:\n sys.exit(main())\n except KeyboardInterrupt:\n pass \n","sub_path":"ornl/HomeAssistant-VOLTTRON-Integration-Agents/HASSLockAgent/HASSLockAgent/agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":7405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"215704621","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n# pylint: disable=too-many-lines\n\"\"\"\nCustom filters for use in openshift-ansible\n\"\"\"\n\nfrom ansible import errors\n\n\ndef odc_join_files_from_dict(files, inc_dict):\n '''Take a list of dictionaries with name, path and insert them into\n inc_dict[name] = path\n '''\n if not isinstance(files, list):\n raise errors.AnsibleFilterError(\"|failed expects files param to be a list of dicts\")\n\n if not isinstance(inc_dict, dict):\n raise errors.AnsibleFilterError(\"|failed expects inc_dict param to be a dict\")\n\n for item in files:\n inc_dict[item['name']] = item['path']\n\n return inc_dict\n\n\nclass FilterModule(object):\n \"\"\" Custom ansible filter mapping \"\"\"\n\n # pylint: disable=no-self-use, too-few-public-methods\n def filters(self):\n \"\"\" returns a mapping of filters to methods \"\"\"\n return {\n \"odc_join_files_from_dict\": odc_join_files_from_dict,\n }\n","sub_path":"ansible/roles/openshift_daemonset_config/filter_plugins/config_filters.py","file_name":"config_filters.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"76600451","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport sys\nfrom os.path import join, dirname\nsys.path.insert(0, join(dirname(__file__), '..'))\n\nimport cv2\nimport time\nimport random\nimport argparse\nimport numpy as np\nfrom PIL import Image\nfrom datetime import datetime\n\nimport torch\nimport torchvision.transforms as transforms\nfrom models import GeneratorUNet\nfrom imitation_model import Model\n\nfrom device import SensorManager, scan_usb\nfrom device.controller import Controller\nfrom utils.local_planner import get_cost_map\nfrom utils.camera_info import camera2lidar\nfrom utils.navigator import NavMaker\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--img_height', type=int, default=128, help='size of image height')\nparser.add_argument('--img_width', type=int, default=256, help='size of image width')\nparser.add_argument('--show', type=bool, default=False, help='show image')\nopt = parser.parse_args()\n\nrandom.seed(datetime.now())\ntorch.manual_seed(999)\n\ndevice = torch.device('cpu')\ngenerator = GeneratorUNet()\n\ngenerator = generator.to(device)\ngenerator.load_state_dict(torch.load('ckpt/g.pth', map_location=device))\ngenerator.eval()\n\nimg_trans_ = [\n transforms.Resize((opt.img_height, opt.img_width), Image.BICUBIC),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),\n]\nimg_trans = transforms.Compose(img_trans_)\n\ndef get_nav():\n global nav_maker\n nav = nav_maker.get()\n return nav\n\ndef get_img(nav):\n img = sm['camera'].getImage()\n img = Image.fromarray(cv2.cvtColor(img,cv2.COLOR_BGR2RGB))\n img = img_trans(img)\n nav = img_trans(nav)\n input_img = torch.cat((img, nav), 0).unsqueeze(0)\n return input_img\n \ndef get_net_result(input_img):\n with torch.no_grad():\n input_img = input_img#.to(device)\n result = generator(input_img)\n return result\n\ntheta_y = 20.0*np.pi/180.\npitch_rotationMat = np.array([\n [np.cos(theta_y), 0., np.sin(theta_y)],\n [ 0., 1., 0. ],\n [-np.sin(theta_y), 0., np.cos(theta_y)],\n]) \n\ntransforms_ = [\n transforms.ToTensor(),\n transforms.Normalize((0.5), (0.5)),\n]\ntransform = transforms.Compose(transforms_)\ncmd_model = Model().to(device)\ncmd_model.load_state_dict(torch.load('ckpt/model.pth', map_location=device))\ncmd_model.eval()\n\ndef inverse_perspective_mapping(img):\n global sm, ctrl\n point_cloud = sm['lidar'].get()\n mask = np.where((point_cloud[3] > 10))[0]\n point_cloud = point_cloud[:,mask][:3,:]\n point_cloud = np.dot(pitch_rotationMat, point_cloud)\n img = cv2.resize(img, (1280, 720), interpolation=cv2.INTER_AREA)\n res = np.where(img > 100)\n image_uv = np.stack([res[1],res[0]])\n trans_pc = camera2lidar(image_uv)\n img = get_cost_map(trans_pc, point_cloud, False)\n cost_map = transform(img)\n output = cmd_model(cost_map.unsqueeze(0))\n cmd = output.data.numpy()[0][0]\n ctrl.set_speed(1.0)\n print(cmd)\n ctrl.set_rotation(cmd)\n\nif __name__ == '__main__':\n ctrl = Controller(scan_usb('CAN'))\n ctrl.start()\n ctrl.set_forward()\n ctrl.set_max_speed(1000)\n \n\n sensor_dict = {\n 'lidar':None,\n 'camera':None,\n 'gps':None,\n 'imu':None,\n }\n sm = SensorManager(sensor_dict)\n sm.init_all()\n nav_maker = NavMaker(sm['gps'], sm['imu'])\n nav_maker.start()\n time.sleep(1)\n while True:\n x,y,t = sm['gps'].get()\n nav = get_nav()\n if False:\n cv2.imshow('Nav', np.array(nav))\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n input_img = get_img(nav)\n result = get_net_result(input_img)[0][0]\n result = result.data.numpy()*255+255\n inverse_perspective_mapping(result)\n\n sm.close_all()","sub_path":"scripts/real/run_imitation.py","file_name":"run_imitation.py","file_ext":"py","file_size_in_byte":3739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"36493038","text":"#encoding=gbk\n\nfrom cj.web import *\nfrom cj.utils.common import date2str,get_serial\nimport decimal\nfrom cj.utils import flow\n\nsess_info = chk_session()\nuser_login = sess_info['user_login']\n\nmethod = query_string('_method')\ncontract_no = request('contract_no')\nif(method and method==\"load_sponsor\"):\n if(contract_no):\n sqlconn = dbconn(app_config.dbconn.admin)\n data = sqlconn.select(\"select user_login,isnull(user_no,'')+user_name user_name from adm_user where user_login not in (select sponsor from pjtz.dbo.cont_sponsor where contract_no='%s') and dept_no='79DF'\" % contract_no)\n #data = app.get_combo_data(\"select user_login,user_name from adm_user where user_login not in (select sponsor from pjtz.dbo.cont_sponsor where contract_no='%s')\" % contract_no, dbconn(app_config.dbconn.admin),'user_login','user_name')\n sqlconn.close()\n html.output(js.dumps({'data':data}))\n\n exit()\n\nsql = \"select contract_no,sponsor,prop*100 prop from cont_sponsor where contract_no='%s'\" % contract_no\n\nfield = \"sponsor,prop\"\nlabel = \"主办,占比(%)\"\n\ndm = modal.Modal(field, label, \"contract_no,sponsor\")\n\ndm.sponsor.xtype = \"combo\"\ndm.sponsor.datasource = app.get_combo_data(\"select user_login,isnull(user_no,'')+user_name user_name from adm_user where dept_no='79DF'\", dbconn(app_config.dbconn.admin),'user_login','user_name')\n\ndm.sponsor.form_cfg.store = \"\"\"@new Ext.data.JsonStore({ url:'%s?contract_no=%s&_method=load_sponsor', fields:['user_login','user_name'], autoLoad:true, root:\"data\"})\"\"\" % (get_self_url(), contract_no)\ndm.sponsor.form_cfg.mode = \"remote\"\ndm.sponsor.form_cfg.valueField = \"user_login\"\ndm.sponsor.form_cfg.displayField = \"user_name\"\ndm.sponsor.form_cfg = {'editable':True, 'triggerAction':'all'}\ndm.sponsor.allow_blank = False\n#dm.sponsor.form_cfg.listeners = \"\"\"@{beforeload: function(){alert('ok');this.getLoader().baseParams.user_login=Ext.getCmp('id_sponsor').getValue();}}\"\"\"\n\ndm.prop.allow_blank = False\n\ndm.prop.form_cfg.regex = \"@/^\\d{1,3}/\"\ndm.prop.form_cfg.regexText = \"必须输入0-100之��的整数\"\n\ndef update_status(contract_no):\n sqlconn = dbconn(app_config.dbconn.flow)\n tmp = sqlconn.get(\"select status_id from flow_item where item_id='%s'\" % contract_no)\n new_status_id = 0\n if(tmp.status_id>0):\n new_status_id = 1\n flow.update(user_login, sqlconn, 1, contract_no, new_status_id, 1)\n\ndef addnew(**argv):\n argv['data']['contract_no'] = contract_no\n argv['data']['prop'] = decimal.Decimal(argv['data']['prop'])/100\n\n sql_adm = dbconn(app_config.dbconn.admin)\n if(not sql_adm.get(\"select * from adm_user where user_login='%s'\" % argv['data']['sponsor'])):\n return {'success':False, 'data':'系统中不存在%s行员信息' % argv['data']['sponsor']}\n sql_adm.close()\n\n prop = 0\n data = argv['sqlconn'].get(\"select sum(prop) all_prop from cont_sponsor where contract_no='%s'\" % contract_no)\n prop = data.all_prop if(data.all_prop) else 0\n if(prop + argv['data']['prop'] > 1):\n return {'success':False, 'data':'占比合计不能超过100%'}\n ret = {'success':True,'data':'添加记录成功'}\n argv['sqlconn'].insert('cont_sponsor', argv['data'])\n if(prop + argv['data']['prop'] < 1):\n ret['data'] = '温馨提示:分配占比不足100%'\n\n update_status(contract_no)\n return ret\n\ndef update(**argv):\n argv['data']['prop'] = decimal.Decimal(argv['data']['prop'])/100\n data = argv['sqlconn'].get(\"select sum(prop) all_prop from cont_sponsor where contract_no='%s' and sponsor<>'%s'\" % (contract_no,argv['data']['sponsor']))\n prop = data.all_prop if(data.all_prop) else 0\n if(prop + argv['data']['prop'] > 1):\n return {'success':False, 'data':'占比合计不能超过100%'}\n ret = {'success':True,'data':'修改记录成功'}\n argv['sqlconn'].update('cont_sponsor', argv['data'], \"contract_no='%s' and sponsor='%s'\" % (argv['data']['contract_no'],argv['data']['sponsor']))\n if(prop + argv['data']['prop'] < 1):\n ret['data'] = '温馨提示:分配占比不足100%'\n\n update_status(contract_no)\n return ret\n\ndef delete(**argv):\n update_status(contract_no)\n return 0\n\nhandle = {'addnew':addnew, 'update':update, 'delete':delete}\n\ngrid.DataGrid(dbconn(app_config.dbconn.pjtz), sql, dm, handle=handle, title='主办人员占比')","sub_path":"pjtz/cont_sponsor.py","file_name":"cont_sponsor.py","file_ext":"py","file_size_in_byte":4332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"26332994","text":"# STRETCH: implement Linear Search\t\t\t\t\ndef linear_search(arr, target):\n for x in range(len(arr)):\n if arr[x] == target:\n return x\n return -1\n # TO-DO: add missing code\n\n \n\nimport random\n# STRETCH: write an iterative implementation of Binary Search \ndef binary_search(arr, target):\n\n if len(arr) == 0:\n return -1 # array empty\n \n low = 0\n high = len(arr)-1\n\n # TO-DO: add missing code\n x = random.randint(low,high)\n unfound = True\n while unfound:\n unfound = False\n if arr[x] == target:\n return x\n elif arr[x] > target:\n high = x\n x = random.randint(low,high)\n unfound = True\n elif arr[x] < target:\n low = x \n x = random.randint(low,high)\n unfound = True\n\n return -1 # not found\narr1 = [-9, -8, -6, -4, -3, -2, 0, 1, 2, 3, 5, 7, 8, 9]\nprint(binary_search(arr1,5))\n# STRETCH: write a recursive implementation of Binary Search \ndef binary_search_recursive(arr, target, low, high):\n \n middle = (low+high)//2\n if len(arr) == 0:\n return -1 # array empty\n if arr[middle] == target:\n return middle\n elif arr[middle] > target:\n return binary_search_recursive(arr, target, low, middle-1) \n elif arr[middle] < target:\n return binary_search_recursive(arr, target, middle+1, high)\n \n # TO-DO: add missing if/else statements, recursive calls\n","sub_path":"src/searching/searching.py","file_name":"searching.py","file_ext":"py","file_size_in_byte":1428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"572828225","text":"\n\n#calss header\nclass _CYMBAL():\n\tdef __init__(self,): \n\t\tself.name = \"CYMBAL\"\n\t\tself.definitions = [u'a flat, round musical instrument made of brass that makes a loud noise when hit with a stick or against another cymbal']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_cymbal.py","file_name":"_cymbal.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"267526","text":"\nfrom rest_framework.routers import DefaultRouter\nfrom .views import *\nfrom django.urls import path\n\n\napp_name = 'blog_api'\n\n# router = DefaultRouter()\n# router.register('',PostList,basename='post')\n\n# urlpatterns = router.urls\n\nurlpatterns = [\n path('', PostList.as_view(), name='listpost'),\n path('post//', PostDetail.as_view(), name='detailpost'),\n path('search/', PostListDetailfilter.as_view(), name='searchpost'),\n # Post Admin URLs\n path('admin/create/', CreatePost.as_view(), name='createpost'),\n path('admin/edit/postdetail//', AdminPostDetail.as_view(), name='admindetailpost'),\n path('admin/edit//', EditPost.as_view(), name='editpost'),\n path('admin/delete//', DeletePost.as_view(), name='deletepost'),\n]","sub_path":"django/blog_api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"576618512","text":"from verity_sdk.protocols.Protocol import Protocol\nfrom verity_sdk.utils import EVERNYM_MSG_QUALIFIER\n\n\nclass Relationship(Protocol):\n MSG_FAMILY = 'relationship'\n MSG_FAMILY_VERSION = '1.0'\n\n CREATED = 'created'\n INVITATION = 'invitation'\n CREATE = 'create'\n CONNECTION_INVITATION = 'connection-invitation'\n\n def __init__(self,\n for_relationship: str = None,\n thread_id: str = None,\n label: str = None,\n logo_url: str = None):\n super().__init__(\n self.MSG_FAMILY,\n self.MSG_FAMILY_VERSION,\n msg_qualifier=EVERNYM_MSG_QUALIFIER,\n thread_id=thread_id\n )\n\n self.for_relationship = for_relationship\n if label:\n self.label = label\n else:\n self.label = ''\n self.logo_url = logo_url\n\n def create_msg(self, _):\n msg = self._get_base_message(self.CREATE)\n self._add_thread(msg)\n msg['label'] = self.label\n if self.logo_url:\n msg['logoUrl'] = self.logo_url\n\n return msg\n\n async def create_msg_packed(self, context):\n return await self.get_message_bytes(context, self.create_msg(context))\n\n async def create(self, context):\n await self.send_message(context, await self.create_msg_packed(context))\n\n def connection_invitation_msg(self, _):\n msg = self._get_base_message(self.CONNECTION_INVITATION)\n self._add_thread(msg)\n self._add_relationship(msg, self.for_relationship)\n return msg\n\n async def connection_invitation_msg_packed(self, context):\n return await self.get_message_bytes(context, self.connection_invitation_msg(context))\n\n async def connection_invitation(self, context):\n await self.send_message(context, await self.connection_invitation_msg_packed(context))\n","sub_path":"sdk/python-sdk/verity_sdk/protocols/v1_0/Relationship.py","file_name":"Relationship.py","file_ext":"py","file_size_in_byte":1873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"623700746","text":"import copy\nimport re\n\n\ndef letterize_documents(documents):\n result = copy.deepcopy(documents)\n regex = re.compile('[^a-zA-Z]')\n\n for key, value in result.items():\n for documentIndex in range(len(value)):\n document = value[documentIndex]\n for tokenIndex in range(len(document)):\n document[tokenIndex] = regex.sub('', document[tokenIndex])\n value[documentIndex] = list(filter(lambda token: token != '', document))\n\n return result\n","sub_path":"apps/letterizing_documents.py","file_name":"letterizing_documents.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"241028401","text":"import sendgrid\nimport os\nfrom sendgrid.helpers.mail import *\n\n# If you are on OSX with Python3.6, run /Applications/Python\\ 3.6/Install\\ Certificates.command to bypass any SSL issues when running.\n\nsg = sendgrid.SendGridAPIClient(apikey=os.environ.get('SENDGRID_API_KEY'))\nfrom_email = Email(\"kaleo.sato@gmail.com\")\nto_email = Email(\"kaleo.sato@gmail.com\")\nsubject = \"Sending with SendGrid is Fun\"\ncontent = Content(\"text/plain\", \"and easy to do anywhere, even with Python\")\nmail = Mail(from_email, subject, to_email, content)\nresponse = sg.client.mail.send.post(request_body=mail.get())\nprint(response.status_code)\nprint(response.body)\nprint(response.headers)\n","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"373025079","text":"from base import Base\r\n\r\nclass MediaExtension(Base):\r\n\r\n def __init__(self, **kwargs):\r\n super(MediaExtension, self).__init__(**kwargs)\r\n\r\n def create_media_extension(self, **kwargs):\r\n label = \"MediaExtension.create_media_extension\"\r\n name = kwargs.get(\"name\", None)\r\n media_profile = kwargs.get(\"media_profile\", None)\r\n response = self.create_entity(url=\"URL2\",\r\n data_frame=\"media-ext-data\",\r\n to_create={\"attributes\": {\"name__s\": name,\r\n \"mmProfileId__s\": self.condition_response(\r\n data=media_profile)}})\r\n if response == 409:\r\n media_ext_profiles = self.get_entities(url=\"URL2\", filter_=\"auxiliaryDataList\")\r\n names = self.lookup_key(field=\"name__s\", data=media_ext_profiles)\r\n try:\r\n index = names.index(name)\r\n if index:\r\n media_ext_profile = media_ext_profiles[0][index]\r\n if media_ext_profile:\r\n self.delete_media_extension(id_=media_ext_profile.get(\"id\"))\r\n response = self.create_media_extension(name=name, media_profile=media_profile)\r\n except ValueError as error_:\r\n self.print_message(message=str(error_),\r\n function=label,\r\n type_=\"error\")\r\n\r\n return response\r\n\r\n def delete_media_extension(self, **kwargs):\r\n id_ = kwargs.get(\"id_\", None)\r\n return self.delete_entity(url=\"URL2\",\r\n id_=id_)\r\n","sub_path":"Library/Provisioning/media_extension.py","file_name":"media_extension.py","file_ext":"py","file_size_in_byte":1758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"580879413","text":"from unittest import TestCase\n\nfrom tests.folders.helpers import JSON_FOLDER_TEST_STRING, YAML_FOLDER_TEST_STRING\nfrom tools.folders.models.blueprint import Blueprint, Folder\n\n\nclass TestBlueprint(TestCase):\n def test_blueprint_init(self):\n blueprint = Blueprint()\n\n def test_build_parent_child(self):\n parent = Folder(name='parent')\n child = Folder(name='child')\n parent.add_child(child)\n bp = Blueprint(root=parent)\n print(bp)\n\n def test_build_complex_filder(self):\n parent = Folder(name='parent')\n child1 = Folder(name='child1')\n child2 = Folder(name='child2')\n grandchild = Folder(name='grandchild')\n grandchild.add_parent(child2)\n child1.add_parent(parent)\n child2.add_parent(parent)\n bp = Blueprint(root=parent)\n print(bp)\n\n def test_print_ymal(self):\n parent = Folder(name='parent')\n child1 = Folder(name='child1')\n child2 = Folder(name='child2')\n grandchild = Folder(name='grandchild')\n grandchild.add_parent(child2)\n child1.add_parent(parent)\n child2.add_parent(parent)\n bp = Blueprint(root=parent)\n print(bp.to_yaml())\n\n def test_json_to_folder(self):\n bp = Blueprint()\n bp.from_json(JSON_FOLDER_TEST_STRING)\n bp\n\n def test_yaml_to_folder(self):\n bp = Blueprint()\n bp.from_yaml(YAML_FOLDER_TEST_STRING)\n bp","sub_path":"tests/folders/test_blueprint.py","file_name":"test_blueprint.py","file_ext":"py","file_size_in_byte":1444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"344152133","text":"from django.shortcuts import render\nfrom django.http import JsonResponse\nfrom django.http import HttpResponse\nfrom rest_framework.views import status\nfrom datetime import datetime\nfrom django.shortcuts import render\n\nimport json\nimport time\n\n\nfrom .red import new_bid, new_ask, match, get_bids, get_asks, get_deals, put_default\n\n\n\n\n\n\ndef get_timestamp():\n timestamp = time.time()\n print('timestamp:', timestamp)\n return timestamp\n\n\ndef handling_click(request, timestamp, _type, pair):\n\n print('handling click')\n price = request.GET.get('price')\n pair = request.GET.get('pair')\n print(pair)\n print('get price:', price)\n amount = request.GET.get('amount')\n print('get amount:', amount)\n user_id = request.GET.get('user_id')\n print('get user_id:', user_id)\n\n\n data = {\n 'price': str(price),\n 'timestamp': str(timestamp),\n 'amount': str(amount)\n }\n if _type == 'bid':\n print(price, amount, user_id, timestamp)\n new_bid(price, amount, user_id, timestamp, pair)\n data['type'] = 'bid'\n else: # == ask\n new_ask(price, amount, user_id, timestamp, pair)\n data['type'] = 'ask'\n\n print(\"match\")\n ret = match(pair)\n print(\"match\")\n\n return data\n\n\ndef home(request):\n timestamp = get_timestamp()\n if(request.GET.get('pair')):\n pair = request.GET.get('pair')\n else:\n pair = 'XMRUSD'\n print('get shit:', request.GET.get('ask'))\n\n match_data = {}\n if(request.GET.get('ask')):\n _type = 'ask'\n data = handling_click(request, timestamp, _type, pair)\n print('handling asks..')\n if match(pair):\n match_data = {\n 'price': request.GET.get('price'),\n 'amount': request.GET.get('amount')\n }\n elif(request.GET.get('bid')):\n _type = 'bid'\n data = handling_click(request, timestamp, _type, pair)\n if match(pair):\n match_data = {\n 'price': request.GET.get('price'),\n 'amount': request.GET.get('amount')\n }\n print('handling bid..')\n elif(request.GET.get('category')):\n _type = 'category'\n pair = request.GET.get('category')\n put_default(pair)\n\n bids = []\n asks = []\n trades = []\n print(\"pair\", pair)\n if get_bids(5, pair):\n _bids = get_bids(5, pair)\n for b in _bids:\n bid = {\n 'price': b.price,\n 'amount': b.amount\n }\n bids.append(bid)\n while len(bids) < 5:\n bids.append({})\n else:\n print('get no bids')\n\n bids = [{},{},{},{},{}]\n\n if get_asks(5, pair):\n _asks = get_asks(5, pair)\n for a in _asks:\n ask = {\n 'price': a.price,\n 'amount': a.amount\n }\n asks.append(ask)\n print(\"===========\", asks)\n while len(asks) < 5:\n asks.append({})\n else:\n print(pair)\n asks = [{},{},{},{},{}]\n\n if get_deals(5, pair):\n _trades = get_deals(5, pair)\n for t in _trades:\n trade = {\n 'price': t.price,\n 'amount': t.amount,\n 'timestamp': t.trade_id\n }\n trades.append(trade)\n while len(trades) < 5:\n trades.append({})\n else:\n trades = [{},{},{},{},{}]\n\n print('b:', bids)\n print('a:', asks)\n print('t:', trades)\n table = bids + asks + trades\n print('table15:', table)\n table.append(match_data)\n\n return render(request, 'home.html', {\n 'data': json.dumps(table),\n 'pair': pair,\n })\n","sub_path":"ba_server/bid_ask_server/banda/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"608196720","text":"import argparse\nimport re\n\nimport sys\nsys.path.insert(0, '../')\nfrom sejong_corpus_cleaner.simplifier import eojeol_morphtags_to_lr\nfrom sejong_corpus_cleaner.processed_data import EojeolMorphtagSentence\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--cleandata_directory', type=str, default='../data/clean/')\n parser.add_argument('--colloquial_input_filename', type=str, default='eojeol_morphtag_colloquial.txt')\n parser.add_argument('--written_input_filename', type=str, default='eojeol_morphtag_written.txt')\n\n args = parser.parse_args()\n dirs = args.cleandata_directory\n colloquial_input = args.colloquial_input_filename\n written_input = args.written_input_filename\n\n input_path = '%s/%s' % (dirs, colloquial_input)\n output_path = '%s/lr_eojeol_morphtag_colloquial.txt' % dirs\n with open(output_path, 'w', encoding='utf-8') as f:\n create(input_path, f)\n\n # fe.write('# WRITTEN CORPUS\\n')\n\n input_path = '%s/%s' % (dirs, written_input)\n output_path = '%s/lr_eojeol_morphtag_written.txt' % dirs\n with open(output_path, 'w', encoding='utf-8') as f:\n create(input_path, f)\n\ndef only_hangle(s):\n pattern = re.compile('[^가-힣]+')\n return pattern.sub('', s)\n\ndef create(input_path, f):\n\n def eojeol_to_strf(l, r, l_tag, r_tag):\n if not r:\n return '%s/%s' % (l, l_tag)\n return '%s/%s %s/%s' % (l, l_tag, r, r_tag)\n\n eps = EojeolMorphtagSentence(input_path)\n n_exceptions = 0\n\n for i, sent in enumerate(eps):\n if i % 1000 == 0:\n print('\\rbuilding %d sents' % i, end='', flush=True)\n\n try:\n for eojeol, morphtags in sent:\n lr_unsep = eojeol_morphtags_to_lr(eojeol, morphtags, separate_xsv=False)[0]\n lr_sep = eojeol_morphtags_to_lr(eojeol, morphtags, separate_xsv=True)\n if len(lr_sep) == 2:\n eojeol = only_hangle(eojeol)\n lr_sep = (lr_sep[0][0], eojeol[len(lr_sep[0][0]):], lr_sep[0][2], 'Josa')\n else:\n lr_sep = lr_sep[0]\n\n morphtags_strf = ' '.join(['{}/{}'.format(m,t) for m,t in morphtags])\n lr_unsep_strf = eojeol_to_strf(*lr_unsep)\n lr_sep_strf = eojeol_to_strf(*lr_sep)\n f.write('{}\\t{}\\t{}\\t{}\\n'.format(eojeol, morphtags_strf, lr_unsep_strf, lr_sep_strf))\n f.write('\\n')\n\n except Exception as e:\n n_exceptions += 1\n #print('\\nException: sent # {}'.format(i))\n #print(e)\n #print(lr_unsep)\n #print(lr_sep)\n #print()\n #break\n\n print('\\rbuilding was done. (%d sents, %d exceptions)' % (i + 1, n_exceptions))\n\nif __name__ == '__main__':\n main()","sub_path":"scripts/build_lr_morphtag.py","file_name":"build_lr_morphtag.py","file_ext":"py","file_size_in_byte":2787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"444833736","text":"import numpy as np\nfrom numpy import array\nfrom skimage import morphology\nimport matplotlib.pyplot as plt\nimport copy\nfrom sklearn.cluster import KMeans\nfrom skimage import measure\nfrom PIL import Image, ImageDraw\ndef seperate_roi(imgs_to_process, bottom_left, top_right, id, out_path, lung_mask):\n print(\"Seperating ROI...\")\n zsize = imgs_to_process.shape[0]\n ysize = imgs_to_process.shape[1]\n xsize = imgs_to_process.shape[2]\n reduced_roi = np.zeros((zsize, bottom_left[0] - top_right[0]+1,top_right[1]-bottom_left[1]+1))\n imgs_to_process.shape[0]\n\n for z in range(0, zsize, 1):\n np_mask_smaller = morphology.erosion(lung_mask[z], np.ones([17, 17]))\n slice = (np_mask_smaller * imgs_to_process[z]).astype(int)\n\n small_slice = reduced_roi[z]\n for y in range(0, ysize, 1):\n for x in range(0, xsize, 1):\n if ((x >= bottom_left[1]) & (x <= top_right[1]) & (y >= top_right[0]) & (y <= bottom_left[0])):\n small_slice[y-top_right[0]][x-bottom_left[1]] = slice[y][x]\n else:\n slice[y][x] = 0\n reduced_roi[z] = small_slice\n imgs_to_process[z] = slice\n np.save(out_path + \"Tumor_roi_%d.npy\" % (id), imgs_to_process)\n np.save(out_path + \"Reduced Tumor_roi_%d.npy\" % (id), reduced_roi)\n plt.title(\"Isolated Tumor\")\n plt.imshow(imgs_to_process[229], cmap='gray')\n plt.show()\n return imgs_to_process, reduced_roi\ndef plot_img(img, title):\n print(\"Plotting Image...\")\n plt.title(title)\n plt.imshow(img, cmap='gray')\n plt.show()\ndef threshold_kmeans(middle):\n kmeans = KMeans(n_clusters=2).fit(np.reshape(middle, [np.prod(middle.shape), 1]))\n centers = sorted(kmeans.cluster_centers_.flatten())\n threshold = np.mean(centers)\n thresh_img = np.where(middle < threshold, 0, 255)\n return thresh_img\ndef threshold_reg(img, min_intensity):\n thresh_img = np.where(img < min_intensity, 0, 255)\n return thresh_img\ndef sample_stack(stack, rows=2, cols=2, start_with=0, show_every=1, display1 = True):\n if (display1):\n new_list = []\n new_list.append(stack)\n new_list.append(stack)\n new_list.append(stack)\n new_list.append(stack)\n sample_stack(new_list, 2, 2, 0, 1, False)\n else:\n fig,ax = plt.subplots(rows,cols,figsize=[12,12])\n for i in range((rows*cols)):\n ind = start_with + i*show_every\n ax[int(i/rows),int(i % rows)].set_title('slice %d' % ind)\n ax[int(i/rows),int(i % rows)].imshow(stack[ind],cmap='gray')\n ax[int(i/rows),int(i % rows)].axis('off')\n plt.show()\ndef show_roi(RGB_img, bottom_left, top_right):\n print(\"Displaying ROI...\")\n # for the 4 lines of the box, make them red\n # left vertical\n for i in range(top_right[0], bottom_left[0], 1):\n RGB_img[i][bottom_left[1]][0] = 255\n RGB_img[i][bottom_left[1]][1] = 0\n RGB_img[i][bottom_left[1]][2] = 0\n # right vertical\n for i in range(top_right[0], bottom_left[0], 1):\n RGB_img[i][top_right[1]][0] = 255\n RGB_img[i][top_right[1]][1] = 0\n RGB_img[i][top_right[1]][2] = 0\n # top horizontal\n for i in range(bottom_left[1], top_right[1], 1):\n RGB_img[top_right[0]][i][0] = 255\n RGB_img[top_right[0]][i][1] = 0\n RGB_img[top_right[0]][i][2] = 0\n\n # bottom horizontal\n for j in range(bottom_left[1], top_right[1], 1):\n RGB_img[340][j][0] = 255\n RGB_img[340][j][1] = 0\n RGB_img[340][j][2] = 0\n\n RGB_img[340][290][0] = 200\n RGB_img[340][290][1] = 100\n RGB_img[340][290][2] = 50\n\n plt.title('Region of Interest')\n plt.imshow(RGB_img)\n plt.show()\ndef apply_lung_mask(imgs, lungs):\n z = imgs.shape[0]\n for i in range(0, z, 1):\n imgs[i] = imgs[i]*lungs[i]\n return imgs\ndef connected_threshold(img, seed, thresh_min):\n\n def surround_with_ones(L1, center):\n # l1 = list of ones\n x = center[0]\n y = center[1]\n z = center[2]\n L1.append([x + 1, y, z])\n L1.append([x - 1, y, z])\n L1.append([x, y + 1, z])\n L1.append([x, y - 1, z])\n L1.append([x, y, z + 1])\n L1.append([x, y, z - 1])\n return l1\n print(\"Region Growing...\")\n pt = np.zeros_like(img)\n xseed = int(np.median((np.where(img[seed] > thresh_min))[0]))\n yseed = int(np.median((np.where(img[seed] > thresh_min))[1]))\n zseed = seed\n #pt = array of zeros slowly filled with 255, 0, or 3\n pt[zseed][xseed][yseed] = 255\n center = [zseed, xseed, yseed]\n l1 = []\n l1 = surround_with_ones(l1, center)\n while (len(l1) > 0):\n points = l1.pop()\n x = points[0]\n y = points[1]\n z = points[2]\n\n try:\n if (pt[x][y][z] == 0):\n if (img[x][y][z] >= thresh_min):\n pt[x][y][z] = 255\n l1 = surround_with_ones(l1, [x, y, z])\n else:\n pt[x][y][z] = 3\n except:\n pass\n\n #l1 = list of ones\n return pt\ndef place_into_lungs(id, tumor, bottom_left, top_right):\n imgs = np.load(out_path + \"nplungs_%d.npy\" % (id))\n\n entire_lungs = np.zeros_like(imgs)\n\n print(\"Re-placing ROI...\")\n zsize = tumor.shape[0]\n ysize = tumor.shape[1]\n xsize = tumor.shape[2]\n\n for z in range(0, zsize, 1):\n slice = tumor[z]\n entire_lungs_slice = entire_lungs[z]\n for y in range(0, ysize, 1):\n for x in range(0, xsize, 1):\n entire_lungs_slice[y+bottom_left[1]][x+top_right[0]] = slice[y][x]\n entire_lungs[z] = entire_lungs_slice\n np.save(out_path + \"Tumor_in_lungs_%d.npy\" % (id), entire_lungs)\n plt.title(\"Tumor in Lungs\")\n plt.imshow(entire_lungs[229], cmap='gray')\n plt.show()\n return entire_lungs\ndef connected_threshold_larger(img, seed, thresh_min, thresh_max, num3s):\n def Getxyz(i):\n return i[0], i[1], i[2]\n print(\"Running Connected Threshold...\")\n img_height = np.size(img, 0) - 10\n def contains3(voxel, PT):\n total_3s = 0\n for i in voxel:\n x = i[0]\n y = i[1]\n z = i[2]\n #print(\"pt: {}, img: {}\".format(PT[x][y][z], img[x][y][z]))\n if ((x >= 0) & (x < img_height)):\n if ((PT[x][y][z] == 3) or (img[x][y][z] >= thresh_max) or (img[x][y][z] < thresh_min)):\n total_3s += 1\n #print(img[x][y][z])\n #print(\"Total 3s: \"+ str(total_3s))\n return total_3s\n def surround_with_ones_larger(center, L1):\n x = center[0]\n y = center[1]\n z = center[2]\n voxel = []\n voxel.append([x+1, y, z])\n voxel.append([x+1, y+1, z])\n voxel.append([x+1, y, z+1])\n voxel.append([x+1, y+1, z+1])\n voxel.append([x+1, y-1, z])\n voxel.append([x+1, y, z-1])\n voxel.append([x+1, y-1, z-1])\n voxel.append([x+1, y+1, z-1])\n voxel.append([x+1, y-1, z+1])\n voxel.append([x-1, y, z])\n voxel.append([x-1, y+1, z])\n voxel.append([x-1, y, z+1])\n voxel.append([x-1, y+1, z+1])\n voxel.append([x-1, y-1, z])\n voxel.append([x-1, y, z-1])\n voxel.append([x-1, y-1, z-1])\n voxel.append([x-1, y+1, z-1])\n voxel.append([x-1, y-1, z+1])\n voxel.append([x, y-1, z])\n voxel.append([x, y+1, z])\n voxel.append([x, y, z-1])\n voxel.append([x, y, z+1])\n voxel.append([x, y-1, z+1])\n voxel.append([x, y+1, z-1])\n voxel.append([x, y+1, z+1])\n voxel.append([x, y-1, z-1])\n #voxel.append([x+2, y, z])\n #voxel.append([x-2, y, z])\n #voxel.append([x, y+2, z])\n #voxel.append([x, y-2, z])\n #voxel.append([x, y, z+2])\n #voxel.append([x, y, z-2])\n L1.append(voxel)\n return L1\n seed = [seed, int(np.median((np.where(img[seed] > thresh_min))[0])), int(np.median((np.where(img[seed] > thresh_min))[1]))]\n xseed = seed[0]\n yseed = seed[1]\n zseed = seed[2]\n pt = np.zeros_like(img)\n pt[xseed][yseed][zseed] = 255\n center = [xseed, yseed, zseed]\n L1 = []\n L1 = surround_with_ones_larger(center, L1)\n while (len(L1) > 0):\n voxel = L1.pop()\n if (contains3(voxel, pt) > num3s):\n #do the normal thing but don't create more voxels\n for pixel in voxel:\n x = pixel[0]\n y = pixel[1]\n z = pixel[2]\n if ((pt[x][y][z] == 0) & (x >= 0) & (x < img_height)):\n if ((img[x][y][z] <= thresh_max) & (img[x][y][z] >= thresh_min)):\n pt[x][y][z] = 255\n else:\n pt[x][y][z] = 3\n else:\n #do the normal thing\n for pixel in voxel:\n x = pixel[0]\n y = pixel[1]\n z = pixel[2]\n if ((pt[x][y][z] == 0) & (x >= 0) & (x < img_height)):\n if ((img[x][y][z] <= thresh_max) & (img[x][y][z] >= thresh_min)):\n pt[x][y][z] = 255\n L1 = surround_with_ones_larger([x, y, z], L1)\n else:\n pt[x][y][z] = 3\n #l1 = list of ones\n return pt\n\nid = 0\nseed_index = 229\nmin_intensity = 45\nbottom_left = [340,275]\ntop_right = [300,310]\nout_path = \"/Users/paulmccabe/Desktop/Segmentation Project/\"\n\n\"\"\"\nimgs_to_process = np.load(out_path + \"nplungs_%d.npy\" % (id))\nnp_mask = np.load(out_path + \"justmask_%d.npy\" % (id))\n#id0 tumor at 229\ntum_slice = copy.deepcopy(imgs_to_process[seed_index])\n\nplt.title('Grayscale Image')\nplt.imshow(tum_slice, cmap = 'gray')\nplt.show()\nRGB_img = np.dstack((tum_slice, tum_slice, tum_slice))\n\n#show_roi(RGB_img, bottom_left, top_right)\n#seperate out roi into array\n\nsep_roi, reduced_roi = seperate_roi(imgs_to_process, bottom_left, top_right, id, out_path, np_mask)\n#sep_roi = np.load(out_path + \"Tumor_roi_%d.npy\" % (id))\n#reduced_roi = np.load(out_path + \"Reduced Tumor_roi_%d.npy\" % (id))\nplt.title('Reduced ROI')\nplt.imshow(reduced_roi[seed_index], cmap = 'gray')\nplt.show()\n#thresh_img = threshold_kmeans(reduced_roi)\n#thresh_img = threshold_reg(reduced_roi, min_intensity)\n#plot_img(thresh_img[seed_index], \"Thresholded Image\")\n\nreduced_roi = np.load(out_path + \"Thresh_roi_%d.npy\" % (id))\ntumor_after_rg = connected_threshold_larger(reduced_roi, seed_index, min_intensity, 300, 14)\n#num3s works well at 14 for id0\nnp.save(out_path + \"Thresh_tumor_%d.npy\" % (id), tumor_after_rg)\nplot_img(tumor_after_rg[seed_index], \"Tumor After RG\")\n\"\"\"\ntumor_after_rg = np.load(out_path + \"Thresh_tumor_%d.npy\" % (id))\ntumor_in_lungs = place_into_lungs(id, tumor_after_rg, bottom_left, top_right)\n#place back into lungs\n\n#tumor_in_lungs =\n\n#thresh_roi = threshold(sep_roi)\n\n#multiply by lung mask to remove outer lung\n\n\"\"\"\nPIL_img = Image.fromarray(RGB_img)\nPIL_img_background = Image.fromarray(RGB_img)\n\nnew_image = Image.new(\"RGB\", PIL_img.size, \"WHITE\")\ndraw = ImageDraw.Draw(new_image)\ndraw.rectangle([(305, 275), (340,310)], fill=None, outline= \"Red\")\ndel draw\nnp_new_image = array(new_image)\nplt.title('New Image/Box')\nplt.imshow(np_new_image)\n#plt.show()\nPIL_img_background = PIL_img_background.convert(\"RGBA\")\nnew_image = new_image.convert(\"RGBA\")\nfinal_img = Image.blend(PIL_img_background, new_image, 1)\nfinal_img.show()\n\"\"\"","sub_path":"Volume of Interest.py","file_name":"Volume of Interest.py","file_ext":"py","file_size_in_byte":11398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"357805442","text":"import os, html\r\nimport re\r\n\r\ndef one():\r\n retable = re.compile(\".*?
\", flags=re.U | re.DOTALL)\r\n regtr = re.compile('.*?', flags=re.U | re.DOTALL)\r\n regthai = re.compile('.*?', flags=re.U | re.DOTALL)\r\n regeng = re.compile('\\w*.*?', flags=re.U | re.DOTALL) \r\n repart = re.compile('.*?', flags = re.DOTALL)\r\n \r\n pages = os.listdir('thai_pages')\r\n thaidi = {} #тай-англ словарь\r\n for pg in pages:\r\n f = open( \"thai_pages/\"+ pg, 'r', encoding=\"UTF-8\")\r\n res = f.read()\r\n tabl = retable.search(res)\r\n tabl = tabl.group(0)\r\n resf = regtr.findall(tabl)\r\n for i in resf:\r\n try: \r\n thaiw = regthai.search(i)\r\n thaiw = clean(thaiw.group(0))\r\n engw = regeng.search(i)\r\n engw = repart.sub('', engw.group(0))\r\n engw = clean(engw)\r\n upd = {thaiw:engw}\r\n thaidi.update(upd)\r\n except:\r\n continue\r\n \r\ndef clean(t):\r\n retag = re.compile('<.*?>', flags = re.DOTALL)\r\n rex = re.compile('\\xa0', flags = re.DOTALL)\r\n tn = retag.sub('',t)\r\n tn = rex.sub('',tn)\r\n tn = html.unescape(tn)\r\n return tn\r\n \r\ndef main():\r\n one()\r\n\r\nmain()\r\n","sub_path":"control/thai_thing.py","file_name":"thai_thing.py","file_ext":"py","file_size_in_byte":1419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"447923837","text":"from django import forms\n\nfrom .models import Item, Comment\n\nfrom users.models import UserProfile\nfrom projects.models import Project\n\n\nclass ItemForm(forms.ModelForm):\n class Meta:\n model = Item\n fields = [\"title\", \"description\"]\n widgets = {\n \"title\": forms.TextInput(attrs={\n \"class\": \"item_title\",\n }),\n \"description\": forms.Textarea(attrs={\n \"class\": \"item_description\",\n }),\n \"priority\": forms.Select(attrs={\n \"class\": \"bootstrap-select\",\n })\n }\n\n priority = forms.CharField(widget=forms.Select(attrs={\"class\": \"bootstrap-select\"}, choices=[(\"none\", \"\"), (\"low\", \"Low\"), (\"medium\", \"Medium\"), (\"high\", \"High\")]))\n assigned_to = forms.CharField(widget=forms.Select(\n choices=sorted([\n (user_profile.user.username, user_profile.display_name) for user_profile in UserProfile.objects.all()\n ], key=lambda k: k[1]),\n attrs={\n \"style\": \"min-width: 200px; max-width: 200px;\"\n }\n ))\n project = forms.CharField(widget=forms.Select(\n choices=sorted([(\"none\", \"\")] + [\n (project.pk, project.title) for project in Project.objects.all()\n ], key=lambda k: k[1]),\n attrs={\n \"style\": \"min-width: 200px; max-width: 200px;\"\n }\n ))\n tag_bug = forms.BooleanField(required=False, widget=forms.CheckboxInput(attrs={\n \"class\": \"tag_checkbox\",\n }))\n tag_feature = forms.BooleanField(required=False, widget=forms.CheckboxInput(attrs={\n \"class\": \"tag_checkbox\",\n }))\n tag_task = forms.BooleanField(required=False, widget=forms.CheckboxInput(attrs={\n \"class\": \"tag_checkbox\",\n }))\n\n\nclass CommentForm(forms.ModelForm):\n class Meta:\n model = Comment\n fields = [\"body\"]","sub_path":"items/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"513646002","text":"\nimport matplotlib.pyplot as plt; plt.rcdefaults()\nfrom matplotlib.backends.backend_pdf import PdfPages\nimport numpy as np\nimport sys\nimport csv\nfrom math import sin as sin # motherfucking sinus\nfrom heapq import nsmallest # Zahl am naechsen zu X-ray line finden\nfrom scipy.signal import argrelextrema # um locale minima zu finden (background)\nimport warnings # to supress polyfit warning\n\n# nice on heatmap and subfigure usage: \n# https://stackoverflow.com/questions/2369492/generate-a-heatmap-in-matplotlib-using-a-scatter-data-set#2371812\n\n\ndef print_hpm_mail():\n\tmail_text = '''\nHallo, \n\nnoch einige Tipps bzw. Hinweise für die Hausaufgabe mit der Eichkurve:\n\n1.) Bitte die Eichkurven sowohl mit der Bruttozaehlrate (also ohne Background-Abzug) als auch mit der Nettozaehlrate (mit Background-Abzug) durchführen.\n\n2.) Bitte die Zaehlraten jeweils so umrechnen, dass nur die mittlere Zaehlrate gewaehlt wird. Das bedeutet also, dass Sie die Summe der gewaehlten Zaehlraten durch die Anzahl der einzelnen Summanden dividieren. Also z.B = Summe (B891:B897)/7\n\n3.) Den Background bitte nicht auf beiden Seiten komplett abziehen, sondern links und rechts jeweils den Faktor 0,5 waehlen (bei symmetrischem Background). Wenn der Background asymmetrisch gewaehlt wird, dann bitte die Hebelregel anwenden. \n\n4.) Die Eichung, die nur den Brutto- oder Netto-Peak berücksichtigt, wird ein schlechtes Ergebnis bringen. Daher ist in einem zweiten Schritt eine Eichung über die Verhaeltnisbildung zur Höhe des Compton-Peaks (bei 20,6 KeV) vorzunehmen. \nDas sieht dann in etwa so aus: =(summe(b891:B897)/7))/((summe(B1372:B1378)/7))\nDer Quotient ist dann auf der Y-Achse zu plotten.\nDann werden die Eichkurven DEUTLICH besser.\n\t'''\n\tprint(mail_text)\n\n\n# reads .txt spectrum file, returns (2,n)-np.array, n = length of file - header \ndef read_txt(filename):\n\tnum_head_lines = 38 # length of header\n\t# load txt-file, get number of rows\n\tcrs = open(filename, \"r\")\n\trows = (row.strip().split() for row in crs)\n\tnum_lines = sum(1 for line in open(filename))\n\n\t# ommit header\n\tfor i in range(num_head_lines):\n\t\tblabla=4\n\t\tnext_row = next(rows)\n\t\t# but save 2d-value\n\t\tif next_row[0] == \"_2d\":\n\t\t\ttwo_d = float(next_row[2])\n\n\t# put in list\n\tdegree = [] # to be filled with degrees\n\tcounts = [] # to be filled with counts\n\tfor i in range(num_lines - num_head_lines):\n\t\t# get next\n\t\tnext_row = next(rows)\n\t\t# append if not empty\n\t\tif next_row != []:\n\t\t\tdegree.append(float(next_row[0]))\n\t\t\tcounts.append(float(next_row[1]))\n\t\telse:\n\t\t\tprint(i, \" empty\")\n\n\t# convert degrees to energy using 2d from header\n\t'''\n\tfor i in range(len(degree)):\n\t\tdegree[i] = np.sin(np.deg2rad(degree[i]))\n\t\tdegree[i] = degree[i] * two_d\n\t\tdegree[i] = 12.396 / degree[i] \n\t'''\n\n\t# convert to np.arrays\t\n\tdegree = np.asarray(degree)\n\tcounts = np.asarray(counts)\n\t# return as (2,n)-np.array\n\treturn np.vstack((degree,counts))\n\n\n\ndef multiplot():\n\tplt.subplot(111)\n\tnumber_of_plots = 7\n\tkoord = number_of_plots * 100 + 10 \n\tfor i in range(number_of_plots):\n\t\tkoord = koord + 1\n\t\tplt.subplot(koord)\n\t\ttxtfile = str(i + 1) + \".txt\"\n\t\tspec1 = read_txt(txtfile)\n\t\tplt.plot(spec1[0],spec1[1])\n\tplt.show()\n\tpass\n\ndef load_standards(name_csv):\n\t# load, transpose, convert to np.array\n\treader = csv.reader(open(name_csv, \"r\"), delimiter=\",\")\n\tx = list(reader)\n\tstan_label = x[0]\n\tstan_spec = np.array(x[1::]).astype(\"float\")\n\tstan_spec = np.transpose(stan_spec)\n\treturn((stan_label,stan_spec))\n\ndef print_label(labellist):\n\tprint()\n\tprint(\"available:\")\n\tindex = 0\n\tfor i in labellist:\n\t print(index, i)\n\t index += 1\n\tprint(\"\")\n\ndef load_all_standards():\n\tdata = load_standards(\"stan_spec.csv\")\n\tstan_label = label = data[0][1:89]\n\tstan_spec = data[1][1:89]\n\tkev = data[1][0]\n\tdata = load_standards(\"stan_soll.csv\")\n\tstan_soll = data[1]\n\tcrs = open(\"soll_label.txt\", \"r\")\n\trows = (row.strip() for row in crs)\n\tsoll_label = list(rows)\n\treturn((kev,stan_label,stan_spec,stan_soll,soll_label)) \t\n\ndef get_Ka(element):\n\tif element == \"Rb\":\n\t\treturn 13.396\n\t# static peak: [13.35, 13.365, 13.38, 13.395, 13.41, 13.425, 13.44, 13.455]\n\t# static bg1: [13.02, 13.035, 13.05, 13.065, 13.08, 13.095]\n\t# static bg2: [13.71, 13.725, 13.74, 13.755, 13.77, 13.785]\n\telif element == \"Sr\":\n\t\treturn 14.165\n\t# static peak: [14.1, 14.115, 14.13, 14.145, 14.16, 14.175, 14.19, 14.205]\n\t# static bg1: [13.815, 13.83, 13.845, 13.86]\n\t# static bg2: [14.445, 14.46, 14.475, 14.49]\n\telif element == \"Y\":\n\t\treturn 14.958\n\t# static peak: [14.85, 14.865, 14.88, 14.895, 14.91, 14.925, 14.94, 14.955, 14.97, 14.985, 15.0, 15.015, 15.03, 15.045]\n\t# static bg1: [14.55, 14.565, 14.58, 14.595, 14.61]\n\t# static bg2: [15.24, 15.255, 15.27, 15.285, 15.3] \n\telif element == \"Zr\":\n\t\treturn 15.775\n\telif element == \"Ni\":\n\t\treturn 7.480\n\telif element == \"Pb\":\n\t\treturn 10.551\n\telif element == \"Ti\":\n\t\treturn 4.512\n\telif element == \"Cr\":\n\t\treturn 5.415\n\telif element == \"Nb\":\n\t\treturn 16.615\n\t# static peak: [16.5, 16.515, 16.53, 16.545, 16.56, 16.575, 16.59, 16.605, 16.62, 16.635, 16.65, 16.665, 16.68, 16.695]\n\t# static bg1: [16.26, 16.275, 16.29, 16.305]\n\t# static bg2: [16.95, 16.965, 16.98, 16.995] \n\telif element == \"Tc\": # Technetium K-beta1 (?)\n\t\treturn 20.626\n\t# static peak: [20.505, 20.52, 20.535, 20.55, 20.565, 20.58, 20.595, 20.61, 20.625, 20.64, 20.655, 20.67, 20.685, 20.7]\n\t# static bg1: [19.65, 19.665, 19.68, 19.695, 19.71, 19.725]\n\t# static bg2: [21.51, 21.525, 21.54, 21.555, 21.57, 21.585] \n\telif element == \"Mo\":\n\t\treturn 17.480\n\telse:\n\t\treturn 0.0\n\ndef get_static_bg_area(element):\n\t# areas designated by HPM to take average background\n\tif element == \"Rb\":\n\t\treturn [13.02, 13.035, 13.05, 13.065, 13.08, 13.095, 13.71, 13.725, 13.74, 13.755, 13.77, 13.785]\n\telif element == \"Sr\":\n\t\treturn [13.815, 13.83, 13.845, 13.86, 14.445, 14.46, 14.475, 14.49]\n\telif element == \"Y\":\n\t\treturn [14.55, 14.565, 14.58, 14.595, 14.61, 15.24, 15.255, 15.27, 15.285, 15.3]\n\telif element == \"Nb\":\n\t\treturn [16.26, 16.275, 16.29, 16.305, 16.95, 16.965, 16.98, 16.995]\n\telse:\n\t\treturn []\n\ndef get_static_bg(element, spectrum, energies):\n\tbackground = 0\n\tfor i in get_static_bg_area(element):\n\t\tbackground = background + spectrum[energies.tolist().index(i)]\n\treturn background / len(get_static_bg_area(element))\n\n\ndef get_Ka_pos(energys, element):\n energys = np.asarray(energys)\n return (np.abs(energys - get_Ka(element))).argmin()\n\ndef peak_height(spectrum, energies, element, width):\n\tsumme = spectrum[get_Ka_pos(energies,element)]\n\tradius = int((width - 1)/2)\n\tfor i in range(radius):\n\t\ti = i +1\n\t\tsumme = summe + spectrum[get_Ka_pos(energies,element) - i]\n\t\tsumme = summe + spectrum[get_Ka_pos(energies,element) + i]\n\tpeak_height = summe / width\n\tbackground = min(spectrum[get_Ka_pos(energies,element) - radius], \n\t\t\t\t\t spectrum[get_Ka_pos(energies,element) + radius])\n\t#print(\"bg: \", background)\n\treturn peak_height - background\n\ndef peak_height_static(spectrum, energies, element, width):\n\tpeak = get_Ka_pos(energies,element)\n\tbackground = get_static_bg(element, spectrum, energies)\n\tsumme = spectrum[peak]\n\tradius = int((width - 1)/2)\n\tfor i in range(radius):\n\t\ti = i +1\n\t\tsumme = summe + spectrum[peak - i]\n\t\tsumme = summe + spectrum[peak + i]\n\treturn summe / width - background\n\ndef peak_height_poly(spectrum, energies, element, width):\n\tbg = background_poly(spectrum, energies)\n\tpeak = get_Ka_pos(energies,element)\n\tsumme = spectrum[peak] - bg(energies[peak])\n\tradius = int((width - 1)/2)\n\tfor i in range(radius):\n\t\ti = i +1\n\t\tsumme = summe + spectrum[peak - i] - bg(energies[peak - i])\n\t\tsumme = summe + spectrum[peak + i] - bg(energies[peak + i])\n\treturn summe / width\n\ndef peak_height_nobg(spectrum, energies, element, width):\n\tpeak = get_Ka_pos(energies,element)\n\tsumme = spectrum[peak]\n\tradius = int((width - 1)/2)\n\tfor i in range(radius):\n\t\ti = i +1\n\t\tsumme = summe + spectrum[peak - i]\n\t\tsumme = summe + spectrum[peak + i]\n\treturn summe / width\n\ndef mark_peak(figure, energies,element,width, barcolor='r'):\n\twidth = int((width - 1) / 2)\n\tfigure.axvline(energies[get_Ka_pos(energies,element) - width], color=barcolor)\n\tfigure.axvline(energies[get_Ka_pos(energies,element) + width], color=barcolor)\n\n\ndef print_Zr_peaks():\n\t(kev,stan_label,stan_spec,stan_soll,soll_label) = load_all_standards()\n\n\tf, ax = plt.subplots()\n\tax.set_title(stan_label[21] + \", Zr-Ka markiert\")\n\n\n\tmark_peak(ax,kev,\"Zr\",7)\n\n\tax.plot(kev,stan_spec[21])\n\t#ax.plot(kev,stan_spec[21])\n\n\t# print(stan_label[21])\n\n\t#get local minima (order ~ -sensitivity)\n\tlocal_minima = argrelextrema(stan_spec[21], np.less, order=20)[0]\n\t# print(local_minima)\n\t# save to coordinates of local minima\n\ttie_points = np.zeros(shape=[2, len(local_minima)])\n\tfor i in range(len(local_minima)):\n\t\ttie_points[0][i] = kev[local_minima[i]]\n\t\ttie_points[1][i] = stan_spec[21][local_minima[i]]\n\t# create polynom for background\n\tbackground = np.poly1d(np.polyfit(tie_points[0], tie_points[1], 20))\n\tsmall_kev = kev[local_minima[0]:local_minima[len(local_minima)-2]] # between smallest and biggest tie point\n\tax.plot(small_kev, background(small_kev))\n\n\t# funktioniert, aber nicht schön zum plotten, weil min max verloren geht\n\t#bpp = background_poly(stan_spec[21],kev)\n\t#ax.plot(small_kev, bpp(small_kev))\n\n\n\t# mark local minima with lines\n\t#mark_x(ax, local_minima, kev)\n\n\tplt.show()\n\n\n\ndef background_poly(spectrum, energies):\n\t#get local minima (order ~ -sensitivity)\n\tlocal_minima = argrelextrema(spectrum, np.less, order=20)[0]\n\t# save to coordinates of local minima\n\ttie_points = np.zeros(shape=[2, len(local_minima)])\n\tfor i in range(len(local_minima)):\n\t\ttie_points[0][i] = energies[local_minima[i]]\n\t\ttie_points[1][i] = spectrum[local_minima[i]]\n\t# create polynom for background\n\twith warnings.catch_warnings():\n\t\twarnings.simplefilter(\"ignore\")\n\t\treturn np.poly1d(np.polyfit(tie_points[0], tie_points[1], 20))\n\n\n\t#return background\n\t\n\n\t#small_energies = energies[local_minima[0]:local_minima[len(local_minima)-2]] # between smallest and biggest tie point\n\t#ax.plot(small_energies, background(small_energies))\n\n\ndef mark_x(figure, x_values, energies):\n\tfor i in x_values:\n\t\tfigure.axvline(energies[i], color=\"orange\", alpha=0.5)\n\n\n\n\ndef eichkurve_zr():\n\t(kev,stan_label,stan_spec,stan_soll,soll_label) = load_all_standards()\n\n\tf, ax = plt.subplots()\n\tax.set_title('Zr-Eichkurve')\n\n\tzr_values = np.zeros(shape=[2, len(stan_spec)])\n\tfor i in range(len(stan_spec)):\n\t\tzr_values[0][i] = peak_height(stan_spec[i], kev, \"Zr\", 31)\n\t\tzr_values[1][i] = stan_soll[i][len(stan_soll[i])-1]\n\n\tfor i in range(len(stan_label)):\n\t\tprint(i, stan_label[i])\n\n#\tprint(\"Zr values of standards:\")\n#\tfor i in range(len(zr_values[1])):\n#\t\tprint(i, zr_values[1][i])\n#\tprint()\n\n\tzr_values = np.delete(zr_values, (57), axis=1)\n\n#\tprint(\"Zr values of standards:\")\n#\tfor i in range(len(zr_values[1])):\n#\t\tprint(i, zr_values[1][i])\n#\tprint()\n\n\tax.scatter(zr_values[0], zr_values[1], marker='.', color='black', s=15, label='Zr')\n\t#ax.hist2d(zr_values[0], zr_values[1],bins=50,cmap='gist_gray_r')\n\t#ax.hist2d(zr_values[0], zr_values[1],bins=40,cmap='viridis_r')\n\n\tA = np.vstack([zr_values[0], np.ones(len(zr_values[0]))]).T\n\tprint(A)\n\tm, c = np.linalg.lstsq(A, zr_values[1])[0]\n\tprint(\"Regression: y = \", m, \" * x + \", c)\n\tax.plot(zr_values[0], m*zr_values[0] + c, 'r', label='Fitted line')\n\t\n\t#ax.margins(0)\n\tax.axvline(0, color=\"black\", linewidth=0.5)\n\tax.axhline(0, color=\"black\", linewidth=0.5)\n\n\n\tplt.show(f)\n\n\ndef eichkurve_stat(element, figure, stan_spec, energies, stan_conc, stan_conc_label, width=7, verbous=False):\n\tfigure.set_title(element + '-Eichkurve, Peakweite: ' + str(width) + \", statische Background-Korrektur\")\n\tfigure.set_xlabel('Peak Height [counts]')\n\tfigure.set_ylabel('Concentration in Standard [ppm]')\n\n\n\telement_values = np.zeros(shape=[2, len(stan_spec)])\n\tfor i in range(len(stan_spec)):\n\t\telement_values[0][i] = peak_height_static(stan_spec[i], energies, element, width)\n\t\telement_values[1][i] = stan_conc[i][stan_conc_label.index(element)]\n\n\n\t# clensing\n\tzero_values = []\n\toutliner = []\n\tfor i in range(len(element_values[1])):\n\t\tif element_values[1][i] == 0.0: # nuller raus\n\t\t\tzero_values.append(i)\n\t\tif element_values[1][i] >= 600.0:\n\t\t\toutliner.append(i) # zu grosse raus\t\t\n\tdelete_values = zero_values + outliner # seperate (for analysis) lists joined\n\telement_values = np.delete(element_values, delete_values, axis=1)\n\t\n\tif verbous == True:\n\t\tprint(element, \"data points not used: \")\n\t\tprint(\"Zeros: \", zero_values)\n\t\tprint(\"Outliner: \", outliner)\n\t\tprint()\n\t\tprint(element + \"-values of standards:\")\n\t\tfor i in range(len(element_values[1])):\n\t\t\tprint(i, element_values[1][i])\n\t\tprint()\n\n\t# print(stan_conc_label)\n\n\tfigure.scatter(element_values[0], element_values[1], marker='.', color='black', s=15, label=element + ' in Standards')\n\t#figure.hist2d(element_values[0], element_values[1],bins=50,cmap='gist_gray_r')\n\t#figure.hist2d(element_values[0], element_values[1],bins=40,cmap='viridis_r')\n\n\n\t# Linear regression\n\tm, c = np.polyfit(element_values[0], element_values[1], 1)\n\tformula_string = \"y = %.2f * x + %.2f \" % (m, c)\n\tx_range = np.linspace(0, element_values[0].max())\n\t# Plot ŕegression\n\tfigure.plot(x_range, m * x_range + c , color='red',linewidth=3, label=formula_string)\n\t\n\t#mark zeros\n\tfigure.axvline(0, color=\"black\", linewidth=0.5)\n\tfigure.axhline(0, color=\"black\", linewidth=0.5)\n\t# legend in upper left\n\tfigure.legend(loc=2) \n\n\ndef eichkurve_nobg(element, figure, stan_spec, energies, stan_conc, stan_conc_label, width=7, verbous=False):\n\tfigure.set_title(element + '-Eichkurve, Peakweite: ' + str(width) + \", Keine Background-Korrektur\")\n\tfigure.set_xlabel('Peak Height [counts]')\n\tfigure.set_ylabel('Concentration in Standard [ppm]')\n\n\n\telement_values = np.zeros(shape=[2, len(stan_spec)])\n\tfor i in range(len(stan_spec)):\n\t\telement_values[0][i] = peak_height_nobg(stan_spec[i], energies, element, width)\n\t\telement_values[1][i] = stan_conc[i][stan_conc_label.index(element)]\n\n\n\t# clensing\n\tzero_values = []\n\toutliner = []\n\tfor i in range(len(element_values[1])):\n\t\tif element_values[1][i] == 0.0: # nuller raus\n\t\t\tzero_values.append(i)\n\t\tif element_values[1][i] >= 600.0:\n\t\t\toutliner.append(i) # zu grosse raus\t\t\n\tdelete_values = zero_values + outliner # seperate (for analysis) lists joined\n\telement_values = np.delete(element_values, delete_values, axis=1)\n\t\n\tif verbous == True:\n\t\tprint(element, \"data points not used: \")\n\t\tprint(\"Zeros: \", zero_values)\n\t\tprint(\"Outliner: \", outliner)\n\t\tprint()\n\t\tprint(element + \"-values of standards:\")\n\t\tfor i in range(len(element_values[1])):\n\t\t\tprint(i, element_values[1][i])\n\t\tprint()\n\n\t# print(stan_conc_label)\n\n\tfigure.scatter(element_values[0], element_values[1], marker='.', color='black', s=15, label=element + ' in Standards')\n\t#figure.hist2d(element_values[0], element_values[1],bins=50,cmap='gist_gray_r')\n\t#figure.hist2d(element_values[0], element_values[1],bins=40,cmap='viridis_r')\n\n\n\t# Linear regression\n\tm, c = np.polyfit(element_values[0], element_values[1], 1)\n\tformula_string = \"y = %.2f * x + %.2f \" % (m, c)\n\tx_range = np.linspace(0, element_values[0].max())\n\t# Plot ŕegression\n\tfigure.plot(x_range, m * x_range + c , color='red',linewidth=3, label=formula_string)\n\t\n\t#mark zeros\n\tfigure.axvline(0, color=\"black\", linewidth=0.5)\n\tfigure.axhline(0, color=\"black\", linewidth=0.5)\n\t# legend in upper left\n\tfigure.legend(loc=2) \n\ndef eichkurve_poly(element, figure, stan_spec, energies, stan_conc, stan_conc_label, width=7, verbous=False):\n\tfigure.set_title(element + '-Eichkurve, Peakweite: ' + str(width) + \", dynamische Background-Korrektur\")\n\tfigure.set_xlabel('Peak Height [counts]')\n\tfigure.set_ylabel('Concentration in Standard [ppm]')\n\n\n\telement_values = np.zeros(shape=[2, len(stan_spec)])\n\tfor i in range(len(stan_spec)):\n\t\telement_values[0][i] = peak_height_poly(stan_spec[i], energies, element, width)\n\t\telement_values[1][i] = stan_conc[i][stan_conc_label.index(element)]\n\n\n\t# clensing\n\tzero_values = []\n\toutliner = []\n\tfor i in range(len(element_values[1])):\n\t\tif element_values[1][i] == 0.0: # nuller raus\n\t\t\tzero_values.append(i)\n\t\tif element_values[1][i] >= 600.0:\n\t\t\toutliner.append(i) # zu grosse raus\t\t\n\tdelete_values = zero_values + outliner # seperate (for analysis) lists joined\n\telement_values = np.delete(element_values, delete_values, axis=1)\n\t\n\tif verbous == True:\n\t\tprint(element, \"data points not used: \")\n\t\tprint(\"Zeros: \", zero_values)\n\t\tprint(\"Outliner: \", outliner)\n\t\tprint()\n\t\tprint(element + \"-values of standards:\")\n\t\tfor i in range(len(element_values[1])):\n\t\t\tprint(i, element_values[1][i])\n\t\tprint()\n\n\t# print(stan_conc_label)\n\n\tfigure.scatter(element_values[0], element_values[1], marker='.', color='black', s=15, label=element + ' in Standards')\n\t#figure.hist2d(element_values[0], element_values[1],bins=50,cmap='gist_gray_r')\n\t#figure.hist2d(element_values[0], element_values[1],bins=40,cmap='viridis_r')\n\n\n\t# Linear regression\n\tm, c = np.polyfit(element_values[0], element_values[1], 1)\n\tformula_string = \"y = %.2f * x + %.2f \" % (m, c)\n\tx_range = np.linspace(0, element_values[0].max())\n\t# Plot ŕegression\n\tfigure.plot(x_range, m * x_range + c , color='red',linewidth=3, label=formula_string)\n\t\n\t#mark zeros\n\tfigure.axvline(0, color=\"black\", linewidth=0.5)\n\tfigure.axhline(0, color=\"black\", linewidth=0.5)\n\t# legend in upper left\n\tfigure.legend(loc=2) \n\n\n\t\n\n\n\ndef eichkurve(element, width):\n\n\n\t(kev,stan_label,stan_spec,stan_soll,soll_label) = load_all_standards()\n\n\tf, ax = plt.subplots(1)\n\teichkurve_poly(element, ax, stan_spec, kev, stan_soll, soll_label, width)\n\n\tfilename = \"plots/\" + 'eichkurve_' + element + \"_\" + str(width) + '.pdf'\n\tpp = PdfPages(filename)\n\tpp.savefig(f)\n\tpp.close()\n\n\ndef eichkurve_stupid(element, width):\n\n\n\t(kev,stan_label,stan_spec,stan_soll,soll_label) = load_all_standards()\n\n\tf, ax = plt.subplots(1)\n\teichkurve_nobg(element, ax, stan_spec, kev, stan_soll, soll_label, width)\n\n\tfilename = \"plots/\" + 'eichkurve_' + element + \"_\" + str(width) + '_no-bg.pdf'\n\tpp = PdfPages(filename)\n\tpp.savefig(f)\n\tpp.close()\n\n\ndef eichkurve_semistupid(element, width):\n\n\n\t(kev,stan_label,stan_spec,stan_soll,soll_label) = load_all_standards()\n\n\tf, ax = plt.subplots(1)\n\teichkurve_stat(element, ax, stan_spec, kev, stan_soll, soll_label, width)\n\n\tfilename = \"plots/\" + 'eichkurve_' + element + \"_\" + str(width) + '_static.pdf'\n\tpp = PdfPages(filename)\n\tpp.savefig(f)\n\tpp.close()\n\n\n\n\n\n\n# (kev,stan_label,stan_spec,stan_soll,soll_label) = load_all_standards()\n\n\n\n# for element in [\"Mo\", \"Rb\", \"Sr\", \"Y\", \"Zr\", \"Nb\"]:\n# \teichkurve(element, 7)\n\n\nprint_Zr_peaks()\n\n# for element in [\"Rb\", \"Sr\", \"Y\"]:\n# \teichkurve_semistupid(element, 7)\n\n\n\n","sub_path":"rfa.py","file_name":"rfa.py","file_ext":"py","file_size_in_byte":18574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"380873873","text":"# Definition for singly-linked list with a random pointer.\n# class RandomListNode(object):\n# def __init__(self, x):\n# self.label = x\n# self.next = None\n# self.random = None\n\nclass Solution(object):\n def copyNext(self, head):\n while head != None:\n temp = RandomListNode(0)\n temp.label = head.label\n temp.random = head.random\n temp.next = head.next\n head.next = temp\n head = head.next.next\n \n def copyRandom(self, head):\n while head != None:\n if head.next.random != None:\n head.next.random = head.random.next\n head = head.next.next\n \n def splitList(self, head):\n newHead = head.next\n while head != None:\n temp = head.next\n head.next = temp.next\n head = head.next\n if temp.next != None:\n temp.next = temp.next.next\n return newHead\n \n def copyRandomList(self, head):\n \"\"\"\n :type head: RandomListNode\n :rtype: RandomListNode\n \"\"\"\n if not head:\n return None\n \n self.copyNext(head)\n self.copyRandom(head)\n return self.splitList(head)","sub_path":"LeetCode/Copy_List_with_Random_Pointer.py","file_name":"Copy_List_with_Random_Pointer.py","file_ext":"py","file_size_in_byte":1266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"562651829","text":"# I use Node class to implement the DFS\nclass Node:\n visited = set() # class variable\n def __init__(self, name):\n self.name = name\n self.children = [] # store the children node object\n self.childrenNames = {} # store the children name and edge cost from self to this child\n \n def __repr__(self):\n return \"node %s\" % self.name\n \n def findPath(self, dst): #dst: the name of final destination(t), is a string\n #print(\"node {}, its chilren are{}\".format(self,self.children))\n '''\n find path from current Node to final destination, use DFS\n @Input: destination node\n @Output: a list containing node names, reversed path from destination to current node\n return the reversed path to save some complexity(prepend list is O(N), \n append list is O(1))\n '''\n if self.name in Node.visited: # run into a circle, return empty path\n return []\n \n Node.visited.add(self.name)\n\n if self.name == dst:\n return [self.name]\n\n if len(self.children) == 0: # run into node without children\n return []\n\n for child in self.children:\n path = child.findPath(dst)\n if(len(path) > 0):\n path.append(self.name)\n return path\n return []\n \n\nclass graph:\n def __init__(self):\n self.nodes = {} # a dic storing all nodes, key is name(string), val is node object\n \n def addNode(self, name):\n self.nodes[name] = Node(name)\n\n # add edge for the first time\n # cumulative edge capacity if we have add (u,v,w) where e,v exist\n def add_edge(self, u, v, w): # direct edge from u to v with cpacity w\n '''\n add edge to my graph\n Input:\n u: string represent u name, v same\n w: int represent capacity\n '''\n if w<0:\n raise ValueError(\"No negative edge cost! Check your input!\")\n \n for name in [u,v]:\n if not name in self.nodes:\n self.addNode(name)\n \n # if we do not add v to u's children, add now\n if not self.nodes[v] in self.nodes[u].children and w > 0:\n self.nodes[u].children.append(self.nodes[v])\n \n\n # parallel dic store the cost from u to v, also including update costs\n if v in self.nodes[u].childrenNames:\n self.nodes[u].childrenNames[v] += w\n else:\n self.nodes[u].childrenNames[v] = w\n\n # update edge capacity if we have add (u,v,w) where e,v exist\n def edge(self, u, v, w): # direct edge from u to v with cpacity w\n '''\n add edge to my graph\n Input:\n u: string represent u name, v same\n w: int represent capacity\n '''\n if w<0:\n raise ValueError(\"No negative edge cost! Check your input!\")\n \n for name in [u,v]:\n if not name in self.nodes:\n self.addNode(name)\n \n # if we do not add v to u's children, add now\n if not self.nodes[v] in self.nodes[u].children and w > 0:\n self.nodes[u].children.append(self.nodes[v])\n \n # if we add some edge from u to v previously, but\n # w is 0 after updating the residual graph, (no edge from u to v now), remove edge\n # i.e. remove v from u's children\n if self.nodes[v] in self.nodes[u].children and w == 0:\n self.nodes[u].children.remove(self.nodes[v])\n\n # parallel dic store the cost from u to v, also including update costs\n self.nodes[u].childrenNames[v] = w\n \n\n def DFS(self, s, t):\n '''\n find path from s to t\n s: source node\n t: final destination\n return a list path storing the name(string) of nodes along the path\n '''\n Node.visited = set() # clear up visited set before each DFS\n reversedPath = self.nodes[s].findPath(t)\n\n\n return [i for i in reversed(reversedPath)]\n \n def FordFulkerson(self, s, t):\n '''\n greedy method for finding max flow\n input: s source node, t destination node\n '''\n if s==t:\n raise ValueError(\"Error! Source and sink is the same node!\")\n maxFlow = 0\n while True: \n path = self.DFS(s, t)\n \n if len(path) == 0:\n break\n\n # while we can still find flow from s to t, an augmenting path P from s to t\n bottleNeck = float(\"Inf\") # find bottleNeck(P,Gf)\n for i in range(len(path)-1):\n # path[i] is string\n # the edge is path[i] and one of its children path[i+1]\n # the edge cost of edge (path[i], path[i+1]) is self.nodes[path[i]].childrenNames[path[i+1]]\n if self.nodes[path[i]].childrenNames[path[i+1]] < bottleNeck:\n bottleNeck = self.nodes[path[i]].childrenNames[path[i+1]]\n # now find the bottleNeck of current residual graph\n if bottleNeck <= 0:\n raise ValueError(\"Non-posotive bottle neck! Check your computation!\")\n maxFlow += bottleNeck\n\n # now update residual capacities of the edges and reverse edges\n # along the path\n for j in range(len(path)-1):\n w = self.nodes[path[j]].childrenNames[path[j+1]]\n self.edge(path[j], path[j+1], w - bottleNeck) \n\n # check whether path[j+1] is children of path[j] (main check in childrenNames incase \n # trying to find non existing key in dic)\n if path[j] in self.nodes[path[j+1]].childrenNames:\n reverseFlow = bottleNeck + self.nodes[path[j+1]].childrenNames[path[j]]\n else:\n reverseFlow = bottleNeck\n \n self.edge(path[j+1], path[j], reverseFlow)\n\n return maxFlow\n\n\ndef main():\n numInstance = int(input()) # the number of instance, first input\n resList = []\n for i in range(numInstance):\n numNode, numEdge = input().strip().split() # the number of nodes and edges in i-th instance\n numEdge = int(numEdge)\n\n g = graph()\n edgeIdx = 0 # count how many nodes have been added to i-th instance\n while edgeIdx < numEdge:\n u, v, w = input().strip().split()\n g.add_edge(u, v, int(w))\n edgeIdx += 1\n \n try:\n resList.append(g.FordFulkerson(\"1\", numNode))\n except ValueError:\n print(\"Oops! That was a value error. Try again...\")\n \n \n for res in resList:\n print(res)\n \nif __name__ == \"__main__\":\n main()\n\n\n","sub_path":"hw9/FF.py","file_name":"FF.py","file_ext":"py","file_size_in_byte":6726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"218353620","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\"\"\"This module provides some functionalities for logging.\"\"\"\nimport logging\nimport os\nimport sys\nimport time\nfrom collections import deque, namedtuple\n\nfrom scripts.helpers.funs import seconds_to_hhmmssms\n\nLogEntry = namedtuple(\"LogEntry\", [\"message\", \"start_time\", \"logger\"]) # Container for log entries.\nlog_stack = deque() # Stack where the log-entries are collected.\n\n\ndef log_start(message, logger):\n \"\"\"Logs the start of a process step.\n\n Args:\n message (str): message to log.\n logger (Logger): instance of the logger where the message should be logged.\n \"\"\"\n log_stack.append(LogEntry(message, time.time(), logger))\n logger.info(\"({}) start {}.\".format(len(log_stack), message))\n\n\ndef log_end(additional_message=None):\n \"\"\"Logs the end of a process step together with the start message and the elapsed.\n\n Args:\n additional_message (str): Additional message, which is added to the message from the start log.\n \"\"\"\n n = len(log_stack)\n log_entry = log_stack.pop()\n log_message = \"({}) end {}. time elapsed: {}{}\".format(n, log_entry.message,\n seconds_to_hhmmssms(time.time() - log_entry.start_time),\n \". {}. \".format(\n additional_message) if additional_message else \".\")\n log_entry.logger.info(log_message)\n\n\ndef init_logging(directory, file_name, log_level=logging.INFO):\n \"\"\"Initializes the logger for the project.\n\n Args:\n directory (str): Path to the folder where the log file is written.\n file_name (str): Name of the log file.\n log_level (int): log level\n \"\"\"\n logger = logging.getLogger()\n logger.level = log_level\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n stdout_handler = logging.StreamHandler(sys.stdout)\n stdout_handler.setFormatter(formatter)\n if not os.path.isdir(directory):\n os.makedirs(directory)\n file_handler = logging.FileHandler(os.path.join(directory, file_name), mode=\"w\")\n file_handler.setFormatter(formatter)\n logger.addHandler(stdout_handler)\n logger.addHandler(file_handler)\n","sub_path":"scripts/helpers/my_logging.py","file_name":"my_logging.py","file_ext":"py","file_size_in_byte":2298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"596531089","text":"# -*- coding:utf-8 -*-\nclass Solution:\n # s 源字符串\n def replaceSpace(self, s):\n # write code here\n s = list(s)\n count=len(s)\n for i in range(0,count):\n if s[i]==' ':\n s[i]='%20'\n return ''.join(s)\n\n\nif __name__ == \"__main__\":\n s = Solution()\n src_string = input()\n new_string = s.replaceSpace(src_string)\n print(new_string)","sub_path":"算法/剑指offer/02-替换空格.py","file_name":"02-替换空格.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"47490670","text":"\n__author__ = 'Tom Van den Eede'\n__copyright__ = 'Copyright 2018, Palette2 Splicer Post Processing Project'\n__credits__ = ['Tom Van den Eede',\n 'Tim Brookman'\n ]\n__license__ = 'GPL'\n__version__ = '3.0.0'\n__maintainer__ = 'Tom Van den Eede'\n__email__ = 'P2PP@pandora.be'\n__status__ = 'Beta'\n\n\n#########################################\n# Variable default values\n#########################################\n\n# Filament Transition Table\npaletteInputsUsed = [False,\n False,\n False,\n False]\n\nfilamentType = [\"\",\n \"\",\n \"\",\n \"\"]\n\nfilamentDescription = [\"Unnamed\",\n \"Unnamed\",\n \"Unnamed\",\n \"Unnamed\"]\nfilamentColorCode = [\"-\",\n \"-\",\n \"-\",\n \"-\"]\n\nusedFilamentTypes = []\n\ndefaultSpliceAlgorithm = \"D000 D000 D000\"\nprocessWarnings = []\nspliceAlgorithmTable = []\nspliceAlgorithmDictionary = {}\n\nprinterProfileString = '' # A unique ID linked to a printer configuration profile in the Palette 2 hardware.\n\ninputGcode = []\nprocessedGCode = [] # final output array with Gcode\n\n# these variables are used to build the splice information table (Omega-30 commands in GCode) that will drive the Palette2\n# spliceoffset allows for a correction of the position at which the transition occurs. When the first transition is scheduled\n# to occur at 120mm in GCode, you can add a number of mm to push the transition further in the purge tower. This serves a similar\n# function as the transition offset in chroma\nsplice_offset = 0\nspliceExtruderPosition = []\nspliceUsedTool = []\nspliceLength = []\n\n# SIDE WIPES\nside_wipe_loc = \"\"\nside_wipe = False\nside_wipe_length = 0\nside_wipe_skip = False\ndefineTower = False\nsideWipeMinY = 25\nsideWipeMaxY = 175\nmaxWipe = -1\nwipeFeedRate = 2000\nemptyGrid = False\n\nbefore_sidewipe_gcode = []\nafter_sidewipe_gcode = []\n\nbed_size_x = 250\nbed_size_y = 220\nbed_origin_x = 0\nbed_origin_y = -10.00 #eccount for the purge line at the start of the print\n\nwipe_tower_info = {'minx': 9999,\n 'miny': 9999,\n 'maxx': -9999,\n 'maxy': -9999\n }\n\nwipetower_posx = 0\nwipetower_posy = 0\n\ncurrentPositionX = 0\ncurrentPositionY = 0\n\n# ping text is a text variable to store information about the PINGS generated by P2PP. this information is pasted after\n# the splice information right after the Palette2 header\npingExtruderPosition = []\n\n\n# Hotswapcount is the number of hotswaps generated during the print.... not sure what this is used for, this variable is\n# only used to complete the header\nhotSwapCount = 0\n\n# TotalExtrusion keeps track of the total extrusion in mm for the print taking into account the Extruder Multiplier set\n# in the GCode settings...\ntotalMaterialExtruded = 0\n\n# The next 3 variables are used to generate pings. A ping is scheduled every ping interval. The LastPing option\n# keeps the last extruder position where a ping was generated. It is set to -100 to pring the first PING forward...\n# Not sure this is a good idea. Ping distance increases over the print in an exponential way. Each ping is 1.03 times\n# further from the previous one. Pings occur in random places!!! as the are non-intrusive and don't causes pauses in the\n# print they aren ot restricted to the wipe tower and they will occur as soon as the interval length for ping is exceeded.\nlastPingExtruderPosition = 0\npingIntervalLength = 350\nmaxPingIntervalLength = 3000\npingLengthMultiplier = 1.03\nsidewipecorrection=1.0\nsidewiperetract = 0.4\ncorrectWipeRetract = False\nwipeRetracted = False\nmmu_unload_remove = False\nvolumetricE = False\n\n\n# currenttool/lastLocation are variables required to generate O30 splice info. splice info is generated at the end of the tool path\n# and not at the start hence the requirement to keep the toolhead and lastlocation to perform the magic\ncurrentTool = -1\npreviousToolChangeLocation = 0\n\ncurrent_layer = \"0\" # Capture layer information for short splice texts\nextrusionMultiplier = 1.0 # Monitors M221 commands during the print.\ncurrentprintFeedrate = 100 # Monitors the current feedrate\ncurrent_print_feed = 2000\nextraRunoutFilament = 150 # Provide extra filament at the end of the print.\nminimalSpliceLength = 80 # Minimum overall splice length.\nminimalStartSpliceLength = 100 # Minimum first splice length.\nwithin_tool_change_block = False # keeps track if the processed G-Code is part of a toolchange or a regular path.\nallowFilamentInformationUpdate = False # TBA\n\nreprap_compatible = False # Enables the cleanup/removal of M900 commands as RepRap uses M572 which is slightly different\n\nversion=\"0.0.0\"","sub_path":"p2pp/variables.py","file_name":"variables.py","file_ext":"py","file_size_in_byte":4780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"267764516","text":"import calendar\n\nfrom bs4 import BeautifulSoup\nfrom datetime import datetime\nimport requests\nimport spotipy\nfrom spotipy.oauth2 import SpotifyOAuth\nimport credentials\nfrom pprint import pprint\n\nBILLBOARD_BASE_URL = 'https://www.billboard.com/charts/hot-100/'\nSPOTIFY_BASE_URL = ''\nis_correct_date = False\n\n# Get user input\ndate_str = \"\"\nsongs = []\n\nwhile not is_correct_date:\n try:\n date_str = input(\"Please enter a date in the format YYY-MM-DD: \")\n date_time_obj = datetime.strptime(date_str, '%Y-%m-%d')\n is_correct_date = True\n date_str = date_time_obj.strftime('%Y-%m-%d')\n except ValueError:\n print(\"Sorry, please enter the date in the correct format.\\n\")\n\nres = requests.get(url=f'{BILLBOARD_BASE_URL}{date_str}')\nsoup = BeautifulSoup(res.text, 'html.parser')\nli_elements = soup.find_all(name=\"li\", class_=\"chart-list__element\")\n\nfor el in li_elements:\n songs.append(\n {\n \"rank\": el.find(class_=\"chart-element__rank__number\").string,\n \"name\": el.find(class_=\"chart-element__information__song\").string,\n \"artist\": el.find(class_=\"chart-element__information__artist\").string\n }\n )\n\nscope = 'playlist-modify-private'\nsp = spotipy.Spotify(auth_manager=SpotifyOAuth(\n client_id=credentials.SPOTIFY['app_id'],\n client_secret=credentials.SPOTIFY['app_secret'],\n redirect_uri='https://example.com',\n scope=scope\n))\ncurrent_user_id = sp.current_user()['id']\nsong_uris = []\nsongs_not_found = []\nyear = date_str.split(\"-\")[0]\n\nfor song in songs:\n search_q = f\"track:{song['name']} year:{year}\"\n res = sp.search(q=search_q, type=\"track\")\n try:\n uri = res['tracks']['items'][0]['uri']\n song_uris.append(uri)\n except IndexError:\n print(f\"\\\"{song['name']}\\\" not found and skipped from list.\")\n songs_not_found.append(song)\n\nplaylist_res = sp.user_playlist_create(user=current_user_id, name=f\"Top 100 as of {date_str}\", public=False)\nadd_items_res = sp.playlist_add_items(playlist_id=playlist_res['id'], items=song_uris)\n\nprint(\"Done! Check your Spotify Playlist.\")\n\n","sub_path":"day46/scraped-spotify-playlist/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"448673527","text":"# Генерация вопросов\n\nfrom random import randint\n\ndef get_q_templs( \n\t\tznanie = {}, \n\t\tkolvo = 5, \n\t\tform = \"Правда ли что %s %s %s?\"):\n\trez = {}\n\ti = 1\n\tfor d1, rel, d2 in znanie.keys():\n\t\tif i > kolvo: break\n\t\tif randint(0,10) > 2:\n\t\t\tif randint(0,1):\n\t\t\t\trez.update({\n\t\t\t\t\tform % (d2, rel, d1):{\n\t\t\t\t\t\t\"a\":{\"text\":\"да\", \"correct\": 1}, \n\t\t\t\t\t\t\"b\":{\"text\":\"нет\", \"correct\": 0}, \n\t\t\t\t\t\t\"c\":{\"text\":\"не знаю\", \"correct\": 0}, \n\t\t\t\t\t}\n\t\t\t\t})\n\t\t\telse:\n\t\t\t\tfor _d1, _rel, _d2 in znanie.keys():\n\t\t\t\t\tif _d1 != d1 and rel != _rel:\n\t\t\t\t\t\trez.update({\n\t\t\t\t\t\t\tform % (_d2, rel, d1):{\n\t\t\t\t\t\t\t\t\"a\":{\"text\":\"да\", \"correct\": 0}, \n\t\t\t\t\t\t\t\t\"b\":{\"text\":\"нет\", \"correct\": 1}, \n\t\t\t\t\t\t\t\t\"c\":{\"text\":\"не знаю\", \"correct\": 0}, \n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t})\n\t\t\t\t\t\tbreak\n\t\t\ti += 1\n\treturn rez\n\t\nif __name__ == '__main__':\n\tprint(get_q_templs( # list of str\n\t\tznanie = {('брикетирование', 'мочь быть', 'прессование'): 1, ('брикетирование', 'помогать', 'разделение мусор'): 1, ('разделение мусор', 'производить', 'вторичный сырье'): 1, ('захоронение', 'являться', 'свалка'): 1, ('свалка', 'иметь', 'влияние'): 1}, \n\t\tkolvo = 1, \n\t\tform = \"%s) Правда ли что %s %s %s?\"))\n","sub_path":"get_q_templs.py","file_name":"get_q_templs.py","file_ext":"py","file_size_in_byte":1347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"244054483","text":"# -*- coding: utf-8 -*-\nimport random as r\nimport os\nimport Addons\n\n\nclass Code:\n def __init__(self):\n self.code = \"\"\n self.unknown = []\n self.boss_name = \"\"\n self.boss_pict = \"\"\n self.score_multiplier = 1\n\n def __str__(self):\n return self.code\n\n def generate(self, amount):\n self.code = \"\".join(map(lambda a: str(r.randint(1, 9)), range(amount)))\n self.unknown = list(map(lambda a: str(\"*\" * a + self.code[a] + \"*\" * (amount - a - 1)), range(amount)))\n\n def get_code_digit(self):\n if self.unknown:\n num = r.choice(self.unknown)\n self.unknown.remove(num)\n return \"\\\"Kod: %s\\\"\\n\" % num\n else:\n return \"\\\"Kod: %s\\\"\\n\" % self\n\n def return_known_code(self):\n known = list(self.code)\n\n for i in self.unknown:\n index = list(map(lambda a: a != \"*\", list(i))).index(True)\n known.insert(index, \"*\")\n known.pop(index+1)\n\n known = \"\".join(known)\n return known\n\n def game_end(self, player):\n sec = 0.005\n Addons.countdown()\n Addons.slow_print(\"\\nWylądowałeś w pokoju przeznaczenia!\", 0.05)\n input(\"\\nWciśnij ENTER, aby kontunuować...\")\n\n while True:\n os.system('cls')\n print(\"-\" * 20)\n Addons.slow_print(\"\"\"Jesteś w pokoju przeznaczenia\\n\nTwoje serce zaczyna bić szybciej. Przed Tobą znajdują się duże straszliwe wrota.\nWygląda na to że, aby je otworzyć należy podać odpowiedni kod.\"\"\", sec, newline=False)\n if self.return_known_code() == self.code:\n print(\"\\nZnasz już cały kod: \" + self.code)\n else:\n print(\"\\nZnasz część cyfr kodu: \" + self.return_known_code())\n\n Addons.slow_print(\"\"\"Jednak czy jesteś na tyle odważny aby przekonać się co kryje się za tymi drzwiami?\nWidzisz, że masz też prawdopodobną możliwość powrotu przez ten sam portal,\nz którego tu przyszedłeś.\"\"\", sec)\n sec = 0\n print(\"Co robisz? (1/2)\\n\")\n print(\"1. Próbujesz wpisać kod\")\n print(\"2. Wchodzisz do portalu\\n\")\n p = input(\">>>\")\n\n if p == \"1\":\n if self.guess(player):\n return True\n\n elif p == \"2\":\n Addons.countdown()\n Addons.slow_print(\"Portal przenosi Cię z powrotem do pokoju startowego.\\n\", 0.05)\n input(\"\\nWciśnij ENTER, aby kontunuować...\")\n return False\n\n def guess(self, player):\n print(\"\\nPodaj kod\")\n code_input = input(\">>>\")\n\n while len(code_input) != len(self.code):\n code_input = input(\"\\n>>>\")\n\n if self.code == code_input:\n Addons.slow_print(\"\\nPodałeś właściwy Kod!\\n\", 0.05, newline=False)\n player.update_lvl(50)\n self.fight_with_boss(player)\n return True\n\n else:\n Addons.slow_print(\"\\nZły kod.\\nZ podłogi wysuwają się kłujące kolce.\\n\", 0.05, newline=False)\n player.update_hp(10)\n if not player.dead:\n input(\"\\nWciśnij ENTER, aby kontunuować...\")\n else:\n return True\n\n return False\n\n def fight_with_boss(self, player):\n Addons.slow_print(\"Wrota otwierają się z wielkim piskiem...\\n\" + self.boss_name + \" chce pożreć Twoją duszę!\",\n 0.1)\n Addons.slow_print(self.boss_pict, 0.0001)\n input(\"\\nWciśnij ENTER, aby kontunuować...\")\n\n player.fight(self.boss_name, int(self.score_multiplier * player.max_hp / 9) * 10)\n if not player.dead:\n Addons.slow_print(\"Teraz już nic nie stoi na przeszkodzie, aby opuścić to miejsce.\\nOdzyskałeś wolność...\",\n 0.05)\n Addons.print_congrats()\n print(\"\\nKONIEC GRY\")\n player.save_score(self.score_multiplier)\n input(\"\\nWciśnij ENTER, aby kontunuować...\")\n os.system('cls')\n\n def load_boss(self, player_class):\n self.boss_name = ['Deathwing', 'Czarnoksieznik', 'Ksiezniczka'][int(player_class) - 1]\n self.boss_pict = \"\"\n\n with open(\"boss.txt\", \"r\") as f:\n tmp = 0\n for line in f:\n if line.startswith(\"x x\"):\n tmp += 1\n if tmp > int(player_class)*2:\n break\n if tmp > int(player_class)*2 - 2:\n self.boss_pict += line\n","sub_path":"Code.py","file_name":"Code.py","file_ext":"py","file_size_in_byte":4567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"238403832","text":"import torch\nimport torch.nn as nn\n\nfrom arglib import add_argument, init_g_attr\nfrom xib.data_loader import MetricLearningBatch\nfrom xib.model.modules import get_effective_c_idx\n\n\n@init_g_attr(default='property')\nclass MetricLearningModel(nn.Module):\n\n add_argument('num_layers', default=1, dtype=int, msg='number of trainable layers.')\n\n def __init__(self, hidden_size, feat_groups, num_layers):\n super().__init__()\n effective_num_feat_groups = len(get_effective_c_idx(feat_groups)) + 1 # NOTE(j_luo) +1 due to 'avg' score.\n if num_layers == 1:\n self.regressor = nn.Linear(effective_num_feat_groups, 1)\n else:\n modules = [nn.Linear(effective_num_feat_groups, hidden_size), nn.LeakyReLU(negative_slope=0.1)]\n for _ in range(num_layers - 2):\n modules.append(nn.Linear(hidden_size, hidden_size))\n modules.append(nn.LeakyReLU(negative_slope=0.1))\n modules.append(nn.Linear(hidden_size, 1))\n self.regressor = nn.Sequential(*modules)\n\n def forward(self, batch: MetricLearningBatch) -> torch.FloatTensor:\n output = self.regressor(batch.normalized_score.rename(None)).view(-1)\n return output\n","sub_path":"xib/model/metric_learning_model.py","file_name":"metric_learning_model.py","file_ext":"py","file_size_in_byte":1223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"574342180","text":"from automation import CommandSequence, TaskManager\nimport pandas as pd\n\n# number of browsers\nNUM_BROWSERS = 2\nDB_PATH = '~/Documents/MachineLearning/WebCrawler/vanilla_sqlite_results/'\nLOG_PATH = '~/Documents/MachineLearning/WebCrawler/vanilla_logs/'\n\n# retrieve list of sights query from the csv\ntop_sites = pd.read_csv('~/Downloads/top-1m.csv', nrows=100, header=None)\nsites = top_sites.loc[:, 1].values\n\n# Loads the default manager params\n# and NUM_BROWSERS copies of the default browser params\nmanager_params, browser_params = TaskManager.load_default_params(NUM_BROWSERS)\n\nfor i in range(NUM_BROWSERS):\n # Record HTTP Requests and Responses\n browser_params[i]['http_instrument'] = True\n # Record cookie changes\n browser_params[i]['cookie_instrument'] = True\n # Record Navigations\n browser_params[i]['navigation_instrument'] = True\n # Record JS Web API calls\n browser_params[i]['js_instrument'] = True\n # Record the callstack of all WebRequests made\n browser_params[i]['callstack_instrument'] = True\n\nmanager_params['data_directory'] = DB_PATH\nmanager_params['log_directory'] = LOG_PATH\n\n# Instantiates the measurement platform\n# Commands time out by default after 60 seconds\nmanager = TaskManager.TaskManager(manager_params, browser_params)\n\n# Visits the sites\nfor site in sites:\n\n site = 'http://' + site\n\n # Parallelize sites over all number of browsers set above.\n command_sequence = CommandSequence.CommandSequence(\n site, reset=True,\n callback=lambda success, val=site:\n print(\"CommandSequence {} done\".format(val)))\n\n # Start by visiting the page\n command_sequence.get(sleep=3, timeout=60)\n\n # Run commands across the three browsers (simple parallelization)\n manager.execute_command_sequence(command_sequence)\n\n# Shuts down the browsers and waits for the data to finish logging\nmanager.close()\n","sub_path":"vanilla-crawl.py","file_name":"vanilla-crawl.py","file_ext":"py","file_size_in_byte":1878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"346571534","text":"from setup import *\n\ndef unigram(obs):\n # Returns the tag of the word obs, as predicted by a unigram model\n pos_tag = ''\n argmax_tag = 0 \n \n if obs in OPROBS:\n probs = OPROBS[obs]\n else: \n probs = OPROBS['#UNSEEN']\n\n for tag in TAGS:\n index = TAGS.index(tag)\n pr_word_tag = probs[index]\n pr_tag = X0[index]\n pr_tag_word = pr_word_tag * pr_tag\n if pr_tag_word > argmax_tag:\n argmax_tag = pr_tag_word\n pos_tag = tag \n\n return pos_tag","sub_path":"pos-code/unigram.py","file_name":"unigram.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"139825221","text":"from math import sqrt\n\nimport numpy as np\nimport pytest\nimport scipy.sparse as sp\nfrom numpy.testing import assert_almost_equal\nfrom numpy.testing import assert_array_almost_equal\n\nfrom modl.dict_completion import DictCompleter, csr_center_data\nfrom modl.externals.spira.cross_validation import train_test_split\n\nbackends = ['c', 'python']\n\n\n@pytest.mark.parametrize(\"backend\", backends)\ndef test_dict_completion(backend):\n # Generate some toy data.\n rng = np.random.RandomState(0)\n U = rng.rand(50, 3)\n V = rng.rand(3, 20)\n X = np.dot(U, V)\n\n mf = DictCompleter(n_components=3, max_n_iter=100, alpha=1e-3,\n random_state=0,\n detrend=False,\n backend=backend,\n verbose=0, )\n\n mf.fit(X)\n\n Y = np.dot(mf.P_, mf.Q_)\n Y2 = mf.predict(X).toarray()\n\n assert_array_almost_equal(Y, Y2)\n\n rmse = np.sqrt(np.mean((X - Y) ** 2))\n rmse2 = mf.score(X)\n\n assert_almost_equal(rmse, rmse2)\n\n\n@pytest.mark.parametrize(\"backend\", backends)\ndef test_dict_completion_normalise(backend):\n # Generate some toy data.\n rng = np.random.RandomState(0)\n U = rng.rand(50, 3)\n V = rng.rand(3, 20)\n X = np.dot(U, V)\n\n mf = DictCompleter(n_components=3, max_n_iter=100, alpha=1e-3,\n random_state=0,\n backend=backend,\n verbose=0, detrend=True)\n\n mf.fit(X)\n\n Y = np.dot(mf.P_, mf.Q_)\n Y += mf.col_mean_[np.newaxis, :]\n Y += mf.row_mean_[:, np.newaxis]\n Y2 = mf.predict(X).toarray()\n\n assert_array_almost_equal(Y, Y2)\n\n rmse = np.sqrt(np.mean((X - Y) ** 2))\n rmse2 = mf.score(X)\n\n assert_almost_equal(rmse, rmse2)\n\n\n@pytest.mark.parametrize(\"backend\", backends)\ndef test_dict_completion_missing(backend):\n # Generate some toy data.\n rng = np.random.RandomState(0)\n U = rng.rand(100, 4)\n V = rng.rand(4, 20)\n X = np.dot(U, V)\n X = sp.csr_matrix(X)\n X_tr, X_te = train_test_split(X, train_size=0.95)\n X_tr = sp.csr_matrix(X_tr)\n X_te = sp.csr_matrix(X_te)\n\n mf = DictCompleter(n_components=4, max_n_iter=400, alpha=1,\n random_state=0,\n backend=backend,\n detrend=True,\n verbose=0, )\n\n mf.fit(X_tr)\n X_pred = mf.predict(X_te)\n rmse = sqrt(np.sum((X_te.data - X_pred.data) ** 2) / X_te.data.shape[0])\n X_te_c, _, _ = csr_center_data(X_te)\n rmse_c = sqrt(np.sum((X_te.data - X_te_c.data) ** 2) / X_te.data.shape[0])\n assert(rmse < rmse_c)\n # assert_array_almost_equal(X_te.data, X_pred.data)\n","sub_path":"modl/tests/test_dict_completion.py","file_name":"test_dict_completion.py","file_ext":"py","file_size_in_byte":2627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"366371032","text":"\nimport keras as K\nimport keras.utils\nimport numpy as np\nimport pandas as pa\nimport keras\nfrom keras.layers import Dense, Activation, Embedding, Input, Flatten, Dropout, TimeDistributed, BatchNormalization, Reshape, Lambda, LSTM\nfrom keras.models import Sequential, load_model\nfrom keras.optimizers import RMSprop, Adam, SGD\nfrom sklearn.preprocessing import OneHotEncoder\n\nimport MyUtils.Embeddings as my_emb\nimport aptecoPythonUtilities.utils_explore as my_exp\nimport MyUtils.utils_nn as my_nn\nimport MyUtils.utils_prep as my_prep\n\n\n########################################################################################\n\n#### Data\n\ndef create_n_hot_rolling(X, y_window = 1):\n\n print(\"Getting Y categories with look ahead of {}\".format(y_window))\n\n # Convert to 0ne-hot, resetting the first index which will be for padded timestepts\n X01 = my_prep.onehot_2D(X)\n X01[:,:,0] = 0\n\n # Aggregate over max_ahead timesteps, setting 1 if any contain each item\n y_ = my_prep.rolling_3D(X01, y_window, 'max')\n\n return y_\n\ndef get_trans(raw, urn, item, code, n_obs = None):\n '''\n raw =hRaw\n urn = 'Person URN'\n item = 'Destination'\n code = 'Code'\n n_obs=100\n '''\n\n enc = my_prep.create_encoder(raw[item])\n raw[code] = enc.transform(raw[item])\n df1 = raw[[urn, item, code]]\n if n_obs:\n print(\"Using {} obs\".format(n_obs))\n df1 = df1.loc[0:n_obs]\n\n # Create arrays of items per Urn\n trans = my_prep.toGroupsDf(df1, urn, code)\n\n # Find maximum number of items per Urn\n max_items = max(trans[code].map(len))\n #multi = trans[np.count_nonzero(trans, axis=1) > 4]\n\n\n print(\"Created {} transactions with maximum length {}\".format(len(trans), max_items))\n\n return trans[code]\n\ndef get_xy(trans, n_obs=None, n_time = None, y_window=1):\n if n_obs is not None:\n trans = trans[0:n_obs]\n if n_time is None:\n # Find the maximum length of transaction sequence\n n_time = max(trans.map(len))\n\n X = my_prep.pad_data(trans, n_time, -1) + 1\n Y = create_n_hot_rolling(X, y_window)\n\n print(\"Created X with shape:{}\".format(X.shape))\n print(\"Created Y with shape:{}\".format(Y.shape))\n\n return X, Y\n\ndef check(X, Y):\n trans_lengths = pa.Series(np.apply_along_axis(np.count_nonzero, 1, X))\n print(\"Maximum length {}\".format(max(trans_lengths)))\n l = 1\n for l in range(1, max(trans_lengths)):\n # first_of_length_l\n ix = trans_lengths[trans_lengths==l].index\n if (len(ix)>0):\n ix = ix[0]\n print(\"Length {} at obs {}\".format(l, ix))\n print(\"X:\\n{}\".format(X[ix, ...]))\n print(\"Y:\\n{}\".format(Y[ix, ...]))\n print()\n\n#### Model Process\n\ndef load_data(data_info, n_obs=None):\n\n get_trans_fn = data_info['get_trans_fn']\n n_obs = data_info['n_obs']\n y_window = data_info['y_window']\n max_len = data_info['max_len']\n\n trans = get_trans_fn(n_obs=n_obs)\n X, Y = get_xy(trans, n_obs=n_obs, n_time=max_len, y_window=y_window)\n\n if data_info:\n data_info['n_time'] = X.shape[1]\n data_info['n_cats'] = Y.shape[2]\n\n return X,Y\n\nclass one_hot_accuracy_metric(keras.callbacks.Callback):\n def on_epoch_end(self, batch, logs):\n import sklearn\n #xV=X\n # yV=Y\n # model = model1_info['model']\n\n vd = self.validation_data\n if (len(vd)==3):\n xV, yV, _ = vd\n elif (len(vd)==4):\n xV, yV, _, _ = vd\n else:\n raise Exception(\"Didn't expect {} values in validation data\".format(len(vd)))\n\n # predict returns a probability\n prob = np.asarray(self.model.predict(xV))\n\n # Get predicted category\n if (prob.ndim==2):\n pred = np.where(prob > 0.5, 1, 0)\n average = 'binary'\n\n # Get single top category for prediction & y\n if (prob.ndim==3):\n pred = my_nn.argmax3d(prob)\n yV = my_nn.argmax3d(yV)\n # Need to average over the categories\n average = 'micro'\n\n # Print first 4 observations\n # print(\"X: {} {}\".format(xV.shape, xV[:4]))\n # print(\"Probs: {} {}\".format(prob.shape, prob[:4]))\n # print(\"Preds: {} {}\".format(pred.shape, pred[:4]))\n # print(\"Actual: {} {}\".format(yV.shape, yV[:4]))\n\n def flatten_non_zeros(A, Z):\n A[Z==0] = -1\n A = A.flatten()\n return A[A != -1]\n\n # Mask out where zeros in the input\n pred_ = flatten_non_zeros(pred, xV)\n yV_ = flatten_non_zeros(yV, xV)\n\n # Compare as flattened list over all obs and timestamps\n precision = sklearn.metrics.precision_score(yV_, pred_, average=average)\n logs.update({'MyPrecision':precision})\n\nclass n_hot_accuracy_metric(keras.callbacks.Callback):\n def on_epoch_end(self, batch, logs):\n import sklearn\n #xV=x\n # yV=y\n # model = model1_info['model']\n\n vd = self.validation_data\n if (len(vd)==3):\n xV, yV, _ = vd\n elif (len(vd)==4):\n xV, yV, _, _ = vd\n else:\n raise Exception(\"Didn't expect {} values in validation data\".format(len(vd)))\n\n # predict returns a probability\n prob = np.asarray(self.model.predict(xV))\n\n\n # Get single top category for prediction, convert back to one-hot\n topPred = my_nn.argmax3d(prob)\n pred01 = my_prep.onehot_2D(topPred)\n average = 'micro'\n\n # See where the prediction matches one of the n-hot y values\n n_hit = np.multiply(pred01, yV)\n # Count how many of the n-categories were hits (at most 1, since only the topPred was taken)\n hit = np.apply_along_axis(np.sum, 2, n_hit)\n\n # Print first 4 observations\n # n=1\n # print(\"X: {} {}\".format(xV.shape, xV[:n]))\n # print(\"Probs: {} {}\".format(prob.shape, prob[:n]))\n # print(\"Preds: {} {}\".format(pred01.shape, pred01[:n]))\n # print(\"Actual: {} {}\".format(yV.shape, yV[:n]))\n # print(\"Hits: {} {}\".format(hit.shape, hit[:n]))\n\n def flatten_non_zeros(A, Z):\n A[Z==0] = -1\n A = A.flatten()\n return A[A != -1]\n\n # Ignore hit/miss where zeros in the input\n hit_ = flatten_non_zeros(hit, xV)\n\n # Precision is just the proportion of top predictions that hit one of the n-hot actual values\n precision = np.mean(hit_)\n\n logs.update({'MyPrecision':precision})\n\ndef model_fit(model_or_model_info, X, y, epochs, batch_size=8, stateful=False, shuffle=True, save=True):\n import time\n\n if (type(model_or_model_info) is dict):\n model_info = model_or_model_info\n data_info = model_info['data_info']\n model = model_info['model']\n model_path = model_info['model_path']\n batch_size = model_info['batch_size']\n stateful = model_info.get('stateful')\n else:\n model = model_or_model_info\n\n\n # When running as stateful, the whole training set is the single large sequence, so must not shuffle it.\n # When not stateful, each item in the training set is a different individual sequence, so can shuffle these\n if stateful:\n shuffle = False\n batch_size = 1\n lbl = \"Iteration\"\n timesteps = X.shpape[1]\n if (timesteps != 1):\n raise ValueError(\"When using stateful it is assumed that each X value has a single time-step but there are {}\".format(timesteps))\n else:\n lbl = \"Epoch\"\n\n model_name = \"Unknown Model\"\n if model_info:\n x_shape = X.shape\n x_shape = [x_shape[0], x_shape[1], 1][0:data_info['x_dims']]\n X.reshape(x_shape)\n model_name = model.name\n\n\n\n print(\"Fitting model '{}' over {} epochs\".format(model_name,epochs))\n print(\"X shape: {}\".format(X.shape))\n print(\"y shape: {}\".format(y.shape))\n print()\n\n metrics = n_hot_accuracy_metric()\n precision = []\n accuracy = []\n loss = []\n for epoch in range(epochs):\n # if the shuffle argument in model.fit is set to True (which is the default),\n # the training data will be randomly shuffled at each epoch\n h = model.fit(X, y, epochs=1, batch_size=batch_size, verbose=0, shuffle=shuffle\n ,validation_split=0.25\n ,callbacks=[metrics]\n ).history\n\n # Got error on callback with dropout i\n print(\"{} {:4d} : loss {:.04f}, accuracy {:0.4f}, Precision {:0.4f} - {}\".format(lbl, epoch, h['loss'][0], h['acc'][0], h['MyPrecision'][0], time.ctime()))\n #print(\"{} {:4d} : loss {:.04f}, accuracy {:0.4f} - {}\".format(lbl, epoch, h['loss'][0], h['acc'][0], time.ctime()))\n accuracy += h['acc']\n #precision += h['MyPrecision']\n loss += h['loss']\n\n # When not stateful, state is reset automatically after each input\n # When stateful, this is suppressed, so must manually reset after the epoch (effectively the one big sequence)\n if stateful: model.reset_states()\n\n if save: my_nn.model_save(model, model_path , model_name, \"latest\")\n\n if not (epoch % 10):\n if save: my_nn.model_save(model, model_path , model_name, epoch)\n\n\n if save: my_nn.model_save(model, model_path , model_name, \"final\", echo=True, temp=False)\n\n if model_info:\n model_info.update({'model':model})\n\n return precision\n\ndef build_model(model_info, data_only = False, extract=True, create=False, fit=True, epochs=100):\n if (extract or data_only):\n X, Y = load_data(model_info['data_info'])\n if data_only:\n return X, Y\n\n if create:\n print(\"Creating new model {}\".format(model_info['model_name']))\n model = model_info['create_fn'](model_info)\n model_info['model'] = model\n else:\n print (\"Reusing existing model {}\".format(model_info['model_name']))\n model = model_info['model']\n\n if fit:\n model_fit(model_info, X, Y,epochs=epochs)\n model.evaluate(X, Y, batch_size=5)\n\n\n\n\n\n\n\n\n###################################################################################\n# Holidays\n###################################################################################\n\nmodels_path = r\"C:\\Users\\rkirk\\Documents\\GIT\\Python\\TestPyCharm\\Models\\003 RNN Bookings\"\n\n\nhRaw = pa.read_csv(r\"S:\\develop\\Data\\Holidays\\Bookings for All People.csv\")\nmy_exp.overview(hRaw)\nmy_exp.detail(hRaw)\n\n#destEmb = my_emb.CreateFromDf(hRaw,'Person URN', \"Destination\")\n#destEmb.plotAll()\n\n\n\n\ndef create_model_1(model_info):\n '''\n Input has to be 2 dimensions: n_obs * n_time_stamp (with no n_features)\n Output is categorical\n :return:\n '''\n data_info = model_info['data_info']\n n_time = data_info['n_time']\n n_cats_in = data_info['n_cats']\n n_cats_out = n_cats_in\n\n hidden_units = model_info['hidden_units']\n embedding_size = model_info['embedding_size']\n dropout = model_info['dropout']\n mask_zero = model_info['mask_zero']\n model_name = model_info['model_name']\n\n model = Sequential(name=model_name)\n model.add(Embedding(input_dim=n_cats_in, input_length=n_time, output_dim=embedding_size, mask_zero=mask_zero))\n model.add(LSTM(hidden_units, return_sequences=True, dropout=dropout))\n model.add(TimeDistributed(Dense(n_cats_out, activation = \"softmax\")))\n model.compile(loss=keras.losses.categorical_crossentropy, optimizer='adam', metrics=['accuracy'])\n print(model.summary())\n return model\n\n\ndef get_hols_trans(n_obs=None):\n #n_obs=100\n trans = get_trans(hRaw, urn = 'Person URN', item = 'Destination', code = 'Code', n_obs=n_obs)\n return trans\n\ndef data_with(data_settings):\n data_info = {'n_obs':1000, 'x_dims':2, 'get_trans_fn':get_hols_trans, 'max_len':None}\n data_info.update(data_settings)\n return data_info\n\ndef model_with(model_settings):\n model_info = {\n 'model_name':\"default_model\", 'model_path':models_path,\n 'dropout':0.2, 'hidden_units':100, 'embedding_size':50,\n 'loss':keras.losses.categorical_crossentropy,\n 'batch_size':8,\n 'mask_zero':True}\n\n model_info.update(model_settings)\n return model_info\n\n\n\n\n\n\ndata_info_a = data_with({'n_obs':10000, 'y_window':1})\ndata_info_b = data_with({'n_obs':10000, 'y_window':3})\nmodel1a_info = model_with({'model_name': \"Model_Bookings_1a\", 'create_fn':create_model_1, 'data_info':data_info_a})\nmodel1b_info = model_with({'model_name': \"Model_Bookings_1b\", 'create_fn':create_model_1, 'data_info':data_info_b})\n\n\nbuild_model(model1a_info, epochs=5, create=True)\nbuild_model(model1b_info, epochs=50, create=True)\n\nmy_nn.model_load(model1a_info, suffix='Final', sub_folder='Keep')\nmy_nn.model_load(model1b_info, suffix='Final', sub_folder='Keep')\n\n\n\nXa, Ya = load_data(data_info_a)\ncheck(Xa, Ya)\nmy_nn.pred_counts(model1a_info, Xa, Ya, n_find=3)\n# With window of 1, top prediction found 32.37% as the next item\n\n# n_Find 0 1 2 3 4+\n# n_Top\n# 0 0.605339 0.323768 0.044596 0.016898 0.009399\n# 1 0.873913 0.099090 0.020998 0.004200 0.001800\n# 2 0.979902 0.012699 0.004300 0.001800 0.001300\n\n\n\nXb, Yb = load_data(data_info_b)\ncheck(Xb, Yb)\nmy_nn.pred_counts(model1b_info, Xb, Yb, n_find=3)\n# With window of 3, top prediction found slightly more, 36.6% as next item\n# - strangely, the %found in positions 2,3 are less, even though these items\n# would have been presented in the expected output\n\n# n_Find 0 1 2 3 4+\n# n_Top\n# 0 0.612539 0.366063 0.013499 0.005299 0.0026\n# 1 0.916608 0.055694 0.015098 0.008099 0.0045\n# 2 0.981702 0.013399 0.003000 0.001400 0.0005\n\n\n\n\n","sub_path":"Models/003 RNN Bookings/RNN bookings.py","file_name":"RNN bookings.py","file_ext":"py","file_size_in_byte":13767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"524568943","text":"import smbus\nimport time\n\n# constants\nBUS = 1 # I2C bus number\nADDRESS = 0x48 # TC74 I2C bus address\nDELAY = 0.5 # delay between reads\n\n# Connect to I2C bus\nbus = smbus.SMBus(BUS)\ntry:\n while True:\n temp = bus.read_byte(ADDRESS)\n print(temp, 'degrees C')\n time.sleep(DELAY)\nexcept KeyboardInterrupt:\n bus.close()\n print('Done')\n","sub_path":"Labs/Lab08/i2cbus_test.py","file_name":"i2cbus_test.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"344719586","text":"#!/usr/bin/python3\n\n# NCBI taxonomy have complex prefix in high rank, so here only use\n# there three \"phylum\"\nkinds = {'Acrogymnospermae', 'Lycopodiopsida', 'Mesangiospermae',\n 'Polypodiopsida', 'basal Magnoliophyta'}\ngenus_list = set()\nwith open('./list.csv', 'r') as _:\n for line in _:\n genus_list.add(line.split('\\t')[1])\n\nresult = list()\nwith open('./genus_info', 'r') as raw:\n for line in raw:\n info = line.split('\\t')\n genus = info[0]\n # if genus not in genus_list:\n # continue\n family = ''\n order = ''\n kind = ''\n for item in info:\n if item.endswith('aceae'):\n family = item\n elif item.endswith('ales'):\n order = item\n elif item in kinds:\n kind = item\n break\n result.append([genus, family, order, kind])\nwith open('result.csv', 'w') as out:\n for i in result:\n out.write('\\t'.join(i)+'\\n')\n","sub_path":"checklist/add_higher.py","file_name":"add_higher.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"598757336","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Dec 19 20:28:24 2018\n\n@author: purandur\n\"\"\"\n\nimport pandas as pd\nimport os\nos.chdir('D:\\Machine_Learning_Projects_by_Me\\Housing Price Prediction')\nHOUSING_PATH = os.path.join(\"datasets\", \"housing\")\n\ndef load_housing_data(housing_path=HOUSING_PATH):\n csv_path=os.path.join(housing_path,\"housing.csv\")\n return pd.read_csv(csv_path)\n\nhousing=load_housing_data()\nhousing.head()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"369482535","text":"import requests\nimport re,get_page\nfrom bs4 import BeautifulSoup\nurl='https://github.com/Jack-Cherish/python-spider'\n\ndef find_cn(url):\n #print('now find url :',url)\n res=get_page.get_page(url)\n soup=BeautifulSoup(res.text,'lxml')\n texxt=soup.find_all('div','Box-body')\n if(len(texxt)==1):\n tt=texxt[0].get_text()\n zhPattern = re.compile(u'[\\u4e00-\\u9fa5]+')\n match=zhPattern.search(tt)\n if(match):\n content=soup.find('div','repository-meta-content').get_text()\n content=str(content).replace(' ','').replace('\\n','')\n return content\n else:\n return False\n else:\n #print(len(texxt))\n return False\n","sub_path":"github_spider/find_cn.py","file_name":"find_cn.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"437705411","text":"#-------------------------------------------------------------------------------\n# Name: module1\n# Purpose:\n#\n# Author: lvanw\n#\n# Created: 05-11-2018\n# Copyright: (c) lvanw 2018\n# Licence: \n#-------------------------------------------------------------------------------\n\nimport requests\nimport json\n\nbaseUrl = 'https://api.schiphol.nl/wayfinding'\nheaderData = {'app_id': 'a81ebb80', 'app_key': '28ebc893c62b69b9b8c75fa6ce972f63', 'ResourceVersion': 'v3'}\nfromLocation = 'mt.lincolnweg,amsterdam'\nflightName = 'KL2717'\n\n#in dit stuk van de code wordt de huidige locatie en de naam van de vlucht opgezocht,\n#waarmee vervolgens wordt gekeken naar het aantal stops dat er genomen kan worden vanaf het 'fromlocation' punt tot aan de bijbehordende gate\n#met het openbaar vervoer\nresponse = requests.get(\n\t'%s/plan/from/%s/profile/transport/flight/%s' % (\n baseUrl,\n \tfromLocation,\n\t\tflightName\n\t),\n\theaders = headerData\n).json()\n\n#in dit stuk van de code wordt vervolgens voor elke stop het nummer geprint, de naam van de stop en de coordinaten\nformat = '* #%s : %s - %s,%s'\nfor num, stop in enumerate(response['stops']):\n\tprint(\n\t\tformat % (\n\t\t\tnum,\n\t\t\tstop['name'],\n\t\t\tstop['latLng']['lat'],\n\t\t\tstop['latLng']['lng']\n\t\t)\n\t)","sub_path":"Step1JourneyPlan.py","file_name":"Step1JourneyPlan.py","file_ext":"py","file_size_in_byte":1270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"237882921","text":"def start_tutorial(user_id, ref = None):\n send_sticker(user_id, 'CAADAgADWgADP1c0GpzyckP3XK1hAg')\n if ref:\n db.change_ref(user_id, ref)\n text = '`Приветствую тебя, путешественник. Рад видеть тебя, готового к приключениям. Кто знает, какая судьба тебя ждёт? Станешь ли ты великим воином, или сдашься и падёшь на пол пути? Это зависит только от тебя. Но могучему герою нужно имя, подобающее его свершениям. \\nКак же к тебе будут обращатся?`'\n send_message(user_id, text, parse_mode = 'Markdown')\n set_hand(user_id, nick_tutorial)\n\n\ndef nick_tutorial(user_id, message):\n if len(otrh_f.switch_chars(message)) < 4 or len(otrh_f.switch_chars(message)) > 20:\n send_message(user_id, 'Твой ник должен соответсвовать идеалам. От 4-ех до 20-ти символов.\\nА также содержать только буквы и цифры.')\n return\n temp[user_id] = {}\n temp[user_id]['new_nick'] = otrh_f.switch_chars(message)\n text = 'Твое имя: ' + temp[user_id]['new_nick'] + '?'\n send_message(user_id, text, [['Да', 'Нет']])\n set_hand(user_id, nick_tutorial2)\n\n\ndef nick_tutorial2(user_id, message):\n _hero = db.get_hero(user_id)\n if message.lower() == 'да':\n db.change_nick(user_id, temp[user_id]['new_nick'])\n del temp[user_id]\n bot_end_tutorial(user_id)\n if message.lower() == 'нет':\n text = '`Так назови свое настоящее имя.`'\n send_message(user_id, text, parse_mode = 'Markdown')\n set_hand(user_id, nick_tutorial)\n else :\n pass","sub_path":"RPG/modules/tutorial/start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":1850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"17213520","text":"import re\r\nfrom collections import defaultdict\r\nimport ler\r\nimport os\r\n\r\ntipo_arquivo_dict = defaultdict(str)\r\ntipo_arquivo_dict = { 'DIÁRIO ELETRÔNICO':'DE',\r\n 'Diário de Frequência Diária':'RFD',\r\n 'Diário de Notas':'RN'}\r\n\r\nre_curso = re.compile(r'-(\\d{4})-')\r\nre_disciplina = re.compile(r'[A-Z]\\d{6}')\r\nre_turma = re.compile(r'\\w{2}-\\d{4}-\\d{3}-\\d\\w{2}\\d*')\r\nre_arquivo = re.compile('|'.join(list(tipo_arquivo_dict.keys())))\r\nre_semestre = re.compile(r'\\d{4}/\\d')\r\n\r\ndef getFileName(page):\r\n cod_curso = re_curso.search(page).group(1)\r\n try:\r\n cod_disciplina = re_disciplina.search(page).group(0)\r\n except AttributeError:\r\n cod_disciplina = \"NotFound\"\r\n cod_turma = re_turma.search(page).group(0)\r\n tipo_arquivo = tipo_arquivo_dict[re_arquivo.search(page).group(0)]\r\n\r\n semestre = re_semestre.search(page).group(0)\r\n semestre = semestre.replace(\"\\n\",\"\").replace(\"/\",\".\")\r\n return '_'.join([semestre,cod_curso,cod_disciplina,tipo_arquivo,cod_turma])\r\n\r\ndef getPDFFiles(path):\r\n files = os.scandir(path) \r\n files = filter(lambda x: x.is_file and '.pdf' in x.name,files)\r\n return list(files)\r\n\r\ndef convertNames(path):\r\n files = getPDFFiles(path)\r\n total = len(list(files))\r\n print(\"Encontrados {0} arquivos\".format(total))\r\n novoNome = dict()\r\n for index, file in enumerate(files,1):\r\n print(\"[Processando {0}/{1}]: {2}\".format(index, total, file.name))\r\n try:\r\n page = ler.convert_pdf_to_txt(file)\r\n name = getFileName(page)\r\n novoNome[file] = name\r\n except:\r\n print(\"[Error]: \"+file.name)\r\n \r\n for key in filter(lambda x: 'NotFound' in novoNome[x], novoNome.keys()):\r\n name = novoNome[key]\r\n cod_turma = re_turma.search(name).group(0)\r\n cod = filter(lambda x: cod_turma in x and 'NotFound' not in x, novoNome.values())\r\n cod_disciplina = re_disciplina.search(next(cod)).group(0)\r\n novoNome[key] = novoNome[key].replace(\"NotFound\", cod_disciplina)\r\n return novoNome\r\n\r\nif __name__ == \"__main__\":\r\n path = r\".\"\r\n names = convertNames(path)\r\n i = 0\r\n for x,y in names.items():\r\n y = y + \".pdf\"\r\n print(\"[Renomeando]: {0} => {1}\".format(x.name,y))\r\n try:\r\n os.rename(x,os.path.join(path,y))\r\n except:\r\n print(\"{Error]: Não foi possível renomear o arquivo\")\r\n i+=1\r\n print(\"{0} arquivos foram renomeados\".format(i))\r\n input(\"Fim, aperte qualquer tecla para terminar\")\r\n\r\n","sub_path":"renomear.py","file_name":"renomear.py","file_ext":"py","file_size_in_byte":2567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"620770463","text":"import baostock as bs\nimport pandas as pd\nimport datetime\n\n\n#买入时机\ndef in_time(stock_pd, main_data, stock_code):\n\n\t# stock_code = stock_pd[\"code\"]\n\n\t# print(stock_code)\n\n\t#买入数值设置\n\tstock_data = main_data[main_data[\"stock_code\"]==stock_code]\n\n\tin_pbMRQ = stock_data[\"in_pbMRQ\"].values[0]\n\tin_psTTM = stock_data[\"in_psTTM\"].values[0]\n\tin_peTTM_down = stock_data[\"in_pcfNcfTTM_down\"].values[0]\n\tin_peTTM_up = stock_data[\"in_pcfNcfTTM_up\"].values[0]\n\tin_pcfNcfTTM_down = stock_data[\"in_pcfNcfTTM_down\"].values[0]\n\tin_pcfNcfTTM_up = stock_data[\"in_pcfNcfTTM_up\"].values[0]\n\n\n\t# print(float(stock_pd[\"pcfNcfTTM\"])< in_pcfNcfTTM_up)\n\n\t# and float(stock_pd[\"peTTM\"]) > in_peTTM_down \\\n\t# and float(stock_pd[\"peTTM\"]) < in_peTTM_up \\\n\tif float(stock_pd[\"pbMRQ\"]) < in_pbMRQ \\\n\t\tand float(stock_pd[\"psTTM\"]) < in_psTTM \\\n\t\tand float(stock_pd[\"pcfNcfTTM\"]) > in_pcfNcfTTM_down \\\n\t\tand float(stock_pd[\"pcfNcfTTM\"]) < in_pcfNcfTTM_up:\n\t\n\t\treturn 1\n\n\telse:\n\t\treturn 0\n\n\t# in_time = stock_pd[\n\t# \t\t\t\t(stock_pd[\"pbMRQ\"] < in_pbMRQ)\n\t# \t\t\t\t&(stock_pd[\"psTTM\"] < in_psTTM)\n\t# \t\t\t\t# &(stock_pd[\"peTTM\"]>in_peTTM_down)\n\t# \t\t\t\t# &(stock_pd[\"peTTM\"] in_pcfNcfTTM_down)\n\t# \t\t\t\t&(stock_pd[\"pcfNcfTTM\"] < in_pcfNcfTTM_up)\n\t# \t\t\t\t]\n\n\t# print(in_time)\n\n#卖出时机\ndef out_time(stock_pd, main_data, stock_code):\n\n\t#买入数值设置\n\t#pbMRQ: x>3.3\n\t#peTTM: 0 < x < 30\n\t#psTTM:x>4.2\n\t#pcfNcfTTM: 0 out_pbMRQ and float(stock_pd[\"psTTM\"]) > out_psTTM:\n\t\treturn 1\n\telse:\n\t\treturn 0\n\n\n\n#单日股价获取函数\ndef main(stock_code, today_date, today_stock_info_pd, industry):\n\t#### 获取沪深A股历史K线数据 ####\n\t# 详细指标参数,参见“历史行情指标参数”章节;“分钟线”参数与“日线”参数不同。\n\t# 分钟线指标:date,time,code,open,high,low,close,volume,amount,adjustflag\n\t# rs = bs.query_history_k_data_plus(stock_code,\n\t# \"date,code,open,high,low,close,preclose,volume,amount,adjustflag,turn,tradestatus,isST,pctChg,pbMRQ,peTTM,psTTM,pcfNcfTTM\",\n\t# start_date=today_date,\n\t# end_date=today_date,\n\t# frequency=\"d\", adjustflag=\"3\")\n\t# print('query_history_k_data_plus respond error_code:'+rs.error_code)\n\t# print('query_history_k_data_plus respond error_msg:'+rs.error_msg)\n\n\trs = bs.query_history_k_data_plus(stock_code,\n \"date,code,open,high,low,close,preclose,volume,amount,adjustflag,turn,tradestatus,isST,pctChg,pbMRQ,peTTM,psTTM,pcfNcfTTM\",\n start_date=\"2020-02-21\",\n end_date=\"2020-02-21\",\n frequency=\"d\", adjustflag=\"3\")\n\tprint('query_history_k_data_plus respond error_code:'+rs.error_code)\n\tprint('query_history_k_data_plus respond error_msg:'+rs.error_msg)\n\n\n\n\t#### 打印结果集 ####\n\tdata_list = []\n\twhile (rs.error_code == '0') & rs.next():\n\t\t# 获取一条记录,将记录合并在一起\n\t\tdata_list.append(rs.get_row_data())\n\n\t\n\tcolumns=['date', 'code', 'industry', 'open', 'high', 'low', 'close', 'preclose', 'volume', 'amount', 'adjustflag', 'turn', 'tradestatus', 'isST', 'pctChg', 'pbMRQ', 'peTTM', 'psTTM', 'pcfNcfTTM']\n\n\t# print(data_list[0])\n\t# today_data = data_list[0]\n\t# print(data_list[0])\n\t\n\t# print(data_list[0])\n\n\tif data_list:\n\t\t#插入行业标识\n\t\tdata_list[0].insert(2, industry)\n\n\t\t#追加\n\t\ttoday_stock_info_pd = today_stock_info_pd.append(pd.DataFrame(data_list, columns=columns), ignore_index=True)\n\n\n\t# today_stock_info_pd[today_stock_info_pd[\"code\"==stock_code]][\"industry\"] = industry\n\t# today_stock_info_pd = today_stock_info_pd.reindex(columns=['date', 'code', 'industry', 'open', 'high', 'low', 'close', 'preclose', 'volume', 'amount', 'adjustflag', 'turn', 'tradestatus', 'isST', 'pctChg', 'pbMRQ', 'peTTM', 'psTTM', 'pcfNcfTTM'])\n\n\treturn today_stock_info_pd\n\n\nif __name__ == '__main__':\n\n\t#### 登陆系统 ####\n\tlg = bs.login()\n\t# 显示登陆返回信息\n\tprint('login respond error_code:'+lg.error_code)\n\tprint('login respond error_msg:'+lg.error_msg)\n\n\ttoday_date = str(datetime.datetime.now().date())\n\t# today_date = \"2020-02-19\"\n\n\t# index = bs.query_history_k_data_plus(\"sz.000507\",\n\t# \"date,code,open,high,low,close,preclose,volume,amount,adjustflag,turn,tradestatus,isST,pctChg,pbMRQ,peTTM,psTTM,pcfNcfTTM\",\n\t# start_date=today_date,\n\t# end_date=today_date,\n\t# frequency=\"d\", adjustflag=\"3\")\n\n\t# print(index.fields)\n\ttoday_stock_info_pd = pd.DataFrame(columns=['date', 'code','industry', 'open', 'high', 'low', 'close', 'preclose', 'volume', 'amount', 'adjustflag', 'turn', 'tradestatus', 'isST', 'pctChg', 'pbMRQ', 'peTTM', 'psTTM', 'pcfNcfTTM'])\n\n\t# stock_code_list = [\"sz.000507\", \"sz.002269\"]\n\n\tall_stock_code_pd = pd.read_csv(\"./股票关键数据/true.csv\", encoding=\"gbk\")\n\tstock_code_list = all_stock_code_pd[\"stock_code\"].values\n\n\tprint(\"共有:\", len(stock_code_list))\n\tcount = 0\n\tfor stock_code in stock_code_list:\n\t\tprint(stock_code)\n\t\tindustry = all_stock_code_pd[all_stock_code_pd[\"stock_code\"] == stock_code][\"stock_industry\"].values[0]\n\t\tprint(\"第:\",count)\n\t\tcount += 1\n\t\ttoday_stock_info_pd = main(stock_code, today_date, today_stock_info_pd, industry)\n\n\t#### 登出系统 ####\n\tbs.logout()\n\n\t#### 结果集输出到csv文件 #### \n\tprint(today_stock_info_pd)\n\ttoday_stock_info_pd.to_csv(\"./每日股价表/\"+today_date+\".csv\", index=False, encoding=\"gbk\")\n\n\n\n\t# today_stock_info_pd = today_stock_info_pd.reset_index(drop=True)\n\t# print(type(today_stock_info_pd.loc[0]))\n\n\t#获取行数\n\trow_count = today_stock_info_pd.shape[0]\n\n\tin_stock_pd = pd.DataFrame(columns=['date', 'code', 'industry', 'open', 'high', 'low', 'close', 'preclose', 'volume', 'amount', 'adjustflag', 'turn', 'tradestatus', 'isST', 'pctChg', 'pbMRQ', 'peTTM', 'psTTM', 'pcfNcfTTM'])\n\tout_stock_pd = pd.DataFrame(columns=['date', 'code', 'industry', 'open', 'high', 'low', 'close', 'preclose', 'volume', 'amount', 'adjustflag', 'turn', 'tradestatus', 'isST', 'pctChg', 'pbMRQ', 'peTTM', 'psTTM', 'pcfNcfTTM'])\n\n\n\t# main_data = pd.read_csv(\"./股票关键数据/true.csv\", encoding=\"gbk\")\n\tmain_data = all_stock_code_pd\n\n\t#买入时机\n\tfor i in range(0, row_count):\n\n\t\t#如果符合购买条件,则加入购买pd\n\t\t# print(today_stock_info_pd.loc[i][\"close\"])\n\n\t\tin_stock_temp_pd = today_stock_info_pd.loc[i]\n\t\t# print(\"in\",type(in_stock_temp_pd))\n\t\tstock_code = in_stock_temp_pd[\"code\"]\n\n\t\t#判断买入时机\n\t\tif in_time(in_stock_temp_pd, main_data, stock_code) == 1:\n\t\t\tin_stock_temp_pd[\"industry\"] = main_data[main_data[\"stock_code\"] == stock_code][\"stock_industry\"].values[0]\n\t\t\t# print(in_stock_temp_pd)\n\t\t\tin_stock_pd = in_stock_pd.append(in_stock_temp_pd, ignore_index=True)\n\t\t\tin_stock_pd.to_csv(\"./建议买入/\"+today_date+\".csv\", index=False, encoding=\"gbk\")\n\t# print(in_stock_pd)\n\n\n\t#判断卖出时机\n\tmy_stock_pd = pd.read_csv(\"./已买入/stock_code.csv\")\n\n\t# print(my_stock_pd)\n\n\tmy_sotck_list = my_stock_pd[\"stock_code\"].values\n\t# print(my_sotck_list)\n\n\tfor my_stock in my_sotck_list:\n\n\t\t# print(today_stock_info_pd[today_stock_info_pd[\"code\"] == my_stock])\n\n\t\tout_stock_temp_pd = today_stock_info_pd[today_stock_info_pd[\"code\"] == my_stock].copy()\n\n\t\t# print(\"out\",type(out_stock_temp_pd))\n\t\tstock_code = my_stock\n\n\t\tif out_stock_temp_pd.empty == False:\n\t\t\tif out_time(out_stock_temp_pd, main_data, stock_code) == 1:\n\t\t\t\tout_stock_temp_pd[\"industry\"] = main_data[main_data[\"stock_code\"] == stock_code][\"stock_industry\"].values[0]\n\t\t\t\t# out_stock_pd = out_stock_pd.append(out_stock_temp_pd, ignore_index=True)\n\t\t\t\tout_stock_temp_pd = out_stock_temp_pd.reindex(columns=['date', 'code', 'industry', 'open', 'high', 'low', 'close', 'preclose', 'volume', 'amount', 'adjustflag', 'turn', 'tradestatus', 'isST', 'pctChg', 'pbMRQ', 'peTTM', 'psTTM', 'pcfNcfTTM'])\n\t\t\t\tout_stock_pd = pd.concat([out_stock_pd,out_stock_temp_pd])\n\n\t\t\t\tout_stock_pd.to_csv(\"./建议卖出/\"+today_date+\".csv\", index=False, encoding=\"gbk\")\n\n\n\tif in_stock_pd.empty:\n\t\tprint(\"今日无建议买入\")\n\n\tif out_stock_pd.empty:\n\t\tprint(\"今日无建议卖出\")\n\n\n\n\n\n\t\n\n\n\n\n","sub_path":"每日统计买或卖股票 - 副本.py","file_name":"每日统计买或卖股票 - 副本.py","file_ext":"py","file_size_in_byte":8430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"199483860","text":"from django.shortcuts import render\nimport numpy as np\nfrom . import lab\n\n# Create your views here.\n\n\ndef index(request):\n b_c, answer, alpha_js, minmax, max_w = lab.result()\n context = {\n 'b_c': [i.tolist() for i in b_c],\n 'answer': answer,\n 'alpha_js': alpha_js,\n 'minmax': minmax,\n 'max_w': max_w\n }\n return render(request, 'kms/index.html', context)\n","sub_path":"kms/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"564143854","text":"\"\"\"Dual Moving Average Crossover algorithm.\nThis algorithm buys apple once its short moving average crosses\nits long moving average (indicating upwards momentum) and sells\nits shares once the averages cross again (indicating downwards\nmomentum).\n\"\"\"\n\nfrom zipline.api import order_target, record, symbol\nfrom zipline.finance import commission, slippage\n\n\ndef initialize(context):\n context.sym = symbol('AMZN')\n context.i = 0\n\n # Explicitly set the commission/slippage to the \"old\" value until we can\n # rebuild example data.\n # github.com/quantopian/zipline/blob/master/tests/resources/\n # rebuild_example_data#L105\n context.set_commission(commission.PerShare(cost=.0075, min_trade_cost=1.0))\n context.set_slippage(slippage.VolumeShareSlippage())\n\n\ndef handle_data(context, data):\n # Skip first 300 days to get full windows\n context.i += 1\n if context.i < 10:\n return\n\n # Compute averages\n # history() has to be called with the same params\n # from above and returns a pandas dataframe.\n short_mavg = data.history(context.sym, 'price', 3, '1d').mean()\n long_mavg = data.history(context.sym, 'price', 10, '1d').mean()\n\n # Trading logic\n if short_mavg > long_mavg:\n # order_target orders as many shares as needed to\n # achieve the desired number of shares.\n order_target(context.sym, 100)\n elif short_mavg < long_mavg:\n order_target(context.sym, 0)\n\n # Save values for later inspection\n record(AMZN=data.current(context.sym, \"price\"),\n short_mavg=short_mavg,\n long_mavg=long_mavg)\n","sub_path":"Algorithms/dual_moving_avg.py","file_name":"dual_moving_avg.py","file_ext":"py","file_size_in_byte":1596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"364547795","text":"def selection_sort(sequence, ascending=True, inplace=True):\n _get_next = _find_smallest if ascending else _find_largest\n if inplace:\n for i in range(len(sequence)):\n _next = _get_next(sequence[i:]) + i\n sequence[i], sequence[_next] = sequence[_next], sequence[i]\n return None\n else:\n new_sequence = [None] * len(sequence)\n for i in range(len(sequence)):\n _next = _get_next(sequence)\n new_sequence[i] = sequence.pop(_next)\n return new_sequence\n\n\ndef _find_smallest(sequence):\n smallest = sequence[0]\n smallest_index = 0\n for index, element in enumerate(sequence):\n if element < smallest:\n smallest = element\n smallest_index = index\n return smallest_index\n\n\ndef _find_largest(sequence):\n largest = sequence[0]\n largest_index = 0\n for index, element in enumerate(sequence):\n if element > largest:\n largest = element\n largest_index = index\n return largest_index\n\n\nif __name__ == \"__main__\":\n assert selection_sort([i for i in range(10)]) == None, 'inplace'\n assert selection_sort([3, -1, 8, 92, 9, -16], inplace=False) == [-16, -1, 3, 8, 9, 92], 'ascending'\n assert selection_sort([i for i in range(10)], ascending=False, inplace=False) == [i for i in range(9, -1, -1)], 'descending'\n","sub_path":"sort.py","file_name":"sort.py","file_ext":"py","file_size_in_byte":1359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"99202563","text":"import sure # noqa # pylint: disable=unused-import\n\nfrom moto.ssm.models import ParameterDict\n\n\ndef test_simple_setget():\n store = ParameterDict(\"accnt\", \"region\")\n store[\"/a/b/c\"] = \"some object\"\n\n store.get(\"/a/b/c\").should.equal(\"some object\")\n\n\ndef test_get_none():\n store = ParameterDict(\"accnt\", \"region\")\n\n store.get(None).should.equal(None)\n\n\ndef test_get_aws_param():\n store = ParameterDict(\"accnt\", \"region\")\n\n p = store[\"/aws/service/global-infrastructure/regions/us-west-1/longName\"]\n p.should.have.length_of(1)\n p[0].value.should.equal(\"US West (N. California)\")\n\n\ndef test_iter():\n store = ParameterDict(\"accnt\", \"region\")\n store[\"/a/b/c\"] = \"some object\"\n\n \"/a/b/c\".should.be.within(store)\n \"/a/b/d\".shouldnt.be.within(store)\n\n\ndef test_iter_none():\n store = ParameterDict(\"accnt\", \"region\")\n None.shouldnt.be.within(store)\n\n\ndef test_iter_aws():\n store = ParameterDict(\"accnt\", \"region\")\n\n \"/aws/service/global-infrastructure/regions/us-west-1/longName\".should.be.within(\n store\n )\n\n\ndef test_get_key_beginning_with():\n store = ParameterDict(\"accnt\", \"region\")\n store[\"/a/b/c\"] = \"some object\"\n store[\"/b/c/d\"] = \"some other object\"\n store[\"/a/c/d\"] = \"some third object\"\n\n begins_with_ab = list(store.get_keys_beginning_with(\"/a/b\", recursive=False))\n begins_with_ab.should.equal([\"/a/b/c\"])\n\n begins_with_a = list(store.get_keys_beginning_with(\"/a\", recursive=False))\n begins_with_a.should.equal([])\n\n begins_with_a_recursive = list(store.get_keys_beginning_with(\"/a\", recursive=True))\n set(begins_with_a_recursive).should.equal({\"/a/b/c\", \"/a/c/d\"})\n\n\ndef test_get_key_beginning_with_aws():\n \"\"\"\n ParameterDict should load the default parameters if we request a key starting with '/aws'\n :return:\n \"\"\"\n store = ParameterDict(\"accnt\", \"region\")\n\n uswest_params = set(\n store.get_keys_beginning_with(\n \"/aws/service/global-infrastructure/regions/us-west-1\", recursive=False\n )\n )\n uswest_params.should.equal(\n {\n \"/aws/service/global-infrastructure/regions/us-west-1\",\n \"/aws/service/global-infrastructure/regions/us-west-1/domain\",\n \"/aws/service/global-infrastructure/regions/us-west-1/geolocationCountry\",\n \"/aws/service/global-infrastructure/regions/us-west-1/geolocationRegion\",\n \"/aws/service/global-infrastructure/regions/us-west-1/longName\",\n \"/aws/service/global-infrastructure/regions/us-west-1/partition\",\n }\n )\n\n\ndef test_ssm_parameter_from_unknown_region():\n store = ParameterDict(\"accnt\", \"region\")\n list(\n store.get_keys_beginning_with(\n \"/aws/service/ami-amazon-linux-latest\", recursive=False\n )\n ).should.equal([])\n","sub_path":"tests/test_ssm/test_ssm_parameterstore.py","file_name":"test_ssm_parameterstore.py","file_ext":"py","file_size_in_byte":2807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"386486481","text":"import os\n\nimport tensorflow as tf\nimport numpy as np\n\n\nclass DepthMapNetwork:\n\n def __init__(self, input_shape, output_shape):\n self.ckpt_path = os.path.join('.', os.environ['CKPT_DIR'],\n '{}'.format(self.__class__.__name__))\n\n self.graph = tf.Graph()\n with self.graph.as_default():\n self.input = tf.placeholder(tf.float32,\n shape=(None, ) + input_shape)\n self.output = tf.layers.conv2d(self.input, 1, 1)\n self.saver = tf.train.Saver()\n\n def __call__(self, dataset):\n with tf.Session(graph=self.graph) as s:\n s.run(tf.global_variables_initializer())\n\n results = s.run(self.output,\n {self.input: np.array([d.img for d in dataset])})\n self.saver.save(s, str(self.ckpt_path))\n for i, result in enumerate(results):\n dataset[i].result = result.squeeze()\n","sub_path":"src/networks.py","file_name":"networks.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"49408080","text":"from backpack import *\nimport pytest\n \n \ndef test_backpack():\n testpack = Backpack(\"Barry\", \"black\") # Instantiate the object.\n if testpack.name != \"Barry\": # Test an attribute.\n print(\"Backpack.name assigned incorrectly\")\n for item in [\"pencil\", \"pen\", \"paper\", \"computer\"]:\n testpack.put(item) # Test a method.\n print(\"Contents:\", testpack.contents)\n # ...","sub_path":"Probsets/Comp/Probset1/test_backpack.py","file_name":"test_backpack.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"388689605","text":"#!/usr/bin/env python3\nimport json\nimport utils.auto_logger as l\nimport utils.auto_utils as utils\nimport global_vars as gv\n\nclass Json:\n @classmethod\n def make_pretty(self, my_json):\n return (json.dumps(my_json, indent=4, sort_keys=False))\n\n @classmethod\n def writer_full_path(self, fname, data):\n str = self.make_pretty(data)\n try:\n with open(fname, 'w') as f:\n json_data=f.write(str)\n except Exception as err:\n l.logger.error(\"exception failure\")\n l.runlogs_logger.error(\"exception failure\")\n gv.fake_assert()\n\n @classmethod\n def writer(self, fname, data, path=\"data\", absolute_path=None, logPath=False):\n fnameJson = utils.get_path(fname, path, \"json\")\n self.writer_full_path(fnameJson, data)\n if logPath:\n l.runlogs_logger.info(\"created: {}\".format(fnameJson))\n l.logger.info(\"created: {}\".format(fnameJson))\n\n @classmethod\n def reader(self, fname, path=\"data\"):\n data = None\n try :\n fnameJson = utils.get_path(fname, path, \"json\")\n json_data=open(fnameJson).read()\n # json_data = json_data.replace(\"\\n\",\"\")\n data = json.loads(json_data)\n #l.logger.debug(\"data: {}\".format(data))\n except Exception as err:\n l.logger.error(\"exception failure fname:{} {}\".format(fname, fnameJson))\n l.runlogs_logger.error(\"exception failure fname:{} {}\".format(fname, fnameJson))\n gv.fake_assert()\n\n return data\n\n","sub_path":"utils/_json.py","file_name":"_json.py","file_ext":"py","file_size_in_byte":1569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"539599558","text":"\"\"\" Camera Obscura - Post-processing\nAUTHOR: VISHWA MUDIGONDA\nCLASS: COMPUTATIONAL PHOTOGRAPHY\nDATE: 1/28/2020\n\nThis file has a number of functions that you need to fill out in order to\ncomplete the assignment. Please write the appropriate code, following the\ninstructions on which functions you may or may not use.\n\nNotes\n-----\nYou are only allowed to use cv2.imread, c2.imwrite and cv2.copyMakeBorder from \ncv2 library. You should implement convolution on your own.\nGENERAL RULES:\n 1. DO NOT INCLUDE code that saves, shows, displays, writes the image that\n you are being passed in. Do that on your own if you need to save the images\n but these functions should NOT save the image to disk.\n 2. DO NOT import any other libraries aside from those that we provide.\n You should be able to complete the assignment with the given libraries\n (and in many cases without them).\n 3. DO NOT change the format of this file. You may NOT change function\n type signatures (not even named parameters with defaults). You may add\n additional code to this file at your discretion, however it is your\n responsibility to ensure that the autograder accepts your submission.\n 4. This file has only been tested in the course virtual environment.\n You are responsible for ensuring that your code executes properly in the\n virtual machine environment, and that any changes you make outside the\n areas annotated for student code do not impact your performance on the\n autograder system.\n\"\"\"\nimport numpy as np\nimport cv2\n\ndef applyConvolution(image, filter):\n \"\"\"Apply convolution operation on image with the filter provided. \n Pad the image with cv2.copyMakeBorder and cv2.BORDER_REPLICATE to get an output image of the right size\n Parameters\n ----------\n image : numpy.ndarray\n A numpy array of dimensions (HxWx3) and type np.uint8\n filter: numpy.ndarray\n A numpy array of dimensions (N,M) and type np.float64\n Returns\n -------\n output : numpy.ndarray\n A numpy array of dimensions (HxWx3) and type np.uint8\n \"\"\"\n # WRITE YOUR CODE HERE.\n #https://books.google.com/books?id=bQ4dGTfo8-sC&pg=PA117&lpg=PA117&dq=%5B%5B0,-1,0%5D,%5B-1,5,-1%5D,%5B0,-1,0%5D%5D+sharpen+image&source=bl&ots=YxTBmisy_Y&sig=ACfU3U10v1dp24xHHTjBH5jGudhb1_qwSA&hl=en&sa=X&ved=2ahUKEwiN-vG15Z_oAhVkmeAKHd5dBukQ6AEwA3oECAYQAQ#v=onepage&q=%5B%5B0%2C-1%2C0%5D%2C%5B-1%2C5%2C-1%5D%2C%5B0%2C-1%2C0%5D%5D%20sharpen%20image&f=false\n background_color = [0,0,0]\n image = cv2.copyMakeBorder(image,15,15,15,15,cv2.BORDER_REPLICATE, value=background_color)\n\n imgWidth, imgHeight = image.shape[1], image.shape[0]\n filterW, filterH = filter.shape[1], filter.shape[0]\n imgShape = image.shape[2]\n completeImage = np.zeros((imgHeight,imgWidth,imgShape), dtype=np.uint8)\n \n for y in range(imgHeight):\n for x in range(imgWidth):\n for color in range(imgShape):\n if(x == 0 or y == 0 or y >= imgHeight-1 or x >= imgWidth-1):\n r = 0\n elif ((x < (imgWidth-2)) and (y < (imgHeight-2))):\n focusedMatrix = [[image[y-1,x-1,color], image[y-1,x,color], image[y-1,x+1,color]], \n [image[y,x-1,color], image[y,x,color], image[y,x+1,color]], \n [image[y+1,x-1,color], image[y+1,x,color], image[y+1,x+1,color]]]\n pixelValue = ((focusedMatrix*filter).sum())\n if(pixelValue > 255):\n pixelValue = 255\n completeImage[y][x][color] = pixelValue\n return completeImage\n \n raise NotImplementedError\n\ndef applyMedianFilter(image, filterdimensions):\n \"\"\"Apply median filter on image after padding it with zeros around the edges using cv2.copyMakeBorder\n Parameters\n ----------\n image : numpy.ndarray\n A numpy array of dimensions (HxWx3) and type np.uint8\n filterdimensions: list\n List of length 2 that represents the filter size M x N\n Returns\n -------\n output : numpy.ndarray\n A numpy array of dimensions (HxWx3) and type np.uint8\n \"\"\"\n M, N = filterdimensions\n\n# WRITE YOUR CODE HERE.\n background_color = [0,0,0]\n image = cv2.copyMakeBorder(image,15,15,15,15,cv2.BORDER_CONSTANT,value=background_color)\n #switch height and width\n imgHeight, imgWidth = image.shape[0], image.shape[1]\n imgShape = image.shape[2]\n completeImage = np.zeros((imgHeight, imgWidth, imgShape), dtype=np.uint8)\n \n for y in range(imgHeight):\n for x in range(imgWidth):\n for color in range(imgShape):\n #y == imgHeight and x == imgWidth\n if(x == 0 or y == 0 or y == imgHeight or x == imgWidth):\n r = 0\n elif ((x < (imgWidth-2)) and (y < (imgHeight-2))):\n #Mask and Image Pixels\n focusedMatrix = [image[y-1, x-1, color], image[y-1, x, color], image[y-1, x+1, color],\n image[y, x-1, color], image[y, x, color], image[y, x+1, color],\n image[y+1, x-1, color], image[y+1, x, color], image[y+1, x+1, color]]\n sortedMatrix = sorted(focusedMatrix)\n completeImage[y][x][color] = sortedMatrix[4]\n \n completeImage = completeImage.astype(int)\n\n return completeImage\n\n raise NotImplementedError\n\ndef applyFilter1(image):\n \"\"\"Filter noise from the image by using applyConvolution() and an averaging filter\n Parameters\n ----------\n image : numpy.ndarray\n A numpy array of dimensions (HxWx3) and type np.uint8\n \n Returns\n -------\n output : numpy.ndarray\n A numpy array of dimensions (HxWx3) and type np.uint8\n \"\"\"\n # WRITE YOUR CODE HERE.\n #Takes the average of all 9 pixels in the 3x3 grid. \n avgMatrix = np.array(([[1,1,1],\n [1,1,1], \n [1,1,1]]), dtype=np.float64)\n returnarray = applyConvolution(image, avgMatrix)\n completeImage = np.divide(returnarray,9)\n completeImage = completeImage.astype(int)\n\n return completeImage\n\n raise NotImplementedError\n\ndef applyFilter2(image):\n \"\"\"Filter noise from the image by using applyConvolution() and a gaussian filter\n Parameters\n ----------\n image : numpy.ndarray\n A numpy array of dimensions (HxWx3) and type np.uint8\n \n Returns\n -------\n output : numpy.ndarray\n A numpy array of dimensions (HxWx3) and type np.uint8\n \"\"\"\n # WRITE YOUR CODE HERE.\n\n gaussianMatrix = np.array([[.0625,.125,.0625], \n [.125,.25,.125], \n [.0625,.125,.0625]], dtype=np.float64)\n \n completeImage = applyConvolution(image, gaussianMatrix)\n completeImage = completeImage.astype(int)\n completeImage.dtype\n \n return completeImage\n\n raise NotImplementedError\n \ndef sharpenImage(image):\n \"\"\"Sharpen the image. Call applyConvolution with an image sharpening kernel\n Parameters\n ----------\n image : numpy.ndarray\n A numpy array of dimensions (HxWx3) and type np.uint8\n \n Returns\n -------\n output : numpy.ndarray\n A numpy array of dimensions (HxWx3) and type np.uint8\n \"\"\"\n # WRITE YOUR CODE HERE.\n # construct a sharpening filter\n #Laplacian uses the following matrix operator (Digital Image Processing and Pattern Recognition)\n sharpenedArr = np.array([[0,-1,0], \n [-1,5,-1], \n [0,-1,0]], dtype=np.float64)\n completeImage = applyConvolution(image, sharpenedArr)\n completeImage = completeImage.astype(int)\n\n return completeImage\n\n raise NotImplementedError\n\nif __name__ == \"__main__\":\n \n # Reading an image in default mode \n img = cv2.imread('co_image_0.jpg') \n imS = cv2.resize(img, (960, 540))\n matrix = [3,3]\n \n# MEDIAN\n# print(\"MEDIAN TEST\")\n# medianArray = applyMedianFilter(imS,matrix)\n# cv2.imwrite('medianPic.jpg', medianArray)\n applyMedianFilter(imS, matrix)\n\n# ApplyFilter1 - AVERAGE\n# print(\"AVERAGE TEST\")\n# averageArray = applyFilter1(imS)\n# cv2.imwrite('averagePic.jpg', averageArray)\n applyFilter1(imS)\n \n# ApplyFilter2 - GAUSSIAN\n# print(\"GAUSSIAN\")\n# gausArray = applyFilter2(imS)\n# cv2.imwrite('gaussianPic.jpg', gausArray)\n applyFilter2(imS)\n \n# SHARPEN IMAGE\n# print(\"SHARPEN IMAGE\")\n# sharpenArray = sharpenImage(imS)\n# cv2.imwrite('sharpenPic.jpg', sharpenArray)\n sharpenImage(imS)\n \n pass","sub_path":"assignment1.py","file_name":"assignment1.py","file_ext":"py","file_size_in_byte":8639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"448298808","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# Copyright 2011-2013, Yu-chen Kao\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n__author__ = \"Yu-chen Kao (cybeliak)\"\n\nfrom AsiyahGashmi.cAurora2.common import *\nimport gzip\n\nclass PrepareLex(Aurora2Base):\n\n @nz.hook('define_things')\n def init(self):\n self.o.define('lex', ag.things.Lexicon)\n\n @nz.chksum\n @nz.hook('run')\n def go(self):\n # Prepare seg and utt2spk\n with gzip.open(self.o.lex[\"lex.gz\"], \"wt\", encoding=\"utf-8\") as fpw:\n fpw.write(\"1 1.0 en_US W AH N\\n\")\n fpw.write(\"2 1.0 en_US T UW\\n\")\n fpw.write(\"3 1.0 en_US TH R IY\\n\")\n fpw.write(\"4 1.0 en_US F AO R\\n\")\n fpw.write(\"5 1.0 en_US F AY V\\n\")\n fpw.write(\"6 1.0 en_US S IH K S\\n\")\n fpw.write(\"7 1.0 en_US S EH V AH N\\n\")\n fpw.write(\"8 1.0 en_US EY T\\n\")\n fpw.write(\"9 1.0 en_US N AY N\\n\")\n fpw.write(\"Z 1.0 en_US Z IY R OW\\n\")\n fpw.write(\"O 1.0 en_US OW\\n\")\n\n self.o.lex.param[\"phoneset\"] = {'en_US': 'cmudigit'}\n","sub_path":"AsiyahGashmi/cAurora2/prepare_lex.py","file_name":"prepare_lex.py","file_ext":"py","file_size_in_byte":1590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"261885845","text":"class Interger:\n @classmethod\n def input(cls):\n error_tag=0\n input_num=input('请输入一个整数:')\n if not input_num.isdigit():\n error_tag=1\n print('Invalid literal for init() with base 10:%s'%input_num)\n raise ValueError('Invalid literal for init() with base 10:%s'%input_num)\n if int(input_num)<-2147483648 or int(input_num)>2147483647:\n error_tag=1\n print('Error Msg: :%s -越界'%input_num)\n raise SlopOverError('Error Msg: :%s -越界'%input_num)\n return int(input_num) if error_tag==0 else cls.input()\n\nclass SlopOverError(BaseException):\n def __init__(self,number,message):\n self.number=number\n self.message=message\n\n def __str__(self):\n return '<'+self.message+str(self,number)+'>'\n\nInterger.input()\n","sub_path":"19.py","file_name":"19.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"372833743","text":"\nimport argparse\nimport os\nimport numpy as np\nimport torch,torchvision\nimport torch.nn as nn\nfrom torchvision.utils import save_image\nfrom torch.autograd import Variable\nparser =argparse.ArgumentParser()\nparser.add_argument('--data_path',default ='D:/Desktop/celeba')\nparser.add_argument('--img_size',type=int,default=32)\nparser.add_argument('--batchsize',type=int,default=128)\nparser.add_argument('--netD',default='',)\nparser.add_argument('--netG',default='',)\nparser.add_argument('--lr', type=float, default=0.0002, help='learning rate, default=0.0002')\nparser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam. default=0.5')\nparser.add_argument('--z_num',type=int,default=100)\nopt =parser.parse_args()\n\ntransforms = torchvision.transforms.Compose([\n torchvision.transforms.Resize((opt.img_size,opt.img_size)),\n torchvision.transforms.ToTensor(),\n torchvision.transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)), ])\ndataset = torchvision.datasets.ImageFolder(opt.data_path, transform=transforms)\ndataloader = torch.utils.data.DataLoader(\n dataset=dataset,\n batch_size=opt.batchsize,\n shuffle=True,\n drop_last=True,\n)\ndef weights_init(m):\n classname =m.__class__.__name__\n if classname.find('Conv')!= -1:\n m.weight.data.normal_(0.0,0.02)\n elif classname.find('BatchNorm')!= -1:\n m.weight.data.normal_(1.0,0.02)\n m.bias.data.fill_(0)\nclass Generator(nn.Module):\n def __init__(self):\n super(Generator, self).__init__()\n\n self.init_size = opt.img_size // 4\n self.l1 = nn.Sequential(nn.Linear(opt.z_num, 128*self.init_size**2))\n\n self.conv_blocks = nn.Sequential(\n nn.Upsample(scale_factor=2),\n nn.Conv2d(128, 128, 3, stride=1, padding=1),\n nn.BatchNorm2d(128, 0.8),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Upsample(scale_factor=2),\n nn.Conv2d(128, 64, 3, stride=1, padding=1),\n nn.BatchNorm2d(64, 0.8),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Conv2d(64, 3, 3, stride=1, padding=1),\n nn.Tanh()\n )\n\n def forward(self, z):\n out = self.l1(z)\n out = out.view(out.shape[0], 128, self.init_size, self.init_size)\n img = self.conv_blocks(out)\n return img\n\nclass Discriminator(nn.Module):\n def __init__(self):\n super(Discriminator, self).__init__()\n\n def discriminator_block(in_filters, out_filters, bn=True):\n block = [ nn.Conv2d(in_filters, out_filters, 3, 2, 1),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Dropout2d(0.25)]\n if bn:\n block.append(nn.BatchNorm2d(out_filters, 0.8))\n return block\n\n self.model = nn.Sequential(\n *discriminator_block(3, 16, bn=False),\n *discriminator_block(16, 32),\n *discriminator_block(32, 64),\n *discriminator_block(64, 128),\n )\n\n # The height and width of downsampled image\n ds_size = opt.img_size // 2**4\n self.adv_layer = nn.Linear(128*ds_size**2, 1)\n\n def forward(self, img):\n out = self.model(img)\n out = out.view(out.shape[0], -1)\n validity = self.adv_layer(out)\n\n return validity\n# 关键处 MSELOSS 替代 BECLOSS\nadversarial_loss = torch.nn.MSELoss()\n#adversarial_loss = nn.BCELoss()\n\nNet_G =Generator()\nNet_D =Discriminator()\nNet_D.cuda()\nNet_G.cuda()\nadversarial_loss.cuda()\nNet_G.apply(weights_init)\nNet_D.apply(weights_init)\n#模型的读取\nif opt.netD != '':\n Net_D.load_state_dict(torch.load(opt.netD))\nif opt.netG !='':\n Net_G.load_state_dict(torch.load(opt.netG))\n\n\noptimizerD = torch.optim.Adam(Net_D.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))\noptimizerG = torch.optim.Adam(Net_G.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))\nFloatTensor =torch.cuda.FloatTensor\n\nfor epoch in range(200):\n for i,(imgs,label) in enumerate(dataloader):\n valid = Variable(FloatTensor(imgs.shape[0], 1).fill_(1.0), requires_grad=False)\n fake = Variable(FloatTensor(imgs.shape[0], 1).fill_(0.0), requires_grad=False)\n real_imgs =Variable(imgs.type(FloatTensor))\n # train errD_real\n Net_D.zero_grad()\n errD_real = adversarial_loss(Net_D(real_imgs), valid)\n errD_real.backward()\n # train errD_fake\n z = Variable(FloatTensor(np.random.normal(0, 1, (opt.batchsize, opt.z_num))))\n fake_imgs =Net_G(z)\n errD_fake = adversarial_loss(Net_D(fake_imgs.detach()), fake)\n # 这里的detach()函数非常的重要 \n errD_fake.backward()\n errD = errD_real + errD_fake\n optimizerD.step()\n #train errG\n Net_G.zero_grad()\n errG = adversarial_loss(Net_D(fake_imgs), valid)\n errG.backward()\n optimizerG.step()\n print('[%d/%d][%d/%d] Loss_D: %.4f Loss_G: %.4f'\n % (epoch, 200, i, len(dataloader),errD.item(), errG.item()))\n if i % 10 == 0:\n if not os.path.exists('../../output/LSGAN_CelebA'):\n os.mkdir('../../output/LSGAN_CelebA')\n save_image(fake_imgs.data[:25], '../../output/LSGAN_CelebA/%d_gen.png' % (epoch * len(dataloader) + i), nrow=5, normalize=True)\n save_image(imgs.data[:25], '../../output/LSGAN_CelebA/%d_real.png' % (epoch * len(dataloader) + i), nrow=5, normalize=True)\n # 模型的保存\n torch.save(Net_G.state_dict(), '../../output/LSGAN_CelebA/netG_epoch_%d.pth' % (epoch))\n torch.save(Net_D.state_dict(), '../../output/LSGAN_CelebA/netD_epoch_%d.pth' % (epoch))\n\n","sub_path":"LSGAN/LSGAN.py","file_name":"LSGAN.py","file_ext":"py","file_size_in_byte":5599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"152909310","text":"import MeCab\r\nfrom wordcloud import WordCloud\r\nfrom collections import Counter\r\n\r\ndef paste_cloud(title, paste_data):\r\n tagger = MeCab.Tagger()\r\n\r\n text = paste_data\r\n tagger.parse('')\r\n node = tagger.parseToNode(text)\r\n \r\n word_list = []\r\n while node:\r\n word_type = node.feature.split(',')[0]\r\n stop_words = ['てる', 'いる', 'なる', 'れる', 'する', 'ある', 'こと', 'もの', 'これ', 'さん', 'して', '的', 'ん',\\\r\n 'くれる', 'やる', 'くださる', 'そう', 'せる', 'した', '思う', 'それ', 'ここ', 'ちゃん', 'くん', 'ため', '', \\\r\n 'て','に','を','は','の', 'が', 'と', 'た', 'し', 'で', 'ない', 'も', 'な', 'い', 'か', 'ので', 'よう', 'れ','さ','なっ']\r\n if word_type == '名詞' and node.surface not in stop_words:\r\n word_list.append(node.surface)\r\n node = node.next\r\n \r\n word_chain = ' '.join(word_list)\r\n \r\n wc = WordCloud(background_color='white', font_path='/Library/Fonts/Arial Unicode.ttf', width=800, height=800, stopwords=stop_words)\r\n wc.generate(word_chain)\r\n wc.to_file('static/' + title + '.png')\r\n result_path = title + '.png'\r\n\r\n counter = Counter(word_list)\r\n top_20 = counter.most_common(20)\r\n print(top_20)\r\n\r\n return result_path\r\n\r\nif __name__ == '__main__':\r\n title = '蜘蛛の糸'\r\n paste_data = '''ある日の事でございます。御釈迦様は極楽の蓮池のふちを、独りでぶらぶら御歩きになっていらっしゃいました。\r\n 池の中に咲いている蓮の花は、みんな玉のようにまっ白で、そのまん中にある金色の蕊からは、何とも云えない好い匂が、\r\n 絶間なくあたりへ溢れて居ります。極楽は丁度朝なのでございましょう。\r\n やがて御釈迦様はその池のふちに御佇みになって、水の面を蔽っている蓮の葉の間から、ふと下の容子を御覧になりました。\r\n この極楽の蓮池の下は、丁度地獄の底に当って居りますから、水晶のような水を透き徹して、三途の河や針の山の景色が、\r\n 丁度覗き眼鏡を見るように、はっきりと見えるのでございます。'''\r\n paste_cloud(title, paste_data)\r\n","sub_path":"paste_cloud.py","file_name":"paste_cloud.py","file_ext":"py","file_size_in_byte":2317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"424825180","text":"from tqdm import tqdm\nfrom functools import lru_cache\n\ndef hundreds(x):\n return int(str(x // 100)[-1])\n\n\ndef cell(serial, p):\n x, y = p\n rack_id = x + 10\n level = (rack_id * y + serial) * rack_id\n return hundreds(level) - 5\n\n\ndef grid(w, h, sx=0, sy=0):\n for y in range(w):\n for x in range(h):\n yield sx + x, sy + y\n\n@lru_cache()\ndef compute_power_grid(serial, size=300):\n power_grid = {}\n for x, y in grid(size, size):\n p = x+1, y+1\n power_grid[p] = cell(serial, p)\n return power_grid\n\ndef power_levels(levels, power_grid, kernel=3, size=300):\n for x, y in grid(size - kernel, size - kernel):\n levels[(x+1, y+1, kernel)] = sum(power_grid[(x+1 + a, y+1 + b)] for a, b in grid(kernel, kernel))\n return levels\n\ndef highest(levels):\n return max((l, c) for c, l in levels.items())\n\n#print(cell(71, (101,153)))\n#print(18, highest(power_levels(18)))\n#print(42, highest(power_levels(42)))\n\n# 1\n#print(1955, highest(power_levels(1955)))\n\nfrom collections import defaultdict\n\n@lru_cache()\ndef compute_sat(serial, size=300):\n sat = defaultdict(int)\n for x, y in grid(size + 1, size + 1):\n sat[(x, y)] = \\\n cell(serial, (x, y)) + \\\n sat[(x - 1, y)] + \\\n sat[(x, y - 1)] - \\\n sat[(x - 1, y - 1)]\n return dict(sat)\n\ndef power(serial, x, y, s):\n sat = compute_sat(serial)\n return sat[(x + s - 1, y + s - 1)] + sat[(x - 1, y - 1)] - sat[(x + s - 1, y - 1)] - sat[(x - 1, y + s - 1)]\n\ndef simple_power(serial, x, y, s):\n return sum(cell(serial, p) for p in grid(s, s, sx=x, sy=y))\n\ndef search(serial):\n for s in tqdm(reversed(range(1, 300)), total=300):\n for x, y in grid(300 - s, 300 - s):\n yield power(serial, x, y, s), x, y, s\n\ndef draw(serial, size=300):\n for y in range(size):\n for x in range(size):\n print('{: >4}'.format(cell(serial, (x, y))), end='')\n print()\n\ndef draw_sat(serial, size=300):\n sat = compute_sat(serial)\n for y in range(size):\n for x in range(size):\n print('{: >4}'.format(sat[(x, y)]), end='')\n print()\n\ndef test_sat(serial, n):\n import random\n for _ in range(32):\n s = random.randint(1, 300)\n x = random.randint(1, 300 - s)\n y = random.randint(1, 300 - s)\n print(x,y,s, end='... ')\n actual = power(serial, x, y, s)\n expected = simple_power(serial, x, y, s)\n if actual != expected:\n print(\"{actual} != {expected}\".format(actual=actual, expected=expected))\n else:\n print(\"{} ✔\".format(actual))\n\n\ndef main():\n serial = 1955\n #draw_sat(serial, 20)\n\n #test_sat(serial, n=32)\n print(max(search(serial=serial)))\n\nmain()\n","sub_path":"2018/11.py","file_name":"11.py","file_ext":"py","file_size_in_byte":2745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"500707166","text":"first = int(input())\nnum_list = []\nnum_length = 0\nfor second in range(1, first+1):\n new = 0\n numbers = []\n numbers.append(first)\n numbers.append(second)\n while new >= 0:\n new = numbers[-2] - numbers[-1]\n numbers.append(new)\n if numbers[-1] < 0:\n del numbers[-1]\n if num_length < len(numbers):\n num_length = len(numbers)\n num_list = numbers\nprint(num_length)\nprint(*num_list) # * 언패킹","sub_path":"IM/BJ2635수이어가기_s.py","file_name":"BJ2635수이어가기_s.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"243955766","text":"import discord\n\nasync def get_all_pokestops(channel,client):\n\treturn await get_all_objects(client,channel)\n\t\t\nasync def get_all_objects(client,channel):\n\tprint(\"Getting data from \"+channel.name+\"...\",flush=True,end='')\n\tobjects = {}\n\tasync for message in client.logs_from(channel,limit=100):\n\t\tfor l in message.content.split('\\n'):\n\t\t\ttry:\n\t\t\t\t[nom,lat,lon] = l.split('@')\n\t\t\t\tobjects[nom] = [nom,lat,lon]\n\t\t\texcept:\n\t\t\t\tprint(\"Error for \"+l,flush=True)\n\tprint(\" Done\",flush=True)\n\treturn objects","sub_path":"pokestop.py","file_name":"pokestop.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"137443344","text":"'''\nDisplays centres of the detector pixels as blue dots,\ndisplays center of the detector as red dot,\ndisplays source position with green dot. \nUseful to figure out what delta (or in base.py shift_detector variable) means.\n'''\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom mpl_toolkits.mplot3d.art3d import Poly3DCollection\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom math import pi\n\ndef ray(D,alpha,w, eu, ev, ez):\n return D*np.cos(alpha)*ev + D*np.sin(alpha)*eu + w*ez\n\ndef cone_beam(D, H, Nr, Nc, delta, eu, ev, ez):\n dw = H/Nr\n da = dw/D\n alpha = np.linspace(-da/2*(Nc-1), da/2*(Nc-1), Nc) - delta[0]\n w = np.linspace(-dw/2*(Nr-1), dw/2*(Nr-1), Nr) - delta[1]\n \n theta = np.zeros((Nr,Nc,3)) # direction to the pixel's center\n for i in range(Nr):\n for j in range(Nc):\n theta[Nr-1-i,j,:] = ray(D, alpha[j], w[i], eu, ev, ez)\n return theta\n\nif __name__ == '__main__':\n R = 1.0\n D = 2*R\n \n H = 0.5\n Nr = 4\n Nc = 4\n \n s = np.pi/4\n P = H\n ys = np.array([R*np.cos(s), R*np.sin(s), P/2/np.pi*s])\n \n eu = np.array([-np.sin(s), np.cos(s), 0]) \n ev = np.array([-np.cos(s), -np.sin(s), 0])\n ez = np.array([0, 0, 1])\n \n dw = H/Nr\n da = dw/D\n delta = [0, 0]; # which point on detector will be hit by the ray defined \n # with alpha = 0 and w = 0\n # delta = [0, 0] corresponds to heigh/2, width/2\n\n theta = cone_beam(D,H,Nr,Nc,delta,eu,ev,ez); \n cd = np.zeros((Nr,Nc,3)) # coordinates of the pixel's center\n for i in range(Nr):\n for j in range(Nc):\n cd[Nr-1-i,j,:] = ys + theta[Nr-1-i,j,:]\n \n detector_center = (ys + np.array([D*np.cos(0.0)*ev + D*np.sin(0.0)*eu + 0*ez])).reshape(3,)\n source = ys.reshape(3,)\n x = cd[:,:,0].reshape(Nr*Nc,)\n y = cd[:,:,1].reshape(Nr*Nc,)\n z = cd[:,:,2].reshape(Nr*Nc,)\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n ax.scatter(x,y,z)\n ax.scatter(source[0], source[1], source[2], color='g')\n ax.scatter(detector_center[0], detector_center[1], detector_center[2], color='r')\n\n '''\n alphaep = da/2*Nc - delta[0]\n alphaen = -da/2*Nc - delta[0]\n wep = dw/2*Nr - delta[1] \n wen = -dw/2*Nr - delta[1]\n\n plane1 = [[tuple(ys), \\\n tuple(ys+ray(D,alphaep,wep,eu,ev,ez)),\\\n tuple(ys+ray(D,alphaen,wep,eu,ev,ez)),\\\n ]] \n collection = Poly3DCollection(plane1, linewidths=0.5, alpha=0.5)\n face_color = [0.5, 0.5, 1] \n collection.set_facecolor(face_color)\n ax.add_collection3d(collection)\n\n plane2 = [[tuple(ys+ray(D,alphaep,wen,eu,ev,ez)),\\\n tuple(ys+ray(D,alphaen,wen,eu,ev,ez)),\\\n tuple(ys), \\\n ]] \n collection = Poly3DCollection(plane2, linewidths=0.5, alpha=0.5)\n face_color = [0.5, 0.5, 1] \n collection.set_facecolor(face_color)\n ax.add_collection3d(collection)\n\n plane3 = [[tuple(ys), \\\n tuple(ys+ray(D,alphaep,wep,eu,ev,ez)),\\\n tuple(ys+ray(D,alphaep,wen,eu,ev,ez)),\\\n ]] \n collection = Poly3DCollection(plane3, linewidths=0.5, alpha=0.5)\n face_color = [0.5, 0.5, 1] \n collection.set_facecolor(face_color)\n ax.add_collection3d(collection)\n\n \n plane4 = [[tuple(ys), \\\n tuple(ys+ray(D,alphaen,wep,eu,ev,ez)),\\\n tuple(ys+ray(D,alphaen,wen,eu,ev,ez)),\\\n ]] \n collection = Poly3DCollection(plane4, linewidths=0.5, alpha=0.5)\n face_color = [0.5, 0.5, 1] \n collection.set_facecolor(face_color)\n ax.add_collection3d(collection)\n '''\n plt.show()\n\n \n","sub_path":"Cone_beam_geometry/cone_beam.py","file_name":"cone_beam.py","file_ext":"py","file_size_in_byte":3634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"1887870","text":"\"\"\"This module contains repositories code.\"\"\"\n\nimport abc\nimport json\nfrom collections import Iterable\nfrom copy import deepcopy\nfrom typing import Any, Dict, List, Union\n\nimport pymysql.cursors\nfrom pymongo import ASCENDING, DESCENDING, MongoClient\n\nfrom .DomainEventListener import ApplicationDomainEventPublisher, DomainEventListener\nfrom .DomainObject import DomainObject\n\n\nclass Repository(metaclass=abc.ABCMeta):\n \"\"\"Repository is the base interface that all repo must implement.\"\"\"\n\n @abc.abstractmethod\n def load(self, object_id: str) -> DomainObject:\n \"\"\"Load a domain object based on its id.\n\n Args:\n object_id: the id of the object to be loaded\n\n Requires:\n object_id is not None and is a valid id\n\n Returns:\n The loaded domain object\n\n \"\"\"\n raise NotImplementedError()\n\n @abc.abstractmethod\n def exists(self, object_id: str) -> bool:\n \"\"\"Check if a domain object exist based on its id.\n\n Args:\n object_id: the identifier for this object\n\n Requires:\n object_id is not None and is a valid id\n\n Returns:\n True if the object exists. False otherwise\n\n \"\"\"\n raise NotImplementedError()\n\n @abc.abstractmethod\n def save(self, obj: DomainObject) -> None:\n \"\"\"Save a domain object.\n\n Args:\n obj: the received domain object\n\n Requires:\n obj is not None and is a valid DomainObject\n\n Effects:\n Modifications made of the domain object are saved\n\n \"\"\"\n raise NotImplementedError()\n\n @abc.abstractmethod\n def get_event_stream_for(self, object_id: str) -> List[Dict[str, Any]]:\n \"\"\"Return the event stream for an object.\n\n Args:\n object_id: the identifier of this object\n\n Requires:\n object_id is not None and is a valid object id\n\n Returns:\n A sorted list of all the events for this domain object\n\n \"\"\"\n raise NotImplementedError()\n\n @abc.abstractmethod\n def get_event_stream_since(self, event_id: str) -> List[Dict[str, Any]]:\n \"\"\"Return all the events that occurs since a event.\n\n **The event with event_id==event_id is returned too**\n\n Args:\n event_id: the event id\n\n Requires:\n event_id is not None and represent an existing event\n\n Returns:\n A sorted list of all the event that occurs since the specified event\n The specified event is the firt of this list\n\n \"\"\"\n raise NotImplementedError()\n\n @abc.abstractmethod\n def first_event_id(self) -> Union[str, None]:\n \"\"\"Return the first known event id for sync.\n\n If there is no event yet, None is returned\n\n Returns:\n the firdt event id as a string. None if there is\n no events\n\n \"\"\"\n raise NotImplementedError()\n\n @abc.abstractmethod\n def max_version_for_object(self, object_id: str) -> int:\n \"\"\"Return the max known version for a domain object.\n\n Args:\n object_id: the object identifier\n\n Requires:\n object_id is not None and represent an existing domain object\n\n Returns:\n The last known version of this object\n\n \"\"\"\n raise NotImplementedError()\n\n @abc.abstractmethod\n def create_blank_domain_object(self) -> DomainObject:\n \"\"\"Create an empty domain event of the appropriate type.\n\n Returns:\n an empty domain event of the appropriate type\n\n \"\"\"\n raise NotImplementedError()\n\n @staticmethod\n def merge_event_streams(prioritary_event_stream, other_event_stream):\n \"\"\"Merge two event streams and manage possible conflit.\n\n Conflicts in event stream are event streams with repeating event number\n\n If there is a conflit, the first stream is considered prioritary.\n The conflict is then resolved by appending the additional elements of the second stream\n after these from the first. Events numbers are then correctly modified\n\n Requires:\n The two event streams are for the same object_id\n\n Returns:\n The merged event stream\n\n \"\"\"\n diff1, diff2 = DomainObject.diff_event_streams(\n prioritary_event_stream, other_event_stream\n )\n\n if len(diff1) == 0 and len(diff2) == 0:\n return prioritary_event_stream\n elif len(diff1) == 0 and len(diff2) != 0:\n return other_event_stream\n elif len(diff1) != 0 and len(diff2) == 0:\n return prioritary_event_stream\n else:\n result_stream = deepcopy(prioritary_event_stream)\n actual_version = result_stream[-1][\"version\"] + 1\n for event in diff2:\n event[\"version\"] = actual_version\n actual_version += 1\n result_stream.append(event)\n return result_stream\n\n\nclass EventPublisherRepository(Repository, metaclass=abc.ABCMeta):\n \"\"\"Repository that publishes its saved events to domain event publisher.\"\"\"\n\n def __init__(self): # noqa: D102\n self.listeners = list()\n self.register_listener(ApplicationDomainEventPublisher().instance)\n\n def save(self, obj: DomainObject) -> None: # noqa: D102\n to_emit = self.append_to_stream(obj)\n\n assert to_emit is not None\n assert isinstance(to_emit, Iterable)\n\n for event in to_emit:\n for listener in self.listeners:\n assert isinstance(listener, DomainEventListener)\n listener.domainEventPublished(event)\n\n def register_listener(self, listener: DomainEventListener) -> None:\n \"\"\"Register a new listener in this.\n\n Args:\n listener: the given listener\n\n Requires:\n listener must not be None and must implemnt DomainEventListener\n\n Effects:\n The listener is added in self\n\n \"\"\"\n assert listener is not None\n assert isinstance(listener, DomainEventListener)\n\n if listener not in self.listeners:\n self.listeners.append(listener)\n\n @abc.abstractmethod\n def append_to_stream(self, obj: DomainObject) -> List[Dict[str, Any]]:\n \"\"\"Save modification made on the domain object.\n\n Args:\n obj: the domain object to be saved\n\n Requires:\n obj is not None\n\n Returns:\n the newly persisted events\n\n \"\"\"\n raise NotImplementedError()\n\n\nclass MongoEventSourceRepository(EventPublisherRepository, metaclass=abc.ABCMeta):\n \"\"\"EventSource repository that persist event in mongodb.\"\"\"\n\n def __init__(\n self,\n host=\"localhost\",\n port=27017,\n database=\"fenrys\",\n collection=\"event_store\",\n username=None,\n password=None,\n ):\n super().__init__()\n self.__client = MongoClient(host, port)\n if username is not None and password is not None:\n self.__client = MongoClient(\n host, port, username=username, password=password\n )\n self.__db = self.__client[database]\n self.__collection = self.__db[collection]\n self.__create_indexes()\n\n def append_to_stream(self, obj): # noqa: D102\n assert obj is not None\n assert isinstance(obj, DomainObject)\n\n max_known_version = self.max_version_for_object(obj.object_id)\n merged_stream = self.merge_event_streams(\n self.get_event_stream_for(obj.object_id), obj.event_stream\n )\n merged_stream_version = merged_stream[-1][\"version\"]\n\n events_to_add = list()\n if merged_stream_version > max_known_version:\n for event in filter(\n lambda e: e[\"version\"] > max_known_version, merged_stream\n ):\n events_to_add.append(deepcopy(event))\n\n if len(events_to_add) > 0:\n self.__collection.insert_many(events_to_add)\n\n return deepcopy(events_to_add)\n\n def load(self, object_id): # noqa: D102\n obj = self.create_blank_domain_object()\n assert isinstance(obj, DomainObject)\n\n stream = self.get_event_stream_for(object_id)\n obj.rehydrate(stream)\n\n return obj\n\n def exists(self, object_id): # noqa: D102\n return self.__collection.count_documents({\"object_id\": object_id}) > 0\n\n def get_event_stream_for(self, object_id): # noqa: D102\n return list(self.__collection.find({\"object_id\": object_id}, {\"_id\": 0}))\n\n def max_version_for_object(self, object_id): # noqa: D102\n max_version_event = list(\n self.__collection.find({\"object_id\": object_id})\n .sort([(\"version\", -1)])\n .limit(1)\n )\n\n if len(max_version_event) == 0:\n return 0\n else:\n return max_version_event[0][\"version\"]\n\n def get_event_stream_since(self, event_id): # noqa: D102\n event = self.__collection.find_one({\"event_id\": event_id})\n\n if event is None:\n raise ValueError(\"This event id cannot be found\")\n\n event_timestamp = event[\"event_timestamp\"]\n events_iterator = self.__collection.find(\n {\"event_timestamp\": {\"$gte\": event_timestamp}}\n ).sort([(\"event_timestamp\", 1)])\n\n for event in events_iterator:\n yield event\n\n def first_event_id(self) -> Union[str, None]: # noqa: D102\n events = list(\n self.__collection.find({\"version\": 1})\n .sort([(\"event_timestamp\", 1)])\n .limit(1)\n )\n\n if len(events) == 1:\n return events[0][\"event_id\"]\n else:\n return None\n\n def __create_indexes(self):\n self.__collection.create_index(\n [(\"event_id\", ASCENDING)], name=\"eventid_unique\", unique=True\n )\n self.__collection.create_index(\n [(\"object_id\", ASCENDING), (\"version\", ASCENDING)],\n name=\"objectid_version_unique\",\n unique=True,\n )\n self.__collection.create_index(\n [(\"event_timestamp\", DESCENDING)], name=\"timestamp_ascending\"\n )\n\n\nclass MySQLSourceRepository(EventPublisherRepository, metaclass=abc.ABCMeta):\n\n __CREATE_STREAM = \"\"\"create table `{}`(`object_id` varchar(255) not null, `version` int not null, `event_name` varchar(255) not null, `event` longtext not null, `event_timestamp` double not null, primary key(`object_id`, `version`))\"\"\"\n __SELECT_OBJECT_STREAM = \"select * from `{}` where object_id = %s\"\n __INSERT_OBJECT_STREAM = \"insert into `{}`(`object_id`, `version`, `event_name`, `event`, `event_timestamp`) values(%s, %s, %s, %s, %s)\"\n __CHECK_TABLE_EXISTS = \"show tables like %s\"\n __TABLE_EXISTS = False\n\n def __init__(\n self,\n user=\"fenrys\",\n password=\"fenrys\",\n host=\"localhost\",\n database=\"fenrys\",\n table=\"event_store\",\n ):\n super().__init__()\n self.__connection = pymysql.connect(\n host=host,\n user=user,\n password=password,\n db=database,\n charset=\"utf8mb4\",\n cursorclass=pymysql.cursors.DictCursor,\n )\n self.__table = table\n\n self.__create_table()\n\n def __del__(self):\n self.__connection.close()\n\n def __create_table(self):\n if not self.__table_exists():\n try:\n with self.__connection.cursor() as cursor:\n cursor.execute(\n MySQLSourceRepository.__CREATE_STREAM.format(self.__table)\n )\n self.__connection.commit()\n except Exception as e:\n self.__connection.rollback()\n raise e\n\n def __table_exists(self):\n if not MySQLSourceRepository.__TABLE_EXISTS:\n with self.__connection.cursor() as cursor:\n cursor.execute(\n MySQLSourceRepository.__CHECK_TABLE_EXISTS, (self.__table)\n )\n result = cursor.fetchone()\n if result:\n MySQLSourceRepository.__TABLE_EXISTS = True\n return True\n else:\n return False\n else:\n return True\n\n def append_to_stream(self, obj):\n assert obj is not None\n assert isinstance(obj, DomainObject)\n\n max_known_version = self.max_version_for_object(obj.object_id)\n\n events_to_add = list()\n if obj.version_number > max_known_version:\n for event in obj.event_stream:\n if event[\"version\"] > max_known_version:\n events_to_add.append(deepcopy(event))\n\n if len(events_to_add) > 0:\n try:\n with self.__connection.cursor() as cursor:\n cursor.executemany(\n MySQLSourceRepository.__INSERT_OBJECT_STREAM.format(\n self.__table\n ),\n map(\n lambda event: (\n event[\"object_id\"],\n int(event[\"version\"]),\n event[\"event_name\"],\n json.dumps(event[\"event\"]),\n \"{:10.15f}\".format(float(event[\"event_timestamp\"])),\n ),\n events_to_add,\n ),\n )\n self.__connection.commit()\n except Exception as e:\n self.__connection.rollback()\n raise e\n\n return deepcopy(events_to_add)\n\n def load(self, object_id):\n obj = self.create_blank_domain_object()\n assert isinstance(obj, DomainObject)\n\n stream = self.get_event_stream_for(object_id)\n obj.rehydrate(stream)\n\n return obj\n\n def exists(self, object_id):\n return len(self.get_event_stream_for(object_id)) > 0\n\n def get_event_stream_for(self, object_id):\n stream = list()\n\n with self.__connection.cursor() as cursor:\n cursor.execute(\n MySQLSourceRepository.__SELECT_OBJECT_STREAM.format(self.__table),\n (object_id),\n )\n results = cursor.fetchall()\n for result in results:\n r = dict()\n r[\"object_id\"] = result[\"object_id\"]\n r[\"version\"] = int(result[\"version\"])\n r[\"event_name\"] = result[\"event_name\"]\n r[\"event\"] = json.loads(result[\"event\"])\n r[\"event_timestamp\"] = float(result[\"event_timestamp\"])\n stream.append(r)\n\n return stream\n\n def max_version_for_object(self, object_id):\n stream = self.get_event_stream_for(object_id)\n\n return max(map(lambda x: x[\"version\"], stream)) if len(stream) > 0 else 0\n\n\nclass InMemoryEventSourceRepository(EventPublisherRepository, metaclass=abc.ABCMeta):\n def __init__(self):\n super().__init__()\n self.__repo = list()\n\n def append_to_stream(self, obj):\n assert obj is not None\n assert isinstance(obj, DomainObject)\n\n max_known_version = self.max_version_for_object(obj.object_id)\n merged_stream = self.merge_event_streams(\n self.get_event_stream_for(obj.object_id), obj.event_stream\n )\n merged_stream_version = merged_stream[-1][\"version\"]\n\n events_to_add = list()\n if merged_stream_version > max_known_version:\n for event in merged_stream:\n if event[\"version\"] > max_known_version:\n events_to_add.append(event)\n self.__repo.append(event)\n\n return deepcopy(events_to_add)\n\n def load(self, object_id):\n obj = self.create_blank_domain_object()\n assert isinstance(obj, DomainObject)\n\n stream = self.get_event_stream_for(object_id)\n obj.rehydrate(stream)\n\n return obj\n\n def exists(self, object_id):\n return len(self.get_event_stream_for(object_id)) > 0\n\n def get_event_stream_for(self, object_id):\n stream = list()\n for event in self.__repo:\n if event[\"object_id\"] == object_id:\n stream.append(event)\n return stream\n\n def get_event_stream_since(self, event_id):\n ignore = True\n for event in sorted(self.__repo, key=lambda x: x[\"event_timestamp\"]):\n if event[\"event_id\"] == event_id:\n ignore = False\n\n if not ignore:\n yield event\n\n def max_version_for_object(self, object_id):\n max_known_version = 0\n stream = self.get_event_stream_for(object_id)\n\n for event in stream:\n if event[\"version\"] > max_known_version:\n max_known_version = event[\"version\"]\n\n return max_known_version\n\n def first_event_id(self):\n if len(self.__repo) == 0:\n return None\n else:\n return self.__repo[0][\"event_id\"]\n","sub_path":"laizy/eventsourcing/EventSourceRepository.py","file_name":"EventSourceRepository.py","file_ext":"py","file_size_in_byte":17135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"113959669","text":"from __future__ import print_function\n\nimport boto3\nimport json\nimport datetime\nimport decimal\nimport elasticsearch\n\n# define AWS environment \nddb_table_name = 'amzn_stock_tracker' # ddb table name\nes_endpoint = 'search-mattsona-es-s4iwlyogvbgbp22zi7tu24p3x4.us-west-2.es.amazonaws.com' # elasticsearch endpoint \nes_index = 'stock_trades' # define the es index\nes_type = 'trade' # define the es record type\n\n# Helper class to convert a DynamoDB item to JSON.\nclass DecimalEncoder(json.JSONEncoder):\n def default(self, o):\n if isinstance(o, decimal.Decimal):\n if o % 1 > 0:\n return float(o)\n else:\n return int(o)\n return super(DecimalEncoder, self).default(o)\n\n\nprint('Loading function')\n\ndef lambda_handler(event, context):\n\n ticker_symbol = event['ticker_symbol'] # what stock is being traded\n trade_operation = event['trade_operation'] # is this a buy or a sell \n trade_volume = event['trade_volume'] # stock volume for trade\n # trade_time = time.strftime(\"%c\") # old way of indicating trade time \n trade_time = str(datetime.datetime.isoformat(datetime.datetime.now()))\n\n price_movement_factor = 0.0 # how much will the stock price move based on the volume of the trade (up/down buy/sell)\n report_dict = {} # dict to hold reporting data on trades\n\n # change this to reflect the name of your dynamodb table\n dynamo = boto3.resource('dynamodb').Table(ddb_table_name)\n\n # get current price\n response = dynamo.get_item(\n Key={\n 'stock_name': ticker_symbol\n },\n ConsistentRead=True\n )\n # price_data = json.dumps(response['Item']['stock_price'], cls=DecimalEncoder)\n current_price = float(json.dumps(response['Item']['stock_price'], cls=DecimalEncoder))\n\n # process the trade, which updates the price of the stock\n # figure out which way (and how much) the stock price will move \n if trade_volume > 100000:\n price_movement_factor = 0.05\n elif trade_volume > 10000:\n price_movement_factor = 0.02\n elif trade_volume > 1000:\n price_movement_factor = 0.01\n\n if trade_operation == 'buy':\n price_delta = 1.0 + price_movement_factor\n else:\n price_delta = 1.0 - price_movement_factor\n\n # perform the trade and update the price (if needed), but make sure that the price in the table is the same @ start time via conditional write \n # need to update this to have shared fate between ES indexing + dynamo write \n\n try :\n response_after_update = dynamo.update_item(\n Key={'stock_name': ticker_symbol},\n UpdateExpression=\"set stock_price = :p, last_trade_time = :t\",\n ConditionExpression=\"stock_price = :o\",\n ExpressionAttributeValues={\n ':p': decimal.Decimal(str(round((current_price * price_delta),4))),\n ':t': trade_time,\n ':o': response['Item']['stock_price']\n },\n ReturnValues=\"ALL_NEW\"\n )\n\n # create dict to report on transaction\n report_dict = {\"initial_stock_info\": response['Item'], \"stock_event_info\": event, \"end_stock_info\": response_after_update['Attributes']}\n print('Data posted to DynamoDB: ' + str(response_after_update))\n\n except Exception as e:\n print(\"please retry trade - stock value moved or dynamoDB error: \" + str(e))\n raise e\n\n # get this data in to elasticsearch for indexing\n try: \n # es = elasticsearch.Elasticsearch([{'host': es_endpoint, 'port': 443, 'use_ssl': True, 'verify_certs': True}]) # hold off on validating certs\n es = elasticsearch.Elasticsearch([{'host': es_endpoint, 'port': 443, 'use_ssl': True}]) # hold off on validating certs\n es_response = es.index(index=es_index, doc_type=es_type, body=report_dict)\n print('Data posted to ES: ' + str(es_response))\n\n except Exception as e:\n print('Data post to ES failed: ' + str(e))\n raise e\n\n # return report of transaction\n return json.dumps(report_dict, cls=DecimalEncoder) \n\n\n","sub_path":"lambda_functions/processTrade/lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":4063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"431235566","text":"# -*-coding:utf-8-*-\nimport requests\nimport json\nimport re,os\nimport itertools\nfrom hashlib import md5\ncount=0\n\n\ndef down_pic(pic_urls, localPath):\n global count\n user_agent = \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.95 Safari/537.36\"\n headers ={'User-Agent': user_agent, \"Upgrade-Insecure-Requests\": 1,\n \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8\",\n \"Accept-Encoding\": \"gzip, deflate, sdch\",\n \"Accept-Language\": \"zh-CN,zh;q=0.8,en;q=0.6\",\n \"Cache-Control\": \"no-cache\"}\n if not os.path.exists(localPath): # 新建文件夹\n os.mkdir(localPath)\n \"\"\"给出图片链接列表, 下载图片\"\"\"\n # print(pic_urls)\n for pic_url in pic_urls:\n count = count + 1\n try:\n pic = requests.get(pic_url, timeout=15)\n picname=md5(pic.content).hexdigest()+'.jpg'\n picPath=localPath+picname\n if not os.path.exists(picPath):\n with open(picPath, 'wb') as f:\n f.write(pic.content)\n f.close()\n print('成功下载第%s张图片: %s' % (str(count), str(pic_url)))\n except Exception as e:\n print('下载第%s张图片时失败: %s' % (str(count), str(pic_url)))\n print(e)\n continue\n\n\n\ndef get_response(files,imgpath):\n url = '/pcdutu/a_upload?fr=html5&target=pcSearchImage&needJson=true'\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36',\n }\n source = 'http://image.baidu.com'\n vs_url = source + '/?fr=shitu'\n vs_page = requests.get(vs_url, headers=headers).text\n vs_id = re.findall('window.vsid = \"(.*?)\"', vs_page)[0]\n r = requests.post(source + url, headers=headers, files=files)\n tmp = r.text\n tmp_json = json.loads(tmp)\n queryImageUrl = tmp_json['url']\n querySign = tmp_json['querySign']\n url2 = source + '/pcdutu/a_similar?queryImageUrl=' + queryImageUrl + '&querySign=' + querySign + '&simid=undefined&word=&querytype=0&t=1534831269418&rn=60&sort=&fr=pc&pn={pn}'\n url3 = (url2.format(pn=x) for x in itertools.count(start=0, step=60))\n for url in url3:\n html = requests.get(url, timeout=10).text\n a = re.compile(r'\"ObjURL\":\"(.*?)\"')\n downURL = re.findall(a, html)\n down_pic(list(set(downURL)), imgpath)\n if len(downURL)==0:\n print('已下载全部搜索到的图片')\n break\n\n\nif __name__ == '__main__':\n #############需要输入的图片\n path0 = os.getcwd()\n dish_path=path0+'/dish'\n folderlist=os.listdir(dish_path)\n for folder in folderlist:\n count = 0\n folderpath=dish_path+'/'+folder\n filelist = os.listdir(folderpath)\n for file in filelist:\n ext=os.path.splitext(file)[1]\n if ext == '.jpg':\n files = {'file': (file, open(folderpath + '/' + file, 'rb'), 'image/jpg'), 'pos': (None, 'upload'),\n 'uptype': (None, 'upload_pc'), 'fm': (None, 'home')}\n filename=file.split(\".\")[0]\n imgpath=folderpath+'/結果/'\n get_response(files,imgpath)\n\n","sub_path":"food_name/baidu_image/image_research.py","file_name":"image_research.py","file_ext":"py","file_size_in_byte":3295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"66077818","text":"from nltk.tokenize import word_tokenize, sent_tokenize, regexp_tokenize\nimport os\nimport re\nfrom gensim.models.doc2vec import Doc2Vec, TaggedDocument\nfrom urllib.request import urlopen\nfrom nltk.util import ngrams\nfrom nltk.corpus import stopwords\nfrom collections import Counter\nimport string\nfrom sklearn.manifold import TSNE\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nurl = 'https://raw.githubusercontent.com/ronaldjgrafjr/data_science_guides/master/sample_data/us_constitution.txt'\nconstitution = ''.join([line.decode('utf-8') for line in urlopen(url).readlines()])\n\n# remove single \\n characters but keep \\n\\n\nconstitution = constitution.replace('\\n\\n', '|||')\nconstitution = constitution.replace('\\n', ' ')\nconstitution = constitution.replace(' ', ' ')\nconstitution = constitution.replace('|||', '\\n\\n')\n\n# exmplore the top n-grams within the constitution\nwords = [word for word in word_tokenize(constitution) if not word in set(stopwords.words('english') + list(string.punctuation))]\nunigrams = ngrams(words, 1)\nbigrams = ngrams(words, 2)\ntrigrams = ngrams(words, 3)\nfourgrams = ngrams(words, 4)\n\nCounter(fourgrams).most_common()[1:10]\n\n# create document vectors for each of the sentences of the constitution\nsentences = sent_tokenize(constitution)\n\n# https://medium.com/@mishra.thedeepak/doc2vec-simple-implementation-example-df2afbbfbad5\ntagged_data = [TaggedDocument(words=word_tokenize(_d.lower()), tags=[str(i)]) for i, _d in enumerate(sentences)]\n\nmax_epochs = 10\nvec_size = 20\nalpha = 0.025\n\nmodel = Doc2Vec(size=vec_size,\n alpha=alpha, \n min_alpha=0.00025,\n min_count=1,\n dm =1)\n \nmodel.build_vocab(tagged_data)\n\nfor epoch in range(max_epochs):\n print('iteration {0}'.format(epoch))\n model.train(tagged_data,\n total_examples=model.corpus_count,\n epochs=model.iter)\n # decrease the learning rate\n model.alpha -= 0.0002\n # fix the learning rate, no decay\n model.min_alpha = model.alpha\n\nmodel.save(\"/Users/ronaldjgrafjr/Documents/Github/data_science_guides/python/sample_scripts/d2v.model\")\n\nvocab = [word for word in model.wv.vocab.keys()]\n\ndef complete_analaogy(w1, w2, w3):\n new_vector = model.wv.get_vector(w1) - model.wv.get_vector(w2) + model.wv.get_vector(w2)\n return(model.similar_by_vector(new_vector))\n\ncomplete_analaogy('house', 'legislature', 'president')\nmodel.similar_by_word('impeachment')\n\n# visualize using t-SNE (t-distributed Stochastic Neighbor Embedding)\ndef display_closestwords_tsnescatterplot(model, word):\n \n arr = np.empty((0,vec_size), dtype='f')\n word_labels = [word]\n\n # get close words\n close_words = model.similar_by_word(word)\n\n # add the vector for each of the closest words to the array\n arr = np.append(arr, np.array([model[word]]), axis=0)\n for wrd_score in close_words:\n wrd_vector = model[wrd_score[0]]\n word_labels.append(wrd_score[0])\n arr = np.append(arr, np.array([wrd_vector]), axis=0)\n \n # find tsne coords for 2 dimensions\n tsne = TSNE(n_components=2, random_state=0)\n np.set_printoptions(suppress=True)\n Y = tsne.fit_transform(arr)\n\n x_coords = Y[:, 0]\n y_coords = Y[:, 1]\n # display scatter plot\n plt.scatter(x_coords, y_coords)\n\n for label, x, y in zip(word_labels, x_coords, y_coords):\n plt.annotate(label, xy=(x, y), xytext=(0, 0), textcoords='offset points')\n\n plt.xlim(x_coords.min()+0.00005, x_coords.max()+0.00005)\n plt.ylim(y_coords.min()+0.00005, y_coords.max()+0.00005)\n plt.show()\n\ndisplay_closestwords_tsnescatterplot(model, 'house')\n","sub_path":"python/sample_scripts/word_vectors.py","file_name":"word_vectors.py","file_ext":"py","file_size_in_byte":3626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"165808718","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri May 29 15:30:26 2020\n\n@author: 03125327\n\nT(h) and Sy(h) calibration comparing measurements of WTD along transects\nwith modelled 1-D Boussinesq equation.\n\nNOTES:\n - Peat depth assumed equal along the transect\n\n\"\"\"\nimport argparse\nimport fipy as fp\nfrom fipy.tools import numerix\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom scipy import interpolate\nfrom multiprocessing import Pool\nfrom multiprocessing import cpu_count\nimport emcee\n\n\"\"\"\nDefinitions\n\"\"\"\ndef fabricate_data(nx, dt, s0, s1, t0, t1, t2, NDAYS, SENSOR_LOCATIONS, MAX_HEAD_BOUNDARIES=5., MAX_SOURCE=3., filename=\"fabricated_data.txt\"):\n \"\"\"\n\n Parameters\n ----------\n nx : int\n Number of mesh subdivisions\n dt : float\n Time step in days\n s0, s1 : float\n params of S\n t0, t1, t2 : float\n params of T\n NDAYS : int\n Number of days\n SENSOR_LOCATIONS : list\n Location of sensors relative to mesh. Min value 0; max value (nx-1)\n MAX_HEAD_BOUNDARIES : float, optional\n Maximum head in the boundaries. The default is 5..\n MAX_SOURCE : float, optional\n Maximum value of P and ET. The default is 3..\n filename : str, optional\n Name of file where to write the fabricated data. The default is \"fabricated_data.txt\".\n\n Returns\n -------\n None. Instead, writes \n WTD of sensor(i) (with i from 0 to len(SENSOR_LOCATIONS)); day; P; ET\n to the file specified in filename\n \n\n \"\"\"\n dx = 1\n mesh = fp.Grid1D(nx=nx, dx=dx)\n \n # IC\n hini = 1.\n \n h = fp.CellVariable(name=\"head\", mesh=mesh, value=hini, hasOld=True)\n theta = fp.CellVariable(name=\"theta\", mesh=mesh, value=numerix.exp(s0_true + s1_true*h.value), hasOld=True)\n \n D = t0_true/s1_true * numerix.exp(t1_true* ((numerix.log(theta) -s0_true)/s1_true)**t2_true)/ theta\n \n # BC, source/sink\n boundary_sensors = np.random.rand(NDAYS, 2) * MAX_HEAD_BOUNDARIES\n PRECIPITATION = np.random.rand(NDAYS) * MAX_SOURCE\n EVAPOTRANSPIRATION = np.random.rand(NDAYS) * MAX_SOURCE\n P = PRECIPITATION[0]; ET = EVAPOTRANSPIRATION[0]\n \n # Boussinesq eq. in theta\n eq = fp.TransientTerm() == fp.DiffusionTerm(coeff=D) + P - ET\n \n MAX_SWEEPS = 100\n n_sensors = len(SENSOR_LOCATIONS)\n \n with open(filename, 'w') as out:\n out.write('sensor0 sensor1 sensor2 sensor3 day P ET\\n')\n \n for day in range(NDAYS):\n \n theta.updateOld()\n \n # BC and Source/sink update\n theta_left = numerix.exp(s0_true + s1_true*boundary_sensors[day,0])\n theta_right = numerix.exp(s0_true + s1_true*boundary_sensors[day,1])\n theta.constrain(theta_left, where=mesh.facesLeft); theta.constrain(theta_right, where=mesh.facesRight)\n P = PRECIPITATION[day]; ET = EVAPOTRANSPIRATION[day]\n \n \n res = 0.0\n for r in range(MAX_SWEEPS):\n resOld=res\n res = eq.sweep(var=theta, dt=dt)\n if abs(res - resOld) < 1e-7: break # it has reached to the solution of the linear system\n \n with open(filename, 'a') as out:\n h_from_theta = (numerix.log(theta) -s0)/s1\n sensor_values = [h_from_theta.value[loc] for loc in SENSOR_LOCATIONS]\n line = \" \".join([str(s_v) for s_v in sensor_values])\n line = line + f\" {day} {P} {ET}\"\n out.write( line + '\\n')\n\ndef read_sensors(filename):\n with open(filename, 'r') as f:\n df_sensors = pd.read_csv(filename, engine='python', sep=' ')\n \n sensor_measurements = df_sensors.loc[:,'sensor0':'sensor3'].to_numpy()\n day = df_sensors['day'].to_numpy()\n P = df_sensors['P'].to_numpy()\n ET = df_sensors['ET'].to_numpy()\n \n return sensor_measurements, day, P, ET\n\ndef hydro_1d(nx, dx, dt, params, theta_ini, ndays, sensor_loc):\n mesh = fp.Grid1D(nx=nx, dx=dx)\n \n s0 = params[0]; s1 = params[1] \n t0 = params[2]; t1 = params[3]; t2 = params[4];\n \n P = precip[0]; ET = evapotra[0]\n \n theta = fp.CellVariable(name=\"theta\", mesh=mesh, value=theta_ini, hasOld=True)\n \n # Choice of parameterization\n # This is the underlying transimissivity: T = t0 * exp(t1 * h**t2)\n # This is the underlying storage coeff: S = s1 * exp(s0 + s1 * h) # and S_theta = s1 * theta\n # S is hidden in change from theta to h\n D = t0/s1 * numerix.exp(t1* ((numerix.log(theta) -s0)/s1)**t2)/ theta \n \n if np.isnan(D.value).any() or (D<0).any():\n raise ValueError('D is non-positive')\n \n # Boussinesq eq. for theta\n eq = fp.TransientTerm() == fp.DiffusionTerm(coeff=D) + P - ET\n \n h_from_theta_sol = [] # returned quantity\n \n MAX_SWEEPS = 100\n \n for day in range(ndays):\n \n theta.updateOld()\n \n # BC and Source/sink update\n boundary_sensors = [measurements[day,0], measurements[day, -1]]\n theta_left = numerix.exp(s0 + s1*boundary_sensors[0])\n theta_right = numerix.exp(s0 + s1*boundary_sensors[1])\n theta.constrain(theta_left, where=mesh.facesLeft); theta.constrain(theta_right, where=mesh.facesRight)\n P = precip[day]; ET = evapotra[day] \n \n res = 0.0\n for r in range(MAX_SWEEPS):\n resOld=res\n res = eq.sweep(var=theta, dt=dt)\n if abs(res - resOld) < 1e-7: break # it has reached to the solution of the linear system\n \n # Append to list\n theta_sol = theta.value\n theta_sol_sensors = np.array([theta_sol[i] for i in sensor_loc])\n h_from_theta_sol.append((np.log(theta_sol_sensors) -s0)/s1)\n \n return np.array(h_from_theta_sol)\n\n#%%\n\"\"\"\nParse command-line arguments\n\"\"\"\nparser = argparse.ArgumentParser(description='Run MCMC parameter estimation')\n\nparser.add_argument('--ncpu', default=1, help='(int) Number of processors', type=int)\nparser.add_argument('-cl','--chainlength', default=20, help='(int) Length of MCMC chain', type=int)\nparser.add_argument('-w','--nwalkers', default=10, help='(int) Number of walkers in parameter space', type=int)\nargs = parser.parse_args()\n\nN_CPU = args.ncpu\nMCMC_STEPS = args.chainlength\nN_WALKERS = args.nwalkers\n\nN_PARAMS = 5\n\n#%%\n\"\"\"\nFabricate sensor data\nFrom synthetic fipy 1d simulation\n\"\"\"\ns0_true = 0.1; s1_true = 0.2\nt0_true = 1.; t1_true = 0.01; t2_true = 1.1\ntrue_params = [s0_true, s1_true, t0_true, t1_true, t2_true]\nSENSOR_LOCATIONS = [0, 12, 67, 94]\nNDAYS = 5\nnx_fabricate=100; dt=1.\n\n# Uncomment this to fabricate and rewrite some data\n# fabricate_data(nx_fabricate, dt, s0_true, s1_true, t0_true, t1_true, t2_true, NDAYS, SENSOR_LOCATIONS)\n\n#%%\n\"\"\"\nGet sensor data\nTODO: rewrite for sensors of the same time! MCMC takes care of parallelization\n\"\"\"\nfilename = 'fabricated_data.txt'\nmeasurements, days, precip, evapotra = read_sensors(filename)\n\n#%%\n\"\"\"\nMCMC parameter estimation\n\"\"\" \nnx = 10\ndx = 1.\ndt = 1.\n\nNDAYS = 5\n\nSENSOR_MEASUREMENT_ERR = 0.05 # metres. Theoretically, 1mm\n\n# Correct sensor positions to accommodate new nx\nsensor_locations = np.array(SENSOR_LOCATIONS) * nx / nx_fabricate\nsensor_locations = np.rint(sensor_locations).astype(int)\n\ndef log_likelihood(params):\n s0 = params[0]; s1 = params[1] \n t0 = params[2]; t1 = params[3]; t2 = params[4];\n \n # IC, interpolated from initial sensor values\n hini_interp = interpolate.interp1d(SENSOR_LOCATIONS, measurements[0])\n hini = hini_interp(np.arange(0, nx, dx))\n theta_ini = np.exp(s0 + s1*hini) \n \n try:\n simulated_wtd = hydro_1d(nx, dx, dt, params, theta_ini, NDAYS, sensor_locations)\n # TODO: this error handling might be the reason of the thing not working. Check!!\n except: # if error in hydro computation\n print(\"###### SOME ERROR IN HYDRO #######\")\n return -np.inf # or -np.inf?\n else:\n sigma2 = SENSOR_MEASUREMENT_ERR ** 2\n return -0.5 * np.sum((measurements - simulated_wtd) ** 2 / sigma2 + np.log(sigma2))\n\n# maximum likelihood optimization\nopt_ML = False\nif opt_ML:\n from scipy.optimize import minimize\n MAXITER = 100\n np.random.seed(42)\n nll = lambda x :-log_likelihood(x)\n initial = np.random.rand(5)\n soln = minimize(nll, initial, options={'maxiter':MAXITER, 'disp':True})\n kadj_ml = soln.x\n \n print(\"Maximum likelihood estimates:\")\n print(f\"k_adjust = {kadj_ml}\")\n\n\ndef log_prior(params):\n s0 = params[0]; s1 = params[1] \n t0 = params[2]; t1 = params[3]; t2 = params[4];\n # uniform priors everywhere.\n if -0.1 1:\n with Pool(N_CPU) as pool:\n pos = gen_positions_for_walkers(N_WALKERS, N_PARAMS)\n \n nwalkers, ndim = pos.shape\n \n # save chain to HDF5 file\n fname = \"mcmc_result_chain.h5\"\n backend = emcee.backends.HDFBackend(fname)\n backend.reset(nwalkers, ndim)\n \n sampler = emcee.EnsembleSampler(nwalkers, ndim, log_probability, pool=pool, backend=backend)\n sampler.run_mcmc(pos, MCMC_STEPS, progress=True)\n \nelif N_CPU == 1: # single processor\n pos = gen_positions_for_walkers(N_WALKERS, N_PARAMS)\n nwalkers, ndim = pos.shape\n \n # save chain to HDF5 file\n fname = \"mcmc_result_chain.h5\"\n backend = emcee.backends.HDFBackend(fname)\n backend.reset(nwalkers, ndim)\n \n sampler = emcee.EnsembleSampler(nwalkers, ndim, log_probability, backend=backend)\n sampler.run_mcmc(pos, MCMC_STEPS, progress=True);\n\n#%%\nimport corner\n\n\nflat_samples = sampler.get_chain(discard=0, thin=1, flat=True)\nprint(flat_samples.shape)\nlabels = ['s0', 's1', 't0', 't1', 't2']\nfig = corner.corner(\n flat_samples, labels=labels, truths=true_params\n);\nfig.savefig(\"MCMC_corner_result.png\")","sub_path":"1d_calibration.py","file_name":"1d_calibration.py","file_ext":"py","file_size_in_byte":10410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"23274676","text":"# 一个机器人位于一个 m x n 网格的左上角 (起始点在下图中标记为 “Start” )。\n#\n# 机器人每次只能向下或者向右移动一步。机器人试图达到网格的右下角(在下图中标记为 “Finish” )。\n#\n# 问总共有多少条不同的路径?\n#\n#\n#\n# 示例 1:\n#\n#\n# 输入:m = 3, n = 7\n# 输出:28\n#\n# 示例 2:\n#\n#\n# 输入:m = 3, n = 2\n# 输出:3\n# 解释:\n# 从左上角开始,总共有 3 条路径可以到达右下角。\n# 1. 向右 -> 向下 -> 向下\n# 2. 向下 -> 向下 -> 向右\n# 3. 向下 -> 向右 -> 向下\n#\n#\n# 示例 3:\n#\n#\n# 输入:m = 7, n = 3\n# 输出:28\n#\n#\n# 示例 4:\n#\n#\n# 输入:m = 3, n = 3\n# 输出:6\n#\n#\n#\n# 提示:\n#\n#\n# 1 <= m, n <= 100\n# 题目数据保证答案小于等于 2 * 109\n#\n# Related Topics 数组 动态规划\n# 👍 936 👎 0\n\n\n# leetcode submit region begin(Prohibit modification and deletion)\n# 时间复杂度:O(MN)\n# 空间复杂度:O(mn)\n# 执行耗时:44 ms,击败了37.34% 的Python3用户\n# 内存消耗:14.6 MB,击败了96.52% 的Python3用户\nclass Solution:\n def uniquePaths(self, m: int, n: int) -> int:\n dp = [[0] * n for _ in range(m)]\n for i in range(m):\n dp[i][0] = 1\n for j in range(n):\n dp[0][j] = 1\n for i in range(1, m):\n for j in range(1, n):\n dp[i][j] = dp[i-1][j] + dp[i][j-1]\n return dp[m-1][n-1]\n# leetcode submit region end(Prohibit modification and deletion)\n","sub_path":"leetcodeTemp/leetcode/editor/cn/[62]不同路径.py","file_name":"[62]不同路径.py","file_ext":"py","file_size_in_byte":1515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"622384787","text":"#!/usr/bin/env python3\n#\n# us_states_capitals_finder.py - The tool to grab US States Capitals on to clipboard.\n\nimport pyperclip\nimport re\n\n\n# capitals_regex \ncapitals_regex = re.compile(r'''(\n (\\w+\\s)? # If State has 2 words name\n (\\w+) # State's name\n (\\s\\(\\w\\w\\)) # Abbr of State's name\n (\\s-\\s) # Separator\n (\\w+) # Capital's name\n (\\.)? # If Capital's name has a dot\n (\\s\\w+)? # If Capital has 2 words name\n (\\s\\w+)? # If Capital has 3 words name\n )''', re.VERBOSE)\n\n\n# Find matches in clipboard text.\ntext = str(pyperclip.paste())\nmatches = []\n\nfor groups in capitals_regex.findall(text):\n capital_name = \"'\" + groups[1] + groups[2] + \"': '\" \\\n + groups[5] + groups[6] + groups[7] + groups[8] + \"'\"\n matches.append(capital_name)\n\n\n# Copy results to the clipboard.\nif len(matches) > 0:\n text = '' # clear text to store \n\n for i in range(len(matches)):\n if i % 2 == 0: # after 2 state's capital take a new line\n if (len(matches[0]) > len(matches[i])):\n text += matches[i] + ',\\t\\t\\t'\n else:\n text += matches[i] + ',\\t\\t'\n else:\n text += matches[i] + ',\\n'\n\n pyperclip.copy(text)\nelse:\n print('Nothing was found!')\n\n\n#print('\\n'.join(matches))\n\n","sub_path":"automate/source_code/us_states_capitals_finder.py","file_name":"us_states_capitals_finder.py","file_ext":"py","file_size_in_byte":1467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"604778009","text":"# !/usr/bin/env\n# -*- coding:utf-8 -*-\n# Yachao Shao\n# Code on 5th April 2019\n# This file is used to get POI information from our transfered location list\n\nimport json\nimport urllib\nimport math\nimport pandas as pd\nimport time\nimport threading\nimport threadpool\n\nthreadpool_size = 10\n\ndef gcj02_to_bd09(lng, lat):\n\n x_pi = 3.14159265358979324 * 3000.0 / 180.0\n pi = 3.1415926535897932384626 # π\n a = 6378245.0 # 长半轴\n ee = 0.00669342162296594323 # 偏心率平方\n \"\"\"\n 火星坐标系(GCJ-02)转百度坐标系(BD-09)\n 谷歌、高德——>百度\n :param lng:火星坐标经度\n :param lat:火星坐标纬度\n :return:\n \"\"\"\n z = math.sqrt(lng * lng + lat * lat) + 0.00002 * math.sin(lat * x_pi)\n theta = math.atan2(lat, lng) + 0.000003 * math.cos(lng * x_pi)\n bd_lng = z * math.cos(theta) + 0.0065\n bd_lat = z * math.sin(theta) + 0.006\n return [bd_lng, bd_lat]\n\n\ndef location_poi(baidu_location, baidu_api_key):\n\n tag_list = ['美食', '酒店', '购物', '生活服务', '丽人', '旅游景点','休闲娱乐','运动健身',\n '教育培训', '文化传媒', '医疗', '汽车服务','交通设施', '金融','房地产', '公司企业',\n '政府机构','出入口', '自然地物']\n\n locationx = str(baidu_location[1])\n locationy = str(baidu_location[0])\n\n url1 = 'http://api.map.baidu.com/place/v2/search?query='\n url2 = '&location='\n url3 = '&radius=500&output=json&scope=2&page_size=20&page_num=0&sort_name:distance|sort_rule:1&ak='\n 'fBUwUjXAwBh1jxTLRdhubR0bG2buPCFD'\n poi_info_list = []\n poi_info_list.append(baidu_location)\n tag = 0\n while tag < len(tag_list):\n # print(tag_list[tag])\n url = url1 + urllib.parse.quote(tag_list[tag]) + url2 + locationx + ',' + locationy + url3 + baidu_api_key\n # print(url)\n result = urllib.request.urlopen(url).read()\n poi = json.loads(result)\n # print(poi)\n\n if poi['message'] == 'ok':\n number_total = poi['total']\n results = poi['results']\n distance_ave = 0\n if number_total != 0:\n distance_sum = 0\n for i in range(len(results)):\n distance_sum = distance_sum + results[i]['detail_info']['distance']\n distance_ave = distance_sum/number_total\n poi_info_list.append(number_total)\n poi_info_list.append(distance_ave)\n tag += 1\n # print(poi_info_list)\n return poi_info_list\n # input(\"Press Enter to continue...\")\n\n\n\ndef multi_thread_location_poi(location_file_path,split_tag,fmt=\"csv\"):\n \"\"\"\n 对于每个板块,获得这个板块下所有二手房的信息\n 并且将这些信息写入文件保存\n :param location_file_path: 文件路径\n :param split_tag: 切分后的文件后缀\n :param fmt: 保存文件格式\n :return: None\n \"\"\"\n # global total_num\n AK_list = []\n fin = open('AK.txt', 'r')\n while (1):\n line = fin.readline().strip()\n if line == '':\n break\n AK_list.append(line)\n\n fin.close()\n csv_file = \"poi_location_split_{0}.csv\".format(split_tag)\n # with open(csv_file, \"w\") as f:\n # 开始获得需要的板块数据\n # print(location_file_path)\n # print(split_tag)\n file_tag = str(split_tag)\n location_file = open(location_file_path+file_tag)\n\n location_list = pd.read_csv(location_file)['location']\n columns_name = ['baidu_location', 'poi_1_number', 'poi_1_distance_ave',\n 'poi_2_number', 'poi_2_distance_ave', 'poi_3_number', 'poi_3_distance_ave',\n 'poi_4_number', 'poi_4_distance_ave', 'poi_5_number', 'poi_5_distance_ave',\n 'poi_6_number', 'poi_6_distance_ave', 'poi_7_number', 'poi_7_distance_ave',\n 'poi_8_number', 'poi_8_distance_ave', 'poi_9_number', 'poi_9_distance_ave',\n 'poi_10_number', 'poi_10_distance_ave', 'poi_11_number', 'poi_11_distance_ave',\n 'poi_12_number', 'poi_12_distance_ave', 'poi_13_number', 'poi_13_distance_ave',\n 'poi_14_number', 'poi_14_distance_ave', 'poi_15_number', 'poi_15_distance_ave',\n 'poi_16_number', 'poi_16_distance_ave', 'poi_17_number', 'poi_17_distance_ave',\n 'poi_18_number', 'poi_18_distance_ave', 'poi_19_number', 'poi_19_distance_ave']\n location_poi_list = pd.DataFrame(columns=columns_name)\n AK_num =split_tag\n i = 0\n for location in location_list:\n lng, lat = location.split(',')\n lng = float(lng)\n lat = float(lat)\n baidu_location = gcj02_to_bd09(lng, lat)\n location_poi_list.loc[len(location_poi_list)] = location_poi(baidu_location, AK_list[AK_num])\n i = i + 1\n print(\"Finish transfer {0} location's POI extracting of split file {1}.\".format(i, split_tag))\n location_poi_list['location'] = location_list\n location_poi_list.to_csv(csv_file)\n # ershous = return_poi_list(location_file_path, split_tag)\n # 锁定\n # if mutex.acquire(1):\n # total_num += len(ershous)\n # # 释放\n # mutex.release()\n # if fmt == \"csv\":\n # for poi_location in location_poi_list:\n # # print(date_string + \",\" + xiaoqu.text())\n # f.write(poi_location.text() + \"\\n\")\n print(\"Finish crawl split file: \" + location_file_path + file_tag + \", save data to : \" + csv_file)\n return None\n\nif __name__ == '__main__':\n time_start = time.time()\n print(\"start extract poi information from baidu......\")\n # creat baidu_APIKEY list\n location_file_path = 'address_location_split_part_0'\n # location_file_path = 'address_location_detail'\n # split_tag = 0\n # multi_thread_location_poi(location_file_path, split_tag, None)\n\n # 准备线程池用到的参数\n nones = [None for i in range(9)]\n split_list = [i for i in range(9)]\n file_list = [location_file_path for i in range(9)]\n args = zip(zip(file_list, split_list),nones)\n # areas = areas[0: 1] # For debugging\n\n # 针对每个板块写一个文件,启动一个线程来操作\n pool_size = threadpool_size\n pool = threadpool.ThreadPool(pool_size)\n my_requests = threadpool.makeRequests(multi_thread_location_poi, args)\n [pool.putRequest(req) for req in my_requests]\n pool.wait()\n pool.dismissWorkers(pool_size, do_join=True)\n time_end = time.time()\n print(\"Finished all POI information extration, using %f.\" % (time_end - time_start))\n\n\n\n\n","sub_path":"location2poi.py","file_name":"location2poi.py","file_ext":"py","file_size_in_byte":6588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"82058845","text":"import sys\n\nrecipes = []\nhas_changes = False\nfilename = 'recipes.rd1'\n\nif (len(sys.argv)>1):\n filename = sys.argv[1]\n\nif filename.endswith('.rd1'):\n with open(filename) as f: \n recipe = {}\n for line in f.readlines():\n line = line.rstrip()\n if not line: \n continue \n key = line[:1]\n value = line[3:]\n if key == 'n':\n recipe = next((r for r in recipes if r['name'] == value), None)\n if not recipe and value:\n recipe = { 'name': value, 'description': [], 'ingredients': [] }\n recipes.append(recipe)\n elif key == 'd':\n recipe['description'].append(value)\n elif key == 'i':\n recipe['ingredients'].append(value)\n\nif filename.endswith('.rd2'):\n with open(filename) as f:\n while True: \n line = f.readline()\n if not line:\n break\n (description_lines, ingredients, name) = line.split(' ', 2)\n recipe = { 'name': name.strip(), 'description': [], 'ingredients': [] }\n for i in range(int(description_lines)):\n recipe['description'].append(f.readline().strip())\n for i in range(int(ingredients)):\n recipe['ingredients'].append(f.readline().strip())\n recipes.append(recipe)\n \ndef display_recipes():\n print(\"Available recipes:\")\n for i, recipe in enumerate(recipes):\n print(f\"\\t{i+1} - {recipe['name']}\")\n\ndef get_users_choice(number_of_recipes: int) -> (str, int): \n while True:\n print(\"\\n(choose recipe number to see it, type 'add' to add a new one, or press enter to exit)\")\n choice = input(\"which recipe would you like to see?\\n\")\n if not choice:\n return ('exit', None)\n elif choice == 'add':\n return ('add', None)\n elif choice.isdigit() and 0 < int(choice) <= number_of_recipes:\n return ('see', int(choice)-1)\n\n print(f\"The number must be between 1 and {number_of_recipes}!\")\n\ndef display_recipe(recipe):\n newline = '\\n'\n print(f\"\\nThe {recipe.get('name', 'UNKNOWN')}: {newline.join(recipe.get('description', ['NO DESCRIPTION']))}\")\n print(\"\\nTo make it you will need:\")\n for ingredient in recipe.get('ingredients', []):\n print(f\"\\t{ingredient}\")\n\ndef collect_recipe() -> dict:\n recipe = { 'description': [], 'ingredients': []}\n print(\"Adding a new recipe for: \", end='')\n name = input().strip()\n if not name: \n return None\n recipe['name'] = name \n while True: \n description = input(\"description: \").strip()\n if not description:\n break \n recipe['description'].append(description)\n while True: \n ingredient = input(\"ingredient: \").strip()\n if not ingredient:\n break \n recipe['ingredients'].append(ingredient)\n return recipe \n\ndef save_changes(recipes: list):\n newline = '\\n'\n with open(filename, 'w') as f: \n if filename.endswith('rd1'):\n for recipe in recipes: \n f.write(f\"n: {recipe['name']}\"+newline)\n for desc in recipe['description']: \n f.write(f\"d: {desc}\"+newline)\n for ingredient in recipe['ingredients']: \n f.write(f\"i: {ingredient}\"+newline)\n if filename.endswith('rd2'):\n for recipe in recipes: \n f.write(f\"{len(recipe['description'])} {len(recipe['ingredients'])} {recipe['name']}\"+newline)\n for desc in recipe['description']: \n f.write(f\"{desc}\"+newline)\n for ingredient in recipe['ingredients']: \n f.write(f\"{ingredient}\"+newline)\n\ndef update_recipes_with(recipe: dict):\n while True:\n if recipe: \n existing = next((r for r in recipes if r['name'] == recipe['name']), None)\n if existing:\n while True: \n print(f\"We already have a recipe for '{recipe['name']}'. Do you want to replace it? (say 'yes' or 'no'): \")\n choice = input().strip()\n if choice == 'yes':\n recipes.remove(existing)\n break\n elif choice == 'no': \n newname = input(\"Save the new recipe as: \").strip()\n if newname:\n recipe['name'] = newname\n break \n else: \n recipes.append(recipe)\n break\n# the main application loop. keep going until it is time to end\nwhile True: \n display_recipes()\n (action, recipe_id) = get_users_choice(len(recipes))\n if action == 'exit': \n if has_changes:\n while True: \n choice = input('Do you want to save changes to recipes.rd1? (yes or no): ').strip()\n if choice == 'yes':\n save_changes(recipes)\n break\n elif choice == 'no':\n break \n print(\"\\nThank your for cooking with Python. Goodbye.\")\n break \n if action == 'see':\n display_recipe(recipes[recipe_id])\n elif action == 'add':\n update_recipes_with(collect_recipe())\n has_changes = True \n \n print(\"\\n\\nLet's try again!\\n\")","sub_path":"steps/step04/recipes.py","file_name":"recipes.py","file_ext":"py","file_size_in_byte":5435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"634664172","text":"from ch_1_the_field import plotting, image, png\nimport os\n\nrepo_path = os.path.expanduser(\"~\") + \"/Documents/coding_the_matrix\"\nimage_path = repo_path + \"/support/ch_1_the_field/img01.png\"\n\nS = {2 + 2j, 3 + 2j, 1.75 + 1j, 2 + 1j, 2.25 + 1j, 2.5 +1j, 2.75 + 1j, 3 + 1j, 3.25 + 1j}\n\ndef task_1_4_1():\n print(S)\n plotting.plot(s)\n\ndef task_1_4_3():\n transform = 1 + 2j # one unit in x, 2 in y\n transformed = {transform + z for z in S}\n plotting.plot(transformed, 6)\n\ndef task_1_4_7():\n transform = .5 # scalling down\n transformed = {transform * z for z in S}\n print(S)\n print(transformed)\n plotting.plot(transformed, 6)\n\ndef task_1_4_9():\n transform = .5j\n transformed = {transform * z for z in S}\n plotting.plot(transformed, 6) \n\ndef task_1_4_10():\n imported = image.file2image(image_path)\n print(\"imported image\")\n \nif __name__ == \"__main__\":\n # task_1_4_1()\n # task_1_4_3()\n # task_1_4_7()\n # task_1_4_9()\n task_1_4_10()","sub_path":"tasks/luke/ch_1.py","file_name":"ch_1.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"148696850","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Feb 28 16:47:15 2019\n\n@author: marcin\n\n\"\"\"\n\ninput_str = input('wpisz jakis string: ')\ndimensions = [int(x) for x in input_str.split(',')]\nrowNum = dimensions[0]\ncolNum = dimensions[1]\nmultilist = [[0 for col in range(colNum)] for row in range(rowNum)]\n\nfor row in range(rowNum):\n for col in range(colNum):\n multilist[row][col]=row*col\n \n \nprint (multilist)","sub_path":"untitled6.py","file_name":"untitled6.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"307429977","text":"from uav_data.models.models import *\n\nmodel_registry = {\n 'applicationstatus': ApplicationStatus,\n 'virusstatus': VirusStatus,\n 'antivirusmachine': AntivirusMachine,\n 'machineapp': MachineApp,\n 'machineapp_app_idx': MachineAppAppIdx,\n 'antivirus': AntiVirus,\n 'virus_machine': VirusMachine,\n 'application': Application,\n 'virus': Virus,\n 'machinetbl': MachineTbl,\n 'machinestatus': MachineStatus,\n 'falsestatus': FalseStatus,\n 'activity': Activity,\n 'antivirusactivity': AntivirusActivity\n}\n","sub_path":"uav_data/models/model_registry.py","file_name":"model_registry.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"209502091","text":"def ChkPrime(num):\n if num>1:\n for i in (2,num):\n if num!=2 and (num%i)==0:\n return True\n elif num==1:\n return True\n else:\n return False\n\n\n\ndef add(no):\n ad=0\n for i in range(0, len(no)):\n\n ad=ad+no[i]\n return ad","sub_path":"Assignment_4/MarvellousNum.py","file_name":"MarvellousNum.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"91523851","text":"#!/usr/bin/env python\n\n# ROS node for the Neato Robot Vacuum\n# Copyright (c) 2010 University at Albany. All right reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of the University at Albany nor the names of its \n# contributors may be used to endorse or promote products derived \n# from this software without specific prior written permission.\n# \n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL VANADIUM LABS BE LIABLE FOR ANY DIRECT, INDIRECT,\n# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,\n# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF\n# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE\n# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF\n# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\"\"\"\nROS node for Neato XV-11 Robot Vacuum.\n\"\"\"\n\n__author__ = \"ferguson@cs.albany.edu (Michael Ferguson)\"\n\nimport roslib; roslib.load_manifest(\"neato_node\")\nimport serial\nimport rospy\nimport time\nfrom math import sin,cos\n\nfrom sensor_msgs.msg import LaserScan\nfrom geometry_msgs.msg import Quaternion\nfrom geometry_msgs.msg import Twist\nfrom nav_msgs.msg import Odometry\nfrom tf.broadcaster import TransformBroadcaster\n\n# from neato_driver.neato_driver import xv11, BASE_WIDTH, MAX_SPEED\n# Neato Laser Distance Sensor\n\n\nclass Lds:\n STATE_START = 0\n STATE_SPEED1 = 1\n STATE_SPEED2 = 2\n STATE_DIST1 = 3\n STATE_DIST2 = 4\n STATE_STRENGTH1 = 5\n STATE_STRENGTH2 = 6\n STATE_END = 7\n STATE_ERROR = 8\n \n def __init__(self):\n self.state = 0\n self.prevbyte = 0x00\n self.cached_b2 = 0x00\n self.cached_b3 = 0x00\n self.angle = 0\n self.angle_deg = 0\n self.ranges = list()\n self.scanPub = rospy.Publisher('base_scan', LaserScan)\n\n # things that don't ever change\n scan_link = rospy.get_param('~frame_id','base_link')\n self.scan = LaserScan(header=rospy.Header(frame_id=scan_link))\n self.scan.angle_min = 0\n self.scan.angle_max = 6.26\n self.scan.angle_increment = 0.017437326\n self.scan.range_min = 0.020\n self.scan.range_max = 5.0\n self.state = Lds.STATE_START\n\n # the 0 is not aligned with the axis of the LDS\n angle = self.angle_deg - 10\n if angle < 0 : angle += 360\n\n\n def parse(self, data):\n # note data is always one byte, therefore should clean up\n data = str(data)\n #restore state of the parser\n state = self.state\n for byte in data:\n b = ord(byte)\n # rospy.loginfo(\"byte {:02X}\".format(b))\n # rospy.loginfo(\"b {:02X}\".format(b))\n # allow reset to start at any time\n if b == 0xC0 and (state == Lds.STATE_START or state == Lds.STATE_ERROR):\n # rospy.loginfo(\"start \" + \"{:02X}\".format(self.cached_b3) + \", {:02X}\".format(self.cached_b2) + \", {:02X}\".format(self.prevbyte) + \", {:02X}\".format(b));\n self.angle = 0\n state = Lds.STATE_SPEED1\n elif state == Lds.STATE_SPEED1:\n # rospy.loginfo(\"state_speed1\")\n state = Lds.STATE_SPEED2\n elif state == Lds.STATE_SPEED2:\n # goodvalue appears to be 5\n # goodvalue appears to be 5508, 20560 (?), 32776, 2056, 2176\n # goodvalue appears to be 5384\n # expecting 600 RPM?\n speed = self.prevbyte + (b << 8)\n # rospy.loginfo(\"state_speed2 = \" + str(speed))\n # rospy.loginfo(\"s \" + str(speed))\n state = Lds.STATE_DIST1\n elif state == Lds.STATE_DIST1:\n # data byte 0 : `\n # rospy.loginfo(\"state_dist1\")\n self.angle = self.angle + 1\n state = Lds.STATE_DIST2\n elif state == Lds.STATE_DIST2:\n # data byte 1 : \n # <\"invalid data\" flg> <\"quality warning\" flg> `\n if b & 0x80:\n error = self.prevbyte\n dist = 0.0\n strength_warning = False\n strength = 0.0\n # rospy.loginfo(\"invalid data(\" + str(self.angle) +\") = \" + str(error))\n rospy.loginfo(\"i\");\n state = Lds.STATE_ERROR\n else:\n self.error = 0\n if b & 0x40:\n strength_warning = True\n # rospy.loginfo(\"strength warning(\" + str(self.angle) +\")\")\n else:\n strength_warning = False\n dist = self.prevbyte | (( b & 0x3f) << 8)\n # may want to adjust as axis is off 10\n self.scan.ranges.append(dist / 1000.0)\n if (self.angle % 50 == 0):\n rospy.loginfo(\"d \" + str(self.angle))\n # rospy.loginfo(\"state_dist2(\" + str(self.angle) +\") = \" + str(dist))\n state = Lds.STATE_STRENGTH1\n elif state == Lds.STATE_STRENGTH1:\n # data byte 2 : `\n # rospy.loginfo(\"state_strength1\")\n state = Lds.STATE_STRENGTH2\n elif state == Lds.STATE_STRENGTH2:\n # data byte 3 : `\n # This value can get very high when facing a retroreflector. \n strength = self.prevbyte + (b << 8)\n # rospy.loginfo(\"strenght(\" + str(self.angle) +\") = \" + str(strength))\n if self.angle == 360:\n state = Lds.STATE_END\n # publish to ROS \n rospy.loginfo(\"full\"); \n self.scan.header.stamp = rospy.Time.now()\n self.scanPub.publish(self.scan)\n\t # rospy.loginfo(\"Published to ROS\")\n else:\n state = Lds.STATE_DIST1\n elif state == Lds.STATE_END:\n self.angle = self.angle + 1\n endbyte = self.angle - 360\n # rospy.loginfo(\"state_end\" + str(endbyte))\n # 3 end bytes then should be back to C0\n if endbyte > 3:\n state = Lds.STATE_ERROR\n else:\n state = Lds.STATE_ERROR\n # rospy.loginfo(\"state_error {:02X}\".format(b))\n #save current state\n self.state = state\n self.cached_b3 = self.cached_b2\n self.cached_b2 = self.prevbyte\n self.prevbyte = b\n\nclass NeatoNode:\n\n def __init__(self):\n \"\"\" Start up connection to the Neato Robot. \"\"\"\n rospy.init_node('neato')\n\n # portdev = rospy.get_param('~port', \"/dev/ttyACM0\")\n portdev = rospy.get_param('~port', \"/dev/prlite-neato\")\n rospy.loginfo(\"Using port: %s\"%(portdev))\n self.port = serial.Serial(portdev,115200)\n self.lds = Lds()\n\n def spin(self): \n # From http://xv11hacking.wikispaces.com/LIDAR+Sensor\n # The LiDAR spins counterclockwise at 10 revolutions per second.\n #\n # Early XV-11 units may be 3V3 powered! Not 5V!\n # Our XV-11 uses V2.1 firmware, which means that we need to use 3.3V!\n # Maximum current draw is 50 mA from the Arduino pin. \n # Sensor power consumption (does not include the motor): ~145mA @ 3.3V\n #\n # The motor can be powered at 3.3V continuous ( ~60mA ) in open loop,\n # which will produce a turn rate of around 240rpm on a clean and recent\n # sensor. Hair and dust can however create friction that will lower \n # the rotation speed.\n #\n # Data format for firmware 2.1 (Sparkfun scans, pre-production units)\n # The periodicity of the data is 1446 bytes.\n # \n # When valid, format is organized as follow :\n # \n # `5A A5 00 C0 XX XX ``\n # 90 165 0 192\n # XX and XX are speed\n # \n # `` is composed of 360 group of 4 bytes, organized like this :\n # `byte 0 : `\n # `byte 1 : <\"invalid data\" flg> <\"quality warning\" flg> `\n # `byte 2 : `\n # `byte 3 : `\n\n # The bit 7 of byte 1 seems to indicate that the distance could \n # not be calculated.\n # When this bit is set, the second byte is always `80`, the values \n # of the first byte may only be `02`, `03`, `21`, `25`, `35` or `50`.\n # When it's `21`, then the whole block is `21 80 XX XX`, \n # but for all the other values it's the data block is `YY 80 00 00`\n # maybe it's a code to say what type of error ? \n # (`35` is preponderant, `21` seems to be when the beam is interrupted \n # by the supports of the cover) .\n # Another thing to have a look to is the temporal repartition of the \n # data... the first sample after the sync seems to always be \n # `21 80 XX XX`, and when this pattern appears again, it's immediately \n # after an other value, without the 0.2ms interval we can see most \n # of the time between two blocks of 4...\n\n # The bit 6 of byte 1 is a warning when the reported strength is \n # greatly inferior to what is expected at this distance. This may \n # happen when the material has a low reflectance (black material...),\n # or when the dot does not have the expected size or shape (porous \n # material, transparent fabric, grid, edge of an object...), or maybe \n # when there are parasitic reflections (glass... ).\n\n # Byte 2 and 3 are the LSB and MSB of the strength indication. \n # This value can get very high when facing a retroreflector. \n\n # C0 hex = 192\n\n # main loop of driver\n r = rospy.Rate(5)\n rospy.loginfo(\"0\")\n # requestScan()\n while not rospy.is_shutdown():\n # string = self.port.readline()\n string = self.port.read()\n self.lds.parse(string)\n\nif __name__ == \"__main__\": \n robot = NeatoNode()\n robot.spin()\n\n","sub_path":"pr2lite_nav/nodes/pr2lite_neato.py","file_name":"pr2lite_neato.py","file_ext":"py","file_size_in_byte":10985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"51649419","text":"from pathlib import Path\nfrom ruamel.yaml import YAML\nfrom subprocess import call\n\ndef read_domain_config():\n # possible config file paths\n paths = [\n Path('/le/certs.yml'),\n Path('/le/certs.yaml'),\n *Path('/le/certs.d/').glob('*.yml'),\n *Path('/le/certs.d/').glob('*.yaml')\n ]\n\n # merge configuration files\n # Attention: similar keys override each other!\n config = {}\n for path in paths:\n if not path.is_file():\n continue\n config.update(YAML().load(path))\n\n print(config)\n return []\n\n # build params\n param_list = []\n for cert in config:\n if 'disabled' in config[cert] and config[cert]['disabled']:\n print('Ignoring {cert}'.format(cert=cert))\n continue\n\n debug = False\n params = (cert+\" certonly -n --agree-tos\"\n +\" --renew-with-new-domains\" # renew if domain-list changed\n +\" --keep-until-expiring\" # otherwise keep until it expires\n +\" --cert-name \"+cert\n )\n\n if not 'email' in config[cert]:\n print(\"Missing email for {cert}\".format(cert=cert))\n continue\n params += ' --email '+config[cert]['email']\n\n if not 'domains' in config[cert]:\n print(\"No domains for {cert}\".format(cert=cert))\n continue\n for d in config[cert]['domains']:\n params += ' -d '+d\n\n params += ' --preferred-challenges '\n if 'challenges' in config[cert]:\n params += config[cert]['challenges']\n else:\n params += 'http'\n\n if 'debug' in config[cert] and config[cert]['debug']:\n params += ' --debug'\n debug = True\n\n if 'dry_run' in config[cert] and config[cert]['dry_run']:\n params += ' --dry-run'\n print(\"-------------dRY\")\n else:\n endpoint = \"https://acme-v02.api.letsencrypt.org/directory\"\n if 'staging' in config[cert] and config[cert]['staging']:\n print('------------stage')\n params += ' --staging'\n endpoint = \"https://acme-staging-v02.api.letsencrypt.org/directory\"\n params += ' --server '+endpoint\n\n if 'webroot' in config[cert] and config[cert]['webroot']:\n params += ' --webroot -w '+config[cert]['webroot']\n else:\n params += ' --standalone'\n\n if 'args' in config[cert]:\n params += ' '+conf[cert]['args']\n\n if debug:\n print('Cerbot-args for {cert}: {params}'.format(cert=cert, params=params))\n\n param_list.append(params)\n\n return param_list\n\ndef get_cert(params):\n try:\n call('/scripts/run_certbot.sh {params}'.format(params=params), shell=True)\n finally:\n pass\n\nif __name__ == '__main__':\n try:\n param_list = read_domain_config()\n for p in param_list:\n get_cert(p)\n\n except Exception as e:\n print(\"Error: \"+str(e))\n pass\n","sub_path":"scripts/getcerts.py","file_name":"getcerts.py","file_ext":"py","file_size_in_byte":2986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"382923511","text":"import numpy as np\r\nimport pyautogui\r\nimport matplotlib.pyplot as plt\r\nimport cv2\r\nimport pandas as pd\r\nimport os\r\nfrom imutils import face_utils\r\nimport dlib\r\nimport time\r\nimport tensorflow as tf\r\n\r\n\r\n#start=0\r\n#vtiem=[]\r\n# Vamos inicializar um detector de faces (HOG) para então\r\n# let's go code an faces detector(HOG) and after detect the \r\n# landmarks on this detected face\r\n\r\n# p = our pre-treined model directory, on my case, it's on the same script's diretory.\r\np = \"AuxFiles/shape_predictor_68_face_landmarks.dat\"\r\ndetector = dlib.get_frontal_face_detector()\r\npredictor = dlib.shape_predictor(p)\r\n\r\n\r\nmodel1 = tf.keras.models.load_model('AuxFiles/saved_modelbywd1/my_model')\r\nmodel2 = tf.keras.models.load_model('AuxFiles/saved_modelo4aux/my_model')\r\ncap = cv2.VideoCapture(0)\r\n\r\n\r\nwhile(True):\r\n# Capture the video frame \r\n # by frame \r\n _, frame = cap.read()\r\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\r\n \r\n # Get faces into webcam's image\r\n rects = detector(gray, 0)\r\n \r\n # For each detected face, find the landmark.\r\n for (i, rect) in enumerate(rects):\r\n \t# Make the prediction and transfom it to numpy array\r\n\t shape = predictor(gray, rect)\r\n\t shape = face_utils.shape_to_np(shape)\r\n\t o1=shape[36:41,:]\r\n\t o2=shape[42:47,:]\r\n\r\n \r\n\t # Draw on our image, all the finded cordinate points (x,y) \r\n\t '''\r\n\t for (x, y) in shape:\r\n\t cv2.circle(frame, (x, y), 2, (0, 255, 0), -1)\r\n\t '''\r\n\t o1x=[int(max(0,np.min(o1[:,0])-15)),int(min(1280,np.max(o1[:,0])+15))]\r\n\t o1y=[int(max(0,np.min(o1[:,1])-15)),int(min(1280,np.max(o1[:,1])+15))]\r\n\r\n\t o2x=[int(max(0,np.min(o2[:,0])-15)),int(min(1280,np.max(o2[:,0])+15))]\r\n\t o2y=[int(max(0,np.min(o2[:,1])-15)),int(min(1280,np.max(o2[:,1])+15))]\r\n\r\n\r\n\t recortada1=gray[int(o1y[0]):int(o1y[1]), int(o1x[0]):int(o1x[1])]\r\n\t recortada2=gray[int(o2y[0]):int(o2y[1]), int(o2x[0]):int(o2x[1])]\r\n\r\n\t cv2.rectangle(frame,(o1x[0],o1y[0]),(o1x[1],o1y[1]),(255,0,0),2)\r\n\t cv2.rectangle(frame,(o2x[0],o2y[0]),(o2x[1],o2y[1]),(255,0,0),2)\r\n\r\n\t dim=(42,50)\r\n\t normalizada1=cv2.resize(recortada1,(dim))/255\r\n\t normalizada2=cv2.resize(recortada2,(dim))/255\r\n\r\n\t normalizada1=np.reshape(normalizada1,(1,42,50,1))\r\n\t normalizada2=np.reshape(normalizada2,(1,42,50,1))\r\n\r\n\t c2=model1.predict(normalizada1)\r\n\t c1=model2.predict(normalizada2)\r\n\r\n\t cv2.circle(frame,(c1[0][0],c1[0][1]),10,(255,0,255),3)\r\n\t cv2.circle(frame,(c2[0][0],c2[0][1]),10,(0,0,255),3)\r\n\r\n\t pred=(c1+c2)/2\r\n\t #cv2.circle(frame,(pred[0][0],pred[0][1]),10,(255,0,255),3)\r\n\r\n\t print(pred)\r\n\r\n\r\n\r\n\t# Display the resulting frame \r\n cv2.namedWindow('Grabando', cv2.WINDOW_NORMAL)\r\n cv2.imshow('Grabando', frame) \r\n #stop=time.time()\r\n #act=stop-start\r\n #start=stop\r\n #vtiem.append(act) \r\n k = cv2.waitKey(1) & 0xFF\r\n if k == 27:\r\n #np.savetxt('dlimod.csv',vtiem,delimiter=',')\r\n break\r\n\r\ncv2.destroyAllWindows()\r\ncap.release()","sub_path":"carpeta1/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"254901673","text":"__author__ = 'gkour'\n\nimport tensorflow as tf\nimport tensorflow.contrib.slim as slim\nimport utils as utils\nimport numpy as np\n\n\nclass BrainPG:\n sess = None\n\n @staticmethod\n def init_session():\n if BrainPG.sess is None:\n tf.reset_default_graph()\n BrainPG.sess = tf.Session()\n\n def __init__(self, lr, s_size, action_size, h_size, scope, gamma, copy_from_scope=None):\n self._s_size = s_size\n self._action_size = action_size\n self._h_size = h_size\n self._gamma = gamma\n self._regularization_param = 0.001\n\n # Implementing F(state)=action\n self.state_in = tf.placeholder(shape=[None, self._s_size], dtype=tf.float32)\n self.reward_holder = tf.placeholder(shape=[None], dtype=tf.float32)\n self.action_holder = tf.placeholder(shape=[None], dtype=tf.int32)\n\n self.action_distribution = self._construct_policy_model(scope)\n\n taken_action_probability = BrainPG.get_decision_probability(self.action_holder, self.action_distribution)\n\n loss = -tf.reduce_mean(tf.log(taken_action_probability) * self.reward_holder)\n self.optimize = tf.train.RMSPropOptimizer(learning_rate=lr).minimize(loss)\n\n # Initialize Variables\n BrainPG.sess.run(tf.variables_initializer(tf.get_collection(tf.GraphKeys.VARIABLES, scope)))\n\n self.saver = tf.train.Saver(tf.get_collection(tf.GraphKeys.VARIABLES, scope))\n\n if copy_from_scope is not None:\n BrainPG.sess.run(utils.update_target_graph(copy_from_scope, scope))\n\n def _construct_policy_model(self, scope):\n with tf.variable_scope(scope):\n net = slim.stack(self.state_in, slim.fully_connected, [self._h_size], activation_fn=tf.nn.relu)\n\n action_output = slim.fully_connected(net, self._action_size, activation_fn=tf.nn.softmax,\n weights_regularizer=slim.l2_regularizer(self._regularization_param))\n\n return action_output\n\n @staticmethod\n def get_decision_probability(actual_decision, decisions_probabilities):\n action_indexes = tf.range(0, tf.shape(decisions_probabilities)[0]) * tf.shape(decisions_probabilities)[\n 1] + actual_decision\n return tf.gather(tf.reshape(decisions_probabilities, [-1]), action_indexes)\n\n def save_model(self, path):\n self.saver.save(BrainPG.sess, path)\n\n def load_model(self, path):\n self.saver.restore(BrainPG.sess, path)\n\n def act(self, obs):\n action_dist = BrainPG.sess.run(self.action_distribution, feed_dict={self.state_in: [obs]})\n action = utils.dist_selection(action_dist[0])\n # action = utils.epsilon_greedy(0.01, action_dist[0])\n return action\n\n def act_dist(self, sess, obs):\n action_dist = sess.run(self.action_distribution, feed_dict={self.state_in: [obs]})\n return action_dist[0]\n\n def train(self, batch_obs, batch_acts, batch_rews, batch_newstate):\n feed_dict = {self.reward_holder: batch_rews,\n self.action_holder: batch_acts,\n self.state_in: np.vstack(batch_obs)}\n\n BrainPG.sess.run([self.optimize], feed_dict=feed_dict)\n\n def state_size(self):\n return self._s_size\n","sub_path":"brains/brainpg.py","file_name":"brainpg.py","file_ext":"py","file_size_in_byte":3251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"394975377","text":"# -*- coding: utf-8 -*-\r\nfrom flask import Flask, render_template, url_for, flash, redirect, request, make_response\r\nfrom flask_cors import CORS, cross_origin\r\nimport json\r\n\r\nfrom models import faceanalysis\r\nfrom config.config import config\r\n\r\napi = Flask(__name__)\r\nCORS(api, support_credentials=True)\r\n\r\n@api.route('/auth', methods=['POST'])\r\ndef auth():\r\n\r\n # 処理開始\r\n print(\"face_recognition start.\")\r\n # print(\"post param[filename]=\" + request.form[\"filename\"])\r\n\r\n # 認証\r\n data = json.loads(request.data.decode('utf-8'))\r\n # print(\"data\", data)\r\n result_json = faceanalysis.auth(\r\n data[\"image\"],\r\n data[\"threshold\"]\r\n )\r\n # print(\"result_json\", result_json)\r\n # 認証画像を保存\r\n # faceanalysis.save_authimage(data[\"image\"], config[\"auth_images_dir\"], result_json)\r\n # レスポンス整形\r\n response = make_response(result_json) \r\n\r\n # 処理終了\r\n print(\"face_recognition result :\", result_json[\"facedetection_result\"])\r\n print(\"auth_result result :\", result_json[\"auth_result\"])\r\n print(\"face_recognition end.\")\r\n\r\n return response\r\n\r\n@api.route('/facedetection', methods=['POST'])\r\ndef facedetection():\r\n data = json.loads(request.data.decode('utf-8'))\r\n result_json = faceanalysis.detect_face(data[\"image\"])\r\n return make_response(result_json)\r\n\r\n@api.route('/boundingbox', methods=['POST'])\r\ndef boundingbox():\r\n data = json.loads(request.data.decode('utf-8'))\r\n\r\n rgb_img = faceanalysis.load_rgbimg(data[\"image\"])\r\n bb = faceanalysis.face_boundingbox(rgb_img)\r\n bb_for_display = faceanalysis.face_boundingbox_for_display(bb)\r\n\r\n return make_response(bb_for_display)\r\n\r\n@api.route('/facevector', methods=['POST'])\r\ndef facevector():\r\n data = json.loads(request.data.decode('utf-8'))\r\n\r\n rgb_img = faceanalysis.load_rgbimg(data[\"image_dataurl\"])\r\n bb = faceanalysis.face_boundingbox(rgb_img)\r\n face_vector = faceanalysis.face_vector(rgb_img, bb)\r\n\r\n return make_response({\"face_vector\": face_vector})\r\n\r\n@api.route('/saveimage', methods=['POST'])\r\ndef saveimage():\r\n # 登録画像(利用開始情報)を保存\r\n save_result = faceanalysis.save_registimage(data[\"image\"], config[\"regist_images_dir\"])\r\n return make_response(save_result)","sub_path":"201216/store/ictstore-app/faceanalysis-api/api/controllers/faceanalysis_controller.py","file_name":"faceanalysis_controller.py","file_ext":"py","file_size_in_byte":2322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"7290384","text":"\"\"\"The hello command.\"\"\"\n\n\nfrom json import dumps\nfrom .base import Base\nfrom .utils import *\n\nclass Hello(Base):\n \"\"\"Say hello, world!\"\"\"\n\n def run(self):\n print('This adds a file or directory to jagger config')\n print('You supplied the following options:', dumps(self.options, indent=2, sort_keys=True))\n target = self.options['']\n target = os.path.abspath(target)\n\n config = safeLoadConfig()\n changed_config = False\n\n if os.path.isdir(target):\n if JAGGER_DIRS in config and target in config[JAGGER_DIRS]:\n print(\"Removed {}\".format(target))\n del config[JAGGER_DIRS][target]\n changed_config = True\n elif os.path.isfile(target):\n if JAGGER_FILES in config and target in config[JAGGER_FILES]:\n print(\"Removed {}\".format(target))\n del config[JAGGER_FILES][target]\n changed_config = True\n\n if changed_config:\n saveConfig(config)\n print(\"config has been updated\")\n printDict(safeLoadConfig())\n else:\n print(\"Nothing to remove.\")","sub_path":"jagger/commands/remove.py","file_name":"remove.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"27064199","text":"import sys\nsys.stdin = open('15686_치킨 배달.txt')\n\ndef comb(n, r):\n if r == 0:\n chicken()\n elif n< r:\n return\n else:\n chi2[r-1] = chi[n-1]\n comb(n-1, r-1)\n comb(n-1, r)\n\ndef chicken():\n global ans2\n dis = [list(99999 for _ in range(N)) for _ in range(N)]\n for x, y in chi2:\n for i in range(N):\n for j in range(N):\n dist = abs(j - x) + abs(i - y)\n if arr[i][j] == 1 and dis[i][j] >= dist:\n dis[i][j] = dist\n ans = 0\n for i in range(N):\n for j in range(N):\n if dis[i][j] != 99999:\n ans += dis[i][j]\n if ans2 >= ans:\n ans2 = ans\n\n\nN, M = map(int, input().split())\narr = [list(map(int, input().split())) for _ in range(N)]\nchi = []\nchi2 = [0] * M\nans2 = 9999999999\n# 모든 치킨집 추가\nfor i in range(N):\n for j in range(N):\n if arr[i][j] == 2:\n chi.append([j, i])\n# 조합으로 최대 M개의 치킨집 뽑기\ncomb(len(chi), M)\n\n\nprint(ans2)","sub_path":"08_algorithm/00_BAEKJOON/15686_치킨 배달.py","file_name":"15686_치킨 배달.py","file_ext":"py","file_size_in_byte":1039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"437968638","text":"\nimport tkinter\nimport cv2\nimport PIL.Image, PIL.ImageTk\nfrom cv2 import *\nimport numpy as np\n# Callback for the \"Blur\" button\n#def blur_image():\n # global cv_img\n # global photo\n \n # cv_img = cv2.blur(cv_img, (3, 3))\n # photo = PIL.ImageTk.PhotoImage(image = PIL.Image.fromarray(cv_img))\n # canvas.create_image(0, 0, image=photo, anchor=tkinter.NW)\n\ndef retake_image():\n global photo\n global img\n \n m = VideoCapture(0)\n s, img = m.read()\n photo = PIL.ImageTk.PhotoImage(image = PIL.Image.fromarray(img))\n canvas.create_image(0, 0, image = photo , anchor = tkinter.NW)\n ##updatedPicture = ImageTk.PhotoImage(Image.open(img))\n #w.configure(cv_img = updatedPicture)\n #height, width, no_channels = cv_img.shape\n #photo = PIL.ImageTk.PhotoImage(image = PIL.Image.fromarray(cv_img))\n #canvas.create_image(0, 0, image=photo, anchor=tkinter.NW)\n\n \n\ncam = VideoCapture(0)\ns, cv_img = cam.read()\n\nwindow = tkinter.Tk()\nwindow.title(\"Student Registration\")\n\n# Get the image dimensions (OpenCV stores image data as NumPy ndarray)\nheight, width, no_channels = cv_img.shape\n \n# Create a canvas that can fit the above image\ncanvas = tkinter.Canvas(window, width = width, height = height)\ncanvas.pack()\n \n# Use PIL (Pillow) to convert the NumPy ndarray to a PhotoImage\nphoto = PIL.ImageTk.PhotoImage(image = PIL.Image.fromarray(cv_img))\n \n# Add a PhotoImage to the Canvas\ncanvas.create_image(0, 0, image=photo, anchor=tkinter.NW)\n \n# Button that lets the user blur the image\nbtn_save=tkinter.Button(window, text=\"Save\", width=50, command=retake_image)\nbtn2_retake=tkinter.Button(window, text=\"Retake\", width=50, command=retake_image)\nbtn_save.pack(anchor=tkinter.CENTER, expand=True)\nbtn2_retake.pack(anchor=tkinter.CENTER, expand=True)\n \nwindow.mainloop()\n \n","sub_path":"upa/upa_test.py","file_name":"upa_test.py","file_ext":"py","file_size_in_byte":1799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"21865995","text":"import numpy as np\r\nimport cv2\r\nimport os\r\n\r\ndef clu(img):\r\n Z = img.reshape((-1, 3))\r\n\r\n # convert to np.float32\r\n Z = np.float32(Z)\r\n\r\n # define criteria, number of clusters(K) and apply kmeans()\r\n criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)\r\n K =8\r\n ret, label, center = cv2.kmeans(Z, K, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)\r\n\r\n # Now convert back into uint8, and make original image\r\n\r\n # 与0的距离\r\n centersqu = center * center\r\n sum_centersqu = np.sum(centersqu, axis=1)\r\n\r\n # 最小值与坐标\r\n coor = sum_centersqu.argmin()\r\n\r\n # print(label.min(),label.max())\r\n\r\n center = np.uint8(center)\r\n\r\n # 最后在图上画的是中心像素center的值\r\n res = center[label.flatten()]\r\n\r\n mask = np.ones(label.shape)\r\n\r\n for i in range(label.shape[0]):\r\n if label[i] == coor:\r\n\r\n mask[i] = 0\r\n else:\r\n mask[i] = 255\r\n\r\n mask = mask.reshape(img.shape[0], img.shape[1])\r\n # cv2.imshow('mask', mask)\r\n res2 = res.reshape((img.shape))\r\n\r\n return mask,res2\r\n\r\n # cv2.imshow('res2', res2)\r\n # cv2.waitKey(0)\r\n # cv2.destroyAllWindows()\r\n\r\nif __name__=='__main__':\r\n\r\n path='C:\\\\Users\\\\songwendong\\\\Desktop\\\\519\\\\half-bg-shadow-lap'\r\n dirs=os.listdir(path)\r\n\r\n\r\n destpath='C:\\\\Users\\\\songwendong\\\\Desktop\\\\519\\\\half-bg-shadow-lap-clus'\r\n\r\n count=1\r\n for file in dirs:\r\n\r\n name=os.path.join(path,file)\r\n img = cv2.imread(name)\r\n mask,_=clu(img)\r\n\r\n destname=os.path.join(destpath,file)\r\n # print(destname)\r\n cv2.imwrite(destname,mask)\r\n print(count)\r\n count+=1\r\n\r\n\r\n\r\n","sub_path":"PycharmProjects/paper/Myowncluster.py","file_name":"Myowncluster.py","file_ext":"py","file_size_in_byte":1691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"358609562","text":"from flask import render_template, redirect, request, flash\nfrom FlaskApp import app, db, repo\nfrom FlaskApp.forms import RateForm, Task2From\nfrom FlaskApp.mapping import Mapping\nfrom FlaskApp.models import User\nimport geopy.distance\nimport itertools\nimport math\nimport z3\n\n\n@app.route('/')\n@app.route('/index')\ndef index():\n return render_template('index.html', title='Project3-Homepage')\n\n\n@app.route('/task1')\ndef task1(): \n Mapping()\n return render_template('task1.html', title='Quantify the Rivalry')\n\n\n@app.route('/task2', methods=['GET', 'POST'])\ndef task2():\n form = Task2From()\n\n if request.method == 'GET':\n return render_template('task2.html', title='A Casual Exploration', form=form)\n elif request.method == 'POST':\n if form.validate_on_submit():\n # first selection-------------------------------------------------------------------------------------------\n place_id1 = form.Stores.data\n if place_id1 != 'None':\n evi_cases1 = 0\n cri_cases1 = 0\n rating1 = 0.0\n lat1 = 0.0\n lng1 = 0.0\n for document in repo.henryhcy_jshen97_leochans_wangyp.countEvictionCrimeCVS.find({'place_id': place_id1}):\n evi_cases1 = document['eviction_case']\n cri_cases1 = document['crime_case']\n rating1 = document['rating']\n lat1 = document['location']['lat']\n lng1 = document['location']['lng']\n\n vicinity1 = ''\n for document in repo.henryhcy_jshen97_leochans_wangyp.cvs.find({'place_id': place_id1}):\n vicinity1 = document['vicinity']\n\n flash(\"Result1--Store1: {}; Rating: {}; Evictions: {}; Larcenies: {}.\".format(vicinity1, rating1, evi_cases1, cri_cases1), 'error')\n\n # second selection------------------------------------------------------------------------------------------\n place_id2 = form.Compare.data\n if (place_id2 != 'None' and place_id1!= 'None'):\n if (place_id2 == place_id1):\n flash(\"Result2--Please select a different store.\")\n else:\n evi_cases2 = 0\n cri_cases2 = 0\n rating2 = 0.0\n lat2 = 0.0\n lng2 = 0.0\n for document in repo.henryhcy_jshen97_leochans_wangyp.countEvictionCrimeCVS.find({'place_id': place_id2}):\n evi_cases2 = document['eviction_case']\n cri_cases2 = document['crime_case']\n rating2 = document['rating']\n lat2 = document['location']['lat']\n lng2 = document['location']['lng']\n\n coord1 = (lat1, lng1)\n coord2 = (lat2, lng2)\n distance =geopy.distance.distance(coord1, coord2)\n\n evi_case_diff = (evi_cases1 - evi_cases2)\n cri_case_diff = (cri_cases1 - cri_cases2)\n\n vicinity2 = ''\n for document in repo.henryhcy_jshen97_leochans_wangyp.cvs.find({'place_id': place_id2}):\n vicinity2 = document['vicinity']\n\n flash(\"Result2--Store2: {}; Rating {}; DistanceTo: {}; Store1: {}; Evictions diff: {} Larcenies diff {}.\".format(vicinity2, rating2, distance, vicinity1, evi_case_diff, cri_case_diff))\n\n # third selection-------------------------------------------------------------------------------------------\n place_id3 = form.Total.data\n if (place_id3 != \"None\" and place_id1 != 'None' and place_id2 != 'None'):\n if (place_id3 == place_id1):\n flash(\"Result3--You select a the same store as store1. Please Select a different one.\")\n elif (place_id3 == place_id2):\n flash(\"Result3--You select a the same store as store2. Please Select a different one.\")\n else:\n corr_evi = 0.0\n corr_cri = 0.0\n for document in repo.henryhcy_jshen97_leochans_wangyp.correlationCVS.find():\n if document['document_type'] == 'rating_eviction_correlation':\n corr_evi = document['corr']\n elif document['document_type'] == 'rating_crime_correlation':\n corr_cri = document['corr']\n c = corr_cri/corr_evi\n\n input_list = [place_id1, place_id2, place_id3]\n rating_list = []\n coord_list = []\n total_stab = 0.0\n for document in repo.henryhcy_jshen97_leochans_wangyp.countEvictionCrimeCVS.find({'place_id': {'$in': input_list}}):\n rating_list.append(document['rating'])\n total_stab += math.pow(document['crime_case'], c)/(document['eviction_case'])\n coord_list.append( (document['location']['lat'], document['location']['lng']) )\n dist01 = geopy.distance.distance(coord_list[0], coord_list[1]).km\n dist02 = geopy.distance.distance(coord_list[0], coord_list[2]).km\n dist12 = geopy.distance.distance(coord_list[1], coord_list[2]).km\n total_access = dist01 + dist02 + dist12\n\n vicinity3 = ''\n for document in repo.henryhcy_jshen97_leochans_wangyp.cvs.find({'place_id': place_id3}):\n vicinity3 = document['vicinity']\n\n flash(\"Result3--Store1: {}; Store2: {}; Store3: {}; Total Stability S: {}; Total Accessibilty A = {}; Respective Ratings: {}, {}, {}.\".format(vicinity1, vicinity2, vicinity3, total_stab, total_access, rating_list[0], rating_list[1], rating_list[2]))\n\n # fourth, fifth, and sixth selection------------------------------------------------------------------------\n K = int(form.K.data)\n S = form.S.data\n A = form.A.data\n if S != '' and A != '':\n try:\n S = int(form.S.data)\n A = int(form.A.data)\n except ValueError:\n flash(\"Invalid inputs on Filed 5 & 6.\")\n return redirect(\"/task2\")\n\n corr_evi = 0.0\n corr_cri = 0.0\n for document in repo.henryhcy_jshen97_leochans_wangyp.correlationCVS.find():\n if document['document_type'] == 'rating_eviction_correlation':\n corr_evi = document['corr']\n elif document['document_type'] == 'rating_crime_correlation':\n corr_cri = document['corr']\n c = corr_cri / corr_evi\n\n pid_list = []\n geo_list = []\n stab_list = []\n for document in repo.henryhcy_jshen97_leochans_wangyp.countEvictionCrimeCVS.find():\n pid = document['place_id']\n location_coordinate = (document['location']['lat'], document['location']['lng'])\n stability = (math.pow(document['crime_case'], c) / document['eviction_case']) * 1000\n\n pid_list.append(pid)\n geo_list.append(location_coordinate)\n stab_list.append(stability)\n addr_list = []\n for document in repo.henryhcy_jshen97_leochans_wangyp.cvs.find({'place_id': {'$in': pid_list}}):\n addr_list.append(document['vicinity'])\n\n solver = z3.Solver()\n X = [z3.Real('x{}'.format(i)) for i in range(len(pid_list))]\n\n # constraints:\n # (1) choose exactly K stores\n for i in X:\n solver.add(z3.Or(i == 0, i == 1))\n solver.add(sum(X) == K)\n # (2) the total stability of K stores must greater than or equal to S\n solver.add(sum([X[i]*stab_list[i] for i in range(len(pid_list))]) >= S)\n # (3) the total accessibility of k stores must greater than or equal to A\n two_subset = list(itertools.combinations([i for i in range(len(pid_list))], 2))\n solver.add(sum([X[i[0]] * X[i[1]] * (geopy.distance.distance(geo_list[i[0]], geo_list[i[1]]).km) for i in two_subset]) >= A)\n\n # get the solution and return their address\n solution_list = []\n if (solver.check() == z3.sat):\n m = solver.model()\n for i in range(len(pid_list)):\n name = 'x{}'.format(i)\n if (m[z3.Real(name)] == 1):\n solution_list.append(addr_list[i])\n else:\n solution_list.append('not solution found')\n flash(\"The solution for K-salesmen is: {}\".format(str(solution_list)))\n\n return redirect('/task2')\n else:\n flash(\"We are sorry. Something went wrong.\")\n return render_template('task2.html', title='A Casual Exploration', form=form)\n\n\n@app.route('/task2/map')\ndef task2map():\n return render_template('task2map.html', title='Task2 Map')\n\n\n@app.route('/report')\ndef report():\n return render_template('report.html', title='Final Report')\n\n\n@app.route('/feedback', methods=['GET', 'POST'])\ndef feedback():\n form = RateForm()\n\n if request.method == 'GET':\n return render_template('feedback.html', title='Project3-Feedback', form=form)\n elif request.method == 'POST':\n if form.validate_on_submit():\n if form.Name.data == 'DELETE_ALL' and form.Ratings.data == '1':\n for i in User.query.all():\n db.session.delete(i)\n db.session.commit()\n flash(\"All comments deleted\")\n return redirect('/feedback')\n else:\n user = User(name=form.Name.data, ratings=form.Ratings.data, comments=form.Comments.data)\n try:\n db.session.add(user)\n db.session.commit()\n thank = \"Thank you for your time! Back to Homepage in 3 seconds.\"\n return render_template('feedback.html', title=\"Thanks\", thankyou=thank, form=form)\n except Exception:\n flash(\"Name already exist. Please choose a different one.\")\n return redirect(\"/feedback\")\n\n else:\n flash(\"Name and Rating are required.\")\n return render_template('feedback.html', title='Project3-Feedback', form=form)\n\n@app.route('/feedback/ratings')\ndef ratings():\n users = User.query.all()\n if users == []:\n return render_template('ratings.html', average='0', title='Ratings&Comments')\n else:\n count = 0\n total_ratings = 0\n messages = []\n for i in users:\n count += 1\n total_ratings += int(i.ratings)\n m = (i.name, \" rates {}/5 and says: {}\".format(i.ratings, i.comments))\n messages.append(m)\n average_rating = str(total_ratings/count)[0:3]\n return render_template('ratings.html', messages=messages, average=average_rating, title='Ratings&Comments')\n","sub_path":"henryhcy_jshen97_leochans_wangyp/FlaskApp/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":11441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"508701351","text":"from django.conf import settings\nfrom django_mailer import constants\n\n# Provide a way of temporarily pausing the sending of mail.\nPAUSE_SEND = getattr(settings, \"MAILER_PAUSE_SEND\", False)\n\nUSE_BACKEND = getattr(settings, 'MAILER_USE_BACKEND',\n 'django.core.mail.backends.smtp.EmailBackend')\n\n# Default priorities for the mail_admins and mail_managers methods.\nMAIL_ADMINS_PRIORITY = getattr(settings, 'MAILER_MAIL_ADMINS_PRIORITY',\n constants.PRIORITY_HIGH)\nMAIL_MANAGERS_PRIORITY = getattr(settings, 'MAILER_MAIL_MANAGERS_PRIORITY',\n None)\n\n# When queue is empty, how long to wait (in seconds) before checking again.\nEMPTY_QUEUE_SLEEP = getattr(settings, \"MAILER_EMPTY_QUEUE_SLEEP\", 30)\n\n# Lock timeout value. how long to wait for the lock to become available.\n# default behavior is to never wait for the lock to be available.\n# lockfile has a bug dealing with negative values so ensure it's always >= 0\nLOCK_WAIT_TIMEOUT = max(getattr(settings, \"MAILER_LOCK_WAIT_TIMEOUT\", 0), 0)\n\n# An optional alternate lock path, potentially useful if you have multiple\n# projects running on the same server.\nLOCK_PATH = getattr(settings, \"MAILER_LOCK_PATH\", None)\n\n\n# Controls for delivery\n# Allow sending a fixed/limited amount of emails in each delivery run\n# defaults to None which means send everything in the queue\nEMAIL_MAX_SENT = getattr(settings, \"MAILER_EMAIL_MAX_SENT\", None)\n\n# Stop sending emails in the current round if more than X emails get deferred\n# defaults to None which means keep going regardless\nEMAIL_MAX_DEFERRED = getattr(settings, \"MAILER_EMAIL_MAX_DEFERRED\", None)\n\n# When delivering, wait some time between emails to avoid server overload\n# defaults to 0 for no waiting\nEMAIL_THROTTLE = getattr(settings, \"MAILER_EMAIL_THROTTLE\", 0)\n","sub_path":"django_mailer/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"65910886","text":"#\n# @lc app=leetcode.cn id=860 lang=python3\n#\n# [860] 柠檬水找零\n#\n\n# @lc code=start\nclass Solution:\n def lemonadeChange(self, bills: List[int]) -> bool:\n # 贪心算法思想,找零时先考虑10美元,再看5美元的\n if not bills:\n return False\n count5, count10 = 0, 0\n for bill in bills:\n if bill == 5: count5 += 1\n elif bill == 10: count10, count5 = count10 + 1, count5 - 1\n elif count10: count10, count5 = count10 - 1, count5 - 1\n else: count5 -= 3\n # 每次来一个客户检查5美元是否够用\n if count5 < 0: return False\n return True\n \n# @lc code=end\n\n","sub_path":"Week_04/860.柠檬水找零.py","file_name":"860.柠檬水找零.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"308941692","text":"import logging\nimport time\nfrom typing import Dict\nfrom typing import List\n\nimport boto3\nimport botocore.exceptions\nimport neo4j\n\nfrom cartography.intel.aws.ec2.util import get_botocore_config\nfrom cartography.util import aws_handle_regions\nfrom cartography.util import run_cleanup_job\nfrom cartography.util import timeit\n\nlogger = logging.getLogger(__name__)\n\n# EMR API is subject to aggressive throttling, so we need to sleep a second between each call.\nLIST_SLEEP = 1\nDESCRIBE_SLEEP = 1\n\n\n@timeit\n@aws_handle_regions\ndef get_emr_clusters(boto3_session: boto3.session.Session, region: str) -> List[Dict]:\n client = boto3_session.client('emr', region_name=region, config=get_botocore_config())\n clusters: List[Dict] = []\n paginator = client.get_paginator('list_clusters')\n for page in paginator.paginate():\n cluster = page['Clusters']\n clusters.extend(cluster)\n time.sleep(LIST_SLEEP)\n return clusters\n\n\n@timeit\ndef get_emr_describe_cluster(boto3_session: boto3.session.Session, region: str, cluster_id: str) -> Dict:\n client = boto3_session.client('emr', region_name=region, config=get_botocore_config())\n cluster_details: Dict = {}\n try:\n response = client.describe_cluster(ClusterId=cluster_id)\n cluster_details = response['Cluster']\n except botocore.exceptions.ClientError as e:\n logger.warning(\n \"Could not run EMR describe_cluster due to boto3 error %s: %s. Skipping.\",\n e.response['Error']['Code'],\n e.response['Error']['Message'],\n )\n return cluster_details\n\n\n@timeit\ndef load_emr_clusters(\n neo4j_session: neo4j.Session, cluster_data: List[Dict], region: str, current_aws_account_id: str,\n aws_update_tag: int,\n) -> None:\n query = \"\"\"\n UNWIND $Clusters as emr_cluster\n MERGE (cluster:EMRCluster{id: emr_cluster.Name})\n ON CREATE SET cluster.firstseen = timestamp(),\n cluster.arn = emr_cluster.ClusterArn,\n cluster.id = emr_cluster.Id,\n cluster.region = $Region\n SET cluster.name = emr_cluster.Name,\n cluster.instance_collection_type = emr_cluster.InstanceCollectionType,\n cluster.log_encryption_kms_key_id = emr_cluster.LogEncryptionKmsKeyId,\n cluster.requested_ami_version = emr_cluster.RequestedAmiVersion,\n cluster.running_ami_version = emr_cluster.RunningAmiVersion,\n cluster.release_label = emr_cluster.ReleaseLabel,\n cluster.auto_terminate = emr_cluster.AutoTerminate,\n cluster.termination_protected = emr_cluster.TerminationProtected,\n cluster.visible_to_all_users = emr_cluster.VisibleToAllUsers,\n cluster.master_public_dns_name = emr_cluster.MasterPublicDnsName,\n cluster.security_configuration = emr_cluster.SecurityConfiguration,\n cluster.autoscaling_role = emr_cluster.AutoScalingRole,\n cluster.scale_down_behavior = emr_cluster.ScaleDownBehavior,\n cluster.custom_ami_id = emr_cluster.CustomAmiId,\n cluster.repo_upgrade_on_boot = emr_cluster.RepoUpgradeOnBoot,\n cluster.outpost_arn = emr_cluster.OutpostArn,\n cluster.log_uri = emr_cluster.LogUri,\n cluster.servicerole = emr_cluster.ServiceRole,\n cluster.lastupdated = $aws_update_tag\n WITH cluster\n\n MATCH (owner:AWSAccount{id: $AWS_ACCOUNT_ID})\n MERGE (owner)-[r:RESOURCE]->(cluster)\n ON CREATE SET r.firstseen = timestamp()\n SET r.lastupdated = $aws_update_tag\n \"\"\"\n\n logger.info(\"Loading EMR %d clusters for region '%s' into graph.\", len(cluster_data), region)\n neo4j_session.run(\n query,\n Clusters=cluster_data,\n Region=region,\n aws_update_tag=aws_update_tag,\n AWS_ACCOUNT_ID=current_aws_account_id,\n ).consume()\n\n\n@timeit\ndef cleanup(neo4j_session: neo4j.Session, common_job_parameters: Dict) -> None:\n logger.debug(\"Running EMR cleanup job.\")\n run_cleanup_job('aws_import_emr_cleanup.json', neo4j_session, common_job_parameters)\n\n\n@timeit\ndef sync(\n neo4j_session: neo4j.Session, boto3_session: boto3.session.Session, regions: List[str], current_aws_account_id: str,\n update_tag: int, common_job_parameters: Dict,\n) -> None:\n for region in regions:\n logger.info(\"Syncing EMR for region '%s' in account '%s'.\", region, current_aws_account_id)\n\n clusters = get_emr_clusters(boto3_session, region)\n\n cluster_data: List[Dict] = []\n for cluster in clusters:\n cluster_id = cluster['Id']\n cluster_details = get_emr_describe_cluster(boto3_session, region, cluster_id)\n if cluster_details:\n cluster_data.append(cluster_details)\n time.sleep(DESCRIBE_SLEEP)\n\n load_emr_clusters(neo4j_session, cluster_data, region, current_aws_account_id, update_tag)\n\n cleanup(neo4j_session, common_job_parameters)\n","sub_path":"cartography/intel/aws/emr.py","file_name":"emr.py","file_ext":"py","file_size_in_byte":4936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"599121770","text":"#!/usr/bin/env python\r\n\r\n# -*- encoding: utf-8 -*-\r\n\r\n'''\r\n@Author : HY\r\n@Software: PyCharm\r\n@File : CART.py\r\n@Time : 2019/5/24 16:36\r\n@Desc : CART算法的实现\r\n\r\n'''\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\ndef loadDataSet(fileName):\r\n with open(fileName) as file:\r\n con = file.readlines()\r\n dataSet=[]\r\n for ele in con:\r\n line=ele.strip().split('\\t')\r\n line=[float(l) for l in line]\r\n dataSet.append(line)\r\n return dataSet\r\ndef binSplitDataTest(dataSet,featureIdex,featureVal):\r\n \"\"\"\r\n Description:树回归算法中,根据特征二元切分数据集\r\n Params:\r\n\r\n Return:\r\n\r\n Author:\r\n HY\r\n Modify:\r\n 2019/5/25 16:13\r\n \"\"\"\r\n #注意np.nonzero的作用\r\n rightMat=dataSet[np.nonzero(dataSet[:,featureIdex]>featureVal)[0]]\r\n leftMat=dataSet[np.nonzero(dataSet[:,featureIdex]<=featureVal)[0]]\r\n return rightMat,leftMat\r\n\r\ndef regLef(dataSet):\r\n #把数据集矩阵展开成一行\r\n return np.mean(dataSet[:,-1])\r\n\r\ndef resErr(dataSet):\r\n return np.var(dataSet[:,-1])*np.shape(dataSet)[0]\r\n\r\n\r\ndef chooseBestSplit(dataSet,leafType=regLef,errorType=resErr,ops=(1,4)):\r\n \"\"\"\r\n Description:遍历所有的特征和特征的值,选择出最佳的切分点\r\n Params:\r\n\r\n Return:\r\n\r\n Author:\r\n HY\r\n Modify:\r\n 2019/5/25 16:14\r\n \"\"\"\r\n tolS=ops[0];tolN=ops[1]#停止条件,一个是误差,一个是分割数据的最小范围\r\n if len(set(dataSet[:,-1].T.tolist()[0]))==1:#停止条件,数据集中元素相同停止\r\n return None,leafType(dataSet)\r\n S=errorType(dataSet)\r\n bestFeatureIndex=0;bestFeatureVal=0\r\n m,n =np.shape(dataSet)\r\n smallestS=float('inf')\r\n for feaIn in range(n-1):#遍历所有的特征\r\n for splitVal in set(dataSet[:,feaIn].T.A.tolist()[0]):#根绝特征值来遍历\r\n rightMat, leftMat=binSplitDataTest(dataSet,feaIn,splitVal)#得到切分后的数据集\r\n if np.shape(rightMat)[0]= low:\n s += l[i]\n if s > left_sum:\n left_sum = s\n left_i = i\n i -= 1\n\n right_sum = -float(\"inf\")\n i = mid + 1\n s = 0\n while i <= high:\n s += l[i]\n if s > right_sum:\n right_sum = s\n right_i = i\n\n i += 1\n\n return (left_i, right_i, left_sum + right_sum)\n\n\ndef _find_maximum_subarray_imp(l, low, high):\n \"\"\"Divide and conquer tecnique for solve maximu sub-array\"\"\"\n if low == high:\n return (low, high, l[low])\n\n mid = low + int((high - low) / 2)\n mcros = _find_max_crossing_subarray(l, low, mid, high)\n mright = _find_maximum_subarray_imp(l , low, mid)\n mleft = _find_maximum_subarray_imp(l , mid + 1, high)\n\n r = [mcros[2], mright[2], mleft[2]]\n m = -float(\"inf\")\n idx = -1\n for i, val in enumerate(r):\n if m < val:\n m = val\n idx = i\n\n if idx == 0:\n return mcros\n elif idx == 1:\n return mright\n else:\n return mleft\n\ndef find_maximum_subarray(l):\n \"\"\"High high fucntion call\"\"\"\n return _find_maximum_subarray_imp(l, 0, len(l) -1)\n","sub_path":"misc/maximum_subarray.py","file_name":"maximum_subarray.py","file_ext":"py","file_size_in_byte":1383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"423013395","text":"from http.server import SimpleHTTPRequestHandler, HTTPServer\nfrom json import dumps\nfrom threading import Timer\nfrom webbrowser import open\nfrom icalevents.icalevents import events\n\nFILE = 'main.html'\nPORT = 8000\n\n\n# noinspection PyPep8Naming\nclass TestHandler(http.server.SimpleHTTPRequestHandler):\n \"\"\"\n The test example handler.\n \"\"\"\n\n def do_POST(self):\n \"\"\"\n Handle a post request by returning the square of the number.\n \"\"\"\n print(self.headers)\n print(self.headers.get_all('content-length'))\n print(self.rfile.read(int(self.headers.get_all('content-length')[0])))\n self.send_response(200)\n self.send_header(\"Content-type\", \"text/plain\")\n self.end_headers()\n self.flush_headers()\n self.wfile.write(build_event_list_json())\n\n\ndef build_event_list_json():\n gc_events = events(\"https://calendar.google.com/calendar/ical/ucu.edu.ua_gl1e5udah0l84\"\n \"uekquhjeqkgm0%40group.calendar.google.com/public/basic.ics\")\n event_list = [{'summary': gc_event.__dict__['summary'], 'start': (str(gc_event.__dict__['start']))[:19],\n 'end': (str(gc_event.__dict__['end']))[:19], 'description': gc_event.__dict__['description']} for\n gc_event in gc_events]\n return dumps(event_list)\n\n\ndef open_browser():\n \"\"\"\n Start a browser after waiting for half a second.\n \"\"\"\n\n def _open():\n open('http://localhost:%s/%s' % (PORT, FILE))\n\n Timer(0.5, _open).start()\n\n\ndef start_server():\n \"\"\"\n Start the server.\n \"\"\"\n HTTPServer((\"\", PORT), TestHandler).serve_forever()\n\n\nif __name__ == \"__main__\":\n open_browser()\n start_server()\n","sub_path":"hackaton/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"390666981","text":"import pandas as pd\nimport us\n\nfrom can_tools.scrapers import variables\nfrom can_tools.scrapers.official.base import StateDashboard\n\n\nclass OhioVaccineCounty(StateDashboard):\n has_location = False\n source = \"https://coronavirus.ohio.gov/wps/portal/gov/covid-19/dashboards/covid-19-vaccine/covid-19-vaccination-dashboard\"\n source_name = \"Ohio Department of Health\"\n state_fips = int(us.states.lookup(\"Ohio\").fips)\n url = \"https://coronavirus.ohio.gov/static/dashboards/vaccine_data.csv\"\n location_type = \"county\"\n\n variables = {\n \"vaccines_started\": variables.INITIATING_VACCINATIONS_ALL,\n \"vaccines_completed\": variables.FULLY_VACCINATED_ALL,\n }\n\n def fetch(self):\n return pd.read_csv(self.url, parse_dates=[\"date\"])\n\n def normalize(self, data: pd.DataFrame) -> pd.DataFrame:\n not_counties = [\"Out of State\", \"Unknown\"] # noqa\n dates = list(data[\"date\"].agg([min, max]))\n idx = pd.MultiIndex.from_product(\n [pd.date_range(*dates), sorted(list(data[\"county\"].unique()))],\n names=[\"dt\", \"location_name\"],\n )\n\n return (\n data.rename(columns={\"county\": \"location_name\", \"date\": \"dt\"})\n .set_index([\"dt\", \"location_name\"])\n .reindex(idx, fill_value=0)\n .unstack(level=[\"location_name\"])\n .sort_index()\n .cumsum()\n .stack(level=[0, 1])\n .rename(\"value\") # name the series\n .reset_index() # convert to long form df\n .rename(columns={\"level_1\": \"variable\"})\n .dropna()\n .assign(\n value=lambda x: pd.to_numeric(x.loc[:, \"value\"]),\n vintage=self._retrieve_vintage(),\n location_name=lambda x: x[\"location_name\"].str.strip(),\n )\n .query(\"location_name not in @not_counties\")\n .pipe(self.extract_CMU, cmu=self.variables)\n .drop([\"variable\"], axis=1)\n )\n","sub_path":"can_tools/scrapers/official/OH/oh_vaccine.py","file_name":"oh_vaccine.py","file_ext":"py","file_size_in_byte":1979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"653453616","text":"from core.views import BaseView\nfrom vlog.models import Category, Article, Tag, Comment\nfrom django.db.models import Count\nfrom django.core.paginator import Paginator\nfrom rest_framework import generics\nfrom vlog.serializers import CategorySerializer, ArticleSerializer, TagSerializer\n\n\nclass IndexView(BaseView):\n template_name = 'vlog/index.tpl'\n\n def get(self, request, *args, **kwargs):\n context = super().get_context_data(**kwargs)\n\n categories = Category.objects.annotate(\n category_population=Count(\n 'articles'\n )\n ).order_by(\n '-category_population'\n )[:3]\n context.update({'categories': categories})\n\n articles = Article.objects.annotate(\n article_comments=Count('comments')\n ).order_by('-article_comments')[:10]\n context.update({'articles': articles})\n\n tags = Tag.objects.annotate(\n tags_articles=Count('articles')\n ).order_by('-tags_articles')[:10]\n context.update({'tags': tags})\n\n return self.render_to_response(context)\n\n\nclass CategoriesView(BaseView):\n template_name = 'vlog/categories.tpl'\n\n def get(self, request, *args, **kwargs):\n context = super().get_context_data(**kwargs)\n\n categories = Category.objects.all().annotate(\n category_articles=Count('articles')\n ).order_by('-category_articles')\n context.update({'categories': categories})\n\n return self.render_to_response(context)\n\nclass CategoryView(BaseView):\n template_name = 'vlog/category.tpl'\n\n def get(self, request, *args, **kwargs):\n context = super().get_context_data(**kwargs)\n\n category = Category.objects.get(slug=kwargs.get('category_slug'))\n context.update({'category': category})\n\n top_2_articles = Article.objects.filter(\n category__slug=kwargs.get('category_slug')\n ).annotate(\n articles_comments=Count('comments')\n ).order_by('-articles_comments')[:2]\n context.update({'top_2_articles': top_2_articles})\n\n articles = Article.objects.filter(\n category__slug=kwargs.get('category_slug')\n ).annotate(\n articles_comments=Count('comments')\n ).order_by('-articles_comments')\n context.update({'articles': articles})\n\n paginator = Paginator(articles, 3)\n page = request.GET.get('page')\n articles_page = paginator.get_page(page)\n context.update({'articles_page': articles_page})\n\n return self.render_to_response(context)\n\nclass ArticlesView(BaseView):\n template_name = 'vlog/articles.tpl'\n\n def get(self, request, *args, **kwargs):\n context = super().get_context_data(**kwargs)\n\n articles = Article.objects.all().annotate(\n comments_qty=Count('comments')\n ).order_by('-comments_qty')\n\n context.update({'articles': articles})\n\n return self.render_to_response(context)\n\nclass ArticleView(BaseView):\n template_name = 'vlog/article.tpl'\n\n def get(self, request, *args, **kwargs):\n context = super().get_context_data(**kwargs)\n\n article = Article.objects.get(slug=kwargs.get('article_slug'))\n context.update({'article': article})\n\n return self.render_to_response(context)\n\nclass TagsView(BaseView):\n template_name = 'vlog/tags.tpl'\n\n def get(self, request, *args, **kwargs):\n context = super().get_context_data(**kwargs)\n\n tags = Tag.objects.all().annotate(\n articles_qty=Count('articles')\n ).order_by('-articles_qty')\n context.update({'tags': tags})\n\n articles_by_tag = Article.objects.values(\n 'tags', 'title', 'slug'\n ).annotate(\n comments_qty=Count('comments')\n ).order_by('tags', '-comments_qty')\n context.update({'articles_by_tag': articles_by_tag})\n\n return self.render_to_response(context)\n\nclass TagView(BaseView):\n template_name = 'vlog/tag.tpl'\n\n def get(self, request, *args, **kwargs):\n context = super().get_context_data(**kwargs)\n\n tag = Tag.objects.get(slug=kwargs.get('tag_slug'))\n context.update({'tag': tag})\n\n articles = Article.objects.filter(\n tags__slug=kwargs.get('tag_slug')\n ).annotate(\n articles_comments=Count('comments')\n ).order_by('-articles_comments')\n context.update({'articles': articles})\n\n return self.render_to_response(context)\n\n\nclass CategoriesList(generics.ListCreateAPIView):\n queryset = Category.objects.all()\n serializer_class = CategorySerializer\n\n\nclass CategoryDetail(generics.RetrieveUpdateDestroyAPIView):\n lookup_field = 'slug'\n lookup_url_kwarg = 'category_slug'\n queryset = Category.objects.all()\n serializer_class = CategorySerializer\n\n\nclass ArticlesList(generics.ListCreateAPIView):\n queryset = Article.objects.all()\n serializer_class = ArticleSerializer\n\n\nclass ArticleDetail(generics.RetrieveUpdateDestroyAPIView):\n lookup_field = 'slug'\n lookup_url_kwarg = 'article_slug'\n queryset = Article.objects.all()\n serializer_class = ArticleSerializer\n\n\nclass TagsList(generics.ListCreateAPIView):\n queryset = Tag.objects.all()\n serializer_class = TagSerializer\n\n\nclass TagDetail(generics.RetrieveUpdateDestroyAPIView):\n lookup_field = 'slug'\n lookup_url_kwarg = 'tag_slug'\n queryset = Tag.objects.all()\n serializer_class = TagSerializer\n","sub_path":"src/vlog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"291360287","text":"import requests\nfrom Models import Lesson, Course\nfrom Models import session\n\nfrom config import headers, auth, bid\n\ndef login():\n login_url = 'https://jiliguala.com/api/users/tokens'\n payload = {\n 'u': '13652085660',\n 'p': '1310559a',\n 'typ': 'mobile'\n }\n res = requests.get(login_url, headers=headers, params=payload).json()\n return {\n 'user_id': res['data']['_id'],\n 'password': res['data']['tok'],\n 'bid': res['data']['b'][0]['_id']\n }\n\n\ndef get_roadmap(_id, typ):\n roadmap_url = 'https://jiliguala.com/api/lesson/roadmap'\n payload = {\n 'bid': bid,\n 'id': _id,\n 'typ': typ\n }\n res = requests.get(\n roadmap_url,\n headers=headers,\n params=payload,\n auth=auth\n )\n return res.json()\n\n\ndef get_lessons(_id):\n typ = _id[-2:]\n roadmap = get_roadmap(_id, typ)\n return roadmap['data']['roadmap'] + roadmap['data']['unitroadmap']\n\n\ndef get_lesson_info(lesson_id):\n lesson_url = 'https://jiliguala.com/api/lessons'\n payload = {\n 'lid': lesson_id,\n 'bid': bid\n }\n res = requests.get(\n lesson_url,\n headers=headers,\n params=payload,\n auth=auth\n )\n return res.json()\n\n\ndef save_lession_to_db(lesson_info):\n if session.query(Lesson).filter_by(_id=lesson_info['data']['_id']).one_or_none() is None:\n lesson = Lesson(\n _id=lesson_info['data']['_id'],\n ttl=lesson_info['data']['ttl']\n )\n session.add(lesson)\n session.commit()\n\n\ndef update_lessons():\n _ids = ['L1MC', 'L2MC', 'L3MC', 'B1MC', 'B2MC', 'L1PH', 'L2PH', 'L3PH']\n for _id in _ids:\n lessons = get_lessons(_id)\n print(_id, ' ', len(lessons))\n for lesson in lessons:\n lesson_info = get_lesson_info(lesson['_id'])\n print(\n lesson_info['data']['_id'],\n lesson_info['data']['doneusers'],\n lesson_info['data']['ttl'],\n )\n save_lession_to_db(lesson_info)\n\n\ndef get_courses(cat):\n result = []\n courses_url = 'https://jiliguala.com/api/course/entry/cat'\n page = 0\n payload = {\n 'bid': bid,\n 'page': str(page),\n 'cat': cat \n }\n res = requests.get(courses_url, params=payload, headers=headers, auth=auth)\n courses = res.json()['data']['courses']\n while len(courses) > 0:\n result = result + courses\n page = page + 1\n payload = {\n 'bid': bid,\n 'page': str(page),\n 'cat': cat\n }\n courses = requests.get(courses_url, params=payload, headers=headers, auth=auth).json()['data']['courses']\n return result\n\n\ndef save_course_to_db(course_info):\n if session.query(Course).filter_by(_id=course_info['_id']).one_or_none() is None:\n course = Course(\n _id=course_info['_id'],\n ttl=course_info['ttl'],\n cat=course_info['cat']\n )\n if course_info['cat'] == '7':\n course.itemid = course_info['itemid']\n session.add(course)\n session.commit()\n\n\ndef get_course_info(course_id):\n course_url = 'https://jiliguala.com/api/roadmap/unit'\n payload = {\n 'bid': bid,\n 'courseid': course_id\n }\n courses = requests.get(course_url, params=payload, headers=headers, auth=auth).json()['data']['courses']\n if len(courses) > 1:\n print('Error {}'.format(courses))\n course_info = courses[0]\n return course_info\n\n\ndef update_courses():\n cats = ['2', '5', '7']\n for cat in cats:\n print('cat {}'.format(cat))\n courses = get_courses(cat)\n for course in courses:\n course_info = get_course_info(course['_id'])\n print(course_info)\n save_course_to_db(course_info)\n\n\nif __name__ == '__main__':\n update_courses()\n update_lessons()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"515724785","text":"import socket\nimport datetime\nimport time\nfrom dateutil import parser\nfrom timeit import default_timer as timer\nimport sys\n\nif len(sys.argv) != 2:\n print(\"Incorrect usage: expects one argument (Name of client)\")\n sys.exit()\n\ndef synchronizeTime():\n print(\"{}:\\n\".format(sys.argv[1]))\n s = socket.socket()\n port = 8011\n s.connect(('127.0.0.1', port))\n request_time = timer()\n server_time = parser.parse(s.recv(1024).decode())\n response_time = timer()\n actual_time = datetime.datetime.now()\n print(\"Time returned by server: \" + str(server_time))\n process_delay_latency = response_time - request_time\n print(\"Process Delay latency: \" + str(process_delay_latency) + \" seconds\")\n print(\"Actual clock time at client side: \" + str(actual_time))\n client_time = server_time + datetime.timedelta(seconds = (process_delay_latency) / 2)\n print(\"Synchronized process client time: \" + str(client_time))\n time.sleep(10)\n s.close()\nif __name__ == '__main__':\n synchronizeTime()\n","sub_path":"lab6/q2Cristians/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"383221152","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thur, Apr 29, 2021\nLast modified on Fri, May 7, 2021\nConditional MC for Heston model based on QE discretization scheme by Andersen(2008)\n@author: Xueyang & Xiaoyin\n\"\"\"\nimport numpy as np\nimport pyfeng as pf\nimport scipy.stats as st\nimport scipy.integrate as spint\nimport scipy.optimize as sopt\nfrom tqdm import tqdm\n\n\nclass HestonCondMcQE:\n '''\n Conditional MC for Heston model based on QE discretization scheme by Andersen(2008)\n\n Underlying price is assumed to follow a geometric Brownian motion.\n Volatility (variance) of the price is assumed to follow a CIR process.\n\n Example:\n >>> import numpy as np\n >>> import heston_cmc_qe as heston\n >>> strike = [100.0, 140.0, 70.0]\n >>> forward = 100\n >>> delta = [1, 1/2, 1/4, 1/8, 1/16, 1/32]\n >>> vov, kappa, rho, texp, theta, sigma = [1, 0.5, -0.9, 10, 0.04, 0.2]\n >>> heston_cmc_qe = heston.HestonCondMcQE(vov=vov, kappa=kappa, rho=rho, theta=theta)\n >>> price_cmc = np.zeros([len(delta), len(strike)])\n >>> for d in range(len(delta)):\n >>> price_cmc[d, :] = heston_cmc_qe.price(strike, forward, texp, sigma=sigma, delta=delta[d], path=1e5, seed=123456)\n >>> price_cmc\n array([[14.52722285, 0.19584722, 37.20591415],\n [13.56691261, 0.26568546, 36.12295964],\n [13.22061601, 0.29003533, 35.9154245 ],\n [13.12057087, 0.29501411, 35.90207168],\n [13.1042753 , 0.29476289, 35.89245755],\n [13.09047939, 0.29547721, 35.86410028]])\n '''\n\n def __init__(self, vov=1, kappa=0.5, rho=-0.9, theta=0.04):\n '''\n Initiate a Heston model\n\n Args:\n vov: volatility of variance, strictly positive\n kappa: speed of variance's mean-reversion, strictly positive\n rho: correlation between BMs of price and vol\n theta: long-term mean (equilibirum level) of the variance, strictly positive\n '''\n self.vov = vov\n self.kappa = kappa\n self.rho = rho\n self.theta = theta\n\n self.psi_points = None # for TG scheme only\n self.rx_results = None\n self.dis = 1e-3\n\n def price(self, strike, spot, texp, sigma, delta, intr=0, divr=0, psi_c=1.5, path=10000, scheme='QE', seed=None):\n '''\n Conditional MC routine for Heston model\n Generate paths for vol only using QE discretization scheme.\n Compute integrated variance and get BSM prices vector for all strikes.\n\n Args:\n strike: strike price, in vector form\n spot: spot (or forward)\n texp: time to expiry\n sigma: initial volatility\n delta: length of each time step\n intr: interest rate (domestic interest rate)\n divr: dividend/convenience yield (foreign interest rate)\n psi_c: critical value for psi, lying in [1, 2]\n path: number of vol paths generated\n scheme: discretization scheme for vt, {'QE', 'TG', 'Euler', 'Milstein', 'KJ'}\n seed: random seed for rv generation\n\n Return:\n BSM price vector for all strikes\n '''\n self.sigma = sigma\n self.bsm_model = pf.Bsm(self.sigma, intr=intr, divr=divr)\n self.delta = delta\n self.path = int(path)\n self.step = int(texp / self.delta)\n\n vt = self.sigma ** 2 * np.ones([self.path, self.step + 1])\n np.random.seed(seed)\n\n if scheme == 'QE':\n u = np.random.uniform(size=(self.path, self.step))\n\n expo = np.exp(-self.kappa * self.delta)\n for i in range(self.step):\n # compute m, s_square, psi given vt(i)\n m = self.theta + (vt[:, i] - self.theta) * expo\n s2 = vt[:, i] * (self.vov ** 2) * expo * (1 - expo) / self.kappa + self.theta * (self.vov ** 2) * \\\n ((1 - expo) ** 2) / (2 * self.kappa)\n psi = s2 / m ** 2\n\n # compute vt(i+1) given psi\n below = np.where(psi <= psi_c)[0]\n ins = 2 * psi[below] ** -1\n b2 = ins - 1 + np.sqrt(ins * (ins - 1))\n b = np.sqrt(b2)\n a = m[below] / (1 + b2)\n z = st.norm.ppf(u[below, i])\n vt[below, i+1] = a * (b + z) ** 2\n\n above = np.where(psi > psi_c)[0]\n p = (psi[above] - 1) / (psi[above] + 1)\n beta = (1 - p) / m[above]\n for k in range(len(above)):\n if u[above[k], i] > p[k]:\n vt[above[k], i+1] = beta[k] ** -1 * np.log((1 - p[k]) / (1 - u[above[k], i]))\n else:\n vt[above[k], i+1] = 0\n\n elif scheme == 'TG':\n if np.all(self.rx_results) == None:\n self.psi_points, self.rx_results = self.prepare_rx()\n\n expo = np.exp(-self.kappa * self.delta)\n for i in range(self.step):\n # compute m, s_square, psi given vt(i)\n m = self.theta + (vt[:, i] - self.theta) * expo\n s2 = vt[:, i] * (self.vov ** 2) * expo * (1 - expo) / self.kappa + self.theta * (self.vov ** 2) * \\\n ((1 - expo) ** 2) / (2 * self.kappa)\n psi = s2 / m ** 2\n\n rx = np.array([self.find_rx(j) for j in psi])\n\n z = np.random.normal(size=(self.path, self.step))\n mu_v = np.zeros_like(z)\n sigma_v = np.zeros_like(z)\n mu_v[:, i] = rx * m / (st.norm.pdf(rx) + rx * st.norm.cdf(rx))\n sigma_v[:, i] = np.sqrt(s2) * psi ** (-0.5) / (st.norm.pdf(rx) + rx * st.norm.cdf(rx))\n\n vt[:, i+1] = np.fmax(mu_v[:, i] + sigma_v[:, i] * z[:, i], 0)\n\n elif scheme == 'Euler':\n z = np.random.normal(size=(self.path, self.step))\n for i in range(self.step):\n vt[:, i+1] = vt[:, i] + self.kappa * (self.theta - np.max(vt[:, i], 0)) * self.delta + \\\n self.vov * np.sqrt(np.max(vt[:, i], 0) * self.delta) * z[:, i]\n below_0 = np.where(vt < 0)\n vt[below_0] = 0\n\n elif scheme == 'Milstein':\n z = np.random.normal(size=(self.path, self.step))\n for i in range(self.step):\n vt[:, i+1] = vt[:, i] + self.kappa * (self.theta - np.max(vt[:, i], 0)) * self.delta + self.vov * \\\n np.sqrt(np.max(vt[:, i], 0) * self.delta) * z[:, i] + \\\n self.vov**2 * 0.25 * (z[:, i]**2 - 1) * self.delta\n below_0 = np.where(vt < 0)\n vt[below_0] = 0\n\n elif scheme == 'KJ':\n z = np.random.normal(size=(self.path, self.step))\n for i in range(self.step):\n vt[:, i+1] = (vt[:, i] + self.kappa * self.theta * self.delta + self.vov * \\\n np.sqrt(np.max(vt[:, i], 0) * self.delta) * z[:, i] + \\\n self.vov**2 * 0.25 * (z[:, i]**2 - 1) * self.delta) / (1 + self.kappa * self.delta)\n below_0 = np.where(vt < 0)\n vt[below_0] = 0\n\n # compute integral of vt, equivalent spot and vol\n vt_int = spint.simps(vt, dx=self.delta)\n spot_cmc = spot * np.exp(self.rho * (vt[:, -1] - vt[:, 0] - self.kappa * (self.theta * texp - vt_int))\n / self.vov - self.rho ** 2 * vt_int / 2)\n vol_cmc = np.sqrt((1 - self.rho ** 2) * vt_int / texp)\n\n # compute bsm price vector for the given strike vector\n price_cmc = np.zeros_like(strike)\n for j in range(len(strike)):\n price_cmc[j] = np.mean(self.bsm_model.price_formula(strike[j], spot_cmc, vol_cmc, texp, intr=intr, divr=divr))\n\n return price_cmc\n\n def prepare_rx(self):\n '''\n Pre-calculate r(x) and store the result\n for TG scheme only\n '''\n fx = lambda rx: rx * st.norm.pdf(rx) + st.norm.cdf(rx) * (1 + rx ** 2) / \\\n ((st.norm.pdf(rx) + rx * st.norm.cdf(rx)) ** 2) - 1\n rx_results = np.linspace(-2, 100, 10 ** 5)\n psi_points = fx(rx_results)\n\n return psi_points, rx_results\n\n def find_rx(self, psi):\n '''\n Return r(psi) according to the pre_calculated results\n '''\n\n if self.rx_results[self.psi_points >= psi].size == 0:\n print(\"Caution: input psi too large\")\n return self.rx_results[-1]\n elif self.rx_results[self.psi_points <= psi].size == 0:\n print(\"Caution: input psi too small\")\n return self.rx_results[0]\n else:\n return (self.rx_results[self.psi_points >= psi][0] + self.rx_results[self.psi_points <= psi][-1])/2\n\n\n","sub_path":"heston_cmc_qe.py","file_name":"heston_cmc_qe.py","file_ext":"py","file_size_in_byte":8799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"500260379","text":"import logging\nimport itertools\nimport numpy as np\n\nfrom chemistry.functions import PolarCoordsWithDirection, GaussianException\nfrom chemistry.utils import linalg\n\n\ndef optimize_structure_rfo(molecule, struct, rfo, stop_strategy):\n path = []\n zero = np.zeros(molecule.n_dims - 6)\n\n for itr in itertools.count():\n path.append(struct)\n\n motionless = linalg.get_motionless(molecule, struct)\n value, grad, hess = motionless.value_grad_hess(zero)\n\n delta = rfo(itr=itr, x=zero, val=value, grad=grad, hess=hess)\n print('\\n\\nnew iteration\\nvalue = {}, grad norm = {}, delta norm = {}'.format(value, np.linalg.norm(grad), np.linalg.norm(delta)))\n print('delta norm = {} [{}]'.format(np.linalg.norm(delta), delta))\n\n if stop_strategy(itr=iter, x=zero, val=value, grad=grad, hess=hess, delta=delta):\n print('break')\n break\n\n while True:\n try:\n next_value = motionless(delta)\n expected = grad.dot(delta) + .5 * delta.dot(hess.dot(delta))\n real = next_value - value\n\n print('delta norm = {} [{}]'.format(np.linalg.norm(delta), delta))\n print('expected = {}, real = {}, d = {}'.format(expected, real, abs(expected - real) / abs(expected)))\n print()\n if abs(expected - real) / abs(expected) < .3:\n break\n except GaussianException as exc:\n print('exception => decreasing delta')\n\n delta *= .5\n\n struct = motionless.transform(delta)\n\n return path\n\n\n\ndef optimize_on_sphere(func, r, dir, delta_strategy, stop_strategy):\n \"\"\"\n Optimizes function on sphere surface with center in zero\n\n :param func: function to optimize\n :param r: radius of sphere to optimize on\n :param dir: initial direction. Vector of norm r\n :param delta_strategy: iteration delta strategy\n :param stop_strategy: iteration stop strategy\n :return: optimization path of directions\n \"\"\"\n\n path = []\n skips1 = []\n skips2 = []\n\n phi = np.zeros(func.n_dims - 1)\n\n from chemistry.optimization.delta_strategies import Newton, RFO\n newton = Newton()\n rfo = RFO()\n\n for itr in itertools.count():\n path.append(dir)\n\n in_polar = PolarCoordsWithDirection(func, r, dir)\n # value, grad = in_polar.value_grad(phi)\n value, grad, hess = in_polar.value_grad_hess(phi)\n\n skips1.append(in_polar.transform(newton(grad, hess)))\n skips2.append(in_polar.transform(rfo(grad, hess)))\n\n delta = delta_strategy(itr=itr, x=phi, val=value, grad=grad)\n\n print(value, dir, np.linalg.norm(grad), np.linalg.norm(delta))\n\n if stop_strategy(itr=iter, x=phi, val=value, grad=grad, delta=delta):\n break\n\n dir = in_polar.transform(delta)\n\n return path, skips1, skips2\n\n\ndef optimize_on_sphere_rfo(func, r, dir, rfo, stop_strategy, comp_eps=1e-9):\n path = []\n phi = np.zeros(func.n_dims - 1)\n\n for itr in itertools.count():\n path.append(dir)\n\n in_polar = PolarCoordsWithDirection(func, r, dir)\n value, grad, hess = in_polar.value_grad_hess(phi)\n\n import matplotlib.pyplot as plt\n plt.imshow(np.abs(hess), cmap='hot', interpolation='nearest')\n plt.show()\n\n delta = rfo(itr=itr, x=phi, val=value, grad=grad, hess=hess)\n logging.debug('new iteration\\nvalue = {}, grad norm = {}, delta norm = {}\\nhess values:\\n{}'.format(\n value, np.linalg.norm(grad), np.linalg.norm(delta), linalg.calc_singular_values(hess)\n ))\n logging.debug('new iteration\\nvalue = {}, grad norm = {}, delta norm = {}'.format(\n value, np.linalg.norm(grad), np.linalg.norm(delta)\n ))\n\n if stop_strategy(itr=iter, x=phi, val=value, grad=grad, delta=delta, hess=hess):\n break\n\n while True:\n next_value = in_polar(delta)\n\n expected = grad.dot(delta) + .5 * delta.dot(hess.dot(delta))\n real = next_value - value\n\n d = abs(expected - real) / abs(expected)\n logging.debug('delta norm = {}, expected = {}, real = {}, d = {}'.format(\n np.linalg.norm(delta), expected, real, d\n ))\n if abs(expected - real) < comp_eps or d < .3:\n break\n delta *= .5\n\n dir = in_polar.transform(delta)\n\n return path\n\n\n","sub_path":"chemistry/optimization/optimization.py","file_name":"optimization.py","file_ext":"py","file_size_in_byte":4432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"451778028","text":"# This code is modified from https://github.com/jakesnell/prototypical-networks \n\nimport backbone\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport numpy as np\nimport torch.nn.functional as F\nfrom methods.meta_template import MetaTemplate\n\nimport utils\n\nclass ProtoMarginTrain(MetaTemplate):\n def __init__(self, model_func, n_way, n_support, m=0.50, Lambda=0.50):\n super(ProtoMarginTrain, self).__init__( model_func, n_way, n_support)\n self.loss_fn = nn.CrossEntropyLoss()\n self.Lambda = Lambda\n self.m = m\n\n\n def set_forward(self,x,is_feature = False):\n z_support, z_query = self.parse_feature(x,is_feature)\n\n z_support = z_support.contiguous()\n z_proto = z_support.view(self.n_way, self.n_support, -1 ).mean(1) #the shape of z is [n_data, n_dim]\n z_query = z_query.contiguous().view(self.n_way* self.n_query, -1 )\n\n\n dists = euclidean_dist(z_query, z_proto)\n scores = -dists\n\n return scores\n\n def set_forward_margin_loss(self,x,is_feature = False):\n z_support, z_query = self.parse_feature(x,is_feature)\n\n z_support = z_support.contiguous()\n z_proto = z_support.view(self.n_way, self.n_support, -1 ).mean(1) #the shape of z is [n_data, n_dim]\n z_proto = F.normalize(z_proto)\n\n dists = euclidean_dist(z_proto, z_proto)\n dists = torch.sqrt(dists + 1e-9) # Add small value to avoid nan gradient\n dists = self.m - dists\n zeros = torch.zeros(size=dists.shape, dtype=torch.float32).cuda()\n dists = torch.max(dists, zeros)\n dists = torch.triu(dists, diagonal=1)\n\n loss = torch.sum(torch.pow(dists, 2)) / (self.n_way * (self.n_way-1) / 2)\n\n return loss\n\n def set_forward_loss(self, x):\n y_query = torch.from_numpy(np.repeat(range( self.n_way ), self.n_query ))\n y_query = Variable(y_query.cuda())\n\n scores = self.set_forward(x)\n loss = self.loss_fn(scores, y_query)\n margin_loss = self.set_forward_margin_loss(x)\n\n total_loss = loss + self.Lambda * margin_loss\n return total_loss\n\ndef euclidean_dist( x, y):\n # x: N x D\n # y: M x D\n n = x.size(0)\n m = y.size(0)\n d = x.size(1)\n assert d == y.size(1)\n\n x = x.unsqueeze(1).expand(n, m, d)\n y = y.unsqueeze(0).expand(n, m, d)\n\n return torch.pow(x - y, 2).sum(2)\n\n# python ./train.py --dataset miniImageNet --model ResNet10 --method protomargin --n_shot 5 --train_aug","sub_path":"methods/protomargin.py","file_name":"protomargin.py","file_ext":"py","file_size_in_byte":2510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"390344373","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.basemap import Basemap, shiftgrid\nimport time\nfrom matplotlib import cm\n\n\ndef plot_map(data,\n lons,\n lats,\n significance,\n color_map=\"RdYlBu_r\",\n parallel_s=10,\n meridian_s=10,\n draw_graticule=False,\n save_file=False,\n figsize=(12, 8),\n dpi=200,\n crop=False):\n fig = plt.figure(figsize=figsize, dpi=dpi)\n\n # map margin\n horizontal_margin = np.abs(lons[0] - lons[1]) * 2\n vertical_margin = np.abs(lats[0] - lats[1]) * 2\n\n # create map\n if crop:\n m = Basemap(projection=\"cyl\",\n resolution='i',\n area_thresh=5000,\n llcrnrlon=np.min(lons) - horizontal_margin,\n llcrnrlat=np.min(lats) - vertical_margin,\n urcrnrlon=np.max(lons) + horizontal_margin,\n urcrnrlat=np.max(lats) + vertical_margin)\n else:\n m = Basemap(\n projection=\"cyl\",\n resolution='i',\n area_thresh=5000\n )\n\n if draw_graticule:\n parallels = np.arange(-90, 90 + parallel_s, parallel_s)\n meridians = np.arange(-180, 180 + meridian_s, meridian_s)\n m.drawparallels(parallels, labels=[1, 0, 0, 0], color=\"#aeaeae\", linewidth=0.1, size=5)\n m.drawmeridians(meridians, labels=[1, 0, 0, 1], color=\"#aeaeae\", linewidth=0.1, size=5)\n\n m.drawcountries(linewidth=0.1)\n m.drawcoastlines(linewidth=0.1)\n\n # shift lons and lats by step/2\n shift_lons = -np.abs(lons[0] - lons[1]) / 2\n shift_lats = -np.abs(lats[0] - lats[1]) / 2\n x, y = m(*np.meshgrid(lons + shift_lons, lats + shift_lats))\n\n # colormap setting\n color_res = 32\n ticks = np.linspace(np.nanmin(data), np.nanmax(data), int(color_res / 2) + 1)\n colormap = cm.get_cmap(color_map, color_res)\n\n # plot grid\n color = m.pcolor(x,\n y,\n np.round(data.squeeze(), 2),\n cmap=colormap,\n edgecolors=\"k\",\n linewidth=0.01,\n vmin=np.nanmin(data),\n vmax=np.nanmax(data))\n\n # plot colorbar\n color_bar = m.colorbar(color, location=\"bottom\", pad=\"5%\", ticks=np.round(ticks, 2))\n color_bar.ax.tick_params(labelsize=6)\n\n # plot trend significance\n if np.array(significance).shape[0] > 0:\n for row in range(len(lats)):\n for col in range(len(lons)):\n if significance[row][col] == 1:\n plt.plot(lons[col], lats[row], marker='+', markersize=0.1, color='k', linewidth=0.01)\n elif significance[row][col] == -1:\n plt.plot(lons[col], lats[row], marker=\"|\", markersize=0.1, color='k', linewidth=0.01)\n\n if save_file:\n plt.savefig(f\"result_{int(time.time())}.png\", dpi=dpi * 5, bbox_inches='tight', pad_inches=0.2)\n\n # plt.show()\n","sub_path":"app/lib/mymaplib.py","file_name":"mymaplib.py","file_ext":"py","file_size_in_byte":3013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"319242255","text":"from typing import List\n\nfrom tensorflow import keras\n\nfrom tfaip.base.data.pipeline.definitions import PipelineMode\nfrom tfaip.base.device_config import DeviceConfig\nimport tfaip.base.imports as tfaip_cls\nfrom tfaip.base.predict.multimodelpredictor import MultiModelVoter\n\nfrom calamari_ocr.ocr.predict.params import PredictorParams\nfrom calamari_ocr.ocr.scenario import Scenario\nfrom calamari_ocr.ocr.dataset.data import Data\nfrom calamari_ocr.ocr.voting import VoterParams\nfrom calamari_ocr.ocr import SavedCalamariModel, DataParams\nfrom calamari_ocr.ocr.voting.adapter import CalamariMultiModelVoter\nfrom calamari_ocr.utils.output_to_input_transformer import OutputToInputTransformer\n\n\nclass Predictor(tfaip_cls.Predictor):\n @staticmethod\n def from_checkpoint(params: PredictorParams, checkpoint: str, auto_update_checkpoints=True):\n ckpt = SavedCalamariModel(checkpoint, auto_update=False)\n trainer_params = Scenario.trainer_params_from_dict(ckpt.dict)\n trainer_params.scenario_params.data_params.pre_processors_.run_parallel = False\n trainer_params.scenario_params.data_params.post_processors_.run_parallel = False\n scenario = Scenario(trainer_params.scenario_params)\n predictor = Predictor(params, scenario.create_data())\n ckpt = SavedCalamariModel(checkpoint, auto_update=auto_update_checkpoints) # Device params must be specified first\n predictor.set_model(keras.models.load_model(ckpt.ckpt_path + '.h5', custom_objects=Scenario.model_cls().get_all_custom_objects()))\n return predictor\n\n\nclass MultiPredictor(tfaip_cls.MultiModelPredictor):\n @classmethod\n def from_paths(cls, checkpoints: List[str],\n auto_update_checkpoints=True,\n predictor_params: PredictorParams = None,\n voter_params: VoterParams = None,\n **kwargs\n ) -> 'aip_predict.MultiModelPredictor':\n if not checkpoints:\n raise Exception(\"No checkpoints provided.\")\n\n if predictor_params is None:\n predictor_params = PredictorParams(silent=True, progress_bar=True)\n\n DeviceConfig(predictor_params.device_params)\n checkpoints = [SavedCalamariModel(ckpt, auto_update=auto_update_checkpoints) for ckpt in checkpoints]\n multi_predictor = super(MultiPredictor, cls).from_paths(\n [ckpt.json_path for ckpt in checkpoints],\n predictor_params,\n Scenario,\n model_paths=[ckpt.ckpt_path + '.h5' for ckpt in checkpoints],\n predictor_args={'voter_params': voter_params},\n )\n\n return multi_predictor\n\n def __init__(self, voter_params, *args, **kwargs):\n super(MultiPredictor, self).__init__(*args, **kwargs)\n self.voter_params = voter_params or VoterParams()\n\n def create_voter(self, data_params: 'DataParams') -> MultiModelVoter:\n # Cut non text processors (first two)\n post_proc = [Data.data_processor_factory().create_sequence(\n data.params().post_processors_.sample_processors[2:], data.params(), PipelineMode.Prediction) for\n data in self.datas]\n pre_proc = Data.data_processor_factory().create_sequence(\n self.data.params().pre_processors_.sample_processors, self.data.params(),\n PipelineMode.Prediction)\n out_to_in_transformer = OutputToInputTransformer(pre_proc)\n return CalamariMultiModelVoter(self.voter_params, self.datas, post_proc, out_to_in_transformer)\n","sub_path":"calamari_ocr/ocr/predict/predictor.py","file_name":"predictor.py","file_ext":"py","file_size_in_byte":3513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"626750903","text":"import pygame\nimport game_functions as gf\nfrom settings import Settings\nfrom paddles import Paddle\nfrom ai_paddles import AIPaddle\nfrom ball import Ball\nfrom scoreboard import Scoreboard\nfrom stats import GameStats\nfrom startup import StartupScreen\n\n\ndef run_game():\n pygame.init()\n s = Settings()\n screen = pygame.display.set_mode((s.screen_width, s.screen_height))\n pygame.display.set_caption(\"Pong\")\n paddles = Paddle(s, screen, s.playerpaddlex, s.playerpaddley, s.paddle_height, s.paddle_width)\n paddles2 = Paddle(s, screen, s.playerpaddlex, 0, s.paddle_height, s.paddle_width)\n paddles3 = Paddle(s, screen, 0, s.leftpaddley, s.paddle_width, s.paddle_height)\n ai_paddles = AIPaddle(s, screen, s.aipaddlex, s.playerpaddley, s.paddle_height, s.paddle_width)\n ai_paddles2 = AIPaddle(s, screen, s.aipaddlex, 0, s.paddle_height, s.paddle_width)\n ai_paddles3 = AIPaddle(s, screen, s.rightpaddlex, s.rightpaddley, s.paddle_width, s.paddle_height)\n ball = Ball(s, screen)\n stats = GameStats(s)\n sb = Scoreboard(s, screen, stats)\n su = StartupScreen(s, screen)\n\n while True:\n gf.check_events(paddles, paddles2, paddles3, stats, sb)\n if stats.game_active:\n gf.check_collisions(s, ball, paddles, paddles2, paddles3, ai_paddles, ai_paddles2, ai_paddles3, sb, stats)\n gf.update_screen(s, screen, paddles, paddles2, paddles3, ai_paddles, ai_paddles2, ai_paddles3, ball, sb, stats, su)\n else:\n gf.showtitlescreen(su, stats)\n\n\nrun_game()\n","sub_path":"pong.py","file_name":"pong.py","file_ext":"py","file_size_in_byte":1525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"441766353","text":"#!/usr/bin/env python\n\n#######################################################\n# _ #\n# _| | ___ #\n# / . |/ . \\ #\n# \\___|\\___/ #\n# #\n# get stuff done #\n# #\n#######################################################\n# do is licensed under the Beer-ware license(R42): #\n# widr1225@colorado.edu wrote this script. As long as #\n# you retain this notice you can do whatever you want #\n# with this stuff. If we meet some day, and you think #\n# this stuff is worth it, you can buy me a beer in #\n# return --Will Drach #\n#######################################################\n# do comes with no guarantee, warranty, or suggestion #\n# that it even remotely does what it's supposed to. #\n#######################################################\n# Notes: #\n# * 1 indent is 4 spaces #\n# #\n# * max page length is 78 #\n# #\n# * single quotes are preferable to doubles #\n# #\n# * follow pep 8 style guide #\n# https://www.python.org/dev/peps/pep-0008 #\n# #\n# * run big revisions through pep8 and pylint #\n# #\n#######################################################\n\ntry:\n # import this first so we can throw an error\n # if something goes wrong\n from sys import stderr\n\n # default libraries\n import sys\n import os\n import os.path\n import ast\n\n # default specific imports\n from os.path import isfile\n from ast import literal_eval\n\n # Do specific libraries\n import do_helper\n\n # Do specific specific imports\n from do_helper import read_list\n\nexcept ImportError as err:\n try:\n stderr.write('ERROR: ', err)\n except:\n print('ERROR: ', err)\n exit(1)\n\n#######################################################\n# The do list object #\n#######################################################\n\n\nclass do(object):\n '''The do list object:\n arguments:\n page_list = []\n list_dict = {}\n\n The do list is a task list with different\n categories and priorities.\n\n Lists are called \"shelves\"\n Categories are called \"books\"\n Subcategories are called \"chapters\"\n and tasks are called \"pages\"\n\n This naming convention is because having names\n is easier to keep track of than just \"subcat\"\n or \"subsubsubsubcat\" or whatever\n '''\n\n def __init__(self, page_list=[], list_dict={}):\n self.page_list = page_list\n self.list_dict = list_dict\n self.shelves = self.find_shelves()\n self.books = self.find_books()\n self.chapters = self.find_chapters()\n\n ####################\n # various helper #\n # functions #\n ####################\n\n def refresh_priorities(self, page, verify=True):\n '''Reset a page's overall priority if another\n priority changed.\n '''\n verified = self.verify_page(page)\n\n if verify and not verified:\n raise RuntimeError('invalid page')\n return\n elif not verified:\n return\n # find task information\n t_shelf = page[3]\n t_book = page[4]\n t_chapter = page[5]\n\n # grab the page priority\n page_priority = page[1]\n\n # start iterating and finding the correct priorities\n # base priority is the priority without adding\n # the task priority\n t_shelf_dict = self.list_dict[t_shelf]\n if t_book is not None:\n t_book_dict = t_shelf_dict[t_book]\n t_book_prio = t_book_dict['priority']\n\n if t_chapter is not None:\n t_chapter_dict = t_shelf_dict[t_chapter]\n t_chapter_prio = t_chapter_dict['priority']\n base_priority = t_book_prio * 3 + t_chapter_prio * 2\n else:\n base_priority = t_book_prio * 3 + 6\n\n else:\n base_priority = 15\n\n # get the full priority\n priority = base_priority + page_priority\n\n # reconstruct the page\n page = (page[0], page_priority, priority,\n page[3], page[4], page[5])\n return page\n\n def reconstruct_pagelist(self, page_list=[]):\n '''reconstruct the page list\n with updated priorities\n '''\n\n if page_list == []:\n page_list = self.page_list\n\n new_page_list = []\n\n # iterate through the list and run\n # refresh priorities on each item\n for page in page_list:\n page = self.refresh_priorities(page)\n new_page_list.append(page)\n\n return new_page_list\n\n def org_pagelist(self, page_list=[]):\n '''sort pages by overall priority'''\n\n if page_list == []:\n page_list = self.page_list\n # use sorted to sort the list properly\n sorted_page_list = sorted(page_list, key=lambda priority: priority[2])\n return(sorted_page_list)\n\n def verify_page(self, page):\n '''verify that a page's shelf/book/chapter exist\n so that you can properly construct priorities\n '''\n\n page_shelf = page[3]\n page_book = page[4]\n page_chapter = page[5]\n\n if page_shelf not in self.shelves:\n return False\n elif page_book not in self.books and page_book is not None:\n return False\n elif page_chapter not in self.chapters and page_chapter is not None:\n return False\n else:\n return True\n\n ####################\n # find functions #\n ####################\n\n def find_shelves(self):\n '''list all of the shelves'''\n\n # find all of the shelves\n item_list = []\n for item in self.list_dict.keys():\n item_list.append(item)\n\n return item_list\n\n def find_books(self, shelves=[]):\n '''list books on a set of specified shelves\n\n inputs:\n shelves = ['list','of','shelves']\n all_books = True/False\n all_books will print the books for every shelf\n '''\n\n # iterate through the shelves and find the\n # books that go with them\n item_list = []\n if shelves == []:\n shelves = self.find_shelves()\n for shelf in shelves:\n for item in self.list_dict[shelf].keys():\n if item != 'priority':\n item_list.append([shelf, item])\n\n return item_list\n\n def find_chapters(self, shelves=[], books=[]):\n '''list chapters on a set of specified books\n\n inputs:\n shelves = ['list','of','shelves']\n to find all chapters regardless of book or\n shelf just run find_chapters() by itself\n '''\n\n # set the proper shelves/books\n item_list = []\n if books == [] and shelves == []:\n books = self.find_books()\n elif books == []:\n books = self.find_books(shelves=shelves)\n else:\n if shelves == []:\n shelves = self.find_shelves()\n\n new_books = self.find_books(shelves=shelves)\n delete_list = []\n for item in new_books:\n if item[1] not in books:\n delete_list.append(item)\n\n for item in delete_list:\n new_books.remove(item)\n\n books = new_books\n\n # iterate through and return the chapters\n for shelf, book in books:\n chapter_dict = self.list_dict[shelf][book]\n for item in chapter_dict.keys():\n if item != 'priority':\n item_list.append([shelf, book, item])\n\n return item_list\n\n def find_pages(self, shelves=[], books=[], chapters=[]):\n '''grab a set of pages based on a handful of inputs'''\n\n # set the proper parameters, if it's empty set it to all\n if shelves == []:\n shelves = self.shelves\n if books == []:\n books = self.books\n if chapters == []:\n chapters = self.chapters\n\n item_list = []\n # append items only if they match the parameters\n for item in self.page_list:\n if(item[3] in shelves and\n item[4] in books and\n item[5] in chapters):\n\n item_list.append(item)\n\n return(self.org_pagelist(page_list=item_list))\n\n ####################\n # locate functions #\n ####################\n\n def locate_shelf(self, book=None, chapter=None):\n '''find what shelf a book or chapter is in'''\n\n if book is None and chapter is None:\n raise RuntimeError('invalid parameters')\n return\n\n if book is None:\n shelf, book = locate_book(chapter=chapter)\n return shelf\n\n for key, shelf in self.list_dict.items():\n for new_book in shelf.keys():\n if new_book == book:\n return key\n\n def locate_book(self, chapter=None):\n '''find what shelf and book a chapter is in'''\n\n if chapter is None:\n raise RuntimeError('invalid parameters')\n return\n\n for key, shelf in self.list_dict.items():\n for bkey, book in shelf.items():\n for new_chapter in book.keys():\n if new_chapter == chapter:\n return(key, bkey)\n else:\n raise RuntimeError('book not found')\n return\n\n ####################\n # add functions #\n ####################\n\n def add_shelf(self, shelf=None):\n '''Create a new shelf'''\n\n if shelf is None:\n raise RuntimeError('invalid parameters')\n return\n\n if shelf in self.shelves:\n raise RuntimeError('shelf exists')\n return\n\n self.list_dict[shelf] = {}\n self.shelves = find_shelves()\n\n return\n\n def add_book(self, shelf=None, book=None, priority=3):\n '''Create a new book'''\n\n if book is None:\n raise RuntimeError('invalid parameters')\n return\n\n if shelf not in self.shelves:\n raise RuntimeError('invalid parameters')\n return\n\n self.list_dict[shelf][book] = {'priority': priority}\n self.books = find_books()\n\n return\n\n def add_chapter(self, shelf=None, book=None, chapter=None, priority=3):\n '''Create a new chapter'''\n\n if chapter is None or book is None:\n raise RuntimeError('invalid parameters')\n return\n\n if shelf is None:\n shelf = locate_shelf(book=book)\n\n if shelf not in self.shelves or book not in self.books:\n raise RuntimeError('invalid parameters')\n return\n\n self.list_dict[shelf][book][chapter] = {'priority': priority}\n self.chapters = find_chapters()\n\n return\n\n def add_page(self, page='', priority=3,\n shelf=None, book=None, chapter=None):\n '''Does what it says, adds a page.\n parameters are:\n page = str : page description\n priority = int : page priority\n shelf = str : the shelf for the page to go on\n book = str : the book for the page to go in\n chapter = str : the chapter for the page to go in\n '''\n\n # verify we have *something* to file it into\n if shelf is None and book is None and chapter is None:\n raise RuntimeError('no parameters given')\n return\n\n # if we are given slightly malformed parameters,\n # turn them into proper parameters\n if shelf is None and book is not None:\n book_list = find_books()\n for new_shelf, new_book in book_list:\n if new_book == book:\n shelf = new_shelf\n break\n\n if shelf is None:\n raise RuntimeError('invalid parameters')\n return\n\n elif shelf is None or (book is None and chapter is not None):\n chapter_list = find_chapters()\n for new_shelf, new_book, new_chapter in chapter_list:\n if new_chapter == chapter:\n shelf = new_shelf\n book = new_book\n break\n\n if shelf is None or book is None:\n raise RuntimeError('invalid parameters')\n return\n\n page = (page, priority, 0, shelf, book, chapter)\n page = self.refresh_priorities(page)\n self.page_list.append(page)\n\n return\n\n ####################\n # delete functions #\n ####################\n\n def del_shelf(self, shelf=None):\n '''Remove a shelf and all of the associated pages'''\n\n if shelf is None or shelf not in self.shelves:\n raise RuntimeError('invalid parameters')\n return\n\n del self.list_dict[shelf]\n\n del_page(shelf=[shelf])\n\n return\n\n def del_book(self, shelf=None, book=None):\n '''Remove a book and all of the associated pages'''\n\n if book is None or book not in self.books:\n raise RuntimeError('invalid parameters')\n\n if shelf is None:\n shelf = locate_shelf(book)\n\n del self.list_dict[shelf][book]\n\n delete_page(book=book, shelf=shelf)\n\n return\n\n def del_chapter(self, shelf=None, book=None, chapter=None):\n '''Remove a chapter and all of the associated pages'''\n\n if chapter is None or chapter not in self.chapter:\n raise RuntimeError('invalid parameters')\n\n if shelf is None and book is not None:\n shelf = locate_shelf(book)\n elif book is None:\n shelf, book = locate_book(chapter=chapter)\n\n del self.list_dict[shelf][book]\n\n delete_page(book=book, shelf=shelf)\n\n return\n\n def del_page(self, shelf=[], book=[], chapter=[], page=[]):\n '''Remove a page or set of pages with\n specified parameters\n shelf = list : shelves to remove from\n book = list : books to remove from\n chapter = list : chapters to remove from\n page = list : specific pages to remove\n if the lower sets are blank (which they\n are by default) it will remove all of the\n things from the smallest input given\n '''\n\n delete_field = 0\n delete_query = page\n\n if page is []:\n delete_field = 5\n if chapter is []:\n delete_field = 4\n if book is []:\n delete_field = 3\n if shelf is []:\n raise RuntimeError('invalid parameters')\n return\n\n for page in self.page_list:\n page_shelf = page[delete_field]\n if page_shelf in delete_query:\n delete_list.append(page)\n\n for page in delete_list:\n self.page_list.remove(page)\n return\n\nif __name__ == '__main__':\n exit(2)\n","sub_path":"do.py","file_name":"do.py","file_ext":"py","file_size_in_byte":15557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"112384589","text":"'''\nThe code snippet below is our first model, a simple stack of 3 convolution layers \nwith a ReLU activation and followed by max-pooling layers. This is very similar to \nthe architectures that Yann LeCun advocated in the 1990s for image classification \n(with the exception of ReLU).\n'''\n\nfrom keras.preprocessing.image import ImageDataGenerator\n\nfrom keras.models import Sequential\nfrom keras.layers import Conv2D, MaxPooling2D\nfrom keras.layers import Activation, Dropout, Flatten, Dense\n#from keras.backend import image_data_format\nfrom keras import backend as K\n\nfrom os.path import dirname, abspath\nfrom os import walk\n\n# gets jpg count for all images in given directory and all subdirectories\ndef jpg_counts(dirpath, verbose=False):\n from os import listdir, walk\n #from os.path import isfile, join\n \n # list of all subdirectories\n dirlist = [x[0] for x in walk(dirpath)][1:]\n \n # list of all images in this directory\n imagelist = [f for f in listdir(dirpath) if '.jpg' in f[-4:].lower()]\n if verbose:\n print(len(imagelist),'\\n')\n \n # get all images in all subdirectories\n #print(dirlist)\n for currdir in dirlist:\n allfiles = [f for f in listdir(currdir)]\n imagelistsubdir = [f for f in listdir(currdir) if '.jpg' in f[-4:].lower()]\n imagelist += imagelistsubdir\n if verbose:\n if len(allfiles) != len(imagelistsubdir):\n print(currdir, len(imagelistsubdir), 'out of', len(allfiles), 'EXTRA NON JPG FILES')\n else:\n print(currdir, len(imagelistsubdir), 'out of', len(allfiles))\n\n if verbose:\n print(len(imagelist))\n return len(imagelist)\n\ndef get_num_classes(dirpath, verbose=False):\n from os import walk\n \n # get list of all direct subdirectories\n dirlist = next(walk(dirpath))[1]\n \n if verbose:\n print('Classes found:')\n for d in dirlist:\n print(d)\n \n return len(dirlist)\n\n\nimg_width, img_height = 150, 150\n#img_width, img_height = 761, 800\n\ninput_shape = (150, 150, 3)\n#input_shape = (3, 150, 150)\n\n\nmodel = Sequential()\n#model.add(Conv2D(32, (3, 3), input_shape=(3, 150, 150)))\nmodel.add(Conv2D(32, 3, 3, input_shape=input_shape))\nmodel.add(Activation('relu'))\n#model.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\n#model.add(Conv2D(32, (3, 3)))\nmodel.add(Conv2D(32, 3, 3))\nmodel.add(Activation('relu'))\n#model.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\n#model.add(Conv2D(64, (3, 3)))\nmodel.add(Conv2D(64, 3, 3))\nmodel.add(Activation('relu'))\n#model.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\n'''\nOn top of it we stick two fully-connected layers. We end the model with a single \nunit and a sigmoid activation, which is perfect for a binary classification. To \ngo with it we will also use the binary_crossentropy loss to train our model.\n'''\n\n#basedir = dirname(abspath(__file__))\n#basedir += '/data'\n#basedir = '/home/ubuntu/kojak/data'\nbasedir = '/data/data'\n#basedir = '/data/data2'\ntargetdir = basedir + '/train'\nvaldir = basedir + '/validation'\n\nnum_categories = get_num_classes(targetdir)\nprint('Number of categories:', num_categories)\n\nmodel.add(Flatten()) # this converts our 3D feature maps to 1D feature vectors\nmodel.add(Dense(64))\nmodel.add(Activation('relu'))\nmodel.add(Dropout(0.5))\n#model.add(Dense(1))\nmodel.add(Dense(num_categories)) # number of categories\n#model.add(Activation('sigmoid'))\nmodel.add(Activation('softmax')) # for multiclass\n\n'''\nmodel.compile(loss='binary_crossentropy',\n optimizer='rmsprop',\n metrics=['accuracy'])\n'''\n\n# for multiclass\nmodel.compile(loss='categorical_crossentropy',\n optimizer='rmsprop',\n metrics=['accuracy'])\n\n\n\n'''\nLet's prepare our data. We will use .flow_from_directory() to generate batches \nof image data (and their labels) directly from our jpgs in their respective \nfolders.\n'''\n \nbatch_size = 32\n\n# this is the augmentation configuration we will use for training\ntrain_datagen = ImageDataGenerator(\n rotation_range=180,\n rescale=1./255,\n shear_range=0.2,\n zoom_range=0.2,\n horizontal_flip=True)\n\n# this is the augmentation configuration we will use for testing:\n# only rescaling\ntest_datagen = ImageDataGenerator(rescale=1./255)\n\n# this is a generator that will read pictures found in\n# subfolders of 'data/train', and indefinitely generate\n# batches of augmented image data\n\nclasses = next(walk(targetdir))[1]\n\ntrain_generator = train_datagen.flow_from_directory(\n targetdir, # this is the target directory (originally = 'data/train')\n target_size=(img_width, img_height), # all images will be resized to 150x150\n batch_size=batch_size,\n class_mode='categorical',\n classes=classes)\n\n# this is a similar generator, for validation data\nvalidation_generator = test_datagen.flow_from_directory(\n valdir, # (originally 'data/validation')\n target_size=(img_width, img_height),\n batch_size=batch_size,\n class_mode='categorical',\n classes=classes)\n\n'''\nWe can now use these generators to train our model. Each epoch takes 20-30s on \nGPU and 300-400s on CPU. So it's definitely viable to run this model on CPU if \nyou aren't in a hurry.\n'''\n\n#epochs=1, # original 50, 400s/epoch on cpu (20s/epoch on gpu)\n'''\nmodel.fit_generator(\n train_generator,\n steps_per_epoch=2000 // batch_size,\n epochs=50,\n validation_data=validation_generator,\n validation_steps=800 // batch_size)\n'''\n\nnb_epoch = 25\nnb_train_samples = jpg_counts(targetdir)\n#nb_train_samples = 51634\nnb_validation_samples = jpg_counts(valdir)\n\nmodel.fit_generator(\n train_generator,\n samples_per_epoch = nb_train_samples,\n nb_epoch = nb_epoch,\n validation_data=validation_generator,\n nb_val_samples = nb_validation_samples)\nmodel.save_weights('first_try.h5') # always save your weights after training or during training\n\nprint('Done!')","sub_path":"python_scripts/basicCNNmodel.py","file_name":"basicCNNmodel.py","file_ext":"py","file_size_in_byte":6077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"259959117","text":"import unittest\n\nfrom singelton import Borg, Singelton\n\nclass TestSingelton(unittest.TestCase):\n\tdef test_two_singeltons_have_different_identity(self):\n\t\tx = Singelton(HTTP=\"Hyper Text Transfer Protocol\")\n\t\ty = Singelton(SNMP=\"Simple Network Management Protocol\")\n\t\tself.assertIsNot(x, y)\n\n\tdef test_two_singeltons_share_common_state(self):\n\t\tx = Singelton(HTTP=\"Hyper Text Transfer Protocol\")\n\t\ty = Singelton(SNMP=\"Simple Network Management Protocol\")\n\t\tself.assertEqual(str(x), str(y))\n\nif __name__ == \"__main__\":\n\tunittest.main()\n","sub_path":"src/Creational/test_creational_patterns.py","file_name":"test_creational_patterns.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"99458173","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n# @Time : 2019/05/03\r\n# @Company : INVINCIBLE STUDIO\r\n# @Author : Mo Wenlong\r\n# @Email : invincible0918@126.com\r\n# @File : mirrorAnimModel.py\r\n\r\nimport os\r\nfrom Core.MayaGUI.LitFrame.model import Model\r\nfrom functools import partial\r\nfrom maya.api import OpenMaya as om\r\nfrom maya.api import OpenMayaAnim as oma\r\nfrom maya import cmds as mc\r\nimport math\r\n\r\n\r\nclass MirrorAnimModel(Model):\r\n SourceRoot = 'B_Pelvis'\r\n DestRoot = 'rig_B_Pelvis'\r\n\r\n def lateInit(self, **kwargs):\r\n super(MirrorAnimModel, self).lateInit(**kwargs)\r\n\r\n sourceList = list()\r\n self.__getJoints(self.SourceRoot, sourceList)\r\n\r\n destList = list()\r\n self.__getJoints(self.DestRoot, destList)\r\n\r\n for i in range(len(destList)):\r\n if i == 0:\r\n pos = mc.xform(sourceList[i].fullPathName(), q=True, t=True, a=True, ws=True)\r\n mc.xform(destList[i].fullPathName(), t=[-pos[0], pos[1], pos[2]], a=True, ws=True)\r\n\r\n rot = mc.xform(sourceList[i].fullPathName(), q=True, ro=True, a=True, ws=True)\r\n mc.xform(destList[i].fullPathName(), ro=rot, a=True, ws=True)\r\n else:\r\n sourceNode = None\r\n f = destList[i].fullPathName().replace('|rig_target', '').replace('rig_', '')\r\n\r\n if '_L_' in f:\r\n f = f.replace('_L_', '_R_')\r\n\r\n for j in range(len(sourceList)):\r\n if sourceList[j].fullPathName() == f:\r\n sourceNode = sourceList[j]\r\n break\r\n\r\n elif '_R_' in f:\r\n f = f.replace('_R_', '_L_')\r\n for j in range(len(sourceList)):\r\n if sourceList[j].fullPathName() == f:\r\n sourceNode = sourceList[j]\r\n break\r\n else:\r\n sourceNode = sourceList[i]\r\n\r\n jointOrientX = mc.getAttr(sourceNode.fullPathName() + '.jointOrientX')\r\n jointOrientY = mc.getAttr(sourceNode.fullPathName() + '.jointOrientY')\r\n jointOrientZ = mc.getAttr(sourceNode.fullPathName() + '.jointOrientZ')\r\n\r\n jointOrientQ = om.MEulerRotation(jointOrientX * math.pi / 180,\r\n jointOrientY * math.pi / 180,\r\n jointOrientZ * math.pi / 180).asQuaternion()\r\n\r\n q = sourceNode.rotation(om.MSpace.kTransform, True) * jointOrientQ\r\n q.x *= -1\r\n q.y *= -1\r\n\r\n destList[i].setRotation(q * jointOrientQ.inverse(), om.MSpace.kTransform)\r\n\r\n def __getJoints(self, parent, li):\r\n l = om.MGlobal.getSelectionListByName(parent)\r\n node = om.MFnTransform(l.getDependNode(0))\r\n\r\n li.append(node)\r\n children = mc.listRelatives(parent, c=True, f=True, type='joint')\r\n if children:\r\n for child in children:\r\n self.__getJoints(child, li)\r\n","sub_path":"Projects/INV/Tools/MirrorAnim/Package/Scripts/mirrorAnimModel.py","file_name":"mirrorAnimModel.py","file_ext":"py","file_size_in_byte":3121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"358261990","text":"# Copyright 2020 QuantStack\n# Distributed under the terms of the Modified BSD License.\n\nimport json\nimport os\nimport random\nimport shutil\nimport uuid\nfrom enum import Enum\nfrom pathlib import Path\nfrom typing import Dict, NoReturn\n\nimport typer\nimport uvicorn\nfrom sqlalchemy.orm.session import Session\n\nfrom quetz.config import Config, _env_config_file, _env_prefix, _user_dir, create_config\nfrom quetz.database import get_session\nfrom quetz.db_models import (\n ApiKey,\n Channel,\n ChannelMember,\n Identity,\n Package,\n PackageMember,\n Profile,\n User,\n)\n\napp = typer.Typer()\n\n_deployments_file = os.path.join(_user_dir, 'deployments.json')\n\n\nclass LogLevel(str, Enum):\n critical = \"critical\"\n error = \"error\"\n warning = \"warning\"\n info = \"info\"\n debug = \"debug\"\n trace = \"trace\"\n\n\ndef _fill_test_database(db: Session) -> NoReturn:\n \"\"\"Create dummy users and channels to allow further testing in dev mode.\"\"\"\n\n testUsers = []\n try:\n for index, username in enumerate(['alice', 'bob', 'carol', 'dave']):\n user = User(id=uuid.uuid4().bytes, username=username)\n\n identity = Identity(\n provider='dummy',\n identity_id=str(index),\n username=username,\n )\n\n profile = Profile(name=username.capitalize(), avatar_url='/avatar.jpg')\n\n user.identities.append(identity)\n user.profile = profile\n db.add(user)\n testUsers.append(user)\n\n for channel_index in range(3):\n channel = Channel(\n name=f'channel{channel_index}',\n description=f'Description of channel{channel_index}',\n private=False,\n )\n\n for package_index in range(random.randint(5, 10)):\n package = Package(\n name=f'package{package_index}',\n summary=f'package {package_index} summary text',\n description=f'Description of package{package_index}',\n )\n channel.packages.append(package)\n\n test_user = testUsers[random.randint(0, len(testUsers) - 1)]\n package_member = PackageMember(\n package=package, channel=channel, user=test_user, role='owner'\n )\n\n db.add(package_member)\n\n if channel_index == 0:\n package = Package(name='xtensor', description='Description of xtensor')\n channel.packages.append(package)\n\n test_user = testUsers[random.randint(0, len(testUsers) - 1)]\n package_member = PackageMember(\n package=package, channel=channel, user=test_user, role='owner'\n )\n\n db.add(package_member)\n\n # create API key\n key = uuid.uuid4().hex\n\n key_user = User(id=uuid.uuid4().bytes)\n api_key = ApiKey(\n key=key, description='test API key', user=test_user, owner=test_user\n )\n db.add(api_key)\n print(f'Test API key created for user \"{test_user.username}\": {key}')\n\n key_package_member = PackageMember(\n user=key_user,\n channel_name=channel.name,\n package_name=package.name,\n role='maintainer',\n )\n db.add(key_package_member)\n\n db.add(channel)\n\n channel_member = ChannelMember(\n channel=channel,\n user=test_user,\n role='owner',\n )\n\n db.add(channel_member)\n db.commit()\n finally:\n db.close()\n\n\ndef _get_deployments() -> Dict[str, str]:\n \"\"\"Get a mapping of the current Quetz deployments.\n\n Returns\n -------\n deployments : Dict[str, str]\n The mapping of deployments\n \"\"\"\n\n if os.path.exists(_deployments_file):\n return _get_cleaned_deployments()\n else:\n Path(_user_dir).mkdir(parents=True, exist_ok=True)\n return {}\n\n\ndef _store_deployment(path: str, config_file_name: str) -> NoReturn:\n \"\"\"Store a new Quetz deployment.\n\n Parameters\n ----------\n path : str\n The location of the deployment\n config_file_name : str\n The configuration file name, including its extension\n \"\"\"\n\n json_ = {path: config_file_name}\n deployments = _get_deployments()\n\n deployments.update(json_)\n with open(_deployments_file, 'w') as f:\n json.dump(deployments, f)\n\n\ndef _get_cleaned_deployments() -> Dict[str, str]:\n \"\"\"Get a cleaned version of deployments.\n\n This could be necessary to clean-up if the user delete manually a deployment\n directory without updating the deployments files in its profile.\n\n Returns\n -------\n deployments : Dict[str, str]\n The mapping of deployments\n \"\"\"\n\n with open(_deployments_file, 'r') as f:\n deployments = json.load(f)\n\n to_delete = []\n for path, f in deployments.items():\n config_file = os.path.join(path, f)\n if not os.path.exists(config_file): # User has deleted the instance without CLI\n to_delete.append(path)\n\n cleaned_deployments = {\n path: f for path, f in deployments.items() if path not in to_delete\n }\n\n if len(to_delete) > 0:\n with open(_deployments_file, 'w') as f:\n json.dump(cleaned_deployments, f)\n\n return cleaned_deployments\n\n\ndef _clean_deployments() -> NoReturn:\n \"\"\"Clean the deployments without returning anything.\"\"\"\n _ = _get_cleaned_deployments()\n\n\n@app.command()\ndef create(\n path: str = typer.Argument(\n None,\n help=(\n \"The directory in which the deployment will be created \"\n \"(will be created if does not exist)\"\n ),\n ),\n config_file_name: str = typer.Option(\n \"config.toml\", help=\"The configuration file name expected in the provided path\"\n ),\n copy_conf: str = typer.Option(\n None, help=\"The configuration to copy from (e.g. dev_config.toml)\"\n ),\n create_conf: bool = typer.Option(\n False,\n help=\"Enable/disable creation of a default configuration file\",\n ),\n dev: bool = typer.Option(\n False,\n help=(\n \"Enable/disable dev mode \"\n \"(fills the database with test data and allows http access)\"\n ),\n ),\n) -> NoReturn:\n \"\"\"Create a new Quetz deployment.\"\"\"\n\n abs_path = os.path.abspath(path)\n config_file = os.path.join(path, config_file_name)\n deployments = _get_deployments()\n\n if os.path.exists(path) and abs_path in deployments:\n delete_ = typer.confirm(f'Quetz deployment exists at {path}.\\nOverwrite it?')\n if delete_:\n delete(path, force=True)\n del deployments[abs_path]\n else:\n typer.echo('Use the start command to start a deployment.', err=True)\n raise typer.Abort()\n\n Path(path).mkdir(parents=True)\n\n # only authorize path with a config file to avoid deletion of unexpected files\n # when deleting Quetz instance\n if not all(f == config_file_name for f in os.listdir(path)):\n typer.echo(\n f'Quetz deployment not allowed at {path}.\\n'\n 'The path should not contain more than the configuration file.',\n err=True,\n )\n raise typer.Abort()\n\n if not os.path.exists(config_file) and not (create_conf or copy_conf):\n typer.echo(\n 'No configuration file provided.\\n'\n 'Use --create-conf or --copy-conf to produce a config file.',\n err=True,\n )\n raise typer.Abort()\n\n if copy_conf:\n if not os.path.exists(copy_conf):\n typer.echo(f'Config file to copy does not exist {copy_conf}.', err=True)\n raise typer.Abort()\n\n typer.echo(f\"Copying config file from {copy_conf} to {config_file}\")\n shutil.copyfile(copy_conf, config_file)\n\n if not os.path.exists(config_file) and create_conf:\n if dev:\n https = 'false'\n else:\n https = 'true'\n conf = create_config(https=https)\n with open(config_file, 'w') as f:\n f.write(conf)\n\n os.environ[_env_prefix + _env_config_file] = config_file\n config = Config(config_file)\n\n os.chdir(path)\n Path('channels').mkdir()\n db = get_session(config.sqlalchemy_database_url)\n\n if dev:\n _fill_test_database(db)\n\n _store_deployment(abs_path, config_file_name)\n\n\n@app.command()\ndef start(\n path: str = typer.Argument(None, help=\"The path of the deployment\"),\n port: int = typer.Option(8000, help=\"The port to bind\"),\n host: str = typer.Option(\"127.0.0.1\", help=\"The network interface to bind\"),\n proxy_headers: bool = typer.Option(True, help=\"Enable/disable X-Forwarded headers\"),\n log_level: LogLevel = typer.Option(\n LogLevel.info,\n help=\"Set the logging level\",\n ),\n reload: bool = typer.Option(\n False,\n help=(\n \"Enable/disable automatic reloading of the server when sources are modified\"\n ),\n ),\n) -> NoReturn:\n \"\"\"Start a Quetz deployment.\n\n To be started, a deployment has to be already created.\n At this time, only Uvicorn is supported as manager.\n \"\"\"\n\n abs_path = os.path.abspath(path)\n deployments = _get_deployments()\n\n try:\n config_file_name = deployments[abs_path]\n except KeyError:\n typer.echo(f'No Quetz deployment found at {path}.', err=True)\n raise typer.Abort()\n\n config_file = os.path.join(abs_path, config_file_name)\n os.environ[_env_prefix + _env_config_file] = config_file\n os.chdir(path)\n\n import quetz\n\n quetz_src = os.path.dirname(quetz.__file__)\n uvicorn.run(\n \"quetz.main:app\",\n reload=reload,\n reload_dirs=(quetz_src,),\n port=port,\n proxy_headers=proxy_headers,\n host=host,\n log_level=log_level,\n )\n\n\n@app.command()\ndef run(\n path: str = typer.Argument(None, help=\"The path of the deployment\"),\n config_file_name: str = typer.Option(\n \"config.toml\", help=\"The configuration file name expected in the provided path\"\n ),\n copy_conf: str = typer.Option(\n None, help=\"The configuration to copy from (e.g. dev_config.toml)\"\n ),\n create_conf: bool = typer.Option(\n False,\n help=\"Enable/disable creation of a default configuration file\",\n ),\n dev: bool = typer.Option(\n False,\n help=(\n \"Enable/disable dev mode \"\n \"(fills the database with test data and allows http access)\"\n ),\n ),\n port: int = typer.Option(8000, help=\"The port to bind\"),\n host: str = typer.Option(\"127.0.0.1\", help=\"The network interface to bind\"),\n proxy_headers: bool = typer.Option(True, help=\"Enable/disable X-Forwarded headers\"),\n log_level: LogLevel = typer.Option(\n LogLevel.info,\n help=\"Set the logging level\",\n ),\n reload: bool = typer.Option(\n False,\n help=(\n \"Enable/disable automatic reloading of the server when sources are modified\"\n ),\n ),\n) -> NoReturn:\n \"\"\"Run a Quetz deployment.\n\n It performs sequentially create and start operations.\"\"\"\n\n abs_path = os.path.abspath(path)\n create(abs_path, config_file_name, copy_conf, create_conf, dev)\n start(abs_path, port, host, proxy_headers, log_level, reload)\n\n\n@app.command()\ndef delete(\n path: str = typer.Argument(None, help=\"The path of the deployment\"),\n force: bool = typer.Option(\n False, help=\"Enable/disable removal without confirmation prompt\"\n ),\n) -> NoReturn:\n \"\"\"Delete a Quetz deployment.\"\"\"\n\n abs_path = os.path.abspath(path)\n deployments = _get_deployments()\n\n try:\n _ = deployments[abs_path]\n except KeyError:\n typer.echo(f'No Quetz deployment found at {path}.', err=True)\n raise typer.Abort()\n\n delete = force or typer.confirm(f\"Delete Quetz deployment at {path}?\")\n if not delete:\n raise typer.Abort()\n\n shutil.rmtree(abs_path)\n _clean_deployments()\n\n\n@app.command()\ndef list() -> NoReturn:\n \"\"\"List Quetz deployments.\"\"\"\n\n deployments = _get_deployments()\n\n if len(deployments) > 0:\n typer.echo('\\n'.join([p for p in deployments]))\n\n\nif __name__ == \"__main__\":\n app()\n","sub_path":"quetz/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":12403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"270252220","text":"from django import template\nfrom django.template.defaultfilters import stringfilter\nfrom django.utils.html import strip_tags\nimport re\n\nfrom django.utils.safestring import mark_safe\nfrom django.core.serializers import serialize\nimport json\nfrom django.db.models.query import QuerySet\nimport urllib\n\nregister = template.Library()\n\n\n@register.filter\n@stringfilter\ndef sentence_case(value):\n return value.replace(\"_\", \" \").capitalize()\n\n\n@register.filter\n@stringfilter\ndef inferred_status_label(status):\n return \"\" + status + \"\"\n\n\n@register.filter\n@stringfilter\ndef facet_name(value):\n if value == 'bill_type':\n return 'Legislation type'\n if value == 'sponsorships':\n return 'Sponsor'\n if value == 'controlling_body':\n return 'Controlling body'\n if value == 'inferred_status':\n return 'Legislation status'\n\n\n@register.filter\n@stringfilter\ndef remove_action_subj(bill_action_desc):\n # removes 'by X' from bill action descriptions & expands abbrevs\n # for more readable action labels\n clean_action = re.sub(r'\\bComm\\b', 'Committee', bill_action_desc)\n clean_action = re.sub(r'\\bRecved\\b', 'Received', clean_action)\n clean_action = re.sub(r'[,\\s]*by\\s[^\\s]*', '', clean_action)\n\n # shorten the really long action descriptions for approval w/ modifications\n if 'approved with modifications' in clean_action.lower():\n clean_action = 'Approved with Modifications'\n\n return clean_action\n\n\n@register.filter\n@stringfilter\ndef short_blurb(text_blob):\n if len(text_blob) > 196:\n blurb = text_blob[:196]\n blurb = blurb[:blurb.rfind(' ')] + ' ...'\n return blurb\n else:\n return text_blob\n\n\n@register.filter\n@stringfilter\ndef short_title(text_blob):\n if len(text_blob) > 28:\n blurb = text_blob[:24]\n blurb = blurb[:blurb.rfind(' ')] + ' ...'\n return blurb\n else:\n return text_blob\n\n\n@register.filter\n@stringfilter\ndef strip_mailto(email):\n return re.sub('mailto:', '', email)\n\n\n@register.filter\n@stringfilter\ndef committee_topic_only(committee_name):\n clean = re.sub('Committee on ', '', committee_name)\n clean = re.sub('Subcommittee on ', '', clean)\n if 'Mental Health, Developmental Disability' in clean:\n clean = 'Mental Health & Disability'\n return clean\n\n\n@register.filter\n@stringfilter\ndef clean_html(text):\n value = strip_tags(text).replace('\\n', '')\n return re.sub(r'&(?:\\w+|#\\d+);', '', value)\n\n\n@register.filter\n@stringfilter\ndef alternative_identifiers(id_original):\n id_1 = re.sub(\" \", \" 0\", id_original)\n id_2 = re.sub(\" \", \"\", id_original)\n id_3 = re.sub(\" \", \"\", id_1)\n return id_original + ' ' + id_1 + ' ' + id_2 + ' ' + id_3\n\n\n@register.filter\ndef get_item(dictionary, key):\n return dictionary.get(key)\n\n\n@register.filter\ndef format_date_sort(s, fmt='%Y%m%d%H%M'):\n if s:\n return s.strftime(fmt)\n else:\n return '0'\n\n\n@register.filter\ndef format_url_parameters(url):\n params = [\"?&sort_by=date\", \"?&sort_by=title\", \"?&sort_by=relevance\", \"?&ascending=true\", \"?&descending=true\", \"&sort_by=date\", \"&sort_by=title\", \"&sort_by=relevance\", \"&ascending=true\", \"&descending=true\", \"sort_by=date\", \"sort_by=title\", \"sort_by=relevance\", \"ascending=true\", \"descending=true\"]\n\n paramsDict = dict((re.escape(el), \"\") for el in params)\n\n pattern = re.compile(\"|\".join(paramsDict.keys()))\n\n return pattern.sub(lambda m: paramsDict[re.escape(m.group(0))], url)\n\n# TODO: Clean up for refactor of javascript.\n# @register.simple_tag\n# def query_transform(request, **kwargs):\n\n# data_dict = dict(request.GET.copy())\n# print(data_dict)\n# try:\n# selected_facet_names = data_dict['selected_facets']\n# except:\n# selected_facet_names = []\n\n# updated = request.GET.copy()\n# if selected_facet_names:\n# for k,v in kwargs.items():\n# selected_facet_names.append(v)\n\n# updated['selected_facets'] = selected_facet_names\n\n# return updated.urlencode()\n\n\n@register.filter\ndef create_facet_string(selected_facets, query=None):\n facet_string = \"/search/rss/?\"\n\n if query:\n facet_string = \"/search/rss/?q=\" + query\n\n for key, values in selected_facets:\n for value in values:\n facet_string += \"&selected_facets=\" + key + \":\" + value\n\n return facet_string\n\n@register.filter\ndef remove_question(text):\n return text.rstrip('?')","sub_path":"councilmatic_core/templatetags/extras.py","file_name":"extras.py","file_ext":"py","file_size_in_byte":4470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"345792082","text":"# coding: utf-8\n\n\"\"\"\n PyLucid message level middleware\n ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n \n Set the django message level dynamically\n \n http://docs.djangoproject.com/en/dev/ref/contrib/messages/#changing-the-minimum-recorded-level-per-request\n\n :copyleft: 2011 by the PyLucid team, see AUTHORS for more details.\n :license: GNU GPL v3 or above, see LICENSE for more details.\n\"\"\"\n\n\nfrom django.contrib.messages import constants as message_constants\nfrom django.contrib import messages\n\n\nclass MessageLevelMiddleware(object):\n def process_request(self, request):\n \"\"\"\n set the django message level by user type use system preferences.\n \"\"\"\n # Get the system preferences\n system_preferences = request.PYLUCID.preferences\n\n # get the level by user type and system preferences\n if request.user.is_superuser:\n level = system_preferences[\"message_level_superuser\"]\n elif request.user.is_staff:\n level = system_preferences[\"message_level_staff\"]\n elif request.user.is_authenticated():\n level = system_preferences[\"message_level_normalusers\"]\n else:\n level = system_preferences[\"message_level_anonymous\"]\n\n # Set the current used message level\n messages.set_level(request, level)\n\n\n","sub_path":"venv/Lib/site-packages/PyLucid-1.5.0-py2.7.egg/pylucid_project/middlewares/message_level.py","file_name":"message_level.py","file_ext":"py","file_size_in_byte":1327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"457844800","text":"from flask import Blueprint, request, make_response, jsonify\nfrom app.api.v1.models.model import Party, parties\n\nparties = Blueprint('parties', __name__)\n\n\nclass PartiesResource:\n\n @parties.route('/parties', methods=['POST'])\n def post():\n data = request.get_json(force=True)\n\n name = data['name']\n hqaddress = data['hqaddress']\n logoUrl = data['logoUrl']\n\n res = Party().save(name, hqaddress, logoUrl)\n return make_response(jsonify({\n \"message\": \"created successfully\"\n }))\n\n @parties.route('/parties', methods=['GET'])\n def get():\n\n parties = Party().get_all_parties()\n\n return make_response(jsonify({\n \"message\": \"retrieved successfully\",\n \"parties\": parties\n }))\n\n # @parties.route('/parties', methods=['GET'])\n # def get():\n #\n # parties = Party().get_party_by_names()\n #\n # return make_response(jsonify({\n # \"message\": \"retrieved successfully\",\n # \"parties\": parties\n # }))\n","sub_path":"app/api/v1/Views/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"651085840","text":"def tester(start):\n state = start # Обращение к нелокальным переменным\n def nested(label): # действует как обычно\n print(label, state) # извлекает значение state из области\n return nested # видимости объемлющей функции\n\nF = tester(0) # Но здесь будет постояно 0\nF('spam')\n\ndef tester2(start):\n state = start # В каждом вызове сохраняется свое значение state\n def nested(label): # Объект state находится\n nonlocal state\n print(label, state) # в объемлющей области видимости\n state += 1 # Изменит значение переменной, объявленной как nonlocal\n return nested\n\nF = tester2(0)\nF('sram') # Будет увеличивать значение state при каждом вызове\nF('ham')\nF('eggs')\nG = tester2(55) # Создаст новую функцию, которая начнет счет с 55\nG('sram')\nG('ham')\nG('eggs') # Обновит значение state до 57\nF('bacon') # Но в функции F значение state останется прежним\n # Каждая новая функция получает свой экземпляр state\n\ndef tester3():\n spam = 99\n def nested():\n nonlocal spam\n print('Current=', spam)\n spam += 1\n return nested\nF = tester3()\nF()\nF()","sub_path":"chapter17/nonlocal_instruction.py","file_name":"nonlocal_instruction.py","file_ext":"py","file_size_in_byte":1603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"228816289","text":"#!usr/bin/env python \n#-*- coding:utf-8 _*- \n\"\"\" \n@author:yaoli \n@file: get_baidu_coord.py \n@time: 2020/03/17\n采用百度地图API进行地理编码\n\"\"\"\n\nimport io\nimport requests\nimport json\n\ndef parse_url(data={}):\n \"\"\"\n 拼接url地址,森林防火项目时写的函数\n \"\"\"\n item = data.items()\n urls= '?'\n for i in item:\n (key, value) = i\n temp_str = key + \"=\" + \"%s\" % value\n urls = urls + temp_str + \"&\"\n urls = urls[:len(urls) - 1]\n return urls\n\n\ndef get_locate(data_in, file_out):\n \"\"\"\n 爬虫函数\n data_in 为列表\n file_out 为文件对象\n \"\"\"\n for index in range(len(data_in)):\n line = data_in[index]\n address = line\n\n baseurl = 'http://api.map.baidu.com/geocoding/v3/'\n params = {\n 'address': '湖南省长沙市' + address, # 地址\n 'output': 'json',\n 'ak': 'exA7IllQD32dPVgPlMx5kP34j4dAvokQ'} # 百度密钥 峰值接受每秒200次请求。\n url = baseurl + parse_url(params)\n\n res = requests.get(url)\n jd = json.loads(res.text)\n coords = jd['result']['location']\n if address == '科大一号院':\n coords['lng'] = 113.00650\n coords['lat'] = 28.23198\n\n out_loc = '\\'' + address + '\\'' + ':[' + str(round(coords['lng'], 5)) + ',' + str(\n round(coords['lat'], 5)) + '], \\n'\n file_out.write(out_loc)\n\nif __name__ == \"__main__\":\n # 地址输入,一行一个地址\n f = io.open('place_dup.csv', 'r', encoding='utf-8')\n data = f.read()\n f.close()\n\n # 输出文档\n f_out = open('loc.txt', 'r+')\n f_out.truncate(0)\n\n urlList = []\n if (data.find('\"') != -1):\n data = data.replace('\"', '')\n datas = data.split('\\n')\n\n # print(datas)\n get_locate(datas, f_out)\n\n\n","sub_path":"get_baidu_coord.py","file_name":"get_baidu_coord.py","file_ext":"py","file_size_in_byte":1831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"474749133","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n'''\n# @author : 郑祥忠\n# @license : (C) Copyright,2013-2019,广州海格星航科技\n# @contact : dylenzheng@gmail.com\n# @file : change.py\n# @time : 10/25/19 5:07 PM\n# @desc : \n'''\nfilex = open('imageNet.txt','r')\nfiley = open('imageNet_new.txt','a')\nfor line in filex.readlines():\n img_path = line.strip().split(' ')[0]\n print('img_path=',img_path)\n information = line.strip().split(' ')[1:]\n print('information=',information)\n information[1],information[-1] = information[-1],information[1]\n print('information=',information)\n coordination = ''\n for index in information[:-1]:\n coordination += index + ' '\n # print('coordination=',coordination)\n line_new = img_path + ' '+ coordination + '\\n'\n print('line_new=',line_new)\n filey.write(line_new)","sub_path":"coco2txt/change.py","file_name":"change.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"409094684","text":"from oslo_versionedobjects import base as versioned_objects_base\n\nimport flocx_market.db.sqlalchemy.api as db\nfrom flocx_market.objects import base\nfrom flocx_market.objects import fields\n\n\n@versioned_objects_base.VersionedObjectRegistry.register\nclass Offer(base.FLOCXMarketObject):\n\n fields = {\n 'marketplace_offer_id': fields.StringField(),\n 'provider_offer_id': fields.StringField(),\n 'project_id': fields.StringField(),\n 'status': fields.StringField(),\n 'server_id': fields.StringField(),\n 'start_time': fields.DateTimeField(nullable=True),\n 'end_time': fields.DateTimeField(nullable=True),\n 'server_config': fields.FlexibleDictField(nullable=True),\n 'cost': fields.FloatField(),\n }\n\n @classmethod\n def create(cls, data, context):\n o = db.offer_create(data, context)\n return cls._from_db_object(cls(), o)\n\n @classmethod\n def get(cls, offer_id, context):\n if offer_id is None:\n return None\n else:\n o = db.offer_get(offer_id, context)\n if o is None:\n return None\n else:\n return cls._from_db_object(cls(), o)\n\n def destroy(self, context):\n db.offer_destroy(self.marketplace_offer_id, context)\n return True\n\n @classmethod\n def get_all(cls, context):\n all_offers = db.offer_get_all(context)\n return cls._from_db_object_list(all_offers)\n\n def save(self, context):\n updates = self.obj_get_changes()\n db_offer = db.offer_update(\n self.marketplace_offer_id, updates, context)\n return self._from_db_object(self, db_offer)\n\n @classmethod\n def get_all_unexpired(cls, context):\n unexpired = db.offer_get_all_unexpired(context)\n return cls._from_db_object_list(unexpired)\n\n @classmethod\n def get_all_by_project_id(cls, context):\n by_project_id = db.offer_get_all_by_project_id(context)\n return cls._from_db_object_list(by_project_id)\n\n def expire(self, context):\n self.status = 'expired'\n self.save(context)\n","sub_path":"flocx_market/objects/offer.py","file_name":"offer.py","file_ext":"py","file_size_in_byte":2111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"109501593","text":"\"\"\"example_app URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.0/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom django.contrib.auth import views as auth_views\n\nfrom asset_dashboard.views import ProjectListView, CipPlannerView, ProjectCreateView, \\\n ProjectUpdateView, ProjectListJson, \\\n ProjectsByDistrictListView, ProjectsByDistrictListJson\n\nurlpatterns = [\n path('', ProjectListView.as_view(), name='projects'),\n path('projects/json/', ProjectListJson.as_view(), name='project-list-json'),\n path('projects/add-project/', ProjectCreateView.as_view(), name='add-project'),\n path('projects//', ProjectUpdateView.as_view(), name='project-detail'),\n path('projects/districts/', ProjectsByDistrictListView.as_view(), name='projects-by-district'),\n path('projects/districts/json/', ProjectsByDistrictListJson.as_view(), name='projects-district-json'),\n path('cip-planner/', CipPlannerView.as_view(), name='cip-planner'),\n path('accounts/login/', auth_views.LoginView.as_view(), name='login'),\n path('accounts/logout/', auth_views.LogoutView.as_view(), name='logout'),\n path('admin/', admin.site.urls),\n]\n\nhandler404 = 'asset_dashboard.views.page_not_found'\nhandler500 = 'asset_dashboard.views.server_error'\n","sub_path":"asset_dashboard/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"6111272","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom keras.applications import ResNet50, VGG16, InceptionV3, Xception\nfrom keras.layers import Flatten, Dense, Dropout, Input\nfrom keras.models import Sequential, Model\n\n# Map model names to classes\nMODELS = {\n \"vgg16\": VGG16,\n \"inception\": InceptionV3,\n \"xception\": Xception,\n \"resnet\": ResNet50\n}\n\n# Define path to pre-trained classification block weights - this is\nvgg_weights_path = \"weights/vgg16_pretrain_weights.h5\"\n# res_weights_path = \"weights/vgg16_pretrain_weights.h5\"\n\ndef create_model(model, model_weights_path=None, top_model=True):\n \"\"\"Create custom model for transfer learning\n\n Steps:\n (i) load pre-trained NN architecture\n (ii) (optional) add custom classification block of two fully connected layers\n (iii) load pre-trained model weights, if available\n\n Parameters\n ----------\n model: str\n choose which pre-trained Keras deep learning model to use for the 'bottom' layers of the custom model\n model_weights_path: str\n optional path to weights for classification block; otherwise, pre-trained weights will be loaded\n top_model: bool\n whether to include custom classification block, or to load model 'without top' to extract features\n\n Returns\n -------\n my_model: keras.model\n Model utilised for prediction or training\n \"\"\"\n\n # ensure a valid model name was supplied\n if model not in MODELS.keys():\n raise AssertionError(\"The model parameter must be a key in the `MODELS` dictionary\")\n\n # Create pre-trained model for feature extraction, without classification block\n print(\"[INFO] loading %s...\" % (model,))\n model = MODELS[model](include_top=False,\n input_tensor=Input(shape=(224, 224, 3)))\n\n # For transfer learning\n if top_model:\n # Create classification block\n top_model = Sequential()\n top_model.add(Flatten(input_shape=model.output_shape[1:]))\n top_model.add(Dense(256, activation='relu'))\n top_model.add(Dropout(0.5))\n top_model.add(Dense(26, activation='softmax'))\n\n # Join pre-loaded model + classification block\n print(\"[INFO] creating model.\")\n my_model = Model(inputs=model.input,\n outputs=top_model(model.output))\n\n # Load weights for classification block\n print(\"[INFO] loading model weights.\")\n if model_weights_path is not None:\n # user-supplied weights\n my_model.load_weights(model_weights_path)\n elif model == \"vgg16\":\n # pre-trained weights for transfer learning with VGG16\n my_model.load_weights(vgg_weights_path)\n elif model == \"resnet\":\n # pre-trained weights for transfer learning with ResNet50\n print(\"ResNet50 pre-trained weights are not available yet, please use VGG16 for now!\")\n # my_model.load_weights(res_weights_path)\n\n return my_model\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"334917108","text":"set=[]\r\n\r\nprint(\"Enter the numbers one by one : \")\r\nprint(\"Press ENTER when done\")\r\nwhile True:\r\n num=input()\r\n\r\n if (num==\"\"): #To stop input\r\n break\r\n set.append(num)\r\nl=len(set)\r\n\r\n\r\ndef mean(int):\r\n sum=0\r\n i=0\r\n for i in range(0,l):\r\n sum=sum+eval(set[i])\r\n mean.meann=sum/l\r\n print(\"The mean is: \",mean.meann)\r\n\r\ndef median(int):\r\n rem=l%2\r\n pos=int(l/2)\r\n if(rem==0):\r\n med=int(set[pos-1])+int(set[pos])\r\n mediann=med/2\r\n if(rem==1):\r\n pos=int(l/2)\r\n mediann=set[pos]\r\n\r\n print(\"The median is :\",mediann)\r\n\r\n\r\ndef mode(int):\r\n smv=0\r\n i=0\r\n while(i= 15:\n this.flurry_str.on()\n\n #def ro(this, t):\n #Selfbuff('a3',0.10,-1).on()\n\n def s1_proc(this, e):\n this.afflics.poison('s1',120,0.582)\n\n def s2_proc(this, e):\n this.flurry_str.off()\n this.dmg_make('o_s2_crisis',this.s2boost*10.82)\n\n def skill_charge(self, proc, c):\n for s in ('s1', 's2', 's3'):\n if s != proc:\n skill = getattr(self, s)\n skill.charge(skill.sp*c)\n log('sp','{}_charge_{}'.format(proc, s), 0, '{}/{}'.format(int(skill.charged), int(skill.sp)))\n def s1_before(this, e):\n this.skill_charge('s1', this.a1_c)\n def s2_before(this, e):\n this.skill_charge('s2', this.a1_c)\n def s3_before(this, e):\n this.skill_charge('s3', this.a1_c)\n\nif __name__ == '__main__':\n conf = {}\n conf['slots.a'] = CC()+Flash_of_Genius()\n conf['acl'] = \"\"\"\n `s1\n `s2, seq=5\n `s3\n \"\"\"\n\n adv_test.test(module(), conf, verbose=-2)\n\n","sub_path":"adv/cassandra.py","file_name":"cassandra.py","file_ext":"py","file_size_in_byte":2385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"180852467","text":"# -*- coding: utf-8 -*-\n\n\ndef get_count_m(n: int, m: int) -> int:\n \"\"\"1~n 中m出现的次数, 返回次数\n 如1~12中包含1的数字有1 10 11 12,1一共出现了5次\n \"\"\"\n if n < 1 or m < 1 or m > 9:\n return 0\n count = 0\n for item in range(1, n + 1):\n item = str(item)\n count += item.count(str(m))\n\n return count\n\n\ndef countDigitOne(n):\n countr, i = 0, 1\n while i <= n:\n divider = i * 10\n countr += (n // divider) * i + min(max(n % divider - i + 1, 0), i)\n i *= 10\n return countr\n\nif __name__ == \"__main__\":\n # print(get_count_m(1000000, 1))\n print(countDigitOne(10000000))\n","sub_path":"CodingInterviews/2_43_count_1.py","file_name":"2_43_count_1.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"119179874","text":"# 2014-07-10\n# -*- coding: utf-8 -*-\n\nfrom datetime import date, datetime\nimport matplotlib.pyplot as plt\nimport matplotlib.dates\n\nfrom requestParams import *\nfrom getPrice import PriceData\n\ndef readData(filename):\n '''\n (file) -> list of str, list of float\n Gets file name and return two lists:\n with data in string format and square metre price in float.\n '''\n f = open(filename, 'r')\n dates = []\n prices = []\n lines = f.readlines()\n for line in lines:\n lineData = line.strip().split()\n dates.append(lineData[0])\n prices.append(float(lineData[1]))\n f.close()\n return dates, prices\n\ndef writeTodayPrice(filename):\n '''\n Gets file name, checks if today price data exist.\n If not collects today price data and writes to file.\n '''\n today = str(date.today())\n dates, prices = readData(filename)\n if dates == [] or dates[-1] != today: # check if data were written this day or file is empty\n pD = PriceData(url, params)\n houseList = pD.buildHouseList() # build house list for given parameters\n metrePriceList = []\n for house in houseList.getElements():\n if house.getTotalGryvniaPrice() and house.getArea():\n metrePrice = house.getTotalGryvniaPrice()/house.getArea()\n metrePriceList.append(metrePrice)\n averagePrice = sum(metrePriceList)/len(metrePriceList)\n f = open('out.txt', 'a')\n f.write(today + ' ' + str(averagePrice) + '\\n')\n f.close()\n print('Data have been written to file.')\n else:\n print('Data were written to file early.')\n\ndef plotPriceTime(filename):\n '''\n Plots time row of metre house price on days\n '''\n fig = plt.figure()\n dates, prices = readData(filename)\n datesDate = [datetime.strptime(item, '%Y-%m-%d') for item in dates] # convert date to datetime format\n datesNum = matplotlib.dates.date2num(datesDate) # convert datetime list to number list\n # axes = plt.subplot(1,1,1)\n axes = fig.add_subplot(1,1,1)\n plt.plot_date(datesNum, prices, 'r-')\n majorLocator = matplotlib.dates.MonthLocator()\n axes.xaxis.set_major_locator(majorLocator)\n axes.xaxis.set_major_formatter(matplotlib.dates.DateFormatter(\"%Y-%m-%d\")) # \"%m-%d\" for part date\n # -format date to plot\n fig.autofmt_xdate(bottom=0.18, rotation=60)\n plt.grid()\n plt.savefig('figure.jpg')\n\nif __name__ == '__main__':\n writeTodayPrice('out.txt')\n plotPriceTime('out.txt')\n","sub_path":"results.py","file_name":"results.py","file_ext":"py","file_size_in_byte":2642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"267945143","text":"#!/usr/bin/python3\n\"\"\" Program that creates and distributes an archive to your web servers,\nusing the function deploy \"\"\"\nfrom datetime import datetime\nfrom fabric.api import *\nfrom os import path\n\nenv.hosts = ['35.243.214.144', '34.233.133.27']\n\n\ndef do_pack():\n \"\"\" Generates a .tgz archive from the contents of the web_static\n folder of your AirBnB Clone repo \"\"\"\n date_str = datetime.now().strftime('%Y%m%d%H%M%S')\n local(\"mkdir -p versions/\")\n try:\n local(\"tar -cvzf versions/web_static_{}.tgz web_static\"\n .format(date_str))\n return \"versions/web_static_{}.tgz\".format(date_str)\n except Exception:\n return None\n\n\ndef do_deploy(archive_path):\n \"\"\" Distributes an archive to the web servers \"\"\"\n if not path.exists(archive_path):\n return False\n # split the path and get the second element in the list\n file_path = archive_path.split(\"/\")[1]\n serv_folder = \"/data/web_static/releases/\" + file_path\n\n try:\n put(archive_path, \"/tmp/\")\n run(\"sudo mkdir -p \" + serv_folder)\n run(\"sudo tar -xzf /tmp/\" + file_path + \" -C \" + serv_folder + \"/\")\n run(\"sudo rm /tmp/\" + file_path)\n run(\"sudo mv \" + serv_folder + \"/web_static/* \" + serv_folder)\n run(\"sudo rm -rf \" + serv_folder + \"/web_static\")\n run(\"sudo rm -rf /data/web_static/current\")\n run(\"sudo ln -s \" + serv_folder + \" /data/web_static/current\")\n print(\"New version deployed!\")\n return True\n except Exception:\n return False\n\n\ndef deploy():\n \"\"\" Call the do_pack() function and store the path of the created archive\n Call the do_deploy(archive_path) function, using the new\n path of the new archive\n Return False if no archive has been created\n Return the return value of do_deploy\"\"\"\n file_path = do_pack()\n if file_path is None:\n return False\n\n return (do_deploy(file_path))\n","sub_path":"3-deploy_web_static.py","file_name":"3-deploy_web_static.py","file_ext":"py","file_size_in_byte":1918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"475066869","text":"\n\nfrom xai.brain.wordbase.nouns._fixative import _FIXATIVE\n\n#calss header\nclass _FIXATIVES(_FIXATIVE, ):\n\tdef __init__(self,): \n\t\t_FIXATIVE.__init__(self)\n\t\tself.name = \"FIXATIVES\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"fixative\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_fixatives.py","file_name":"_fixatives.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"125985076","text":"from toee import *\nfrom Co8 import D20CO8_F_POISON\n\ndef san_trap( trap, triggerer ):\n\t# numP = 210 / (game.party_npc_size() + game.party_pc_size())\n\t# for obj in game.obj_list_vicinity( triggerer.location, OLC_CRITTERS ):\n\t\t# obj.stat_base_set(stat_experience, (obj.stat_level_get(stat_experience) - numP))\n\tgame.particles( trap.partsys, trap.obj )\n\tgame.particles( 'sp-Sound Burst', trap.obj )\n\tgame.sound(4028,1)\n\tfor obj in game.obj_list_vicinity( triggerer.location, OLC_CRITTERS ):\n\t\tif (obj.distance_to(trap.obj) <= 10):\n\t\t\tif (obj.has_los(trap.obj) or not obj.has_los(trap.obj)):\n\t\t\t\tfor dmg in trap.damage:\n\t\t\t\t\tif (dmg.type == D20DT_POISON):\n\t\t\t\t\t\tif (obj.saving_throw( 20, D20_Save_Fortitude, D20CO8_F_POISON, trap.obj ) == 0):\n\t\t\t\t\t\t\tobj.condition_add_with_args(\"Poisoned\",dmg.damage.bonus,0)\n\t\t\t\t\telse:\n\t\t\t\t\t\tobj.reflex_save_and_damage( trap.obj, 20, D20_Save_Reduction_Half, D20STD_F_SPELL_DESCRIPTOR_ELECTRICITY, dmg.damage, dmg.type, D20DAP_NORMAL )\n\n\tgame.new_sid = 0\n\treturn SKIP_DEFAULT\n","sub_path":"scr/py32013Trap14_glyph_electricity.py","file_name":"py32013Trap14_glyph_electricity.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"467741105","text":"#!/usr/bin/env python\n#\n# Copyright (c) 2019, Arista Networks EOS+\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n#\n# Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n#\n# Neither the name of Arista Networks nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# 'AS IS' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ARISTA NETWORKS\n# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR\n# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\n# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE\n# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN\n# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\nDOCUMENTATION = r'''\n---\nmodule: cv_configlet\nversion_added: \"2.9\"\nauthor: \"EMEA AS Team(ansible-dev@arista.com)\"\nshort_description: Create, Delete, or Update CloudVision Portal Configlets.\ndescription:\n - CloudVison Portal Configlet compares the list of configlets and config in\n - configlets against cvp-facts then adds, deletes, or updates\n - them as appropriate.\n - If a configlet is in cvp_facts but not in configlets it will be deleted.\n - If a configlet is in configlets but not in cvp_facts it will be created.\n - If a configlet is in both configlets and cvp_facts it configuration will\n - be compared and updated with the version in configlets\n - if the two are different.\noptions:\n configlets:\n description: List of configlets to managed on CVP server.\n required: true\n default: null\n cvp_facts:\n description: Facts extracted from CVP servers using cv_facts module\n required: true\n default: null\n configlet_filter:\n description: Filter to apply intended mode on a set of configlet.\n If not used, then module only uses ADD mode. configlet_filter\n list configlets that can be modified or deleted based\n on configlets entries.\n required: false\n default: null\n'''\n\nEXAMPLE = r'''\n---\n- name: Test cv_configlet_v2\n hosts: cvp\n connection: local\n gather_facts: no\n vars:\n configlet_list:\n Test_Configlet: \"! This is a Very First Testing Configlet\\n!\"\n Test_DYNAMIC_Configlet: \"{{ lookup('file', 'templates/configlet_'+inventory_hostname+'.txt') }}\"\n tasks:\n - name: 'Collecting facts from CVP {{inventory_hostname}}.'\n tags:\n - always\n cv_facts:\n register: cvp_facts\n\n - name: 'Create configlets on CVP {{inventory_hostname}}.'\n tags:\n - provision\n cv_configlet:\n cvp_facts: \"{{cvp_facts.ansible_facts}}\"\n configlets: \"{{configlet_list}}\"\n configlet_filter: [\"New\", \"Test\",\"base-chk\",\"base-firewall\"]\n register: cvp_configlet\n'''\n\n# Required by Ansible and CVP\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible_collections.arista.cvp.plugins.module_utils.cv_client import CvpClient\nfrom ansible_collections.arista.cvp.plugins.module_utils.cv_client_errors import CvpLoginError, CvpApiError\nfrom ansible.module_utils.connection import Connection, ConnectionError\nimport re\nfrom time import sleep\n# Required by compare function\nimport difflib\nfrom fuzzywuzzy import fuzz # Library that uses Levenshtein Distance to calculate the differences between strings.\n\ndef compare(fromText, toText, lines=10):\n \"\"\" Compare text string in 'fromText' with 'toText' and produce\n diffRatio - a score as a float in the range [0, 1] 2.0*M / T\n T is the total number of elements in both sequences,\n M is the number of matches.\n Score - 1.0 if the sequences are identical, and 0.0 if they have nothing in common.\n unified diff list\n Code\tMeaning\n '- '\tline unique to sequence 1\n '+ '\tline unique to sequence 2\n ' '\tline common to both sequences\n '? '\tline not present in either input sequence\n \"\"\"\n fromlines = fromText.splitlines(1)\n tolines = toText.splitlines(1)\n diff = list(difflib.unified_diff(fromlines, tolines,n=lines))\n textComp = difflib.SequenceMatcher(None, fromText, toText)\n diffRatio = round( textComp.quick_ratio()*100, 2)\n return [diffRatio,diff]\n\ndef connect(module):\n ''' Connects to CVP device using user provided credentials from playbook.\n :param module: Ansible module with parameters and client connection.\n :return: CvpClient object with connection instantiated.\n '''\n client = CvpClient()\n connection = Connection(module._socket_path)\n host = connection.get_option(\"host\")\n port = connection.get_option(\"port\")\n user = connection.get_option(\"remote_user\")\n pswd = connection.get_option(\"password\")\n try:\n client.connect([host],\n user,\n pswd,\n protocol=\"https\",\n port=port,\n )\n except CvpLoginError as e:\n module.fail_json(msg=str(e))\n return client\n\ndef configlet_action(module):\n ''' Compare configlets in \"configlets\" with configlets in \"cvp_facts\"\n if configlet exists in \"cvp_facts\" check config, if changed update\n if configlet does not exist in \"cvp_facts\" add to CVP\n if configlet in \"cvp_facts\" but not in \"configlets\" remove from CVP if\n not applied to a device or container.\n :param module: Ansible module with parameters and client connection.\n :return: data: dict of module actions and taskIDs\n '''\n # If any configlet changed updated 'changed' flag\n changed = False\n #Compare configlets against cvp_facts-configlets\n keep_configlet = [] # configlets with no changes\n delete_configlet = [] # configlets to delete from CVP\n deleted = []\n update_configlet = [] # configlets with config changes\n updated = []\n new_configlet = [] # configlets to add to CVP\n new = []\n taskList = [] # Tasks that have a pending status after function runs\n\n for configlet in module.params['cvp_facts']['configlets']:\n # Only deal with Static configlets not Configletbuilders or\n # their derived configlets\n # Include only configlets that match filter elements \"all\" will\n # include all configlets.\n if configlet['type'] == 'Static':\n if re.search(r\"\\ball\\b\", str(module.params['configlet_filter'])) or (\n any(element in configlet['name'] for element in module.params['configlet_filter'])):\n if configlet['name'] in module.params['configlets']:\n ansible_configlet = module.params['configlets'][configlet['name']]\n configlet_compare = compare(configlet['config'],ansible_configlet)\n # compare function returns a floating point number\n if configlet_compare[0] == 100.0:\n keep_configlet.append(configlet)\n else:\n update_configlet.append({'data':configlet,'config':ansible_configlet})\n else:\n delete_configlet.append(configlet)\n # Look for new configlets, if a configlet is not CVP assume it is to be created\n for ansible_configlet in module.params['configlets']:\n found = False\n for cvp_configlet in module.params['cvp_facts']['configlets']:\n if str(ansible_configlet) == str(cvp_configlet['name']):\n found = True\n if not found:\n new_configlet.append({'name':str(ansible_configlet),\n 'config':str(module.params['configlets'][ansible_configlet])})\n\n # Only execute this section if ansible check_mode is false\n if not module.check_mode:\n # delete any configlets as required\n if len(delete_configlet) > 0:\n for configlet in delete_configlet:\n try:\n delete_resp = module.client.api.delete_configlet(configlet['name'], configlet['key'])\n except Exception as error:\n errorMessage = re.split(':', str(error))[-1]\n message = \"Configlet %s cannot be deleted - %s\"%(configlet['name'],errorMessage)\n deleted.append({configlet['name']:message})\n else:\n if \"error\" in str(delete_resp).lower():\n message = \"Configlet %s cannot be deleted - %s\"%(configlet['name'],delete_resp['errorMessage'])\n deleted.append({configlet['name']:message})\n else:\n changed = True\n deleted.append({configlet['name']:\"success\"})\n\n # Update any configlets as required\n if len(update_configlet) > 0:\n for configlet in update_configlet:\n try:\n update_resp = module.client.api.update_configlet(configlet['config'],\n configlet['data']['key'],\n configlet['data']['name'])\n except Exception as error:\n errorMessage = re.split(':', str(error))[-1]\n message = \"Configlet %s cannot be updated - %s\"%(configlet['name'],errorMessage)\n updated.append({configlet['name']:message})\n else:\n if \"errorMessage\" in str(update_resp):\n message = \"Configlet %s cannot be updated - %s\"%(configlet['name'],update_resp['errorMessage'])\n updated.append({configlet['data']['name']:message})\n else:\n module.client.api.add_note_to_configlet(configlet['data']['key'],\"## Managed by Ansible ##\")\n changed = True\n updated.append({configlet['data']['name']:\"success\"})\n\n # Add any new configlets as required\n if len(new_configlet) > 0:\n for configlet in new_configlet:\n try:\n new_resp = module.client.api.add_configlet(configlet['name'],configlet['config'])\n except Exception as error:\n errorMessage = re.split(':', str(error))[-1]\n message = \"Configlet %s cannot be created - %s\"%(configlet['name'],errorMessage)\n created.append({configlet['name']:message})\n else:\n if \"errorMessage\" in str(new_resp):\n message = \"Configlet %s cannot be created - %s\"%(configlet['name'],new_resp['errorMessage'])\n new.append({configlet['name']:message})\n else:\n module.client.api.add_note_to_configlet(new_resp,\"## Managed by Ansible ##\")\n changed = True\n new.append({configlet['name']:\"success\"})\n\n # Get any Pending Tasks in CVP\n if changed:\n # Allow CVP to generate Tasks\n sleep(10)\n # Build required data for tasks in CVP - work order Id, current task status, name\n # description\n tasksField = {'workOrderId':'workOrderId','workOrderState':'workOrderState',\n 'currentTaskName':'currentTaskName','description':'description',\n 'workOrderUserDefinedStatus':'workOrderUserDefinedStatus','note':'note',\n 'taskStatus':'taskStatus', 'workOrderDetails': 'workOrderDetails'}\n tasks = module.client.api.get_tasks_by_status('Pending')\n # Reduce task data to required fields\n for task in tasks:\n taskFacts= {}\n for field in task.keys():\n if field in tasksField:\n taskFacts[tasksField[field]] = task[field]\n taskList.append(taskFacts)\n data = {'new':new,'updated':updated,'deleted':deleted,'tasks':taskList}\n else:\n for configlet in new_configlet:\n new.append({configlet['name']:\"checked\"})\n for configlet in update_configlet:\n updated.append({configlet['data']['name']:\"checked\"})\n for configlet in delete_configlet:\n deleted.append({configlet['name']:\"checked\"})\n data = {'new':new,'updated':updated,'deleted':deleted,'tasks':taskList}\n return [changed,data]\n\n\ndef main():\n \"\"\" main entry point for module execution\n \"\"\"\n argument_spec = dict(\n configlets=dict(type='dict',required=True),\n cvp_facts=dict(type='dict',required=True),\n configlet_filter=dict(type='list', default='none')\n )\n\n module = AnsibleModule(argument_spec=argument_spec,\n supports_check_mode=True)\n result = dict(changed=False,data={})\n messages = dict(issues=False)\n # Connect to CVP instance\n module.client = connect(module)\n\n # Pass module params to configlet_action to act on configlet\n result['changed'],result['data'] = configlet_action(module)\n module.exit_json(**result)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"arista/cvp/plugins/modules/cv_configlet.py","file_name":"cv_configlet.py","file_ext":"py","file_size_in_byte":13682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"637437855","text":"import cv2\nimport numpy as np\nimport math\nimport scipy.stats as st\nimport matplotlib.pyplot as plt\nimport operator\nimport time\nimport os\nfrom enum import Enum\nimport pandas as pd\n\n#Import Akisato Kimura implementation of Itti's Saliency Map Generator\n#Original Source: https://github.com/akisatok/pySaliencyMap\nimport pySaliencyMap\n\n#-------------------------------------------------\n#Start Global Variables\n#-------------------------------------------------\n\nsegmentsEntropies = []\nsegmentsCoords = []\n\ndir = \"/Users/dylanseychell/dev/MSRA10K_Imgs_GT/MSRA10K_Imgs_GT/Imgs\"\ndir2 = \"/Users/dylanseychell/dev/shelves\"\ndir3 = \"/Users/dylanseychell/dev/⁨COTS/COTSDataset⁩/⁨Part2-MultipleObjects⁩\"\n\nsegDim = 9\nsegments = []\ngtSegments = []\ndws = []\nsaraList = []\n\nevalList = []\nlabelsEvalList = ['Image','Index','Rank','Quartile','isGT','Outcome']\n\noutcomeList = []\nlabelsOutcomeList = ['Image', 'FN', 'FP', 'TN', 'TP']\n\ndataframeCollection = {}\nerrorCount = 0\n\n#-------------------------------------------------\n#SaRa Initial Functions\n#-------------------------------------------------\n\ndef generateSegments(img, segCount, depth=None):\n segments = []\n segmentCount = segCount\n index = 0\n\n wInterval = int(img.shape[1]/segmentCount)\n hInterval = int(img.shape[0]/segmentCount)\n\n for i in range(segmentCount):\n for j in range(segmentCount):\n #Note: img[TopRow:BottomRow, FirstColumn:LastColumn]\n tempSegment = img[int(hInterval*i):int(hInterval*(i+1)), int(wInterval*j):int(wInterval*(j+1))]\n #cv2.imshow(\"Crop\" + str(i) + str(j), tempSegment)\n #coordTup = (index, x1, y1, x2, y2)\n coordTup = (index, int(wInterval*j), int(hInterval*i), int(wInterval*(j+1)), int(hInterval*(i+1)))\n segmentsCoords.append(coordTup)\n segments.append(tempSegment)\n index+=1\n\n return segments\n\ndef returnIttiSaliency(img):\n imgsize = img.shape\n img_width = imgsize[1]\n img_height = imgsize[0]\n sm = pySaliencyMap.pySaliencyMap(img_width, img_height)\n saliency_map = sm.SMGetSM(img)\n\n #Scale pixel values to 0-255 instead of float (approx 0, hence black image)\n #https://stackoverflow.com/questions/48331211/how-to-use-cv2-imshow-correctly-for-the-float-image-returned-by-cv2-distancet/48333272\n saliency_map = cv2.normalize(saliency_map, None, 255,0, cv2.NORM_MINMAX, cv2.CV_8UC1)\n\n return saliency_map\n\n#-------------------------------------------------\n#Saliency Ranking\n#-------------------------------------------------\n\ndef calculatePixelFrequency(img):\n flt = img.flatten()\n unique, counts = np.unique(flt, return_counts=True)\n pixelsFrequency = dict(zip(unique, counts))\n\n return pixelsFrequency\n\ndef calculateEntropy(img, w, dw):\n flt = img.flatten()\n\n c = flt.shape[0]\n totalPixels = 0\n tprob = 0\n sumOfProbs = 0\n entropy = 0\n wt = w*10\n\n #if imgD=None then proceed normally\n #else calculate its frequency and find max\n #use this max value as a weight in entropy\n\n pixelsFrequency = calculatePixelFrequency(flt)\n\n totalPixels = sum(pixelsFrequency.values())\n\n for px in pixelsFrequency:\n tprob = (pixelsFrequency.get(px))/totalPixels\n #probs[px] = tprob\n entropy += entropy + (tprob*math.log(2,(1/tprob)))\n\n entropy = entropy * wt * dw\n\n return(entropy)\n\ndef findMostSalientSegment(segments, kernel, dws):\n maxEntropy = 0\n index = 0\n i = 0\n for segment in segments:\n #tempEntropy = calculateEntropy(segment, kernel[i])\n tempEntropy = calculateEntropy(segment, kernel[i], dws[i])\n tempTup = (i, tempEntropy)\n segmentsEntropies.append(tempTup)\n if tempEntropy > maxEntropy:\n maxEntropy = tempEntropy\n index = i\n i += 1\n\n return maxEntropy, index\n\ndef makeGaussian(size, fwhm = 10, center=None):\n #https://gist.github.com/andrewgiessel/4635563\n \"\"\" Make a 2D gaussian kernel.\n size is the length of a side of the square\n fwhm is full-width-half-maximum, which\n can be thought of as an effective radius.\n \"\"\"\n\n x = np.arange(0, size, 1, float)\n y = x[:,np.newaxis]\n\n if center is None:\n x0 = y0 = size // 2\n else:\n x0 = center[0]\n y0 = center[1]\n\n return np.exp(-4*np.log(2) * ((x-x0)**2 + (y-y0)**2) / fwhm**2)\n\ndef get_last_non_zero_index(d, default=None):\n rev = (len(d) - idx for idx, item in enumerate(reversed(d), 1) if item)\n return next(rev, default)\n\ndef get_first_non_zero_indox(list):\n return next((i for i, x in enumerate(list) if x), None)\n\ndef genDepthWeights(dSegments, depthMap):\n\n histD,binsD = np.histogram(depthMap,256,[0,256])\n firstNZ = get_first_non_zero_indox(histD)\n lastNZ = get_last_non_zero_index(histD)\n mid = (firstNZ+lastNZ)/2\n\n for seg in dSegments:\n hist,bins = np.histogram(seg,256,[0,256])\n #print(hist)\n dw=0\n ind = 0\n for s in hist:\n if(ind > mid):\n dw = dw + (s*(1))\n ind = ind + 1\n dws.append(dw)\n\n return dws\n\ndef genBlankDepthWeight(dSegments):\n for seg in dSegments:\n dw=1\n dws.append(dw)\n return dws\n\ndef generateHeatMap(img, mode, sortedSegScores, SegmentsCoords):\n #mode0 prints just a white grid\n #mode1 prints prints a colour-coded grid\n\n font = cv2.FONT_HERSHEY_SIMPLEX\n printIndex = 0\n set = int(0.25*len(sortedSegScores))\n color = (0,0,0)\n\n saraListOut = []\n\n #rank = 0\n\n for ent in sortedSegScores:\n quartile = 0\n if(mode == 0):\n color = (255,255,255)\n t = 4\n elif(mode == 1):\n if(printIndex+1 <= set):\n color = (0,0,255)\n t = 8\n quartile = 4\n elif(printIndex+1 <= set*2):\n color = (0,128,255)\n t = 6\n quartile = 3\n elif(printIndex+1 <= set*3):\n color = (0,255,255)\n t = 4\n quartile = 2\n elif(printIndex+1 <= set*4):\n color = (0,250,0)\n t = 2\n quartile = 1\n\n x1 = segmentsCoords[ent[0]][1]\n y1 = segmentsCoords[ent[0]][2]\n x2 = segmentsCoords[ent[0]][3]\n y2 = segmentsCoords[ent[0]][4]\n x = int((x1 + x2 )/2)\n y = int((y1 + y2)/2)\n\n cv2.putText(img, str(printIndex), (x-2,y), font, .5, color ,1 ,cv2.LINE_AA)\n cv2.rectangle(img, (x1,y1), (x2,y2), color , t)\n\n #print(\"\\nText Index:\" + str(printIndex))\n #print(\"Rank:\" + str(ent[0]))\n #print(\"Quartile:\" + str(quartile))\n\n #cv2.putText(gtSara, str(printIndex), (x-2,y), font, .5, (255,255,255) ,1 ,cv2.LINE_AA)\n #cv2.rectangle(gtSara, (x1,y1), (x2,y2), color , t)\n\n #saraTuple = (index, rank, quartile)\n saraTuple = (ent[0], printIndex, quartile)\n #print(\"\\nSara Tuple: \" + str(saraTuple))\n saraListOut.append(saraTuple)\n printIndex+=1\n\n #print(saraListOut)\n return img, saraListOut\n\ndef generateSaRa(tex, texSegments):\n #Generate Gaussian Weights\n gaussian_kernel_array = makeGaussian(segDim)\n gaussian1d = gaussian_kernel_array.ravel()\n\n #Generate Depth scores\n #dSegments = generateSegments(gt, segDim)\n dws = genBlankDepthWeight(texSegments)\n\n #Generate Saliency Ranking\n maxH, index = findMostSalientSegment(texSegments, gaussian1d, dws)\n dictEntropies = dict(segmentsEntropies)\n sortedEntropies = sorted(dictEntropies.items(), key=operator.itemgetter(1), reverse=True)\n\n #Generate Heatmap and display it\n texOut, saraListOut = generateHeatMap(tex, 1, sortedEntropies, segmentsCoords)\n return texOut, saraListOut\n\n#-------------------------------------------------\n#Evaluation Functions\n#-------------------------------------------------\n\ndef returnSARA(inputImg):\n\n texSegments = generateSegments(returnIttiSaliency(inputImg), 9)\n saraOutput, saraListOutput = generateSaRa(inputImg, texSegments)\n\n return saraOutput, saraListOutput\n\ndef mse(imageA, imageB):\n\t# the 'Mean Squared Error' between the two images is the\n\t# sum of the squared difference between the two images;\n\t# NOTE: the two images must have the same dimension\n\terr = np.sum((imageA.astype(\"float\") - imageB.astype(\"float\")) ** 2)\n\terr /= float(imageA.shape[0] * imageA.shape[1])\n\n\t# return the MSE, the lower the error, the more \"similar\"\n\t# the two images are\n\treturn err\n\n#-------------------------------------------------\n#-------------------------------------------------\n#Start Main Code\n#-------------------------------------------------\n#-------------------------------------------------\n\ncotsSet = \"academic_book_no\"\nimgPath1 = \"/Users/dylanseychell/dev/COTS/COTSDataset/Part2-MultipleObjects/\" + cotsSet +\"/3_colour.jpeg\"\n\ns1 = cv2.imread(imgPath1)\n\n#for another image, simply import a second image and initialise s2 and replicate the code below for the scond image\n#s2 = cv2.imread(imgPath2)\n\ncv2.imshow(\"Input Image\", s1)\nprint(texPath1)\n\n#texSegments1 = generateSegments(returnIttiSaliency(s1), 9)\n\nprint(\"Generating SaRa\")\n\noutS1, saraListS1 = returnSARA(s1)\ncv2.imshow(\"SaRa Output for S1\", outS1)\nprint(saraListS1)\n\ncv2.waitKey()\n\n#-------------------------------------------------\n#Auxiliary Output Code\n#-------------------------------------------------\n\n#start_time = time.time()\n#Code to be timed goes here\n#print(\"%s\" % (time.time() - start_time))\n\n#returns zero if all pixels are black\n#print(cv2.countNonZero(gtSegments[0]))\n","sub_path":"Code/saraRC1.py","file_name":"saraRC1.py","file_ext":"py","file_size_in_byte":9583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"569252698","text":"# -*- coding:utf8 -*-\nimport os\nimport re\nimport code\nimport shutil\nimport argparse\nimport functools\nimport inspect\n\nfrom fnmatch import fnmatch\n\nfrom codecs import BOM_UTF8\n\n\nclass DirectoryContext(object):\n def __init__(self, dir_path):\n self.target_dir_path = dir_path\n self.prev_dir_path = None\n \n def __enter__(self):\n print('push_directory:{0}'.format(self.target_dir_path))\n self.prev_dir_path = os.getcwd()\n os.chdir(self.target_dir_path)\n return self\n \n def __exit__(self, type, value, tb):\n print('pop_directory:{0}'.format(self.prev_dir_path))\n os.chdir(self.prev_dir_path)\n\n\nclass FilterPattern(object):\n RECURSIVE_DIR_PATTERN = '...'\n\n def __init__(self, patterns):\n self.patterns = [\n re.compile(pattern.replace('...', '[\\w\\/]+') + '$') \n if '...' in pattern else pattern \n for pattern in patterns]\n\n def __call__(self, text):\n for pattern in self.patterns:\n if isinstance(pattern, str):\n return fnmatch(text, pattern)\n else:\n return pattern.match(text)\n\n\nclass ProjectManager(object):\n class ExitCode(object):\n EMPTY_ARGUMENTS = -1\n WRONG_PROCESS = -2\n\n class Error(Exception):\n pass\n\n class ArgumentError(Error):\n pass\n\n def __init__(self, import_name=None):\n self.root_path = self.get_root_path(import_name) if import_name is not None else os.getcwd()\n self.main_parser = argparse.ArgumentParser()\n self.sub_parsers = self.main_parser.add_subparsers()\n\n @staticmethod\n def get_root_path(import_name):\n import sys\n mod = sys.modules.get(import_name)\n if mod is not None and hasattr(mod, '__file__'):\n return os.path.dirname(os.path.abspath(mod.__file__))\n\n import pkgutil\n loader = pkgutil.get_loader(import_name)\n\n if loader is None or import_name == '__main__':\n return os.getcwd()\n\n if hasattr(loader, 'get_filename'):\n file_path = loader.get_filename(import_name)\n else:\n __import__(import_name)\n mod = sys.modules[import_name]\n file_path = getattr(mod, '__file__', None)\n\n if file_path is None:\n raise RuntimeError('NOT_FOUND_PATH_FOR_MODULE:{0}'.format(import_name))\n\n return os.path.dirname(os.path.abspath(file_path))\n\n def command(self, **option_table):\n def handler(func):\n option_names = inspect.getargspec(func).args # OLD_CODE: func.func_code.co_varnames[:func.func_code.co_argcount]\n\n @functools.wraps(func)\n def wrapper(ns):\n kwargs = dict()\n for option_name in option_names:\n option_info_dict = option_table[option_name]\n option_nargs = option_info_dict.get('nargs', None)\n if option_nargs == 1:\n kwargs[option_name] = getattr(ns, option_name)[0]\n else:\n kwargs[option_name] = getattr(ns, option_name)\n\n return func(**kwargs)\n\n def create_sub_parser(func_name, func_doc):\n new_sub_parser = self.sub_parsers.add_parser(\n func_name, help=func_doc)\n\n for option_name in option_names:\n option_info_dict = option_table[option_name]\n option_flag = option_info_dict.get('flag', None)\n if option_flag:\n del option_info_dict['flag']\n new_sub_parser.add_argument(\n option_flag, '--' + option_name, **option_info_dict)\n else:\n new_sub_parser.add_argument(\n option_name, **option_info_dict)\n\n return new_sub_parser\n\n sub_parser = create_sub_parser(func.__name__, func.__doc__)\n sub_parser.set_defaults(func=wrapper)\n return handler\n\n def add_common_argument(self, *args, **kwargs):\n self.main_parser.add_argument(*args, **kwargs) \n \n def run_command(self, cmd_args):\n if cmd_args is None or len(cmd_args) == 0:\n self.main_parser.print_help()\n return self.ExitCode.EMPTY_ARGUMENTS\n\n ns = self.main_parser.parse_args(cmd_args)\n try:\n return ns.func(ns)\n except self.Error as e:\n print('')\n print(str(e))\n return self.ExitCode.WRONG_PROCESS\n\n @staticmethod\n def run_system_command(exec_path, exec_args, is_verbose=True):\n cmd_line = '%s %s' % (exec_path, ' '.join(exec_args))\n if is_verbose:\n print('$ {0}'.format(cmd_line))\n return os.system(cmd_line)\n\n @staticmethod\n def run_python_shell(title, local_dict):\n code.interact(title, local=local_dict)\n\n @staticmethod\n def push_directory(dir_path):\n return DirectoryContext(os.path.realpath(dir_path))\n\n @staticmethod\n def access_directory(dir_path):\n return os.access(dir_path, os.R_OK)\n\n @staticmethod\n def access_file(file_path):\n return os.access(file_path, os.R_OK)\n\n @staticmethod\n def find_dir_path_iter(\n base_dir_path='.',\n path_patterns=None,\n filter_dir_name=None,\n is_all_dirs=False,\n is_real_path=True):\n\n filter_path_pattern = FilterPattern(path_patterns) if path_patterns else None\n\n for parent_dir_path, dir_names, file_names in os.walk(base_dir_path):\n if filter_path_pattern is None or filter_path_pattern(parent_dir_path[len(base_dir_path) + 1:]):\n yield os.path.realpath(parent_dir_path) if is_real_path else parent_dir_path\n\n if not is_all_dirs:\n for dir_name in list(dir_names):\n if dir_name[0] == '.':\n dir_names.remove(dir_name)\n\n if filter_dir_name:\n for dir_name in list(dir_names):\n if not filter_dir_name(dir_name):\n dir_names.remove(dir_name)\n\n @classmethod\n def smart_find_file_path_iter(cls, hint, base_dir_path='.'):\n real_file_path = os.path.realpath(os.path.expandvars(hint))\n if os.access(real_file_path, os.R_OK):\n yield real_file_path\n\n file_name_pattern = hint + '*'\n for found_file_path in cls.find_file_path_iter(base_dir_path, filter_file_name=FilterPattern([file_name_pattern])):\n yield found_file_path\n\n @classmethod\n def smart_find_file_path(cls, hint, base_dir_path='.'):\n for found_file_path in cls.smart_find_file_path_iter(hint, base_dir_path):\n return found_file_path\n else: \n raise cls.Error('NOT_FOUND_FILE_IN_DIR_PATH:' + base_dir_path + ' HINT:' + hint)\n\n @staticmethod\n def find_file_path_iter(\n base_dir_path='.',\n path_patterns=None,\n filter_dir_name=None,\n filter_file_name=None,\n filter_file_ext=None,\n filter_file_path=None,\n is_all_files=False,\n is_real_path=True):\n\n filter_path_pattern = FilterPattern(path_patterns) if path_patterns else None\n\n for parent_dir_path, dir_names, file_names in os.walk(base_dir_path):\n if not is_all_files:\n for dir_name in list(dir_names):\n if dir_name[0] == '.':\n dir_names.remove(dir_name)\n\n if filter_dir_name:\n for dir_name in list(dir_names):\n if not filter_dir_name(dir_name):\n dir_names.remove(dir_name)\n \n for file_name in file_names:\n if not is_all_files:\n if file_name[0] == '.':\n continue\n\n if filter_file_name is None or filter_file_name(file_name):\n file_ext = os.path.splitext(file_name)[1].lower()\n if filter_file_ext is None or filter_file_ext(file_ext):\n file_path = os.path.join(parent_dir_path, file_name)\n if filter_file_path is None or filter_file_path(file_path):\n if filter_path_pattern is None or filter_path_pattern(file_path[len(base_dir_path) + 1:]):\n yield os.path.realpath(file_path) if is_real_path else file_path\n\n @staticmethod\n def add_utf8_bom(file_path):\n file_bom = open(file_path, 'rb').read(len(BOM_UTF8))\n if file_bom != BOM_UTF8:\n file_data = open(file_path, 'rb').read()\n open(file_path, 'wb').write(BOM_UTF8 + file_data)\n\n @staticmethod\n def remove_utf8_bom(file_path):\n file_bom = open(file_path, 'rb').read(len(BOM_UTF8))\n if file_bom == BOM_UTF8:\n file_data = open(file_path, 'rb').read()\n open(file_path, 'wb').write(file_data[len(BOM_UTF8):])\n\n @staticmethod\n def remove_symbolic_link(link_path, is_testing=True):\n real_link_path = os.path.abspath(link_path)\n if is_testing:\n print('test_remove_symbolic_link:{0}'.format(real_link_path))\n else:\n print('remove_symbolic_link:{0}'.format(real_link_path))\n os.unlink(real_link_path)\n\n @staticmethod\n def remove_file(file_path, is_testing=True):\n real_file_path = os.path.realpath(file_path)\n if is_testing:\n print('test_remove_file:{0}'.format(real_file_path))\n else:\n print('remove_file:{0}'.format(real_file_path))\n os.remove(real_file_path)\n\n @staticmethod\n def remove_tree(dir_path, is_testing=True):\n real_dir_path = os.path.realpath(dir_path)\n if is_testing:\n print('test_remove_tree:{0}'.format(real_dir_path))\n else:\n print('remove_tree:{0}'.format(real_dir_path))\n shutil.rmtree(real_dir_path)\n\n @classmethod\n def remove_trees_by_patterns(cls, path_patterns, base_dir_path='.', is_testing=True, is_verbose=True):\n real_dir_paths = [os.path.realpath(dir_path)\n for dir_path in cls.find_dir_path_iter(\n base_dir_path, path_patterns, is_all_dirs=True)]\n \n for real_dir_path in reversed(real_dir_paths):\n if is_testing:\n print('test_remove_tree:{0} path_patterns:{1}'.format(real_dir_path, path_patterns))\n else:\n if is_verbose:\n print('remove_tree:{0} path_patterns:{1}'.format(real_dir_path, path_patterns))\n shutil.rmtree(real_dir_path)\n\n @classmethod\n def remove_files_by_patterns(cls, path_patterns, base_dir_path='.', is_testing=True, is_verbose=True):\n real_file_paths = [os.path.realpath(file_path)\n for file_path in cls.find_file_path_iter(\n base_dir_path, path_patterns, is_all_files=True)]\n \n for real_file_path in real_file_paths:\n if is_testing:\n print('test_remove_file:{0} path_patterns:{1}'.format(real_file_path, path_patterns))\n else:\n if is_verbose:\n print('remove_file:{0} path_patterns:{1}'.format(real_file_path, path_patterns))\n os.remove(real_file_path)\n\n @classmethod\n def make_symbolic_link(cls, source_path, target_path):\n real_source_path = os.path.realpath(source_path)\n real_target_path = os.path.realpath(target_path)\n print('make_symbolic_link_source:{0} target:{1}'.format(real_source_path, real_target_path))\n os.symlink(real_source_path, real_target_path)\n\n @staticmethod\n def make_directory(dir_path):\n real_dir_path = os.path.realpath(dir_path)\n if os.access(real_dir_path, os.R_OK):\n print('already_made_directory:{0}'.format(real_dir_path))\n return False\n\n print('make_directory:{0}'.format(real_dir_path))\n os.makedirs(real_dir_path)\n return True\n\n @staticmethod\n def touch_file(file_path):\n real_file_path = os.path.realpath(file_path)\n if os.access(real_file_path, os.R_OK):\n print('touch_file:{0}'.format(real_file_path))\n file_data = open(file_path, \"rb\").read()\n else:\n print('make_touch_file:{0}'.format(real_file_path))\n file_data = \"\"\n\n open(real_file_path, \"wb\").write(file_data)\n\n def join_path(self, *paths):\n return os.path.join(self.root_path, *paths)\n\n\nif __name__ == '__main__':\n FILE_PATH = os.path.realpath(__file__)\n MODULE_PATH = os.path.dirname(FILE_PATH)\n PROJECT_PATH = os.path.dirname(MODULE_PATH)\n\n pm = ProjectManager()\n\n @pm.command(messages=dict(type=str, nargs='+'))\n def echo(messages):\n print(messages)\n\n pm.run_command(['echo', 'haha'])\n","sub_path":"pypm/project_manager.py","file_name":"project_manager.py","file_ext":"py","file_size_in_byte":12983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"18387043","text":"import os\nimport shutil\nimport sys\n\nfrom conda_build.conda_interface import TemporaryDirectory, PY3\nimport pytest\n\nfrom conda_build import post\nfrom conda_build.utils import on_win\n\nfrom .utils import test_config, testing_workdir, add_mangling\n\n\ndef test_compile_missing_pyc(testing_workdir):\n good_files = ['f1.py', 'f3.py']\n bad_file = 'f2_bad.py'\n tmp = os.path.join(testing_workdir, 'tmp')\n shutil.copytree(os.path.join(os.path.dirname(__file__), 'test-recipes',\n 'metadata', '_compile-test'), tmp)\n post.compile_missing_pyc(os.listdir(tmp), cwd=tmp,\n python_exe=sys.executable)\n for f in good_files:\n assert os.path.isfile(os.path.join(tmp, add_mangling(f)))\n assert not os.path.isfile(os.path.join(tmp, add_mangling(bad_file)))\n\n\n@pytest.mark.skipif(on_win, reason=\"no linking on win\")\ndef test_hardlinks_to_copies(testing_workdir):\n with open('test1', 'w') as f:\n f.write(\"\\n\")\n\n os.link('test1', 'test2')\n assert os.lstat('test1').st_nlink == 2\n assert os.lstat('test2').st_nlink == 2\n\n post.make_hardlink_copy('test1', os.getcwd())\n post.make_hardlink_copy('test2', os.getcwd())\n\n assert os.lstat('test1').st_nlink == 1\n assert os.lstat('test2').st_nlink == 1\n","sub_path":"tests/test_post.py","file_name":"test_post.py","file_ext":"py","file_size_in_byte":1291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"452226555","text":"import json\nimport numpy as np\nimport torch\nfrom torch import nn\nfrom torch.utils.data import Dataset, DataLoader\nfrom transformers import BertTokenizer\nfrom mogai_bert_nezha import BertModel, BertConfig\nimport os\nfrom torch.nn import init\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0,1,2,3\"\ndevice = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')\nprint(\"Using {} device\".format(device))\nmodel_path = \"./nezha-base-www\"\n#model_path = \"./chinese-bert-wwm-ext\"\ntokenizer = BertTokenizer.from_pretrained(model_path)\nConfig = BertConfig.from_pretrained(model_path)\nConfig.conditional_size = 128\nConfig.position_embedding_type = \"nezha\"\nConfig.max_position_embeddings = 1024\nbatch_size = 16\nmaxlen = 1024\nLR = 1e-5\nvariants = [\n u'短短匹配A类',\n u'短短匹配B类',\n u'短长匹配A类',\n u'短长匹配B类',\n u'长长匹配A类',\n u'长长匹配B类',\n]\n\n# 读取数据\ntrain_data, valid_data, test_data = [], [], []\nfor i, var in enumerate(variants):\n key = 'labelA' if 'A' in var else 'labelB'\n fs = [\n './datasets/sohu2021_open_data_clean/%s/train.txt' % var,\n './datasets/round2/%s.txt' % var\n ]\n for f in fs:\n with open(f) as f:\n for l in f:\n l = json.loads(l)\n train_data.append((i, l['source'], l['target'], int(l[key])))\n f = './datasets/sohu2021_open_data_clean/%s/valid.txt' % var\n with open(f) as f:\n for l in f:\n l = json.loads(l)\n valid_data.append((i, l['source'], l['target'], int(l[key])))\n\n\nclass CustomImageDataset(Dataset):\n\n def __init__(self, data, tokenizer, maxlen, transform=None, target_transform=None):\n self.data = data\n self.tokenizer = tokenizer\n self.maxlen = maxlen\n self.transform = transform\n self.target_transform = target_transform\n # self.c2stander = {0:0,1:1,2:0,3:1,4:0,5:1}\n # self.c2length = {0:0, 1:0, 2:1, 3: 1, 4: 2, 5: 2}\n\n def text_to_id(self, source, target, c):\n if c == 4 or c == 5:\n input_ids = np.zeros(self.maxlen, dtype='int')\n attention_mask = np.zeros(self.maxlen, dtype='int')\n token_type_ids = np.zeros(self.maxlen, dtype='int')\n one_maxlen = self.maxlen // 2\n token_id_1 = self.tokenizer.encode(source, max_length=one_maxlen, truncation=True)\n token_id_2 = self.tokenizer.encode(target, max_length=one_maxlen, truncation=True)\n input_id = token_id_1 + token_id_2[1:]\n token_type_id = [0] * len(token_id_1) + [1] * (len(token_id_2) - 1)\n assert len(input_id) == len(token_type_id)\n input_ids[:len(input_id)] = input_id\n attention_mask[:len(input_id)] = 1\n token_type_ids[:len(token_type_id)] = token_type_id\n else:\n input_ids = np.zeros(self.maxlen, dtype='int')\n attention_mask = np.zeros(self.maxlen, dtype='int')\n token_type_ids = np.zeros(self.maxlen, dtype='int')\n token_id = self.tokenizer(source, target, max_length=self.maxlen, truncation=True)\n token_type_id = token_id['token_type_ids']\n input_id = token_id['input_ids']\n assert len(input_id) == len(token_type_id)\n input_ids[:len(input_id)] = input_id\n attention_mask[:len(input_id)] = 1\n token_type_ids[:len(token_type_id)] = token_type_id\n return input_ids, attention_mask, token_type_ids\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, idx):\n c = self.data[idx][0]\n text_source = self.data[idx][1]\n text_target = self.data[idx][2]\n label = self.data[idx][3]\n input_ids, attention_mask, token_type_ids = self.text_to_id(text_source, text_target, c)\n sample = {\"input_ids\": input_ids, \"attention_mask\": attention_mask, 'token_type_ids': token_type_ids}\n # stander = self.c2stander[c]\n # length = self.c2length[c]\n return sample, label, c\n\n\n# Define model\nclass NeuralNetwork(nn.Module):\n def __init__(self, model_path):\n super(NeuralNetwork, self).__init__()\n self.linear_relu_stack = nn.Sequential(\n nn.Linear(768, 512),\n nn.ReLU(),\n nn.Linear(512, 2)\n )\n self.standerembed = nn.Embedding(6, 128)\n # self.lengthembed = nn.Embedding(3,32)\n self.bert = BertModel.from_pretrained(model_path, config=Config)\n\n def forward(self, input_ids, attention_mask, token_type_ids, c):\n conditional = self.standerembed(c)\n # lengthembed = self.lengthembed(length)\n # conditional = torch.cat([standerembed,lengthembed],-1)\n x1 = self.bert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, conditional=conditional)\n x2 = x1.last_hidden_state\n logits = self.linear_relu_stack(x2[:, 0])\n return logits\n\n\nmodel = NeuralNetwork(model_path)\nprint(model)\nfor i in model.state_dict():\n if 'LayerNorm.bias_dense' in i or 'LayerNorm.weight_dense' in i:\n init.zeros_(model.state_dict()[i])\nmodel = nn.DataParallel(model,device_ids=[0,1,2,3]).cuda()\n#model = model.cuda()\nloss_fn = nn.CrossEntropyLoss()\noptimizer = torch.optim.AdamW(model.parameters(), lr=LR)\n\ntraining_data = CustomImageDataset(train_data, tokenizer, maxlen)\ntesting_data = CustomImageDataset(valid_data, tokenizer, maxlen)\ntrain_dataloader = DataLoader(training_data, batch_size=batch_size, shuffle=True)\ntest_dataloader = DataLoader(testing_data, batch_size=batch_size)\n\n\ndef train(dataloader, model, loss_fn, optimizer):\n size = len(dataloader.dataset)\n correct = 0\n model.train()\n for batch, (data, y, c) in enumerate(dataloader):\n input_ids = data['input_ids'].to(device)\n attention_mask = data['attention_mask'].to(device)\n token_type_ids = data['token_type_ids'].to(device)\n y = y.to(device)\n c = c.to(device)\n pred = model(input_ids, attention_mask, token_type_ids, c)\n loss = loss_fn(pred, y)\n correct += (pred.argmax(1) == y).type(torch.float).sum().item()\n print(pred.argmax(1))\n # Backpropagation\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n if batch % 50 == 0:\n loss, current = loss.item(), batch * len(input_ids)\n print(f\"loss: {loss:>7f} [{current:>5d}/{size:>5d}]\")\n print(f\"Accuracy: {(100 * correct / size):>0.1f}%\")\n\n\ndef test(dataloader, model):\n size = len(dataloader.dataset)\n model.eval()\n test_loss = 0\n TP_a, TN_a, FN_a, FP_a = 0, 0, 0, 0\n TP_b, TN_b, FN_b, FP_b = 0, 0, 0, 0\n with torch.no_grad():\n for data, y, c in dataloader:\n input_ids = data['input_ids'].to(device)\n attention_mask = data['attention_mask'].to(device)\n token_type_ids = data['token_type_ids'].to(device)\n y = y.to(device)\n c = c.to(device)\n pred = model(input_ids, attention_mask, token_type_ids, c)\n test_loss += loss_fn(pred, y).item()\n pred_result = pred.argmax(1)\n TP_a += ((pred_result == 1) & (y == 1) & ((c == 0) | (c == 2) | (c == 4))).type(torch.float).sum().item()\n TN_a += ((pred_result == 0) & (y == 0) & ((c == 0) | (c == 2) | (c == 4))).type(torch.float).sum().item()\n FN_a += ((pred_result == 0) & (y == 1) & ((c == 0) | (c == 2) | (c == 4))).type(torch.float).sum().item()\n FP_a += ((pred_result == 1) & (y == 0) & ((c == 0) | (c == 2) | (c == 4))).type(torch.float).sum().item()\n TP_b += ((pred_result == 1) & (y == 1) & ((c == 1) | (c == 3) | (c == 5))).type(torch.float).sum().item()\n TN_b += ((pred_result == 0) & (y == 0) & ((c == 1) | (c == 3) | (c == 5))).type(torch.float).sum().item()\n FN_b += ((pred_result == 0) & (y == 1) & ((c == 1) | (c == 3) | (c == 5))).type(torch.float).sum().item()\n FP_b += ((pred_result == 1) & (y == 0) & ((c == 1) | (c == 3) | (c == 5))).type(torch.float).sum().item()\n test_loss /= size\n p_a = TP_a / (TP_a + FP_a)\n r_a = TP_a / (TP_a + FN_a)\n p_b = TP_b / (TP_b + FP_b)\n r_b = TP_b / (TP_b + FN_b)\n F1_a = 2 * r_a * p_a / (r_a + p_a)\n F1_b = 2 * r_b * p_b / (r_b + p_b)\n F1 = (F1_a + F1_b) / 2\n print(\n f\"Test Error: \\n ,F1a_score:{(F1_a):>5f}, F1b_score:{(F1_b):>5f},\\n F1_score:{(F1):>5f} ,Avg loss: {test_loss:>8f} \\n\")\n return F1\n\n\nif __name__ == '__main__':\n epochs = 5\n F1max = 0\n for t in range(epochs):\n print(f\"Epoch {t + 1}\\n-------------------------------\")\n train(train_dataloader, model, loss_fn, optimizer)\n test_F1 = test(test_dataloader, model)\n if test_F1 > F1max:\n F1max = test_F1\n torch.save(model.module.state_dict(), \"./model_saved/nezha_Conditional_F1_%s_model.pth\" % F1max)\n print(f\"Higher F1: {(F1max):>5f}%, Saved PyTorch Model State to model.pth\")\n print(\"Done!\")\n","sub_path":"model/model/Torch_model/Souhu_TextMatch/Nezha_1024.py","file_name":"Nezha_1024.py","file_ext":"py","file_size_in_byte":8995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"56747167","text":"from tools import *\r\n\r\n\r\nclass LogisticRegression(object):\r\n\r\n def Train(self, Train_data, Train_label, epoch=1, optimize='adagrad'):\r\n history_Train_loss = []\r\n self.__initParams(Train_data.shape[1])\r\n Train_x = Train_data.T.copy()\r\n\r\n for i in range(epoch):\r\n if (i + 1) % 100 == 0:\r\n print(\"Processing epoch: %d\" % (i + 1))\r\n (pred, loss), (d_weight, d_beta) = self.__propagation(Train_x, Train_label)\r\n self.__updateParams(d_weight, d_beta, optimize)\r\n if i % 10 == 0:\r\n history_Train_loss.append(loss)\r\n return history_Train_loss\r\n\r\n def Predict(self, test_data, return_label=True):\r\n test_x = test_data.T.copy()\r\n pred = self.__sigmoid(self.weight.T @ test_x + self.beta)\r\n pred_label = pred.copy()\r\n pred_label[pred_label < 0.5] = 0\r\n pred_label[pred_label >= 0.5] = 1\r\n self.Predict_label = pred_label\r\n if return_label:\r\n return pred_label\r\n else:\r\n return pred\r\n\r\n def CalcAccuracy(self, test_label):\r\n label = test_label\r\n diff = self.Predict_label - label\r\n correct = diff[diff == 0.]\r\n return correct.size / label.shape[1]\r\n\r\n def __CrossEntropyLoss(self, pred, y):\r\n _, N = pred.shape\r\n reg_lambda = self.reg_strength\r\n loss = -(y * np.log(pred) + (1 - y) * np.log(1 - pred))\r\n loss = (1 / N) * (np.sum(loss)) + reg_lambda * np.sum(self.weight * self.weight)\r\n d_pred = (1 / N) * ((1 - y) * (1. / (1 - pred)) - y * (1. / pred))\r\n return loss, d_pred\r\n\r\n\r\n def __init__(self, learning_rate=5e-1, reg_strength=1e-4):\r\n self.learning_rate = learning_rate\r\n self.reg_strength = reg_strength\r\n self.weight = None\r\n self.beta = None\r\n self.ada_h_w = None\r\n self.ada_h_b = None\r\n self.Predict_label = None\r\n\r\n\r\n def __hypothesis(self, x, backward=False, d_h=None):\r\n h = self.weight.T @ x + self.beta\r\n if backward is False:\r\n return h\r\n else:\r\n if d_h is None: # calculate gradient\r\n d_h = np.zeros_like(h)\r\n d_weight = (d_h @ x.T).T\r\n d_weight += 2 * self.reg_strength * self.weight\r\n d_beta = np.sum(d_h) / x.shape[1]\r\n return d_weight, d_beta\r\n\r\n def __updateParams(self, d_weight, d_beta, optimize='adagrad'):\r\n if optimize is 'sgd':\r\n self.weight -= self.learning_rate * d_weight\r\n self.beta -= self.learning_rate * d_beta\r\n else:\r\n if optimize is 'adagrad':\r\n self.ada_h_w += d_weight * d_weight\r\n self.ada_h_b += d_beta * d_beta\r\n else:\r\n self.ada_h_w = 0.9 * self.ada_h_w + 0.1 * d_weight * d_weight\r\n self.ada_h_b = 0.9 * self.ada_h_b + 0.1 * d_beta * d_beta\r\n self.weight -= self.learning_rate * d_weight / (np.sqrt(self.ada_h_w) + 1e-7)\r\n self.beta -= self.learning_rate * d_beta / (np.sqrt(self.ada_h_b) + 1e-7)\r\n\r\n\r\n def __sigmoid(self, x, backward=False, dy=.0):\r\n y = 1 / (1 + np.exp(-x) + 1e-7)\r\n dx = dy * y * (1 - y)\r\n if backward is False:\r\n return y\r\n else:\r\n return dx\r\n\r\n def __initParams(self, dimension):\r\n self.weight = np.random.randn(dimension, 1)\r\n self.beta = 0\r\n self.ada_h_w = np.zeros_like(self.weight)\r\n self.ada_h_b = 0\r\n\r\n\r\n def __propagation(self, x, y):\r\n h = self.__hypothesis(x)\r\n pred = self.__sigmoid(h)\r\n loss, d_pred = self.__CrossEntropyLoss(pred, y)\r\n d_h = self.__sigmoid(h, backward=True, dy=d_pred)\r\n d_weight, d_beta = self.__hypothesis(x, backward=True, d_h=d_h)\r\n return (pred, loss), (d_weight, d_beta)\r\n\r\n\r\nlearning_rates = [5e-1, 1e-1, 5e-2, 1e-2]\r\nreg_strengths1 = [1e-1, 1e-2]\r\nreg_strengths2 = [1e-3, 1e-4]\r\nepoch = 300\r\n(Train_data, Train_label), (test_data, test_label) = getData(r\"D:\\mclearning\\income.csv\", visualize=True)\r\n\r\noptimize = 'sgd'\r\nbest_accu_1, best_param_1 = drawLossForDiffParams(LogisticRegression, Train_data, Train_label, test_data, test_label, learning_rates, reg_strengths1, epoch, optimize=optimize)\r\nbest_accu_2, best_param_2 = drawLossForDiffParams(LogisticRegression, Train_data, Train_label, test_data, test_label, learning_rates, reg_strengths2, epoch, optimize=optimize)\r\nbest_accu = best_accu_1 if best_accu_1 > best_accu_2 else best_accu_2\r\nbest_param = best_param_1 if best_accu_1 > best_accu_2 else best_param_2\r\nprint(best_accu)\r\nprint(best_param)\r\n\r\nepochs = [30, 50, 100, 150, 300, 500, 750, 1000, 1500]\r\naccuracies = drawAccuOfBestForDiffEpoch(LogisticRegression, Train_data, Train_label, test_data, test_label, best_param, epochs, optimize=optimize)\r\nprint(accuracies)\r\n","sub_path":"code_regression.py","file_name":"code_regression.py","file_ext":"py","file_size_in_byte":4873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"40843644","text":"from log_into_wiki import *\nimport re\n\nsite = login('me', 'lol')\nsummary = 'Bot Edit - Automatically Forcing Sprite Cache Update'\nurl_re_start = r'.*(\\/.\\/..\\/)'\nurl_re_end = r'(\\?version=\\w*)\\\".*'\ncss_page_list = ['MediaWiki:Common.css', 'MediaWiki:Mobile.css']\n\ncategory_result = site.api('query', list = 'categorymembers', cmtitle = 'Category:Sprite Images', cmlimit = 50)\nfile_name_list = [_['title'] for _ in category_result['query']['categorymembers']]\n\nparse_text_list = ['[[%s|link=]]' % _ for _ in file_name_list]\nparse_text = '!!!'.join(parse_text_list)\nresult = site.api('parse', text = parse_text, title = 'Main Page', disablelimitreport = 1)\ntext = result['parse']['text']['*']\n\ncss_texts_old = []\ncss_texts_new = []\nfor file_name in file_name_list:\n\traw_name = file_name.replace('File:', '')\n\tre_full = url_re_start + re.escape(raw_name) + url_re_end\n\tmatch = re.match(re_full, text)\n\tcss_texts_new.append(match[1] + raw_name + r'\\1' + match[2])\n\tcss_texts_old.append(re.escape(match[1] + raw_name) + r'(.*)' + r'\\?version=\\w*')\n\t\ndef replace_css_in_file(css_page):\n\tcss_page_text = css_page.text()\n\tcss_page_text_new = css_page_text\n\tfor i, v in enumerate(css_texts_old):\n\t\tcss_page_text_new = re.sub(v, css_texts_new[i], css_page_text_new)\n\tif css_page_text != css_page_text_new:\n\t\tcss_page.save(css_page_text_new, summary = summary)\n\nfor page_name in css_page_list:\n\treplace_css_in_file(site.pages[page_name])\n\nprint('Ran!')","sub_path":"sprites_cachebreak.py","file_name":"sprites_cachebreak.py","file_ext":"py","file_size_in_byte":1441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"146734085","text":"import pytest\nfrom aiohttp.test_utils import make_mocked_coro\n\nimport virtool.caches.db\nimport virtool.utils\n\n\n@pytest.fixture\ndef trim_parameters():\n return {\n \"end_quality\": \"20\",\n \"mode\": \"pe\",\n \"max_error_rate\": \"0.1\",\n \"max_indel_rate\": \"0.03\",\n \"max_length\": None,\n \"mean_quality\": \"25\",\n \"min_length\": \"20\"\n }\n\n\ndef test_calculate_cache_hash(trim_parameters):\n hashed = virtool.caches.db.calculate_cache_hash(trim_parameters)\n assert hashed == \"68b60be51a667882d3aaa02a93259dd526e9c990\"\n\n\n@pytest.mark.parametrize(\"paired\", [True, False], ids=[\"paired\", \"unpaired\"])\ndef test_create(paired, snapshot, dbs, static_time, test_random_alphanumeric, trim_parameters):\n \"\"\"\n Test that the function works with default keyword arguments and when `paired` is either `True` or `False`.\n\n \"\"\"\n cache = virtool.caches.db.create(dbs, \"foo\", trim_parameters, paired)\n\n snapshot.assert_match(cache, \"return\")\n snapshot.assert_match(dbs.caches.find_one(), \"db\")\n\n\ndef test_create_legacy(snapshot, dbs, static_time, test_random_alphanumeric, trim_parameters):\n \"\"\"\n Test that the function works when the `legacy` keyword argument is `True` instead of the default `False`.\n\n \"\"\"\n cache = virtool.caches.db.create(dbs, \"foo\", trim_parameters, False, legacy=True)\n\n snapshot.assert_match(cache, \"return\")\n snapshot.assert_match(dbs.caches.find_one(), \"db\")\n\n\ndef test_create_program(snapshot, dbs, static_time, test_random_alphanumeric, trim_parameters):\n \"\"\"\n Test that the function works with a non-default trimming program keyword argument\n (trimmomatic-0.2.3 instead of skewer-0.2.2).\n\n \"\"\"\n cache = virtool.caches.db.create(dbs, \"foo\", trim_parameters, False, program=\"trimmomatic-0.2.3\")\n\n snapshot.assert_match(cache, \"return\")\n snapshot.assert_match(dbs.caches.find_one({\"_id\": test_random_alphanumeric.last_choice}), \"db\")\n\n\ndef test_create_duplicate(snapshot, dbs, static_time, test_random_alphanumeric, trim_parameters):\n \"\"\"\n Test that the function handles duplicate document ids smoothly. The function should retry with a new id.\n\n \"\"\"\n dbs.caches.insert_one({\"_id\": test_random_alphanumeric.next_choice[:8].lower()})\n\n cache = virtool.caches.db.create(dbs, \"foo\", trim_parameters, False)\n\n snapshot.assert_match(cache, \"return\")\n snapshot.assert_match(dbs.caches.find_one({\"_id\": test_random_alphanumeric.last_choice}), \"db\")\n\n\n@pytest.mark.parametrize(\"exists\", [True, False])\nasync def test_get(exists, dbi):\n \"\"\"\n Test that the function returns a cache document when it exists and returns `None` when it does not.\n\n \"\"\"\n if exists:\n await dbi.caches.insert_one({\"_id\": \"foo\"})\n\n result = await virtool.caches.db.get(dbi, \"foo\")\n\n if exists:\n assert result == {\"id\": \"foo\"}\n return\n\n assert result is None\n\n\n@pytest.mark.parametrize(\"exception\", [False, True])\nasync def test_remove(exception, dbi):\n app = {\n \"db\": dbi,\n \"run_in_thread\": make_mocked_coro(raise_exception=FileNotFoundError) if exception else make_mocked_coro(),\n \"settings\": {\n \"data_path\": \"/foo\"\n }\n }\n\n await dbi.caches.insert_one({\"_id\": \"baz\"})\n\n await virtool.caches.db.remove(app, \"baz\")\n\n assert await dbi.caches.count_documents({}) == 0\n\n app[\"run_in_thread\"].assert_called_with(\n virtool.utils.rm,\n \"/foo/caches/baz\",\n True\n )\n","sub_path":"tests/caches/test_db.py","file_name":"test_db.py","file_ext":"py","file_size_in_byte":3469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"362212798","text":"# Brute Force Solution, check ALL areas:\n# Time Complexity: O(n^2)\nclass Solution1:\n def maxArea(self, height: List[int]) -> int:\n n = len(height)\n largest = 0\n for i in range(n):\n for j in range(i + 1, n):\n x = j - i\n y = min(height[i], height[j])\n largest = max(largest, x * y)\n \n return largest\n\n# Two Pointer Solution, iterate only once. \n# The area between two walls is capped by whichever side is smallest.\n# Start by seeing the area of the two furthest away from eachother and\n# then move the shortest pointer inwards until they meet. You dont have \n# to check any other area since the best possible area for each point\n# is already checked \n# Time Complexity: O(n)\nclass Solution2:\n def maxArea(self, height: List[int]) -> int:\n n = len(height)\n left = 0\n right = n - 1\n largest = 0\n while left < right:\n x = right - left\n y = min(height[left], height[right])\n largest = max(largest, x * y)\n if (height[left] < height[right]):\n left += 1\n else:\n right -= 1\n \n return largest\n \n","sub_path":"leetcode/container-with-most-water.py","file_name":"container-with-most-water.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"463995900","text":"import sys\nimport logging\nimport threading\nfrom psdaq.control.DaqControl import DaqControl\nfrom psdaq.control.TimedRun import TimedRun\nimport argparse\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('-p', type=int, choices=range(0, 8), default=2,\n help='platform (default 2)')\n parser.add_argument('-C', metavar='COLLECT_HOST', default='drp-neh-ctl001',\n help='collection host (default drp-neh-ctl001)')\n parser.add_argument('-t', type=int, metavar='TIMEOUT', default=10000,\n help='timeout msec (default 10000)')\n parser.add_argument('-v', action='store_true', help='be verbose')\n parser.add_argument('--duration', type=int, default=10,\n help='run duration seconds (default 10)')\n args = parser.parse_args()\n\n # instantiate DaqControl object\n control = DaqControl(host=args.C, platform=args.p, timeout=args.t)\n\n # configure logging handlers\n if args.v:\n level=logging.DEBUG\n else:\n level=logging.WARNING\n logging.basicConfig(level=level)\n logging.info('logging initialized')\n\n # get initial DAQ state\n daqState = control.getState()\n logging.info('initial state: %s' % daqState)\n if daqState == 'error':\n sys.exit('failed to get initial DAQ state')\n\n # instantiate TimedRun\n run = TimedRun(control, daqState=daqState, args=args)\n\n run.stage()\n\n try:\n\n # -- begin script --------------------------------------------------------\n\n # run daq for the specified time duration\n run.set_running_state()\n run.sleep(args.duration)\n\n # -- end script ----------------------------------------------------------\n\n except KeyboardInterrupt:\n run.push_socket.send_string('shutdown') #shutdown the daq communicator thread\n sys.exit('interrupted')\n\n run.unstage()\n\n run.push_socket.send_string('shutdown') #shutdown the daq communicator thread\n\nif __name__ == '__main__':\n main()\n","sub_path":"psdaq/psdaq/control/rix_timed_run.py","file_name":"rix_timed_run.py","file_ext":"py","file_size_in_byte":2024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"58386315","text":"\nimport numpy as np\nimport scipy.stats as stats\nimport matplotlib.pyplot as plt\n\n#Funktionen:\n\ndef GetAngleBetweenVectors(X, Y): #nicht orientierter Winkel zwischen zwei Vektoren\n x=np.linalg.norm(X)\n y=np.linalg.norm(Y)\n return np.arccos(np.dot(X,Y)/(x*y))\n\ndef RotationAroundYAxis(alpha): #Rotationsmatrix, alpha um y Achse\n return np.array([[np.cos(alpha),0,np.sin(alpha)],[0,1,0],[-np.sin(alpha),0,np.cos(alpha)]]) \n\ndef RotationAroundXAxis(beta): #Rotationsmatrix, beta um x Achse\n return np.array([[1,0,0],[0,np.cos(beta),-np.sin(beta)],[0,np.sin(beta),np.cos(beta)]])\n\ndef LorentzBoost(b,g): #Lorentz-Boost, b = Beta-Faktor, g = Gamma-Faktor\n return np.array([[g,0,0,b*g],[0,1,0,0],[0,0,1,0],[b*g,0,0,g]])\n\ndef SimulateKDecayPoint(sx, sy, tau): #Funktion gibt Zerfallspunkt eines Kaons aus (x,y,z) und Vektor in Flugrichtung\n if sx==0 and sy==0: #sx, sy = 0 erzeugen einen reinen Strahl in z-Richtung\n alpha = 0\n beta = 0\n else:\n alpha = np.array(stats.norm.rvs(loc=0, scale=sx, size=1)) #Erzeugen eines zufaelligen Streuwinkels in x Richtung \n beta = np.array(stats.norm.rvs(loc=0, scale=sy, size=1)) #Erzeugen eines zufaelligen Streuwinkels in y Richtung\n vlen= np.array(stats.expon.rvs(loc=0, scale=tau, size=1)) #Erzeuge Fluglaenge eines Kaons (Exponentialverteilt, mittlere Flugweite tau) \n x0= vlen*np.tan(alpha)*np.cos(beta)/np.sqrt(1+np.tan(alpha)**2 *np.cos(beta)**2) \n y0= np.sqrt(vlen**2 -(x0**2))*np.sin(beta) #kartesische Koordinaten\n z0= np.sqrt(vlen**2 -(x0**2))*np.cos(beta)\n dp= np.array([x0,y0,z0]) #Zerfallspunkt (dp=decaypoint) \n ev = dp/vlen #Normierter Vektor in Flugrichtung \n return dp, ev, alpha, beta\n\ndef SimulateK2PiDecay(E_K_0, E_K_plus, p, b, g, tau): #Erzeugung eines einzelnen Pion-Paares im Lab-frame\n theta = np.array(stats.uniform.rvs(scale=np.pi, size=1)) #gleichverteilt zwischen 0 und Pi\n phi = np.array(stats.uniform.rvs(scale=2*np.pi, size=1)) #gleichverteilt zwischen 0 und 2 Pi\n x0= np.sin(theta)*np.cos(phi) \n y0= np.sin(theta)*np.sin(phi) #kartesische Koordinaten\n z0= np.cos(theta) \n P_K_0 = np.array([E_K_0,p*x0,p*y0,p*z0]) #4-Vektoren im K+-Frame\n P_K_plus = np.array([E_K_plus,p*(-x0),p*(-y0),p*(-z0)])\n P_lab_0 = np.dot(LorentzBoost(b,g),P_K_0.T) #4-Vektoren im Lab-Frame\n P_lab_plus = np.dot(LorentzBoost(b,g),P_K_plus.T)\n return P_lab_0, P_lab_plus \n\ndef RotateDecayVectors(sx, sy, tau, E_K_0, E_K_plus, p, b, g): #Erzeugt in K richtung Rotierte Zerfallsvektoren\n dp,ev,alpha,beta = SimulateKDecayPoint(sx, sy, tau) #Rotationswinkel \n P_lab_0,P_lab_plus = SimulateK2PiDecay(E_K_0, E_K_plus, p, b, g, tau) #Normale Zerfallsvektore\n P_lab_0=P_lab_0[1:]\n P_lab_plus=P_lab_plus[1:] \n P_lab_0r = np.dot(RotationAroundXAxis(beta),np.dot(RotationAroundYAxis(alpha),P_lab_0.T)) #Gedrehte Zerfallsvektoren \n P_lab_plusr = np.dot(RotationAroundXAxis(beta),np.dot(RotationAroundYAxis(alpha),P_lab_plus.T))\n return P_lab_0r, P_lab_plusr, dp\n \ndef SimulateNDecays(sx, sy, tau, E_K_0, E_K_plus, p, b, g, n): #Erzeugt n Zerfaelle\n P_lab_0 = []\n P_lab_plus = []\n dp = []\n for i in range(n):\n decay = RotateDecayVectors(sx, sy, tau, E_K_0, E_K_plus, p, b, g)\n P_lab_0.append(decay[0])\n P_lab_plus.append(decay[1])\n dp.append(decay[2])\n return P_lab_0, P_lab_plus, dp #Ausgabe: Listen von 4-Vektoren und Ortsvektoren des K+-Zerfalls\n\ndef HitDistance(P_lab_0, P_lab_plus, dp, a): #Abstand zu Mittelpunkt des Detektors von Pi_0 und Pi_plus \n if float(dp[-1]) >= a: #Aussortieren der K+, die hinter Detektor zerfallen\n return [100,100]\n else:\n n_0 = float((a-dp[-1])/float(P_lab_0[-1])) #Berechne wieviel mal P_lab_0r an dp angehängt werden muss damit z=a\n n_plus = float((a-dp[-1])/float(P_lab_plus[-1]))\n d_0 = np.sqrt((float(dp[0])+float(n_0*P_lab_0[0]))**2+(float(dp[1])+float(n_0*P_lab_0[1]))**2) #Berechnet Abstand zu z Achse (r=(x^2+y^2)^(1/2))\n d_plus = np.sqrt((float(dp[0])+float(n_plus*P_lab_plus[0]))**2+(float(dp[1])+float(n_plus*P_lab_plus[1]))**2) \n return [d_0, d_plus] \n\ndef successrate(P_lab_0, P_lab_plus, dp, a, n): #Zaehlung der Erfolge einer Messung im Verhaeltnis zu Anzahl K+\n success = 0\n for i in range(n):\n if HitDistance(P_lab_0[i], P_lab_plus[i], dp[i], a)[0] <= 2 and HitDistance(P_lab_0[i], P_lab_plus[i], dp[i], a)[1] <= 2:\n success += 1\n return success/n\n\ndef GraficEvaluation(a_opt, SR_max, A, SR):\t\t\t #huebsche Darstellung der Messwerte\n plt.figure()\n plt.plot(A,SR)\n plt.xlim(xmin=a_range[0],xmax=a_range[1])\n plt.ylim(ymin=0, ymax=1)\n plt.plot([a_range[0],a_range[1]], [max(SR),max(SR)],'k:')\n plt.plot([a_opt,a_opt], [0,1],'k:')\n plt.xlabel('detector position [m]')\n plt.ylabel(r'Successrate [success/$n_{K+}$]')\n plt.show()\n \ndef RunExperiment(sx, sy, E_K_0, E_K_plus, p, b, g, a_range, tau, n): #Ausfuehrung des Experiments\n A = np.linspace(*a_range)\n decay = SimulateNDecays(sx, sy, tau, E_K_0, E_K_plus, p, b, g, n)\n P_lab_0, P_lab_plus, dp = decay[0], decay[1], decay[2]\n SR = []\n for a in A:\n SR.append(successrate(P_lab_0, P_lab_plus, dp, a, n))\n SR_max = max(SR)\n a_opt = 0\n for i in range(len(A)):\n if SR[i]==SR_max:\n a_opt = A[i]\n return a_opt, SR_max, SR, GraficEvaluation(a_opt, SR_max, A, SR) #Ausgabe: optimale Detektorposition, maximale Erfolgsrate, Messdaten, Plot\n\n#Parameter:\n\nE_K_0 = 245.563588 #MeV #Energie der neutrale Pionen in K+ system\nE_K_plus = 248.118174 #MeV #Energie der positiven Pionen in K+ system\np = 205.14091 #MeV/c #Impulsbetrag der Pionen (der selbe fuer beide)\nb = 0.99997833784995 #Betafaktor\ng = 151.92756392754 #Gammafaktor\ntau = 560 #Mittlere Zerfallsstrecke von K+\nn = 100 #Anzahl K+\na_range = [0,500,500] \t\t#Anfangspunkt, Endpunkt, Anzahl Messungen\nsx = 1e-3\t\t\t#Standardabweichung xWinkel (alpha)\nsy = 1e-3 \t\t\t#Standardabweichung yWinkel (beta)\n\n#Auswertung:\na_opt, SR_max, SR, f = RunExperiment(sx, sy, E_K_0, E_K_plus, p, b, g, a_range, tau, n)\nwith open(\"data.txt\", \"w\") as fh: #Ausgabe der Messdaten in Datei\n\tfh.write(str(SR))\nprint('Optimale Position: ', a_opt)\nprint('Maximale Erfolgsrate: ', SR_max)\n","sub_path":"K2Pi/K2Pi.py","file_name":"K2Pi.py","file_ext":"py","file_size_in_byte":7193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"304664785","text":"__author__ = 'Group 21 - COMP90024 Cluster and Cloud Computing'\nimport couchdb\n\n\nclass TweetStore(object):\n\n def __init__(self, dbname, url='http://127.0.0.1:5984/'):\n try:\n self.server = couchdb.Server(url=url)\n self.db = self.server.create(dbname)\n except couchdb.http.PreconditionFailed:\n self.db = self.server[dbname]\n\n def save_tweet(self, tw):\n try:\n json_str = tw\n json_str['_id'] = json_str['id_str']\n self.db.save(json_str)\n return 1\n except:\n return 0\n\n def get_db_reference(self):\n return self.db","sub_path":"harvesting/TwitterStore.py","file_name":"TwitterStore.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"66579618","text":"import codecs\nimport copy\nimport pickle\n\nimport numpy as np\n\nimport console\nimport constants\nimport regression\n\nnp.random.seed(11)\n\n\n# This is to pickle all foods in sorted order\ndef pickle_top_foods_for_each_nutrient(pickle_all_foods=True):\n with open(constants.NUTRIENT_DETAILS_FILE, 'r', encoding='ISO-8859-1') \\\n as nutrient_data_file:\n nutrient_data = nutrient_data_file.read().split('\\n')[1:]\n if not pickle_all_foods:\n nutrient_data = [x for x in nutrient_data if str(\n int(x.split('^')[0])) in constants.SELECTED_FOOD_IDS]\n nutrient_data = [x.split(\"^\") for x in nutrient_data]\n with open(constants.NUTRIENT_DETAILS_FILE, 'r', encoding='ISO-8859-1') \\\n as nutrient_data_file:\n mineral_desc = nutrient_data_file.read().split('\\n')[0] \\\n .split('^')[constants.NUTRIENT_START_INDEX:-1]\n\n nutrient_data = np.array([x for x in nutrient_data])\n nutrient_data = np.concatenate(\n (\n nutrient_data[:, 0:1],\n nutrient_data[:, constants.NUTRIENT_START_INDEX:-1]\n ),\n axis=1\n )\n nutrient_data[nutrient_data == ''] = '0'\n nutrient_data = nutrient_data.astype(\"float\")\n dict_li = []\n for i in range(1, 150):\n temp = nutrient_data[nutrient_data[:, i].argsort()[::-1]]\n dict_li.append(\n str(i) + ':' + \"('\" + mineral_desc[\n i - 1] + \"_TopFoods',[\" + \",\".join(\n '\"{0}\"'.format(w) for w in\n temp[:temp.shape[0], [0]].astype('str').reshape(\n temp.shape[0]).tolist()) + '])')\n top_nutritious_food = ','.join(dict_li)\n nutrient_wise_top_foods = eval('{' + top_nutritious_food + '}')\n if pickle_all_foods:\n with open(constants.TOP_ALL_FOODS_PER_NUTRIENT_FILE,\n 'wb') as pickle_file:\n pickle.dump(nutrient_wise_top_foods, pickle_file)\n else:\n with open(constants.TOP_SELECTED_FOODS_PER_NUTRIENT_FILE,\n 'wb') as pickle_file:\n pickle.dump(nutrient_wise_top_foods, pickle_file)\n\n\ndef build_x_and_y(input_food_list, duplicate_sample_count, daily_limit_list,\n req_mineral_list):\n avg = sum(daily_limit_list) / len(daily_limit_list)\n normalize_list = [avg / x if x > 0 else 1 for x in daily_limit_list]\n daily_limit_list = [\n normalize_list[i] * daily_limit_list[i]\n for i in range(len(normalize_list))\n ]\n with codecs.open(constants.NUTRIENT_DETAILS_FILE, 'r', encoding='utf-8',\n errors='ignore') as data:\n food_items = data.read().split('\\n')[1:]\n food_items = [food_item.split(\"^\") for food_item in food_items]\n food_dict = {}\n [food_dict.update({str(int(food_item[0])): food_item}) for food_item in\n food_items]\n x = np.array(\n [[[z * float(food_dict[x][y]) * (float(constants.GRAMS)) / 100 for y in\n [x1 + constants.NUTRIENT_START_INDEX for x1 in\n req_mineral_list]] for x in input_food_list] for z in\n range(1, duplicate_sample_count + 1)])\n x = x * normalize_list\n y = np.array([[x * m for x in daily_limit_list] for m in\n range(1, duplicate_sample_count + 1)])\n return x, y, normalize_list\n\n\ndef get_top_foods_for_nutrient(all_foods, index, length):\n if all_foods:\n nutrient_wise_top_foods = pickle.load(\n open(constants.TOP_ALL_FOODS_PER_NUTRIENT_FILE, 'rb'))\n else:\n nutrient_wise_top_foods = pickle.load(\n open(constants.TOP_SELECTED_FOODS_PER_NUTRIENT_FILE, 'rb'))\n return (\n nutrient_wise_top_foods[index + 1][0],\n [\n str(int(float(x)))\n for x in nutrient_wise_top_foods[index + 1][1][:length + 1]\n ]\n )\n\n\ndef get_add_more_list(x, y, theta):\n dt_product = regression.dot_product(x, theta)\n # required = y[0]\n # computed_total = dt_product[0]\n ratio = (y / dt_product).tolist()[0]\n difference = (y - dt_product).tolist()[0]\n add_more_list = []\n\n for i in range(len(ratio)):\n if ratio[i] > 2 or difference[i] > 50:\n add_more_list.append(i)\n return [constants.REQUIRED_NUTRIENT_LIST[x] for x in add_more_list]\n\n\ndef get_remove_existing_foods_list(x, y, theta):\n dt_product = regression.dot_product(x, theta)\n # required = y[0]\n # computed_total = dt_product[0]\n ratio = (y / dt_product).tolist()[0]\n difference = (y - dt_product).tolist()[0]\n remove_existing = []\n for i in range(len(ratio)):\n if ratio[i] < 0.5 or difference[i] < (-50):\n remove_existing.append(i)\n return [constants.REQUIRED_NUTRIENT_LIST[x] for x in remove_existing]\n\n\ndef show_add_more_foods(nutrient_top_foods_dict, final_foods):\n loop = True\n while loop:\n loop_1 = True\n print(\"Added extra Foods for todays Meal:\")\n for i in nutrient_top_foods_dict.keys():\n print('\\t' + nutrient_top_foods_dict[i][0] + ' : ' + ', '.join(\n nutrient_top_foods_dict[i][1]))\n print(\n \"\\nselect nutrients that you need to add, \"\n \"Please enter the number associated with nutrient\"\n )\n for i in nutrient_top_foods_dict.keys():\n print('\\t' + str(i) + \" for \" + nutrient_top_foods_dict[i][0])\n print(\"\\t# to exit application\")\n try:\n nutrient_key = int(input())\n if nutrient_key in nutrient_top_foods_dict.keys():\n while loop_1:\n console.show_products(nutrient_top_foods_dict[nutrient_key]\n [2])\n food_list = nutrient_top_foods_dict[nutrient_key][2]\n food_key = int(input())\n if food_key in range(1, len(food_list) + 1):\n nutrient_top_foods_dict[nutrient_key][1].append(\n nutrient_top_foods_dict[nutrient_key][2][\n food_key].split('^')[1].replace(',', '')[:25])\n final_foods.append(\n nutrient_top_foods_dict[nutrient_key][2][\n food_key].split('^')[0])\n else:\n loop_1 = False\n except Exception as e:\n loop = False\n print(\n \"You chose to exit or gave wrong Input, \"\n \"Thank you for using this Application\"\n + str(e)\n )\n return final_foods\n\n\ndef show_delete_foods(nutrient_top_foods_dict, final_foods):\n loop = True\n while loop:\n loop_1 = True\n print(\"Foods That need to be removed:\")\n print(final_foods)\n for i in nutrient_top_foods_dict.keys():\n print('\\t' + nutrient_top_foods_dict[i][0] + ' : ' + ', '.join(\n nutrient_top_foods_dict[i][1]))\n print(\n \"\\nselect nutrients that you need to remove, \"\n \"Please enter the number associated with nutrient\"\n )\n for i in nutrient_top_foods_dict.keys():\n print('\\t' + str(i) + \" for \" + nutrient_top_foods_dict[i][0])\n print(\"\\t# to exit application\")\n try:\n nutrient_key = int(input())\n if nutrient_key in nutrient_top_foods_dict.keys():\n while loop_1:\n console.show_products(\n nutrient_top_foods_dict[nutrient_key][2]\n )\n food_list = nutrient_top_foods_dict[nutrient_key][2]\n food_key = int(input())\n if food_key in range(1, len(food_list) + 1):\n nutrient_top_foods_dict[nutrient_key][1].append(\n nutrient_top_foods_dict[nutrient_key][2][\n food_key].split('^')[1].replace(',', '')[:25])\n final_foods.append(\n nutrient_top_foods_dict[nutrient_key][2][\n food_key].split('^')[0])\n else:\n loop_1 = False\n except ValueError:\n loop = False\n print(\n \"You chose to exit or gave wrong Input, \"\n \"Thank you for using this Application\"\n )\n return list(set(final_foods))\n\n\ndef add_more_foods(add_more_list, final_foods):\n nutrient_top_foods_dict = {}\n with open(constants.NUTRIENT_DETAILS_FILE, 'r',\n encoding='ISO-8859-1') as nutrient_details_file:\n food_items = nutrient_details_file.read().split('\\n')[1:]\n # food_items = open(constants.NUTRIENT_DETAILS_FILE).read().split('\\n')[1:]\n food_items = [food_item.split(\"^\") for food_item in food_items]\n food_dict = {}\n [food_dict.update({str(int(food_item[0])): food_item}) for food_item in\n food_items]\n for i in range(len(add_more_list)):\n top_list = [\n x for\n x in get_top_foods_for_nutrient(False, add_more_list[i], 50)[1]\n if x not in final_foods\n ]\n dictt = {i: top_list[i - 1] + '^' + food_dict[top_list[i - 1]][4] for i\n in range(1, len(top_list) + 1)}\n nutrient_top_foods_dict.update(\n {i + 1: (constants.NUTRIENT_LIST[add_more_list[i]], [], dictt)})\n final_foods = show_add_more_foods(nutrient_top_foods_dict, final_foods)\n return final_foods\n\n\ndef removeExistingFoods(remove_existing_list, final_foods):\n nutrient_top_foods_dict = {}\n with open(constants.NUTRIENT_DETAILS_FILE, 'r',\n encoding='ISO-8859-1') as nutrient_details_file:\n food_items = nutrient_details_file.read().split('\\n')[1:]\n # food_items = open(constants.NUTRIENT_DETAILS_FILE).read().split('\\n')[1:]\n food_items = [food_item.split(\"^\") for food_item in food_items]\n food_dict = {}\n [food_dict.update({str(int(food_item[0])): food_item}) for food_item in\n food_items]\n for i in range(len(remove_existing_list)):\n top_list = [\n x\n for x in get_top_foods_for_nutrient(\n False,\n remove_existing_list[i], 300\n )[1]\n if x in final_foods\n ]\n dictt = {i: top_list[i - 1] + '^' + food_dict[top_list[i - 1]][4] for i\n in range(1, len(top_list) + 1)}\n nutrient_top_foods_dict.update({i + 1: (\n constants.NUTRIENT_LIST[remove_existing_list[i]], [], dictt)})\n remove_foods = show_delete_foods(nutrient_top_foods_dict, [])\n return remove_foods\n\n\ndef add_or_remove_foods(x, y, theta, final_foods):\n print(\n \"Please analyse the output in \" + constants.OUTPUT_FILE\n + \"\\n\"\n + \"Select one of the below items\"\n + \"\\n\\n\\t\"\n + \"1 For Adding a Food yourself\"\n + \"\\n\\t\"\n + \"2 for Adding system analysed Foods\"\n + \"\\n\\t\"\n + \"3 for Removing a food item\"\n + \"\\n\\t\"\n + \"4 for Removing system analysed Foods\"\n + \"\\n\\t\"\n + \"5 for removing Zero weight foods\"\n + \"\\n\\t\"\n + \"6 for Running with specific Iterations\"\n + \"\\n\\t\"\n + \"# To Continue with previous items\"\n )\n try:\n option = int(input())\n if option == 1:\n print(\n \"Enter the comma separated food item IDs \"\n \"that needed to be added. \"\n \"for example\\n\\t11080,11215\"\n )\n new_foods = [\n x.strip()\n for x in input().strip().split(',')\n if x not in final_foods\n ]\n if len(new_foods) > 0:\n append_theta = np.array(\n [[i] * x.shape[2] for i in np.random.rand(len(new_foods))]\n )\n theta = np.concatenate((theta, append_theta), axis=0)\n final_foods = final_foods + new_foods\n return final_foods, theta, 0\n else:\n print(\"No food item to add\")\n return None\n elif option == 2:\n if get_add_more_list(x, y, theta):\n initial_copy = copy.deepcopy(final_foods)\n final_foods = add_more_foods(get_add_more_list(x, y, theta),\n final_foods)\n if len(final_foods) - len(initial_copy) > 0:\n append_theta = np.array(\n [\n [i] * x.shape[2]\n for i in\n np.random.rand(len(final_foods)\n - len(initial_copy))\n ]\n )\n theta = np.concatenate((theta, append_theta), axis=0)\n return final_foods, theta, 0\n else:\n print(\"No food items to Add based on Analysis\")\n return None\n elif option == 3:\n print(\n \"Enter the comma separated food \"\n \"item IDs that needed to be deleted.\"\n \"for example\\n\\t11080,11215\"\n )\n remove_foods = list(\n set(\n [x.strip() for x in str(input()).strip().split(',') if\n x in final_foods]\n )\n )\n if len(remove_foods) > 0:\n for food in remove_foods:\n indx = final_foods.index(food)\n final_foods.remove(food)\n theta = np.delete(theta, indx, axis=0)\n return final_foods, theta, 0\n else:\n print(\"No food items to delete\")\n return None\n elif option == 4:\n if get_remove_existing_foods_list(x, y, theta):\n remove_foods = removeExistingFoods(\n get_remove_existing_foods_list(x, y, theta),\n final_foods\n )\n if len(remove_foods) > 0:\n for food in remove_foods:\n indx = final_foods.index(food)\n final_foods.remove(food)\n theta = np.delete(theta, indx, axis=0)\n return final_foods, theta, 0\n else:\n print(\"No food items to delete based on Analysis\")\n return None\n elif option == 5:\n while True:\n theta_list = theta[:, 0].tolist()\n try:\n indx = theta_list.index(0.0)\n food = final_foods[indx]\n final_foods.remove(food)\n theta = np.delete(theta, indx, axis=0)\n except Exception as e:\n print(\"Exception\" + str(e))\n break\n return final_foods, theta, 0\n elif option == 6:\n print(\"Enter the reqired iterations\")\n iters = int(input())\n if iters > 0:\n return final_foods, theta, iters\n else:\n return final_foods, theta, 0\n else:\n print(\"Invalid option. Thanks for suing this Application.\")\n return None\n except Exception as e:\n print(\n \"You chose to exit or gave wrong Input, \"\n \"Thank you for using this Application\\nThe exception is \\n\"\n + str(e)\n )\n return final_foods, theta, 0\n","sub_path":"nutreat/utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":15535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"67276494","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# expose vital data of dot nodes for prometheus\n\n\nfrom http.server import HTTPServer, BaseHTTPRequestHandler\nimport requests\nimport os\n\n\n\nNODE_URL = os.environ.get(\"NODE_URL\", \"http://localhost:9933\") #todo make the port configurable\nLISTEN = os.environ.get(\"LISTEN\", \"0.0.0.0\")\nPORT = int(os.environ.get(\"PORT\", \"8000\"))\nDEBUG = bool(os.environ.get(\"DEBUG\", False))\nTIMEOUT = 1\n\n\n\nclass DotExporter(BaseHTTPRequestHandler):\n\n spec = {}\n\n def set_spec(self):\n try:\n spec = {}\n spec['name'] = self.query(\"system_name\")\n spec['version'] = self.query(\"system_version\")\n spec['chain'] = self.query(\"system_chain\")\n try:\n with open('/polkaversion/version') as v:\n spec['build'] = v.readline().strip().split(' ')[1]\n with open('/polkaversion/substrate-ref') as sr:\n spec['substrate_ref'] = '-'.join([l.strip() for l in sr.readlines()])\n except:\n pass\n DotExporter.spec = spec\n except:\n DotExporter.spec = {}\n\n\n def __init__(self, *args):\n # updates the spec on every request\n # TODO improve when previous block is known\n self.set_spec()\n BaseHTTPRequestHandler.__init__(self, *args)\n\n def log_message(self, format, *args):\n msg = format % args\n if hasattr(self, 'msg'):\n msg += ' :: %s' % self.msg\n if hasattr(self, 'headers') and self.headers.get('Origin'):\n msg += ' [%s]' % self.headers.get('Origin')\n BaseHTTPRequestHandler.log_message(self, msg)\n\n\n def query(self, method, params = []):\n header = { 'Content-Type': 'application/json', 'Accept': 'application/json' }\n payload = { 'jsonrpc': '2.0', 'method': method, 'params': params, 'id': 0 }\n\n\n r = requests.post(NODE_URL, json=payload, headers=header, timeout=TIMEOUT)\n\n try:\n return r.json()['result']\n except:\n self.msg = r.text\n\n\n def send(self, text = \"\", status = 200):\n self.send_response(status)\n self.send_header(\"Content-type\", \"text/plain\")\n self.end_headers()\n self.wfile.write(str.encode(text))\n\n\n\n def do_GET(self):\n if self.path == '/metrics':\n if not DotExporter.spec:\n self.set_spec()\n # maybe implement system_networkState in the future\n m = []\n try:\n chain_getHeader = self.query(\"chain_getHeader\")\n system_health = self.query(\"system_health\")\n runtime_version = self.query(\"state_getRuntimeVersion\")\n\n try:\n chain_getFinalizedHead = self.query(\"chain_getFinalizedHead\")\n chain_FinalizedHeadBlock = self.query(\"chain_getBlock\", [chain_getFinalizedHead])\n m.append({\n 'name': 'dot_chain_block_number',\n 'prop': { 'block': 'finalized' },\n 'value': int(chain_FinalizedHeadBlock['block']['header']['number'], 16)\n })\n except:\n pass\n\n m.append({\n 'name': 'dot_chain_block_number',\n 'prop': { 'block': 'head' },\n 'value': int(chain_getHeader['number'], 16)\n })\n m.append({\n 'name': 'dot_peer_count',\n 'value': int(system_health[\"peers\"])\n })\n m.append({\n 'name': 'dot_shouldHavePeers',\n 'value': int(system_health[\"shouldHavePeers\"])\n })\n m.append({\n 'name': 'dot_isSyncing',\n 'value': int(system_health[\"isSyncing\"])\n })\n m.append({\n 'name': 'dot_specVersion',\n 'value': int(runtime_version[\"specVersion\"])\n })\n m.append({\n 'name': 'dot_rpc_healthy',\n 'value': 1\n })\n except:\n m.append({\n 'name': 'dot_rpc_healthy',\n 'value': 0\n })\n\n metrics = ''\n for i in m:\n prop = ','.join([ f'{k}=\"{v}\"' for k,v in { **DotExporter.spec, **i.get('prop', {})}.items()])\n if prop: prop = f'{{{prop}}}'\n metrics += f\"{i['name']}{prop} {i['value']}\\n\"\n return self.send(metrics)\n\n elif self.path == '/health':\n\n if DEBUG and self.headers.get('Origin') == 'dotexporter':\n return self.send(status = 200)\n\n try:\n system_health = self.query(\"system_health\")\n assert('peers' in system_health and 'shouldHavePeers' in system_health)\n except:\n return self.send(status = 502)\n\n if system_health[\"peers\"] < 2 and system_health[\"shouldHavePeers\"] == True:\n self.msg = \"system_health: peers %s, shouldHavePeers: %s\" \\\n % (system_health[\"peers\"], system_health[\"shouldHavePeers\"])\n return self.send(status = 500)\n\n self.msg = '%s peers' % system_health[\"peers\"]\n return self.send(\"OK %s\\n\" % system_health[\"peers\"])\n else:\n return self.send(\"substrate/polkadot node monitoring\\n\")\n\n\nif __name__ == '__main__':\n httpd = HTTPServer((LISTEN, PORT), DotExporter)\n print(\"Serving requests on %s:%s\" % (LISTEN, PORT))\n httpd.serve_forever()\n\n\n","sub_path":"metrics/dotexporter.py","file_name":"dotexporter.py","file_ext":"py","file_size_in_byte":4918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"519293531","text":"'''\nCreated on Jan 5, 2016\n\n@author: jeffc\n'''\nimport unittest\nimport tracemalloc\nfrom HTMLTestRunner import HTMLTestRunner\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.by import By\nimport sys,os.path\n# adjust the path to find config\nsys.path.append(\n os.path.join(os.path.dirname(__file__), '../../config',)\n)\nimport config\nfrom config import PUBLIC_URL\nfrom util import wait\nimport time\n\n#Tests\ntracemalloc.start()\nclass TestPrivateAllele(unittest.TestCase):\n \"\"\"\n @status: Tests that the dummy private allele Brca1 does not display on public\n \"\"\"\n\n def setUp(self):\n self.driver = webdriver.Firefox()\n #self.driver = webdriver.Chrome()\n self.driver.get(config.PUBLIC_URL)\n self.driver.implicitly_wait(10)\n\n def test_private(self):\n driver = self.driver\n self.assertIn(\"Informatics\", driver.title)\n querytext = driver.find_element(By.NAME, 'query')\n querytext.clear()\n querytext.send_keys(\"Brca1\") # put your marker symbol\n querytext.send_keys(Keys.RETURN) # click the submit button\n brcalink = driver.find_element(By.LINK_TEXT, \"Brca1\")# Find the Brca1 link and click it\n brcalink.click()\n #switch to the new tab that opens\n self.driver.switch_to.window(self.driver.window_handles[1])\n # Find the all alleles and mutations link and click it\n allallelelink = driver.find_element(By.ID, \"phenoMutationLink\")\n allallelelink.click() # assert that there is no link for Brca1\n # assert that there is no link for Brca1\n self.assertNotIn(\"test1\", self.driver.page_source,\"Test1 allele is displaying!\")\n \n def test_hide_private_marker(self):\n \"\"\"\n @status: Tests that the dummy private allele Brca1 does not display on public\n \"\"\"\n driver = self.driver\n self.assertIn(\"Informatics\", driver.title)\n querytext = driver.find_element(By.NAME, 'query')\n querytext.clear()\n querytext.send_keys(\"Brca1\")# put your marker symbol\n querytext.send_keys(Keys.RETURN) # click the submit button\n wait.forAjax(self.driver, 2)\n self.assertNotIn(self.driver.page_source, 'Braca1')\n\n def tearDown(self):\n self.driver.quit()\n tracemalloc.stop()\n\ndef suite():\n suite = unittest.TestSuite()\n suite.addTest(unittest.makeSuite(TestPrivateAllele))\n return suite\n\nif __name__ == '__main__':\n unittest.main(testRunner=HTMLTestRunner(output='C:\\WebdriverTests'))\n","sub_path":"PyTests/FeWI/private_allele.py","file_name":"private_allele.py","file_ext":"py","file_size_in_byte":2606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"557064680","text":"import pikitlib\nimport time\nfrom networktables import NetworkTables\n# To see messages from networktables, you must setup logging\nimport logging\nlogging.basicConfig(level=logging.DEBUG)\n\nimport robotmap\n\n\nclass MyRobot():\n def robotInit(self):\n \"\"\"Robot initialization function\"\"\"\n # object that handles basic drive operations\n self.leftBackMotor = pikitlib.SpeedController(robotmap.BACK_LEFT)\n self.leftFrontMotor = pikitlib.SpeedController(robotmap.FRONT_LEFT)\n self.rightBackMotor = pikitlib.SpeedController(robotmap.BACK_RIGHT)\n self.rightFrontMotor = pikitlib.SpeedController(robotmap.FRONT_RIGHT)\n\n self.left = pikitlib.SpeedControllerGroup(self.leftBackMotor, self.leftFrontMotor)\n self.right = pikitlib.SpeedControllerGroup(self.rightBackMotor, self.rightFrontMotor )\n\n self.myRobot = pikitlib.DifferentialDrive(self.left, self.right)\n # self.myRobot.setExpiration(0.1)\n\n self.DEADZONE = 0.4\n\n #self.buzz = pikitlib.IllegalBuzzer()\n\n NetworkTables.initialize()\n self.driver = pikitlib.XboxController(0)\n\n def autonomousInit(self):\n self.myRobot.tankDrive(0.8, 0.8)\n\n def autonomousPeriodic(self):\n self.myRobot.tankDrive(1, 0.5)\n\n buttonAPressed = self.driver.getAButtonPressed()\n if buttonAPressed:\n logging.debug('AButton has been pressed')\n buttonAReleased = self.driver.getAButtonReleased()\n if buttonAReleased:\n logging.debug('AButton has been released')\n buttonA = self.driver.getAButton() \n if buttonA:\n logging.debug('AButton is DOWN on controller 0')\n else:\n logging.debug('AButton is UP on controller 0')\n \n\n def teleopInit(self):\n \"\"\"\n Configures appropriate robot settings for teleop mode\n \"\"\"\n self.left.setInverted(True)\n self.right.setInverted(True)\n \n def deadzone(self, val, deadzone):\n if abs(val) < deadzone:\n return 0\n return val\n\n def teleopPeriodic(self):\n #forward = -self.driver.getRawAxis(5) \n #rotation_value = rotation_value = self.driver.getX(LEFT_HAND)\n \n # Test controller\n \n forward = self.driver.getX(0)\n forward = 0.80 * self.deadzone(forward, robotmap.DEADZONE)\n rotation_value = -0.8 * self.driver.getY(1)\n self.myRobot.arcadeDrive(forward,rotation_value)\n\n\n \"\"\"\n forward = 0.7\n rotation_value = 0.2\n\n\n forward = self.deadzone(forward, 0.5)\n\n self.myRobot.arcadeDrive(forward, rotation_value)\"\"\"\n","sub_path":"pikitlib/robot.py","file_name":"robot.py","file_ext":"py","file_size_in_byte":2628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"73454470","text":"# Filename: q2_calc_cylinder_volume.py\n# Name: Ambrose Tan\n# Date created: 20130121\n# Date modified: 20130121\n# Description: Computing the volume of a cylinder\n\nimport math\nradius = 0\nlength = 0\nradius = float(input(\"Input radius of cylinder: \\n\"))\nlength = float(input(\"Input length of cylinder: \\n\"))\narea = radius * radius * math.pi\nvolume = area * length\nprint(\"Volume of cylinder is: {:0.2f}\".format(volume), \" to 2 d.p.\")\n","sub_path":"practical101/q2_calc_cylinder_volume.py","file_name":"q2_calc_cylinder_volume.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"194807218","text":"from rest_framework import serializers\n\nfrom productos.models import Product\n\nclass ProductSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model= Product\n #exclude=('state')\n exclude=('state','created_date','modified_date','deleted_date')\n\n def to_representation(self, instance):\n return {\n 'id':instance.id,\n 'descripcion': instance.description,\n 'name':instance.name,\n 'image': instance.image if instance.image != '' else '',\n 'measure_unit': instance.meausre_unit.descripton if instance.meausre_unit is not None else '',\n 'category_products': instance.category_product.description if instance.category_product is not None else '',\n }","sub_path":"productos/api/serializers/product_serializers.py","file_name":"product_serializers.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"432441694","text":"def graph_input():\n\tgraph=[]\n\tnodes=int(input(\"Enter no. of nodes : \"))\n\tedge=int(input(\"Enter no. of edges :\"))\n\tprint(\"Enter edges with weights:\")\n\tfor i in range(edge):\n\t\tedges=input(\"Enter edge %d : \"%(i+1))\n\t\tx,y,z=[int(a) for a in edges.split()]\n\t\tgraph.append([x,y,z])\n\treturn nodes,graph\t\n\n\ndef printPath(parent,j):\n\tif parent[j]==-1:\n\t\tprint(j, end=\" \"); return\n\tprintPath(parent,parent[j])\n\tprint(j,end=\" \")\n\ndef print_solution(dist, parent, n):\n\tsrc=1\n\tprint(\"Vertex\\t\\t Distance from src\\t\\tPath\")\n\tfor i in range(1,n+1):\n\t\tif i==1:\n\t\t\tcontinue\n\t\tprint(\"\\n%d-->%d \\t\\t %d \\t\\t\\t\\t:\" %(src,i,dist[i]))\n\t\tprintPath(parent,i)\n\ndef bellman(graph,src,n):\n\tdist={i:9999 for i in range(1,n+1) }\n\tparent={i:-1 for i in range(1,n+1)}\n\tdist[src]=0\n\tfor i in range(n-1):\n\t\tfor u,v,w in graph:\n\t\t\tif dist[u]!=9999 and dist[u]+w aws_instances.json\n#\n\nregions = [\n \"ap-northeast-1\",\n \"ap-northeast-2\",\n \"ap-south-1\",\n \"ap-southeast-1\",\n \"ap-southeast-2\",\n \"ca-central-1\",\n \"eu-central-1\",\n \"eu-west-1\",\n \"eu-west-2\",\n \"eu-west-3\",\n \"sa-east-1\",\n \"us-east-1\",\n \"us-east-2\",\n \"us-west-1\",\n \"us-west-2\",\n]\n\nregion_name_mapping = {\n 'ap-northeast-1': 'Asia Pacific (Tokyo)',\n 'ap-northeast-2': 'Asia Pacific (Seoul)',\n 'ap-northeast-3': 'Asia Pacific (Osaka-Local)',\n 'ap-south-1': 'Asia Pacific (Mumbai)',\n 'ap-southeast-1': 'Asia Pacific (Singapore)',\n 'ap-southeast-2': 'Asia Pacific (Sydney)',\n 'ca-central-1': 'Canada (Central)',\n 'cn-north-1': 'China (Beijing)',\n 'cn-northwest-1': 'China (Ningxia)',\n 'eu-central-1': 'EU (Frankfurt)',\n 'eu-north-1': 'EU (Stockholm)',\n 'eu-west-1': 'EU (Ireland)',\n 'eu-west-2': 'EU (London)',\n 'eu-west-3': 'EU (Paris)',\n 'sa-east-1': 'South America (Sao Paulo)',\n 'us-east-1': 'US East (N. Virginia)',\n 'us-east-2': 'US East (Ohio)',\n 'us-gov-east-1': 'AWS GovCloud (US-East)',\n 'us-gov-west-1': 'AWS GovCloud (US)',\n 'us-west-1': 'US West (N. California)',\n 'us-west-2': 'US West (Oregon)',\n}\n\nHOURS_IN_MONTH = 30 * 24\n\ndef get_instance_data(raw_data):\n print('computing instance data')\n ec2_offer = awspricing.offer('AmazonEC2')\n print('retrieved amazon ec2 offer data')\n simple_instance_data = defaultdict(list)\n for instance in raw_data:\n baseline = instance['vCPU']\n if instance['base_performance'] is not None:\n baseline = instance['base_performance']\n if baseline == \"N/A\":\n continue\n instance_info = {\n 'instanceType': instance['instance_type'],\n 'gpu': instance['GPU'],\n 'memory': instance['memory'],\n 'cpu': instance['vCPU'],\n 'burstable': instance[\"burst_minutes\"] is not None,\n 'baseline': baseline,\n 'generation': instance['generation'],\n }\n if instance[\"generation\"] != 'current':\n continue\n for region in regions:\n try:\n price = ec2_offer.ondemand_hourly(\n instance_type=instance['instance_type'],\n operating_system='Linux',\n region=region,\n )\n instance_info['price'] = float(price)\n print(instance_info)\n simple_instance_data[region].append(instance_info)\n except ValueError:\n continue\n return simple_instance_data\n\n\ndef get_raw_instance_data():\n print(\"reading data\")\n kipdir = get_kipdir()\n filename = os.path.join(\n kipdir, \"scripts/create_instance_data/aws_instances.json\")\n raw_data = json.load(open(filename))\n return raw_data\n\n\ndef make_filter(*args):\n filters = []\n for field, value in args:\n filters.append({\n \"Field\": field,\n \"Value\": value,\n \"Type\": \"TERM_MATCH\",\n })\n return filters\n\n\ndef get_price_from_product_response(response):\n pricelist_json = response['PriceList'][0]\n pricelist = json.loads(pricelist_json)\n return pricelist['terms']['OnDemand'].values()[0]['priceDimensions'].values()[0]['pricePerUnit']['USD']\n\n\ndef get_storage_pricing():\n storage_by_region = {}\n pricing = boto3.client('pricing')\n for region in regions:\n region_name = region_name_mapping[region]\n filters = make_filter(\n (\"volumeType\", \"General Purpose\"),\n (\"location\", region_name))\n response = pricing.get_products(\n ServiceCode='AmazonEC2', Filters=filters)\n try:\n price = get_price_from_product_response(response)\n storage_by_region[region] = {\n # this name/key must match the product name in our\n # usage records\n 'gp2': {'price': float(price) / HOURS_IN_MONTH},\n }\n except (IndexError, KeyError):\n print(\"No ebs pricing available in\", region)\n continue\n pprint(storage_by_region)\n return storage_by_region\n\n\ndef get_elb_pricing():\n elb_by_region = {}\n pricing = boto3.client('pricing')\n for region in regions:\n region_name = region_name_mapping[region]\n filters = make_filter(\n (\"productFamily\", \"Load Balancer\"),\n (\"groupDescription\", \"Standard Elastic Load Balancer\"),\n (\"location\", region_name))\n response = pricing.get_products(\n ServiceCode='AmazonEC2', Filters=filters)\n try:\n price = get_price_from_product_response(response)\n elb_by_region[region] = {\n # this name/key must match the product name in our\n # usage records\n 'ELB-Classic': {'price': float(price)},\n }\n except (IndexError, KeyError):\n print(\"No ELB pricing available in\", region)\n continue\n pprint(elb_by_region)\n return elb_by_region\n\n\ndef update_instance_data(args):\n print('loading raw instance data')\n raw_data = get_raw_instance_data()\n print('getting instance data')\n simple_instance_data = get_instance_data(raw_data)\n jsonfp = dumpjson(simple_instance_data)\n if args.upload:\n upload('aws_instance_data.json', jsonfp)\n else:\n write_go('aws', jsonfp, dumpjson({}))\n\n\ndef update_network_data(args):\n elb_pricing = get_elb_pricing()\n jsonfp = dumpjson(elb_pricing)\n if args.upload:\n upload('aws_network_data.json', jsonfp)\n\n\ndef update_storage_data(args):\n pricing = get_storage_pricing()\n jsonfp = dumpjson(pricing)\n if args.upload:\n upload('aws_storage_data.json', jsonfp)\n\n\nif __name__ == '__main__':\n args = parse_args()\n update_instance_data(args)\n","sub_path":"scripts/create_instance_data/create_aws_instance_data.py","file_name":"create_aws_instance_data.py","file_ext":"py","file_size_in_byte":6250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"183172799","text":"\"\"\"\n\nif you are going to compare tail then u dont have to continue\nchecking.\n\n\"\"\"\nimport sys\nimport os\n\nclass LinkedListNode:\n def __init__(self, node_value):\n self.val = node_value\n self.next = None\n\ndef _insert_node_into_singlylinkedlist(head, tail, val):\n if head == None:\n head = LinkedListNode(val)\n tail = head\n else:\n node = LinkedListNode(val)\n tail.next = node\n tail = tail.next\n return tail\n\n\ndef _display_LinkedList(head):\n temp = head\n if temp:\n while temp:\n print(temp.val,\"->\",end=\"\")\n temp = temp.next\n print(\"nil\")\n\n\ndef getSizenTail(head):\n count = 0\n temp = head\n prev = None\n while (temp):\n prev = temp\n temp = temp.next\n count+=1\n\n return count,prev\n\n'''\nwhat i am planning to do is get to size and get tail \nif tails match that means intersection exists\n'''\n\ndef find_intersection(l1,l2):\n curr1 = l1\n curr2 = l2\n\n size1,tail1 = getSizenTail (curr1)\n size2,tail2 = getSizenTail(curr2)\n\n if size1 > 0 and size2 > 0 and tail1== tail2: # we check if size > 0 and tail matches\n if size1 > size2:\n while size1 > size2:\n curr1 = curr1.next\n size1-=1\n else:\n while size2 > size1:\n curr2 = curr2.next\n size2-=1\n\n common = None\n while curr1 and curr2 :\n if curr1 == curr2 and common is None:\n common = curr1.val\n elif curr1 != curr2:\n common = None\n\n curr1 = curr1.next\n curr2 = curr2.next\n\n if (curr1 or curr2) and not common:\n return -1\n else:\n return common\n\n\n else:\n return -1\n\n\n\n\n\nif __name__ == \"__main__\":\n #f = open(os.environ['OUTPUT_PATH'], 'w')\n\n l1 = None\n l1_tail = None\n l1_size = int(input())\n l1_i = 0\n while l1_i < l1_size:\n l1_item = int(input())\n\n l1_tail = _insert_node_into_singlylinkedlist(l1, l1_tail, l1_item)\n if l1_i == 0:\n l1 = l1_tail\n l1_i += 1\n\n l2 = None\n l2_tail = None\n l2_size = int(input())\n l2_i = 0\n while l2_i < l2_size:\n l2_item = int(input())\n\n l2_tail = _insert_node_into_singlylinkedlist(l2, l2_tail, l2_item)\n if l2_i == 0:\n l2 = l2_tail\n l2_i += 1\n\n _display_LinkedList(l1)\n _display_LinkedList(l2)\n\n # --------\n merge_at = int(input())\n l1_temp = l1\n i = 0\n while i < merge_at:\n l1_temp = l1_temp.next\n i += 1\n if l2_tail == None:\n l2 = l1_temp\n else:\n l2_tail.next = l1_temp\n # --------\n _display_LinkedList(l1)\n _display_LinkedList(l2)\n res = find_intersection(l1, l2);\n print (str(res) + \"\\n\")\n\n\n","sub_path":"Python/IK/LinkedListStackQueue/intersectionofLL.py","file_name":"intersectionofLL.py","file_ext":"py","file_size_in_byte":2817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"249370444","text":"import numpy as np\nimport json\nimport cv2\nfrom pycocotools.coco import COCO\nimport os , sys\ntrain_json = '/home/zaikun/hdd/data/keypoint/annotations/person_keypoints_train2017.json'\nval_json = '/home/zaikun/hdd/data/keypoint/annotations/person_keypoints_val2017.json'\nval_dir = '/home/zaikun/hdd/data/keypoint/val2017/'\ntrain_dir = '/home/zaikun/hdd/data/keypoint/train2017/'\n\nval_save_dir = '/home/zaikun/hdd/data/coco_gender/val_2017'\ntrain_save_dir = '/home/zaikun/hdd/data/coco_gender/train_2017'\n\nf = val_json\nimg_dir = val_dir\nsave_dir = val_save_dir\n\n\ndef save_coco_gender(f, save_dir, img_dir):\n print('loading json file...')\n coco = COCO(f)\n keys = list(coco.imgs.keys())\n print('#of keys {}'.format(len(keys)))\n\n\n for i, ind in enumerate(keys):\n img_meta = coco.imgs[ind]\n img_idx = img_meta['id']\n img_name = img_meta['file_name']\n ann_idx = coco.getAnnIds(imgIds=img_idx)\n anns = coco.loadAnns(ann_idx)\n img = cv2.imread(img_dir + img_name)\n for ann in anns :\n if ann['category_id'] == 1 :\n id = ann['id']\n uid = img_name.split('.')[0] + '_' + str(id)\n bbox = ann['bbox']\n if bbox[2] < 30 or bbox[3] < 30 :\n continue\n crop_img = img[int(bbox[1]): int(bbox[3] + bbox[1]), int(bbox[0]): int(bbox[0] + bbox[2])]\n output_name = os.path.join(save_dir, uid + '.jpg')\n import pdb; pdb.set_trace()\n cv2.imwrite(output_name, crop_img)\n if i % 1000 == 0:\n print(i)\n #cv2.rectangle(img, (int(bbox[0]), int(bbox[1])), (int(bbox[2] + bbox[0]), int(bbox[3] + bbox[1])), (255, 0, 0), 5)\n\n\nif __name__ == '__main__':\n save_coco_gender(val_json, val_save_dir, val_dir)\n #save_coco_gender(train_json, train_save_dir, train_dir)\n","sub_path":"scripts/coco_gender.py","file_name":"coco_gender.py","file_ext":"py","file_size_in_byte":1877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"462466151","text":"import numpy\n\nfrom chainer import cuda\nfrom chainer import function\nfrom chainer.utils import type_check\n\n\nclass Pad(function.Function):\n\n \"\"\"Padding of an array\"\"\"\n\n def __init__(self, pad_width, mode, **keywords):\n self.mode = mode\n self.keywords = keywords\n self.pad_width = pad_width\n self.pad_bw = numpy.asarray(pad_width)\n if self.pad_bw.size == 1:\n self.pad_bw = numpy.repeat(self.pad_bw, 2)\n\n def check_type_forward(self, in_types):\n # Depending on the arguments, pad_width and keywords, the input value\n # may be inappropriate. In that case, numpy.pad or cupy.pad will raise\n # errors, so that only check the size and the dtype in this function.\n type_check.expect(in_types.size() == 1)\n x_type = in_types[0]\n type_check.expect(x_type.dtype.kind == 'f')\n\n def forward(self, inputs):\n xp = cuda.get_array_module(*inputs)\n return xp.pad(inputs[0], self.pad_width, mode=self.mode,\n **self.keywords),\n\n def backward(self, inputs, grads):\n xp = cuda.get_array_module(*inputs)\n gy = grads[0]\n array = inputs[0]\n ndims = array.ndim\n if self.pad_bw.ndim == 1:\n self.pad_bw = numpy.tile(self.pad_bw, (ndims, 1))\n for i in range(ndims):\n gy = xp.take(gy,\n indices=numpy.arange(self.pad_bw[i][0],\n self.pad_bw[i][0]\n + array.shape[i]),\n axis=i)\n return gy,\n\n\ndef pad(x, pad_width, mode, **keywords):\n \"\"\"Pad an input variable.\n\n Args:\n x (chainer.Variable or :class:``numpy.ndarray`` or cupy.ndarray):\n Input data.\n pad_width (int or array-like):\n Number of values padded to the edges of each axis.\n mode (str):\n Specifies how the function fills the periphery of the array.\n `constant`\n Pads with a constant values.\n constant_values (int or array-like):\n The values are padded for each axis.\n\n Returns:\n ~chainer.Variable: Output variable.\n\n \"\"\"\n return Pad(pad_width, mode, **keywords)(x)\n","sub_path":"chainer/functions/array/pad.py","file_name":"pad.py","file_ext":"py","file_size_in_byte":2255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"141873654","text":"import datetime\nimport decimal\nimport unittest\n\nfrom stacktach import datetime_to_decimal\n\nclass DatetimeToDecimalTestCase(unittest.TestCase):\n\n def test_datetime_to_decimal(self):\n expected_decimal = decimal.Decimal('1356093296.123')\n utc_datetime = datetime.datetime.utcfromtimestamp(expected_decimal)\n actual_decimal = datetime_to_decimal.dt_to_decimal(utc_datetime)\n self.assertEqual(actual_decimal, expected_decimal)\n\n def test_decimal_to_datetime(self):\n expected_decimal = decimal.Decimal('1356093296.123')\n expected_datetime = datetime.datetime.utcfromtimestamp(expected_decimal)\n actual_datetime = datetime_to_decimal.dt_from_decimal(expected_decimal)\n self.assertEqual(actual_datetime, expected_datetime)\n","sub_path":"tests/unit/test_datetime_to_decimal.py","file_name":"test_datetime_to_decimal.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"68588975","text":"from pwn import*\np = process('./tomato')\nelf = ELF('./tomato')\nlibc = elf.libc\n\n# gadget\nputs_plt = elf.plt['puts']\nputs_got = elf.got['puts']\nread_got = elf.got['read']\nread_plt = elf.plt['read']\nbss = 0x602080+0x500\nrlr_20 = 0x400D4C\nrlr_50 = 0x400D1A\nlr = 0x400D29\npop_rdi = 0x400fa3\npop_rsi = 0x400d84\npop_rdx = 0x400d82\none_list = [0x45216,0x4526a,0xf02a4,0xf1147]\n\n# def\nt = 0.1\ndef menu(sel):\n\tp.sendlineafter('> ',str(sel));sleep(t)\n\ndef buy(count,comment):\n\tmenu(1)\n\tp.sendlineafter('> ',str(count));sleep(t)\n\tp.sendafter('You : ',comment);sleep(t)\n\ndef hidden(comment):\n\tmenu(4)\n\tp.sendafter('explain : ',str(comment));sleep(t)\n\n# hidden menu Jch\npay = ''\npay += 'a'*0x18\npay += p8(2)\nbuy(1,pay)\n\n### stack pivot\n# Rip Controll\npay = ''\npay += 'a'*0x20\t\t# buf\npay += p64(bss+0x20)\t# sfp\npay += p64(rlr_20)\t# rip\nhidden(pay)\n\n# ROP Stage 1 : Expand Read_Leave_Ret Section\npay = ''\npay += p64(bss+0x300)\t# sfp\npay += p64(pop_rsi)\npay += p64(bss+0x200)\npay += p64(rlr_50)\npay += 'b'*(0x20-len(pay))\npay += p64(bss)\npay += p64(lr)\np.send(pay);sleep(t)\n# ROP stage 2 : Leak libc_puts addr\npay = ''\npay += 'x'*0x8\npay += p64(pop_rdi)\npay += p64(puts_got)\npay += p64(puts_plt)\npay += p64(pop_rdi)\npay += p64(0)\npay += p64(pop_rsi)\npay += p64(puts_got)\npay += p64(read_plt)\npay += p64(puts_plt)\npay += 'X'*(0x100-len(pay))\npay += p64(bss+0x200)\t# sfp\npay += p64(lr)\t\t# rip\n\np.sendline(pay);sleep(t)\n\n# FINISH\np.recvuntil('GET OUT!\\n')\np.recvuntil('GET OUT!\\n')\n\nlibc_puts = u64(p.recv(6).ljust(8,'\\x00'))\nlibc_base = libc_puts - libc.symbols['puts']\nlog.info(\"libc_puts : {}\".format(hex(libc_puts)))\nlog.info(\"libc_base : {}\".format(hex(libc_base)))\n\n#p.sendline(p64(libc_base+one_list[0]))\t\t# exploit !!!\np.sendline(p64(0x61616161))\n\np.interactive()\n\n","sub_path":"SFstudy/stack/sf6/ex.py","file_name":"ex.py","file_ext":"py","file_size_in_byte":1754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"132498048","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2021/8/5 10:45\n# @Author : liu yang\n# @Desc: 日志操作\n\nimport logging\nimport os\n\nfrom tools.path_tool import PathTool\nimport datetime\n\n\nclass LoggingTool:\n\n _log_name = 'run.log'\n _format = \"%(asctime)s - %(name)s - %(levelname)s - %(message)s\"\n\n @staticmethod\n def get_logger(name):\n logger = logging.getLogger(name)\n logger.setLevel(logging.DEBUG)\n fh = logging.FileHandler(LoggingTool._get_log_path())\n fh.setLevel(logging.DEBUG)\n fh.setFormatter(logging.Formatter(LoggingTool._format))\n logger.addHandler(fh)\n return logger\n\n @staticmethod\n def _get_log_path():\n path = PathTool.get_package_dir('log')\n today = datetime.date.today().strftime('%Y-%m-%d')\n dirs = path + '/' + today\n # print(path)\n\n if not os.path.exists(dirs):\n os.makedirs(dirs)\n return dirs + '/' + LoggingTool._log_name\n\n\nif __name__ == \"__main__\":\n pass\n","sub_path":"tools/logging_tool.py","file_name":"logging_tool.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"594794584","text":"import random\n\ndef get():\n return 1+(random.randint(1,100000)%6)\n\nN = 1000\nN2 = 1000\nacc = 0\n\nfor i in range(N):\n mx = 0\n for i in range(N2):\n mx = max(mx, get())\n acc += mx\n\nprint (acc * 1.0 / N)\n\n","sub_path":"259-1/a/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"467899980","text":"############################################################\n# -*- coding: utf-8 -*-\n#\n# # # # # # #\n# ## ## # ## # #\n# # # # # # # # # # #\n# # ## # ## ## ######\n# # # # # # #\n#\n# Python-based Tool for interaction with the 10micron mounts\n# GUI with PyQT5 for python\n# Python v3.7.5\n#\n# Michael Würtenberger\n# (c) 2019\n#\n# Licence APL2.0\n#\n###########################################################\n# standard libraries\nimport pytest\nfrom unittest import mock\nimport logging\n\n# external packages\nfrom PyQt5.QtWidgets import QMessageBox\nfrom PyQt5.QtWidgets import QInputDialog\nfrom PyQt5.QtCore import QObject\nfrom PyQt5.QtWidgets import QWidget\nfrom PyQt5.QtCore import QThreadPool\nfrom PyQt5.QtCore import pyqtSignal\nfrom mountcontrol.qtmount import Mount\n\n# local import\nfrom mw4.gui.mainWmixin.tabSettImaging import SettImaging\nfrom mw4.gui.widgets.main_ui import Ui_MainWindow\nfrom mw4.gui.widget import MWidget\nfrom mw4.imaging.camera import Camera\nfrom mw4.imaging.focuser import Focuser\nfrom mw4.imaging.filter import Filter\nfrom mw4.telescope.telescope import Telescope\nfrom mw4.base.loggerMW import CustomLogger\n\n\n@pytest.fixture(autouse=True, scope='function')\ndef module_setup_teardown(qtbot):\n global ui, widget, Test, Test1, app\n\n class Test1(QObject):\n mount = Mount(expire=False, verbose=False, pathToData='mw4/test/data')\n update1s = pyqtSignal()\n threadPool = QThreadPool()\n\n class Test(QObject):\n config = {'mainW': {}}\n threadPool = QThreadPool()\n update1s = pyqtSignal()\n message = pyqtSignal(str, int)\n camera = Camera(app=Test1())\n focuser = Focuser(app=Test1())\n filter = Filter(app=Test1())\n telescope = Telescope(app=Test1())\n\n widget = QWidget()\n ui = Ui_MainWindow()\n ui.setupUi(widget)\n\n app = SettImaging(app=Test(), ui=ui,\n clickable=MWidget().clickable)\n app.changeStyleDynamic = MWidget().changeStyleDynamic\n app.guiSetText = MWidget().guiSetText\n app.close = MWidget().close\n app.deleteLater = MWidget().deleteLater\n app.log = CustomLogger(logging.getLogger(__name__), {})\n\n qtbot.addWidget(app)\n\n yield\n\n del widget, ui, Test, Test1, app\n\n\ndef test_initConfig_1():\n suc = app.initConfig()\n assert suc\n\n\ndef test_storeConfig_1():\n suc = app.storeConfig()\n assert suc\n\n\ndef test_updateParameters_1():\n suc = app.updateParameters()\n assert suc\n\n\ndef test_updateParameters_2():\n app.app.telescope.data['TELESCOPE_INFO.TELESCOPE_FOCAL_LENGTH'] = 1\n app.app.camera.data['CCD_INFO.CCD_PIXEL_SIZE_X'] = 1\n app.app.camera.data['CCD_INFO.CCD_PIXEL_SIZE_Y'] = 1\n app.app.telescope.data['TELESCOPE_INFO.TELESCOPE_APERTURE'] = 1\n app.app.camera.data['CCD_INFO.CCD_MAX_X'] = 1\n app.app.camera.data['CCD_INFO.CCD_MAX_Y'] = 1\n app.app.camera.data['CCD_COOLER.COOLER_ON'] = True\n app.app.camera.data['READOUT_QUALITY.QUALITY_LOW'] = True\n suc = app.updateParameters()\n assert suc\n\n\ndef test_setCoolerTemp_1():\n with mock.patch.object(QMessageBox,\n 'critical'):\n suc = app.setCoolerTemp()\n assert not suc\n\n\ndef test_setCoolerTemp_2():\n app.app.camera.data['CCD_TEMPERATURE.CCD_TEMPERATURE_VALUE'] = 10\n with mock.patch.object(QMessageBox,\n 'critical'):\n with mock.patch.object(QInputDialog,\n 'getInt',\n return_value=(10, False)):\n suc = app.setCoolerTemp()\n assert not suc\n\n\ndef test_setCoolerTemp_3():\n app.app.camera.data['CCD_TEMPERATURE.CCD_TEMPERATURE_VALUE'] = 10\n with mock.patch.object(QMessageBox,\n 'critical'):\n with mock.patch.object(QInputDialog,\n 'getInt',\n return_value=(10, True)):\n suc = app.setCoolerTemp()\n assert suc\n\n\ndef test_setFilterNumber_1():\n with mock.patch.object(QMessageBox,\n 'critical'):\n suc = app.setFilterNumber()\n assert not suc\n\n\ndef test_setFilterNumber_2():\n app.app.filter.data['FILTER_SLOT.FILTER_SLOT_VALUE'] = 10\n with mock.patch.object(QMessageBox,\n 'critical'):\n with mock.patch.object(QInputDialog,\n 'getInt',\n return_value=(10, False)):\n suc = app.setFilterNumber()\n assert not suc\n\n\ndef test_setFilterNumber_3():\n app.app.filter.data['FILTER_SLOT.FILTER_SLOT_VALUE'] = 10\n with mock.patch.object(QMessageBox,\n 'critical'):\n with mock.patch.object(QInputDialog,\n 'getInt',\n return_value=(10, True)):\n suc = app.setFilterNumber()\n assert suc\n\n\ndef test_setFilterName_1():\n with mock.patch.object(QMessageBox,\n 'critical'):\n suc = app.setFilterName()\n assert not suc\n\n\ndef test_setFilterName_2():\n app.app.filter.data['FILTER_SLOT.FILTER_SLOT_VALUE'] = 10\n with mock.patch.object(QMessageBox,\n 'critical'):\n with mock.patch.object(QInputDialog,\n 'getItem',\n return_value=(10, False)):\n suc = app.setFilterName()\n assert not suc\n\n\ndef test_setFilterName_3():\n app.app.filter.data['FILTER_SLOT.FILTER_SLOT_VALUE'] = 1\n app.app.filter.data['FILTER_SLOT_NAME_1'] = 'test1'\n app.app.filter.data['FILTER_SLOT_NAME_2'] = 'test2'\n with mock.patch.object(QMessageBox,\n 'critical'):\n with mock.patch.object(QInputDialog,\n 'getItem',\n return_value=('test1', True)):\n suc = app.setFilterName()\n assert suc\n\n\ndef test_setDownloadModeFast():\n suc = app.setDownloadModeFast()\n assert suc\n\n\ndef test_setDownloadModeSlow():\n suc = app.setDownloadModeSlow()\n assert suc\n\n\ndef test_setCoolerOn():\n suc = app.setCoolerOn()\n assert suc\n\n\ndef test_setCoolerOff():\n suc = app.setCoolerOff()\n assert suc\n","sub_path":"mw4/test/test_new/gui/mainWmixin/test_tabSettImaging.py","file_name":"test_tabSettImaging.py","file_ext":"py","file_size_in_byte":6282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"532089046","text":"cities = [\"Delhi\", \"Bangalore\", \"Pune\", \"Kolkata\", \"Hyderabad\", \"Chandigarh\"]\n\n# Key thing to notice here is that file=city_file is passed as a named argument but not a value to it hence it does not\n# spaces around file\n\n# The data is written to a buffer from where it is in the background written in a text file.\n\n# Python 3 in 2008 has come up with a Flush parameter which speeds up the data writing process on a slow device.\n\n# with open('cityFile.txt', 'w') as city_file:\n# for city in cities:\n# #print(city, file=city_file)\n# print(city, file=city_file, flush=True)\n\ncitiesRead = []\n\nwith open('cityFile.txt', 'r') as cities:\n for city in cities:\n print(city, end='')","sub_path":"AdvancedPythonProject/FileIOWrite.py","file_name":"FileIOWrite.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"85367235","text":"# coding=utf-8\n# Copyright 2019 Kacper Kielak\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Implementation of a Wasserstein Generative Adversarial Net with Gradient\npenalty as introduced in Gulrajani et al. (2017).\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n\nfrom dopamine.generators.wgan import wgan\nfrom dopamine.generators.gan.gan import DISCRIMINATOR_SCOPE\nfrom dopamine.generative_tasks import gen_lib\nimport tensorflow as tf\n\nimport gin.tf\n\n\n@gin.configurable\nclass WassersteinGANGP(wgan.WassersteinGAN):\n\n def __init__(self,\n sess,\n output_shape,\n processing_dtype=tf.float32,\n conditional_input_shapes=None,\n noise_shape=(100,),\n generator_network_fn=gen_lib.mnist_generator_gan,\n discriminator_network_fn=gen_lib.mnist_discriminator_gan,\n tf_device='/cpu:*',\n max_tf_checkpoints_to_keep=4,\n g_optimizer=tf.train.AdamOptimizer(),\n d_optimizer=tf.train.AdamOptimizer(),\n k=1,\n penalty_coeff=10,\n summary_writer=None,\n summary_writing_frequency=500,\n allow_partial_reload=False):\n \"\"\"Initializes GANs and constructs the components of their graph.\n\n Args:\n sess: `tf.Session`, for executing ops.\n output_shape: tuple of ints describing the output shape.\n processing_dtype: tf.DType, specifies the type used to processing data.\n Note that it should be some type of float (e.g. tf.float32 or tf.float64).\n conditional_input_shapes: tuple of tuples of ints describing the\n conditional input shapes (there may be more than one input). None in\n case of no conditional inputs.\n generator_network_fn: function expecting three parameters:\n (noise, conditional_inputs, output_shape). This function will return\n the object containing the tensors output by the generator network.\n discriminator_network_fn: function expecting three parameters:\n (conditional_inputs, output). This function will return\n the object containing the tensor output by the discriminator network,\n and the tensor containing its logit.\n tf_device: str, Tensorflow device on which the agent's graph is executed.\n max_tf_checkpoints_to_keep: int, the number of TensorFlow\n checkpoints to keep.\n g_optimizer: `tf.train.Optimizer`, for training the generator.\n d_optimizer: `tf.train.Optimizer`, for training the discriminator.\n k: int, number of iterations of the discriminator per generator iteration.\n penalty_coeff: float, coefficient specifying the importance of gradient\n penalty in the overall discriminator loss function.\n summary_writer: SummaryWriter object for outputting training statistics.\n Summary writing disabled if set to None.\n summary_writing_frequency: int, frequency with which summaries will be\n written. Lower values will result in slower training.\n allow_partial_reload: bool, whether we allow reloading a partial agent\n (for instance, only the network parameters).\n \"\"\"\n self.penalty_coeff = penalty_coeff\n wgan.WassersteinGAN.__init__(self,\n sess,\n output_shape,\n processing_dtype=processing_dtype,\n conditional_input_shapes=conditional_input_shapes,\n noise_shape=noise_shape,\n generator_network_fn=generator_network_fn,\n discriminator_network_fn=discriminator_network_fn,\n tf_device=tf_device,\n max_tf_checkpoints_to_keep=max_tf_checkpoints_to_keep,\n g_optimizer=g_optimizer,\n d_optimizer=d_optimizer,\n k=k,\n summary_writer=summary_writer,\n summary_writing_frequency=summary_writing_frequency,\n allow_partial_reload=allow_partial_reload)\n tf.logging.info('\\t penalty_coeff: %d', penalty_coeff)\n\n def _define_discriminator_loss(self):\n \"\"\"Defines loss for the discriminator network.\n\n For wasserstein GAN with Gradient Penalty, discriminator loss is defined by:\n max (real_discrimination - gen_discrimination - gradient_penalty) =\n = min (gen_discrimination - real_discrimination + gradient_penalty)\n\n Returns: Tensor containing discriminator loss value.\n \"\"\"\n # Calculate standard loss\n real_d_loss = tf.reduce_mean(self._real_discriminator_out)\n real_d_loss = tf.negative(real_d_loss, name='real_discriminator_loss')\n gen_d_loss = tf.reduce_mean(self._gen_discriminator_out,\n name='gen_discriminator_loss')\n non_penalized_loss = tf.add(real_d_loss, gen_d_loss,\n name='non_penalized_discrminator_loss')\n\n # Calculate gradient penalty\n differences = tf.subtract(self._generator_outputs,\n self._real_output_ph,\n name='differences')\n random_scaling = tf.random_uniform(\n shape=[tf.shape(self._real_output_ph)[0], *([1]*len(self.output_shape))],\n dtype=self.processing_dtype, minval=0, maxval=1\n )\n interpolates = tf.add(self._real_output_ph,\n differences * random_scaling,\n name='interpolates')\n\n with tf.variable_scope(DISCRIMINATOR_SCOPE, reuse=True):\n interpolates_discriminator_out = self.discriminator_network_fn(\n self._conditional_inputs, interpolates\n )\n\n interp_grads = tf.gradients(interpolates_discriminator_out, [interpolates],\n name='interpolates_gradients')[0]\n slopes = tf.sqrt(tf.reduce_sum(tf.square(interp_grads),\n reduction_indices=[1]), name='slopes')\n gradient_penalty = tf.reduce_mean((slopes - 1.) ** 2,\n name='gradient_penalty')\n gradient_penalty = tf.scalar_mul(self.penalty_coeff, gradient_penalty)\n\n return tf.add(non_penalized_loss, gradient_penalty,\n name='discriminator_loss')\n\n def _build_discriminator_train_op(self):\n discriminator_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,\n scope=DISCRIMINATOR_SCOPE)\n self._d_grads = self.d_optimizer.compute_gradients(\n self._discriminator_loss,\n var_list=discriminator_vars + self.d_optimizer.variables()\n )\n self._d_train_op = self.d_optimizer.apply_gradients(self._d_grads)\n","sub_path":"dopamine/generators/wgan_gp/wgan_gp.py","file_name":"wgan_gp.py","file_ext":"py","file_size_in_byte":7368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"110636510","text":"import math\r\nimport random\r\nimport csv\r\nimport sys\r\nimport gc\r\nfrom tqdm import tqdm\r\n# other users followed by particular user. key:userID,value: set of users\r\nuser_follow = {}\r\n# other users who follow this user. key: userID,value: set of users.\r\nfollow_user = {}\r\n# how many people this user follow\r\nuser_follow_num = {}\r\n# how many people follow this user\r\nfollow_user_num = {}\r\n\r\n# input train set\r\nwith open('train.txt', \"r\") as f:\r\n for line in f:\r\n piece = line.strip().split(\"\\t\")\r\n this_user = piece[0]\r\n who_follow_this_user = set()\r\n for i in range(1, len(piece)):\r\n who_follow_this_user.add(piece[i])\r\n follow_user.setdefault(piece[i], set(this_user)).add(this_user)\r\n follow_user_num[piece[i]] = follow_user_num.get(piece[i], 0) + 1\r\n if len(who_follow_this_user)==0:\r\n print(\"user {} has an empty list\".format(this_user))\r\n else:\r\n user_follow[this_user] = who_follow_this_user\r\n user_follow_num[this_user] = len(who_follow_this_user)\r\nprint(\"train.txt is inputed\")\r\n# print(\"source list has {} samples. target list has {} samples. Intersection has {} samples.\".\r\n# format(\r\n# len(user_follow),\r\n# len(follow_user),\r\n# len(set(user_follow.keys())&set(follow_user.keys()))))\r\n\r\n# input test_set\r\ntest_set = {}\r\ntest_result=[]\r\nwith open('test-public.txt', \"r\") as f:\r\n # read header and pass\r\n line = f.readline()\r\n # read data\r\n line = f.readline().strip()\r\n while line:\r\n piece = line.split(\"\\t\")\r\n test_set[piece[1]] = piece[2]\r\n test_result.append((piece[1],piece[2]))\r\n line = f.readline().strip()\r\nprint(\"test-public.txt is inputed.\")\r\n# print(\"source in 20W is {}, source in 500W is {}, target in 20W is {}, target in 500W is {}\".format(source_in_source_list,source_in_target_list,target_in_source_list,target_in_target_list))\r\n\r\n\"\"\"\r\nmetric list:\r\nCN:Common Neighbors\r\nJC:Jaccard Coefficient\r\nSI:Sørensen Index\r\nSC:Salton Cosine Similarity\r\nHP:Hub Promoted\r\nHD:Hub Depressed\r\nLHN:Leicht-Holme-Nerman\r\nAA:Adamic-Adar Coefficient\r\nPA:Preferential Attachment\r\n\"\"\"\r\nmetrics = ['CN', 'JC', 'SI', 'SC', 'HP', 'HD', 'LHN', 'RA', 'PA']\r\n\"\"\"\r\nmode list:\r\nfeature explanation: reference from https://arxiv.org/pdf/1411.5118.pdf\r\nfollow means source user follow others\r\nfollow by means source user followed by others\r\nintersection: source user follow and target user follow by\r\nreverse_intersection: source user follow by and target user follow\r\n\"\"\"\r\n# modes = ['follow', 'follow_by', 'intersection', 'reverse_intersection']\r\nmodes = ['follow_by', 'intersection']\r\n\"\"\"\r\nfeature list:\r\n 'follow_CN','follow_by_CN','intersection_CN','reverse_intersection_CN'\r\n 'follow_JC','follow_by_JC','intersection_JC','reverse_intersection_JC'\r\n 'follow_SI', 'follow_by_SI','intersection_SI','reverse_intersection_SI'\r\n 'follow_SC', 'follow_by_SC','intersection_SC','reverse_intersection_SC'\r\n 'follow_HP', 'follow_by_HP','intersection_HP','reverse_intersection_HP'\r\n 'follow_HD', 'follow_by_HD','intersection_HD','reverse_intersection_HD'\r\n 'follow_LHN', 'follow_by_LHN','intersection_LHN','reverse_intersection_LHN'\r\n 'follow_AA', 'follow_by_AA','intersection_AA','reverse_intersection_AA'\r\n 'follow_PA', 'follow_by_PA','intersection_PA','reverse_intersection_PA'\r\n\"\"\"\r\n# note the sequence is not consistent with above comment\r\n# creat header for features\r\nfeatures = ['source', 'target', 'exist',\r\n 'user_follow_num_source', 'follow_user_num_source',\r\n 'follow_user_num_target']\r\nfor mode in modes:\r\n for metric in metrics:\r\n features.append(mode + '_' + metric)\r\nprint(\"feature is created with dictionary: /n {}\".format(features))\r\n\r\n\r\n# calculate each feature for a sample pair.\r\ndef calculate_metric(mode, source=None, target=None):\r\n # if mode is 'follow':\r\n # first_list = user_follow.get(source, set()) # safe\r\n # second_list = user_follow.get(target, set())\r\n # elif mode is 'follow_by':\r\n # first_list = follow_user.get(source, set()) # safe\r\n # second_list = follow_user.get(target, set()) # safe\r\n # elif mode is 'intersection':\r\n # first_list = user_follow.get(source, set()) # safe\r\n # second_list = follow_user.get(target, set()) # safe\r\n # else:\r\n # first_list = follow_user.get(source, set()) # safe\r\n # second_list = user_follow.get(target, set())\r\n\r\n if mode is 'follow_by':\r\n first_list = follow_user.get(source, set()) # safe\r\n second_list = follow_user.get(target, set()) # safe\r\n elif mode is 'intersection':\r\n first_list = user_follow.get(source, set()) # safe\r\n second_list = follow_user.get(target, set()) # safe\r\n else:\r\n print('Error, unknown mode {}'.format(mode))\r\n sys.exit(0)\r\n intersection = first_list & second_list\r\n union = first_list | second_list\r\n PA = len(first_list) * len(second_list)\r\n CN = float(len(intersection))\r\n JC = CN / len(union)\r\n SI = CN / (len(first_list) + len(second_list))\r\n SC = CN / math.sqrt(len(first_list) * len(second_list))\r\n HP = CN / min(len(first_list), len(second_list))\r\n HD = CN / max(len(first_list), len(second_list))\r\n LHN = CN / PA\r\n CN = int(CN)\r\n RA = 0\r\n # calculate RA\r\n if mode is \"follow_by\":\r\n for i in intersection:\r\n if user_follow_num.get(i):\r\n RA += float(1) / user_follow_num.get(i)\r\n elif mode is \"intersection\":\r\n # note that source follow more user, less important this intermediary is\r\n # more user follow i, less important this intermediary is\r\n # i follow more user , less important this intermediary is\r\n # more user follow target, less important this intermediary is\r\n for i in intersection:\r\n res = user_follow_num.get(source, 0) * \\\r\n follow_user_num.get(i, 0) * \\\r\n user_follow_num.get(i, 0) * \\\r\n follow_user_num.get(target, 0)\r\n if res != 0:\r\n RA += float(1) / res\r\n else:\r\n for i in intersection:\r\n # note that target follow more user, less important this intermediary is\r\n # more user follow i, less important this intermediary is\r\n # i follow more user , less important this intermediary is\r\n # more user follow source, less important this intermediary is\r\n res = user_follow_num.get(target, 0) * \\\r\n follow_user_num.get(i, 0) * \\\r\n user_follow_num.get(i, 0) * \\\r\n follow_user_num.get(source, 0)\r\n if res != 0:\r\n RA += float(1) / res\r\n return [CN, JC, SI, SC, HP, HD, LHN, RA, PA]\r\n\r\n\r\n# random generate train sample, no duplicate with test set. and generate features.\r\ndef generate_train_set():\r\n user_list = list(user_follow.keys())\r\n follow_list = list(follow_user.keys())\r\n with open(\"train_set.csv\", \"w\", newline=\"\") as f:\r\n writer = csv.DictWriter(f, fieldnames=features)\r\n writer.writeheader()\r\n for i in tqdm(range(50000)):\r\n try:\r\n source = random.choice(user_list)\r\n target = random.choice(list(user_follow.get(source)))\r\n feature_for_this = {'source': source,\r\n 'target': target,\r\n 'exist': 1,\r\n 'user_follow_num_source': user_follow_num.get(source),\r\n 'follow_user_num_source': follow_user_num.get(source),\r\n 'follow_user_num_target': follow_user_num.get(target)}\r\n for mode in modes:\r\n for one_metrics in list(zip(calculate_metric(mode=mode, source=source, target=target), metrics)):\r\n feature_for_this[mode + '_' + one_metrics[1]] = one_metrics[0]\r\n writer.writerow(feature_for_this)\r\n except IndexError:\r\n pass\r\n print('positive sample generated')\r\n gc.collect()\r\n for i in tqdm(range(50000)):\r\n try:\r\n source = random.choice(user_list)\r\n while (True):\r\n target = random.choice(follow_list)\r\n if target in (user_follow.get(source)) or test_set.get(source) == target:\r\n continue\r\n else:\r\n break\r\n feature_for_this = {'source': source,\r\n 'target': target,\r\n 'exist': 0,\r\n 'user_follow_num_source': user_follow_num.get(source),\r\n 'follow_user_num_source': follow_user_num.get(source),\r\n 'follow_user_num_target': follow_user_num.get(target)}\r\n for mode in modes:\r\n for one_metrics in list(zip(calculate_metric(mode=mode, source=source, target=target), metrics)):\r\n feature_for_this[mode + '_' + one_metrics[1]] = one_metrics[0]\r\n writer.writerow(feature_for_this)\r\n except IndexError:\r\n pass\r\n print('negative samples generated')\r\n gc.collect()\r\ngenerate_train_set()\r\n\r\ndef generate_test_set():\r\n with open(\"test_set.csv\", \"w\", newline=\"\") as f:\r\n features.remove('exist')\r\n writer = csv.DictWriter(f, fieldnames=features)\r\n writer.writeheader()\r\n for test_sample in tqdm(test_result):\r\n feature_for_this = {'source': test_sample[0],\r\n 'target': test_sample[1],\r\n 'user_follow_num_source': user_follow_num.get(test_sample[0]),\r\n 'follow_user_num_source': follow_user_num.get(test_sample[0]),\r\n 'follow_user_num_target': follow_user_num.get(test_sample[1])}\r\n for mode in modes:\r\n for one_metrics in list(zip(calculate_metric(mode=mode, source=test_sample[0], target=test_sample[1]), metrics)):\r\n feature_for_this[mode + '_' + one_metrics[1]] = one_metrics[0]\r\n writer.writerow(feature_for_this)\r\n print('testcsv is generated')\r\ngenerate_test_set()\r\n","sub_path":"sml_proj1/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":10533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"620307754","text":"# Code for displaying single circulation model\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nimport numpy as np\n\ndef display_fluxes(data_structure, output_file_string=\"\", t_limits=[], \\\n kinetic_scheme='3state_with_SRX',\n dpi=300):\n no_of_rows = 3\n no_of_cols = 1\n\n f = plt.figure(constrained_layout=True)\n f.set_size_inches([7, 4])\n spec2 = gridspec.GridSpec(nrows=no_of_rows, ncols=no_of_cols,\n figure=f)\n\n # Check for t_limits, prune data if necessary\n if t_limits:\n t = data_structure['time']\n vi = np.nonzero((t>=t_limits[0])&(t<=t_limits[1]))\n data_structure = data_structure.iloc[vi]\n\n if (kinetic_scheme=='3state_with_SRX'):\n\n ax1 = f.add_subplot(spec2[0, 0])\n ax1.plot('time', 'J1', data=data_structure, label='J1')\n ax1.plot('time', 'J2', data=data_structure, label='J2')\n #ax1.set_xlabel('Time (s)')\n ax1.set_ylabel('Flux',fontsize = 7)\n ax1.legend(bbox_to_anchor=(1.05, 1),fontsize = 7)\n ax1.tick_params(labelsize = 7)\n\n ax2 = f.add_subplot(spec2[1, 0])\n ax2.plot('time', 'J3', data=data_structure, label='J3')\n ax2.plot('time', 'J4', data=data_structure, label='J4')\n #ax2.set_xlabel('Time (s)')\n ax2.set_ylabel('Flux',fontsize = 7)\n ax2.legend(bbox_to_anchor=(1.05, 1),fontsize = 7)\n ax2.tick_params(labelsize = 7)\n\n ax3 = f.add_subplot(spec2[2, 0])\n ax3.plot('time', 'Jon', data=data_structure, label='Jon')\n ax3.plot('time', 'Joff', data=data_structure, label='Joff')\n ax3.set_xlabel('Time (s)',fontsize = 7)\n ax3.set_ylabel('Flux',fontsize = 7)\n ax3.legend(bbox_to_anchor=(1.05, 1),fontsize = 7)\n ax3.tick_params(labelsize = 7)\n\n if (output_file_string):\n save_figure_to_file(f, output_file_string, dpi);\n\ndef save_figure_to_file(f, im_file_string, dpi=None, verbose=1):\n # Writes an image to file\n\n import os\n from skimage.io import imsave\n\n # Check directory exists and save image file\n dir_path = os.path.dirname(im_file_string)\n if not os.path.isdir(dir_path):\n os.makedirs(dir_path)\n\n if (verbose):\n print('Saving figure to to %s' % im_file_string)\n\n f.savefig(im_file_string, dpi=dpi)\n","sub_path":"Python_code/modules/MyoSim/half_sarcomere/display.py","file_name":"display.py","file_ext":"py","file_size_in_byte":2338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"101230628","text":"from machine import Pin\r\nimport time\r\n\r\nfilas = [ 13, 12, 14, 27 ]\r\ncolumnas = [ 26, 25, 35, 34 ]\r\n\r\npin_fila = [ Pin(nombre_pin, mode=Pin.OUT) for nombre_pin in filas ]\r\n\r\npin_columna = [ Pin(nombre_pin, mode=Pin.IN, pull=Pin.PULL_DOWN) for nombre_pin in columnas ]\r\n\r\nmatriz = [[\"1\", \"2\", \"3\", \"A\"]\r\n ,[\"4\", \"5\", \"6\", \"B\"]\r\n ,[\"7\", \"8\", \"9\", \"C\"]\r\n ,[\"*\", \"0\", \"#\", \"D\"]]\r\n\r\n\r\nespera = 120 // len(filas)\r\n\r\ndef get_pin_value():\r\n\r\n valores = [pin_columna[0].value(),pin_columna[1].value(),pin_columna[2].value(),pin_columna[3].value()]\r\n #valores1 = [pin_fila[0].value(),pin_fila[1].value(),pin_fila[2].value(),pin_fila[3].value()]\r\n #print(valores)\r\n #print(valores1)\r\n try:\r\n index_t = valores.index(1)\r\n return index_t + 1\r\n except:\r\n return None\r\n\r\ndef getkey():\r\n\r\n for p in pin_fila:\r\n p.value(0)\r\n\r\n while True:\r\n\r\n lfila = -1\r\n\r\n for fila in pin_fila:\r\n lfila += 1\r\n fila.value(1)\r\n #print(lfila)\r\n\r\n time.sleep_ms(espera)\r\n\r\n index1 = get_pin_value()\r\n\r\n if index1:\r\n time.sleep_ms(10)\r\n index2 = get_pin_value()\r\n\r\n if index1 == index2:\r\n fila.value(0)\r\n return matriz[lfila][index1-1]\r\n\r\n fila.value(0)\r\n\r\n\r\n\r\ndef getkey_(timeout = 500):\r\n timeoutaux = time.ticks_ms()\r\n while True:\r\n\r\n if time.ticks_ms() > timeoutaux + timeout:\r\n break\r\n\r\n lfila = -1\r\n\r\n for fila in pin_fila:\r\n lfila += 1\r\n fila.value(1)\r\n #print(lfila)\r\n\r\n time.sleep_ms(espera)\r\n\r\n index1 = get_pin_value()\r\n\r\n if index1:\r\n time.sleep_ms(10)\r\n index2 = get_pin_value()\r\n\r\n if index1 == index2:\r\n return matriz[lfila][index1-1]\r\n\r\n fila.value(0)\r\n\r\n\r\ndef define_pines(vector):\r\n if len(vector) == 8:\r\n filas = vector[0:4]\r\n columnas = vector[4:8]\r\n\r\n pin_fila = [ Pin(nombre_pin, mode=Pin.OUT) for nombre_pin in filas ]\r\n\r\n pin_columna = [ Pin(nombre_pin, mode=Pin.IN, pull=Pin.PULL_DOWN) for nombre_pin in columnas ]\r\n\r\n else:\r\n print(\"error: Deben ser 8 pines\")","sub_path":"keypad.py","file_name":"keypad.py","file_ext":"py","file_size_in_byte":2317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"207010417","text":"import json\r\nimport csv\r\nimport argparse\r\nimport glob\r\nimport matplotlib\r\nimport os\r\njoin = os.path.join\r\nimport numpy as np\r\n\r\nmatplotlib.use(\"Agg\")\r\nimport matplotlib.pyplot as plt\r\n\r\nfrom logger import add_file_handler_to_logger, logger\r\n\r\nadd_file_handler_to_logger(name=\"main\", dir_path=f\"logs/\", level=\"DEBUG\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument(\"-docker_name\", default=\"fully_suplearn_subtask1\", help=\"team docker name\")\r\n parser.add_argument(\"-save_path\", default=\"./results\", help=\"save_path\")\r\n time_interval = 0.1\r\n args = parser.parse_args()\r\n logger.info(\"we are counting: {args.docker_name}\")\r\n json_dir = join(args.save_path, args.docker_name)\r\n csv_path = join(json_dir, args.docker_name + '_Efficiency.csv')\r\n jsonl = sorted(glob.glob(json_dir + \"/*.json\"))\r\n alldata = []\r\n for item in jsonl:\r\n csv_l = []\r\n name = item.split(os.sep)[-1].split(\".\")[0]\r\n csv_l.append(name + '.nii.gz')\r\n zitem = item\r\n with open(zitem) as f:\r\n try:\r\n js = json.load(f)\r\n except Exception as error:\r\n logger.error(f\"{item} have error\")\r\n logger.exception(error)\r\n if \"time\" not in js:\r\n logger.error(f\"{item} don't have time!!!!\")\r\n logger.info(f\"Manually compute {item}\")\r\n time = time_interval * len(js[\"gpu_memory\"])\r\n else:\r\n time = js[\"time\"]\r\n csv_l.append(np.round(time,2))\r\n #CPU\r\n user, system, all_cpu_used = [item[0] for item in js['cpu_list']], [item[1] for item in js['cpu_list']], [\r\n 100 - item[2] for item in js['cpu_list']]\r\n plt.cla()\r\n x = [item * time_interval for item in range(len(user))]\r\n plt.xlabel(\"Time (s)\", fontsize=\"large\")\r\n plt.ylabel(\"CPU Utilization (%)\", fontsize=\"large\")\r\n # plt.plot(x, user, \"b\", ms=10, label=\"User %\")\r\n # plt.plot(x, system, \"r\", ms=10, label=\"System %\")\r\n plt.plot(x, all_cpu_used, \"b\", ms=10, label=\"Used %\")\r\n plt.legend()\r\n plt.savefig(zitem.replace(\".json\", \"_CPU-Time.png\"))\r\n #RAM\r\n RAM = js[\"RAM_list\"]\r\n plt.cla()\r\n x = [item * time_interval for item in range(len(RAM))]\r\n plt.xlabel(\"Time (s)\", fontsize=\"large\")\r\n plt.ylabel(\"RAM (MB)\", fontsize=\"large\")\r\n plt.plot(x, RAM, \"b\", ms=10, label=\"RAM\")\r\n plt.legend()\r\n plt.savefig(zitem.replace(\".json\", \"_RAM-Time.png\"))\r\n mem = js[\"gpu_memory\"]\r\n x = [item * time_interval for item in range(len(mem))]\r\n plt.cla()\r\n plt.xlabel(\"Time (s)\", fontsize=\"large\")\r\n plt.ylabel(\"GPU Memory (MB)\", fontsize=\"large\")\r\n plt.plot(x, mem, \"b\", ms=10, label=\"a\")\r\n plt.savefig(zitem.replace(\".json\", \"_GPU-Time.png\"), dpi=300)\r\n count_set = set(mem)\r\n # select the stable memory\r\n # count_list = []\r\n # for citem in count_set:\r\n # cts = mem.count(citem)\r\n # if cts > 0.02 * len(mem):\r\n # count_list.append(citem)\r\n # directly count the max GPU memory\r\n max_mem = max(count_set)\r\n csv_l.append(np.round(max_mem))\r\n csv_l.append(np.round(sum(mem) * time_interval))\r\n csv_l.append(np.round(max(all_cpu_used),2))\r\n csv_l.append(np.round(sum(all_cpu_used) * time_interval,2))\r\n csv_l.append(np.round(max(RAM),2))\r\n csv_l.append(np.round(sum(RAM) * time_interval))\r\n alldata.append(csv_l)\r\n\r\n f = open(csv_path, \"w\",newline='')\r\n writer = csv.writer(f)\r\n writer.writerow([\"Name\", \"Time\", \"MaxGPU_Mem\", \"AUC_GPU_Time\",'MaxCPU_Utilization','AUC_CPU_Time','MaxRAM'\r\n ,'AUC_RAM_Time' ])\r\n for i in alldata:\r\n writer.writerow(i)\r\n f.close()\r\n","sub_path":"FLARE23/load_json.py","file_name":"load_json.py","file_ext":"py","file_size_in_byte":4066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"505995720","text":"from flask import Flask, render_template, request\nimport pickle\n\n\napp = Flask(__name__, template_folder='templates')\nmodel = pickle.load(open(\"model.pkl\", 'rb'))\ncv = pickle.load(open('count_vect', 'rb'))\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n\n@app.route('/predict', methods=['GET','POST'])\ndef predict():\n message = request.form.get('text')\n data = [message]\n vect = cv.transform(data).toarray()\n pred = model.predict(vect)\n return render_template('result.html', prediction = pred, msg = message)\n\n\nif __name__ == '__main__':\n import warnings\n warnings.warn(\"use 'python -m nltk', not 'python -m nltk.downloader'\", DeprecationWarning)\n app.run(port=5000, debug=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"588636614","text":"import subprocess\nimport time\n\ndef getContainerInterface(cid):\n\n cmd =\"\".join([\"docker exec -it \",cid,\" ip a l eth0 | awk -F ':' '{if (NR==1) print $1+1}'\"])\n interface_index = subprocess.Popen(cmd,stdout=subprocess.PIPE,shell=True).communicate()[0].split()[0]\n cmd = \"\".join([\"ip link ls | grep '^\",interface_index,\"'|awk -F ':' '{print $2}'\"])\n output = subprocess.Popen(cmd,stdout=subprocess.PIPE,shell=True).communicate()[0].split()[0]\n return output\n\ndef getCpuUtil(cid):\n cmd = \"\".join([\"cat /sys/fs/cgroup/cpuacct/docker/\",cid,\"/cpuacct.stat | awk '{print $2}'\"])\n output = subprocess.Popen(cmd,stdout=subprocess.PIPE,shell=True).communicate()[0].split(\"\\n\")\n return (time.time(),sum([int(v) for v in output if v]))\n\ndef getMemoryUtil(cid):\n cmd = \"\".join([\"cat /sys/fs/cgroup/memory/docker/\",cid,\"/memory.usage_in_bytes\"])\n output = subprocess.Popen(cmd,stdout=subprocess.PIPE,shell=True).communicate()[0].split(\"\\n\")[0]\n return (time.time(),output)\n\n","sub_path":"search/sensors/helpers/getContainerInfo.py","file_name":"getContainerInfo.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"468254529","text":"import requests\nfrom PIL import Image\nfrom io import BytesIO\n\nimport streamlit as st\n\nfrom app.abstract_classes.abstract_navigation_radio import AbstractNavigationRadio\n\nTSDS_BANNER_URL = \"https://raw.githubusercontent.com/thatscotdatasci/thatscotdatasci.github.io/master/assets/images/homepage_1800.jpg\"\n\n\nclass HomeRadio(AbstractNavigationRadio):\n\n name = \"Home\"\n _display_header = False\n\n def _action(self):\n\n\n st.markdown(\"\"\"\n # ThatScotDataSci Streamlit Example App\n \"\"\")\n\n # Display the TSDS banner\n tsds_banner_data = requests.get(TSDS_BANNER_URL)\n tsds_banner = Image.open(BytesIO(tsds_banner_data.content))\n st.image(tsds_banner, use_column_width=True)\n\n st.markdown(\"\"\"\n This is a super simple, starter for 10 Streamlit app with the purpose of demoing functionality, and documenting \n how to run and deploy a Stremlit app to Heroku using GitHub.\n \n I've also been working on creating re-usable objects to aid with creating and organising multi-page Streamlit \n apps in a clean manor.\n \n Use the radio buttons in the sidebar to select a page to view.\n \"\"\")\n","sub_path":"app/navigation_radios/home_radio.py","file_name":"home_radio.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"57467898","text":"import horse_solver as hs\nimport graph_utils as gu\nfrom os import listdir\nfrom os.path import isfile, join\nimport os\nimport sys\n\ndef soln_file_to_list():\n '''\n Converts the contents of a solution file soln_file to a list of integers where\n each integer is the score for the ith subproblem\n\n Output\n scores - a list of scores\n '''\n scores = []\n with open('scores.out', 'r') as score_file:\n for line in score_file:\n scores += [int(line)]\n return scores\n\ndef output_file_to_list():\n '''\n Converts the contents of an output file to a list of lists where each component\n list is a solution to the horse racing problem\n \n Output:\n solutions - a list of strings, where each string is a solution to the ith graph\n '''\n solutions = []\n with open('output.out', 'r') as output_file:\n for line in output_file:\n solutions.append(line)\n return solutions\n\nif __name__ == '__main__':\n dir_path = sys.argv[1] #must be written fully\n d_type = sys.argv[2]\n solutions = []\n scores = []\n max_scores = []\n proportions = []\n\n old_scores_exist = False\n if os.path.isfile('./scores.out'):\n old_scores_exist = True\n old_scores = soln_file_to_list()\n if os.path.isfile('./output.out'):\n old_solutions = output_file_to_list()\n for i in range(1, 601):\n f_name = str(i) + \".in\"\n f_path = dir_path + f_name\n G = gu.import_graph(f_path)\n soln, score = hs.solve(G, d_type)\n min_score, max_score = gu.min_max_score(G)\n proportion = 100.0 * score/(1e-5 + max_score * 1.0)\n print(\"Solved {} with score {}, {}% of maximum score\".format(f_name, score, proportion))\n solutions += [soln]\n scores += [score]\n max_scores += [max_score]\n proportions += [proportion]\n\n output = open('output.out', 'wr')\n score_file = open('scores.out', 'wr')\n num_updates = 0\n for i in range(600):\n if old_scores_exist:\n if scores[i] > old_scores[i]:\n #new solution is better, update it\n output.write(solutions[i] + \"\\n\")\n score_file.write(repr(scores[i]) + \"\\n\")\n num_updates += 1\n else:\n #old solution is better, keep it\n output.write(old_solutions[i])\n score_file.write(repr(old_scores[i]) + \"\\n\")\n else:\n output.write(solutions[i] + \"\\n\")\n score_file.write(repr(scores[i]) + \"\\n\")\n num_updates += 1\n output.close()\n score_file.close()\n print(\"Total Score: {}\".format(gu.total_score(scores, max_scores)))\n print(\"Updated solutions to {} graphs\".format(num_updates))\n\n","sub_path":"solve_inputs.py","file_name":"solve_inputs.py","file_ext":"py","file_size_in_byte":2725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"623856226","text":"from django.db import connection\nfrom django.http import HttpResponse\nfrom django.views.decorators.csrf import csrf_exempt\nfrom rest_framework.renderers import JSONRenderer\n\n\nclass JSONResponse(HttpResponse):\n\n def __init__(self, data, **kwargs):\n content = JSONRenderer().render(data)\n kwargs['content_type'] = 'application/json'\n super(JSONResponse, self).__init__(content, **kwargs)\n\n\n@csrf_exempt\ndef status(request):\n if request.method == 'GET':\n message = ''\n try:\n c = connection.cursor()\n c.execute('SELECT 1')\n row = c.fetchone()\n db_ready = row[0] == 1\n return JSONResponse({\n 'db': {\n 'ready': db_ready,\n 'message': message\n }\n })\n except DatabaseError as e:\n message = str(e)\n finally:\n c.close()\n","sub_path":"cla_backend/apps/status/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"398817230","text":"import logging\nimport re\nimport sys\nfrom dataclasses import dataclass, field\nfrom http.cookies import Morsel # noqa\nfrom pathlib import Path\nfrom types import SimpleNamespace\nfrom typing import Awaitable, Dict, Iterator, List, Optional, Tuple, TypeVar\n\nimport aiohttp\nimport click\n\nfrom neuromation.api import Client, Factory, gen_trace_id\nfrom neuromation.api.config import _ConfigData\n\nfrom .asyncio_utils import Runner\n\n\nlog = logging.getLogger(__name__)\n\n\nTEXT_TYPE = (\"application/json\", \"text\", \"application/x-www-form-urlencoded\")\n\nHEADER_TOKEN_PATTERN = re.compile(\n r\"(Bearer|Basic|Digest|Mutual)\\s+(?P[^ ]+\\.[^ ]+\\.[^ ]+)\"\n)\n\n\n_T = TypeVar(\"_T\")\n\n\n@dataclass\nclass Root:\n color: bool\n tty: bool\n terminal_size: Tuple[int, int]\n disable_pypi_version_check: bool\n network_timeout: float\n config_path: Path\n trace: bool\n verbosity: int\n trace_hide_token: bool\n command_path: str\n command_params: List[Dict[str, Optional[str]]]\n skip_gmp_stats: bool\n\n _client: Optional[Client] = None\n _factory: Optional[Factory] = None\n _runner: Runner = field(init=False)\n\n def __post_init__(self) -> None:\n self._runner = Runner(debug=self.verbosity >= 2)\n self._runner.__enter__()\n\n def close(self) -> None:\n if self._client is not None:\n self.run(self._client.close())\n\n try:\n # Suppress prints unhandled exceptions\n # on event loop closing\n sys.stderr = None # type: ignore\n self._runner.__exit__(*sys.exc_info())\n finally:\n sys.stderr = sys.__stderr__\n\n def run(self, main: Awaitable[_T]) -> _T:\n return self._runner.run(main)\n\n @property\n def _config(self) -> _ConfigData:\n assert self._client is not None\n return self._client.config._config_data\n\n @property\n def quiet(self) -> bool:\n return self.verbosity < 0\n\n @property\n def timeout(self) -> aiohttp.ClientTimeout:\n return aiohttp.ClientTimeout(\n None, None, self.network_timeout, self.network_timeout\n )\n\n @property\n def client(self) -> Client:\n assert self._client is not None\n return self._client\n\n @property\n def factory(self) -> Factory:\n if self._factory is None:\n trace_configs: Optional[List[aiohttp.TraceConfig]]\n if self.trace:\n trace_configs = [self._create_trace_config()]\n else:\n trace_configs = None\n self._factory = Factory(\n path=self.config_path,\n trace_configs=trace_configs,\n trace_id=gen_trace_id(),\n )\n return self._factory\n\n async def init_client(self) -> Client:\n if self._client is not None:\n return self._client\n client = await self.factory.get(timeout=self.timeout)\n\n self._client = client\n return self._client\n\n def _create_trace_config(self) -> aiohttp.TraceConfig:\n trace_config = aiohttp.TraceConfig()\n trace_config.on_request_start.append(self._on_request_start) # type: ignore\n trace_config.on_request_chunk_sent.append(\n self._on_request_chunk_sent # type: ignore\n )\n trace_config.on_request_end.append(self._on_request_end) # type: ignore\n trace_config.on_response_chunk_received.append(\n self._on_response_chunk_received # type: ignore\n )\n return trace_config\n\n def _print_debug(self, lines: List[str]) -> None:\n txt = \"\\n\".join(click.style(line, dim=True) for line in lines)\n click.echo(txt, err=True)\n\n def _process_chunk(self, chunk: bytes, printable: bool) -> List[str]:\n if not chunk:\n return []\n if printable:\n return chunk.decode(errors=\"replace\").split(\"\\n\")\n else:\n return [f\"[binary {len(chunk)} bytes]\"]\n\n async def _on_request_start(\n self,\n session: aiohttp.ClientSession,\n context: SimpleNamespace,\n data: aiohttp.TraceRequestStartParams,\n ) -> None:\n path = data.url.raw_path\n if data.url.raw_query_string:\n path += \"?\" + data.url.raw_query_string\n lines = [f\"> {data.method} {path} HTTP/1.1\"]\n for key, val in data.headers.items():\n if self.trace_hide_token:\n val = self._sanitize_header_value(val)\n lines.append(f\"> {key}: {val}\")\n lines.append(\"> \")\n self._print_debug(lines)\n\n content_type = data.headers.get(\"Content-Type\", \"\")\n context.request_printable = content_type.startswith(TEXT_TYPE)\n\n async def _on_request_chunk_sent(\n self,\n session: aiohttp.ClientSession,\n context: SimpleNamespace,\n data: aiohttp.TraceRequestChunkSentParams,\n ) -> None:\n chunk = data.chunk\n lines = [\n \"> \" + line\n for line in self._process_chunk(chunk, context.request_printable)\n ]\n self._print_debug(lines)\n\n async def _on_request_end(\n self,\n session: aiohttp.ClientSession,\n context: SimpleNamespace,\n data: aiohttp.TraceRequestEndParams,\n ) -> None:\n lines = [f\"< HTTP/1.1 {data.response.status} {data.response.reason}\"]\n for key, val in data.response.headers.items():\n lines.append(f\"< {key}: {val}\")\n self._print_debug(lines)\n\n content_type = data.response.headers.get(\"Content-Type\", \"\")\n context.response_printable = content_type.startswith(TEXT_TYPE)\n\n async def _on_response_chunk_received(\n self,\n session: aiohttp.ClientSession,\n context: SimpleNamespace,\n data: aiohttp.TraceResponseChunkReceivedParams,\n ) -> None:\n chunk = data.chunk\n lines = [\n \"< \" + line\n for line in self._process_chunk(chunk, context.response_printable)\n ]\n self._print_debug(lines)\n\n def _sanitize_header_value(self, text: str) -> str:\n for token in self._find_all_tokens(text):\n token_safe = self._sanitize_token(token)\n text = text.replace(token, token_safe)\n return text\n\n def _sanitize_token(self, token: str) -> str:\n tail_len: int = 5\n # at least a third part of the token should be hidden\n if tail_len >= len(token) // 3:\n return f\"\"\n hidden = f\"\"\n return token[:tail_len] + hidden + token[-tail_len:]\n\n def _find_all_tokens(self, text: str) -> Iterator[str]:\n for match in HEADER_TOKEN_PATTERN.finditer(text):\n yield match.group(\"token\")\n","sub_path":"neuromation/cli/root.py","file_name":"root.py","file_ext":"py","file_size_in_byte":6697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"27374615","text":"from frictionless import validate\n\n\n# Table\n\n\ndef test_validate():\n report = validate(\"data/table.csv\")\n assert report.valid\n\n\ndef test_validate_invalid():\n report = validate(\"data/invalid.csv\")\n assert report.flatten([\"rowPosition\", \"fieldPosition\", \"code\"]) == [\n [None, 3, \"blank-header\"],\n [None, 4, \"duplicate-header\"],\n [2, 3, \"missing-cell\"],\n [2, 4, \"missing-cell\"],\n [3, 3, \"missing-cell\"],\n [3, 4, \"missing-cell\"],\n [4, None, \"blank-row\"],\n [5, 5, \"extra-cell\"],\n ]\n","sub_path":"tests/validate/test_main.py","file_name":"test_main.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"415628276","text":"import sklearn.decomposition\nimport sklearn.ensemble\nimport sklearn.linear_model\nimport sklearn.preprocessing\nfrom sklearn.model_selection import cross_val_predict\n\nfrom baikal import make_step\n\n\ndef _fit_predict_proba(self, X, y):\n self.fit(X, y)\n return cross_val_predict(self, X, y, method=\"predict_proba\")\n\n\ndef _fit_decision_function(self, X, y):\n self.fit(X, y)\n return cross_val_predict(self, X, y, method=\"decision_function\")\n\n\nLinearRegression = make_step(sklearn.linear_model.LinearRegression)\nLogisticRegression = make_step(sklearn.linear_model.LogisticRegression)\nLinearSVC = make_step(sklearn.svm.LinearSVC)\nLinearSVCOOF = make_step(\n sklearn.svm.LinearSVC, attr_dict={\"fit_predict\": _fit_decision_function}\n)\nRandomForestClassifier = make_step(sklearn.ensemble.RandomForestClassifier)\nRandomForestClassifierOOF = make_step(\n sklearn.ensemble.RandomForestClassifier,\n attr_dict={\"fit_predict\": _fit_predict_proba},\n)\nExtraTreesClassifier = make_step(sklearn.ensemble.ExtraTreesClassifier)\nPCA = make_step(sklearn.decomposition.PCA)\nLabelEncoder = make_step(sklearn.preprocessing.LabelEncoder)\nStandardScaler = make_step(sklearn.preprocessing.StandardScaler)\n","sub_path":"tests/helpers/sklearn_steps.py","file_name":"sklearn_steps.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"349298864","text":"#!/usr/bin/env python\n\nimport string\nimport md5\n\ndef dict_all():\n array_all_letters = list(string.printable)\n dict_all_letters = {}\n for x in array_all_letters:\n dict_all_letters[x] = md5.new(x).hexdigest()\n return dict_all_letters\n\ndef crypt(word):\n word_list = list(word)\n for position, item in enumerate(word_list):\n for letter in dict_all().items():\n if item == letter[0]:\n word_list[position] = letter[1]\n crypt = \" \".join(word_list)\n return crypt\n\ndef decrypt(word):\n word_list = word.split()\n for position, item in enumerate(word_list):\n for letter in dict_all().items():\n if item == letter[1]:\n word_list[position] = letter[0]\n decrypt = \"\".join(word_list)\n return decrypt\n\nif __name__ == \"__main__\":\n word = \"This is a blue sky!!! Oh, it's so amazing!\"\n crypted = crypt(word)\n print (\"These are my words encrypted!!!\")\n print (\"\")\n print (\"\")\n print (crypted)\n print (\"\")\n print (\"\")\n\n solved = decrypt(crypted)\n print (\"These are my real words, decrypted!!!\")\n print (\"\")\n print (\"\")\n print (solved)\n print (\"\")\n print (\"\")\n print (\"Thanks for using my program!!!\")\n","sub_path":"various/cypher.py","file_name":"cypher.py","file_ext":"py","file_size_in_byte":1233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"646181295","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nfrom django.utils.encoding import python_2_unicode_compatible\n\nfrom django.db import models\n\n\nclass AbstractTimeStamped(models.Model):\n created = models.DateTimeField(auto_now_add=True)\n updated = models.DateTimeField(auto_now=True)\n\n class Meta:\n abstract = True\n\n\n@python_2_unicode_compatible\nclass Database(AbstractTimeStamped):\n name = models.CharField(max_length=255, unique=True)\n\n class Meta:\n ordering = ['name']\n\n def __str__(self):\n return self.name\n\n\n@python_2_unicode_compatible\nclass Table(AbstractTimeStamped):\n name = models.CharField(max_length=255)\n database = models.ForeignKey(Database)\n\n class Meta:\n unique_together = (('name', 'database',),)\n ordering = ['name']\n\n def __str__(self):\n return self.name\n\n\n@python_2_unicode_compatible\nclass Row(AbstractTimeStamped):\n DATA_TYPE_CHOICES = (\n (\"datetime\", \"datetime\",),\n (\"date\", \"date\",),\n (\"int\", \"int\",),\n (\"bigint\", \"bigint\",),\n (\"varchar(1)\", \"varchar(1)\",),\n (\"varchar(2)\", \"varchar(2)\",),\n (\"varchar(3)\", \"varchar(3)\",),\n (\"varchar(4)\", \"varchar(4)\",),\n (\"varchar(5)\", \"varchar(5)\",),\n (\"varchar(6)\", \"varchar(6)\",),\n (\"varchar(7)\", \"varchar(7)\",),\n (\"varchar(8)\", \"varchar(8)\",),\n (\"varchar(9)\", \"varchar(9)\",),\n (\"varchar(10)\", \"varchar(10)\",),\n (\"varchar(11)\", \"varchar(1)1\",),\n (\"varchar(12)\", \"varchar(12)\",),\n (\"varchar(13)\", \"varchar(13)\",),\n (\"varchar(14)\", \"varchar(14)\",),\n (\"varchar(15)\", \"varchar(15)\",),\n (\"varchar(16)\", \"varchar(16)\",),\n (\"varchar(17)\", \"varchar(17)\",),\n (\"varchar(18)\", \"varchar(18)\",),\n (\"varchar(19)\", \"varchar(19)\",),\n (\"varchar(20)\", \"varchar(20)\",),\n (\"varchar(50)\", \"varchar(50)\",),\n (\"varchar(100)\", \"varchar(100)\",),\n )\n\n class Meta:\n unique_together = (('table', 'data_item',),)\n ordering = ['table', 'data_item']\n\n data_item = models.CharField(max_length=255)\n description = models.TextField()\n data_type = models.CharField(max_length=255, choices=DATA_TYPE_CHOICES)\n derivation = models.TextField()\n data_dictionary_name = models.CharField(max_length=255)\n data_dictionary_link = models.URLField(max_length=500)\n table = models.ForeignKey(Table)\n\n # currently the below are not being shown in the template\n # after requirements are finalised we could consider removing them.\n technical_check = models.CharField(max_length=255, null=True, blank=True)\n is_derived_item = models.NullBooleanField(default=False)\n definition_id = models.IntegerField(null=True, blank=True)\n author = models.CharField(max_length=255, blank=True, null=True)\n created_date_ext = models.DateField(blank=True, null=True)\n\n def __str__(self):\n return \"{} ({}.{})\".format(\n self.data_dictionary_name,\n self.table.name,\n self.table.database.name\n )\n\n\nclass SiteDescription(models.Model):\n description = models.TextField()\n","sub_path":"csv_schema/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"508849869","text":"from django.http import HttpResponse\nfrom django.http import HttpResponseRedirect\nfrom django.template import loader\nfrom django.shortcuts import render\nfrom forms import DestinationForm, CrewForm, CargoForm, ShipForm,VoyageForm, RouteForm,EnumCountrySelection,SearchByCredentialsForm\n\nfrom mydatabase import ObjectsManager\n\n\ndef main(request):\n template = loader.get_template('schedule/main.html')\n\n if request.method == 'POST':\n if 'enumselect' in request.POST:\n routes = ObjectsManager.route.getRelatedToCountryObjects(request.POST['portcountry'])\n cred = SearchByCredentialsForm()\n context = {\n 'routelist': routes, 'buttons': False, 'credform':cred\n }\n return HttpResponse(template.render(context, request))\n\n if 'withb' in request.POST:\n cred = SearchByCredentialsForm()\n\n routes = ObjectsManager.route.getRelatedToCredentialObjects(request.POST['credentials'])\n\n context = {\n 'routelist': routes, 'buttons': False, 'credform': cred\n }\n return HttpResponse(template.render(context, request))\n\n if 'withoutb' in request.POST:\n cred = SearchByCredentialsForm()\n\n routes = ObjectsManager.route.getNotRelatedToCredentialObjects(request.POST['credentials'])\n\n context = {\n 'routelist': routes, 'buttons': False, 'credform': cred\n }\n return HttpResponse(template.render(context, request))\n\n if request.method == 'GET':\n if 'fit' in request.GET:\n ObjectsManager.ports.fitfromfile()\n ObjectsManager.crew.fitfromfile()\n ObjectsManager.cargo.fitfromfile()\n ObjectsManager.ships.fitfromfile()\n return HttpResponseRedirect('/main/')\n\n if 'showcrew' in request.GET:\n return HttpResponseRedirect('/main/crew')\n if 'showcargo' in request.GET:\n return HttpResponseRedirect('/main/cargo')\n if 'showports' in request.GET:\n return HttpResponseRedirect('/main/ports')\n if 'showships' in request.GET:\n return HttpResponseRedirect('/main/ships')\n if 'showvoyage' in request.GET:\n return HttpResponseRedirect('/main/voyage')\n if 'addroute' in request.GET:\n return HttpResponseRedirect('/main/addnew')\n\n if 'onlychina' in request.GET:\n\n routes = ObjectsManager.route.getRelatedToCountryObjects('China')\n cred = SearchByCredentialsForm()\n context = {\n 'routelist': routes, 'buttons': False, 'credform':cred\n }\n return HttpResponse(template.render(context, request))\n\n if 'allroutes' in request.GET:\n\n return HttpResponseRedirect('/main/')\n\n\n routes = ObjectsManager.route.getObjects()\n\n cred = SearchByCredentialsForm()\n\n selectform = EnumCountrySelection()\n context = {\n 'routelist': routes, 'buttons': True, 'enumselect': selectform, 'credform':cred\n }\n\n return HttpResponse(template.render(context, request))\n\n\ndef ports(request):\n if request.method == 'GET':\n if 'back' in request.GET:\n return HttpResponseRedirect('/main/')\n if 'addport' in request.GET:\n return HttpResponseRedirect('/main/ports/addnew')\n\n destinations = ObjectsManager.ports.getObjects()\n template = loader.get_template('schedule/destinations.html')\n context = {\n 'destinationslist': destinations,\n }\n return HttpResponse(template.render(context, request))\n\n\ndef voyage(request):\n if request.method == 'GET':\n if 'back' in request.GET:\n return HttpResponseRedirect('/main/')\n if 'addvoyage' in request.GET:\n return HttpResponseRedirect('/main/voyage/addnew')\n\n voyages = ObjectsManager.voyage.getObjects()\n template = loader.get_template('schedule/voyage.html')\n context = {\n 'voyagelist': voyages,\n }\n\n return HttpResponse(template.render(context, request))\n\n\ndef crew(request):\n if request.method == 'GET':\n if 'back' in request.GET:\n return HttpResponseRedirect('/main/')\n if 'addcrew' in request.GET:\n return HttpResponseRedirect('/main/crew/addnew')\n\n passengers = ObjectsManager.crew.getObjects()\n\n template = loader.get_template('schedule/crew.html')\n context = {\n 'passengerslist': passengers,\n }\n\n return HttpResponse(template.render(context, request))\n\n\ndef cargo(request):\n if request.method == 'GET':\n if 'back' in request.GET:\n return HttpResponseRedirect('/main/')\n if 'addcargo' in request.GET:\n return HttpResponseRedirect('/main/cargo/addnew')\n\n car = ObjectsManager.cargo.getObjects()\n\n template = loader.get_template('schedule/cargo.html')\n context = {\n 'cargolist': car,\n }\n return HttpResponse(template.render(context, request))\n\n\ndef ships(request):\n if request.method == 'GET':\n if 'back' in request.GET:\n return HttpResponseRedirect('/main/')\n if 'addship' in request.GET:\n return HttpResponseRedirect('/main/ships/addnew')\n\n shipsl = ObjectsManager.ships.getObjects()\n\n template = loader.get_template('schedule/ships.html')\n context = {\n 'shipslist': shipsl,\n }\n return HttpResponse(template.render(context, request))\n\n\ndef addcrew(request):\n if request.method == 'POST':\n if 'addcrew' in request.POST:\n crew = ObjectsManager.crew\n crew.additem(request.POST['crewname'], request.POST['crewsirname'],\n request.POST['crewsex'], request.POST['crewage'], request.POST['crewnationality'])\n return HttpResponseRedirect('/main/crew/')\n\n template = loader.get_template('schedule/adding/addcrew.html')\n\n crewform = CrewForm()\n context = {\n 'crewform': crewform,\n }\n return HttpResponse(template.render(context, request))\n\n\ndef addport(request):\n if request.method == 'POST':\n if 'addport' in request.POST:\n port = ObjectsManager.ports\n port.additem(request.POST['portname'], request.POST['portlocation'])\n return HttpResponseRedirect('/main/ports/')\n\n template = loader.get_template('schedule/adding/addport.html')\n portform = DestinationForm()\n context = {\n 'portform': portform,\n }\n return HttpResponse(template.render(context, request))\n\n\ndef addvoyage(request):\n if request.method == 'POST':\n if 'addvoyage' in request.POST:\n voyage = ObjectsManager.voyage\n voyage.additem(request.POST['portfrom'], request.POST['portto'], request.POST['fromdate'],\n request.POST['todate'])\n return HttpResponseRedirect('/main/voyage/')\n\n template = loader.get_template('schedule/adding/addvoyage.html')\n\n voyageform = VoyageForm()\n context = {\n 'voyageform': voyageform,\n }\n return HttpResponse(template.render(context, request))\n\n\ndef addroute(request):\n if request.method == 'POST':\n if 'addroute' in request.POST:\n route = ObjectsManager.route\n route.additem(request.POST['svoyage'], request.POST['scargo'], request.POST['sship'], request.POST['screw'])\n return HttpResponseRedirect('/main/')\n\n template = loader.get_template('schedule/adding/addroute.html')\n routeform = RouteForm()\n context = {\n 'routeform': routeform,\n }\n return HttpResponse(template.render(context, request))\n\n\ndef addcargo(request):\n if request.method == 'POST':\n if 'addcargo' in request.POST:\n cargo = ObjectsManager.cargo\n cargo.additem(request.POST['cargoname'], request.POST['cargoweight'], request.POST['cargoprice'])\n return HttpResponseRedirect('/main/cargo/')\n\n template = loader.get_template('schedule/adding/addcargo.html')\n cargoform = CargoForm()\n context = {\n 'cargoform': cargoform\n }\n return HttpResponse(template.render(context, request))\n\n\ndef addship(request):\n if request.method == 'POST':\n if 'addship' in request.POST:\n ship = ObjectsManager.ships\n ship.additem(request.POST['shipname'], request.POST['shipcountry'], request.POST['shiptype'])\n return HttpResponseRedirect('/main/ships/')\n\n template = loader.get_template('schedule/adding/addship.html')\n shipform = ShipForm()\n context = {\n 'shipform': shipform,\n }\n return HttpResponse(template.render(context, request))\n\n\ndef cargoedit(request, cargoid):\n if request.method == 'POST':\n if 'editcargob' in request.POST:\n cargo = ObjectsManager.cargo\n cargo.edititem(cargoid, request.POST['cargoname'], request.POST['cargoweight'], request.POST['cargoprice'])\n return HttpResponseRedirect('/main/cargo/')\n\n cargo = ObjectsManager.cargo\n\n updatable = cargo.get_by_id(cargoid)\n updatable = updatable[0]\n req = {'cargoname': updatable[2], 'cargoweight': updatable[1],\n 'cargoprice': updatable[3]}\n form = CargoForm(req)\n\n context = {'cargoeditform': form}\n\n template = loader.get_template('schedule/editing/editcargo.html')\n\n return HttpResponse(template.render(context, request))\n\n\ndef cargodelete(request, cargoid):\n ObjectsManager.cargo.deleteitem(cargoid)\n return HttpResponseRedirect('/main/cargo/')\n\n\ndef crewedit(request, crewid):\n if request.method == 'POST':\n if 'editcrewb' in request.POST:\n crew = ObjectsManager.crew\n crew.edititem(crewid, request.POST['crewname'], request.POST['crewsirname'],\n request.POST['crewsex'], request.POST['crewage'], request.POST['crewnationality'])\n return HttpResponseRedirect('/main/crew/')\n\n crew = ObjectsManager.crew\n\n updatable = crew.get_by_id(crewid)\n updatable = updatable[0]\n req = {'crewname': updatable[1], 'crewsirname': updatable[2],\n 'crewsex': updatable[3], 'crewage': updatable[4], 'crewnationality': updatable[5]}\n\n form = CrewForm(req)\n\n context = {'creweditform': form}\n\n template = loader.get_template('schedule/editing/editcrew.html')\n\n return HttpResponse(template.render(context, request))\n\n\ndef voyageedit(request, voyageid):\n if request.method == 'POST':\n if 'editvoyageb' in request.POST:\n voyage = ObjectsManager.voyage\n voyage.edititem(voyageid, request.POST['portfrom'], request.POST['portto'], request.POST['fromdate'],\n request.POST['todate'])\n return HttpResponseRedirect('/main/voyage/')\n\n voyage = ObjectsManager.voyage\n\n updatable = voyage.get_by_id(voyageid)\n updatable = updatable[0]\n req = {'portfrom': updatable[1], 'portto': updatable[2],\n 'fromdate': updatable[3], 'todate': updatable[4]}\n\n form = VoyageForm(req)\n\n context = {'voyageeditform': form}\n\n template = loader.get_template('schedule/editing/editvoyage.html')\n\n return HttpResponse(template.render(context, request))\n\n\ndef routeedit(request, routeid):\n if request.method == 'POST':\n if 'editrouteb' in request.POST:\n route = ObjectsManager.route\n route.edititem(routeid, request.POST['svoyage'], request.POST['scargo'], request.POST['sship'],\n request.POST['screw'])\n return HttpResponseRedirect('/main/')\n\n route = ObjectsManager.route\n\n updatable = route.get_by_id(routeid)\n updatable = updatable[0]\n req = {'svoyage': updatable[1], 'scargo': updatable[2],\n 'sship': updatable[3], 'screw': updatable[4]}\n\n form = RouteForm(req)\n\n context = {'routeeditform': form}\n\n template = loader.get_template('schedule/editing/editroute.html')\n\n return HttpResponse(template.render(context, request))\n\n\ndef crewdelete(request, crewid):\n ObjectsManager.crew.deleteitem(crewid)\n return HttpResponseRedirect('/main/crew/')\n\n\ndef shipedit(request, shipid):\n if request.method == 'POST':\n if 'editshipb' in request.POST:\n ship = ObjectsManager.ships\n ship.edititem(shipid, request.POST['shipname'], request.POST['shipcountry'], request.POST['shiptype'])\n return HttpResponseRedirect('/main/ships/')\n\n ship = ObjectsManager.ships\n\n updatable = ship.get_by_id(shipid)\n updatable = updatable[0]\n req = {'shipname': updatable[1], 'shiptype': updatable[2],\n 'shipcountry': updatable[3]}\n\n form = ShipForm(req)\n\n context = {'shipeditform': form}\n template = loader.get_template('schedule/editing/editship.html')\n\n return HttpResponse(template.render(context, request))\n\n\ndef shipdelete(request, shipid):\n ObjectsManager.ships.deleteitem(shipid)\n return HttpResponseRedirect('/main/ships/')\n\n\ndef portedit(request, portid):\n if request.method == 'POST':\n if 'editportb' in request.POST:\n port = ObjectsManager.ports\n port.edititem(portid, request.POST['portname'], request.POST['portlocation'])\n return HttpResponseRedirect('/main/ports/')\n\n port = ObjectsManager.ports\n\n updatable = port.get_by_id(portid)\n updatable = updatable[0]\n req = {'portname': updatable[1], 'portlocation': updatable[2]}\n\n form = DestinationForm(req)\n\n context = {'editportform': form}\n\n template = loader.get_template('schedule/editing/editport.html')\n\n return HttpResponse(template.render(context, request))\n\n\ndef portdelete(request, portid):\n ObjectsManager.ports.deleteitem(portid)\n return HttpResponseRedirect('/main/ports/')\n\n\ndef voyagedelete(request, voyageid):\n ObjectsManager.voyage.deleteitem(voyageid)\n return HttpResponseRedirect('/main/voyage/')\n\n\ndef routedelete(request, routeid):\n ObjectsManager.route.deleteitem(routeid)\n return HttpResponseRedirect('/main/')\n","sub_path":"schedule/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":13919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"367155441","text":"import requests\nfrom config import *\n\n@celery.task\ndef post_data(data,r_url,revert):\n if r_url[:7] != 'http://':\n r_url = 'http://'+r_url\n\n __data = {}\n\n if(isinstance(data,list)):\n __data = {\n \"result\":data,\n \"verdict\":0,\n \"revert\":revert\n }\n else:\n __data = {\n \"result\":data[\"message\"],\n \"verdict\":data[\"verdict\"],\n \"revert\":revert\n }\n requests.post(r_url,json=__data)\n return\n\n","sub_path":"core/post.py","file_name":"post.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"180919874","text":"balance = input(\"Balance: \")\nbalanceaux = balance\nannualInterestRate = input(\"Annual Interest Rate: \")\nmonthlyInterestRate = annualInterestRate / 12.0\n\nlowBound = balance / 12.0\nupperBound = (balance * (1 + monthlyInterestRate)**12) / 12.0\n\nwhile abs(balanceaux) > 0.01: \n midBound = (lowBound + upperBound) / 2.0\n balanceaux = balance\n for month in range(1, 13):\n balanceaux = balanceaux - midBound\n balanceaux = balanceaux * (1 + monthlyInterestRate)\n if (balanceaux > 0.01):\n lowBound = midBound\n elif (balanceaux < 0.01):\n upperBound = midBound\n \nprint(\"Payment: %.2f\" % midBound)","sub_path":"mit6001x/pset2/p3.py","file_name":"p3.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"30754375","text":"from PIL import Image\nimport os, shutil\n\n#Image Compress executed\n\ndef compressImage(srcPath, dstPath):\n \n # Traversal the files name in the source directory.\n for filename in os.listdir(srcPath):\n \n # if the directory does not exist,make it.Keep level structure.\n if not os.path.exists(srcPath):\n os.makedirs(dstPath)\n\n # Splicing complete file or folder paths.\n srcFile = os.path.join(srcPath, filename)\n dstFile = os.path.join(dstPath, filename)\n\n # if it's a file,executed it.\n if os.path.isfile(srcFile):\n \n try:\n \n # Open the original image and save it after compress.\n # (It can be used \"if scrFile.endswith(\".jpg\") or split , splitext etc.. functions compression for specific files.\n sImg = Image.open(srcFile)\n\n w, h = sImg.size\n\n # Set the compression size and options. ! Alert That The Dimensions Are In BRACKETS.\n dImg = sImg.resize((int(w / 2), int(h / 2), Image.ANTIALIAS))\n\n # It can be used \"srcFile\" orginal path to save,or change the suffix to save. \n # After the save function,it can be add compression encoding options such as JPG.\n dImg.save(dstFile)\n\n print(dstFile + \"Done!\")\n\n except Exception:\n \n print(dstFile + \"Fail!\")\n\n # if it's a folder,Recursion.\n if os.path.isdir(srcFile):\n \n compressImage(srcFile, dstFile)\n\n\nif __name__ == '__main__':\n\n # Traversal the image to be added. \n path = os.walk(\"./prepare\")\n\n for root , dirs, files in path:\n for f in files:\n\n # Moving the files\n shutil.move(os.path.join(root, f), os.path.join(\"./finish\", f))\n\n\n # Traversal and Delete the image to be compression.\n path = os.walk(\"./compress\")\n \n for root, dirs, files in path:\n for f in files:\n os.remove(os.path.join(root, f))\n\n\n # Traversal the image to be compression.\n compressImage(\"./finish\", \"./compress\")\n\n","sub_path":"resize-v2.py","file_name":"resize-v2.py","file_ext":"py","file_size_in_byte":2139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"276392779","text":"from django.contrib.auth import authenticate,login, logout\nfrom django.http import HttpResponse\nfrom django.shortcuts import render, redirect\nfrom .forms import ExtendedUserCreationForm, UserProfileForm\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.contrib.auth.forms import AuthenticationForm\nfrom django.contrib.auth.models import User\nfrom django.contrib import messages\n\n\n@csrf_exempt\ndef register(request):\n if request.method == \"POST\":\n form = ExtendedUserCreationForm(request.POST)\n profile_form = UserProfileForm(request.POST)\n\n if form.is_valid() and profile_form.is_valid():\n user = form.save()\n profile = profile_form.save(commit=False)\n profile.user = user\n profile.organization_name = profile_form.cleaned_data.get(\"organization_name\").lower()\n profile.save()\n login(request, user, backend='django.contrib.auth.backends.ModelBackend')\n username = User.objects.get(username=form.cleaned_data.get(\"username\"))\n messages.success(request, 'Account created for username %s' % username)\n #return redirect('users/register.html')\n\n else:\n form = ExtendedUserCreationForm()\n profile_form = UserProfileForm()\n\n context = {'form': form, 'profile_form': profile_form}\n return render(request, 'users/register.html', context)\n\n\n@csrf_exempt\ndef user_login(request):\n if request.method == \"POST\":\n form = AuthenticationForm(data=request.POST)\n if form.is_valid():\n username = User.objects.get(username=form.cleaned_data.get(\"username\"))\n user = form.get_user()\n login(request, user)\n messages.success(request, \"Hello {}, you are logged in!\".format(username))\n return redirect('home')\n else:\n messages.error(request, 'Invalid username or password')\n return redirect('login')\n else:\n form = AuthenticationForm()\n\n context = {'form': form}\n return render(request, 'users/login.html', context)\n\n\ndef user_logout(request):\n logout(request)\n messages.success(request, \"You have been logged out\")\n return redirect('home')","sub_path":"kudos/users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"525839674","text":"#!/usr/bin/env python\n\n__author__ = 'frankojis'\n\nimport psutil\nimport datetime\n\n\ndef cpuinfo():\n cores = dict()\n cores['pCores'] = psutil.cpu_count(logical=False)\n cores['vCores'] = psutil.cpu_count()\n return cores\n\n\ndef raminfo():\n ram = dict()\n \"\"\" Main memory Statistics \"\"\"\n main = dict()\n main['total'] = psutil.virtual_memory().total\n main['available'] = psutil.virtual_memory().available\n main['free'] = psutil.virtual_memory().free\n main['used'] = psutil.virtual_memory().used\n main['buffers'] = psutil.virtual_memory().buffers\n main['cached'] = psutil.virtual_memory().cached\n '''Swap memory information'''\n swap = dict()\n swap['total'] = psutil.swap_memory().total\n swap['free'] = psutil.swap_memory().free\n swap['used'] = psutil.swap_memory().used\n swap['in'] = psutil.swap_memory().sin\n swap['out'] = psutil.swap_memory().sout\n ram['main_memory'] = main\n ram['swap_memory'] = swap\n return ram\n\n\ndef filesysteminfo():\n \"\"\"Get disk metrics of mounted physical devices alone\"\"\"\n stats = []\n for disk in psutil.disk_partitions(all=False):\n try:\n metrics = psutil.disk_usage(disk.mountpoint)\n stats.append(\n {\"Filesystem\": disk.device, \"Mounted_on\": disk.mountpoint, \"Type\": disk.fstype,\n \"Size\": metrics.total,\n \"Used\": metrics.used, \"Avail\": metrics.free, \"Use_percent\": metrics.percent})\n except OSError:\n continue\n '''I/O of all devices'''\n counters = []\n io = psutil.disk_io_counters(perdisk=True)\n for i in io:\n counters.append(\n {\"disk\": i, \"rCount\": io[i].read_count, \"wCount\": io[i].write_count, \"rBytes\": io[i].read_bytes,\n \"wBytes\": io[i].write_bytes,\n \"rTime\": io[i].read_time, \"wTime\": io[i].write_time})\n disk = {\"statistics\": stats, \"counters\": counters}\n return disk\n\n\ndef uptime():\n \"\"\"time when the server was last rebooted\"\"\"\n boottime = datetime.datetime.fromtimestamp(psutil.boot_time()).strftime(\"%b %d,%Y %H:%M:%S\")\n users = []\n '''logged in users'''\n for user in psutil.users():\n users.append({\"username\": user.name, \"host\": user.host,\n \"login\": datetime.datetime.fromtimestamp(user.started).strftime(\"%b %d,%Y %H:%M:%S\")})\n uptime = {\"boot\": boottime, \"users\": users}\n return uptime\n\n\ndef netstat():\n import socket\n\n \"\"\"Mapping protocol names based on the socket family and type\"\"\"\n protocols = {(socket.AF_INET, socket.SOCK_STREAM): 'tcp',\n (socket.AF_INET6, socket.SOCK_STREAM): 'tcp6',\n (socket.AF_INET, socket.SOCK_DGRAM): 'udp',\n (socket.AF_INET6, socket.SOCK_DGRAM): 'udp6'}\n connections = psutil.net_connections(kind='inet')\n traffic = []\n for c in connections:\n proto, laddr, faddr, status, PID = protocols[(c.family, c.type)], c.laddr, c.raddr, c.status, c.pid\n try:\n proc = psutil.Process(PID).name()\n except psutil.NoSuchProcess:\n continue\n traffic.append(\n {\"protocol\": proto, \"local\": laddr, \"foreign\": faddr, \"state\": status, \"pid\": PID, \"process\": proc})\n return traffic\n\n\ndef io_counters():\n io_c = psutil.net_io_counters(pernic=False)\n counter = dict()\n sent = dict()\n sent['bytes'] = io_c.bytes_sent\n sent['packets'] = io_c.packets_sent\n sent['errors'] = io_c.errout\n sent['drops'] = io_c.dropout\n counter['sent'] = sent\n received = dict()\n received['bytes'] = io_c.bytes_recv\n received['packets'] = io_c.packets_recv\n received['errors'] = io_c.errin\n received['drops'] = io_c.dropin\n counter['received'] = received\n return counter\n\n\ndef main():\n import helpers\n import socket\n\n ip = helpers.getIpAddress()\n hostname = socket.gethostname()\n hostinfo = dict(ip=ip, hostname=hostname, cpu=cpuinfo(), ram=raminfo(), bootinfo=uptime(),\n filesystem=filesysteminfo(), counters=io_counters(), traffic=netstat(),\n time=datetime.datetime.now())\n return hostinfo\n","sub_path":"client/redhat/src/host.py","file_name":"host.py","file_ext":"py","file_size_in_byte":4115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"420112734","text":"import torch\nfrom torch.utils.data import Dataset\nfrom torchvision import transforms\nimport torchvision.transforms.functional as F\n\nimport os\nfrom PIL import Image\nimport random\nimport numpy as np\nimport imageio\nfrom skimage.feature import canny\nfrom skimage.color import rgb2gray, gray2rgb\nfrom skimage.viewer import ImageViewer\n\n\nclass RSMaskDataSet(Dataset):\n def __init__(self):\n self.transform_train = transforms.Compose(\n [transforms.Resize((256, 256)),\n transforms.ToTensor(),\n transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])\n\n self.transform_mask = transforms.Compose(\n [transforms.Resize((256, 256)),\n transforms.ToTensor()])\n\n # load all image\n # dir_name_train = self.listdir(r'E:\\DataSet\\NWPU-RESISC45-dataset\\NWPU-RESISC45\\NWPU-RESISC45')\n # self.data_list_train = []\n # for i in dir_name_train:\n # self.data_list_train = self.data_list_train + self.listdir(i)\n # print('Read ' + str(len(self.data_list_train)) + ' images')\n\n # load part image\n dir_name_train = self.listdir(r'E:\\DataSet\\NWPU-RESISC45-dataset\\NWPU-RESISC45\\NWPU-RESISC45\\freeway')\n # dir_name_train = self.listdir(r'E:\\DataSet\\UCMerced_LandUse\\Images\\freeway')\n\n self.data_list_train = dir_name_train\n print('Read ' + str(len(self.data_list_train)) + ' images')\n\n data_route_mask = r'E:\\DataSet\\mask\\mask_test - delete\\testing_mask_dataset'\n self.data_list_mask = self.listdir(data_route_mask)\n print('Read ' + str(len(self.data_list_mask)) + ' images')\n\n def __len__(self):\n return len(self.data_list_train)\n\n def __getitem__(self, item):\n img = self.data_list_train[item]\n img = Image.open(img)\n img = self.transform_mask(img.convert('RGB'))\n\n mask = self.data_list_mask[random.randint(0, len(self.data_list_mask) - 1)]\n mask = Image.open(mask)\n mask = self.transform_mask(mask)\n mask[mask > 0.5] = 1\n mask[mask < 0.5] = 0\n\n edge = self.get_edge(self.data_list_train[item])\n edge = Image.fromarray(edge)\n edge = self.transform_mask(edge)\n\n return img, edge, mask\n\n @staticmethod\n def listdir(path):\n name_list = []\n for file in os.listdir(path):\n name_list.append(os.path.join(path, file))\n return name_list\n\n @staticmethod\n def get_edge(img_name):\n img = imageio.imread(img_name)\n\n # gray to rgb\n if len(img.shape) < 3:\n img = gray2rgb(img)\n\n # create grayscale image\n img_gray = rgb2gray(img)\n\n result = canny(img[:, :, 0], sigma=2).astype(np.uint8)\n result[result < 0.5] = 0\n result[result > 0.5] = 255\n\n return result\n","sub_path":"Deep Learning/Image Inpainting/EdgeConnect/data/RS_Mask_Data.py","file_name":"RS_Mask_Data.py","file_ext":"py","file_size_in_byte":2817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"478133489","text":"# Copyright (c) 2019 Horizon Robotics. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom collections import namedtuple\n\nimport gin.tf\nimport tensorflow as tf\n\nfrom tf_agents.networks.network import Network\n\nfrom alf.algorithms.algorithm import Algorithm, AlgorithmStep, LossInfo\nfrom alf.data_structures import ActionTimeStep\nfrom alf.utils.normalizers import ScalarAdaptiveNormalizer\nfrom alf.utils.normalizers import AdaptiveNormalizer\nfrom alf.algorithms.icm_algorithm import ICMInfo\n\n\n@gin.configurable\nclass RNDAlgorithm(Algorithm):\n \"\"\"Exploration by Random Network Distillation, Burda et al. 2019.\n\n This module generates the intrinsic reward based on the prediction errors of\n randomly generated state embeddings.\n\n Suppose we have a fixed randomly initialized target network g: s -> e_t and\n a trainable predictor network h: s -> e_p, then the intrinsic reward is\n\n r = |e_t - e_p|^2\n\n The reward is expected to be higher for novel states.\n \"\"\"\n\n def __init__(self,\n target_net: Network,\n predictor_net: Network,\n encoder_net: Network = None,\n reward_adapt_speed=None,\n observation_adapt_speed=None,\n observation_spec=None,\n learning_rate=None,\n clip_value=-1.0,\n stacked_frames=True,\n name=\"RNDAlgorithm\"):\n \"\"\"\n Args:\n encoder_net (Network): a shared network that encodes observation to\n embeddings before being input to `target_net` or `predictor_net`;\n its parameters are not trainable\n target_net (Network): the random fixed network that generates target\n state embeddings to be fitted\n predictor_net (Network): the trainable network that predicts target\n embeddings. If fully trained given enough data, predictor_net\n will become target_net eventually.\n reward_adapt_speed (float): speed for adaptively normalizing intrinsic\n rewards; if None, no normalizer is used\n observation_adapt_speed (float): speed for adaptively normalizing\n observations. Only useful if `observation_spec` is not None.\n observation_spec (TensorSpec): the observation tensor spec; used\n for creating an adaptive observation normalizer\n learning_rate (float): the learning rate for prediction cost; if None,\n a global learning rate will be used\n clip_value (float): if positive, the rewards will be clipped to\n [-clip_value, clip_value]; only used for reward normalization\n stacked_frames (bool): a boolean flag indicating whether the input\n observation has stacked frames. If True, then we only keep the\n last frame for RND to make predictions on, as suggested by the\n original paper Burda et al. 2019. For Atari games, this flag is\n usually True (`frame_stacking==4`).\n name (str):\n \"\"\"\n optimizer = None\n if learning_rate is not None:\n optimizer = tf.optimizers.Adam(learning_rate=learning_rate)\n super(RNDAlgorithm, self).__init__(\n train_state_spec=(), optimizer=optimizer, name=name)\n self._encoder_net = encoder_net\n self._target_net = target_net # fixed\n self._predictor_net = predictor_net # trainable\n if reward_adapt_speed is not None:\n self._reward_normalizer = ScalarAdaptiveNormalizer(\n speed=reward_adapt_speed)\n self._reward_clip_value = clip_value\n else:\n self._reward_normalizer = None\n\n self._stacked_frames = stacked_frames\n if stacked_frames and (observation_spec is not None):\n # Assuming stacking in the last dim, we only keep the last frame.\n shape = observation_spec.shape\n new_shape = shape[:-1] + (1, )\n observation_spec = tf.TensorSpec(\n shape=new_shape, dtype=observation_spec.dtype)\n\n # The paper suggests to also normalize observations, because the\n # original observation subspace might be small and the target network will\n # yield random embeddings that are indistinguishable\n self._observation_normalizer = None\n if observation_adapt_speed is not None:\n assert observation_spec is not None, \\\n \"Observation normalizer requires its input tensor spec!\"\n self._observation_normalizer = AdaptiveNormalizer(\n tensor_spec=observation_spec, speed=observation_adapt_speed)\n\n def train_step(self,\n time_step: ActionTimeStep,\n state,\n calc_intrinsic_reward=True):\n \"\"\"\n Args:\n time_step (ActionTimeStep): input time_step data\n state (tuple): empty tuple ()\n calc_intrinsic_reward (bool): if False, only return the losses\n Returns:\n TrainStep:\n outputs: empty tuple ()\n state: empty tuple ()\n info: ICMInfo\n \"\"\"\n observation = time_step.observation\n\n if self._stacked_frames:\n # Assuming stacking in the last dim, we only keep the last frame.\n observation = observation[..., -1:]\n\n if self._observation_normalizer is not None:\n observation = self._observation_normalizer.normalize(observation)\n\n if self._encoder_net is not None:\n observation = tf.stop_gradient(self._encoder_net(observation)[0])\n\n pred_embedding, _ = self._predictor_net(observation)\n target_embedding, _ = self._target_net(observation)\n\n loss = tf.reduce_sum(\n tf.square(pred_embedding - tf.stop_gradient(target_embedding)),\n axis=-1)\n\n intrinsic_reward = ()\n if calc_intrinsic_reward:\n intrinsic_reward = tf.stop_gradient(loss)\n if self._reward_normalizer:\n intrinsic_reward = self._reward_normalizer.normalize(\n intrinsic_reward, clip_value=self._reward_clip_value)\n\n return AlgorithmStep(\n outputs=(),\n state=(),\n info=ICMInfo(reward=intrinsic_reward, loss=LossInfo(loss=loss)))\n\n def calc_loss(self, info: ICMInfo):\n return LossInfo(scalar_loss=tf.reduce_mean(info.loss.loss))\n","sub_path":"alf/algorithms/rnd_algorithm.py","file_name":"rnd_algorithm.py","file_ext":"py","file_size_in_byte":7012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"512446657","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Oct 8 08:21:38 2020\r\n\r\n@author: claud\r\n\"\"\"\r\n\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nSpyder Editor\r\n\r\nThis is a temporary script file.\r\n\"\"\"\r\n\r\nimport itertools as it\r\nimport numpy as np\r\n\r\nfrom scipy.special import multigammaln\r\nfrom numpy.linalg import slogdet\r\nfrom numpy import logaddexp\r\nfrom math import pi\r\nfrom math import log\r\nfrom math import exp\r\nfrom math import expm1\r\nfrom math import lgamma\r\nimport pandas as pd\r\n\r\nfrom ast import literal_eval\r\n\r\nfrom sklearn.metrics import confusion_matrix\r\nfrom sklearn.metrics import precision_score, recall_score, f1_score\r\nfrom sklearn.metrics import accuracy_score \r\nfrom sklearn.metrics import classification_report\r\n\r\nLOG2PI = log(2*pi)\r\nLOG2 = log(2)\r\n\r\n\r\ndef bhc(data, data_model, crp_alpha=1.0):\r\n \"\"\"\r\n Bayesian hierarchical clustering CRP mixture model.\r\n Notes\r\n -----\r\n The Dirichlet process version of BHC suffers from terrible numerical\r\n errors when there are too many data points. 60 is about the limit. One\r\n could use arbitrary-precision numbers if one were so inclined.\r\n Parameters\r\n ----------\r\n data : numpy.ndarray (n, d)\r\n Array of data where each row is a data point and each column is a\r\n dimension.\r\n data_model : CollapsibleDistribution\r\n Provides the approprite ``log_marginal_likelihood`` function for the\r\n data.\r\n crp_alpha : float (0, Inf)\r\n CRP concentration parameter.\r\n Returns\r\n -------\r\n assignments : list(list(int))\r\n list of assignment vectors. assignments[i] is the assignment of data to\r\n i+1 clusters.\r\n lml : float\r\n log marginal likelihood estimate.\r\n \"\"\"\r\n # initialize the tree\r\n print(\"bhc starts\")\r\n nodes = dict((i, Node(np.array([x]), data_model, crp_alpha))\r\n for i, x in enumerate(data))\r\n print(\"initialised\")\r\n n_nodes = len(nodes)\r\n assignment = [i for i in range(n_nodes)]\r\n assignments = [list(assignment)]\r\n rks = [0]\r\n\r\n while n_nodes > 1:\r\n max_rk = float('-Inf')\r\n merged_node = None\r\n cnt = 0\r\n # for each pair of clusters (nodes), compute the merger score.\r\n for left_idx, right_idx in it.combinations(nodes.keys(), 2):\r\n cnt = cnt + 1\r\n print(cnt, n_nodes)\r\n tmp_node = Node.as_merge(nodes[left_idx], nodes[right_idx])\r\n\r\n logp_left = nodes[left_idx].logp\r\n logp_right = nodes[right_idx].logp\r\n logp_comb = tmp_node.logp\r\n\r\n log_pi = tmp_node.log_pi\r\n\r\n numer = log_pi + logp_comb\r\n\r\n neg_pi = log(-expm1(log_pi))\r\n denom = logaddexp(numer, neg_pi+logp_left+logp_right)\r\n\r\n log_rk = numer-denom\r\n\r\n if log_rk > max_rk:\r\n max_rk = log_rk\r\n merged_node = tmp_node\r\n merged_right = right_idx\r\n merged_left = left_idx\r\n\r\n rks.append(exp(max_rk))\r\n\r\n # Merge the highest-scoring pair\r\n del nodes[merged_right]\r\n nodes[merged_left] = merged_node\r\n\r\n for i, k in enumerate(assignment):\r\n if k == merged_right:\r\n assignment[i] = merged_left\r\n assignments.append(list(assignment))\r\n\r\n n_nodes -= 1\r\n\r\n # The denominator of log_rk is at the final merge is an estimate of the\r\n # marginal likelihood of the data under DPMM\r\n lml = denom\r\n return assignments, lml\r\n\r\n\r\nclass Node(object):\r\n \"\"\" A node in the hierarchical clustering.\r\n Attributes\r\n ----------\r\n nk : int\r\n Number of data points assigned to the node\r\n data : numpy.ndarrary (n, d)\r\n The data assigned to the Node. Each row is a datum.\r\n crp_alpha : float\r\n CRP concentration parameter\r\n log_dk : float\r\n Some kind of number for computing probabilities\r\n log_pi : float\r\n For to compute merge probability\r\n \"\"\"\r\n\r\n def __init__(self, data, data_model, crp_alpha=1.0, log_dk=None,\r\n log_pi=0.0):\r\n \"\"\"\r\n Parameters\r\n ----------\r\n data : numpy.ndarray\r\n Array of data_model-appropriate data\r\n data_model : idsteach.CollapsibleDistribution\r\n For to calculate marginal likelihoods\r\n crp_alpha : float (0, Inf)\r\n CRP concentration parameter\r\n log_dk : float\r\n Cached probability variable. Do not define if the node is a leaf.\r\n log_pi : float\r\n Cached probability variable. Do not define if the node is a leaf.\r\n \"\"\"\r\n self.data_model = data_model\r\n self.data = data\r\n self.nk = data.shape[0]\r\n self.crp_alpha = crp_alpha\r\n self.log_pi = log_pi\r\n\r\n if log_dk is None:\r\n self.log_dk = log(crp_alpha)\r\n else:\r\n self.log_dk = log_dk\r\n\r\n self.logp = self.data_model.log_marginal_likelihood(self.data)\r\n\r\n @classmethod\r\n def as_merge(cls, node_left, node_right):\r\n \"\"\" Create a node from two other nodes\r\n Parameters\r\n ----------\r\n node_left : Node\r\n the Node on the left\r\n node_right : Node\r\n The Node on the right\r\n \"\"\"\r\n crp_alpha = node_left.crp_alpha\r\n data_model = node_left.data_model\r\n data = np.vstack((node_left.data, node_right.data))\r\n\r\n nk = data.shape[0]\r\n log_dk = logaddexp(log(crp_alpha) + lgamma(nk),\r\n node_left.log_dk + node_right.log_dk)\r\n log_pi = log(crp_alpha) + lgamma(nk) - log_dk\r\n print(crp_alpha, lgamma(nk), log_dk )\r\n\r\n if log_pi == 0:\r\n raise RuntimeError('Precision error')\r\n\r\n return cls(data, data_model, crp_alpha, log_dk, log_pi)\r\n\r\n\r\nclass CollapsibleDistribution(object):\r\n \"\"\" Abstract base class for a family of conjugate distributions. \"\"\"\r\n\r\n def log_marginal_likelihood(self, X):\r\n \"\"\" Log of the marginal likelihood, P(X|prior). \"\"\"\r\n pass\r\n\r\n\r\nclass NormalInverseWishart(CollapsibleDistribution):\r\n \"\"\"\r\n Multivariate Normal likelihood with multivariate Normal prior on mean and\r\n Inverse-Wishart prior on the covariance matrix.\r\n All math taken from Kevin Murphy's 2007 technical report, 'Conjugate\r\n Bayesian analysis of the Gaussian distribution'.\r\n \"\"\"\r\n\r\n def __init__(self, **prior_hyperparameters):\r\n self.nu_0 = prior_hyperparameters['nu_0']\r\n self.mu_0 = prior_hyperparameters['mu_0']\r\n self.kappa_0 = prior_hyperparameters['kappa_0']\r\n self.lambda_0 = prior_hyperparameters['lambda_0']\r\n\r\n self.d = float(len(self.mu_0))\r\n\r\n self.log_z = self.calc_log_z(self.mu_0, self.lambda_0, self.kappa_0,\r\n self.nu_0)\r\n\r\n @staticmethod\r\n def update_parameters(X, _mu, _lambda, _kappa, _nu, _d):\r\n n = X.shape[0]\r\n xbar = np.mean(X, axis=0)\r\n kappa_n = _kappa + n\r\n nu_n = _nu + n\r\n mu_n = (_kappa*_mu + n*xbar)/kappa_n\r\n\r\n S = np.zeros(_lambda.shape) if n == 1 else (n-1)*np.cov(X.T)\r\n dt = (xbar-_mu)[np.newaxis]\r\n\r\n back = np.dot(dt.T, dt)\r\n lambda_n = _lambda + S + (_kappa*n/kappa_n)*back\r\n\r\n assert(mu_n.shape[0] == _mu.shape[0])\r\n assert(lambda_n.shape[0] == _lambda.shape[0])\r\n assert(lambda_n.shape[1] == _lambda.shape[1])\r\n\r\n return mu_n, lambda_n, kappa_n, nu_n\r\n\r\n @staticmethod\r\n def calc_log_z(_mu, _lambda, _kappa, _nu):\r\n d = len(_mu)\r\n sign, detr = slogdet(_lambda)\r\n log_z = LOG2*(_nu*d/2.0) + (d/2.0)*log(2*pi/_kappa) +\\\r\n multigammaln(_nu/2, d) - (_nu/2.0)*detr\r\n\r\n return log_z\r\n\r\n def log_marginal_likelihood(self, X):\r\n n = X.shape[0]\r\n params_n = self.update_parameters(X, self.mu_0, self.lambda_0,\r\n self.kappa_0, self.nu_0, self.d)\r\n log_z_n = self.calc_log_z(*params_n)\r\n\r\n return log_z_n - self.log_z - LOG2PI*(n*self.d/2)\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n hypers = {\r\n 'mu_0': np.zeros(10),\r\n 'nu_0': 12.0,\r\n 'kappa_0': 1.0,\r\n 'lambda_0': np.eye(10)\r\n }\r\n data_model = NormalInverseWishart(**hypers)\r\n\r\n #import data into a dataframe\r\n filename='markov_blanket_features.csv'\r\n df=pd.read_csv(filename) #no header\r\n\r\n\r\n\r\n #assign columns 1 to end to an array - data\r\n data = []\r\n #in the same loop, create the labels\r\n labels = []\r\n for i in np.arange(0,len(df)):\r\n data.append(df.iloc[i,1:-1].tolist()) #literal_eval prevents a list as been written as a string\r\n if df.iloc[i,0] == \"ALL\":\r\n labels.append(0)\r\n else:\r\n labels.append(1)\r\n\r\n asgn, _ = bhc(data, data_model, 40)\r\n z = np.array(asgn[-2], dtype=float)\r\n change = []\r\n for i in range(len(z)):\r\n if z[i] == 0:\r\n change.append(0) \r\n else:\r\n change.append(1)\r\n #change z to 0 and 1 values\r\n \r\n z = change\r\n\r\n \r\n #now you can calculate performance metrics with sklearn, such as confusion matrix\r\n c = confusion_matrix(labels, z)\r\n print(c)\r\n accuracy = accuracy_score(labels, z)\r\n accuracy2 = (c[0,0]+c[1,1])/71\r\n print('The accuracy is:' ,accuracy) \r\n precision = c[1,1]/(c[1,1]+c[0,1])\r\n precision2 = precision_score(labels,z)\r\n recall = c[1,1]/(c[1,1]+c[1,0])\r\n recall2 = recall_score(labels,z)\r\n print('The precision is: ',precision)\r\n print('The recall is: ',recall)\r\n f1 = f1_score(labels,z)\r\n f12 = 2 * (precision * recall) / (precision + recall)\r\n print('The F1 score is:' ,f1)","sub_path":"Leukemia/BHC_leukemia_mb.py","file_name":"BHC_leukemia_mb.py","file_ext":"py","file_size_in_byte":9667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"16609796","text":"from django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib.auth.views import (\n LoginView, LogoutView\n)\nfrom django.contrib.auth import authenticate, login,logout\nfrom django.contrib.sites.shortcuts import get_current_site\nfrom django.core.signing import BadSignature, SignatureExpired, loads, dumps\nfrom django.http import Http404, HttpResponseBadRequest\nfrom django.shortcuts import redirect,render\nfrom django.template.loader import render_to_string\nfrom django.views import generic\nfrom .forms import (\n LoginForm, UserCreateForm, StudentCreateForm, SocietyCreateForm\n)\nfrom django.contrib.auth.decorators import login_required\nfrom .models import User, Student, Society\n\n#User = get_user_model()\nUser = User\nStudent = Student\nSociety = Society\n\ndef selectfunc(request):\n return render(request,'select.html')\n\n'''\ndef loginfunc(request):\n if request.method == 'POST':\n print(request.POST)\n username2 = request.POST['username']\n password2 = request.POST['password']\n user = authenticate(request, email=username2, password=password2)\n\n if user is not None:\n login(request, user)\n return render(request,'list.html')\n \n else:\n return redirect('user_login')\n\n return render(request, 'user_login.html')\n'''\n\n#@login_required\ndef listfunc(request):\n print('hello')\n print(type(request.user))\n return render(request,'list.html')\n\ndef selectlogin(request):\n return render(request, 'select_login.html')\n\nclass UserLogin(LoginView):\n \"\"\"ログインページ\"\"\"\n form_class = LoginForm\n template_name = 'user_login.html'\n\nclass StudentLogin(LoginView):\n \"\"\"ログインページ\"\"\"\n form_class = LoginForm\n template_name = 'user_login.html'\n\nclass SocietyLogin(LoginView):\n \"\"\"ログインページ\"\"\"\n form_class = LoginForm\n template_name = 'society_login.html'\n\n\nclass Logout(LogoutView):\n \"\"\"ログアウトページ\"\"\"\n template_name = 'select.html'\n\n\nclass UserCreate(generic.CreateView):\n \"\"\"ユーザー仮登録\"\"\"\n template_name = 'user_create.html'\n form_class = UserCreateForm\n\n def form_valid(self, form):\n \"\"\"仮登録と本登録用メールの発行.\"\"\"\n # 仮登録と本登録の切り替えは、is_active属性を使うと簡単です。\n # 退会処理も、is_activeをFalseにするだけにしておくと捗ります。\n user = form.save(commit=False)\n user.is_active = False\n user.save()\n\n # アクティベーションURLの送付\n current_site = get_current_site(self.request)\n domain = current_site.domain\n context = {\n 'protocol': self.request.scheme,\n 'domain': domain,\n 'token': dumps(user.pk),\n 'user': user,\n }\n\n subject = render_to_string('app/mail_template/create/subject.txt', context)\n message = render_to_string('app/mail_template/create/message_for_user.txt', context)\n\n user.email_user(subject, message)\n return redirect('app:user_create_done')\n\nclass StudentCreate(generic.CreateView):\n \"\"\"ユーザー仮登録\"\"\"\n template_name = 'user_create.html'\n form_class = StudentCreateForm\n\n def form_valid(self, form):\n \"\"\"仮登録と本登録用メールの発行.\"\"\"\n # 仮登録と本登録の切り替えは、is_active属性を使うと簡単です。\n # 退会処理も、is_activeをFalseにするだけにしておくと捗ります。\n student = form.save(commit=False)\n student.is_active = False\n student.save()\n\n # アクティベーションURLの送付\n current_site = get_current_site(self.request)\n domain = current_site.domain\n context = {\n 'protocol': self.request.scheme,\n 'domain': domain,\n 'token': dumps(student.pk),\n 'user': student,\n }\n\n subject = render_to_string('app/mail_template/create/subject.txt', context)\n message = render_to_string('app/mail_template/create/message_for_user.txt', context)\n\n student.email_user(subject, message)\n return redirect('app:user_create_done')\n\n\nclass SocietyCreate(generic.CreateView):\n \"\"\"ユーザー仮登録\"\"\"\n template_name = 'user_create.html'\n form_class = SocietyCreateForm\n\n def form_valid(self, form):\n \"\"\"仮登録と本登録用メールの発行.\"\"\"\n # 仮登録と本登録の切り替えは、is_active属性を使うと簡単です。\n # 退会処理も、is_activeをFalseにするだけにしておくと捗ります。\n society = form.save(commit=False)\n society.is_active = False\n society.save()\n\n # アクティベーションURLの送付\n current_site = get_current_site(self.request)\n domain = current_site.domain\n context = {\n 'protocol': self.request.scheme,\n 'domain': domain,\n 'token': dumps(society.pk),\n 'user': society,\n }\n\n subject = render_to_string('app/mail_template/create/subject.txt', context)\n message = render_to_string('app/mail_template/create/message_for_society.txt', context)\n\n society.email_user(subject, message)\n return redirect('app:user_create_done')\n\n\nclass UserCreateDone(generic.TemplateView):\n \"\"\"ユーザー仮登録したよ\"\"\"\n template_name = 'user_create_done.html'\n\nclass StudentCreateDone(generic.TemplateView):\n \"\"\"ユーザー仮登録したよ\"\"\"\n template_name = 'user_create_done.html'\n\nclass SocietyCreateDone(generic.TemplateView):\n \"\"\"ユーザー仮登録したよ\"\"\"\n template_name = 'user_create_done.html'\n\n\nclass UserCreateComplete(generic.TemplateView):\n \"\"\"メール内URLアクセス後のユーザー本登録\"\"\"\n template_name = 'user_create_complete.html'\n timeout_seconds = getattr(settings, 'ACTIVATION_TIMEOUT_SECONDS', 60*60*24) # デフォルトでは1日以内\n\n def get(self, request, **kwargs):\n \"\"\"tokenが正しければ本登録.\"\"\"\n token = kwargs.get('token')\n try:\n user_pk = loads(token, max_age=self.timeout_seconds)\n\n # 期限切れ\n except SignatureExpired:\n return HttpResponseBadRequest()\n\n # tokenが間違っている\n except BadSignature:\n return HttpResponseBadRequest()\n\n # tokenは問題なし\n else:\n try:\n user = User.objects.get(pk=user_pk)\n except User.DoesNotExist:\n return HttpResponseBadRequest()\n else:\n if not user.is_active:\n # 問題なければ本登録とする\n user.is_active = True\n user.save()\n return super().get(request, **kwargs)\n\n return HttpResponseBadRequest()\n\n\nclass StudentCreateComplete(generic.TemplateView):\n \"\"\"メール内URLアクセス後のユーザー本登録\"\"\"\n template_name = 'user_create_complete.html'\n timeout_seconds = getattr(settings, 'ACTIVATION_TIMEOUT_SECONDS', 60*60*24) # デフォルトでは1日以内\n\n def get(self, request, **kwargs):\n \"\"\"tokenが正しければ本登録.\"\"\"\n token = kwargs.get('token')\n try:\n user_pk = loads(token, max_age=self.timeout_seconds)\n\n # 期限切れ\n except SignatureExpired:\n return HttpResponseBadRequest()\n\n # tokenが間違っている\n except BadSignature:\n return HttpResponseBadRequest()\n\n # tokenは問題なし\n else:\n try:\n student = Student.objects.get(pk=user_pk)\n except Student.DoesNotExist:\n return HttpResponseBadRequest()\n else:\n if not student.is_active:\n # 問題なければ本登録とする\n student.is_active = True\n student.save()\n return super().get(request, **kwargs)\n\n return HttpResponseBadRequest()\n\n\nclass SocietyCreateComplete(generic.TemplateView):\n \"\"\"メール内URLアクセス後のユーザー本登録\"\"\"\n template_name = 'user_create_complete.html'\n timeout_seconds = getattr(settings, 'ACTIVATION_TIMEOUT_SECONDS', 60*60*24) # デフォルトでは1日以内\n\n def get(self, request, **kwargs):\n \"\"\"tokenが正しければ本登録.\"\"\"\n token = kwargs.get('token')\n try:\n user_pk = loads(token, max_age=self.timeout_seconds)\n\n # 期限切れ\n except SignatureExpired:\n return HttpResponseBadRequest()\n\n # tokenが間違っている\n except BadSignature:\n return HttpResponseBadRequest()\n\n # tokenは問題なし\n else:\n try:\n society = Society.objects.get(pk=user_pk)\n except Society.DoesNotExist:\n return HttpResponseBadRequest()\n else:\n if not society.is_active:\n # 問題なければ本登録とする\n society.is_active = True\n society.save()\n return super().get(request, **kwargs)\n\n return HttpResponseBadRequest()\n","sub_path":"demo/app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"472864917","text":"import heapq\n\n\nclass Solution:\n def kSmallestPairs(self, nums1: List[int], nums2: List[int], k: int) -> List[List[int]]:\n \"\"\"\n Intuition:\n I found it helpful to visualize the input as an m×n matrix of sums,\n for example for nums1=[1,7,11], and nums2=[2,4,6]:\n\n 2 4 6\n +------------\n 1 | 3 5 7\n 7 | 9 11 13\n 11 | 13 15 17\n\n Of course the smallest pair overall is in the top left corner, the one\n with sum 3. We don't even need to look anywhere else. After including\n that pair in the output, the next-smaller pair must be the next on the\n right (sum=5) or the next below (sum=9). We can keep a \"horizon\" of\n possible candidates, implemented as a heap / priority-queue, and roughly\n speaking we'll grow from the top left corner towards the right/bottom.\n\n Time Complexity:\n The complexity of this algorithm is O(k*logk) if k 0:\r\n print(\"lower\")\r\n else:\r\n print(\"higher\")\r\n number= int(input(\"Guess a number between 0 and 10: \"))\r\n guessNumber= guessNumber+1\r\n\r\n\r\n# computer that says whether the secret number is correct\r\n\r\n#components you need: user input of the secret number, computer pick a secret number, message about current winning or losing, \r\n# --> player can choose to play multiple times, types of number 0 to 10, while user hasn't guessed the number have the user guess again\r\n\r\n\r\n","sub_path":"C200/My Notes/Write a simple program.py","file_name":"Write a simple program.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"331999792","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# 新旧类的问题在python3中没有差别,应为python都是新式类,但是在python2中dir方法有差别\n\n\n# 旧式类\nclass OldClass:\n def __init__(self, account, name):\n self.account = account\n self.name = name\n\n\n# 新式类\nclass NewClass(object):\n def __init__(self, account, name):\n self.account = account\n self.name = name\n\n\nif __name__ == '__main__':\n old_class = OldClass(111111, 'OldClass')\n print(old_class)\n print(type(old_class))\n print(dir(old_class))\n print('\\n')\n new_class = NewClass(222222, 'NewClass')\n print(new_class)\n print(type(new_class))\n print(dir(new_class))\n","sub_path":"studypython/新旧式类.py","file_name":"新旧式类.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"630942116","text":"from bs4 import BeautifulSoup\nimport glob, codecs, os\n\ndef removeNonAscii(s): return \"\".join(i for i in s if ord(i)<128)\n\ndef stripTags(in_text):\n # convert in_text to a mutable object (e.g. list)\n s_list = list(in_text)\n i,j = 0,0\n while i < len(s_list):\n # iterate until a left-angle bracket is found\n if s_list[i] == '<':\n while s_list[i] != '>':\n # pop everything from the the left-angle bracket until the right-angle bracket\n s_list.pop(i)\t\n # pops the right-angle bracket, too\n s_list.pop(i)\n else:\n i=i+1\t\t\n # convert the list back into text\n join_char=''\n return join_char.join(s_list)\n\nfor i in glob.glob(\"C:\\\\Text\\\\Professional\\\\Text Data\\\\ECCO\\\\tcpxml\\\\*.xml\"):\n \n filenamewithextension = os.path.basename(i)\n filename = os.path.splitext(filenamewithextension)[0]\n \n with open(\"poetry_in_\" + filename + \".txt\",\"w\") as out:\n \n fp = codecs.open(i, \"r\", \"utf-8\")\n read = fp.read()\n soup = BeautifulSoup(read)\n lines = soup.findAll(\"l\")\n for j in lines:\n out.write(stripTags(str(j)) + \"\\n\")\n \n \n \n #to do: measure number of lines. write lines to new file based on filename of infile. Create metadata for poetry to see which years are sparse. Repeat for EEBO","sub_path":"find_ecco_poetry.py","file_name":"find_ecco_poetry.py","file_ext":"py","file_size_in_byte":1428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"125129618","text":"#!python3.6\nimport difflib\na = \"qabxcd\"\nb = \"abycdf\"\nprint(f'a:{a}\\nb:{b}')\ns = difflib.SequenceMatcher(None, a, b)\nfor tag, i1, i2, j1, j2 in s.get_opcodes():\n print('{:7} a[{}:{}] --> b[{}:{}] {!r:>8} --> {!r}'.format(tag, i1, i2, j1, j2, a[i1:i2], b[j1:j2]))\n\n\n","sub_path":"24/00/get_opcodes.py","file_name":"get_opcodes.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"602028822","text":"'''\r\n author: Scott Jorgensen\r\n name: cash_feed.py\r\n date: 7th March 2012\r\n purpose: Allows the loading and report generation for the cash account balances.\r\n'''\r\n\r\nfrom kew.pe.control import Control\r\n\r\nimport logging\r\nimport os\r\n\r\nclass CashFilter(object): \r\n \"\"\"Loads the CS cash file \r\n \"\"\"\r\n cash_map = {'CSVollinCallBal': 'load_balances_CS_call',\r\n 'CSVollinBal': 'load_balances_CS',\r\n 'UBSVollinBal': 'load_balances_UBS',\r\n 'UBSVollinCallBal': 'load_balances_CS_call',\r\n 'CustomCashBalancesNonNetted': 'load_balances_PB',\r\n }\r\n \r\n \r\n def __init__(self, path, db_url):\r\n self.path = os.path.abspath(path)\r\n self.db_url = db_url\r\n \r\n def run(self): \r\n control = Control(file_archive='',db_url=self.db_url)\r\n the_account_balance_loader = 'reportingdb@kewcapital.com'\r\n name = self.path.rsplit('//')[-1]\r\n for prefix, funcname in self.cash_map.iteritems():\r\n if name.startswith(prefix):\r\n func = getattr(control, funcname)\r\n logging.info(\"Found function for [%s] - [%s]\" % (name, funcname))\r\n break\r\n else:\r\n msg = \"Unkown file name [%s]\" % name\r\n logging.error(msg)\r\n raise ValueError(msg)\r\n \r\n func(self.path,the_account_balance_loader)\r\n logging.info(\"Loaded file [%s] to database\")\r\n \r\n \r\n \r\n ","sub_path":"src/sandbox/bank_accounts/old/illiquids/scheduledfeeds/cash_filter.py","file_name":"cash_filter.py","file_ext":"py","file_size_in_byte":1520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"317512052","text":"from django.conf.urls import include, url\nfrom django.contrib import admin\n\nfrom . import settings\n\nurlpatterns = [\n url(r'^', include('Art_Sign.pages.urls')),\n url(r'^admin/', admin.site.urls),\n url(r'^api/', include('Art_Sign.api.urls')),\n url(r'tinymce/', include('tinymce.urls')),\n]\n\nif settings.USE_CUSTOM_ADMIN:\n urlpatterns += [\n url(r'^agenda/', include('Art_Sign.agenda.urls')),\n ]\n","sub_path":"Art_Sign/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"153853682","text":"#!/usr/bin/python\n\nimport re\nfrom subprocess import call, check_output\n\n'''\nThis script is meant for crontab to be running every 5 minutes\nIt brings up reverse SSH tunnel if not present.\nThese tunnels are required for operation of ZBAT\n\nTo configure crontab\n\tcrontab -e\n\t# add entry\n\t*/5 * * * * python /home/automation/startReverseSSHTunnel.py\n\nThis script requires that you've already setup certificate for ssh. If you haven't here are the steps:\n1) On local host generate a private/public key pair. Name your cert 'zbat' and leave passphrase empty\n\tssh-keygen -t rsa -b 2048 -v\n2) Copy public key over to remote host. This is the same thing as copy the content of zbat.pub into remote host file .ssh/authorized_keys\n\tssh-copy-id -i .ssh/zbat.pub automation@zbat001.cloud.zingbox.com\n'''\n\nsshTunnelMapping = \t[\n\t\t\t\"22443:192.168.10.23:443\", # connection for panfw\n\t\t\t\"22089:192.168.10.40:8089\", # connection for splunk\n\t\t\t\"22022:192.168.20.184:22\", # connection for traffic generator\n\t\t\t\"22094:192.168.10.40:9400\", # connection for openvas\n\t\t\t\"22044:192.168.10.63:4444\" # connection to Window Laptop (selenium for IE11)\n\t\t\t]\n\n# hosts list and corresponding certificate to use for authentication between this host and remote userHosts\nuserHosts = [\"automation@zbat001.azure.zingbox.com\", \"automation@zbat001.cloud.zingbox.com\"]\ncerts = [\"/home/automation/.ssh/zbat.pem\", \"/home/automation/.ssh/zbat2.pem\"]\n\n# for each host, start all the reverse SSH tunnel mapping\nfor i in xrange(0, len(userHosts)):\n for mapping in sshTunnelMapping:\n\n # For each host, increment the mapping port from 22xxx to 23xxx, 24xxx, etc...\n if i > 0:\n tempmapping = list(mapping)\n tempmapping[1] = 2+i\n mapping = ''.join(str(v) for v in tempmapping)\n\n # search for existing tunnel\n try:\n grepCommand = [\"ps\", \"-fC\", \"ssh\"]\n output = check_output(grepCommand)\n except Exception as e:\n output = \"not found\"\n pass\n\n # start tunnel if none existing\n rePattern = re.compile(mapping + ' ' + userHosts[i])\n if not re.search(rePattern, output):\n command = [\"ssh\", \"-R\", mapping, userHosts[i], \"-i\", certs[i], \"-fNT\"]\n print(' '.join(command))\n call(command)\n","sub_path":"util/startReverseSSHTunnel.py","file_name":"startReverseSSHTunnel.py","file_ext":"py","file_size_in_byte":2309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"337256196","text":"import numpy as np\nimport pandas as pd\nimport re\nimport regex\nimport json\nimport MeCab\nimport difflib\nimport wikitextparser as wtp\n\n\ndef read_jasonl(filename):\n with open(filename) as f:\n return [json.loads(line.rstrip('\\r\\n')) for line in f.readlines()]\n\ndef flatten(multi_list: list):\n return [item for sublist in multi_list for item in sublist if (not isinstance(item, str)) or (len(item) is not 0)]\n\ndef clean_text(text: str):\n cleaned = re.sub(r'\\\\[a-zA-Z0-9]+', '', text)\n cleaned = regex.sub(r'(?\\{(?:[^{}]+|(?&rec))*\\})', '', cleaned)\n cleaned = re.sub(r'\\\"', '', cleaned)\n cleaned = re.sub(r'\\s{2,}', ' ', cleaned)\n cleaned = re.sub(r'\\s{2,}', ' ', cleaned)\n cleaned = re.sub(r'(\\^\\s*[^\\^]+)', ' ', cleaned)\n \n return cleaned\n\ndef _sub(text, s):\n for i in s:\n if isinstance(i, str):\n text = text.replace(i, '')\n else:\n text = text.replace(i.string, '')\n \n return text\n\ndef _clean_source_text(parsed_source_text):\n clean_text = _sub(parsed_source_text.contents, parsed_source_text.templates)\n clean_text = _sub(clean_text, parsed_source_text.tags())\n clean_text = _sub(clean_text, parsed_source_text.external_links)\n clean_text = re.sub(r'\\n|\\t|\\r', ' ', clean_text)\n clean_text = re.sub(r'={2,}.*?={2,}', '', clean_text)\n clean_text = re.sub(r'\\[\\[[^\\]]+:.+?\\]\\]', '', clean_text)\n clean_text = re.sub(r'\\[\\[[^\\]]+?\\||\\]\\]|\\[\\[', '', clean_text)\n clean_text = re.sub(r'\\'{2,}|\\*+|#+', '', clean_text)\n clean_text = re.sub(r'<[^>]*?>.*?<\\/[^>]*?>', '', clean_text)\n clean_text = re.sub(r'\\{\\{.*?\\}\\}|\\{.*?\\}', '', clean_text)\n \n return clean_text\n\ndef _complement_subtitle(article_df: pd.DataFrame):\n # サブカテゴリ名の無い部分のうち,先頭部分は NO_SUBTITLE で埋める\n if len(article_df.loc[article_df.heading != '']) is 0:\n article_df.loc[article_df.heading == '', ['heading']] = 'NO_SUBTITLE'\n else :\n article_df.loc[0:article_df[article_df.heading != ''].index[0], ['heading']] = 'NO_SUBTITLE'\n \n # サブカテゴリ名が無い場合は,1つ前のサブカテゴリ名で補完する\n while len(article_df.loc[article_df.heading == '']) > 0: \n article_df.loc[article_df.heading == '', ['heading']] = \\\n article_df.loc[article_df.loc[article_df.heading == '', ['heading']].index - 1, 'heading'].values[0]\n\n return article_df\n\ndef _search_subtitle(source_text: str):\n m = re.search(r'==+\\s*([^=]+)\\s*==+', source_text)\n if m:\n heading = m.group(1)\n else:\n heading = ''\n\n return heading\n\ndef _get_subtitle_of_sentence(article_df: pd.DataFrame, source_text: str, heading: str):\n df = article_df.copy()\n for s in re.findall(r'.*?。', source_text):\n m_sentence = difflib.get_close_matches(s.strip(), df.sentence.values, n=1)\n if len(m_sentence) > 0 and len(heading) > 0:\n # heading にサブタイトル名を追加\n df.loc[df.sentence == m_sentence[0], ['heading']] += heading + ','\n \n return df\n\ndef get_subtitle(sentence_df: pd.DataFrame, wiki_dump_data: list):\n df = sentence_df.assign(heading = '')\n new_train_df = pd.DataFrame()\n for _id in df._id.unique():\n article_df = df.loc[df._id == _id]\n \n row_article = [entry for entry in wiki_dump_data if entry['index']['_id'] == _id][0]\n parsed = wtp.parse(row_article['source_text'])\n for source in parsed.sections[1:]:\n heading = _search_subtitle(source.string)\n section_text = _clean_source_text(source)\n article_df = _get_subtitle_of_sentence(article_df, section_text, heading)\n \n article_df = _complement_subtitle(article_df)\n new_train_df = new_train_df.append(article_df)\n\n return new_train_df\n\ndef text2sentence(text: str):\n if re.search(r'。', text):\n return list(map(lambda s: s.strip(), re.findall(\".*?。\", clean_text(text))))\n else:\n return [clean_text(text)]\n\ndef contains_patt(match_text: [str, list]):\n if isinstance(match_text, str):\n return re.escape(f\"{match_text}\")\n elif isinstance(match_text, list):\n return \"|\".join([re.escape(t) for t in match_text])\n else:\n print(\"Unexpected type.\")\n return \"\"\n\ndef train2dict(train_data: list, attribute: str):\n train_dict = {}\n for entry in train_data:\n train_dict[str(entry['WikipediaID'])] = flatten([text2sentence(item) for item in entry['Attributes'][attribute]])\n\n return train_dict\n\ndef df2dict(result: pd.DataFrame, value_column: str):\n return result.groupby('_id')[value_column].apply(lambda x: list(set(x.tolist()))).to_dict()\n\ndef extract_from_dict(train: dict, ids: list):\n return dict([[_id, train[_id]] for _id in ids])\n\ndef labeling(sentence_df: pd.DataFrame, train_dict: dict):\n _sentence_df = sentence_df.assign(label = False)\n for _id, train_values in train_dict.items():\n if len(train_values) is 0:\n continue\n\n _sentence_df.loc[_sentence_df._id == str(_id), 'label'] = \\\n _sentence_df.loc[_sentence_df._id == str(_id)].sentence.str.contains(contains_patt(train_values))\n\n return _sentence_df\n\ndef is_noun1(hinshi: list):\n if hinshi[0] in ['名詞', '接頭詞']:\n return True\n else:\n return False\n\ndef is_noun2(hinshi: list):\n if (hinshi[0] == '名詞') and (hinshi[1] == '固有名詞') and (hinshi[2] != '一般'):\n return False\n elif (hinshi[0] == '名詞') and (hinshi[1] in ['代名詞', '非自立', '特殊']):\n return False\n elif hinshi[0] == '名詞' or (hinshi[0] == '接頭詞' and hinshi[1] == '名詞接続'):\n return True\n else:\n return False\n\ndef is_noun3(hinshi, noun):\n if not (hinshi[0] in ['名詞', '接頭詞']) and (len(noun) == 0):\n return False\n elif (hinshi[0] == '名詞') and (hinshi[1] == '固有名詞') and (hinshi[2] != '一般'):\n return False\n elif (hinshi[0] == '名詞') and (hinshi[1] in ['代名詞', '非自立', '特殊']):\n return False\n elif (hinshi[0] in ['名詞', '接頭詞']) or ((hinshi[0] == '助詞') and (hinshi[1] in ['連体化', '並立助詞', '副助詞'])):\n return True\n else:\n return False\n\ndef _remove_tail_adv(noun, hinshi):\n while hinshi.pop() != '名詞':\n noun.pop()\n if len(hinshi) == 0:\n break\n\ndef get_noun_list(text: str, join=True, condition=2):\n mecab_param = MeCab.Tagger(\"-Ochasen -d /usr/local/lib/mecab/dic/mecab-ipadic-neologd\")\n mecab_param.parse(\"\")\n node = mecab_param.parseToNode(text)\n \n hinshi_list = []\n noun_list = []\n noun = []\n while node:\n if len(node.surface) == 0:\n node = node.next\n continue\n\n hinshi = node.feature.split(',')\n\n if condition is 1: is_noun = is_noun1(hinshi)\n elif condition is 2: is_noun = is_noun2(hinshi)\n elif condition is 3: is_noun = is_noun3(hinshi, noun)\n else: is_noun = False\n\n if is_noun:\n if join:\n hinshi_list.append(hinshi[0])\n noun.append(node.surface)\n else:\n noun_list.append(node.surface)\n elif (len(noun) > 0) and join:\n _remove_tail_adv(noun, hinshi_list) \n noun_list.append(''.join(noun))\n noun = []\n hinshi_list = []\n \n node = node.next\n \n if (len(noun) > 0) and join:\n _remove_tail_adv(noun, hinshi_list) \n noun_list.append(''.join(noun))\n\n return noun_list\n\ndef is_oxidation_state_parts(surface):\n return re.match(r'^\\($|^[IV]{1,4}$|^\\)$', surface)\n \ndef is_oxidation_state(word):\n return re.match(r'^\\([IV]{1,4}\\)$', \"\".join(word))\n\ndef get_compound_list(text: str):\n mecab_param = MeCab.Tagger(\"-Ochasen -d /usr/local/lib/mecab/dic/mecab-ipadic-neologd\")\n mecab_param.parse(\"\")\n node = mecab_param.parseToNode(text)\n \n compound_list = []\n compound = []\n oxidation_state = []\n while node:\n if len(node.surface) == 0:\n node = node.next\n continue\n \n hinshi = node.feature.split(',') \n if is_oxidation_state_parts(node.surface):\n oxidation_state.append(node.surface)\n elif (len(oxidation_state) > 0) and is_oxidation_state(oxidation_state): \n compound.append(''.join(oxidation_state))\n oxidation_state = []\n \n if is_noun2(hinshi):\n compound.append(node.surface)\n elif len(compound) > 0 and not is_oxidation_state_parts(node.surface):\n compound_list.append(''.join(compound))\n compound = []\n \n node = node.next\n \n if len(compound) > 0:\n if is_oxidation_state(oxidation_state): compound += oxidation_state\n compound_list.append(''.join(compound))\n\n return compound_list\n\ndef get_word_list(text: str, condition_func=None):\n mecab_param = MeCab.Tagger(\"-Ochasen -d /usr/local/lib/mecab/dic/mecab-ipadic-neologd\")\n mecab_param.parse(\"\")\n node = mecab_param.parseToNode(text)\n \n words = []\n while node:\n if len(node.surface) == 0:\n node = node.next\n continue\n\n hinshi = node.feature.split(',')\n if condition_func(hinshi):\n words.append(node.surface)\n \n node = node.next\n \n return words\n\ndef _validate_precision(extraction: list, train: list):\n if len(extraction) is 0:\n return []\n if len(train) is 0:\n return [False] * len(extraction)\n\n extraction_df = pd.DataFrame({\"extraction\": extraction})\n precision_list = \\\n np.array(extraction_df.extraction.str.contains(contains_patt(train)).tolist()) \\\n + np.array(extraction_df.apply(lambda x: True if re.search(contains_patt(x.extraction), ','.join(train)) else False, axis=1).tolist())\n \n return precision_list.tolist()\n\ndef _validate_recall(extraction: list, train: list):\n if len(extraction) is 0:\n return [False] * len(train)\n if len(train) is 0:\n return []\n\n train_df = pd.DataFrame({\"train\": train})\n recall_list = \\\n np.array(train_df.train.str.contains(contains_patt(extraction)).tolist()) \\\n + np.array(train_df.apply(lambda x: True if re.search(contains_patt(x.train), ','.join(extraction)) else False, axis=1).tolist())\n\n return recall_list.tolist()\n\ndef validation(extraction: dict, train: dict):\n precision_list = []\n recall_list = []\n for train_id, train_values in train.items():\n if extraction.get(train_id):\n precision_list += _validate_precision(extraction[train_id], train_values)\n recall_list += _validate_recall(extraction[train_id], train_values)\n else:\n recall_list += [False] * len(train_values)\n\n precision_list = np.array(precision_list)\n recall_list = np.array(recall_list)\n\n precision = precision_list.sum() / precision_list.shape[0]\n recall = recall_list.sum() / recall_list.shape[0]\n f1 = 2 * precision * recall / (precision + recall)\n \n return {\"precision\": precision, \"recall\": recall, \"f1\": f1}\n","sub_path":"notebook/shinra_util.py","file_name":"shinra_util.py","file_ext":"py","file_size_in_byte":11275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"499032336","text":"# -*- coding: utf-8 -*-\n\n\nimport logging\n\n\nlogging.basicConfig(format='%(levelname)s: %(message)s', level=logging.WARN)\nlogger = logging.getLogger(name=\"RunLogger\")\nlogger.setLevel(logging.DEBUG)\n\n\nBASE_HOST = 'apertsev.dev.pyn.ru'\n\n\nhh_hosts = ['hh.ru', 'rabota.mail.ru', 'jobs.tut.by', 'jobs.day.az', 'career.ru']\n\nbase_roles = {\n 'back_office': ('admin@hhtest.ru', '123'),\n 'applicant': ('tk19@appl.ru', '123456'),\n 'employer': ('9911463-776@hhtest.ru', '123'),\n# 'nedouser': ('hhtest402@mail.ru', '123asd'),\n 'anonymous': (None, None),\n}\n\n\ndef get_url(domain, path=''):\n return 'http://' + domain + '.' + BASE_HOST + path\n\n\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"190236573","text":"\nfrom core.plato import Plato\nfrom core.newton import Newton\n\n\nSIZE = 50.0\n\n\nclass Fwoogle(Newton):\n \n def __init__(self, x, y):\n Newton.__init__(self, x, y, SIZE, SIZE)\n \n self.scale_to_container = True\n \n def draw(self, canvas):\n Newton.draw(self, canvas)\n \n canvas.set_line_width(0.0)\n canvas.set_source_rgba(0.5, 0.5, 1.0, 0.9)\n canvas.rectangle(self.left, self.top, self.width, self.height)\n canvas.fill()\n \n \n ","sub_path":"ontology/reactors/fwoogle.py","file_name":"fwoogle.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"152161202","text":"from gen import utils\nimport torch\nfrom torch import nn, Tensor\nimport torch.nn.functional as F\n\n\nclass AE(nn.Module):\n \"\"\"Auto Encoder\n\n Completely from thin air.\n \"\"\"\n\n def __init__(self, n_in: int, n_h: int = 50, n_z: int = 10, p: float = 0.01):\n super().__init__()\n utils.logger.info(f\"n_in {n_in}, n_h {n_h}, n_z {n_z}\")\n self.in_layer = nn.Linear(n_in, n_h)\n\n self.encoder = nn.Sequential(\n self.in_layer,\n nn.BatchNorm1d(n_h),\n nn.Dropout(p),\n nn.ReLU(),\n nn.Linear(n_h, n_z),\n ) # self.mean_layer\n\n self.decoder = nn.Sequential(\n nn.Linear(n_z, n_h),\n nn.BatchNorm1d(n_h),\n nn.Dropout(p),\n nn.ReLU(),\n nn.Linear(n_h, n_in),\n nn.BatchNorm1d(n_in),\n )\n\n def decode(self, z):\n return self.decoder(z)\n\n def encode(self, x_cat, x_cont):\n return self.encoder(x_cont)\n\n def forward(self, x_cat, x_cont):\n z = self.encode(x_cat, x_cont)\n return self.decode(z), x_cont\n\n def sample(self, num_samples: int, current_device: int, **kwargs) -> Tensor:\n z = torch.randn(num_samples, self.n_z)\n z = z.to(current_device)\n samples = self.decode(z)\n return samples\n\n def generate_reconstruction(\n self, x_cat: Tensor, x_cont: Tensor, *args, **kwargs\n ) -> Tensor:\n return self.forward(x_cat, x_cont)[0]\n\n\nclass AE_Loss:\n def __init__(self):\n pass\n\n def loss(self, reconstructions, originals) -> dict:\n reconstruction_loss = F.mse_loss(reconstructions, originals)\n loss = reconstruction_loss\n return {\n \"loss\": loss,\n \"Reconstruction_Loss\": reconstruction_loss,\n }\n\n def __call__(self, X, y):\n reconstructions, originals = X\n return self.loss(reconstructions, originals)[\"loss\"]\n","sub_path":"gen/ae.py","file_name":"ae.py","file_ext":"py","file_size_in_byte":1920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"314253284","text":"import tensorflow as tf\nfrom tensorflow import keras\n\nmnist = tf.keras.datasets.mnist\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\n\nx_train = tf.keras.utils.normalize(x_train, axis=1)\nx_test = tf.keras.utils.normalize(x_test, axis=1)\n\nimport matplotlib.pyplot as plt\n\nplt.imshow(x_train[0], cmap=plt.cm.binary)\n\nmodel = tf.keras.models.Sequential()\nprint(x_train.shape)\nmodel.add(tf.keras.layers.Flatten())\nmodel.add(tf.keras.layers.Dense(128, activation=tf.nn.relu))\nmodel.add(tf.keras.layers.Dense(128, activation=tf.nn.relu))\nmodel.add(tf.keras.layers.Dense(10, activation=tf.nn.softmax))\n\nmodel.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics='accuracy')\n\nmodel.fit(x_train, y_train, epochs=10)\n\nmodel.summary()\nkeras.utils.plot_model(model, \"./arch.png\", show_shapes=True)\n\nval_loss, val_accuracy = model.evaluate(x_test, y_test)\n\nmodel.save(\"mnist_num_reader.model\")","sub_path":"MNIST-Simple CNN/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"505480448","text":"# Import packages\nimport os\nimport cv2\nimport numpy as np\nimport tensorflow as tf\nimport sys\n\n# This is needed since the notebook is stored in the object_detection folder.\nsys.path.append(\"..\")\n\n# Import utilites\nfrom utils import label_map_util\nfrom utils import visualization_utils as vis_util\n\nclass VideoCamera(object):\n def __init__(self):\n # Using OpenCV to capture from device 0. If you have trouble capturing\n # from a webcam, comment the line below out and use a video file\n # instead.\n self.video = cv2.VideoCapture(1)\n # If you decide to use video.mp4, you must have this file in the folder\n # as the main.py.\n # self.video = cv2.VideoCapture('isis.mp4')\n # Name of the directory containing the object detection module we're using\n MODEL_NAME = 'inference_graph'\n\n # Grab path to current working directory\n CWD_PATH = os.getcwd()\n\n # Path to frozen detection graph .pb file, which contains the model that is used\n # for object detection.\n PATH_TO_CKPT = os.path.join(CWD_PATH,MODEL_NAME,'frozen_inference_graph.pb')\n\n # Path to label map file\n PATH_TO_LABELS = os.path.join(CWD_PATH,'training','labelmap.pbtxt')\n\n # Number of classes the object detector can identify\n NUM_CLASSES = 90\n\n ## Load the label map.\n # Label maps map indices to category names, so that when our convolution\n # network predicts `5`, we know that this corresponds to `king`.\n # Here we use internal utility functions, but anything that returns a\n # dictionary mapping integers to appropriate string labels would be fine\n label_map = label_map_util.load_labelmap(PATH_TO_LABELS)\n categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)\n self.category_index = label_map_util.create_category_index(categories)\n\n # Load the Tensorflow model into memory.\n detection_graph = tf.Graph()\n with detection_graph.as_default():\n od_graph_def = tf.GraphDef()\n with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:\n serialized_graph = fid.read()\n od_graph_def.ParseFromString(serialized_graph)\n tf.import_graph_def(od_graph_def, name='')\n\n self.sess = tf.Session(graph=detection_graph)\n\n\n # Define input and output tensors (i.e. data) for the object detection classifier\n\n # Input tensor is the image\n self.image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')\n\n # Output tensors are the detection boxes, scores, and classes\n # Each box represents a part of the image where a particular object was detected\n self.detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')\n\n # Each score represents level of confidence for each of the objects.\n # The score is shown on the result image, together with the class label.\n self.detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')\n self.detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')\n\n # Number of objects detected\n self.num_detections = detection_graph.get_tensor_by_name('num_detections:0')\n \n \n \n def __del__(self):\n self.video.release()\n \n def get_frame(self):\n # Acquire frame and expand frame dimensions to have shape: [1, None, None, 3]\n # i.e. a single-column array, where each item in the column has the pixel RGB value\n\n ret, frame = self.video.read()\n\n frame_expanded = np.expand_dims(frame, axis=0)\n\n # Perform the actual detection by running the model with the image as input\n \n (boxes, scores, classes, num) = self.sess.run(\n [self.detection_boxes, self.detection_scores, self.detection_classes, self.num_detections],\n feed_dict={self.image_tensor: frame_expanded})\n\n # Draw the results of the detection (aka 'visulaize the results')\n vis_util.visualize_boxes_and_labels_on_image_array(\n frame,\n np.squeeze(boxes),\n np.squeeze(classes).astype(np.int32),\n np.squeeze(scores),\n self.category_index,\n use_normalized_coordinates=True,\n line_thickness=8,\n min_score_thresh=0.60)\n\n # We are using Motion JPEG, but OpenCV defaults to capture raw images,\n # so we must encode it into JPEG in order to correctly display the\n # video stream.\n ret, jpeg = cv2.imencode('.jpg', frame)\n return jpeg.tobytes()\n","sub_path":"camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":4684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"289354183","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Aug 31 23:01:16 2017\n\n@author: owen\n\"\"\"\nimport collections\n#class Solution(object):\n# def findLHS(self, nums):\n# \"\"\"\n# :type nums: List[int]\n# :rtype: int\n# \"\"\"\n# cnt=collections.defaultdict(int)\n# for num in nums:\n# cnt[num]+=1\n# lastKey,lastLen=None,None\n# res=0\n#\n# for key,length in sorted(cnt.items()):\n# if lastKey is not None and lastKey+1==key: # must use lastKey is not None, do not use if lastKey, because lastKey could be 0\n# res=max(res,lastLen+length)\n# lastKey,lastLen=key,length\n# return res\n\nclass Solution:\n def findLHS(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n cnt=collections.defaultdict(int)\n for num in nums:\n cnt[num]+=1\n res=0\n for key in cnt.keys():\n if key+1 in cnt.keys(): \n res=max(res,cnt[key]+cnt[key+1])\n return res \n \nif __name__==\"__main__\":\n print(Solution().findLHS([1,3,2,2,5,2,3,7]))\n print(Solution().findLHS([0,3,0,0,1,1,1,3,1,3,2,3,2,3,-1,0,2,1,0,0,0,1,3,3,-3,3,3,1,3]))","sub_path":"594. Longest Harmonious Subsequence.py","file_name":"594. Longest Harmonious Subsequence.py","file_ext":"py","file_size_in_byte":1237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"112514939","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\nimport argparse\nimport sys\nimport os\nimport pandas as pd\nimport subprocess\nimport pexpect\nimport pdb\n\n\n#Arguments for argparse module:\nparser = argparse.ArgumentParser(description = '''A program that groups uids into their h-groups.''')\n \nparser.add_argument('H_group_file', nargs=1, type= str,\n default=sys.stdin, help = 'path to file with H-groups.')\n\nparser.add_argument('domain_file', nargs=1, type= str,\n default=sys.stdin, help = 'path to file ECOD domain description.')\n\n\nargs = parser.parse_args()\n\nH_group_file = args.H_group_file[0]\ndomain_file = args.domain_file[0]\n\n\ndef read_groups(H_group_file):\n\t'''Read X_group.H_group s into list\n\t'''\n\n\tH_groups = [] #Store H-groups\n\twith open(H_group_file) as file:\n\t\tfor line in file:\n\t\t\tline = line.rstrip() #remove \\n\n\t\t\tH_groups.append(line)\n\n\treturn H_groups\n\n#Functions\ndef group_ids(domain_file, H_groups):\n\t'''A function that gets the ids for each H-group and writes\n\tthem to a file called X_group.H_group.txt\n\t'''\n\tcount_H_groups = 0 #Count H groups\n\t\n\tfor H_group in H_groups:\n\t\tuids = [] #Save uids\n\t\tpdb_ids = [] #Save pdb_ids\n\t\tcount_H_groups +=1\n\t\t\n\t\tx_group = str(H_group).split('.')[0] #family level\n\t\thom = str(H_group).split('.')[1] #homology level\n\t\tfile_name = H_group +'.txt'\n\t\twith open(domain_file) as file:\n\t\t\tfor line in file:\n\t\t\t\tline = line.rstrip() #remove \\n\n\t\t\t\t\n\t\t\t\tif line[0] != '#': #Comment lines, include meta\n\t\t\t\t\tline = line.split(\"\\t\") #split on tab\n\t\t\t\t\tmatch_group = line[3].split('.')[0:2]\n\t\t\t\t\tif x_group == match_group[0]:\n\t\t\t\t\t\tif hom == match_group[1]:\n\t\t\t\t\t\t\tuid = line[0]\n\t\t\t\t\t\t\tpdb_id = line[1]\n\t\t\t\t\t\t\tuids.append(uid)\n\t\t\t\t\t\t\tpdb_ids.append(pdb_id)\n\t\t\t\t\n\t\t\t#After going through the entire file, the matched uids are written to a file\n\t\t\twrite_file(file_name, uids, pdb_ids)\n\t\t\tprint(H_group)\n\t\n\tprint(count_H_groups)\n\treturn None\n\n\ndef write_file(file_name, uids, pdb_ids):\n\t'''Write uids in same homology group to file\n\t'''\n\ttry:\n\t\twith open(file_name, \"w\") as file:\n\t\t\tfor i in range(0, len(uids)):\n\t\t\t\tfile.write(uids[i]+ '\\t' + pdb_ids[i] + '\\n')\n\n\texcept:\n\t\traise IOerror('Could not write file: ' + file_name)\n\n\treturn None\n\n\n#####MAIN PROGRAM#####\nH_groups = read_groups(H_group_file)\ngroup_ids(domain_file, H_groups)\n","sub_path":"ECOD/group_h.py","file_name":"group_h.py","file_ext":"py","file_size_in_byte":2307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"212734149","text":"inFile = open('words.txt', 'r') \nwords = inFile.read().splitlines()\n\npalindromes = []\n\nfor item in words:\n if item == item[::-1] and len(item) > 1:\n palindromes.append(item)\n\nwith open('palindromes.txt', 'w') as outFile:\n for item in palindromes:\n outFile.write(\"%s\\n\" % item)\n\noutFile.close()\n","sub_path":"palindromeWords.py","file_name":"palindromeWords.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"527223609","text":"# vim: tabstop=4 shiftwidth=4 softtabstop=4\n#\n# Copyright 2013 Mellanox Technologies, Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom vsa.model.san_base import SanBase\nfrom vsa.infra.params import iSCSIOpts, OsType\nfrom vsa.infra.config import scripts_dir\n\nclass GeneralProp(SanBase):\n set_params=[\n 'basewwn', 'defaultos', 'noifconfig', 'iscsiopt', 'iscsiredirect', 'redirothernet',\n 'snmpmanagers', 'hbarefresh', 'reserved_vgspace', 'default_bstype'\n ]\n\n def __init__(self):\n \"\"\"\n The description of __init__ comes here.\n @return\n \"\"\"\n SanBase.__init__(self,'General','General Properties')\n self.fullpath='/general'\n self.noifconfig=True\n self.defaultos=OsType.unknown\n self.basewwn='0008f1111fff0000'\n self.iscsiopt={}\n self.iscsiredirect=True\n self.hbarefresh=True\n self.redirif=''\n self.redirectcb=scripts_dir+'/vsa_redirect_callback.bash'\n self.redirothernet=False\n self.snmpmanagers=''\n self.reserved_vgspace = 10 # reserved vg space in %\n self.default_bstype = 'rdwr' # default bstype for luns\n\n def set_iscsiopt(self,san,key,val='',test=0):\n \"\"\"\n The description of set_iscsiopt comes here.\n @param san\n @param key\n @param val\n @param test\n @return\n \"\"\"\n return san.robots.set_strdict(self,key,val,test,iSCSIOpts)\n","sub_path":"src/vsa/model/general_prop.py","file_name":"general_prop.py","file_ext":"py","file_size_in_byte":1954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"77145346","text":"import pulp\nimport random\nimport networkx as nx\nimport math,time\nimport matplotlib.pyplot as plt\n\ndef make_random_graph(n):\n G = nx.DiGraph()\n for i in range(n):\n G.add_node(i,x=random.randint(0,10),y=random.randint(0,10))\n for i in range(n):\n for j in range(n):\n if i != j:\n dist = math.sqrt((G.nodes()[i]['x']-G.nodes()[j]['x'])**2 + (G.nodes()[i]['y']-G.nodes()[j]['y'])**2)\n G.add_edge(i,j,dist=dist)\n return G\n\ndef get_random_sequential_order(num_node,m):\n box = set()\n # 選好順序(i,j)をm個まで取得 (i < j)\n while len(box) < m:\n i = random.randint(0,num_node-2)\n j = random.randint(i+1,num_node-1)\n if (i,j) not in box:\n box.add((i,j))\n return box\n\ndef solve_SOP(G,precedense,num_node,ss):\n problem = pulp.LpProblem(name='SOP',sense=pulp.LpMinimize)\n x = {(i,j):pulp.LpVariable(cat=\"Binary\",name=f\"x_{i}_{j}\") for (i,j) in G.edges()}\n u = {i:pulp.LpVariable(cat=\"Integer\",name=f\"u_{i}\",lowBound=1,upBound=num_node-1) for i in G.nodes()}\n cost = {(i,j):G.adj[i][j]['dist'] for (i,j) in G.edges()}\n\n problem += pulp.lpSum([x[(i,j)]*cost[(i,j)] for (i,j) in G.edges()])\n\n\n for i in G.nodes():\n if i != num_node-1:\n problem.addConstraint(pulp.lpSum([x[(i,j)] for j in range(num_node) if j != i]) == 1, f'outflow_{i}')\n if i != 0:\n problem.addConstraint(pulp.lpSum([x[(j,i)] for j in range(num_node) if j != i]) == 1, f'inflow_{i}')\n\n for i,j in G.edges():\n if i != ss and j != ss:\n problem.addConstraint(u[i]-u[j]+(num_node-1)*x[i,j] <= num_node-2, f'up_{i}_{j}')\n\n for i,j in precedense:\n problem.addConstraint(u[i]+1 <= u[j], f'sequential_{i}_{j}')\n\n u[ss] = 0\n\n print('start solving')\n start = time.time()\n status = problem.solve(pulp.CPLEX())\n # status = problem.solve()\n print(pulp.LpStatus[status])\n\n duartion = time.time()-start\n print(duartion)\n\n if pulp.LpStatus[status] != 'Optimal':\n print('Infeasible!')\n exit()\n\n return x,u,duartion\n\ndef plot(G,x,u,precedense,ss):\n pos = {i: (G.nodes()[i]['x'], G.nodes()[i]['y']) for i in G.nodes()}\n nx.draw_networkx_nodes(G, pos, node_size=100, alpha=1, node_color='skyblue')\n edgelist = [e for e in G.edges() if x[e].value() > 0]\n nx.draw_networkx_edges(G, pos, edgelist=edgelist,width=3)\n precedense = [e for e in precedense]\n nx.draw_networkx_edges(G, pos, edgelist=precedense,edge_color='red')\n for i in G.nodes():\n if i != ss:\n plt.text(G.nodes()[i]['x'],G.nodes()[i]['y'],int(u[i].value()))\n else:\n plt.text(G.nodes()[i]['x'],G.nodes()[i]['y'],u[i])\n\n\n plt.show()\n\ndef main():\n # node数\n num_node = 10\n # 選好順序の個数\n num_precedence = 5\n # 始点\n ss = 0\n # 選好順序リストの取得\n precedense = get_random_sequential_order(num_node,num_precedence)\n print(precedense)\n # random graphの獲得\n G = make_random_graph(num_node)\n # SOP\n x,u = solve_SOP(G,precedense,num_node,ss)\n # 描画\n plot(G,x,u,precedense,ss)\n\n\n\n\n\nif __name__ == '__main__':\n main()","sub_path":"nsop.py","file_name":"nsop.py","file_ext":"py","file_size_in_byte":3185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"421666590","text":"'''Copyright Gigaspaces, 2017, All Rights Reserved'''\nfrom cloudify.plugins import lifecycle\n\nOP_START = 'hacker.interfaces.lifecycle.start'\nOP_STOP = 'hacker.interfaces.lifecycle.stop'\nOP_SS_C = 'hacker.interfaces.lifecycle.create_snapshots'\nOP_SS_D = 'hacker.interfaces.lifecycle.delete_snapshots'\nREQUIRED_OPS = set([OP_START, OP_SS_D, OP_STOP])\n\n\ndef build_instance_sequence(instance, operation,\n state_start=None, state_end=None):\n '''\n Builds sequenced subgraph tasks for an instance\n\n .. note::\n\n The sequence will not be built if the instance provided\n does not have a node with an operation defined in the\n operation parameter.\n\n :param `CloudifyWorkflowNodeInstance` instance:\n Node instance to execute tasks against\n :param str operation:\n Node (lifecycle) operation to execute\n :param str state_start:\n Verb to describe operation start\n :param str state_stop:\n Verb to describe operation finish\n '''\n tasks = list()\n # Only build the sequence if the node operation exists\n if operation not in instance.node.operations:\n return tasks\n # Add task starting state\n if state_start:\n tasks.append(instance.send_event('%s host' % state_start))\n tasks.append(instance.set_state(state_start.lower()))\n # Add task operation\n tasks.append(instance.execute_operation(operation))\n # Add task ended state\n if state_end:\n tasks.append(instance.send_event('%s host' % state_end))\n tasks.append(instance.set_state(state_end.lower()))\n return tasks\n\n\ndef build_instance_subgraph(instance, graph):\n '''\n Builds a subgraph for an instance\n\n :param `CloudifyWorkflowNodeInstance` instance:\n Node instance to execute tasks against\n :param `TaskDependencyGraph` graph:\n Task graph to create sequences from\n '''\n # Init a \"stop instance\" subgraph\n sg_stop = graph.subgraph('stop_subgraph')\n seq_stop = sg_stop.sequence()\n seq_stop.add(*build_instance_sequence(\n instance, OP_STOP, 'Stopping', 'Stopped'))\n # Init a \"recreate snapshots\" subgraph\n sg_snap = graph.subgraph('snapshot_subgraph')\n seq_snap = sg_snap.sequence()\n if OP_SS_D in instance.node.operations:\n seq_snap.add(*build_instance_sequence(instance, OP_SS_D))\n if OP_SS_C in instance.node.operations:\n seq_snap.add(*build_instance_sequence(instance, OP_SS_C))\n # Init a \"start instance\" subgraph\n sg_start = graph.subgraph('stop_subgraph')\n seq_start = sg_start.sequence()\n seq_start.add(*build_instance_sequence(\n instance, OP_START, 'Starting', 'Started'))\n # Create subgraph dependencies\n graph.add_dependency(sg_snap, sg_stop)\n graph.add_dependency(sg_start, sg_snap)\n\n\ndef refresh_snapshots(ctx, **_):\n '''\n Executes a complex, graph-based set of lifecycle events\n to stop all host (compute) instances, delete all\n existing instance snapshots, take new snapshots\n of all attached volumes, and start the instances\n back up when complete.\n '''\n graph = ctx.graph_mode()\n # Find all compute hosts and build a sequence graph\n for node in ctx.nodes:\n if not REQUIRED_OPS.issubset(node.operations):\n ctx.logger.warn(\n 'Skipping refresh_snapshots workflow for node \"%s\" because '\n 'it does not have all required operations defined' % node.id)\n continue\n # Iterate over each node instance\n for instance in node.instances:\n if not lifecycle.is_host_node(instance):\n ctx.logger.warn(\n 'Skipping refresh_snapshots workflow for node instance '\n '\"%s\" because it is not a compute host' % instance.id)\n continue\n build_instance_subgraph(instance, graph)\n # Execute the sequences\n return graph.execute()\n","sub_path":"advanced-workflows/task-graphs-lab/solution/plugins/custom/plugin/workflows.py","file_name":"workflows.py","file_ext":"py","file_size_in_byte":3942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"26359167","text":"\"\"\"\n Library of EV3 robot functions that are useful in many different applications. For example things\n like arm_up, arm_down, driving around, or doing things with the Pixy camera.\n\n Add commands as needed to support the features you'd like to implement. For organizational\n purposes try to only write methods into this library that are NOT specific to one tasks, but\n rather methods that would be useful regardless of the activity. For example, don't make\n a connection to the remote control that sends the arm up if the ir remote control up button\n is pressed. That's a specific input --> output task. Maybe some other task would want to use\n the IR remote up button for something different. Instead just make a method called arm_up that\n could be called. That way it's a generic action that could be used in any task.\n\"\"\"\n\nimport ev3dev.ev3 as ev3\nimport math\nimport time\n\nMAX_SPEED = 900\n\nclass Snatch3r(object):\n \"\"\"Commands for the Snatch3r robot that might be useful in many different programs.\"\"\"\n\n def __init__(self):\n # Connect two large motors on output ports B and C\n self.left_motor = ev3.LargeMotor(ev3.OUTPUT_B)\n self.right_motor = ev3.LargeMotor(ev3.OUTPUT_C)\n self.max_speed = 900\n self.arm_motor = ev3.MediumMotor(ev3.OUTPUT_A)\n self.touch_sensor = ev3.TouchSensor()\n self.color_sensor = ev3.ColorSensor()\n self.ir_sensor = ev3.InfraredSensor()\n self.pixy = ev3.Sensor(driver_name=\"pixy-lego\")\n self.running = True\n\n\n # Check that the motors are actually connected\n assert self.left_motor.connected\n assert self.right_motor.connected\n assert self.arm_motor.connected\n assert self.touch_sensor\n assert self.color_sensor\n assert self.ir_sensor\n assert self.pixy\n\n def drive_inches(self, inch_target, speed):\n \"\"\"drives robot forward or backward at a specified speed depending on\n whether the distance is postive or negative\"\"\"\n\n degrees_per_inch = 90\n motor_turns_needed_in_degrees = inch_target * \\\n degrees_per_inch\n self.left_motor.run_to_rel_pos(\n position_sp=motor_turns_needed_in_degrees,\n speed_sp=speed,\n stop_action=\"brake\")\n self.right_motor.run_to_rel_pos(\n position_sp=motor_turns_needed_in_degrees, speed_sp=speed,\n stop_action=\"brake\")\n self.left_motor.wait_while(ev3.Motor.STATE_RUNNING)\n self.right_motor.wait_while(ev3.Motor.STATE_RUNNING)\n\n def turn_degrees(self, degrees_to_turn, turn_speed_sp):\n \"\"\"Turns robot to the specified degree at a specified speed. Turns\n left if the degree is positive and right if the degree is negative.\"\"\"\n\n length = math.fabs(degrees_to_turn) * 0.049\n degrees_per_inch = 90\n motor_turns_needed_in_degrees = length * \\\n degrees_per_inch\n if degrees_to_turn > 0:\n self.left_motor.run_to_rel_pos(\n position_sp=-motor_turns_needed_in_degrees,\n speed_sp=turn_speed_sp,\n stop_action=\"brake\")\n self.right_motor.run_to_rel_pos(\n position_sp=motor_turns_needed_in_degrees, speed_sp=turn_speed_sp,\n stop_action=\"brake\")\n self.left_motor.wait_while(ev3.Motor.STATE_RUNNING)\n self.right_motor.wait_while(ev3.Motor.STATE_RUNNING)\n\n elif degrees_to_turn < 0:\n self.left_motor.run_to_rel_pos(\n position_sp=motor_turns_needed_in_degrees,\n speed_sp=turn_speed_sp,\n stop_action=\"brake\")\n self.right_motor.run_to_rel_pos(\n position_sp=-motor_turns_needed_in_degrees,\n speed_sp=turn_speed_sp,\n stop_action=\"brake\")\n self.left_motor.wait_while(ev3.Motor.STATE_RUNNING)\n self.right_motor.wait_while(ev3.Motor.STATE_RUNNING)\n\n def arm_calibration(self):\n \"\"\"Lifts the robots arm until the touch sensor is pressed, then it\n lowers the arm 14.2 revolutions and sets the position to 0\"\"\"\n\n self.arm_motor.run_forever(speed_sp=self.max_speed)\n\n while not self.touch_sensor.is_pressed:\n time.sleep(0.01)\n\n self.arm_motor.stop(stop_action=\"brake\")\n ev3.Sound.beep()\n\n arm_revolutions_for_full_range = 14.2 * 360\n self.arm_motor.run_to_rel_pos(position_sp=-arm_revolutions_for_full_range)\n self.arm_motor.wait_while(ev3.Motor.STATE_RUNNING)\n ev3.Sound.beep()\n\n self.arm_motor.position = 0\n\n def arm_up(self):\n \"\"\"Lifts the robots arm until the touch sensor is pressed, then it\n stops\"\"\"\n\n self.arm_motor.run_forever(speed_sp=self.max_speed)\n while not self.touch_sensor.is_pressed:\n time.sleep(0.01)\n self.arm_motor.stop(stop_action=\"brake\")\n ev3.Sound.beep()\n\n def arm_down(self):\n \"\"\"Lowers the arms to the 0 position defined from calibration\"\"\"\n\n self.arm_motor.run_to_abs_pos(position_sp=0, speed_sp=self.max_speed)\n self.arm_motor.wait_while(\n ev3.Motor.STATE_RUNNING) # Blocks until the motor finishes running\n ev3.Sound.beep()\n\n def shutdown(self):\n \"\"\"Stops all robot actions and exits code\"\"\"\n\n self.running = False\n\n self.left_motor.stop(stop_action=\"brake\")\n self.right_motor.stop(stop_action=\"brake\")\n self.arm_motor.stop(stop_action=\"brake\")\n\n ev3.Leds.set_color(ev3.Leds.LEFT, ev3.Leds.GREEN)\n ev3.Leds.set_color(ev3.Leds.RIGHT, ev3.Leds.GREEN)\n\n print('Goodbye')\n ev3.Sound.speak(\"Goodbye\").wait()\n\n def loop_forever(self):\n # This is a convenience method that I don't really recommend for most programs other than m5.\n # This method is only useful if the only input to the robot is coming via mqtt.\n # MQTT messages will still call methods, but no other input or output happens.\n # This method is given here since the concept might be confusing.\n while self.running:\n time.sleep(\n 0.1) # Do nothing (except receive MQTT messages) until an MQTT message calls shutdown.\n\n def stop(self):\n \"\"\"Stops all robot actions\"\"\"\n\n self.left_motor.stop(stop_action=\"brake\")\n self.right_motor.stop(stop_action=\"brake\")\n self.arm_motor.stop(stop_action=\"brake\")\n\n def forward(self, left_speed_entry, right_speed_entry):\n \"\"\"moves robot forward at specified speed\"\"\"\n self.left_motor.run_forever(speed_sp=left_speed_entry)\n self.right_motor.run_forever(speed_sp=right_speed_entry)\n\n def left(self, left_speed_entry):\n \"\"\"moves robots left track forward at specified speed\"\"\"\n self.left_motor.run_forever(speed_sp=left_speed_entry)\n\n def back(self, left_speed_entry, right_speed_entry):\n \"\"\"moves robot back at specified speed\"\"\"\n self.left_motor.run_forever(speed_sp=-left_speed_entry)\n self.right_motor.run_forever(speed_sp=-right_speed_entry)\n\n def right(self, right_speed_entry):\n \"\"\"moves robots right track forward at specified speed\"\"\"\n self.right_motor.run_forever(speed_sp=right_speed_entry)\n\n def seek_beacon(self):\n \"\"\"\n Uses the IR Sensor in BeaconSeeker mode to find the beacon. If the beacon is found this return True.\n If the beacon is not found and the attempt is cancelled by hitting the touch sensor, return False.\n \"\"\"\n forward_speed = 300\n turn_speed = 100\n\n # To find the IR beacon (with the remote in beacon mode)\n beacon_seeker = ev3.BeaconSeeker() # Assumes remote is set to channel 1\n print(\"Heading\", beacon_seeker.heading)\n print(\"Distance\", beacon_seeker.distance)\n\n while not self.touch_sensor.is_pressed:\n current_heading = beacon_seeker.heading # use the beacon_seeker\n # heading\n current_distance = beacon_seeker.distance # use the beacon_seeker distance\n\n if current_distance == -128:\n # If the IR Remote is not found just sit idle for this program until it is moved.\n print(\"IR Remote not found. Distance is -128\")\n self.stop()\n else:\n if math.fabs(current_heading) < 2:\n # Close enough of a heading to move forward\n print(\"On the right heading. Distance: \", current_distance)\n if current_distance <= 1:\n self.stop()\n print(forward_speed)\n self.drive_inches(3, forward_speed)\n print('great')\n\n return True\n else:\n self.forward(forward_speed, forward_speed)\n elif math.fabs(current_heading) < 10:\n if current_heading < 0:\n self.forward(-turn_speed, turn_speed)\n elif current_heading > 0:\n self.forward(turn_speed, -turn_speed)\n else:\n print(\"Heading is too far off to fix: \", current_heading)\n\n time.sleep(0.02)\n\n # The touch_sensor was pressed to abort the attempt if this code runs.\n print(\"Abandon ship!\")\n self.stop()\n return False\n","sub_path":"libs/robot_controller.py","file_name":"robot_controller.py","file_ext":"py","file_size_in_byte":9471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"446805710","text":"from xmlrpclib import ServerProxy\n\nif __name__ == \"__main__\":\n client = ServerProxy(\"http://localhost:5020\")\n #print client.current_state()\n \n client.new_run(1234567)\n #Test firing SuperK laser (monitor power meter).\n #client.superk_master_mode(1, 1000, 6000, 6100, 5, 14, 5000, 0.25)\n #Test the current state after firing laser.\n #currentState = controller.current_state()\n #logging.debug('Current state: \\n{}'.format(currentState))\n client.laserheads_master_mode(4, 1000, 20000, 4, 14, 2000, 0.25)","sub_path":"testing/smellie_client_test.py","file_name":"smellie_client_test.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"176787323","text":"######## galIMF ##########\r\n\r\n\r\n#python3 code, last update Sat 27 May\r\n# This is the main module, galIMF.py, controling and operating the other two modules IGIMF and OSGIMF\r\n#--------------------------------------------------------------------------------------------------------------------------------\r\n#--------------------------------------------------------------------------------------------------------------------------------\r\n#importing modules and libraries\r\n\r\nimport math\r\nimport csv # csv and izip/zip are used to create output files\r\ntry:\r\n from itertools import izip as zip\r\nexcept ImportError: # will be python 3.x series\r\n pass\r\n\r\n\r\n\r\n#--------------------------------------------------------------------------------------------------------------------------------\r\n\r\n# The star mass resolution is the lower resolution among\r\n# the resolution of histogram (resolution_histogram_relative)\r\n# and the resolution of star generation (resolution_star_... in the file IMF_schulz.py)\r\nresolution_histogram_relative = 0.01 # The star mass resolution of histogram is: the star mass * resolution_histogram_relative\r\n#also re-defined in a test file, it scales automatically with the SFR\r\n\r\n# function_galIMF takes in I/OS-GMF parameters and create output files\r\ndef function_galIMF(IorS, SFR, alpha3_model, delta_t, Z_over_H, I_ecl, M_ecl_U, M_ecl_L, beta_model,\r\n I_str, M_str_L, alpha_1, alpha1_model, M_turn, alpha_2, alpha2_model, M_turn2, M_str_U, printout=False):\r\n if IorS == \"I\":\r\n global List_xi, List_M_str_for_xi_str\r\n Function_draw_IGIMF(SFR, alpha3_model, beta_model, delta_t, Z_over_H,\r\n I_ecl, M_ecl_U, M_ecl_L, I_str, M_str_L, alpha_1, alpha1_model,\r\n M_turn, alpha_2, alpha2_model, M_turn2, M_str_U)\r\n if printout==True:\r\n # write data for GalIMF_Result/IGIMF_shape\r\n with open('GalIMF_IGIMF.txt', 'w') as f:\r\n writer = csv.writer(f, delimiter=' ')\r\n f.write(\"# IGIMF output file. It gives the IGIMF. The columns are:\\n# mass xi\\n\\n\")\r\n writer.writerows(\r\n zip(List_M_str_for_xi_str, List_xi))\r\n print(\"\\n### IGIMF data generated in the file GalIMF_IGIMF.txt ###\\n\")\r\n return\r\n elif IorS ==\"OS\":\r\n global mass_range_center, mass_range, mass_range_upper_limit, mass_range_lower_limit, star_number\r\n sample_for_one_epoch(SFR, alpha3_model, delta_t, I_ecl, M_ecl_U, M_ecl_L, beta_model,\r\n I_str, M_str_L, alpha_1, alpha1_model, M_turn, alpha_2, alpha2_model, M_turn2, Z_over_H, M_str_U)\r\n Function_draw(SFR, M_str_L, M_str_U, M_ecl_L, resolution_histogram_relative)\r\n function_make_drop_line()\r\n # write data for GalIMF_Result/histogram\r\n function_draw_histogram()\r\n if printout == True:\r\n with open('GalIMF_OSGIMF.txt', 'w') as f:\r\n writer = csv.writer(f, delimiter=' ')\r\n f.write(\r\n \"# OSGIMF output file. It gives the star number in each mass range. The columns are:\\n# mass_range_center mass_range mass_range_upper_limit mass_range_lower_limit star_number_in_the_mass_range\\n\\n\")\r\n writer.writerows(\r\n zip(mass_range_center, mass_range, mass_range_upper_limit, mass_range_lower_limit, star_number))\r\n print(\"\\n### OSGIMF data generated in the file GalIMF_OSGIMF.txt ###\\n\")\r\n return\r\n else:\r\n print(\"Input parameter 'IorS' wrong!\")\r\n return\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n######## IGIMF.py #########\r\n\r\n#python3 code, last update Sat 27 May\r\n# IGIMF.py is module computing IGIMF as described in Yan et al 2017\r\n#--------------------------------------------------------------------------------------------------------------------------------\r\n#--------------------------------------------------------------------------------------------------------------------------------\r\n\r\n#--------------------------------------------------------------------------------------------------------------------------------\r\n#initialization of floating length arrays\r\nList_M_ecl_for_xi_ecl = []\r\nList_xi_ecl = []\r\nList_M_str_for_xi_str = []\r\nList_xi_str = []\r\nList_xi = []\r\n#--------------------------------------------------------------------------------------------------------------------------------\r\n#Function_dar_IGIMF computes the IGIMF by combining Function_ECMF (embedded cluster mass\r\n# function) and Function_IMF (stellar mass function in individual embedded clusters)\r\n# equation (1) from Yan et al. 2017\r\n# function returns values of global lists:\r\n# List_M_ecl_for_xi_ecl - list of masses, M_ecl, of embedded clusters for ECMF\r\n# List_xi IGIMF (xi_IGIMF = dN/dm, dN number of star in a mass bin dm) values\r\n# by default normalized to total mass in Msun units (= SFR*10Myr)\r\n# List_M_str_for_xi_str list of stellar masses for stellar IMF in Msun units\r\n# List_xi_L logarithmic IGIMF (xi_IGIMF_L = dN/d log_10 m)\r\n# List_Log_M_str - natural logarithm\r\ndef Function_draw_IGIMF(SFR, alpha3_model, beta_model, delta_t, Z_over_H, I_ecl, M_ecl_U, M_ecl_L,\r\n I_str, M_str_L, alpha_1, alpha1_model, M_turn, alpha_2, alpha2_model, M_turn2, M_str_U):\r\n if SFR != 0:\r\n global List_M_ecl_for_xi_ecl, List_xi, List_M_str_for_xi_str, List_xi_L, List_Log_M_str, x_IMF, y_IMF\r\n Function_ECMF(SFR, beta_model, delta_t, I_ecl, M_ecl_U, M_ecl_L, Z_over_H)\r\n x_IMF = []\r\n y_IMF = []\r\n alpha_1_change = Function_alpha_1_change(alpha_1, alpha1_model, Z_over_H)\r\n alpha_2_change = Function_alpha_2_change(alpha_2, alpha2_model, Z_over_H)\r\n alpha_3_change = Function_alpha_3_change(alpha3_model, List_M_ecl_for_xi_ecl[-1], Z_over_H)\r\n function_draw_xi_str(M_str_L, List_M_ecl_for_xi_ecl[-1], I_str, M_str_L, alpha_1_change,\r\n M_turn, alpha_2_change, M_turn2, alpha_3_change, M_str_U)\r\n List_xi = [0] * len(x_IMF)\r\n number_of_ecl = len(List_M_ecl_for_xi_ecl) - 1\r\n Function_IMF(alpha3_model, Z_over_H, I_str, M_str_L, alpha_1_change, M_turn, alpha_2_change, M_turn2, M_str_U,\r\n number_of_ecl, 0)\r\n x_IMF = []\r\n y_IMF = []\r\n function_draw_xi_str(M_str_L, List_M_ecl_for_xi_ecl[-1], I_str, M_str_L, alpha_1_change,\r\n M_turn, alpha_2_change, M_turn2, alpha_3_change, M_str_U)\r\n List_M_str_for_xi_str = x_IMF\r\n lenth = len(List_M_str_for_xi_str)\r\n List_xi_L = [0] * lenth\r\n List_Log_M_str = [0] * lenth\r\n Function_xi_to_xiL(lenth - 1, List_xi[0])\r\n else:\r\n List_M_str_for_xi_str = [0, 1000]\r\n List_xi = [0, 0]\r\n return\r\n\r\n#Function_ECMF computes IMF of star clusters (ECMF - embedded cluster mass function)\r\n#The assumed shape of ECMF is single powerlaw with slope beta (function of SFR)\r\n# the empyrical lower limit for star cluster mass if 50 Msun\r\n# the hypotetical upper mass limit is 10^9 Msun, but the M_ecl^max is computed, eq (12) in Yan et al. 2017\r\ndef Function_ECMF(SFR, beta_model, delta_t, I_ecl, M_ecl_U, M_ecl_L, Z_over_H):\r\n global List_M_ecl_for_xi_ecl, List_xi_ecl, x_ECMF, y_ECMF\r\n x_ECMF = []\r\n y_ECMF = []\r\n beta_change = Function_beta_change(beta_model, SFR, Z_over_H)\r\n function_draw_xi_ecl(M_ecl_L, SFR, delta_t, I_ecl, M_ecl_U, M_ecl_L, beta_change)\r\n List_M_ecl_for_xi_ecl = x_ECMF\r\n del List_M_ecl_for_xi_ecl[0]\r\n del List_M_ecl_for_xi_ecl[-1]\r\n List_xi_ecl = y_ECMF\r\n del List_xi_ecl[0]\r\n del List_xi_ecl[-1]\r\n return\r\n\r\n#Function_IMF computes stellar IMF in individual embedded star clusters\r\ndef Function_IMF(alpha3_model, Z_over_H, I_str, M_str_L, alpha_1_change, M_turn, alpha_2_change, M_turn2, M_str_U, number_of_ecl, i):\r\n while i < number_of_ecl:\r\n global List_M_str_for_xi_str, List_xi_str, List_M_ecl_for_xi_ecl, x_IMF, y_IMF\r\n x_IMF = []\r\n y_IMF = []\r\n M_ecl = List_M_ecl_for_xi_ecl[i]\r\n alpha_3_change = Function_alpha_3_change(alpha3_model, M_ecl, Z_over_H)\r\n # Here only alpha_3_change is recalculated as alpha1(2)_change do not depend on M_ecl thus do not change.\r\n function_draw_xi_str(M_str_L, M_ecl, I_str, M_str_L, alpha_1_change, M_turn,\r\n alpha_2_change, M_turn2, alpha_3_change, M_str_U)\r\n List_M_str_for_xi_str = x_IMF\r\n List_xi_str = y_IMF\r\n number_of_str = len(List_M_str_for_xi_str)\r\n Function_update_List_xi(i, number_of_str, 0)\r\n (i) = (i+1)\r\n return\r\n\r\n\r\ndef Function_update_List_xi(i, number_of_str, j):\r\n while j < number_of_str:\r\n global List_xi, List_xi_str, List_xi_ecl, List_M_ecl_for_xi_ecl\r\n List_xi[j] += List_xi_str[j] * List_xi_ecl[i] * (List_M_ecl_for_xi_ecl[i+1] - List_M_ecl_for_xi_ecl[i])\r\n (j) = (j+1)\r\n return\r\n\r\n\r\ndef Function_xi_to_xiL(i, unit):\r\n global List_xi_L, List_xi, List_M_str_for_xi_str, List_Log_M_str\r\n while i > -1:\r\n if List_xi[i] == 0:\r\n List_xi[i] = 10**(-5)\r\n List_xi_L[i] = math.log((List_xi[i] * math.log(10) * List_M_str_for_xi_str[i] / unit * 1800), 10)\r\n List_Log_M_str[i] = math.log(List_M_str_for_xi_str[i] , 10)\r\n (i) = (i-1)\r\n return\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n############ OSGIMF #############\r\n\r\n\r\n#-----------------------------------------------------------------------------------------\r\n#initialization of open-length arrays\r\n#-----------------------------------------------------------------------------------------\r\nList_M_str_all_i = []\r\nList_n_str_all_i = []\r\nList_mass_grid_x_axis = []\r\nList_star_number_in_mass_grid_y_axis = []\r\nList_star_number_in_mass_grid_y_axis2 = []\r\nList_star_number_in_mass_grid_y_axis3 = []\r\nList_star_number_in_mass_grid_y_axis4 = []\r\nList_mass_grid = []\r\nList_star_number_in_mass_grid = []\r\n#-----------------------------------------------------------------------------------------\r\n\r\n#This function gives the stellar masses in entire galaxy in unsorted manner\r\n#i.e. the stars are grouped in parent clusters\r\ndef sample_for_one_epoch(SFR, alpha3_model, delta_t, I_ecl, M_ecl_U, M_ecl_L, beta_model,\r\n I_str, M_str_L, alpha_1, alpha1_model, M_turn, alpha_2, alpha2_model, M_turn2, Z_over_H, M_str_U):\r\n global List_M_str_all_i, List_n_str_all_i, list_M_ecl_i\r\n beta_change = Function_beta_change(beta_model, SFR, Z_over_H)\r\n Function_sample_cluster(SFR, delta_t, I_ecl, M_ecl_U, M_ecl_L, beta_change)\r\n len_of_M_ecl_list = len(list_M_ecl_i)\r\n List_M_str_all_i = []\r\n List_n_str_all_i = []\r\n Function_sample_star_from_clusters(alpha3_model, I_str, M_str_L, alpha_1, alpha1_model, M_turn, alpha_2, alpha2_model,\r\n M_turn2, Z_over_H, M_str_U, len_of_M_ecl_list, 0)\r\n return\r\n\r\n#Masses of formed clusters\r\ndef Function_sample_cluster(SFR, delta_t, I_ecl, M_ecl_U, M_ecl_L, beta_change):\r\n global list_m_ecl_i, list_n_ecl_i, list_M_ecl_i, M_max_ecl\r\n list_m_ecl_i = []\r\n list_n_ecl_i = []\r\n list_M_ecl_i = []\r\n M_max_ecl = 0\r\n function_sample_from_ECMF(SFR, delta_t, I_ecl, M_ecl_U, M_ecl_L, beta_change)\r\n return\r\n#Stellar masses in a given star cluster\r\ndef Function_sample_star_from_clusters(alpha3_model, I_str, M_str_L, alpha_1, alpha1_model, M_turn, alpha_2, alpha2_model,\r\n M_turn2, Z_over_H, M_str_U, len_of_M_ecl_list, i):\r\n while i < len_of_M_ecl_list: # sample a total number of i clusters\r\n global List_M_str_all_i, List_n_str_all_i, list_m_str_i, list_n_str_i, list_M_str_i\r\n list_m_str_i = []\r\n list_n_str_i = []\r\n list_M_str_i = []\r\n alpha_1_change = Function_alpha_1_change(alpha_1, alpha1_model, Z_over_H)\r\n alpha_2_change = Function_alpha_2_change(alpha_2, alpha2_model, Z_over_H)\r\n alpha_3_change = Function_alpha_3_change(alpha3_model, list_M_ecl_i[i], Z_over_H)\r\n function_sample_from_IMF(list_M_ecl_i[i],\r\n I_str, M_str_L, alpha_1_change, M_turn, alpha_2_change, M_turn2, alpha_3_change, M_str_U)\r\n List_M_str_all_i += [list_M_str_i] # save all i clusters in \"all_i\" list\r\n List_n_str_all_i += [list_n_str_i]\r\n (i) = (i+1)\r\n return\r\n\r\n\r\n##################################################################################\r\n## The sampling is finished here. Below are just sorting, binning, and plotting.##\r\n##################################################################################\r\n\r\n# Now star mass are recorded in individual star clusters in the \"List_M_str_all_i\" and \"List_n_str_all_i\"\r\n# we have for the whole galaxy: cluster mass, number of cluster with certain mass\r\n# and for each cluster: star mass, number of stars with certain mass\r\n# Sort out all star mass in a epoch into a mass grid\r\n\r\n# Main purporpose here is the sorting of the stellar masses and preparation for\r\n#plotting output\r\ndef Function_draw(SFR, M_str_low, M_str_up, M_ecl_low, resolution_histogram_relative):\r\n M_low = min(M_str_low, M_ecl_low)\r\n global List_mass_grid, List_star_number_in_mass_grid, List_mass_grid_x_axis, List_star_number_in_mass_grid_y_axis\r\n # for all stars\r\n List_mass_grid = []\r\n Function_mass_grid(SFR, M_str_up, M_low, resolution_histogram_relative)\r\n List_mass_grid += [M_low]\r\n List_star_number_in_mass_grid = [0] * (len(List_mass_grid) - 1)\r\n Function_sort_out_star_mass(0)\r\n ##########\r\n List_mass_grid_x_axis = [M_str_up]\r\n make_mass_grid_x_axis(1)\r\n List_mass_grid_x_axis += [M_low]\r\n List_star_number_in_mass_grid_y_axis = []\r\n make_star_number_in_mass_grid_y_axis(0)\r\n List_mass_grid_x_axis = [List_mass_grid_x_axis[0]] + List_mass_grid_x_axis\r\n List_mass_grid_x_axis += [List_mass_grid_x_axis[-1]]\r\n List_star_number_in_mass_grid_y_axis = [0.0000001] + List_star_number_in_mass_grid_y_axis\r\n List_star_number_in_mass_grid_y_axis += [0.0000001]\r\n # for most massive star\r\n global List_mass_grid2, List_star_number_in_mass_grid2, List_mass_grid_x_axis2, List_star_number_in_mass_grid_y_axis2\r\n List_mass_grid2 = List_mass_grid\r\n List_star_number_in_mass_grid2 = [0] * (len(List_mass_grid2) - 1)\r\n Function_sort_out_star_mass2(0)\r\n ##########\r\n List_star_number_in_mass_grid_y_axis2 = []\r\n make_star_number_in_mass_grid_y_axis2(0)\r\n List_star_number_in_mass_grid_y_axis2 = [0.0000001] + List_star_number_in_mass_grid_y_axis2\r\n List_star_number_in_mass_grid_y_axis2 += [0.0000001]\r\n ###################################\r\n global List_mass_grid3, List_star_number_in_mass_grid3, List_mass_grid_x_axis3, List_star_number_in_mass_grid_y_axis3\r\n List_mass_grid3 = List_mass_grid\r\n List_star_number_in_mass_grid3 = [0] * (len(List_mass_grid3) - 1)\r\n Function_sort_out_star_mass3(0)\r\n ##########\r\n List_star_number_in_mass_grid_y_axis3 = []\r\n make_star_number_in_mass_grid_y_axis3(0)\r\n List_star_number_in_mass_grid_y_axis3 = [0.0000001] + List_star_number_in_mass_grid_y_axis3\r\n List_star_number_in_mass_grid_y_axis3 += [0.0000001]\r\n ###################################\r\n global List_mass_grid4, List_star_number_in_mass_grid4, List_mass_grid_x_axis4, List_star_number_in_mass_grid_y_axis4\r\n List_mass_grid4 = List_mass_grid\r\n List_star_number_in_mass_grid4 = [0] * (len(List_mass_grid4) - 1)\r\n Function_sort_out_star_mass4(0)\r\n ##########\r\n List_star_number_in_mass_grid_y_axis4 = []\r\n make_star_number_in_mass_grid_y_axis4(0)\r\n List_star_number_in_mass_grid_y_axis4 = [0.0000001] + List_star_number_in_mass_grid_y_axis4\r\n List_star_number_in_mass_grid_y_axis4 += [0.0000001]\r\n return\r\n\r\n\r\n### make a mass grid ###\r\n\r\ndef Function_mass_grid(SFR, mass, M_str_low, resolution_histogram_relative):\r\n while mass > M_str_low:\r\n global List_mass_grid\r\n List_mass_grid += [mass]\r\n (mass) = (mass * (1-resolution_histogram_relative))\r\n # we find it is useful to use the following form of mass grid sometimes.\r\n # One can apply this alternative form by quote the above line (add a # in front of the line) and unquote the below.\r\n #(mass) = (mass * (0.967 + math.log(SFR, 10) / 400) / (math.log(mass + 1) ** 2 / (2 ** (math.log(SFR, 10) + 6.85) - 1) + 1))\r\n return\r\n\r\n#count the number of star in each grid\r\ndef Function_sort_out_star_mass(i):\r\n while i < len(List_M_str_all_i):\r\n global l\r\n l = 0\r\n SubFunction_sort_out(i, 0)\r\n (i)=(i+1)\r\n return\r\ndef Function_sort_out_star_mass2(i):\r\n while i < len(List_M_str_all_i):\r\n global l\r\n l = 0\r\n SubFunction_sort_out2(i, 0)\r\n (i)=(i+1)\r\n return\r\ndef Function_sort_out_star_mass3(i):\r\n while i < len(List_M_str_all_i):\r\n global l\r\n l = 0\r\n SubFunction_sort_out3(i, 1)\r\n (i)=(i+1)\r\n return\r\ndef Function_sort_out_star_mass4(i):\r\n while i < len(List_M_str_all_i):\r\n global l\r\n l = 0\r\n SubFunction_sort_out4(i, 2)\r\n (i)=(i+1)\r\n return\r\n\r\ndef SubFunction_sort_out(i, j):\r\n while j < len(List_M_str_all_i[i]):\r\n global l, List_n_str_all_i\r\n Function_find_k(i, j, l)\r\n List_star_number_in_mass_grid[l] += List_n_str_all_i[i][j] * list_n_ecl_i[i]\r\n (j)=(j+1)\r\n return\r\ndef SubFunction_sort_out2(i, j):\r\n if j < len(List_M_str_all_i[i]):\r\n global l\r\n Function_find_k(i, j, l)\r\n List_star_number_in_mass_grid2[l] += list_n_ecl_i[i]\r\n return\r\ndef SubFunction_sort_out3(i, j):\r\n if j < len(List_M_str_all_i[i]):\r\n global l\r\n Function_find_k(i, j, l)\r\n List_star_number_in_mass_grid3[l] += list_n_ecl_i[i]\r\n return\r\ndef SubFunction_sort_out4(i, j):\r\n if j < len(List_M_str_all_i[i]):\r\n global l\r\n Function_find_k(i, j, l)\r\n List_star_number_in_mass_grid4[l] += list_n_ecl_i[i]\r\n return\r\n\r\ndef Function_find_k(i, j, k):\r\n while List_mass_grid[k+1] > List_M_str_all_i[i][j]:\r\n global l\r\n l = k+1\r\n (k) = (k+1)\r\n return\r\n\r\n\r\n# prepare for the breaking line plot\r\ndef make_mass_grid_x_axis(i):\r\n global List_mass_grid_x_axis, List_mass_grid\r\n while i < len(List_mass_grid)-1:\r\n List_mass_grid_x_axis += [List_mass_grid[i]]*2\r\n (i)=(i+1)\r\n return\r\n\r\n\r\ndef make_star_number_in_mass_grid_y_axis(i):\r\n global List_star_number_in_mass_grid_y_axis, List_star_number_in_mass_grid, List_mass_grid\r\n while i < len(List_star_number_in_mass_grid):\r\n List_star_number_in_mass_grid_y_axis += [\r\n List_star_number_in_mass_grid[i]/(List_mass_grid[i] - List_mass_grid[i+1])]*2\r\n (i)=(i+1)\r\n return\r\ndef make_star_number_in_mass_grid_y_axis2(i):\r\n global List_star_number_in_mass_grid_y_axis2, List_star_number_in_mass_grid2, List_mass_grid2\r\n while i < len(List_star_number_in_mass_grid2):\r\n List_star_number_in_mass_grid_y_axis2 += [\r\n List_star_number_in_mass_grid2[i]/(List_mass_grid2[i] - List_mass_grid2[i+1])]*2\r\n (i)=(i+1)\r\n return\r\ndef make_star_number_in_mass_grid_y_axis3(i):\r\n global List_star_number_in_mass_grid_y_axis3, List_star_number_in_mass_grid3, List_mass_grid3\r\n while i < len(List_star_number_in_mass_grid3):\r\n List_star_number_in_mass_grid_y_axis3 += [\r\n List_star_number_in_mass_grid3[i]/(List_mass_grid3[i] - List_mass_grid3[i+1])]*2\r\n (i)=(i+1)\r\n return\r\ndef make_star_number_in_mass_grid_y_axis4(i):\r\n global List_star_number_in_mass_grid_y_axis4, List_star_number_in_mass_grid4, List_mass_grid4\r\n while i < len(List_star_number_in_mass_grid4):\r\n List_star_number_in_mass_grid_y_axis4 += [\r\n List_star_number_in_mass_grid4[i]/(List_mass_grid4[i] - List_mass_grid4[i+1])]*2\r\n (i)=(i+1)\r\n return\r\n\r\n\r\ndef function_make_drop_line1(i):\r\n while i < len(List_star_number_in_mass_grid_y_axis)-1:\r\n if List_star_number_in_mass_grid_y_axis[i] == 0:\r\n List_star_number_in_mass_grid_y_axis[i] = 0.0000001\r\n (i) = (i+1)\r\n\r\ndef function_make_drop_line2(i):\r\n while i < len(List_star_number_in_mass_grid_y_axis2)-1:\r\n if List_star_number_in_mass_grid_y_axis2[i] == 0:\r\n List_star_number_in_mass_grid_y_axis2[i] = 0.0000001\r\n (i) = (i+1)\r\n\r\ndef function_make_drop_line3(i):\r\n while i < len(List_star_number_in_mass_grid_y_axis3)-1:\r\n if List_star_number_in_mass_grid_y_axis3[i] == 0:\r\n List_star_number_in_mass_grid_y_axis3[i] = 0.0000001\r\n (i) = (i+1)\r\n\r\ndef function_make_drop_line4(i):\r\n while i < len(List_star_number_in_mass_grid_y_axis4)-1:\r\n if List_star_number_in_mass_grid_y_axis4[i] == 0:\r\n List_star_number_in_mass_grid_y_axis4[i] = 0.0000001\r\n (i) = (i+1)\r\n\r\ndef function_make_drop_line():\r\n function_make_drop_line1(0)\r\n function_make_drop_line2(0)\r\n function_make_drop_line3(0)\r\n function_make_drop_line4(0)\r\n return\r\n\r\n\r\n######################## histogram ########################\r\n\r\nmass_range_center = []\r\nmass_range = []\r\nmass_range_upper_limit = []\r\nmass_range_lower_limit = []\r\nstar_number = []\r\n\r\ndef function_draw_histogram():\r\n global mass_range_center, mass_range, mass_range_upper_limit, mass_range_lower_limit, star_number\r\n mass_range_center = []\r\n i = 0\r\n while i < len(List_mass_grid) - 1:\r\n mass_range_center += [\r\n 0.5 * (List_mass_grid[i] + List_mass_grid[i + 1])]\r\n i = i + 1\r\n mass_range = []\r\n i = 0\r\n while i < len(List_mass_grid) - 1:\r\n mass_range += [List_mass_grid[i] - List_mass_grid[i + 1]]\r\n i = i + 1\r\n mass_range_upper_limit = []\r\n i = 0\r\n while i < len(List_mass_grid):\r\n mass_range_upper_limit += [List_mass_grid[i]]\r\n i = i + 1\r\n mass_range_lower_limit = []\r\n i = 0\r\n while i < len(List_mass_grid) - 1:\r\n mass_range_lower_limit += [List_mass_grid[i + 1]]\r\n i = i + 1\r\n star_number = List_star_number_in_mass_grid + []\r\n return\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n############## IMF #################\r\n\r\n# use equations in \"supplementary-document-galimf.pdf\"\r\n\r\n# The star mass resolution is the lower resolution among \"relative resolution\" and \"absolute resolution\" where\r\n# the relative resolution = star mass * resolution_star_relative\r\n# the absolute resolution = resolution_star_absolute\r\nresolution_star_relative = 0.001\r\nresolution_star_absolute = 0.001\r\n\r\nlist_m_str_i = []\r\nlist_n_str_i = []\r\nlist_M_str_i = []\r\n\r\ndef function_sample_from_IMF(M_ecl, I_str, M_L, alpha_1, M_turn, alpha_2, M_turn2, alpha_3, M_U):\r\n global list_m_str_i, list_n_str_i, list_M_str_i, M_max, M_max_function, k3, k2, k1, resolution_star_relative, resolution_star_absolute\r\n M_max = 0\r\n M_max_function = 0\r\n function_M_max(M_ecl, I_str, M_L, alpha_1, M_turn, alpha_2, M_turn2, alpha_3, M_U)\r\n k3 = 0\r\n k2 = 0\r\n k1 = 0\r\n function_k321(I_str, alpha_1, M_turn, alpha_2, M_turn2, alpha_3, M_U)\r\n list_m_str_i = []\r\n list_n_str_i = []\r\n function_m_i_str(k1, k2, k3, M_L, alpha_1, M_turn, alpha_2, M_turn2, alpha_3, M_max, resolution_star_relative, resolution_star_absolute) # equation 16\r\n list_M_str_i = []\r\n length_n = len(list_n_str_i)\r\n function_M_i(k1, k2, k3, M_L, alpha_1, M_turn, alpha_2, M_turn2, alpha_3, M_U, length_n) # equation 18\r\n del list_n_str_i[0]\r\n return\r\n\r\n# M_max is computed by solving simultaneously equations (3) and (4) from Yan et al 2017\r\ndef function_M_max(M_ecl, I_str, M_L, alpha_1, M_turn, alpha_2, M_turn2, alpha_3, M_U):\r\n global M_max_function, M_max, M_max_function\r\n M_constant = M_ecl * M_U ** (1 - alpha_3) / I_str / (1 - alpha_3) - M_turn2 ** (alpha_2 - alpha_3) * M_turn ** (\r\n alpha_1 - alpha_2) * (M_turn ** (2 - alpha_1) - M_L ** (2 - alpha_1)) / (2 - alpha_1) - M_turn2 ** (\r\n alpha_2 - alpha_3) * (M_turn2 ** (2 - alpha_2) - M_turn ** (\r\n 2 - alpha_2)) / (2 - alpha_2) + M_turn2 ** (2 - alpha_3) / (2 - alpha_3) # equation 14\r\n function_M_max_1(M_constant, M_ecl, I_str, alpha_3, M_U, M_L, 100, 10, -1) # equation 14\r\n M_max_function = 1\r\n if M_max < M_turn2:\r\n M_constant2 = M_ecl * M_turn2 ** (1 - alpha_2) / I_str / (1 - alpha_2) + M_ecl * M_turn2 ** (\r\n alpha_3 - alpha_2) * (M_U ** (\r\n 1 - alpha_3) - M_turn2 ** (1 - alpha_3)) / I_str / (1 - alpha_3) - M_turn ** (alpha_1 - alpha_2) * (\r\n M_turn ** (2 - alpha_1) - M_L ** (\r\n 2 - alpha_1)) / (2 - alpha_1) + M_turn ** (2 - alpha_2) / (2 - alpha_2) # equation 23\r\n function_M_max_2(M_constant2, M_ecl, I_str, alpha_2, M_U, M_L, 0.75, 0.1, -1) # equation 23\r\n M_max_function = 2\r\n if M_max < M_turn:\r\n M_constant3 = M_ecl * M_turn ** (1 - alpha_1) / I_str / (1 - alpha_1) + M_ecl * M_turn ** (\r\n alpha_2 - alpha_1) * (M_turn2 ** (\r\n 1 - alpha_2) - M_turn ** (1 - alpha_2)) / I_str / (1 - alpha_2) + M_ecl * M_turn2 ** (\r\n alpha_3 - alpha_2) * M_turn ** (\r\n alpha_2 - alpha_1) * (M_U ** (1 - alpha_3) - M_turn2 ** (1 - alpha_3)) / I_str / (1 - alpha_3) + M_L ** (\r\n 2 - alpha_1) / (2 - alpha_1)\r\n # equation 27\r\n function_M_max_3(M_constant3, M_ecl, I_str, alpha_1, M_U, M_L, 100, 10, -1) # equation 27\r\n M_max_function = 3\r\n if M_max < M_L:\r\n M_max_function = 0\r\n print(\"M_max < M_L\")\r\n return\r\n\r\ndef function_k321(I_str, alpha_1, M_turn, alpha_2, M_turn2, alpha_3, M_U):\r\n global M_max_function, k3, k2, k1, M_max\r\n if M_max_function == 1:\r\n k3 = I_str*(1-alpha_3)/(M_U**(1-alpha_3)-M_max**(1-alpha_3))\r\n # equation 12\r\n elif M_max_function == 2:\r\n k3 = I_str/(M_turn2**(alpha_2-alpha_3)*(M_turn2**(1-alpha_2)-M_max**(1-alpha_2))/(1-alpha_2) + (\r\n M_U**(1-alpha_3)-M_turn2**(1-alpha_3))/(1-alpha_3))\r\n # equation 21\r\n elif M_max_function == 3:\r\n k3 = I_str/(M_turn2**(alpha_2-alpha_3) * M_turn**(alpha_1-alpha_2) * (M_turn**(1-alpha_1)-M_max**(1-alpha_1)) / (\r\n 1-alpha_1) + M_turn2**(alpha_2-alpha_3)*(M_turn2**(1-alpha_2)-M_turn**(1-alpha_2))/(1-alpha_2) + (M_U**(\r\n 1-alpha_3)-M_turn2**(1-alpha_3))/(1-alpha_3))\r\n # equation 25\r\n else:\r\n print(\"function_M_max went wrong\")\r\n return\r\n k2 = k3*M_turn2**(alpha_2-alpha_3) # equation 2\r\n k1 = k2*M_turn**(alpha_1-alpha_2) # equation 2\r\n return\r\n\r\ndef function_M_max_1(M_constant, M_ecl, I_str, alpha_3, M_U, M_L, m_1, step, pm): # equation 14\r\n m_1 = round(m_1, 10) # round\r\n M_x = m_1**(2-alpha_3)/(2-alpha_3) + M_ecl*m_1**(1-alpha_3)/I_str/(1-alpha_3)\r\n if abs(M_x-M_constant) < abs(M_constant) * 10 ** (-7):\r\n global M_max\r\n M_max = m_1\r\n elif m_1 - step <= M_L or m_1 + step >= M_U:\r\n function_M_max_1(M_constant, M_ecl, I_str, alpha_3, M_U, M_L, m_1, step / 2, pm)\r\n elif M_x > M_constant and pm == -1:\r\n function_M_max_1(M_constant, M_ecl, I_str, alpha_3, M_U, M_L, m_1 - step, step, -1)\r\n elif M_x > M_constant and pm == 1:\r\n function_M_max_1(M_constant, M_ecl, I_str, alpha_3, M_U, M_L, m_1 - step / 2, step / 2, -1)\r\n elif M_x < M_constant and pm == 1:\r\n function_M_max_1(M_constant, M_ecl, I_str, alpha_3, M_U, M_L, m_1 + step, step, 1)\r\n elif M_x < M_constant and pm == -1:\r\n function_M_max_1(M_constant, M_ecl, I_str, alpha_3, M_U, M_L, m_1 + step / 2, step / 2, 1)\r\n return\r\n\r\ndef function_M_max_2(M_constant2, M_ecl, I_str, alpha_2, M_U, M_L, m_1, step, pm): # equation 23\r\n m_1 = round(m_1, 10) # round\r\n M_x = m_1 ** (2 - alpha_2) / (2 - alpha_2) + M_ecl * m_1 ** (1 - alpha_2) / I_str / (1 - alpha_2)\r\n if abs(M_x - M_constant2) < abs(M_constant2) * 10 ** (-7):\r\n global M_max\r\n M_max = m_1\r\n elif m_1 - step <= M_L or m_1 + step >= M_U:\r\n function_M_max_1(M_constant2, M_ecl, I_str, alpha_2, M_U, M_L, m_1, step / 2, pm)\r\n elif M_x > M_constant2 and pm == -1:\r\n function_M_max_1(M_constant2, M_ecl, I_str, alpha_2, M_U, M_L, m_1 - step, step, -1)\r\n elif M_x > M_constant2 and pm == 1:\r\n function_M_max_1(M_constant2, M_ecl, I_str, alpha_2, M_U, M_L, m_1 - step / 2, step / 2, -1)\r\n elif M_x < M_constant2 and pm == 1:\r\n function_M_max_1(M_constant2, M_ecl, I_str, alpha_2, M_U, M_L, m_1 + step, step, 1)\r\n elif M_x < M_constant2 and pm == -1:\r\n function_M_max_1(M_constant2, M_ecl, I_str, alpha_2, M_U, M_L, m_1 + step / 2, step / 2, 1)\r\n return\r\n\r\ndef function_M_max_3(M_constant3, M_ecl, I_str, alpha_1, M_U, M_L, m_1, step, pm): # equation 27\r\n m_1 = round(m_1, 10) # round\r\n M_x = m_1 ** (2 - alpha_1) / (2 - alpha_1) + M_ecl * m_1 ** (1 - alpha_1) / I_str / (1 - alpha_1)\r\n if abs(M_x-M_constant3) < abs(M_constant3) * 10 ** (-7):\r\n global M_max\r\n M_max = m_1\r\n elif m_1 - step <= M_L or m_1 + step >= M_U:\r\n function_M_max_1(M_constant3, M_ecl, I_str, alpha_1, M_U, M_L, m_1, step / 2, pm)\r\n elif M_x > M_constant3 and pm == -1:\r\n function_M_max_1(M_constant3, M_ecl, I_str, alpha_1, M_U, M_L, m_1 - step, step, -1)\r\n elif M_x > M_constant3 and pm == 1:\r\n function_M_max_1(M_constant3, M_ecl, I_str, alpha_1, M_U, M_L, m_1 - step / 2, step / 2, -1)\r\n elif M_x < M_constant3 and pm == 1:\r\n function_M_max_1(M_constant3, M_ecl, I_str, alpha_1, M_U, M_L, m_1 + step, step, 1)\r\n elif M_x < M_constant3 and pm == -1:\r\n function_M_max_1(M_constant3, M_ecl, I_str, alpha_1, M_U, M_L, m_1 + step / 2, step / 2, 1)\r\n return\r\n\r\ndef function_m_i_str(k1, k2, k3, M_L, alpha_1, M_turn, alpha_2, M_turn2, alpha_3, M_max, resolution_star_relative, resolution_star_absolute): # equation 16\r\n global list_m_str_i\r\n if M_max > 100:\r\n loop_m_i_first_three(k3, M_turn2, alpha_3, M_max, 0, resolution_star_relative, resolution_star_absolute, 0)\r\n (m_str_i, n_str_i) = cross_M_turn(k3, k2, M_turn2, alpha_3, alpha_2, list_m_str_i[-1], resolution_star_relative, resolution_star_absolute)\r\n loop_m_i(k2, M_turn, alpha_2, m_str_i, n_str_i, resolution_star_relative, resolution_star_absolute)\r\n (m_str_i, n_str_i) = cross_M_turn(k2, k1, M_turn, alpha_2, alpha_1, list_m_str_i[-1], resolution_star_relative, resolution_star_absolute)\r\n loop_m_i(k1, M_L, alpha_1, m_str_i, n_str_i, resolution_star_relative, resolution_star_absolute)\r\n cross_M_L(k1, M_L, alpha_1, list_m_str_i[-1])\r\n return\r\n elif M_max > M_turn2:\r\n loop_m_i(k3, M_turn2, alpha_3, M_max, 0, resolution_star_relative, resolution_star_absolute)\r\n (m_str_i, n_str_i) = cross_M_turn(k3, k2, M_turn2, alpha_3, alpha_2, list_m_str_i[-1], resolution_star_relative, resolution_star_absolute)\r\n loop_m_i(k2, M_turn, alpha_2, m_str_i, n_str_i, resolution_star_relative, resolution_star_absolute)\r\n (m_str_i, n_str_i) = cross_M_turn(k2, k1, M_turn, alpha_2, alpha_1, list_m_str_i[-1], resolution_star_relative, resolution_star_absolute)\r\n loop_m_i(k1, M_L, alpha_1, m_str_i, n_str_i, resolution_star_relative, resolution_star_absolute)\r\n cross_M_L(k1, M_L, alpha_1, list_m_str_i[-1])\r\n return\r\n elif M_max > M_turn:\r\n loop_m_i(k2, M_turn, alpha_2, M_max, 0, resolution_star_relative, resolution_star_absolute)\r\n (m_str_i, n_str_i) = cross_M_turn(k2, k1, M_turn, alpha_2, alpha_1, list_m_str_i[-1], resolution_star_relative, resolution_star_absolute)\r\n loop_m_i(k1, M_L, alpha_1, m_str_i, n_str_i, resolution_star_relative, resolution_star_absolute)\r\n cross_M_L(k1, M_L, alpha_1, list_m_str_i[-1])\r\n return\r\n else:\r\n loop_m_i(k1, M_L, alpha_1, M_max, 0, resolution_star_relative, resolution_star_absolute)\r\n cross_M_L(k1, M_L, alpha_1, list_m_str_i[-1])\r\n return\r\n\r\ndef function_get_n_new_str(m_i, k, alpha, m_i_plus_n, n_i, resolution_star_relative, resolution_star_absolute):\r\n while m_i - m_i_plus_n < max(resolution_star_relative * m_i, resolution_star_absolute):\r\n n_new = round(n_i * 1.05 + 1)\r\n m_i_plus_n_new = (m_i ** (1 - alpha) - n_new * (1 - alpha) / k) ** (1 / (1 - alpha))\r\n (m_i_plus_n, n_i) = (m_i_plus_n_new, n_new)\r\n return m_i_plus_n, n_i\r\n\r\ndef loop_m_i_first_three(k, M_low, alpha, m_i, n_i, resolution_star_relative, resolution_star_absolute, count):\r\n while m_i > M_low:\r\n global list_m_str_i, list_n_str_i, n_turn\r\n list_m_str_i += [m_i]\r\n list_n_str_i += [n_i]\r\n m_i_plus_n = (m_i ** (1 - alpha) - n_i * (1 - alpha) / k) ** (1 / (1 - alpha))\r\n if count < 3:\r\n m_i_plus_n = (m_i ** (1 - alpha) - (1 - alpha) / k) ** (1 / (1 - alpha))\r\n n_turn = n_i\r\n (m_i, n_i, count) = (m_i_plus_n, 1, (count+1))\r\n elif m_i - m_i_plus_n > max(resolution_star_relative * m_i, resolution_star_absolute):\r\n n_turn = n_i\r\n (m_i, n_i) = (m_i_plus_n, n_i)\r\n else:\r\n (m_i_plus_n_new, n_turn) = function_get_n_new_str(m_i, k, alpha, m_i_plus_n, n_i, resolution_star_relative, resolution_star_absolute)\r\n (m_i, n_i) = (m_i_plus_n_new, n_turn)\r\n\r\ndef loop_m_i(k, M_low, alpha, m_i, n_i, resolution_star_relative, resolution_star_absolute):\r\n while m_i > M_low:\r\n global list_m_str_i, list_n_str_i, n_turn\r\n list_m_str_i += [m_i]\r\n list_n_str_i += [n_i]\r\n a = m_i ** (1 - alpha) - n_i * (1 - alpha) / k\r\n if a > 0:\r\n b = 1 / (1 - alpha)\r\n m_i_plus_n = a ** b\r\n if m_i - m_i_plus_n > max(resolution_star_relative * m_i, resolution_star_absolute):\r\n (m_i, n_i) = (m_i_plus_n, n_i)\r\n else:\r\n (m_i_plus_n_new, n_turn) = function_get_n_new_str(m_i, k, alpha, m_i_plus_n, n_i, resolution_star_relative, resolution_star_absolute)\r\n (m_i, n_i) = (m_i_plus_n_new, n_turn)\r\n else:\r\n return\r\n\r\n\r\ndef cross_M_turn(k_before, k_after, M_cross, alpha_before, alpha_after, m_i, resolution_star_relative, resolution_star_absolute):\r\n global n_turn\r\n n_before = int(k_before/(1-alpha_before)*(m_i**(1-alpha_before)-M_cross**(1-alpha_before)))\r\n m_before_cross = (m_i ** (1 - alpha_before) - n_before * (1 - alpha_before) / k_before) ** (1 / (1 - alpha_before))\r\n a = (M_cross**(1-alpha_after)+k_before/k_after*(1-alpha_after)/(1-alpha_before)*(m_before_cross**(\r\n 1-alpha_before)-M_cross**(1-alpha_before))-(1-alpha_after)/k_after)\r\n if a > 0:\r\n m_after_cross = a ** (1/(1-alpha_after))\r\n n_after = int(0.9*(n_turn - n_before - 1))\r\n m_after_cross_plus_n_after = (m_after_cross ** (1 - alpha_after) - n_after * (1 - alpha_after) / k_after) ** (1 / (1 - alpha_after))\r\n if m_i - m_after_cross_plus_n_after > max(resolution_star_relative * m_i, resolution_star_absolute):\r\n return (m_after_cross_plus_n_after, n_before + 1 + n_after)\r\n else:\r\n (m_after_cross_plus_n_new, n_after_new) = function_get_n_new_str_cross(\r\n m_i, m_after_cross, k_after, alpha_after, m_after_cross_plus_n_after, n_after, resolution_star_relative, resolution_star_absolute)\r\n return (m_after_cross_plus_n_new, n_before + 1 + n_after_new)\r\n else:\r\n return (0, 0)\r\n\r\n\r\ndef function_get_n_new_str_cross(m_i, m_after_cross, k, alpha, m_after_cross_plus_n, n_i, resolution_star_relative, resolution_star_absolute):\r\n while m_i - m_after_cross_plus_n < max(resolution_star_relative * m_i, resolution_star_absolute):\r\n n_after_new = round(n_i * 1.05 + 1)\r\n m_after_cross_plus_n_new = (m_after_cross ** (1 - alpha) - n_after_new * (1 - alpha) / k) ** (1 / (1 - alpha))\r\n (m_after_cross_plus_n, n_i) = (m_after_cross_plus_n_new, n_after_new)\r\n return m_after_cross_plus_n, n_i\r\n\r\n\r\ndef cross_M_L(k_1, M_L, alpha_1, m_i): # equation 19\r\n global list_m_str_i, list_n_str_i\r\n n_i = int(k_1 / (1 - alpha_1) * (m_i ** (1 - alpha_1) - M_L ** (1 - alpha_1)))\r\n list_m_str_i += [M_L]\r\n list_n_str_i += [n_i]\r\n return\r\n\r\n\r\ndef function_M_i(k1, k2, k3, M_L, alpha_1, M_turn, alpha_2, M_turn2, alpha_3, M_U, length_n): # equation 18\r\n global list_m_str_i, new_i, list_M_str_i, M_max, list_n_str_i\r\n new_i = 0\r\n if M_max > M_turn2:\r\n loop_M_i(k3, M_turn2, alpha_3, new_i)\r\n cross_M_turn2(k3, k2, M_turn2, alpha_3, alpha_2, new_i)\r\n if new_i + 1 < len(list_m_str_i):\r\n loop_M_i(k2, M_turn, alpha_2, new_i)\r\n if list_n_str_i[new_i + 1] > 0:\r\n cross_M_turn2(k2, k1, M_turn, alpha_2, alpha_1, new_i)\r\n if new_i + 1 < len(list_m_str_i):\r\n loop_M_i(k1, M_L, alpha_1, new_i)\r\n if list_n_str_i[new_i+1] == 0:\r\n return\r\n else:\r\n M_i = k1 / (2 - alpha_1) * (list_m_str_i[new_i] ** (2 - alpha_1) - list_m_str_i[new_i + 1] ** (2 - alpha_1)) / \\\r\n list_n_str_i[new_i + 1]\r\n list_M_str_i += [M_i]\r\n return\r\n elif M_max > M_turn:\r\n loop_M_i(k2, M_turn, alpha_2, new_i)\r\n cross_M_turn2(k2, k1, M_turn, alpha_2, alpha_1, new_i)\r\n loop_M_i(k1, M_L, alpha_1, new_i)\r\n if list_n_str_i[new_i+1] == 0:\r\n return\r\n else:\r\n M_i = k1 / (2 - alpha_1) * (list_m_str_i[new_i] ** (2 - alpha_1) - list_m_str_i[new_i + 1] ** (\r\n 2 - alpha_1)) / list_n_str_i[new_i + 1]\r\n list_M_str_i += [M_i]\r\n return\r\n else:\r\n loop_M_i(k1, M_L, alpha_1, new_i)\r\n if list_n_str_i[new_i+1] == 0:\r\n return\r\n else:\r\n M_i = k1 / (2 - alpha_1) * (list_m_str_i[new_i] ** (2 - alpha_1) - list_m_str_i[new_i + 1] ** (\r\n 2 - alpha_1)) / list_n_str_i[new_i + 1]\r\n list_M_str_i += [M_i]\r\n return\r\n\r\n\r\ndef loop_M_i(k, M_low, alpha, i):\r\n global list_m_str_i, list_n_str_i, list_M_str_i, new_i\r\n while list_m_str_i[i+1] > M_low:\r\n M_i = k/(2-alpha)*(list_m_str_i[i]**(2-alpha)-list_m_str_i[i+1]**(2-alpha))/list_n_str_i[i+1]\r\n list_M_str_i += [M_i]\r\n new_i = i + 1\r\n (i)=(new_i)\r\n\r\ndef cross_M_turn2(k_before, k_after, M_cross, alpha_before, alpha_after, i):\r\n global list_m_str_i, list_n_str_i, list_M_str_i, new_i\r\n M_i = k_before / (2 - alpha_before) * (list_m_str_i[i] ** (2 - alpha_before) - M_cross ** (2 - alpha_before)\r\n ) / list_n_str_i[i + 1] + k_after / (2 - alpha_after) * (M_cross ** (2 - alpha_after\r\n ) - list_m_str_i[i + 1] ** (2 - alpha_after)) / list_n_str_i[i + 1]\r\n list_M_str_i += [M_i]\r\n new_i = i + 1\r\n return\r\n\r\n\r\n################# draw IMF without sampling #################\r\n\r\ndef k_str(M_str, M_ecl, I_str, M_L, alpha_1, M_turn, alpha_2, M_turn2, alpha_3, M_U):\r\n global M_max, M_max_function, k3, k2, k1\r\n M_max = 0\r\n M_max_function = 0\r\n function_M_max(M_ecl, I_str, M_L, alpha_1, M_turn, alpha_2, M_turn2, alpha_3, M_U)\r\n k3 = 0\r\n k2 = 0\r\n k1 = 0\r\n function_k321(I_str, alpha_1, M_turn, alpha_2, M_turn2, alpha_3, M_U)\r\n return\r\n\r\nx_IMF = []\r\ny_IMF = []\r\n\r\ndef function_draw_xi_str(M_str, M_ecl, I_str, M_L, alpha_1, M_turn, alpha_2, M_turn2, alpha_3, M_U):\r\n global x_IMF, y_IMF, k1, k2, k3, M_max\r\n k_str(M_str, M_ecl, I_str, M_L, alpha_1, M_turn, alpha_2, M_turn2, alpha_3, M_U)\r\n function_draw_xi_str_loop(M_str, alpha_1, M_turn, alpha_2, M_turn2, alpha_3)\r\n return\r\n\r\ndef function_draw_xi_str_loop(M_str, alpha_1, M_turn, alpha_2, M_turn2, alpha_3):\r\n global x_IMF, y_IMF, k1, k2, k3, M_max\r\n while M_str < M_max:\r\n x_IMF += [M_str]\r\n if M_str > M_turn2:\r\n xi = k3 * M_str ** (-alpha_3)\r\n elif M_str > M_turn:\r\n xi = k2 * M_str ** (-alpha_2)\r\n else:\r\n xi = k1 * M_str ** (-alpha_1)\r\n y_IMF += [xi]\r\n (M_str) = (1.02 * M_str)\r\n return\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n########### alpha ###########\r\n\r\ndef Function_alpha_1_change(alpha_1, alpha1_model, Z_over_H):\r\n if (alpha1_model == 0):\r\n return alpha_1\r\n elif (alpha1_model == 1):\r\n alpha_1_change = alpha_1 + 0.5 * Z_over_H\r\n return alpha_1_change\r\n else:\r\n print(\"alpha1_model: %s, do not exist.\\nCheck file 'alpha1.py'\" % (alpha1_model))\r\n return\r\n\r\n\r\ndef Function_alpha_2_change(alpha_2, alpha2_model, Z_over_H):\r\n if (alpha2_model == 0):\r\n return alpha_2\r\n elif (alpha2_model == 1):\r\n alpha_2_change = alpha_2 + 0.5 * Z_over_H\r\n return alpha_2_change\r\n else:\r\n print(\"alpha2_model: %s, do not exist.\\nCheck file 'alpha2.py'\" % (alpha2_model))\r\n return\r\n\r\n\r\ndef Function_alpha_3_change(alpha3_model, M_ecl, Z_over_H):\r\n if (alpha3_model == 0):\r\n default_alpha3 = 2.3\r\n # print(\"alpha_3 is set to be a constant: %s, as this is the default alpha_3 value for alpha3_model 0.\\nFor more options regarding alpha_3 variation, please check file 'alpha3.py'\" % (default_alpha3))\r\n return default_alpha3\r\n elif (alpha3_model == 1):\r\n rho = 10 ** (0.61 * math.log(M_ecl, 10) + 2.85)\r\n if rho < 9.5 * 10 ** 4:\r\n alpha_3_change = 2.3\r\n else:\r\n alpha_3_change = 1.86 - 0.43 * math.log(rho / 10 ** 6, 10)\r\n # print(\"Notification in file 'alpha3_model' uncompleted\")\r\n if alpha_3_change < 0.5:\r\n print(\"IMF alpha_3 being\", alpha_3_change, \"out of the tested range from Marks et al. 2012.\")\r\n return alpha_3_change\r\n elif (alpha3_model == 2):\r\n rho = 10 ** (0.61 * math.log(M_ecl, 10) + 2.85)\r\n x = -0.1405 * Z_over_H + 0.99 * math.log(rho / 10 ** 6, 10)\r\n if x < -0.87:\r\n alpha_3_change = 2.3\r\n else:\r\n alpha_3_change = -0.41 * x + 1.94\r\n # print(\"Notification in file 'alpha3_model' uncompleted\")\r\n return alpha_3_change\r\n else:\r\n # print(\"alpha_3 is set to be a constant: %s, as this is the input value of parameter 'alpha3_model'.\\nFor more options regarding alpha_3 variation, please check file 'alpha3.py'\" % (alpha3_model))\r\n return alpha3_model\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n########## ECMF #########\r\n\r\n# This part gives the cluster masses according to file \"supplementary-document-galimf.pdf\".\r\n\r\n# The code is only valid when SFR > 3 * 10^(-10) solar / year.\r\n\r\n# Inputs:\r\n# SFR,\u000Edelta_t, I, M_U, M_L, \\beta\r\n\r\n# step 1\r\n# use equation 13 or 17\r\n# give first integration limit m_1 i.e. M_max_ecl\r\n\r\n# step 2\r\n# use equation 10 or 14\r\n# give k\r\n\r\n# step 3\r\n# use equation 21\r\n# give every integration limit m_i and the number of stars in this region n_i\r\n\r\n# step 4\r\n# use equation 22 or 23\r\n# give every cluster mass M_i\r\n\r\n# Outputs:\r\n# list of star mass \"list_M_ecl_i\"\r\n# and the number of star with each mass \"list_n_ecl_i\"\r\n\r\n\r\n################### sample cluster from ECMF #####################\r\n\r\nresolution_cluster_relative = 0.001 # The mass resolution of a embedded cluster with mass M is: M * resolution_cluster_relative.\r\nlist_m_ecl_i = []\r\nlist_n_ecl_i = []\r\nlist_M_ecl_i = []\r\nM_max_ecl = 0\r\n\r\ndef function_sample_from_ECMF(SFR, delta_t, I_ecl, M_U, M_L, beta):\r\n global list_m_ecl_i, list_n_ecl_i, list_M_ecl_i, M_max_ecl, resolution_cluster_relative\r\n M_tot = SFR * delta_t * 10**6 # units in Myr\r\n if beta == 2:\r\n M_max_ecl = 0\r\n function_M_max_ecl_2(M_tot, I_ecl, M_U, M_L, 10**8, 10**7, -1) # equation 44\r\n k = I_ecl/(1/M_max_ecl-1/M_U) # equation 41\r\n list_m_ecl_i = [M_max_ecl]\r\n list_n_ecl_i = []\r\n function_m_i_ecl(M_max_ecl, M_L, k, beta, 1) # equation 48\r\n list_M_ecl_i = []\r\n length_n = len(list_n_ecl_i)\r\n function_M_i_2(k, 0, length_n) # equation 50\r\n else:\r\n M_max_ecl = 0\r\n function_M_max_ecl_not_2(M_tot, I_ecl, M_U, M_L, beta, 10**8, 10**7, -1) # equation 40\r\n k = I_ecl * (1 - beta) / (M_U ** (1 - beta) - M_max_ecl ** (1 - beta)) # equation 37\r\n list_m_ecl_i = [M_max_ecl]\r\n list_n_ecl_i = []\r\n function_m_i_ecl(M_max_ecl, M_L, k, beta, 1) # equation 48\r\n list_M_ecl_i = []\r\n length_n = len(list_n_ecl_i)\r\n function_M_i_not_2(k, beta, 0, length_n) # equation 49\r\n return\r\n\r\n\r\ndef function_M_max_ecl_2(M_tot, I_ecl, M_U, M_L, m_1, step, pm): # equation 44\r\n m_1 = round(m_1, 10) # round makes the code only valid when SFR > 3 * 10^(-10) solar / year\r\n M_x = I_ecl * (math.log(m_1) - math.log(M_L)) / (1 / m_1 - 1 / M_U)\r\n if M_tot * (1. + 10 ** (-5)) > M_x > M_tot * (1- 10 ** (-5)):\r\n global M_max_ecl\r\n M_max_ecl = m_1\r\n elif m_1 - step < M_L or m_1 + step > M_U:\r\n function_M_max_ecl_2(M_tot, I_ecl, M_U, M_L, m_1, step/10, pm)\r\n elif M_x > M_tot and pm == -1:\r\n function_M_max_ecl_2(M_tot, I_ecl, M_U, M_L, m_1 - step, step, -1)\r\n elif M_x > M_tot and pm == 1:\r\n function_M_max_ecl_2(M_tot, I_ecl, M_U, M_L, m_1 - step/10, step/10, -1)\r\n elif M_x < M_tot and pm == 1:\r\n function_M_max_ecl_2(M_tot, I_ecl, M_U, M_L, m_1 + step, step, 1)\r\n elif M_x < M_tot and pm == -1:\r\n function_M_max_ecl_2(M_tot, I_ecl, M_U, M_L, m_1 + step/10, step/10, 1)\r\n\r\n\r\ndef function_M_max_ecl_not_2(M_tot, I_ecl, M_U, M_L, beta, m_1, step, pm): # equation 40\r\n m_1 = round(m_1, 10) # round makes the code only valid when SFR > 3 * 10^(-10) solar / year\r\n M_x = I_ecl * (1 - beta) / (2 - beta) * (m_1 ** (2 - beta) - M_L ** (2 - beta)) / (\r\n M_U ** (1 - beta) - m_1 ** (1 - beta))\r\n if M_tot * (1.+10**(-5)) > M_x > M_tot * (1-10**(-5)):\r\n global M_max_ecl\r\n M_max_ecl = m_1\r\n elif m_1 - step <= M_L or m_1 + step >= M_U:\r\n function_M_max_ecl_not_2(M_tot, I_ecl, M_U, M_L, beta, m_1, step/2, pm)\r\n elif M_x > M_tot and pm == -1:\r\n function_M_max_ecl_not_2(M_tot, I_ecl, M_U, M_L, beta, m_1 - step, step, -1)\r\n elif M_x > M_tot and pm == 1:\r\n function_M_max_ecl_not_2(M_tot, I_ecl, M_U, M_L, beta, m_1 - step/2, step/2, -1)\r\n elif M_x < M_tot and pm == 1:\r\n function_M_max_ecl_not_2(M_tot, I_ecl, M_U, M_L, beta, m_1 + step, step, 1)\r\n elif M_x < M_tot and pm == -1:\r\n function_M_max_ecl_not_2(M_tot, I_ecl, M_U, M_L, beta, m_1 + step/2, step/2, 1)\r\n\r\ndef function_m_i_ecl(m_i, M_L, k, beta, n_i): # equation 48\r\n while m_i > M_L:\r\n global list_m_ecl_i, list_n_ecl_i, resolution_cluster_relative\r\n m_i_plus_n = (m_i**(1-beta) - n_i * (1-beta) / k)**(1/(1-beta))\r\n if m_i_plus_n < M_L:\r\n list_m_ecl_i += [M_L]\r\n n_L = int((m_i**(1-beta) - M_L**(1-beta)) * k / (1-beta))\r\n if n_L == 0:\r\n return\r\n else:\r\n list_n_ecl_i += [n_L]\r\n return\r\n elif m_i - m_i_plus_n > resolution_cluster_relative * m_i:\r\n list_m_ecl_i += [m_i_plus_n]\r\n list_n_ecl_i += [n_i]\r\n (m_i, n_i) = (m_i_plus_n, n_i)\r\n else:\r\n (m_i_plus_n_new, n_new) = function_get_n_new_ecl(m_i, k, beta, m_i_plus_n, n_i)\r\n list_m_ecl_i += [m_i_plus_n_new]\r\n list_n_ecl_i += [n_new]\r\n (m_i, n_i) = (m_i_plus_n_new, n_new)\r\n return\r\n\r\n\r\ndef function_get_n_new_ecl(m_i, k, beta, m_i_plus_n, n_i):\r\n while m_i - m_i_plus_n < resolution_cluster_relative * m_i:\r\n n_new = round(n_i * 1.05 + 1)\r\n m_i_plus_n_new = (m_i ** (1 - beta) - n_new * (1 - beta) / k) ** (1 / (1 - beta))\r\n (m_i_plus_n, n_i) = (m_i_plus_n_new, n_new)\r\n return m_i_plus_n, n_i\r\n\r\n\r\ndef function_M_i_2(k, i, length_n): # equation 50\r\n while i < length_n:\r\n global list_m_ecl_i, list_n_ecl_i, list_M_ecl_i\r\n M_i = k * (math.log(list_m_ecl_i[i]) - math.log(list_m_ecl_i[i+1])) / list_n_ecl_i[i]\r\n list_M_ecl_i += [M_i]\r\n (i) = (i+1)\r\n return\r\n\r\n\r\ndef function_M_i_not_2(k, beta, i, length_n): # equation 49\r\n while i < length_n:\r\n global list_m_ecl_i, list_n_ecl_i, list_M_ecl_i\r\n M_i = k / (2-beta) * (list_m_ecl_i[i]**(2-beta)-list_m_ecl_i[i+1]**(2-beta)) / list_n_ecl_i[i]\r\n list_M_ecl_i += [M_i]\r\n (i) = (i+1)\r\n return\r\n\r\n################### draw ECMF without sampling #####################\r\n\r\ndef k_ecl(M_ecl, SFR, delta_t, I_ecl, M_U, M_L, beta):\r\n global M_max_ecl\r\n M_tot = SFR * delta_t * 10 ** 6 # units in Myr\r\n if beta == 2:\r\n M_max_ecl = 0\r\n function_M_max_ecl_2(M_tot, I_ecl, M_U, M_L, 10**8, 10**7, -1) # equation 44\r\n k = I_ecl/(1/M_max_ecl-1/M_U) # equation 41\r\n else:\r\n M_max_ecl = 0\r\n function_M_max_ecl_not_2(M_tot, I_ecl, M_U, M_L, beta, M_U/10, M_U/100, -1) # equation 40\r\n k = I_ecl * (1 - beta) / (M_U ** (1 - beta) - M_max_ecl ** (1 - beta)) # equation 37\r\n return k\r\n\r\nx_ECMF = []\r\ny_ECMF = []\r\n\r\ndef function_draw_xi_ecl(M_ecl, SFR, delta_t, I_ecl, M_U, M_L, beta):\r\n global x_ECMF, y_ECMF\r\n k = k_ecl(M_ecl, SFR, delta_t, I_ecl, M_U, M_L, beta)\r\n function_draw_xi_ecl_loop(M_ecl, k, M_U, beta)\r\n x_ECMF = [x_ECMF[0]] + x_ECMF\r\n x_ECMF += [x_ECMF[-1]]\r\n y_ECMF = [0.000000001] + y_ECMF\r\n y_ECMF += [0.000000001]\r\n return\r\n\r\ndef function_draw_xi_ecl_loop(M_ecl, k, M_U, beta):\r\n while M_ecl < M_max_ecl:\r\n global x_ECMF, y_ECMF\r\n x_ECMF += [M_ecl]\r\n xi = k * M_ecl ** (-beta)\r\n y_ECMF += [xi]\r\n (M_ecl) = (1.01 * M_ecl)\r\n return\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n########## beta ###########\r\n\r\n\r\n\r\ndef Function_beta_change(beta_model, SFR, Z_over_H):\r\n if (beta_model == 0):\r\n default_beta = 2.00000001\r\n return default_beta\r\n elif (beta_model == 1):\r\n beta_change = -0.106 * math.log(SFR, 10) + 2.000001 #+ 0.5*Z_over_H\r\n if beta_change < 1.5:\r\n beta_change = 1.5\r\n elif beta_change > 2.5:\r\n beta_change = 2.5\r\n # print(\"ECMF-beta =\", beta_change)\r\n return beta_change\r\n elif (beta_model == 2):\r\n if SFR > 1:\r\n beta_change = -0.106 * math.log(SFR, 10) + 2.00000001\r\n else:\r\n beta_change = 2.0000001\r\n return beta_change\r\n else:\r\n return beta_model\r\n","sub_path":"galIMF.py","file_name":"galIMF.py","file_ext":"py","file_size_in_byte":50337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"31778280","text":"#!/usr/bin/env python3\n\nfrom baselines import logger\nfrom baselines.common.cmd_util import make_atari_env, atari_arg_parser\nfrom baselines.common.vec_env.vec_frame_stack import VecFrameStack\nfrom baselines.a2c.a2c import learn\nfrom baselines.ppo2.policies import CnnPolicy, LstmPolicy, LnLstmPolicy\n\ndef train(env_id, num_timesteps, seed, policy, lrschedule, num_env,\n replay_lambda=1, replay_loss=None, ss_rate=1, thetas=None):\n if policy == 'cnn':\n policy_fn = CnnPolicy\n elif policy == 'lstm':\n policy_fn = LstmPolicy\n elif policy == 'lnlstm':\n policy_fn = LnLstmPolicy\n env = VecFrameStack(make_atari_env(env_id, num_env, seed), 4)\n if replay_loss is not None:\n learn_staged(policy_fn, env, seed, total_timesteps=int(num_timesteps * 1.1),\n lrschedule=lrschedule, replay_lambda=replay_lambda, ss_rate=ss_rate,\n replay_loss=replay_loss, thetas=thetas)\n else:\n learn(policy_fn, env, seed, total_timesteps=int(num_timesteps * 1.1), lrschedule=lrschedule)\n env.close()\n\ndef main():\n parser = atari_arg_parser()\n parser.add_argument('--policy', help='Policy architecture', choices=['cnn', 'lstm', 'lnlstm'], default='cnn')\n parser.add_argument('--lrschedule', help='Learning rate schedule', choices=['constant', 'linear'], default='constant')\n parser.add_argument('--replay_lambda', help='Replay regularizer parameter', default=1)\n parser.add_argument('--ss_rate', help='Subsampling rate', default=1)\n parser.add_argument('--replay_loss', help='Replay loss, if any', choices=['L2', 'Distillation'], default=None)\n parser.add_argument('--thetas', help='List of thetas to invert over', nargs='*', default=None)\n args = parser.parse_args()\n logger.configure()\n train(args.env, num_timesteps=args.num_timesteps, seed=args.seed,\n policy=args.policy, lrschedule=args.lrschedule, num_env=16,\n replay_lambda=args.replay_lambda, ss_rate=args.ss_rate,\n replay_loss=args.replay_loss, thetas=args.thetas)\n\nif __name__ == '__main__':\n main()\n","sub_path":"baselines/a2c/run_atari.py","file_name":"run_atari.py","file_ext":"py","file_size_in_byte":2067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"213411811","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Sep 20 11:50:59 2017\n\n@author: yzamriy\n\nGoal: Funtion to write out nested dictionaries to csv files \n\nInput: Nested dicitonary, column names for the csv file, filename\n \nOutput: CSV files in ./CSV directory\n\nRetruns: Nothing\n\n\"\"\"\nfrom Roster import Roster\nfrom Competition import Competition\nfrom RankingList import RankingList\nimport os, csv\n\ndef writetoCSV_lev3(data_dict, col_names, filename):\n ''' \n Goal: Writes 3-level nested dictionary into a csv file\n Input: 3-level nested dictionary, column names, output file name\n Ouput: csv file with results in the output directory\n Returns: Nothing\n '''\n home_dir = os.getcwd()\n # subdirectory for csv output\n db_dir = \"./CSV\"\n os.chdir(db_dir)\n \n with open(filename, \"w\", newline = '', encoding = 'utf-8') as toWrite:\n writer = csv.writer(toWrite, delimiter=\",\")\n writer.writerow(col_names)\n for level1 in data_dict:\n row = []\n for level2 in data_dict[level1]:\n row = [level1]\n row.append(level2)\n for level3 in data_dict[level1][level2]:\n row.append(data_dict[level1][level2][level3])\n writer.writerow(row)\n os.chdir(home_dir)\n\ndef writetoCSV_lev2(data_dict, col_names, filename):\n ''' \n Goal: Writes 2-level nested dictionary into a csv file\n Input: 2-level nested dictionary, column names, output file name\n Ouput: csv file with results in the output directory\n Returns: Nothing\n '''\n home_dir = os.getcwd()\n # subdirectory for csv output\n db_dir = \"./CSV\"\n os.chdir(db_dir)\n\n with open(filename, \"w\", newline = '', encoding = 'utf-8') as toWrite:\n writer = csv.writer(toWrite, delimiter=\",\")\n writer.writerow(col_names)\n for ref in data_dict:\n row = [ref]\n for col in data_dict[ref]:\n row.append(data_dict[ref][col])\n writer.writerow(row)\n\n os.chdir(home_dir)\n\n# Enter desired parameters\n# All possible values are stored in the file called \"usapl_parameters.txt\"\n#sex = 'Female' # Possible Values: 'Male' and 'Female'\n#div = 'Raw Open' # Possible Values: 'All', 'Raw Open', etc.\n#fed = 'IPF - Female' # Possible Values: 'IPF - Female' and 'IPF - Male'\n#wclass = '-84' # Possible Values: '-84', '84+', etc.\n#exercise = 'Total' # Possible Values: 'Total', 'Squat', 'Bench press', 'Deadlift'\n#state = 'All' # Possible Values: 'All', 'Nationals', 'Regionals', 'New York'\n#year = 'All' # Possible Values: 'All', '2017', etc.\n#order = 'Weight' # Possible Values: 'Points' and 'Weight'\n#\n#par_list = [sex,div,fed,wclass,exercise,state,year,order]\n\n# Name of the roster file\n#rawinputfile = './Input files/2017 Raw Nationals Roster.csv'\n# Weightclass and division of interest (based on values in the row roster file)\n#weightclass = 'F-84'\n#division = 'FR-O'\n\n# create Roster object\n#roster = Roster(par_list, rawinputfile, weightclass, division)\n#writetoCSV_lev3(roster.return_dict(),roster.get_col_names(), roster.build_filename())\n\n# create RankingList object\n#rank = RankingList(par_list)\n#writetoCSV_lev2(rank.return_dict(),rank.get_col_names(), rank.build_filename())\n\n#lifter = Lifter('lifters-view?id=1768')\n#writetoCSV_lev2(lifter.return_dict(),lifter.get_col_names(), lifter.build_filename())\n\n# create Competition object based on competition URL reference\n#print(CompetitionList(substring = \"Raw Nationals\").get_comp_names())\n#raw12 = Competition('competitions-view?id=616')\n#raw13 = Competition('competitions-view?id=500')\n#raw14 = Competition('competitions-view?id=860')\n#raw15 = Competition('competitions-view?id=992')\n#raw16 = Competition('competitions-view?id=1354')\n#writetoCSV_lev2(raw14.return_dict(),raw14.get_col_names(), raw14.build_filename())\n#writetoCSV_lev2(raw15.return_dict(),raw15.get_col_names(), raw15.build_filename())\n#writetoCSV_lev2(raw16.return_dict(),raw16.get_col_names(), raw16.build_filename())\n#writetoCSV_lev3(raw12.return_hist_dict(),raw12.get_hist_col_names(), raw12.build_hist_filename())\n#writetoCSV_lev3(raw13.return_hist_dict(),raw13.get_hist_col_names(), raw13.build_hist_filename())\n#writetoCSV_lev3(raw14.return_hist_dict(),raw14.get_hist_col_names(), raw14.build_hist_filename())\n#writetoCSV_lev3(raw15.return_hist_dict(),raw15.get_hist_col_names(), raw15.build_hist_filename())\n#writetoCSV_lev3(raw16.return_hist_dict(),raw16.get_hist_col_names(), raw16.build_hist_filename())\n\n#raw17 = Competition('competitions-view?id=1776')\n#writetoCSV_lev2(raw17.return_dict(),raw17.get_col_names(), raw17.build_filename())\n#writetoCSV_lev3(raw17.return_hist_dict(), raw17.get_hist_col_names(), raw17.build_hist_filename())\n\n","sub_path":"writetoCSV.py","file_name":"writetoCSV.py","file_ext":"py","file_size_in_byte":4728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"449046172","text":"from keras import backend as K\n\n\ndef f1_score(y_true, y_pred):\n def recall(y_true, y_pred):\n \"\"\"Recall metric.\n\n Only computes a batch-wise average of recall.\n\n Computes the recall, a metric for multi-label classification of\n how many relevant items are selected.\n \"\"\"\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))\n recall = true_positives / (possible_positives + K.epsilon())\n return recall\n\n def precision(y_true, y_pred):\n \"\"\"Precision metric.\n\n Only computes a batch-wise average of precision.\n\n Computes the precision, a metric for multi-label classification of\n how many selected items are relevant.\n \"\"\"\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision\n\n precision = precision(y_true, y_pred)\n recall = recall(y_true, y_pred)\n return 2 * ((precision * recall) / (precision + recall + K.epsilon()))\n\n\n\"\"\"\nimport numpy as np\nfrom keras.callbacks import Callback\nfrom sklearn.metrics import confusion_matrix, f1_score, precision_score, \\\n recall_score\n \nclass Metrics(Callback):\n def on_train_begin(self, logs={}):\n self.val_f1s = []\n self.val_recalls = []\n self.val_precisions = []\n\n\n def on_epoch_end(self, epoch, logs={}):\n val_predict = (\n np.asarray(self.model.predict(self.model.validation_data[0]))).round()\n val_targ = self.model.validation_data[1]\n _val_f1 = f1_score(val_targ, val_predict)\n _val_recall = recall_score(val_targ, val_predict)\n _val_precision = precision_score(val_targ, val_predict)\n self.val_f1s.append(_val_f1)\n self.val_recalls.append(_val_recall)\n self.val_precisions.append(_val_precision)\n print “ — val_f1: % f — val_precision: % f — val_recall % f” % (\n _val_f1, _val_precision, _val_recall)\n return\n\n\nmetrics = Metrics()\n\"\"\"\n","sub_path":"iNeural/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":2171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"284200354","text":"import re\nimport time\n\nimport requests\nfrom bs4 import BeautifulSoup\n\nfrom spider.get_comment import SQL\nfrom spider.get_comment import getprice\nrequests.packages.urllib3.disable_warnings()\nheaders = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)'\n ' AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.75 Safari/537.36'}\n\nfor i in range(128383, 140432, 2):\n sss = SQL.save_mysql()\n link = sss.read_link(i)\n try:\n print(i)\n for row in link:\n url1 = row[0]\n print(url1)\n phone_id = url1.split('/')[-1].strip(\".html\")\n # phone_id = re.findall(r\"\\d+\", url1)\n print(phone_id)\n url = \"http:\"+url1\n print(url)\n price = getprice.jd_price(url)\n if price == KeyError:\n price = 0\n try:\n content = requests.get(url, headers=headers,timeout=100)\n except:\n content = requests.get(url, headers=headers)\n html = content.text\n #print html\n soup = BeautifulSoup(html, 'lxml')\n title = re.findall(r'(.*?)', html)\n name = title[0].replace('【行情 报价 价格 评测】-京东','')\\\n .replace('【','').replace('】','')\n print(name)\n num = getprice.get_comment_num(phone_id)\n print(num)\n print('id:'+phone_id[0])\n sss.save_comment(i, name, int(float(price)), int(num), phone_id)\n content.close()\n time.sleep(0.1)\n except:\n print(\"Error: 读取失败\")\n else:\n print(\"内容成功写入数据库\")\n","sub_path":"spider/get_comment/select_Netbook_info.py","file_name":"select_Netbook_info.py","file_ext":"py","file_size_in_byte":1694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"288128254","text":"\"\"\"Helper class for Indego.\"\"\"\nimport logging\nfrom dataclasses import dataclass, is_dataclass\nfrom datetime import datetime\nfrom typing import Any\n\n_LOGGER = logging.getLogger(__name__)\n\n\ndef nested_dataclass(*args, **kwargs): # noqa: D202\n \"\"\"Wrap a nested dataclass object.\"\"\"\n\n def wrapper(cls):\n cls = dataclass(cls, **kwargs)\n original_init = cls.__init__\n\n def __init__(self, *args, **kwargs):\n for name, value in kwargs.items():\n field_type = cls.__annotations__.get(name, None)\n if hasattr(field_type, \"__args__\"):\n inner_type = field_type.__args__[0]\n if is_dataclass(inner_type):\n new_obj = [inner_type(**dict_) for dict_ in value]\n kwargs[name] = new_obj\n else:\n if is_dataclass(field_type) and isinstance(value, dict):\n new_obj = field_type(**value)\n kwargs[name] = new_obj\n\n original_init(self, *args, **kwargs)\n\n cls.__init__ = __init__\n return cls\n\n return wrapper(args[0]) if args else wrapper\n\n\ndef convert_bosch_datetime(dt: Any = None) -> datetime:\n \"\"\"Create a datetime object from the string (or give back the datetime object) from Bosch. Checks if a valid number of milliseconds is sent.\"\"\"\n if dt:\n if isinstance(dt, str):\n if dt.find(\".\") > 0:\n return datetime.strptime(dt, \"%Y-%m-%dT%H:%M:%S.%f%z\")\n return datetime.strptime(dt, \"%Y-%m-%dT%H:%M:%S%z\")\n if isinstance(dt, datetime):\n return dt\n return None\n","sub_path":"pyIndego/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":1663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"517861322","text":"import pygame\n\nclass Button:\n def __init__(self, text, position, screen, font_size=20, clicked=False, attribute=None):\n self.text = text\n\n self.size = ((len(text) + 1) * 12, font_size + 15)\n\n self.clicked = self.inform if not clicked else clicked\n\n self.font_size = font_size\n self.position = position\n self.screen = screen\n self.width = 2\n self.charged = False\n self.attribute = attribute\n\n def draw(self):\n font = pygame.font.Font(\"20806.ttf\", self.font_size)\n text = font.render(self.text, True, (0, 0, 0))\n self.screen.blit(text, (self.position[0] + 6, self.position[1] + 4))\n\n pygame.draw.rect(self.screen, (0, 0, 0), (self.position[0], self.position[1], self.size[0], self.size[1]),\n width=2)\n if self.charged > 0:\n pygame.draw.rect(self.screen, (0, 0, 0), (self.position[0] + 3, self.position[1] + 3, self.size[0] - 5,\n self.size[1] - 5,), width=1)\n if self.charged == 2:\n pygame.draw.rect(self.screen, (0, 0, 0), (self.position[0] + 5, self.position[1] + 5, self.size[0] - 9,\n self.size[1] - 9,), width=1)\n\n def inform(self):\n print(\">>> ouch\")\n\n def under_cursor(self, mPos):\n checked_x = self.position[0] <= mPos[0] <= self.position[0] + self.size[0]\n checked_y = self.position[1] <= mPos[1] <= self.position[1] + self.size[1]\n return checked_x and checked_y\n","sub_path":"buttonClass.py","file_name":"buttonClass.py","file_ext":"py","file_size_in_byte":1566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"606794475","text":"import sys\n\ntwos = 0\nthrees = 0\n\nfile = open(sys.argv[1])\n\nfor line in file:\n\tcounts = dict()\n\tfor letter in line:\n\t\tif letter in counts:\n\t\t\ttmp = counts[letter]\n\t\t\tcounts[letter] = tmp+1\n\t\telse:\n\t\t\tcounts[letter] = 1\n\tif 2 in counts.values():\n\t\ttwos += 1\n\tif 3 in counts.values():\n\t\tthrees += 1\n\nprint(twos * threes)\nfile.close()","sub_path":"day2/puzzle1.py","file_name":"puzzle1.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"61583007","text":"import acm\nimport OptionFunctions\n\n\noptions = acm.FOption.Select('')\n\nfor option in options:\n if option.ExpiryDate()>'2019-01-01':\n for trade in option.Trades():\n approxLoad = trade.AdditionalInfo().Approx_46_load()\n #print approxLoad\n #print type(approxLoad)\n if approxLoad is not None and approxLoad==True:\n continue\n acquirer = trade.Acquirer()\n \n \n baseType = OptionFunctions.GetExoticOptionBaseType(option)\n productType = option.ProductTypeChlItem()\n if productType is not None:\n productType = productType.Name()\n if acquirer and acquirer.Name() == \"NLD DESK\":\n print(str(trade.Oid())+','+str(trade.Type())+\",\"+str(baseType) + \",\"+str(productType)+\",\")\n \n \n \n","sub_path":"Python modules/Rayan.py","file_name":"Rayan.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"382964959","text":"HOURS = 8\r\nOVERTIME = 1.5\r\n\r\n\r\nclass TimeCard:\r\n\r\n def __init__(self, start_date, end_date, start_time, end_time):\r\n self.start_date = start_date\r\n self.end_date = end_date\r\n self.start_time = start_time\r\n self.end_time = end_time\r\n\r\n def calculate_daily_pay(self, start_time, end_time, rate):\r\n hours = (end_time + 12) - start_time\r\n if hours <= HOURS:\r\n total_pay = (HOURS * rate)\r\n else:\r\n overtime_pay = ((hours - HOURS) * OVERTIME * rate)\r\n regular_pay = (HOURS * rate)\r\n total_pay = overtime_pay + regular_pay\r\n return total_pay\r\n\r\n\r\n","sub_path":"accounting/timecard.py","file_name":"timecard.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"470437102","text":"import ast\nfrom game.exceptions import *\nfrom functools import reduce\nfrom game.services import *\nfrom connection.models import *\n\ndef change_player(players,index_player):\n index_player = int(index_player)\n return players[0].auto_increment_id if index_player == len(players)-1 else players[index_player+1].auto_increment_id\n\ndef index_player(id_player, players):\n return players.index(reduce(lambda a, b: a if a.auto_increment_id == id_player else b, players))\n\n \ndef calculate_position(player_pos, movement):\n position = [player_pos[0]+movement[0], player_pos[1]+movement[1]]\n if is_coord_valide(position):\n return position\n else:\n raise OufOfBoardError(\"Test\", \"OutOfBoardError has occured\")\n\ndef is_coord_valide(coord):\n return len(list(filter(lambda x: x >= 0 and x < 8 , coord))) == 2\n\ndef update_board_content(board, player, num_player, previous_pos):\n position = player.pos\n if board[position[0]][position[1]] != num_player+1 and board[position[0]][position[1]] != 0:\n player.pos = previous_pos\n raise NotEmptyCellError(\"Test\", \"NotEmptyCellError has occured\")\n board[position[0]][position[1]] = num_player+1\n return board, player.pos\n\ndef string_to_list(s):\n if isinstance(s, str):\n return ast.literal_eval(s)\n return s\n\ndef listing_game_players(game_players):\n return listing_player_pos(list(game_players))\n\ndef listing_player_pos(players):\n return [convert_pos(p) for p in players]\n\ndef convert_pos(player):\n player.pos = string_to_list(player.pos)\n return player\n\n\ndef build_game_state(game_state_data, players, curr_player, code_error):\n players = build_players_entities(players)\n game_state = {\n \"game_id\" : game_state_data.auto_increment_id,\n \"board\" : game_state_data.board,\n \"players\" : players,\n \"current_player\" : curr_player,\n \"code\" : code_error, \n }\n return game_state\n\ndef build_players_entities(players):\n players_obj = []\n for player in players:\n dic = {\n \"id\":player.auto_increment_id,\n \"name\" : player.user.username,\n \"color\" : player.color,\n \"position\" : player.pos\n }\n players_obj.append(dic)\n return players_obj\n\ndef build_colors(users):\n colors = []\n for user in users:\n temp = []\n color1= user.color1\n color2= user.color2\n temp.append(color1)\n temp.append(color2)\n colors.append(temp)\n colors = sort_colors(colors)\n return colors\n\n\ndef sort_colors(colors):\n colorsReplace = [\"salmon\",\"pink\",\"coral\",\"gold\",\"darkkhaki\",\"orchid\"] #a completer si + de 7 joueurs (longueur de colors replace >= nb joueurs-1)\n tabColors = []\n #print(colors[0][0])\n tabColors.append(colors[0][0])\n i = 1\n j=0\n while i < len(colors):\n temp = colors[i][0]\n if temp not in tabColors:\n tabColors.append(temp)\n else:\n temp = colors[i][1]\n if temp not in tabColors:\n tabColors.append(temp)\n else:\n tabColors.append(colorsReplace[j])\n j+=1\n i+=1\n return tabColors\n \ndef move_pos(player, movement, game_state, players):\n previous_pos = player.pos\n player.pos = calculate_position(player.pos, movement)\n game_state.board, player.pos = update_board_content(game_state.board, player, players.index(player), previous_pos)\n complete_boxes(game_state.board,players.index(player)+1,player.pos)\n return game_state\n\ndef search_player_by_id(players, id):\n return reduce(lambda a, b: a.user.username if a.auto_increment_id == id else b, players)\n\n\n\ndef count_elements(stats, element):\n if(stats.get(element,0)==0):\n stats[element] = 1\n else:\n stats.update({element:stats.get(element)+1})\n return stats\n\ndef define_winner(board):\n stats = {}\n for line in board:\n for cell in line:\n stats = count_elements(stats, cell)\n max = -1\n kmax = -1\n tie = False\n for key,element in stats.items():\n if max < element:\n kmax = key\n max = element\n tie = False\n elif max == element:\n tie = True\n return (kmax, max, tie)\n\ndef end_of_game(board):\n return all([line.count(0)==0 for line in board])\n\ndef complete_boxes(board,player,coord):\n list_coor=[]\n completed_list=[]\n if coord[0]>0 and board[coord[0]-1][coord[1]]==0:\n coord1=[coord[0]-1,coord[1]]\n list_coor.append(coord1) \n if coord[0]0 and board[coord[0]][coord[1]-1]==0:\n coord3=[coord[0],coord[1]-1]\n list_coor.append(coord3)\n if coord[1]0:\n x=list_coor[0]\n for x in list_coor: \n for y in x:\n if y < 0: \n del x[y]\n if len(x)%2==1: \n list_coor.remove(x)\n list_of_boxes_to_fill(list_coor,board,player,completed_list) \n tab=clean_tab(completed_list)\n board=complete_board(tab,board,player) \n \ndef list_of_boxes_to_fill(liste,board,player,full_liste):\n for elem in liste:\n temp_liste=[elem]\n i=0\n while i0 and board[coord[0]-1][coord[1]]==0:\n coord1=[coord[0]-1,coord[1]]\n if coord1 not in temp_liste:\n temp_liste.append(coord1)\n else: \n if coord[0]>0 and board[coord[0]-1][coord[1]]!=player:\n temp_liste=[]\n return temp_liste\n if coord[0]0 and board[coord[0]][coord[1]-1]==0:\n coord3=[coord[0],coord[1]-1]\n if coord[1]>0 and coord3 not in temp_liste:\n temp_liste.append(coord3)\n else: \n if coord[1]>0 and board[coord[0]][coord[1]-1]!=player:\n temp_liste=[]\n return temp_liste\n if coord[1]2 and self.colunas>2: # verifica se a primeira linha tem 2 nº\n # e se esses valores são maiores que 2, de modo a ser um tabuleiro no mínimo de 3*3\n if len(lines)-1!=self.linhas:ficheiro_valido=0 \n #verifica o nº de linhas, que terá de ser igual a self.linhas, ou senão, ficheiro inváildo\n else:\n for i in range(1,len(lines)):\n line=lines[i].split() #separa carateres por linha\n if line.count('.')+line.count('#')+line.count('X')+line.count('O')!=self.colunas: \n #conta se o nº de carateres válido que tem de ser igual ao nº colunas\n ficheiro_valido=0 #ficheiro invalido\n else: ficheiro_valido=0 #ficheiro invalido\n except ValueError: ficheiro_valido=0 #ficheiro invalido, por não ter valores numéricos na 1ª linha \n if ficheiro_valido==1: # se ficheiro válido, escreve os carateres no self.tabuleiros\n self.tabuleiro=[]\n for i in range(1,len(lines)):\n self.tabuleiro.append(lines[i].split()) \n else: print(\"Erro: ficheiro com formato inválido.\") \n return self.tabuleiro\n except:\n print(\"Erro: na leitura do tabuleiro\")\n else:\n ficheiro.close()\n return self.tabuleiro\n \n def backup(self): \n #quando se mostra um tabuleiro, com outro aberto\n #retorna os dados originais; limpa-se backups\n self.linhas = self.lin_backup\n self.colunas = self.col_backup\n self.tabuleiro = self.tab_backup\n self.lin_backup=-1\n self.col_backup=-1\n self.tab_backup=[]\n \n def tabuleiro_valido(self):\n valido=1 #se valido=1, tabuleiro válido\n for i in range(self.linhas):\n if valido==1: #De modo a não fazer nada caso já tenha determinado que não e valido\n for j in range(self.colunas):\n if valido==1: #De modo a não fazer nada caso já tenha determinado que não e valido\n if self.tabuleiro[i][j]=='X' or self.tabuleiro[i][j]=='O': #se o carater a analisar é um X ou O\n try: #para não bloquear se ultrapassar os limites da matriz \n if (self.tabuleiro[i][j]==self.tabuleiro[i-1][j] and self.tabuleiro[i][j]==self.tabuleiro[i+1][j]) and i-1>-1: \n # verficar se os pontos na mesma coluna são iguais, i-1>-1 de modo a não comparar com o tabuleiro[-1], que seria o tabuleiro[len(tabuleiro)-1] \n valido=0 #tabuleiro não válido\n except IndexError: pass #indices fora da matriz não se podem comparar\n try:\n if(self.tabuleiro[i][j]==self.tabuleiro[i-1][j-1] and self.tabuleiro[i][j]==self.tabuleiro[i+1][j+1]) and i-1>-1 and j-1>-1: \n # verficar se os pontos na mesma diagonal são iguais i-1>-1 de modo a não comparar com o tabuleiro[-1], que seria o tabuleiro[len(tabuleiro)-1] \n valido=0 #tabuleiro não válido\n except IndexError: pass #indices fora da matriz não se podem comparar\n try: \n if (self.tabuleiro[i][j]==self.tabuleiro[i][j-1] and self.tabuleiro[i][j]==self.tabuleiro[i][j+1]) and j-1>-1: \n # verficar se os pontos na mesma linha são iguais\n valido=0 #tabuleiro não válido\n except IndexError: pass #indices fora da matriz não se podem comparar\n try:\n if (self.tabuleiro[i][j]==self.tabuleiro[i-1][j+1] and self.tabuleiro[i][j]==self.tabuleiro[i+1][j-1]) and i-1>-1 and j-1>-1: \n # verficar se os pontos na mesma diagonal são iguais\n valido=0 #tabuleiro não válido\n except IndexError: pass #indices fora da matriz não se podem comparar\n if self.tabuleiro==[]: # tabuleiro vazio\n print(\"Erro: nenhum tabuleiro aberto.\")\n return False \n elif valido==1: return True\n else:return False\n \n def jogada(self,caract,lin,col):\n try:\n if caract==\"X\" or caract==\"O\": #verifica caratares válidos\n if self.tabuleiro[lin][col]==\".\": #verifica se posição não está bloqueada\n if lin>=0 and lin=0 and col dir\n #[[(i),(j-1)],[(i),(j+1)]]:linha\n #[[(i-1),(j)],[(i+1),(j)]]:coluna\n #[[(i-1),(j+1)],[(i+1),(j-1)]]: diag dir -> esq\n todas_direcoes= [[[(i-2),(j-2)],[(i-1),(j-1)]],[[(i),(j-2)],[(i),(j-1)]],\n [[(i+1),(j-1)],[(i+2),(j-2)]],[[(i-2),(j)],[(i-1),(j)]],\n [[(i+1),(j)],[(i+2),(j)]],[[(i-2),(j+2)],[(i-1),(j+1)]],\n [[(i),(j+1)],[(i),(j+2)]],[[(i+1),(j+1)],[(i+2),(j+2)]],\n [[(i-1),(j-1)],[(i+1),(j+1)]],[[(i),(j-1)],[(i),(j+1)]],\n [[(i-1),(j)],[(i+1),(j)]],[[(i-1),(j+1)],[(i+1),(j-1)]]] \n sug=0 #para guardar se alguma sugestão já foi definida para o ponto (i,j)\n for direc in todas_direcoes:\n if sug==0: #caso já se saiba que é uma possível sugestão, isto limita o código a correr \n try:\n if (self.tabuleiro[direc[0][0]][direc[0][1]]==self.tabuleiro[direc[1][0]][direc[1][1]] \n and (self.tabuleiro[direc[0][0]][direc[0][1]]==\"X\" or \n self.tabuleiro[direc[0][0]][direc[0][1]]==\"O\") and \n direc[0][0]>-1 and direc[0][1]>-1 and direc[1][0]>-1 and direc[1][1]>-1):\n # Verifica se em alguma das opções de todas_direcoes ocorre\n # que os 2 elementos são iguais e iguais a X ou O, e garantir que os indices\n # não são negativos\n if self.tabuleiro[direc[0][0]][direc[0][1]]==\"O\": cel='X' #sugere o carater oposto para não bloquear o jogo\n else: cel='O'#sugere o carater oposto para não bloquear o jogo\n sugestoes_2.append([i,j,cel])#guarda linha, coluna e sugestao de carater\n sug=1#sugestão já definida\n except IndexError: pass #caso os indices ultrapassem os limites da matriz\n #de modo a não colocar mais condições que limitam os índices a valores menores que os limites da matriz \n sugestao = [-1,-1] #variável para o return\n if sugestoes_2 != []: #se houver alguma situação em que 2 carateres adjacentes iguais\n sugestao=random.choice(sugestoes_2) #escolha aleatória da lista de sugestão, porque todas as sugestões são de jogadas prioritárias e importantes a resolver\n else: # se nao houver nenhuma sequencia de 2 elementos iguais na lista, procura-se a primeira casa disponível \n sug=0 #para guardar se alguma sugestão já foi definida\n lin=0\n col=0\n try:\n while sug==0 and lin 0 and not self.tabuleiro_valido()))\n and not self.resolve_auto_bifurc.is_empty()): #se tabuleiro não resolvido e não válido, com bifurcações em memória\n print(\"Erro: tabuleiro inválido. Irá se retornar à última posição em que se podia ser ambas as jogadas.\")\n resolvido=0 #caso o tabuleiro estivesse preenchido, mas a última jogada tornava o tabuleiro inválido\n ult_pos=self.resolve_auto_bifurc.pop()\n pos_ult_jog=[]#irá guardar a linha e coluna aonde jogar O quando ocorre bifurcação\n while self.jogada_n > ult_pos: #enquanto não se faz os undo's até à bifurcação\n if self.jogada_n> ult_pos+1: self.return_undo(1) #se for antes da última jogada, não é preciso o retorno das posições\n else: pos_ult_jog=self.return_undo(1,1)#última jogada antes da bifurcação, retorna os posições da jogada anterior\n if gerar == 0: self.printpuzzle() #imprime cada iteração da resolução (exceto no gerar)\n print()\n self.jogada('O',pos_ult_jog[0],pos_ult_jog[1]) #no local da jogada da bifurcação joga O, que é o opsto do default X\n if gerar == 0: self.printpuzzle() #imprime cada iteração da resolução (exceto no gerar)\n print()\n \n if resolvido==1 and self.tabuleiro_valido():print(\"Tabuleiro Preenchido. Acabou o jogo.\") # o tabuleiro cheio e válido o jogo acabou\n elif gerar==0: print(\"Erro: tabuleiro sem solução.\")\n if gerar!=0 and dific==0:\n if resolvido==1 and self.tabuleiro_valido(): return True\n else: return False \n del self.resolve_auto_bifurc #elimina a stack de bifurcações\n self.resolve_auto_bifurc=Stack() #cria uma stack de bifurcações vazia\n \n def return_undo(self, anc=0, volta_atras_res_aut=0): \n #volta a jogada anterior; se anc diferente de 0, é usado no volta_unancora\n #volta_atras_res_aut!=0: usando no resolver para quando o resolver nao consegue\n #acabar um tabuleiro, mas teve pelo menos um jogada em que pode escolher entre X e O\n try:\n if not self.historico.is_empty(): #se o histórico não está vazio\n volta=[]\n if volta_atras_res_aut!=0: volta=self.historico.top()#guarda a alteração de linha para o resolver automático\n ult_altera=self.historico.pop() #define o tabuleiro como a jogada anterior\n self.tabuleiro[ult_altera[0]][ult_altera[1]]='.' # altera o tableiro as últimas posições alteradas para '.': célula vazia\n self.jogada_n-=1 #decrementa-se-se o nº da jogada\n if volta_atras_res_aut!=0: return volta #retornar a alteração de linha para o resolver automático\n else:#se o histórico está vazio (só tem uma jogada no histórico): retorna à jogada original\n if anc==0: print(\"Erro: não há jogadas anteriores em memória.\") # esta mensagem nao é imprimida se utilizada no undo ancora\n except: print(\"Erro: no undo\")\n \n def nova_ancora(self, new_var=0): \n #define guarda posição de jogada numa stack\n #new_var!=0: chamado em self.ver_mais_1_sol\n try:\n global pos\n pos=deepcopy(self.jogada_n) #duplica-se a instância self.jogada_n\n self.ancora.push(pos) #adiciona-se uma posição de jogadas no self.ancora\n del pos # elimina-se a instÂncia com o destruidor __del__\n if new_var==0: print(\"Âncora atual:\") #ancora atual é igual ao tabuleiro atual, a ser imprimido pelo código da Shell\n except: print(\"Erro: no ancora.\")\n \n def volta_unancora(self): #volta a ancora anterior \n try:\n if self.ancora.is_empty(): \n print(\"Erro: não há âncoras em memória.\") # se a stack do self.ancora está vazia\n elif self.ancora.top()>self.jogada_n: # se a jogada atual procede a última ancora\n print(\"Erro: última jogada procede última âncora; esta âncora será eliminada.\") # se, com o undo, o jogador já passou para posições anteriores\n self.ancora.pop() #última âncora eliminada\n else:\n pos= self.ancora.pop() #retira a última âncora\n while self.jogada_n > pos: #enquanto há jogadas posteriores à última ancora\n self.return_undo(1) #retorna a jogada anterior\n except: print(\"Erro: no undo.\")\n\n\n def gerar_tabuleiro(self,dific,linha,coluna):\n try:\n if (dific==1 or dific==2) and linha > 2 and coluna > 2: \n # se o nível de dificuldade é 1 (fácil)\n # ou 2 (díficil), e se o tamanho do tabuleiro é pelo menos 3*3\n # back-up (recorrendo a deepcopy nos Stacks) das variáveis que são usadas no jogada, undo, resolver,...\n # e limpeza (recorrendo ao __del__ em Stacks), de modo a ter \"variáveis vazias\", de modo a puder-se\n # usar os métodos já criados\n self.tab_backup=self.tabuleiro \n self.lin_backup=self.linhas\n self.col_backup=self.colunas\n self.linhas=linha\n self.colunas=coluna\n self.settabuleiro([]) #define o tabuleiro como vazio\n global jogadas_pos_gerar, historico_bk, ancora_bk\n jogadas_pos_gerar=deepcopy(self.jogada_n)\n historico_bk=deepcopy(self.historico)\n ancora_bk=deepcopy(self.ancora)\n novo_tab=[] # irá ter um tabuleiro vazio de tamanho linha*coluna\n for i in range(linha):\n novo_tab.append([])\n for j in range(coluna):\n novo_tab[i].append('.')\n poss_linha=[] #irá ter todos os valores 0:linha\n #usado para o random de jogadas bloquedas no tabuleiro\n for i in range(linha):poss_linha.append(i) #[0,1,2....,linha]\n poss_col=[] #irá ter todos os valores 0:coluna\n #usado para o random de jogadas bloquedas no tabuleiro\n for i in range(coluna):poss_col.append(i) #[0,1,2....,coluna]\n tab_valido = False #vai verificar se o tabulero criado é válido \n while not tab_valido: #enquanto não encontrar um tabuleiro com as caraterísticas que se pretende\n self.gerar_novas_variaveis()#limpa histórico, ancora e nº de jogadas; guarda em ancora a jogada inicial\n tab_gerar=novo_tab #igual a tabuleiro vazio\n tab_gerado_valido=False\n n_casas_bloq=int(uniform(0.03,0.10)*self.linhas*self.colunas) # 3% a 10% de casas bloqueadas (\"#\")\n for i in range(n_casas_bloq): #coloca o nº de casas bloqueadas definidas atrás\n linha_aleat=random.choice(poss_linha) #escolhe uma linha aleatoriamente\n col_aleat=random.choice(poss_col) #escolhe uma coluna aleatoriamente\n tab_gerar[linha_aleat][col_aleat]=\"#\" #coloca \"#\" na posição aleatória\n self.settabuleiro(tab_gerar) #guarda este novo tabuleiro\n tab_gerado_valido=False\n try: \n self.def_resolver_auto(1) #resolve o tabuleiro\n print(\"OK\")\n tab_gerado_valido=self.tabuleiro_valido() #verifica a validade\n except: pass\n if tab_gerado_valido:\n n_casas_bloq_jogadas=int(uniform(0.10,0.25)*linha*coluna) #bloqueia jogadas em 15% a 30% do tabuleiro\n for i in range(self.linhas*self.colunas, n_casas_bloq_jogadas-1, -1):\n colocado=False #verifica se foi colocada uma casa bloqueada no tabuleiro\n while not colocado: #enquanto não for colocada uma casa bloqueada no tabuleiro\n linha_aleat=random.choice(poss_linha) #escolhe uma linha aleatoriamente\n col_aleat=random.choice(poss_col) #escolhe uma coluna aleatoriamente\n if self.tabuleiro[linha_aleat][col_aleat]=='O' or self.tabuleiro[linha_aleat][col_aleat]=='X': # se a acasa aleatoriamente escolhida estiver preenchida\n self.tabuleiro[linha_aleat][col_aleat]='.' #coloca a casa vazia\n colocado=True\n self.gerar_novas_variaveis() #limpa histórico, ancora e nº de jogadas; guarda em ancora a jogada inicial\n self.conta_bifurc_gerar=0 #retorna a contagem de bifurcações a 0\n try:self.def_resolver_auto(1,1) #resolve o tabuleiro \n except: pass\n fator=float(self.conta_bifurc_gerar/(self.linhas*self.colunas - n_casas_bloq_jogadas)) \n #calcula a fracao de casas jogadas se poderiam\n # jogar tanto X ou O sobre o nº de casas total - as casas já bloquedas\n fator_facil_dificil=0.15 # se menos que 15% bifurcações: fácil, se mais: díficil\n if not self.ver_mais_1_sol(): # se não tem outra solução\n if ((dific == 1 and fator < fator_facil_dificil) or\n (dific == 2 or fator > fator_facil_dificil)):\n tab_valido=True #tabuleiro válido\n self.volta_unancora() #retorna ao tabuleiro inicial\n else: self.settabuleiro([]) #limpa tabuleiro\n else: self.settabuleiro([]) #limpa tabuleiro\n \n filename='Tab_dific_'+str(dific)+'_linhas_'+str(self.linhas)+'_colunas'+str(self.colunas)+'_auto.txt' #nome do ficheiro\n self.guardar_tabuleiro(filename) # guarda o ficheiro\n #retorna às variáveis originais\n self.backup()\n del self.historico\n del self.ancora\n self.historico=deepcopy(historico_bk)\n self.ancora=deepcopy(ancora_bk)\n self.conta_bifurc_gerar=0\n self.jogada_n=jogadas_pos_gerar\n del historico_bk\n del ancora_bk\n del jogadas_pos_gerar\n else:\n print(\"Erro: parâmetros inválidos.\") \n except: print(\"Erro: no gerar\")\n \n def ver_mais_1_sol(self):\n #vai verificar se o tabuleiro do gerar tem mais que uma solução\n outra_sol=False\n if self.resolve_auto_bifurc.size() != 0:\n ult_pos=self.resolve_auto_bifurc.pop() #última bifurcação\n pos_ult_jog=[]#irá guardar a linha e coluna aonde jogar O quando ocorre bifurcação\n while self.jogada_n > ult_pos: #enquanto não se faz os undo's até à bifurcação\n if self.jogada_n> ult_pos+1: self.return_undo(1) #se for antes da última jogada, não é preciso o retorno das posições\n else: pos_ult_jog=self.return_undo(1,1) #última jogada antes da bifurcação, retorna os posições da jogada anterior\n self.jogada('O',pos_ult_jog[0],pos_ult_jog[1]) #no local da jogada da bifurcação joga O, que é o opsto do default X\n print(\"Verificando outras soluções\")\n outra_sol=self.def_resolver_auto() #resolve o tabuleiro, à procura de uma 2ª solução\n print(\"Verificado\")\n return outra_sol\n \n def gerar_novas_variaveis(self):\n #limpa histórico e ancoras para o gerar\n del self.historico\n del self.ancora\n self.jogada_n=0 \n self.historico=Stack()\n self.ancora=Stack()\n self.nova_ancora(1) #ancora do tabuleiro vazio\n \n def getlinhas(self): #retorna o nº de linhas\n return self.linhas\n \n def getcolunas(self): #retorna o nº de colunas\n return self.colunas\n \n def gettabuleiro(self): #retorna tabuleiro\n return self.tabuleiro\n \n def settabuleiro(self, t): # iguala tabuleiro a t\n self.tabuleiro = t\n \n def printpuzzle(self): #imprime o tabuleiro\n for linha in self.tabuleiro:\n for simbolo in linha:\n print(simbolo,end=\" \")\n print()\n \n def guardar_tabuleiro(self, filename):\n try:\n jogo=self.gettabuleiro()#devolve o tabuleiro\n lin,col=self.getlinhas(),self.getcolunas()\n guardar_puzzle=open(filename,'w') #abre o ficheiro arg\n guardar_puzzle.write(str(lin)+' '+str(col)+'\\n') #escreve 1ª linha com nº linhas e colunas\n for i in range(lin): #imprimir linha a linha os carateres separados por espaços\n for j in range(col): #imprimir coluna a coluna os carateres\n if j != col-1: #se o ciclo não está no fim de uma linha\n guardar_puzzle.write(jogo[i][j]+' ') #imprimir carater e espaço\n elif i != lin-1: #se o ciclo está no fim de uma linha, mas não na última linha\n guardar_puzzle.write(jogo[i][j]+'\\n') #imprimir carater e mudança de linha\n else: #ciclo na última linha e coluna\n guardar_puzzle.write(jogo[i][j])#imprime só carater\n except: print(\"Erro: no guardar.\")\n else: guardar_puzzle.close() #fecha o ficheiro\n\n","sub_path":"GandaGaloEngine.py","file_name":"GandaGaloEngine.py","file_ext":"py","file_size_in_byte":30247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"383928025","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nfanofile = open(\"fano.dat\", \"w\")\nratefile = open(\"rate.dat\", \"w\")\n\nfor g_ei in np.arange(1, 4.01, 0.25):\n\tfor g_ie in np.arange(1, 4.01, 0.25):\n\t\tfname = \"raster\" + '%.2f' % g_ei + \"_\" '%.2f' % g_ie + \"filtered_E.dat\"\n\n\t\ttlist, nlist = np.loadtxt(fname, unpack=True)\n\n\t\tprint(g_ei, g_ie)\n\t\tnspikes = np.zeros(4096)\n\t\tnrates = np.zeros(4096)\n\n\t\tfor t, n in zip(tlist, nlist):\n\t\t\tn = int(n)\n\n\t\t\tnspikes[n] += 1\n\n\t\tnrates = nspikes/10.\n\n\t\tplt.figure()\n\t\tplt.hist(nrates, bins='auto', density=True)\n\t\tplt.savefig(\"ratehist_\" + str(g_ei) + \"_\" + str(g_ie) + \".png\")\n\t\tplt.close()\n\n\t\t#for i in range(4096):\n\t\t#\tcvilist[i] = np.var(dtlist)/np.mean(dtlist)\n\n\t\tfano = np.var(nrates)/np.mean(nrates)\n\n\t\tratefile.write(str(np.mean(nrates)) + \" \")\n\t\tfanofile.write(str(fano) + \" \")\n\n\n\tratefile.write(\"\\n\")\n\tfanofile.write(\"\\n\")\n","sub_path":"calcStat/calculate_fano.py","file_name":"calculate_fano.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"296439438","text":"import torch\nimport threading\nfrom torch import optim\nimport torch.distributed.rpc as rpc\nimport numpy as np\nimport os\nimport jsonpickle\n\nimport policy\nimport config as C\nimport utils\n\n\nclass BatchUpdateParameterServer():\n\n\n def __init__(self,batch_update_size = C.UPDATE_SIZE):\n\n\n self.HIDDEN_SIZE = C.HIDDEN_SIZE\n #self.DEVICE = torch.device('cuda' if torch.cuda.is_available() and C.use_cuda else 'cpu')\n self.DEVICE = 'cpu'\n\n self.model = policy.policy(observation_space_size=C.observation_space_size,\n action_space_size=C.action_space_size,\n hidden_size=self.HIDDEN_SIZE).to(self.DEVICE)\n\n self.lock = threading.Lock()\n self.future_model = torch.futures.Future()\n self.batch_update_size = batch_update_size\n self.curr_update_size = 0\n self.current_rewards = []\n self.optimization_history = []\n self.optimizer = optim.Adam(params=self.model.parameters(), lr=C.ALPHA)\n for p in self.model.parameters():\n p.grad = torch.zeros_like(p)\n\n \n def get_model(self):\n #return self.model.cpu() #because rpc can only pass cpu data\n #return self.model.to('cpu')\n return self.model\n\n @staticmethod\n @rpc.functions.async_execution\n def update_and_fetch_model(ps_rref, grads, rewards):\n self = ps_rref.local_value()\n\n utils.timed_log(f\"PS got {self.curr_update_size}/{self.batch_update_size} updates\")\n\n for p, g in zip(self.model.parameters(), grads):\n p.grad += g\n\n for reward in rewards:\n self.current_rewards.append(reward)\n self.curr_update_size += 1\n\n with self.lock:\n\n fut = self.future_model\n\n if self.curr_update_size >= self.batch_update_size:\n for p in self.model.parameters():\n p.grad /= self.batch_update_size\n\n self.curr_update_size = 0\n self.optimizer.step()\n self.optimizer.zero_grad()\n fut.set_result(self.model)\n utils.timed_log(f\"rewards length is {len(self.current_rewards)}\")\n utils.timed_log(f\"average reward is {np.mean(self.current_rewards)}\")\n utils.timed_log(\"PS updated model\")\n self.future_model = torch.futures.Future()\n\n #save the model\n cwd = os.getcwd()\n parameter_file = C.trained_model_name\n cwd = os.path.join(cwd,parameter_file)\n torch.save(self.model.state_dict(),cwd)\n\n #record optimization history\n self.optimization_history.append(np.mean(self.current_rewards))\n optimization_history = {}\n optimization_history['history'] = self.optimization_history\n #store the history\n cwd = os.getcwd()\n #cwd = os.path.join(cwd, 'data_folder')\n parameter_file = 'optimization_history.json'\n cwd = os.path.join(cwd,parameter_file)\n with open(cwd, 'w') as statusFile:\n statusFile.write(jsonpickle.encode(optimization_history))\n\n self.current_rewards = []\n\n return fut","sub_path":"spy_many_discrete_actions/spy/reward_type_3/model_1_gpu/BatchUpdateParameterServer.py","file_name":"BatchUpdateParameterServer.py","file_ext":"py","file_size_in_byte":3275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"253817028","text":"import os \nf=list(open('20.list'))\nf=[i.split() for i in f]\ntype_dict={a:b for(a,b) in f}\nucf_dir='/home/mcislab/li/UCF101/'\nucf_video=os.listdir(ucf_dir)\nvideo_dir=[ucf_dir+i for i in ucf_video]\nucf_examples=[]\nfor i in video_dir:\n\tfra_len=len(os.listdir(i))\n\tif (fra_len<=16):\n\t\tcontinue\n\tdir_=i\n\ttype_=type_dict[i.split('/')[-1].split('_')[1]]\n\tstart_=0\n\tiou_=1\n\tinterval_=int(fra_len/16)\n\tucf_example=[dir_,type_,start_,iou_,interval_]\n\tucf_examples.append(ucf_example)\nwith open('train.list','a+') as f:\n\tfor i in ucf_examples:\n\t\tf.write(i[0]+' '+str(i[1])+' '+str(i[2])+' '+str(i[3])+' '+str(i[4])+'\\n')\n\n","sub_path":"localization/list/test_ucf.py","file_name":"test_ucf.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"608571807","text":"from gensim.models import Word2Vec\nfrom train import TrainLogger\n\nif __name__ == '__main__':\n model = Word2Vec.load('data/zhwiki_vs100w5mc5.model')\n sim_words = model.wv.most_similar(\n positive=['国王', '女'],\n negative=['男'],\n topn=5\n )\n print(sim_words)","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"158044812","text":"from agent import pipeline, source\nfrom agent import cli\nfrom ..test_zpipeline_base import TestInputBase\nfrom ...test_pipelines.test_zpipeline_base import get_schema_id\n\n\nclass TestDirectory(TestInputBase):\n __test__ = True\n params = {\n 'test_create_source_with_file': [{'file_name': 'directory_sources'}],\n 'test_create_with_file': [{'file_name': 'directory_pipelines'}],\n }\n\n def test_source_create(self, cli_runner):\n result = cli_runner.invoke(cli.source.create, catch_exceptions=False,\n input=\"directory\\ntest_dir_csv\\n/home/test-directory-collector\\n*.csv\\nDELIMITED\\n\\ny\\n\\n\\n\")\n assert result.exit_code == 0\n assert source.repository.exists('test_dir_csv')\n\n def test_create(self, cli_runner):\n pipeline_id = 'test_dir_csv'\n result = cli_runner.invoke(cli.pipeline.create, ['-a'], catch_exceptions=False,\n input=f\"{pipeline_id}\\ntest_dir_csv\\n\\ny\\ncount_records\\ny\\n\\nClicks:gauge\\nClicks:clicks\\ntimestamp_datetime\\nstring\\nMMddyyyy\\n\\nver Country\\nExchange optional_dim\\nversion:1\\n\\n\\n\\n1h\\n\\n\\n\")\n assert result.exit_code == 0\n pipeline_obj = pipeline.repository.get_by_id(pipeline_id)\n assert pipeline_obj.schema == {\n 'id': get_schema_id(pipeline_id),\n 'version': '1',\n 'name': pipeline_id,\n 'dimensions': ['ver', 'Country', 'Exchange', 'optional_dim', 'version'],\n 'measurements': {'clicks': {'aggregation': 'average', 'countBy': 'none'},\n 'count_records': {'aggregation': 'sum', 'countBy': 'none'}},\n 'missingDimPolicy': {\n 'action': 'fill',\n 'fill': 'NULL'\n }\n }\n","sub_path":"agent/tests/test_input/test_1/test_directory_http.py","file_name":"test_directory_http.py","file_ext":"py","file_size_in_byte":1775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"241160323","text":"#!/usr/bin/env python\n\nimport os\nfrom shutil import rmtree\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport cv2\nimport dcoloring, render\nfrom multiprocessing import Pool\n\nFILE_NAME = 'exp(z)'\nWIDTH = 1.5\nHEIGHT = 1.5\nPOINTS_PER_DIM = 2048\nFRAMES = 240\nFPS = 60\nBACK_FORTH = True\nTEMP_DIR = 'temp'\nOUT_DIR = 'mp_out'\n\nx, y = np.ogrid[\n -WIDTH/2:WIDTH/2:POINTS_PER_DIM*1j,\n -HEIGHT/2:HEIGHT/2:POINTS_PER_DIM*1j\n]\nz = x + 1j*y\n\n# 0 to 1 inclusive\nlerp = np.arange(0.0, 1.0 + 1.0/FRAMES/2, 1.0/FRAMES)\ncerp = dcoloring.cos_interpolation(lerp)\ninterp = cerp\n\n# Perpare plots and function\nfunc_name = 'tan(z^-1)'\npath = OUT_DIR + '/' + func_name\n\ndef save_frame(idx):\n\n w = z * (1.0 - interp[idx]) + np.tan(1/z) * (interp[idx])\n\n cfunc_plot = dcoloring.colorize(w, grid=False)\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n\n ax.imshow(cfunc_plot, extent=(-WIDTH / 2, WIDTH / 2, -HEIGHT / 2, HEIGHT / 2))\n ax.set(xlabel='{:3.0f}%'.format(interp[idx] * 100), title=func_name)\n\n # Save frame\n print('Rendering frame {0:{2}}/{1:{2}}'.format(idx, FRAMES, int(np.log10(FRAMES) + 1)))\n temp_path = '{}/frame.{}.png'.format(TEMP_DIR, idx)\n fig.savefig(temp_path, dpi=600, transparent=True)\n\n # Resize for aliasing\n img = cv2.imread(temp_path)\n img = cv2.resize(img, (int(img.shape[1] / 3), int(img.shape[0] / 3)), interpolation=cv2.INTER_AREA)\n cv2.imwrite(temp_path, img)\n\n # Save image of complete function\n if idx == FRAMES - 1:\n img_path = '{}.png'.format(path)\n fig.savefig(img_path, dpi=1600, transparent=True)\n img = cv2.imread(img_path)\n img = cv2.resize(img, (int(img.shape[1] / 4), int(img.shape[0] / 4)), interpolation=cv2.INTER_AREA)\n cv2.imwrite('{}.png'.format(path), img)\n\n plt.close(fig)\n\n\ncfunctions = [\n # ('exp(z)', np.exp(z)),\n # ('ln(z)', np.log(z)),\n # ('z^3', z*z*z),\n # ('z^2', z*z),\n # ('z^0.5', np.sqrt(z)),\n # ('z^-1', 1/z),\n # ('z^-0.5', 1/np.sqrt(z)),\n # ('z^-2', 1/(z*z)),\n # ('sin(z)', np.sin(z)),\n # ('sinh(z)', np.sinh(z)),\n # ('asin(z)', np.arcsin(z)),\n # ('tan(z)', np.tan(z)),\n # ('Zoomed sin(z^-1)2', np.sin(1/z)),\n # ('Area_6-3_8m', np.tan(1/z)),\n]\n\nif not os.path.exists(OUT_DIR):\n os.makedirs(OUT_DIR)\n\nprint('Processing {}'.format(func_name))\n\nif not os.path.exists(TEMP_DIR):\n os.makedirs(TEMP_DIR)\n\nwith Pool(processes=3) as pool:\n pool.map(save_frame, range(0, len(interp)))\n\nfile_paths = sorted(['{}/{}'.format(TEMP_DIR, fn) for fn in os.listdir(TEMP_DIR)], key=lambda fp: int(fp.split('.')[1]))\n\nif BACK_FORTH:\n file_paths = file_paths + list(reversed(file_paths[1:-1]))\n\nrender.create_webm(path, file_paths, fps=FPS, bitrate='8162k')\nrmtree(TEMP_DIR)\nplt.close('all')\n","sub_path":"graph/mp_interp.py","file_name":"mp_interp.py","file_ext":"py","file_size_in_byte":2758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"628645068","text":"from datetime import datetime, timedelta\nfrom flask import jsonify, abort, request, Blueprint\nimport pandas as pd\nimport random\nimport os,sys\nfrom py2neo import Graph,Node,Relationship,NodeMatcher\nimport pickle\nimport networkx as nx\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom IPython import get_ipython\nimport re\nimport create_graph as cg\nsns.set()\n\nSMALL_SIZE = 8\nMEDIUM_SIZE = 10\nBIGGER_SIZE = 12\n\nplt.rc('font', size=SMALL_SIZE) # controls default text sizes\nplt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title\nplt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels\nplt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels\nplt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels\nplt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize\nplt.rc('figure', titlesize=BIGGER_SIZE)\n\nREQUEST_API = Blueprint('request_api', __name__)\n\n\ndef get_blueprint():\n \"\"\"Return the blueprint for the main app module\"\"\"\n return REQUEST_API\n\n@REQUEST_API.route('/Basic_Neo4j/', methods=['GET'])\ndef create_tree(_subject,_classes):\n\tgraph=Graph(\"bolt://localhost:7687\",auth=(\"neo4j\",\"1234\"))\n\tsub=_subject.upper()\n\tcl=_classes.upper()\n\tcg.generate_graph(graph,sub.split(\",\"),cl.split(\",\"))\n\n@REQUEST_API.route('/Delete_Graph', methods=['GET'])\ndef delete_tree():\n\tgraph=Graph(\"bolt://localhost:7687\",auth=(\"neo4j\",\"1234\"))\n\tgraph.delete_all()\n\tprint(\"Graph deleted successfully\")\n\n@REQUEST_API.route('/Complete_Tree', methods=['GET'])\ndef get_tree():\n graph=Graph(\"bolt://localhost:7687\",auth=(\"neo4j\",\"admin\"))\n G=nx.DiGraph()\n query=\"MATCH(n {Name: \\\"PHYSICS\\\"}) CALL apoc.path.subgraphAll(n,{relationshipFilter:\\\"HAS>|EXTENDED>\\\",bfs:false}) YIELD nodes,relationships UNWIND relationships AS r Return startNode(r).Name,id(startNode(r)),Type(r),r.Weight,endNode(r).Name,id(endNode(r));\"\n k=graph.run(query)\n lt=[]\n for i in k:\n G.add_node(i[0])\n G.add_node(i[4])\n G.add_weighted_edges_from([(i[0],i[4],i[3])])\n dt={}\n dt[\"start\"]=i[0]\n dt[\"type\"]=i[1]\n dt[\"end\"]=i[2]\n lt.append(dt)\n print(dt)\n plt.figure()\n nx.draw_networkx(G,pos=None,arrows=True,with_labels=True)\n # nx.draw_networkx_edge_labels(G,pos,weight)\n plt.show()\n print(\"----------------------------------------------------------------------------------------\")\n return jsonify(lt)\n\n@REQUEST_API.route('/Sub_Tree/', methods=['GET'])\ndef get_subtree(_name):\n graph=Graph(\"bolt://localhost:7687\",auth=(\"neo4j\",\"admin\"))\n print(_name)\n G=nx.DiGraph()\n st=_name\t\n query=\"MATCH(n {Name: '%s'}) CALL apoc.path.subgraphAll(n,{relationshipFilter:\\\"HAS>|EXTENDED>\\\",bfs:false}) YIELD nodes,relationships UNWIND relationships AS r Return startNode(r).Name,id(startNode(r)),Type(r),r.Weight,endNode(r).Name,id(endNode(r)) Order By r.Weight DESC;\"%(st)\n print(query)\n k=graph.run(query)\n lt=[]\n for i in k:\n G.add_node(i[0])\n G.add_node(i[4])\n G.add_weighted_edges_from([(i[0],i[4],i[3])])\n dt={}\n dt[\"start\"]=i[0]\n dt[\"type\"]=i[2]\n dt[\"weight\"]=i[3]\n dt[\"end\"]=i[4]\n lt.append(dt)\n print(dt)\n plt.figure()\n nx.draw_networkx(G,pos=None,arrows=True,with_labels=True)\n # nx.draw_networkx_edge_labels(G,pos,weight)\n plt.show()\n print(\"----------------------------------------------------------------------------------------\")\n return jsonify(lt)\n\n@REQUEST_API.route('/Leafs_EM', methods=['GET'])\ndef leafs():\n graph=Graph(\"bolt://localhost:7687\",auth=(\"neo4j\",\"admin\"))\n query=\"MATCH (n:Concept) RETURN n\"\n print(query)\n k=graph.run(query)\n lt=[]\n for i in k:\n lt.append(i)\n print(i)\n return jsonify(lt)\n\n@REQUEST_API.route('/EM_Taxo', methods=['GET'])\ndef get_Taxo():\n graph=Graph(\"bolt://localhost:7687\",auth=(\"neo4j\",\"admin\"))\n query=\"MATCH(n {Name: \\\"PHYSICS\\\"}) CALL apoc.path.subgraphAll(n,{relationshipFilter:\\\"HAS>\\\",bfs:false}) YIELD nodes,relationships UNWIND relationships AS r Return startNode(r).Name,TYPE(r),endNode(r).Name;\"\n k=graph.run(query)\n lt=[]\n for i in k:\n dt={}\n dt[\"start\"]=i[0]\n dt[\"type\"]=i[1]\n dt[\"end\"]=i[2]\n lt.append(dt)\n print(dt)\n print(\"----------------------------------------------------------------------------------------\")\n return jsonify(lt)\n\n@REQUEST_API.route('/Shortest_Path//', methods=['GET'])\ndef get_shortest(_start,_end):\n graph=Graph(\"bolt://localhost:7687\",auth=(\"neo4j\",\"admin\"))\n query=\"MATCH (from {Name:'%s'}), (to {Name:'%s'}) CALL apoc.algo.dijkstraWithDefaultWeight(from, to, 'HAS>|EXTENDED>', 'Weight',1) yield path as path, weight as wt With wt , reduce(output=[], n IN relationships(path) | output+n ) as nodeCollection Order BY wt Limit 1 UNWIND nodeCollection as client RETURN distinct startNode(client).Name,Type(client),endNode(client).Name\"%(_start,_end)\n k=graph.run(query)\n lt=[]\n for i in k:\n dt={}\n dt[\"start\"]=i[0]\n dt[\"type\"]=i[1]\n dt[\"end\"]=i[2]\n lt.append(dt)\n print(dt)\n print(\"----------------------------------------------------------------------------------------\")\n return jsonify(lt)\n","sub_path":"src/routes/final.py","file_name":"final.py","file_ext":"py","file_size_in_byte":5343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"133641871","text":"import os\nimport sys\nimport unittest\nfrom subprocess import run\n\nfrom pathlib import Path\nimport requests\n\nsys.path.append(os.path.abspath('..'))\n\nfrom sardadmin.group import Group, history\nfrom sardadmin.user import User\n\ngroups = [\n 'Domain Admins',\n 'Domain Users',\n 'Domain Guests',\n 'Domain Computers',\n 'Administrators',\n 'Account Operators',\n 'Print Operators',\n 'Backup Operators',\n 'Replicators',\n]\n\nusers = [\n 'root',\n 'nobody',\n]\n\ndef clean():\n for x in User.listAll():\n if not x in users:\n User(x).delete()\n for x in Group.listAll():\n if not x in groups:\n Group(x).delete()\n\n# extra measure to avoid running this in production\nassert set(groups) == set(Group.listAll())\n\n# extra measure to avoid running this in production\nassert set(users) == set(User.listAll())\n\nclass UserTest(unittest.TestCase):\n def setUp(self):\n clean()\n\n def tearDown(self):\n clean()\n\n def test_list(self):\n self.assertListEqual(User.listAll(), users)\n\n def test_criate_delete(self):\n self.assertListEqual(User.listAll(), users)\n User('criate_delete').create()\n self.assertListEqual(User.listAll(), users + ['criate_delete'])\n User('criate_delete').delete()\n self.assertListEqual(User.listAll(), users)\n self.assertListEqual(Group.listAll(), groups)\n\n def test_permissions(self):\n mypath = '/home/permissions/a/b/c/d/e/f'\n os.makedirs(mypath, mode=0o000)\n User('permissions').create()\n uid = User('permissions').uid()\n gid = Group('permissions').gid()\n self.assertEqual(os.stat(mypath).st_gid, gid)\n self.assertEqual(os.stat(mypath).st_uid, uid)\n os.removedirs(mypath)\n\n def test_populateHome(self):\n User('populateHome').create()\n self.assertEqual(os.path.exists('/home/populateHome/Desktop/operacoes'), True)\n\n\nclass GroupTest(unittest.TestCase):\n def setUp(self):\n clean()\n\n def tearDown(self):\n clean()\n\n def test_list(self):\n self.assertListEqual(Group.listAll(), groups)\n\n def test_criate_delete(self):\n self.assertListEqual(Group.listAll(), groups)\n Group('criate_delete', history_timeout=0.1).create()\n self.assertListEqual(Group.listAll(), groups + ['criate_delete'])\n Group('criate_delete').delete()\n self.assertListEqual(Group.listAll(), groups)\n\n def test_folder(self):\n Group('folder', history_timeout=0.1).create()\n self.assertListEqual(Group.listAll(), groups + ['folder'])\n history[-1]['thread'].join()\n self.assertEqual(os.path.exists('/operacoes/folder'), True)\n stat = os.stat('/operacoes/folder')\n self.assertEqual(stat.st_mode, 0o40550)\n\n def test_users(self):\n Group('users', history_timeout=0.1).create()\n User('usersA').create()\n User('usersA').enterGroup('users')\n User('usersB').create()\n User('usersB').enterGroup('users')\n myusers = Group('users').users()\n self.assertListEqual(myusers, ['usersA', 'usersB'])\n\n def wait_history(self):\n while True:\n job = history[-1]\n running = job['running']\n if not running:\n break\n job['thread'].join()\n\n def test_permissions(self, mode=0o000):\n if os.path.exists('/operacoes/permissions'):\n run(['rm', '-r', '/operacoes/permissions'], check=True)\n Group('permissions', history_timeout=0.1).create()\n self.wait_history()\n\n dirs = [\n '/operacoes/permissions/a/b/c',\n ]\n files = [\n '/operacoes/permissions/file.dd',\n '/operacoes/permissions/log',\n '/operacoes/permissions/SARD/Lista de Arquivos.csv',\n '/operacoes/permissions/SARD/indexador/somedir/java.jar',\n ]\n files2 = [\n '/operacoes/permissions/SARD/indexador/tools/file.txt',\n '/operacoes/permissions/SARD/indexador/jre/bin/file.txt',\n '/operacoes/permissions/SARD/indexador/lib/file.txt',\n '/operacoes/permissions/SARD/a.exe',\n ]\n uid = 0\n gid = Group('permissions').gid()\n for d in dirs:\n os.makedirs(d, mode=mode)\n os.chown(d, 13, 13)\n for f in files + files2:\n os.makedirs(os.path.dirname(f), mode=mode, exist_ok=True)\n Path(f).touch(mode=mode)\n os.chown(f, 13, 13)\n Group('permissions', history_timeout=0.1).permissions()\n self.wait_history()\n\n for d in dirs:\n self.assertEqual(os.stat(d).st_mode, 0o40555, d)\n for f in files:\n self.assertEqual(os.stat(f).st_mode, 0o100444, f)\n for d in [\n '/operacoes/permissions',\n ]:\n self.assertEqual(os.stat(d).st_gid, gid, d)\n self.assertEqual(os.stat(d).st_uid, uid, d)\n self.assertEqual(os.stat(d).st_mode, 0o40550, d)\n for f in files2:\n self.assertEqual(os.stat(f).st_mode, 0o100555, f)\n\n\nclass APIGroupTest(unittest.TestCase):\n def setUp(self):\n clean()\n\n def tearDown(self):\n clean()\n\n def test_list(self):\n resp = requests.get('http://api:80/group/')\n data = resp.json()\n self.assertDictEqual(data, {\"groups\": groups})\n\n def test_add(self):\n resp = requests.post('http://api:80/group/add')\n self.assertEqual(resp.text, \"\")\n self.assertEqual(resp.ok, True)\n resp = requests.get('http://api:80/group/')\n data = resp.json()\n self.assertIn(\"add\", data['groups'])\n self.assertListEqual(Group.listAll(), groups + ['add'])\n Group('add').delete()\n self.assertListEqual(Group.listAll(), groups)\n\n def test_list_members(self):\n resp = requests.post('http://api:80/group/list_members')\n self.assertEqual(resp.text, \"\")\n self.assertEqual(resp.ok, True)\n resp = requests.post('http://api:80/user/userAm')\n self.assertEqual(resp.text, \"\")\n self.assertEqual(resp.ok, True)\n resp = requests.post('http://api:80/user/userAm/group/list_members')\n self.assertEqual(resp.text, \"\")\n self.assertEqual(resp.ok, True)\n resp = requests.post('http://api:80/user/userBm')\n self.assertEqual(resp.text, \"\")\n self.assertEqual(resp.ok, True)\n resp = requests.post('http://api:80/user/userBm/group/list_members')\n self.assertEqual(resp.text, \"\")\n self.assertEqual(resp.ok, True)\n resp = requests.get('http://api:80/group/list_members')\n data = resp.json()\n self.assertListEqual(data, ['userAm', 'userBm'])\n\n def test_double_add(self):\n resp = requests.post('http://api:80/group/double_add')\n self.assertEqual(resp.text, \"\")\n self.assertEqual(resp.ok, True)\n resp = requests.post('http://api:80/group/double_add')\n self.assertEqual(resp.ok, False)\n\n def test_double_perm(self):\n resp = requests.post('http://api:80/group/double_perm')\n self.assertEqual(resp.text, \"\")\n self.assertEqual(resp.ok, True)\n resp = requests.post('http://api:80/group/double_perm/permissions')\n self.assertEqual(resp.ok, False)\n\nclass APIUserTest(unittest.TestCase):\n def setUp(self):\n clean()\n\n def tearDown(self):\n clean()\n\n def test_list(self):\n resp = requests.get('http://api:80/user/')\n data = resp.json()\n self.assertDictEqual(data, {\"users\": users})\n\n def test_add(self):\n resp = requests.post('http://api:80/user/addU')\n self.assertEqual(resp.text, \"\")\n self.assertEqual(resp.ok, True)\n resp = requests.get('http://api:80/user/')\n data = resp.json()\n self.assertIn(\"addU\", data['users'])\n self.assertListEqual(User.listAll(), users + ['addU'])\n User('addU').delete()\n self.assertListEqual(User.listAll(), users)\n self.assertListEqual(Group.listAll(), groups)\n\n def test_add_listgroups(self):\n resp = requests.post('http://api:80/group/groupA')\n self.assertEqual(resp.text, \"\")\n self.assertEqual(resp.ok, True)\n resp = requests.post('http://api:80/group/groupB')\n self.assertEqual(resp.text, \"\")\n self.assertEqual(resp.ok, True)\n resp = requests.post('http://api:80/group/groupC')\n self.assertEqual(resp.text, \"\")\n self.assertEqual(resp.ok, True)\n resp = requests.post('http://api:80/user/userList')\n self.assertEqual(resp.text, \"\")\n self.assertEqual(resp.ok, True)\n resp = requests.post('http://api:80/user/userList/group/groupA')\n self.assertEqual(resp.text, \"\")\n self.assertEqual(resp.ok, True)\n resp = requests.post('http://api:80/user/userList/group/groupB')\n self.assertEqual(resp.text, \"\")\n self.assertEqual(resp.ok, True)\n resp = requests.post('http://api:80/user/userList/group/groupC')\n self.assertEqual(resp.text, \"\")\n self.assertEqual(resp.ok, True)\n resp = requests.get('http://api:80/user/userList')\n data = resp.json()\n self.assertListEqual(\n sorted(data['groups']),\n sorted(['groupA', 'groupB', 'groupC', 'userList', 'Domain Users']))\n\n def test_double_add(self):\n resp = requests.post('http://api:80/user/double_add')\n self.assertEqual(resp.text, \"\")\n self.assertEqual(resp.ok, True)\n resp = requests.post('http://api:80/user/double_add')\n self.assertEqual(resp.ok, False)\n\nclass APIPasswordTest(unittest.TestCase):\n def setUp(self):\n clean()\n\n def tearDown(self):\n clean()\n\n def test_change(self):\n resp = requests.post('http://api:80/user/change')\n self.assertEqual(resp.text, \"\")\n self.assertEqual(resp.ok, True)\n resp = requests.post('http://api:80/user/change/reset_password', json={\n \"password\": \"1234\"\n })\n self.assertDictEqual(resp.json(), {\"password\": \"1234\"})\n self.assertEqual(resp.ok, True)\n","sub_path":"tests/integration.py","file_name":"integration.py","file_ext":"py","file_size_in_byte":10117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"508186270","text":"#!/usr/bin/python3\n# Copyright (c) 2017, 2022, Oracle Corporation and/or its affiliates.\n# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl\n# Derived and adapted from https://www.ateam-oracle.com/secure-way-of-managing-secrets-in-oci\n\nimport os\nimport oci\nfrom oci.container_engine import ContainerEngineClient\n\ncompartment_id = '${compartment_id}'\ncluster_id = '${cluster_id}'\nregion = '${region}'\npools_to_taint = ['${pools_to_taint}']\n\nsigner = oci.auth.signers.InstancePrincipalsSecurityTokenSigner()\n\noce = oci.container_engine.ContainerEngineClient(config={'region': region}, signer=signer)\n\n# Get list of node pools\nlist_pools = []\n\nfor p in pools_to_taint:\n list_pools = oce.list_node_pools(compartment_id,cluster_id=cluster_id,name=p)\n\n# Count number of node pools to taint\n number_of_node_pools = len(list_pools.data)\n\n# Get list of node pool ids to taint\n pool_ids = []\n\n for n in range(0,number_of_node_pools):\n pool_ids.append(list_pools.data[n].id)\n\n# for all node pool ids to taint, get a list of node pools\n node_pools = []\n\n for node_pool_id in pool_ids:\n resp = oce.get_node_pool(node_pool_id)\n node_pools.append(resp.data)\n\n# for all node pools to taint, get a list of their worker nodes\n all_nodes = []\n\n for nodepool in node_pools:\n try:\n nodes = nodepool.nodes\n for node in nodes:\n all_nodes.append(node)\n except TypeError:\n continue\n\n# for each node in the node pool, get their private_ip and write to file\n with open('taint_autoscaler_pool_list.txt', 'a') as filehandle:\n for nodepool in node_pools:\n try:\n nodes = nodepool.nodes\n for node in nodes:\n filehandle.write('%s\\n' % node.private_ip)\n except TypeError:\n continue","sub_path":"modules/extensions/scripts/create_autoscaler_pool_taint_list.py","file_name":"create_autoscaler_pool_taint_list.py","file_ext":"py","file_size_in_byte":1888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"443766561","text":"from urllib import request, parse\nfrom bs4 import BeautifulSoup\nfrom incaptcha import Application\nfrom PIL import Image\nfrom io import BytesIO\nimport http.cookiejar\nimport requests\nimport json\ncaptcha='cloud'\n\ncaptchaid=''\nreq = request.Request('https://accounts.douban.com/login')\nreq.add_header('Origin', 'https://accounts.douban.com')\nreq.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.98 Safari/537.36')\nreq.add_header('Referer', 'https://accounts.douban.com/login?alias=15831431009&redir=https%3A%2F%2Fwww.douban.com&source=None&error=1012')\nreq.add_header('Upgrade-Insecure-Requests', '1')\nfilename = 'cookie.txt'\ncookie =http.cookiejar.MozillaCookieJar(filename)\nopener = request.build_opener(request.HTTPCookieProcessor(cookie))\nf=opener.open(req)\ncookie.save(ignore_discard=True, ignore_expires=True)\nprint('Status:', f.status, f.reason)\nfor k, v in f.getheaders():\n print('%s: %s' % (k, v))\n str=''\n while 1:\n str=f.readline().decode('utf-8')\n if(str==''):break\n else:\n soup=BeautifulSoup(str,'lxml')\n try:\n tag=soup.img\n if (tag['id'] == 'captcha_image'):\n mm=tag['src']\n r = requests.get(mm)\n i = Image.open(BytesIO(r.content))\n i.show()\n captcha=input(\"验证码:\")\n for i in range(4):\n str=f.readline().decode('utf-8')\n soup1 = BeautifulSoup(str, 'lxml')\n tag1=soup1.input\n print(tag1['value'])\n captchaid=tag1['value']\n except TypeError as e:\n pass\n except KeyError as e:\n pass\n\n\nlogin_data = parse.urlencode([\n ('source',''),\n ('redir', 'https://www.douban.com'),\n ('form_email', '504420499@qq.com'),\n ('form_password', 'test123456'),\n ('captcha-solution',captcha),\n ('captcha-id',captchaid),\n ('login', '登录'),\n])\n\nresult =opener.open(req,login_data.encode(\"utf-8\"))\nprint('Status:', result.status, result.reason)\nfor k, v in result.getheaders():\n cookie.save(ignore_discard=True, ignore_expires=True)\n print('%s: %s' % (k, v))\n str=''\n while 1:\n str=result.readline().decode('utf-8')\n if (str == ''): break\n print(str)\n\n\n\n","sub_path":"untitled/login1.py","file_name":"login1.py","file_ext":"py","file_size_in_byte":2551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"11308078","text":"import config\nimport aiohttp\nimport asyncio\nimport discord\nimport datetime\n\nimport util.exceptions as exceptions\n\n\n# -- Twitch --\n# Background task that posts an alert if twitch.tv/democraciv is live\n\nclass Twitch:\n\n def __init__(self, bot):\n self.bot = bot\n self.twitch_API_url = \"https://api.twitch.tv/helix/streams?user_login=\" + config.getTwitch()[\n 'twitchChannelName']\n self.twitch_API_token = config.getTokenFile()['twitchAPIKey']\n self.http_header = {'Client-ID': self.twitch_API_token}\n self.streamer = config.getTwitch()['twitchChannelName']\n self.active_stream = False\n\n async def check_twitch_livestream(self):\n try:\n async with self.bot.session.get(self.twitch_API_url, headers=self.http_header) as response:\n twitch = await response.json()\n except aiohttp.ClientConnectionError as e:\n print(\"ERROR - ConnectionError in Twitch session.get()!\\n\")\n print(e)\n\n try:\n twitch['data'][0]['id']\n except (IndexError, KeyError):\n self.active_stream = False\n return False\n\n thumbnail = twitch['data'][0]['thumbnail_url'].replace('{width}', '720').replace('{height}', '380')\n return [twitch['data'][0]['title'], thumbnail]\n\n async def twitch_task(self):\n await self.bot.wait_until_ready()\n\n try:\n channel = discord.utils.get(self.bot.democraciv_guild_object.text_channels,\n name=config.getTwitch()['twitchAnnouncementChannel'])\n except AttributeError:\n print(f'ERROR - I could not find the Democraciv Discord Server! Change \"democracivServerID\" '\n f'in the config to a server I am in or disable Twitch announcements.')\n raise exceptions.GuildNotFoundError(config.getConfig()[\"democracivServerID\"])\n\n if channel is None:\n raise exceptions.ChannelNotFoundError(config.getReddit()['redditAnnouncementChannel'])\n\n while not self.bot.is_closed():\n twitch_data = await self.check_twitch_livestream()\n if twitch_data is not False:\n if self.active_stream is False:\n self.active_stream = True\n embed = self.bot.embeds.embed_builder(title=f\":satellite: {self.streamer} - Live on Twitch\",\n description=\"\", time_stamp=True)\n embed.add_field(name=\"Title\", value=twitch_data[0], inline=False)\n embed.add_field(name=\"Link\", value=f\"https://twitch.tv/{self.streamer}\", inline=False)\n embed.set_image(url=twitch_data[1])\n\n if config.getTwitch()['everyonePingOnAnnouncement']:\n await channel.send(f'@everyone {self.streamer} is live on Twitch!')\n\n await channel.send(embed=embed)\n await asyncio.sleep(180)\n","sub_path":"event/twitch.py","file_name":"twitch.py","file_ext":"py","file_size_in_byte":2972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"262126691","text":"from boards import getBoard\n\nboard = getBoard()\n\ndef disp_board(board):\n for i in range(len(board)):\n if i % 3 == 0 and i != 0:\n print(\"----------------------\")\n for j in range(len(board[0])):\n if j%3 == 0 and j != 0:\n print(\"|\",end=' ')\n if j == 8:\n print(board[i][j])\n else:\n print(str(board[i][j]),end=' ')\n\ndef check_empty(board):\n for i in range(len(board)):\n for j in range(len(board[0])):\n if board[i][j] == 0:\n return (i,j) #row,col\n\n return None\n\ndef plausible(board,num,pos):\n for i in range(len(board[0])):\n if board[pos[0]][i] == num and pos[1] != i:\n return False\n \n for i in range(len(board)):\n if board[i][pos[1]] == num and pos[0] != i:\n return False\n\n x_coord = pos[1]//3\n y_coord = pos[0]//3\n\n for i in range(y_coord*3,y_coord*3 + 3):\n for j in range(x_coord*3,x_coord*3 + 3):\n if board[i][j] == num and (i,j) == pos:\n return False\n \n return True\n\ndef solve(board):\n pos = check_empty(board)\n\n if not pos:\n return True\n else:\n row,col = pos\n for i in range(1,10):\n if plausible(board,i,(row,col)):\n board[row][col] = i\n\n if solve(board):\n return True\n \n board[row][col] = 0\n\n return False\n\nprint(\"Chosen board:\\n\")\ndisp_board(board)\nsolve(board)\nprint(\"\\n\\n\")\nprint(\"Solved Board:\")\ndisp_board(board)","sub_path":"solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":1555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"330303335","text":"#\n# created by\n# Antonio Garcia-Uceda Juarez\n# PhD student\n# Medical Informatics\n#\n# created on 09/02/2018\n# Last update: 09/02/2018\n########################################################################################\n\nfrom Common.CPUGPUdevicesManager import *\nfrom Common.WorkDirsManager import *\nfrom DataLoaders.LoadDataManager import *\nfrom Preprocessing.ImageGeneratorManager import *\nif TYPE_DNNLIBRARY_USED == 'Keras':\n from DataLoaders.BatchDataGenerator_Keras import *\n from Networks_Keras.Callbacks import *\n from Networks_Keras.Metrics import *\n from Networks_Keras.Networks import *\n from Networks_Keras.Optimizers import *\nelif TYPE_DNNLIBRARY_USED == 'Pytorch':\n from DataLoaders.BatchDataGenerator_Pytorch import *\n from Networks_Pytorch.Callbacks import *\n from Networks_Pytorch.Metrics import *\n from Networks_Pytorch.Networks import *\n from Networks_Pytorch.Optimizers import *\n from Networks_Pytorch.Trainers import *\nimport argparse\n\n\n\ndef main(args):\n # First thing, set session in the selected(s) devices: CPU or GPU\n set_session_in_selected_device(use_GPU_device=True,\n type_GPU_installed=args.typeGPUinstalled)\n\n # ---------- SETTINGS ----------\n nameModelsRelPath = args.modelsdir\n\n # Get the file list:\n nameImagesFiles = 'images*' + getFileExtension(FORMATTRAINDATA)\n nameGroundTruthFiles = 'grndtru*'+ getFileExtension(FORMATTRAINDATA)\n # ---------- SETTINGS ----------\n\n\n workDirsManager = WorkDirsManager(args.basedir)\n TrainingDataPath = workDirsManager.getNameExistPath(workDirsManager.getNameTrainingDataPath())\n if args.use_restartModel:\n ModelsPath = workDirsManager.getNameExistPath(args.basedir, nameModelsRelPath)\n else:\n ModelsPath = workDirsManager.getNameUpdatePath(args.basedir, nameModelsRelPath)\n\n listTrainImagesFiles = findFilesDir(TrainingDataPath, nameImagesFiles)\n listTrainGroundTruthFiles = findFilesDir(TrainingDataPath, nameGroundTruthFiles)\n\n if args.useValidationData:\n ValidationDataPath = workDirsManager.getNameExistPath(workDirsManager.getNameValidationDataPath())\n\n listValidImagesFiles = findFilesDir(ValidationDataPath, nameImagesFiles)\n listValidGroundTruthFiles = findFilesDir(ValidationDataPath, nameGroundTruthFiles)\n\n if not listValidImagesFiles or not listValidGroundTruthFiles:\n use_validation_data = False\n message = \"No validation data used for training the model...\"\n CatchWarningException(message)\n else:\n use_validation_data = True\n else:\n use_validation_data = False\n\n\n \n # BUILDING MODEL\n # ----------------------------------------------\n print(\"_\" * 30)\n print(\"Building model...\")\n print(\"_\" * 30)\n\n if args.use_restartModel:\n initial_epoch = args.epoch_restart\n args.num_epochs += initial_epoch\n else:\n initial_epoch = 0\n\n if TYPE_DNNLIBRARY_USED == 'Keras':\n if (not args.use_restartModel) or (args.use_restartModel and args.restart_only_weights):\n model_constructor = DICTAVAILMODELS3D(IMAGES_DIMS_Z_X_Y,\n tailored_build_model=args.tailored_build_model,\n num_layers=args.num_layers,\n num_featmaps_base=args.num_featmaps_base,\n type_network=args.type_network,\n type_activate_hidden=args.type_activate_hidden,\n type_activate_output=args.type_activate_output,\n type_padding_convol=args.type_padding_convol,\n is_disable_convol_pooling_lastlayer=args.disable_convol_pooling_lastlayer,\n isuse_dropout=args.isUse_dropout,\n isuse_batchnormalize=args.isUse_batchnormalize)\n optimizer = DICTAVAILOPTIMIZERS(args.optimizer, lr=args.learn_rate)\n loss_fun = DICTAVAILLOSSFUNS(args.lossfun, is_masks_exclude=args.masksToRegionInterest).loss\n metrics =[DICTAVAILMETRICFUNS(imetrics, is_masks_exclude=args.masksToRegionInterest).get_renamed_compute() for imetrics in args.listmetrics]\n model = model_constructor.get_model()\n # compile model\n model.compile(optimizer=optimizer, loss=loss_fun, metrics=metrics)\n # output model summary\n model.summary()\n\n if args.use_restartModel:\n print(\"Loading saved weights and restarting...\")\n modelSavedPath = joinpathnames(ModelsPath, 'model_'+ args.restart_modelFile +'.hdf5')\n print(\"Restarting from file: \\'%s\\'...\" %(modelSavedPath))\n model.load_weights(modelSavedPath)\n\n else: #args.use_restartModel and args.restart_only_weights:\n print(\"Loading full model: weights, optimizer, loss, metrics ... and restarting...\")\n modelSavedPath = joinpathnames(ModelsPath, 'model_' + args.restart_modelFile + '.hdf5')\n print(\"Restarting from file: \\'%s\\'...\" %(modelSavedPath))\n\n loss_fun = DICTAVAILLOSSFUNS(args.lossfun, is_masks_exclude=args.masksToRegionInterest).loss\n metrics =[DICTAVAILMETRICFUNS(imetrics, is_masks_exclude=args.masksToRegionInterest).get_renamed_compute() for imetrics in args.listmetrics]\n custom_objects = dict(map(lambda fun: (fun.__name__, fun), [loss_fun] + metrics))\n # load and compile model\n model = NeuralNetwork.get_load_saved_model(modelSavedPath, custom_objects=custom_objects)\n\n # Callbacks:\n callbacks_list = []\n callbacks_list.append(RecordLossHistory(ModelsPath, [DICTAVAILMETRICFUNS(imetrics, is_masks_exclude=args.masksToRegionInterest).get_renamed_compute() for imetrics in args.listmetrics]))\n filename = joinpathnames(ModelsPath, 'model_{epoch:02d}_{loss:.5f}_{val_loss:.5f}.hdf5')\n callbacks_list.append(callbacks.ModelCheckpoint(filename, monitor='loss', verbose=0))\n # callbacks_list.append(callbacks.EarlyStopping(monitor='val_loss', patience=10, mode='max'))\n\n # output model summary\n model.summary()\n\n elif TYPE_DNNLIBRARY_USED == 'Pytorch':\n if (not args.use_restartModel) or (args.use_restartModel and args.restart_only_weights):\n model_net = DICTAVAILMODELS3D(IMAGES_DIMS_Z_X_Y)\n optimizer = DICTAVAILOPTIMIZERS(args.optimizer, model_net.parameters(), lr=args.learn_rate)\n loss_fun = DICTAVAILLOSSFUNS(args.lossfun, is_masks_exclude=args.masksToRegionInterest)\n trainer = Trainer(model_net, optimizer, loss_fun)\n\n if args.use_restartModel:\n print(\"Loading saved weights and restarting...\")\n modelSavedPath = joinpathnames(ModelsPath, 'model_'+ args.restart_modelFile +'.pt')\n print(\"Restarting from file: \\'%s\\'...\" %(modelSavedPath))\n trainer.load_model_only_weights(modelSavedPath)\n\n else: #args.use_restartModel and args.restart_only_weights:\n print(\"Loading full model: weights, optimizer, loss, metrics ... and restarting...\")\n modelSavedPath = joinpathnames(ModelsPath, 'model_' + args.restart_modelFile + '.pt')\n print(\"Restarting from file: \\'%s\\'...\" %(modelSavedPath))\n trainer = Trainer.load_model_full(modelSavedPath)\n\n trainer.setup_losshistory_filepath(ModelsPath,\n isexists_lossfile=args.use_restartModel)\n trainer.setup_validate_model(freq_validate_model=FREQVALIDATEMODEL)\n trainer.setup_savemodel_filepath(ModelsPath,\n type_save_models='full_model',\n freq_save_intermodels=FREQSAVEINTERMODELS)\n\n # output model summary\n #trainer.get_summary_model()\n # ----------------------------------------------\n\n\n\n # LOADING DATA\n # ----------------------------------------------\n print(\"-\" * 30)\n print(\"Loading data...\")\n print(\"-\" * 30)\n\n print(\"Load Training data...\")\n if (args.slidingWindowImages or args.transformationImages or args.elasticDeformationImages):\n print(\"Generate Training images with Batch Generator of Training data...\")\n (train_xData, train_yData) = LoadDataManager.loadData_ListFiles(listTrainImagesFiles,\n listTrainGroundTruthFiles)\n train_images_generator = getImagesDataGenerator3D(args.slidingWindowImages,\n args.prop_overlap_Z_X_Y,\n args.transformationImages,\n args.elasticDeformationImages)\n train_batch_data_generator = TrainingBatchDataGenerator(IMAGES_DIMS_Z_X_Y,\n train_xData,\n train_yData,\n train_images_generator,\n batch_size=args.batch_size,\n shuffle=SHUFFLETRAINDATA)\n print(\"Number volumes: %s. Total Data batches generated: %s...\" %(len(listTrainImagesFiles),\n len(train_batch_data_generator)))\n else:\n (train_xData, train_yData) = LoadDataManagerInBatches(IMAGES_DIMS_Z_X_Y).loadData_ListFiles(listTrainImagesFiles,\n listTrainGroundTruthFiles)\n print(\"Number volumes: %s. Total Data batches generated: %s...\" %(len(listTrainImagesFiles),\n len(train_xData)))\n\n if use_validation_data:\n print(\"Load Validation data...\")\n if (args.slidingWindowImages or args.transformationImages or args.elasticDeformationImages):\n print(\"Generate Validation images with Batch Generator of Validation data...\")\n args.transformationImages = args.transformationImages and args.useTransformOnValidationData\n args.elasticDeformationImages = args.elasticDeformationImages and args.useTransformOnValidationData\n (valid_xData, valid_yData) = LoadDataManager.loadData_ListFiles(listValidImagesFiles,\n listValidGroundTruthFiles)\n valid_images_generator = getImagesDataGenerator3D(args.slidingWindowImages,\n args.prop_overlap_Z_X_Y,\n args.transformationImages,\n args.elasticDeformationImages)\n valid_batch_data_generator = TrainingBatchDataGenerator(IMAGES_DIMS_Z_X_Y,\n valid_xData,\n valid_yData,\n valid_images_generator,\n batch_size=args.batch_size,\n shuffle=SHUFFLETRAINDATA)\n validation_data = valid_batch_data_generator\n print(\"Number volumes: %s. Total Data batches generated: %s...\" %(len(listValidImagesFiles),\n len(valid_batch_data_generator)))\n else:\n (valid_xData, valid_yData) = LoadDataManagerInBatches(IMAGES_DIMS_Z_X_Y).loadData_ListFiles(listValidImagesFiles,\n listValidGroundTruthFiles)\n validation_data = (valid_xData, valid_yData)\n print(\"Number volumes: %s. Total Data batches generated: %s...\" % (len(listTrainImagesFiles),\n len(valid_xData)))\n else:\n validation_data = None\n\n\n\n # TRAINING MODEL\n # ----------------------------------------------\n print(\"-\" * 30)\n print(\"Training model...\")\n print(\"-\" * 30)\n\n if TYPE_DNNLIBRARY_USED == 'Keras':\n if (args.slidingWindowImages or\n args.transformationImages):\n model.fit_generator(train_batch_data_generator,\n nb_epoch=args.num_epochs,\n steps_per_epoch=args.max_steps_epoch,\n verbose=1,\n callbacks=callbacks_list,\n validation_data=validation_data,\n shuffle=SHUFFLETRAINDATA,\n initial_epoch=initial_epoch)\n else:\n model.fit(train_xData, train_yData,\n batch_size=args.batch_size,\n epochs=args.num_epochs,\n steps_per_epoch=args.max_steps_epoch,\n verbose=1,\n callbacks=callbacks_list,\n validation_data=validation_data,\n shuffle=SHUFFLETRAINDATA,\n initial_epoch=initial_epoch)\n\n elif TYPE_DNNLIBRARY_USED == 'Pytorch':\n trainer.train(train_batch_data_generator,\n num_epochs=args.num_epochs,\n max_steps_epoch=args.max_steps_epoch,\n valid_data_generator=validation_data,\n initial_epoch=initial_epoch)\n # ----------------------------------------------\n\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--basedir', default=BASEDIR)\n parser.add_argument('--modelsdir', default='Models')\n parser.add_argument('--tailored_build_model', type=str2bool, default=TAILORED_BUILD_MODEL)\n parser.add_argument('--type_network', type=str, default=TYPE_NETWORK)\n parser.add_argument('--num_layers', type=int, default=NUM_LAYERS)\n parser.add_argument('--num_featmaps_base', type=int, default=NUM_FEATMAPS_BASE)\n parser.add_argument('--type_activate_hidden', type=str, default=TYPE_ACTIVATE_HIDDEN)\n parser.add_argument('--type_activate_output', type=str, default=TYPE_ACTIVATE_OUTPUT)\n parser.add_argument('--type_padding_convol', type=str, default=TYPE_PADDING_CONVOL)\n parser.add_argument('--disable_convol_pooling_lastlayer', type=str2bool, default=DISABLE_CONVOL_POOLING_LASTLAYER)\n parser.add_argument('--isUse_dropout', type=str2bool, default=ISUSE_DROPOUT)\n parser.add_argument('--isUse_batchnormalize', type=str2bool, default=ISUSE_BATCHNORMALIZE)\n parser.add_argument('--imodel', default=IMODEL)\n parser.add_argument('--optimizer', default=IOPTIMIZER)\n parser.add_argument('--num_epochs', type=int, default=NUM_EPOCHS)\n parser.add_argument('--max_steps_epoch', type=int, default=None)\n parser.add_argument('--batch_size', type=int, default=BATCH_SIZE)\n parser.add_argument('--learn_rate', type=float, default=LEARN_RATE)\n parser.add_argument('--lossfun', default=ILOSSFUN)\n parser.add_argument('--listmetrics', type=parseListarg, default=LISTMETRICS)\n parser.add_argument('--masksToRegionInterest', type=str2bool, default=MASKTOREGIONINTEREST)\n parser.add_argument('--slidingWindowImages', type=str2bool, default=SLIDINGWINDOWIMAGES)\n parser.add_argument('--prop_overlap_Z_X_Y', type=str2tuplefloat, default=PROP_OVERLAP_Z_X_Y)\n parser.add_argument('--transformationImages', type=str2bool, default=TRANSFORMATIONIMAGES)\n parser.add_argument('--elasticDeformationImages', type=str2bool, default=ELASTICDEFORMATIONIMAGES)\n parser.add_argument('--useValidationData', type=str2bool, default=USEVALIDATIONDATA)\n parser.add_argument('--useTransformOnValidationData', type=str2bool, default=USETRANSFORMONVALIDATIONDATA)\n parser.add_argument('--typeGPUinstalled', type=str, default=TYPEGPUINSTALLED)\n parser.add_argument('--useMultiThreading', type=str2bool, default=USEMULTITHREADING)\n parser.add_argument('--use_restartModel', type=str2bool, default=USE_RESTARTMODEL)\n parser.add_argument('--restart_modelFile', default=RESTART_MODELFILE)\n parser.add_argument('--restart_only_weights', type=str2bool, default=RESTART_ONLY_WEIGHTS)\n parser.add_argument('--epoch_restart', type=int, default=EPOCH_RESTART)\n args = parser.parse_args()\n\n print(\"Print input arguments...\")\n for key, value in vars(args).iteritems():\n print(\"\\'%s\\' = %s\" %(key, value))\n\n main(args)\n","sub_path":"Scripts_Experiments/TrainingModel.py","file_name":"TrainingModel.py","file_ext":"py","file_size_in_byte":17186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"194306921","text":"import logging\nimport datetime\n# add database logger\ndef get_database_logger():\n now = datetime.datetime.now()\n current_time = str(now.year)+str(now.month)+str(now.day)+\"-\"+str(now.hour)+\"-\"+str(now.minute)\n #---------------------------------------------------------\n database_logger = logging.getLogger(\"sql_log\")\n database_logger.setLevel(level = logging.INFO)\n\n database_handler = logging.FileHandler(current_time+\".log\")\n database_handler.setLevel(logging.INFO)\n\n formatter = logging.Formatter('%(asctime)s,%(name)s,%(levelname)s,%(message)s')\n database_handler.setFormatter(formatter)\n\n database_logger.addHandler(database_handler)\n return database_logger\n\n# add table generation logger\ndef get_table_logger():\n now = datetime.datetime.now()\n current_time = str(now.year)+str(now.month)+str(now.day)+\"-\"+str(now.hour)+\"-\"+str(now.minute)\n #---------------------------------------------------------\n table_logger = logging.getLogger(\"table_log\")\n table_logger.setLevel(level = logging.INFO)\n\n table_handler = logging.FileHandler(current_time+\".log\")\n table_handler.setLevel(logging.INFO)\n\n formatter = logging.Formatter('%(asctime)s,%(name)s,%(levelname)s,%(message)s')\n table_handler.setFormatter(formatter)\n\n table_logger.addHandler(table_handler)\n return table_logger\n# add compress logger\ndef get_cmprs_logger():\n now = datetime.datetime.now()\n current_time = str(now.year)+str(now.month)+str(now.day)+\"-\"+str(now.hour)+\"-\"+str(now.minute)\n #---------------------------------------------------------\n cmprs_logger = logging.getLogger(\"compress_log\")\n cmprs_logger.setLevel(level = logging.INFO)\n\n cmprs_handler = logging.FileHandler(current_time+\".log\")\n cmprs_handler.setLevel(logging.INFO)\n\n formatter = logging.Formatter('%(asctime)s,%(name)s,%(levelname)s,%(message)s')\n cmprs_handler.setFormatter(formatter)\n\n cmprs_logger.addHandler(cmprs_handler)\n return cmprs_logger\n\ndef get_mail_logger():\n now = datetime.datetime.now()\n current_time = str(now.year)+str(now.month)+str(now.day)+\"-\"+str(now.hour)+\"-\"+str(now.minute)\n #---------------------------------------------------------\n mail_logger = logging.getLogger(\"mail_log\")\n mail_logger.setLevel(level = logging.INFO)\n\n mail_handler = logging.FileHandler(current_time+\".log\")\n mail_handler.setLevel(logging.INFO)\n\n formatter = logging.Formatter('%(asctime)s,%(name)s,%(levelname)s,%(message)s')\n mail_handler.setFormatter(formatter)\n\n mail_logger.addHandler(mail_handler)\n return mail_logger\n","sub_path":"logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":2589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"588686544","text":"\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Sep 20 11:12:05 2018\n\n@author: xie\n\"\"\"\nimport os\nimport torch\nfrom torch.nn import init\nimport torch.nn as nn\nimport numpy as np\nfrom collections import OrderedDict\nfrom .base_model import BaseModel\nfrom . import networks\n\n\n\nclass EncoderDecoder1dModel(BaseModel):\n \n def __init__(self, input_nc, output_nc, lr, isTrain):\n BaseModel.__init__(self)\n self.gpu_ids = str(0)\n self.device = torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu') # get device name: CPU or GPU \n self.save_dir = './checkpoints/encoderdecoder1d/'\n self.loss_names = ['L1','L2','tv']\n self.model_names = ['EncoderDecoder1d']\n self.visual_names = ['real_A', 'fake_B', 'real_B']\n \n\n \n self.netEncoderDecoder1d = networks.define_EncoderDecoder(input_nc, output_nc, init_type='normal', init_gain=0.02, gpu_ids = self.gpu_ids, use_norm = True)\n# self.netEncoderDecoder1d = networks.define_ShortEncoderDecoder(input_nc,output_nc, init_type='normal', init_gain=0.02, gpu_ids = self.gpu_ids, use_norm = False)\n \n \n self.lr = lr\n self.beta1 = float(0.5)\n self.optimizer = torch.optim.Adam(self.netEncoderDecoder1d.parameters(), lr=self.lr, betas=(self.beta1, 0.999))\n self.criterionTV = networks.MuTV1dLoss().to(self.device)\n self.criterionDe2 = networks.MuDevia1dLoss().to(self.device)\n self.criterionL1 = torch.nn.L1Loss().to(self.device)\n self.criterionL2 = torch.nn.MSELoss().to(self.device)\n \n \n def set_input(self, input): \n self.real_A = input['A'].to(self.device)\n self.real_B = input['B'].to(self.device)\n self.real_A = self.real_A.float() \n self.real_B = self.real_B.float() \n self.image_paths = input['A_paths'] \n\n def forward(self):\n real_A = self.real_A\n self.fake_B = self.netEncoderDecoder1d(real_A) \n \n def backward(self):\n self.loss_L1 = self.criterionL1(self.real_B, self.fake_B)\n self.loss_L2 = self.criterionL2(self.real_B, self.fake_B)\n self.loss_tv = self.criterionTV(self.fake_B, TV = int(2))* 1\n self.loss_de = self.criterionDe2(self.fake_B, self.real_B)* 1\n self.loss_1d = self.loss_L1\n self.loss_1d.backward()\n \n\n def optimize_parameters(self):\n\n self.forward() # compute fake images: G(A) \n self.optimizer.zero_grad() # set G's gradients to zero\n self.backward() # calculate graidents for G\n self.optimizer.step() # update D's weights\n\n\n \n\n\n \n \n\n\n\n\n\n\n\n\n \n ","sub_path":"models/encoderdecoder1d_model.py","file_name":"encoderdecoder1d_model.py","file_ext":"py","file_size_in_byte":2766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"339440654","text":"from django.conf.urls import url\nfrom django.contrib import admin\nfrom django.urls import path\nfrom . import views\n# from rest_framework.authtoken import views as rest_framework_views\n\napp_name = 'user'\n\nurlpatterns = [\n # /user/\n url(r'^$', views.UserView.as_view(), name='index'),\n\n # /user/register/\n url(r'^register/$', views.Register.as_view(), name='register'),\n\n # /user/login/\n url(r'^login/$', views.Login.as_view(), name = 'login'),\n\n # user/email_id/\n url(r'^email_id/$', views.Email.as_view(), name='email_id'),\n\n # user/profile/\n # url(r'^profile/$', views.Profile.as_view(), name='profile'),\n\n # user/home/\n # url(r'^home/$', views.Home.as_view(), name='home'),\n\n # /user/test/\n # url(r'test/$', views.Imageget.as_view(), name = 'image'),\n\n # user/post_ad\n url(r'postAd/$', views.PostAd.as_view(), name='postAd'),\n\n # user/getProducts\n url(r'getProducts/$', views.GetProducts.as_view(), name='GetProducts'),\n\n # user/delete/\n url(r'delete/$', views.Delete.as_view(), name='delete'),\n\n url(r'test/$', views.Test.as_view(), name='test'),\n\n # user/get_auth_token\n # url(r'^get_auth_token/$', rest_framework_views.obtain_auth_token, name='get_auth_token'),\n]","sub_path":"NITKART_Backend/user/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"433205635","text":"import os.path\nfrom glob import glob\nimport random\nimport SoundMusic as sm\nimport traceback as tb\n\ndef get_name(file):\n return os.path.basename(os.path.splitext(file)[0])\n\ndef do(n):\n files = glob(\"../dataset/*\")\n for _ in range(n):\n try:\n file = random.choice(files)\n files.remove(file)\n name = get_name(file)\n sm.test.do(file, f\"batch-{name}\", \"../svm.pickle\")\n except KeyboardInterrupt:\n return\n except:\n continue\n\ndef stereo(n, inp, out, svm):\n files = glob(f\"{inp}/*\")\n for _ in range(n):\n try:\n fileN = random.choice(files)\n file = os.path.join(inp, fileN)\n print(f\"Inspiration from {fileN}\")\n files.remove(fileN)\n name = get_name(file)\n sm.generation.stereo(file, f\"{out}/batch-{name}\", svm)\n except KeyboardInterrupt:\n return\n except:\n tb.print_exc()\n continue","sub_path":"src/SoundMusic/batch.py","file_name":"batch.py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"386513236","text":"from django.test import TestCase\nfrom django.utils import timezone\n\nfrom .models import Course, Step\n\n\nclass CourseModelTests(TestCase):\n def test_course_creation(self):\n course = Course.objects.create(\n title=\"Python Regular Expressions\",\n description=\"Learn to write regular expressions in Python\"\n )\n now = timezone.now()\n self.assertLess(course.created_at, now)\n\n\nclass StepModelTests(TestCase):\n def setUp(self):\n self.course = Course.objects.create(\n title=\"Python Testing\",\n description=\"Learn to write tests in Python\"\n )\n \n def test_step_creation(self):\n step = Step.objects.create(\n title=\"Introduction to Doctests\",\n description=\"Learn to write tests in your docstrings.\",\n course=self.course\n )\n self.assertIn(step, self.course.step_set.all())","sub_path":"Django_Basics/Class_files/s6v2/learning_site/courses/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"420403706","text":"#\n# @lc app=leetcode.cn id=694 lang=python3\n#\n# [694] 不同岛屿的数量\n#\n# https://leetcode-cn.com/problems/number-of-distinct-islands/description/\n#\n# algorithms\n# Medium (47.25%)\n# Likes: 28\n# Dislikes: 0\n# Total Accepted: 1.6K\n# Total Submissions: 3.4K\n# Testcase Example: '[[1,1,0,0,0],[1,1,0,0,0],[0,0,0,1,1],[0,0,0,1,1]]'\n#\n# 给定一个非空01二维数组表示的网格,一个岛屿由四连通(上、下、左、右四个方向)的 1 组成,你可以认为网格的四周被海水包围。\n# \n# 请你计算这个网格中共有多少个形状不同的岛屿。两个岛屿被认为是相同的,当且仅当一个岛屿可以通过平移变换(不可以旋转、翻转)和另一个岛屿重合。\n# \n# \n# \n# 样例 1:\n# \n# 11000\n# 11000\n# 00011\n# 00011\n# \n# \n# 给定上图,返回结果 1。\n# \n# \n# \n# 样例 2:\n# \n# 11011\n# 10000\n# 00001\n# 11011\n# \n# 给定上图,返回结果 3。\n# \n# 注意:\n# \n# 11\n# 1\n# \n# \n# 和\n# \n# ⁠1\n# 11\n# \n# \n# 是不同的岛屿,因为我们不考虑旋转、翻转操作。\n# \n# \n# \n# 注释 :  二维数组每维的大小都不会超过50。\n# \n#\n\n# @lc code=start\nfrom typing import List\n\n\nclass Solution:\n def numDistinctIslands(self, grid: List[List[int]]) -> int:\n w, h = len(grid), len(grid[0])\n d = [(1, 0), (0, 1), (-1, 0), (0, -1)]\n\n def traverse(x: int, y: int) -> tuple:\n grid[x][y] = 0\n r, q = [], [(x, y)]\n\n while q:\n x, y = q.pop(0)\n r.append((x, y))\n for dx, dy in d:\n nx, ny = x + dx, y + dy\n if 0 <= nx < w and 0 <= ny < h and grid[nx][ny]:\n grid[nx][ny] = 0\n q.append((nx, ny))\n\n mx, my = min(r, key=lambda k: k[0])[0], min(r, key=lambda k: k[1])[1]\n return tuple(map(lambda k: (k[0] - mx) * h + k[1] - my, sorted(r)))\n\n return len(set(traverse(x, y) for x in range(w) for y in range(h) if grid[x][y]))\n\n# @lc code=end\n","sub_path":"medium/694.不同岛屿的数量.py","file_name":"694.不同岛屿的数量.py","file_ext":"py","file_size_in_byte":2117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"381800641","text":"# -*- coding: utf-8 -*-\n\n# Consider the fraction, n/d, where n and d are positive integers. If nd and\n# HCF(n,d)=1, it is called a reduced proper fraction.\n\n# If we list the set of reduced proper fractions for d 8 in ascending order of\n# size, we get:\n\n# 1/8, 1/7, 1/6, 1/5, 1/4, 2/7, 1/3, 3/8, 2/5, 3/7, 1/2, 4/7, 3/5, 5/8, 2/3,\n# 5/7, 3/4, 4/5, 5/6, 6/7, 7/8\n\n# It can be seen that 2/5 is the fraction immediately to the left of 3/7.\n\n# By listing the set of reduced proper fractions for d 1,000,000 in ascending\n# order of size, find the numerator of the fraction immediately to the left\n# of 3/7.\n\n\ndef mcd(a, b):\n if b == 0:\n return a\n else:\n return mcd(b, a % b)\n\n\ndef mcd_it(a, b):\n while b != 0:\n aux = b\n b = a % b\n a = aux\n return a\n\n\ndef result():\n d_fix = 7\n n_fix = 3\n d_ant = 1000000\n n_ant = 1\n limite = 1000000\n\n # n/d\n for d in range(1, limite+1):\n\n n_calc, m = divmod(n_fix*d, d_fix)\n\n if m == 0:\n continue\n\n for n in range(n_calc, 0, -1):\n\n if mcd_it(n, d) == 1:\n\n if (n*d_ant) > (d*n_ant):\n d_ant = d\n n_ant = n\n else:\n break\n\n # print(\"Resultado 0071\", n_ant)\n return n_ant\n","sub_path":"projecteuler/problems/d0050/p0071/r0071.py","file_name":"r0071.py","file_ext":"py","file_size_in_byte":1301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"583341907","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@author: pawel\n\"\"\"\nimport tensorflow as tf\nimport tensorflow.contrib as contrib\nimport pandas as pd\nimport tempfile\nimport numpy as np\n\n\ntrain_file = \"adult-training.csv\"\ntest_file = \"adult-test.csv\"\n\nCOLUMNS = [\"age\", \"workclass\", \"fnlwgt\", \"education\", \"education_num\",\n \"marital_status\", \"occupation\", \"relationship\", \"race\", \n \"gender\", \"capital_gain\", \"capital_loss\", \"hours_per_week\", \n \"native_country\",\"income_bracket\"]\n \nLABEL_COLUMN = \"label\"\n\nCATEGORICAL_COLUMNS = [\"workclass\", \"education\", \"marital_status\", \n \"occupation\",\"relationship\", \"race\", \"gender\", \n \"native_country\"]\n \nCONTINUOUS_COLUMNS = [\"age\", \"education_num\", \"capital_gain\", \n \"capital_loss\",\"hours_per_week\"]\n\nclass ML:\n\tdef __init__(self, train_file, test_file):\n\t\tself.train_file = train_file\n\t\tself.test_file = test_file\n\n\tdef train(self):\n\t\tdf_train = pd.read_csv(self.train_file, names = COLUMNS, skipinitialspace = True,engine= \"python\")\n\t\tdf_train.dropna(how=\"any\",axis = 0)\n\n\n\ndf_test = pd.read_csv(test_file,names = COLUMNS,skipinitialspace = True, skiprows=1, engine = \"python\")\n\n\ndf_test.dropna(how=\"any\", axis = 0)\n\ndf_train[LABEL_COLUMN] = (df_train[\"income_bracket\"].apply(lambda x: \">50K\" in x)).astype(int)\ndf_test[LABEL_COLUMN] = (df_test[\"income_bracket\"].apply(lambda x: \">50K\" in x)).astype(int)\n\nage = contrib.layers.real_valued_column(\"age\")\nage_buckets = contrib.layers.bucketized_column(age,boundaries=[20, 25, 30, 35, 40, 45,50, 55, 60, 65])\n\neducation_num = contrib.layers.real_valued_column(\"education_num\")\ncapital_gain = contrib.layers.real_valued_column(\"capital_gain\")\ncapital_loss = contrib.layers.real_valued_column(\"capital_loss\")\nhours_per_week = contrib.layers.real_valued_column(\"hours_per_week\")\n\nworkclass = contrib.layers.sparse_column_with_hash_bucket(\"workclass\", hash_bucket_size= 100)\neducation = contrib.layers.sparse_column_with_hash_bucket(\"education\",hash_bucket_size=100)\nmarital_status = contrib.layers.sparse_column_with_hash_bucket(\"marital_status\",hash_bucket_size=100)\noccupation = contrib.layers.sparse_column_with_hash_bucket(\"occupation\",hash_bucket_size=1000)\nrelationship = contrib.layers.sparse_column_with_hash_bucket(\"relationship\",hash_bucket_size=100)\nrace = contrib.layers.sparse_column_with_hash_bucket(\"race\",hash_bucket_size=100)\nnative_country = contrib.layers.sparse_column_with_hash_bucket(\"education\",hash_bucket_size=1000)\ngender = contrib.layers.sparse_column_with_keys(\"gender\",keys=[\"male\",\"female\"])\n\neducation_occupation = contrib.layers.crossed_column(columns=[education,occupation],hash_bucket_size= int(1e4))\nage_education_occupation = contrib.layers.crossed_column(columns=[age_buckets,education,occupation], hash_bucket_size= int(1e6))\nnative_country_occupation = contrib.layers.crossed_column(columns= [native_country,occupation], hash_bucket_size= int(1e4))\nrace_occupation = contrib.layers.crossed_column(columns = [race,occupation], hash_bucket_size = int(1e4))\n\nwide_columns = [age,age_buckets,education_num,capital_gain,capital_loss,hours_per_week,workclass,education\n \n\n,marital_status,occupation,relationship,race,native_country,gender,education_occupation,age_education_occupation\n ,native_country_occupation,race_occupation]\n\n\ndeep_columns = [age,education_num,capital_gain,capital_loss,hours_per_week,\n contrib.layers.embedding_column(workclass,dimension=8),\n contrib.layers.embedding_column(education,dimension=8),\n contrib.layers.embedding_column(marital_status,dimension=8),\n contrib.layers.embedding_column(occupation,dimension=8),\n contrib.layers.embedding_column(relationship,dimension=8),\n contrib.layers.embedding_column(race,dimension=8),\n contrib.layers.embedding_column(native_country,dimension=8),\n contrib.layers.embedding_column(gender,dimension=8)\n ]\ndef input_function(df):\n continuos_cols = {k: tf.constant(df[k].values) for k in CONTINUOUS_COLUMNS}\n categorical_cols = {k: tf.SparseTensor(indices= [[i,0] for i in range(df[k].size)],values= df[k].values, dense_shape= [df[k].size,1]) for k in CATEGORICAL_COLUMNS}\n label = tf.constant(df[LABEL_COLUMN].values)\n feature_cols = dict(continuos_cols)\n feature_cols.update(categorical_cols)\n return feature_cols,label\n \n \n#DEFINE MODEL DIR\nmodel_dir = tempfile.mkdtemp()\n\n\n#BUILD AND TRAIN MODEL\n\nm = contrib.learn.DNNLinearCombinedClassifier(model_dir = \nmodel_dir,linear_feature_columns=wide_columns,dnn_feature_columns=deep_columns,\n dnn_hidden_units= [100,50],fix_global_step_increment_bug=True)\n \n\nm.fit(input_fn= lambda: input_function(df_train),steps= 200)\nresults = m.evaluate(input_fn = lambda: input_function(df_test),steps = 1)\nfor key in sorted(results):\n print(key, end= \" \")\n print(results[key])\n \n \n \n \n","sub_path":"salary_prediction/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"474033592","text":"# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport pandas as pd\nimport rrcf\n\nimport matplotlib.pyplot as plt\n\n\n# Specify sample parameters\nn = 1000\nd = 2\nnum_outliers = 10\n\n# Seed tree with zero-mean, normally distributed data\nX = np.random.randn(n,d)\n# Set the last ten points as outliers\nX[-num_outliers:, :] += 4\n\n# Construct forest\nforest = []\n\n# Specify forest parameters\nnum_trees = 100\ntree_size = 128\nsample_size_range = (n // tree_size, tree_size)\n\nwhile len(forest) < num_trees:\n # Select random subsets of points uniformly from point set\n ixs = np.random.choice(n, size=sample_size_range,\n replace=False)\n # Add sampled trees to forest\n trees = [rrcf.RCTree(X[ix], index_labels=ix) for ix in ixs]\n forest.extend(trees)\n\n# Compute average CoDisp\navg_codisp = pd.Series(0.0, index=np.arange(n))\nindex = np.zeros(n)\nfor tree in forest:\n codisp = pd.Series({leaf : tree.codisp(leaf)\n for leaf in tree.leaves})\n avg_codisp[codisp.index] += codisp\n np.add.at(index, codisp.index.values, 1)\navg_codisp /= index\n\ninlier_point = avg_codisp[:-10].mean()\noutlier_point = avg_codisp[-10:].mean()\nprint(inlier_point, outlier_point)\nanomaly_score = np.percentile(avg_codisp, 97)\nanomaly_points = X[avg_codisp >= anomaly_score]\n\nplt.scatter(X[:-num_outliers, 0], X[:-num_outliers, 1], c='g', label='inlier')\nplt.scatter(X[-num_outliers:, 0], X[-num_outliers:, 1], c='b', label='outlier')\nplt.scatter(anomaly_points[:, 0], anomaly_points[:, 1], c='r', marker='o', label='mark')\nplt.legend()\nplt.show()","sub_path":"test/test_anomaly.py","file_name":"test_anomaly.py","file_ext":"py","file_size_in_byte":1569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"524582507","text":"import datetime as dt\r\nadd_library(\"minim\")\r\n\r\nsection = 1\r\n\r\ndef setup():\r\n global minim\r\n global bomb_sound_effect\r\n global dog\r\n \r\n size(600, 600)\r\n \r\n # Load in sound effects\r\n minim = Minim(this)\r\n bomb_sound_effect = minim.loadFile(\"Bomb.wav\")\r\n \r\n # Load in images\r\n dog = loadImage(\"dog.jpg\")\r\n\r\ndef draw():\r\n global bomb_sound_effect\r\n global section\r\n \r\n background(255, 0, 255)\r\n \r\n if section == 1:\r\n section1()\r\n else:\r\n section2()\r\n \r\ndef section1():\r\n # Part 1 --- Simple time and date\r\n current_time = dt.datetime.now()\r\n current_month = current_time.strftime(\"%B\")\r\n current_seconds = current_time.strftime(\"%S\")\r\n current_min = current_time.strftime(\"%M\")\r\n \r\n textSize(30)\r\n text(current_seconds, 200, 200)\r\n text(current_month, 100, 300)\r\n text(current_min, 100, 200)\r\n \r\n bomb_sound_effect.setGain(-30) # Decrease the volume of the sound effect\r\n bomb_sound_effect.play()\r\n \r\n tint(255, 130, 190, 255) # Use the last number to change the transparency\r\n image(dog, 100, 400, 200, 100)\r\n\r\ndef section2():\r\n # Part 2 --- Alarm Clock\r\n pass\r\n \r\n\r\ndef keyPressed():\r\n global section\r\n \r\n if key == \"T\" or key == \"t\":\r\n if section == 1:\r\n section = 2\r\n else:\r\n section = 1\r\n \r\ndef mousePressed():\r\n # You only need this for section 2\r\n pass\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n \r\n \r\n \r\n","sub_path":"Lessons/Week30/sample_code/sample_code.pyde","file_name":"sample_code.pyde","file_ext":"pyde","file_size_in_byte":1484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"43825236","text":"#-*- coding:utf-8 -*-\n\nimport pandas as pd\nimport re\nimport matplotlib.pylab as plt\nimport seaborn as sns\nimport os\nimport gc\n\n\nclass corrAna(object):\n '''need to input three parameters to initialize, type controls rolling or aggravated\n 0 for rolling, 1 for aggravated;\n level : 0 for major option, 1 for secondary, 2 for third '''\n\n def __init__(self, filedir, start_date, end_date, type, level = 0):\n self.filedir = filedir\n self.start_date = start_date\n self.end_date = end_date\n self.type = type\n self.level = level\n self.symbolDict = {}\n\n def generateDayLst(self, start=None, end=None ):\n if start == None:\n days = pd.date_range(start=self.start_date, end=self.end_date, freq='B')\n else:\n days = pd.date_range(start=start, end=start, freq='B')\n dayLst = []\n for day in days:\n temp = day.strftime('%Y-%m-%d').split('-')\n day = temp[0]+temp[1]+temp[2]\n dayLst.append(day)\n return dayLst\n\n def loaddata(self, day, split = 2):\n '''only load single day\n split controls split one sec into how many parts'''\n if type(day) == type('a'):\n dir = self.filedir + day + '.dat.gz'\n if type(day) == type(1):\n dir = self.filedir + str(day) + '.dat.gz'\n temp = pd.read_csv(dir, header=None, index_col=0, compression='gzip',\n names=['ticker', 'bid_price', 'bid_volume', 'ask_price', 'ask_volume', 'last_price',\n 'last_volume', 'open_interest', 'turnover'])\n self.timeIndex(temp, day, split=split)\n temp.sort_index(inplace=True)\n if split == 2:\n timerange1 = pd.date_range(day+' 09', day+' 11:30', freq = '500ms')\n timerange2 = pd.date_range(day + ' 13:30', day + ' 15', freq='500ms')\n elif split == 4:\n timerange1 = pd.date_range(day + ' 09', day + ' 11:30', freq='250ms')\n timerange2 = pd.date_range(day + '13:30', day + ' 15', freq='250ms')\n flag = map(lambda x: (x in timerange1) or (x in timerange2), temp.index.values)\n temp = temp[flag]\n return temp\n\n def timeIndex(self, df, date, split = 2):\n '''trim time into 500ms or 250ms and change it into timeseries and set as index'''\n lst = list(df.index.values)\n year, month, day = date[:4],date[4:6],date[6:]\n res = []\n for time in lst:\n s = re.split(r'[:.]', time)\n if split == 2:\n if int(s[-1]) <= 500:\n s = s[0] + ':' + s[1] + ':' + s[2] + '.' + '500'\n elif int(s[-1]) < 1000:\n s[-2] = str(int(s[-2]) + 1)\n if int(s[-2]) == 60:\n s[-3] = str(int(s[-3]) + 1)\n s[-2] = '00'\n if int(s[-3]) == 60:\n s[-3] = '00'\n s[-4] = str(int(s[-4]) + 1)\n elif len(s[-2]) == 1:\n s[-2] = '0' + s[-2]\n s = s[0] + ':' + s[1] + ':' + s[2] + '.' + '000'\n elif split == 4:\n if int(s[-1]) <= 250:\n s = s[0] + ':' + s[1] + ':' + s[2] + '.' + '250'\n elif int(s[-1]) <= 500:\n s = s[0] + ':' + s[1] + ':' + s[2] + '.' + '500'\n elif int(s[-1]) <= 750:\n s = s[0] + ':' + s[1] + ':' + s[2] + '.' + '750'\n elif int(s[-1]) < 1000:\n s[-2] = str(int(s[-2]) + 1)\n if int(s[-2]) == 60:\n s[-3] = str(int(s[-3]) + 1)\n s[-2] = '00'\n if int(s[-3]) == 60:\n s[-3] = '00'\n s[-4] = str(int(s[-4]) + 1)\n elif len(s[-2]) == 1:\n s[-2] = '0' + s[-2]\n s = s[0] + ':' + s[1] + ':' + s[2] + '.' + '000'\n s = year + '-' + month + '-' + day + ' ' + s\n res.append(s)\n df.index = pd.DatetimeIndex(res)\n\n def filterdata(self, df, lst, threshold = 1000):\n '''lst is a list of option that want to keep from raw dataframe'''\n if self.type == 2: # return both rolling and aggravated\n align_base = self.get_align_base(df)\n res = pd.DataFrame()\n for name in lst:\n temp = df[df['ticker'] == name]\n if temp.shape[0] < threshold:\n continue\n else:\n self.calcAll(temp)\n temp = temp.rename(columns={'aggravated_return': name[:2] + str(self.level)+'_agg',\n 'rolling_return': name[:2] + str(self.level)+'_rolling'})\n temp = pd.DataFrame(temp.loc[:, [name[:2] + str(self.level)+'_agg', name[:2] + str(self.level)+'_rolling']])\n temp = self.align_drop(data=temp, base=align_base)\n res = pd.concat([res, temp], axis=1)\n else:\n if self.type == 1:\n keywd = 'aggravated_return'\n else:\n keywd = 'rolling_return'\n align_base = self.get_align_base(df)\n res = pd.DataFrame()\n for name in lst:\n temp = df[df['ticker'] == name]\n if temp.shape[0] < threshold:\n continue\n else:\n self.calcAll(temp)\n temp = temp.rename(columns={keywd: name[:2]+str(self.level)})\n temp = pd.DataFrame(temp.loc[:, name[:2]+str(self.level)])\n temp = self.align_drop(data=temp, base=align_base)\n res = pd.concat([res, temp], axis=1)\n res.fillna(method='ffill', axis=0, inplace=True)\n res.fillna(method='bfill', axis=0, inplace=True)\n return res\n\n def concatdata(self, dayLst, filterLst = 'major', split = 2):\n '''load multidays and filter and concat together\n split means split one second into how many parts, choose from [2,4]'''\n if len(dayLst) == 1:\n symbolKey = dayLst[0]\n else:\n symbolKey = dayLst[0]+'-'+dayLst[-1]\n temp = self.loaddata(day=dayLst[0], split=split)\n if filterLst == 'major':\n major = self.findMostInType(temp)\n self.recordSymbol(symbolKey, major)\n filterLst = major.values()\n res = self.filterdata(temp, lst=filterLst)\n del temp; gc.collect()\n if len(dayLst) > 1:\n for day in dayLst[1:]:\n temp = self.loaddata(day=day, split = split)\n major = self.findMostInType(temp)\n filterLst = major.values()\n self.recordSymbol(symbolKey, major)\n res0 = self.filterdata(temp, lst = filterLst)\n res = pd.concat([res, res0])\n del temp, res0; gc.collect()\n if self.type != 2:\n return res\n if self.type == 2:\n agg_flag = [True if 'agg' in col_name else False for col_name in res.columns.values]\n rolling_flag = [True if 'agg' not in col_name else False for col_name in res.columns.values]\n agg_res, rolling_res = res[res.columns.values[agg_flag]], res[res.columns.values[rolling_flag]]\n return agg_res, rolling_res\n\n def recordSymbol(self, date, symbolLst):\n '''record symbol and ticker'''\n self.symbolDict[date] = symbolLst\n\n def sampledata(self, data, period, how = 'first'):\n df = data.copy()\n if how == 'first':\n res = df.resample(period).first()\n if how == 'mean':\n res = df.resample(period).mean()\n if how == 'last':\n res = df.resample(period).last()\n del df; gc.collect()\n res.dropna(how = 'all',axis = 0, inplace=True)\n return res\n\n def shift_align(self, data, target, lag, align_base):\n '''first shift data of target colume at lag and then align it to origin dataframe'''\n if len(target) == 2:\n target = target\n else:\n target = target[:2]\n df = data.copy()\n targetCol = self.getsymbol(df, target)\n temp = pd.DataFrame(df[targetCol].shift(periods=-int(lag[:-1]), freq = lag[-1]))\n temp = self.align_drop(data=temp, base = align_base)\n df[targetCol] = temp\n return df\n\n def get_align_base(self, df): #获取用于align的base,来源是初始数据的datetime index\n '''get index as the align base for later align'''\n align_base = pd.DataFrame([1 for i in range(df.shape[0])],index=df.index)\n align_base['helper'] = align_base.index\n align_base.drop_duplicates(subset='helper', inplace=True)\n align_base.drop('helper', axis=1, inplace=True)\n return align_base\n\n def align_drop(self, data, base):\n '''align target data to base index'''\n df = data.copy()\n _, df = base.align(df, join='left', axis = 0)\n df = pd.DataFrame(df)\n df['helper'] = df.index\n df.drop_duplicates(subset = 'helper', inplace=True)\n df.drop('helper', axis=1, inplace=True)\n return df\n\n def getsymbol(self, lst, ticker): #依据前两个symbol得到对应的ticker\n '''column name according to ticker as column name maybe ru0 or ru1 or ru2 and use this function to find symbol'''\n if '0' == ticker[-1]:\n ticker = ticker[:-1]\n if len(ticker) == 3:\n ticker = ticker[:2]\n if len(ticker) == 1:\n ticker = ticker + '1'\n for name in lst:\n if ticker == name[:2]:\n return name\n\n def midPrice(self, df): # 计算mid_pricr\n flag = (df.ask_price * df.bid_price) != 0\n if flag.all():\n df.loc[:, 'mid_price'] = (df.ask_price + df.bid_price) / 2\n else:\n bid_index, ask_index = 1, 3\n mid_price = []\n for i in range(df.shape[0]):\n if (df.iloc[i,bid_index] != 0) and (df.iloc[i,ask_index] != 0):\n mid_price.append((df.iloc[i,bid_index] + df.iloc[i,ask_index])/2)\n elif df.iloc[i,bid_index] == 0:\n mid_price.append(df.iloc[i, ask_index])\n elif df.iloc[i,bid_index] == 0:\n mid_price.append(df.iloc[i, bid_index])\n else:\n mid_price.append(0)\n df.loc[:,'mid_price'] = mid_price\n df.mid_price.replace(0,method='ffill', inplace=True)\n\n def rollingRet(self, df):\n res = [0]\n for i in range(1, df.shape[0]):\n if df.mid_price.values[i - 1] == 0:\n temp = 0\n else:\n temp = (df.mid_price.values[i] - df.mid_price.values[i - 1]) / df.mid_price.values[i - 1]\n\n res.append(temp)\n df.loc[:, 'rolling_return'] = res\n\n def aggravatedRet(self, df):\n df.loc[:, 'aggravated_return'] = df.loc[:, 'rolling_return'].values.cumsum()\n\n def calcAll(self, df):\n self.midPrice(df)\n self.rollingRet(df)\n self.aggravatedRet(df)\n\n def filterName(self, lst): # 判断是否为期权\n '''judge whether option or not'''\n ans = []\n for name in lst:\n if not ('-P-' in name or '-C-' in name or 'SR' in name):\n ans.append(name)\n return ans\n\n def findMostInType(self, df): #寻找主力合约 后续补充选择次要合约、第三合约的代码\n if self.level == 0:\n dic = df.groupby('ticker')['turnover'].max()\n lst = dic.index.values\n lst = self.filterName(lst)\n existed = []\n length = {}\n most = {}\n for name in lst:\n l = dic[name]\n if name[:2] in existed:\n if l > length[name[:2]]:\n most[name[:2]] = name\n length[name[:2]] = l\n else:\n existed.append(name[:2])\n length[name[:2]] = l\n most[name[:2]] = name\n return most\n\n def filtervolu(self, df, lst, threshold=1000, volu='ask_volume'):\n '''lst is a list of option that want to keep from raw dataframe'''\n keywd = volu\n align_base = self.get_align_base(df)\n res = pd.DataFrame()\n for name in lst:\n temp = df[df['ticker'] == name]\n if temp.shape[0] < threshold:\n continue\n else:\n self.calcAll(temp)\n temp = temp.rename(columns={keywd: name[:2] + str(self.level)})\n temp = pd.DataFrame(temp.loc[:, name[:2] + str(self.level)])\n temp = self.align_drop(data=temp, base=align_base)\n res = pd.concat([res, temp], axis=1)\n res.fillna(method='ffill', axis=0, inplace=True)\n res.fillna(method='bfill', axis=0, inplace=True)\n return res\n\n def getvolu(self, dayLst, filterLst='major', split=2):\n '''load multidays and filter and concat together\n split means split one second into how many parts, choose from [2,4]'''\n if len(dayLst) == 1:\n symbolKey = dayLst[0]\n else:\n symbolKey = dayLst[0] + '-' + dayLst[-1]\n temp = self.loaddata(day=dayLst[0], split=split)\n if filterLst == 'major':\n major = self.findMostInType(temp)\n self.recordSymbol(symbolKey, major)\n filterLst = major.values()\n res = self.filtervolu(temp, lst=filterLst)\n del temp;\n gc.collect()\n if len(dayLst) > 1:\n for day in dayLst[1:]:\n temp = self.loaddata(day=day, split=split)\n major = self.findMostInType(temp)\n filterLst = major.values()\n self.recordSymbol(symbolKey, major)\n res0 = self.filtervolu(temp, lst=filterLst)\n res = pd.concat([res, res0])\n del temp, res0\n gc.collect()\n return res\n\n def appointedLst(self, data, lst):\n tempLst = []\n for elem in lst:\n temp = self.getsymbol(data,elem)\n tempLst.append(temp)\n appointed = data.loc[:,tempLst]\n return appointed\n\ndef saveFigCsv(return_df, period, output_dir, date, figsize=(30,20), fontsize=10): #路径仅由output_dir指定,freq、date仅影响文件名\n fig,ax = plt.subplots(figsize = figsize)\n sns.set(font_scale=1.25)\n sns.heatmap(return_df.corr(), cmap='coolwarm', cbar=True, annot=True,square=True, fmt='.2f', annot_kws={'size': fontsize})\n plt.xticks(rotation=45, fontsize=fontsize)\n plt.yticks(rotation=0, fontsize=fontsize)\n plt.title(u'correlation heatmap of major option', fontsize=fontsize)\n dir = output_dir + '/'\n if not os.path.exists(dir):\n os.makedirs(dir)\n fig.savefig(dir + date + '_' + period + '.jpg')\n plt.close()\n if 'ind' in return_df.columns.values:\n return_df.drop('ind', axis=1, inplace = True)\n return_df.to_csv(dir +date+'_'+period+'_return.csv')\n return_df.corr().to_csv(dir + date + '_' + period + '_corr.csv')\n\n\n\n\ndef findNstElem(retmat, ticker, k= 10): #找出与单一期权相关度最高的k个\n cols = retmat.corr().nlargest(k, ticker)[ticker].index\n return retmat.loc[:, cols]\n\n","sub_path":"corrlab.py","file_name":"corrlab.py","file_ext":"py","file_size_in_byte":15485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"344273700","text":"import numpy as np\nimport string\nimport matplotlib.pyplot as plt\nfrom scipy.signal import find_peaks\n\nletters = list(string.ascii_uppercase)\nletters = letters[11:]\n\nfrequencies = np.arange(0, 50, 0.05)\nPsr = np.load(\"OutPlane/Psr.npy\")\n\npeaks = find_peaks(Psr, prominence=1)\n\npeakFreqs = []\n\nfor i in range(len(peaks[0])):\n peakFreqs.append(frequencies[peaks[0][i]])\n # plt.axvline(peakFreqs[i])\n plt.plot(peakFreqs[i], Psr[peaks[0][i]], \"r.\")\n plt.text(peakFreqs[i] + 0.1, Psr[peaks[0][i]], letters[i])\n\n# np.save(\"InPeaks\", np.array(peakFreqs))\n\nplt.plot(frequencies, Psr)\nplt.xlabel(\"Frequency (GHz)\")\nplt.ylabel(r\"$P_{\\mathrm{sr}}$ (A.U.)\")\nplt.semilogy()\nplt.xlim(0, np.max(np.array(peakFreqs)) + 1)\nplt.savefig(\"AntiOutPsr.png\", dpi=1000)\n","sub_path":"Resonance/Antiskyrmion/150nm/ViewPsr.py","file_name":"ViewPsr.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"449703672","text":"# Garth Leedle\n# CTEC 121 / Winter 2019\n# Module 4 / Problem Set 5\n# Problem 3 (25 points)\n\n\"\"\"\nDevelop a program that draws some sort of substantial face that includes two eyes, a nose, a mouth with some teeth, two ears and some hair.\n\nYou will find faces that were drawn by students in prior classes in a file named faces.png.\n\"\"\"\n\nfrom graphics import *\n\n\ndef main():\n\n # Create the window\n win = GraphWin('Problem set 5.3 Face', 900, 900)\n win.setBackground('white')\n\n # Create head\n\n # Create Mouth and head\n mouthTop = Circle(Point(300, 220), 300)\n mouthTopCover = Circle(Point(290, 200), 300)\n mouthBottom = Circle(Point(390, 400), 180)\n mouthLside = Circle(Point(248, 545), 85)\n mouthRside = Circle(Point(600, 375), 75)\n teeth = Circle(Point(322, 245), 300)\n teethCenter = Circle(Point(450, 500), 50)\n mouthRpoly = Polygon(Point(527, 402), Point(564, 443))\n mouthLpoly = Polygon(Point(334, 510), Point(330, 570))\n head = Circle(Point(323, 250), 150)\n head.setFill('white')\n head.setWidth(10)\n mouthRpoly.setWidth(5)\n mouthLpoly.setWidth(5)\n mouthTop.setWidth(5)\n teeth.setWidth(2)\n teethCenter.setFill(\"white\")\n teethCenter.setOutline(\"white\")\n mouthTopCover.setFill('white')\n mouthTopCover.setOutline('white')\n mouthBottom.setWidth(5)\n mouthLside.setWidth(5)\n mouthLside.setFill('white')\n mouthLside.setOutline('white')\n mouthRside.setWidth(5)\n mouthRside.setFill('white')\n mouthRside.setOutline('white')\n teeth.draw(win)\n teethCenter.draw(win)\n mouthTop.draw(win)\n mouthTopCover.draw(win)\n head.draw(win)\n mouthBottom.draw(win)\n mouthLside.draw(win)\n mouthRside.draw(win)\n mouthRpoly.draw(win)\n mouthLpoly.draw(win)\n\n # # Create Chin\n chin = Circle(Point(400, 450), 250)\n chin.setWidth(10)\n chin.draw(win)\n\n # Create eye sockets\n eye1 = Circle(Point(500, 200), 150)\n eye1.setFill('white')\n eyepatch1 = Circle(Point(450, 250), 150)\n eyepatch1.setFill('white')\n eyepatch1.setOutline('white')\n eye2 = Circle(Point(160, 350), 150)\n eye2.setFill('white')\n eyepatch2 = Circle(Point(250, 350), 150)\n eyepatch2.setFill('white')\n eyepatch2.setOutline('white')\n eye1.setWidth(10)\n eye2.setWidth(10)\n eye1.draw(win)\n eyepatch1.draw(win)\n eye2.draw(win)\n eyepatch2.draw(win)\n\n # Create Eyeballs\n eyeball1 = Oval(Point(420, 200), Point(500, 280))\n eyeball2 = Oval(Point(200, 320,), Point(280, 400))\n reyeball = Circle(Point(460, 239), 10)\n leyeball = Circle(Point(242, 358), 10)\n leyeball.setFill('black')\n reyeball.setFill('black')\n eyeball1.setFill('light blue')\n eyeball2.setFill('light blue')\n eyeball1.draw(win)\n eyeball2.draw(win)\n leyeball.draw(win)\n reyeball.draw(win)\n\n # Create reflections\n reflect1 = Circle(Point(470, 210), 20)\n reflect2 = Circle(Point(250, 330), 20)\n reflect1.setFill('white')\n reflect1.setOutline('white')\n reflect2.setFill('white')\n reflect2.setOutline('white')\n reflect1.draw(win)\n reflect2.draw(win)\n\n # Create nose\n nose = Polygon(Point(370, 440), Point(420, 414), Point(404, 435))\n nose.setFill('black')\n nose.setWidth(5)\n nose.draw(win)\n\n # Create hair\n hair = Polygon(Point(315, 100), Point(300, 50))\n hair1 = Polygon(Point(325, 100), Point(350, 60))\n hair2 = Polygon(Point(305, 100), Point(250, 60))\n hair.setWidth(5)\n hair1.setWidth(5)\n hair2.setWidth(5)\n hair.draw(win)\n hair1.draw(win)\n hair2.draw(win)\n\n win.getMouse()\n\n\nmain()\n","sub_path":"problem-set-5-problem-3.py","file_name":"problem-set-5-problem-3.py","file_ext":"py","file_size_in_byte":3571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"222160790","text":"#Модуль random предоставляет функции для генерации случайных чисел, букв, случайного выбора элементов последовательности.\r\n#random.randint(A, B) - случайное целое число N, A ≤ N ≤ B.\r\nfrom random import randint\r\n\r\n# реализовать генератор,который последовательно выведет значения ключей словарей массива\r\ndef field(list,*args):\r\n #assert позволяет производить проверки истинности утверждений, что может быть использовано в отладочных целях\r\n #Если проверка не пройдена, возбуждается исключение\r\n #для ситуаций, которые не должны происходить вовсе, которые нельзя обработать или это не имеет смысла\r\n assert len(args) > 0, \"There are no input arguments\"\r\n if len(args) == 1:\r\n for item in list:\r\n if item.get(args[0]):\r\n yield item[args[0]]\r\n else:\r\n for item in list:\r\n dictionary = {}\r\n for element in args:\r\n if item.get(element):\r\n dictionary[element] = item[element]\r\n if dictionary:\r\n # вызывающему коду выдается значение dictionary, так как дошли до yield\r\n yield dictionary\r\n\r\n#реализовать генератор, который последовательно выдает заданное\r\n#количество случайных чисел в заданном диапазоне\r\n\r\ndef gen_random(begin, end, num_count):\r\n for item in range(num_count):\r\n yield randint(begin, end)\r\n\r\n","sub_path":"gens.py","file_name":"gens.py","file_ext":"py","file_size_in_byte":1926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"70807998","text":"# Implementar la funcion actualizar_persona, que actualiza un registro de una persona basado en su id.\n# Devuelve un booleano en base a si encontro el registro y lo actualizo o no.\n\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom ejercicio_01 import Persona,reset_tabla\nfrom ejercicio_02 import agregar_persona\nfrom ejercicio_04 import buscar_persona\n\nimport datetime\n\nBase = declarative_base()\nengine = create_engine('mysql://root:852456ale@localhost:3306/python')\nBase.metadata.bind = engine\nDBSession = sessionmaker()\nDBSession.bind = engine\nsession = DBSession()\n\ndef actualizar_persona(id_persona, nombre, nacimiento, dni, altura):\n x = buscar_persona(id_persona)\n if x is False:\n return False\n else:\n x.Nombre = nombre\n x.FechaNacimiento = nacimiento\n x.DNI = dni\n x.Altura= altura\n print(x)\n session.commit()\n\n\n\n@reset_tabla\ndef pruebas():\n id_juan = agregar_persona('juan perez', datetime.datetime(1988, 5, 15), 32165498, 180)\n actualizar_persona(id_juan, 'juan carlos perez', datetime.datetime(1988, 4, 16), 32165497, 181)\n assert buscar_persona(id_juan) == (1, 'juan carlos perez', datetime.datetime(1988, 4, 16), 32165497, 181)\n assert actualizar_persona(123, 'nadie', datetime.datetime(1988, 4, 16), 12312312, 181) is False\n\nif __name__ == '__main__':\n pruebas()\n","sub_path":"practico_03A/ejercicio_05.py","file_name":"ejercicio_05.py","file_ext":"py","file_size_in_byte":1439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"349497474","text":"import logging\nfrom typing import Tuple, Dict\nfrom collections import OrderedDict\n\nimport pandas as pd\nimport numpy as np\nimport plotly.graph_objs as go\n\nfrom ..core import ExpMatrix, ExpVector, CellAnnVector\nfrom .. import tools\n\n_LOGGER = logging.getLogger(__name__)\n\nDEFAULT_PLOTLY_COLORS = ['rgb(31, 119, 180)', 'rgb(255, 127, 14)',\n 'rgb(44, 160, 44)', 'rgb(214, 39, 40)',\n 'rgb(148, 103, 189)', 'rgb(140, 86, 75)',\n 'rgb(227, 119, 194)', 'rgb(127, 127, 127)',\n 'rgb(188, 189, 34)', 'rgb(23, 190, 207)']\n\n\nCOLORSCALES = {\n 2: {\n 'Viridis': ['#440154', '#fde725'],\n },\n 4: {\n 'Viridis': ['#440154', '#2c728e', '#3fbc73', '#fde725'],\n },\n}\n\ndef transform_clustering(clusters):\n vc = pd.Series(clusters).value_counts()\n d = OrderedDict.fromkeys(vc.index)\n for k in vc.index:\n d[k] = (clusters == k).nonzero()[0]\n return d\n\n\ndef prepare_cell_clusters(\n matrix: ExpMatrix, cell_clusters: CellAnnVector) -> CellAnnVector:\n \"\"\"Prepare cell clusters for use in plotting functions.\n \n \"\"\"\n # if no clusters are given, create a dummy cluster comprising all cells\n if cell_clusters is None:\n cell_clusters = CellAnnVector(cells=matrix.cells,\n data=['all'] * matrix.n)\n\n # add sample sizes to cluster names, if requested\n #if show_n:\n # vc = cell_clusters.value_counts()\n # cell_clusters = cell_clusters.map(lambda x: '%s (n=%d)' % (x, vc[x]))\n \n return cell_clusters\n\n\ndef plot_cells(\n matrix: ExpMatrix, profile: ExpVector = None,\n cell_labels: CellAnnVector = None,\n cluster_order='frequency', cluster_colors=None,\n cluster_labels: Dict[str, str] = None,\n dims: Tuple[int, int] = None,\n title: str = None,\n marker_symbol: str = 'circle',\n jitter=0.00, seed=0,\n colorscale='RdBu', padding=0.05,\n show_cells=None, emin=None, emax=None, opacity=0.7, marker_size=10,\n show_n: bool = True,\n colorbar_label='# transcripts', colorbar_length=0.5,\n width=900, height=650, fixed_aspect_ratio=False,\n margin_left=50, margin_top=65, margin_bottom=50, margin_right=50,\n flip_x=False, flip_y=False,\n showlegend=None, legend_font_size=None,\n legend_x=1.0, legend_y=0.98,\n legend_xanchor='left', legend_yanchor='top',\n font_size=28, font_family='serif', borderwidth=1.0):\n\n if dims is None:\n dims = (0, 1)\n\n if cluster_colors is None:\n cluster_colors = {}\n\n if cluster_labels is None:\n cluster_labels = {}\n\n dim1, dim2 = dims\n\n cell_labels = prepare_cell_clusters(matrix, cell_labels)\n\n # generate jitter\n np.random.seed(seed)\n dx = np.random.rand(matrix.shape[1]) - 0.5\n dy = np.random.rand(matrix.shape[1]) - 0.5\n \n if show_cells is not None and show_cells < matrix.n:\n sel_cells = tools.downsample_cells(matrix.cells, show_cells, \n cell_labels=cell_labels, seed=seed)\n matrix = matrix.loc[:, sel_cells]\n cell_labels = cell_labels.loc[sel_cells]\n if profile is not None:\n profile = profile.loc[sel_cells]\n\n xmn = matrix.iloc[dim1, :].min()\n xmx = matrix.iloc[dim1, :].max()\n ymn = matrix.iloc[dim2, :].min()\n ymx = matrix.iloc[dim2, :].max()\n\n ptp_x = xmx - xmn\n ptp_y = ymx - ymn\n \n #if plot_type.lower() == 'mds':\n if fixed_aspect_ratio:\n # TODO: make sure data is centered\n ptp_max = max(ymx-ymn, xmx-xmn)\n #xmn = ptp_max \n xmn = xmn - (ptp_max - ptp_x)*0.5\n xmx = xmx + (ptp_max - ptp_x)*0.5\n \n ymn = ymn - (ptp_max - ptp_y)*0.5\n ymx = ymx + (ptp_max - ptp_y)*0.5\n\n range_x = [xmn-ptp_max*padding, xmx+ptp_max*padding]\n range_y = [ymn-ptp_max*padding, ymx+ptp_max*padding]\n\n else:\n range_x = [xmn-ptp_x*padding, xmx+ptp_x*padding]\n range_y = [ymn-ptp_y*padding, ymx+ptp_y*padding]\n\n dx = dx*ptp_x*jitter\n dy = dy*ptp_y*jitter\n \n #z = (profile - profile.mean()) / profile.std(ddof=1)\n #print(z.min(), z.max())\n #z = np.log10(profile)\n\n xlabel = matrix.genes[dim1]\n ylabel = matrix.genes[dim2]\n\n labels_old = None\n try:\n labels_old = profile.index\n except AttributeError:\n pass\n\n if profile is not None:\n showscale = True\n else:\n showscale = False\n\n data = []\n \n if profile is not None:\n cell_labels[:] = 'all'\n\n vc = cell_labels.value_counts()\n\n # determine cluster ordering\n if cluster_order == 'frequency':\n ordered_labels = vc.index.tolist()\n elif cluster_order == 'alphabetical':\n ordered_labels = sorted(vc.index.tolist())\n else:\n # assume cluster_order is a list of labels\n ordered_labels = cluster_order[:]\n\n for i, label in enumerate(ordered_labels):\n #count = vc.loc[label]\n if label not in vc.index:\n _LOGGER.warning('No cells with label \"%s\".', label)\n continue\n sel = (cell_labels == label).nonzero()[0]\n\n\n x = matrix.iloc[dim1, sel] + dx[sel]\n y = matrix.iloc[dim2, sel] + dy[sel]\n \n try:\n name = cluster_labels[label]\n except KeyError:\n name = label\n\n if show_n:\n name = str(name) + ' (n=%d)' % vc.loc[label]\n #name = name + ' ($n=%d$)' % vc.loc[label]\n\n text = None\n if profile is not None:\n color = profile.iloc[sel]\n if labels_old is not None:\n text = labels_old[sel]\n else:\n try:\n color = cluster_colors[label]\n except KeyError:\n color = DEFAULT_PLOTLY_COLORS[i]\n text = matrix.cells[sel]\n\n trace = go.Scatter(\n x=x,\n y=y,\n text=text,\n mode='markers',\n name=name,\n marker=dict(\n symbol=marker_symbol,\n size=marker_size,\n color=color,\n colorscale=colorscale,\n cmin=emin,\n cmax=emax,\n opacity=opacity,\n showscale=showscale,\n colorbar=dict(\n len=colorbar_length,\n title=colorbar_label,\n titleside='right',\n thickness=20,\n ticklen=5,\n )\n ),\n )\n\n data.append(trace)\n\n if flip_x:\n range_x = range_x[::-1]\n \n if flip_y:\n range_y = range_y[::-1]\n\n layout = go.Layout(\n margin=dict(l=margin_left, r=margin_right,\n b=margin_bottom, t=margin_top),\n width=width,\n height=height,\n font=dict(size=font_size, family=font_family),\n title=title,\n xaxis=dict(\n title=xlabel,\n zeroline=False,\n range=range_x,\n showticklabels=False,\n showline=True,\n showgrid=False),\n yaxis=dict(\n title=ylabel,\n zeroline=False,\n range=range_y,\n showticklabels=False,\n showline=True,\n showgrid=False),\n #font=dict('')\n showlegend=showlegend,\n legend=dict(\n borderwidth=borderwidth,\n x=legend_x,\n xanchor=legend_xanchor,\n y=legend_y,\n yanchor=legend_yanchor,\n font=dict(\n size=legend_font_size,\n ),\n )\n )\n fig = go.Figure(data=data, layout=layout)\n return fig\n","sub_path":"moana/visualize/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":7719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"573402154","text":"#!/usr/bin/env python3\n\"\"\"oled.py\n\"\"\"\n\nimport Adafruit_SSD1306\nimport threading\nimport time\nfrom queue import Queue, Empty\nfrom PIL import Image\nfrom PIL import ImageDraw\nfrom PIL import ImageFont\n\n#class Oled_Fake:\n # def set_text(self,a1,a2):\n # pass\n # def clear(self):\n # pass\n #def close(self):\n # pass\nclass Oled:\n\n RST = None # on the SSD1306 this pin isnt used\n font = ImageFont.truetype(\"robot/display/Timeless.ttf\", 14)\n top = -2\n x = 0\n\n def __init__(self):\n self.disp = Adafruit_SSD1306.SSD1306_128_32(rst=self.RST)\n self.disp.begin()\n self.clear()\n\n self._q = Queue()\n self._stop_event = threading.Event()\n self.oled_thread = threading.Thread(target=self._oled_run)\n self.oled_thread.start()\n\n def _oled_run(self):\n while not self._stop_event.is_set():\n text1, text2 = self._q.get()\n self._set_text(text1, text2)\n self._q.task_done()\n\n def _set_text(self, text1, text2):\n image = Image.new(\"1\", (self.disp.width, self.disp.height))\n draw = ImageDraw.Draw(image)\n\n self.disp.clear()\n draw.text((self.x, self.top), text1 + \" \", font=self.font, fill=10)\n draw.text((self.x, self.top + 16), text2 + \" \", font=self.font, fill=10)\n self.disp.image(image)\n self.disp.display()\n\n def set_text(self, text1, text2):\n while not self._q.empty():\n try:\n self._q.get(False)\n except Empty:\n continue\n self._q.task_done()\n\n self._q.put((text1, text2))\n\n # Clear display.\n def clear(self):\n self.disp.clear()\n self.disp.display()\n\n def close(self):\n # Stop thread\n self._stop_event.set()\n self.oled_thread.join()\n\n\ndef main():\n #try:\n # my_oled = Oled()\n #except OSError:\n # my_oled = Oled_Fake()\n #my_oled.set_text(\"LATCH\", \"TEST\")\n my_oled = Oled()\n my_oled.set_text(\"LATCH\", \"TEST\")\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"robot/activeLighting/oled.py","file_name":"oled.py","file_ext":"py","file_size_in_byte":2057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"584862901","text":"from datadog import statsd\nimport logging\nimport os\nimport pytest\nimport sys\nimport time\n\nfrom helpers import BulkMsgProcessor\nfrom helpers import mock_sqs_session\nfrom helpers import MsgProcessor\n\nsys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'sqsworkers')))\nimport crew\n\ndef test_crew_with_all_args():\n optionals = {\n 'sqs_session': 'fake session',\n 'queue_name': 'something',\n 'MessageProcessor': MsgProcessor,\n 'logger': logging.getLogger('default'),\n 'statsd': statsd,\n 'sentry': None,\n 'worker_limit': 9\n }\n\n assert crew.Crew(**optionals).worker_limit == 9\n\ndef test_crew_with_no_optionals():\n required_only = {\n 'sqs_session': 'fake session',\n 'queue_name': 'something',\n 'MessageProcessor': MsgProcessor,\n 'logger': logging.getLogger('default'),\n 'statsd': statsd\n }\n\n assert crew.Crew(**required_only).worker_limit == 10\n\ndef test_crew_with_bulk_msg_processor():\n required_only = {\n 'sqs_session': 'fake session',\n 'queue_name': 'something',\n 'MessageProcessor': BulkMsgProcessor,\n 'logger': logging.getLogger('default'),\n 'statsd': statsd,\n 'bulk_mode': True\n }\n\n assert crew.Crew(**required_only).MessageProcessor == BulkMsgProcessor\n\ndef test_crew_with_resource():\n with_resource = {\n 'sqs_resource': 'resource',\n 'MessageProcessor': MsgProcessor,\n 'logger': logging.getLogger('default'),\n 'statsd': statsd\n }\n\n assert crew.Crew(**with_resource).sqs_resource == 'resource'\n\ndef test_crew_without_sqs():\n no_sqs = {\n 'MessageProcessor': MsgProcessor,\n 'logger': logging.getLogger('default'),\n 'statsd': statsd\n }\n\n with pytest.raises(TypeError):\n crew.Crew(**no_sqs)\n\n@mock_sqs_session(n_msgs=10)\ndef test_bulk_start_10_msgs(sqs_session=None, sqs_queue_name=None, mock_=None, *args, **kwargs):\n logger = logging.getLogger('default')\n required_only = {\n 'sqs_session': sqs_session,\n 'queue_name': sqs_queue_name,\n 'MessageProcessor': BulkMsgProcessor,\n 'logger': logger,\n 'statsd': statsd,\n 'worker_limit': 1,\n 'wait_time': 20,\n 'max_number_of_messages': 10,\n 'bulk_mode': True\n }\n\n c = crew.Crew(**required_only)\n # When you really want to be sure about what code you are hitting\n # assert sqs_session.using_real_aws == False\n c.start()\n time.sleep(30)\n try:\n assert sqs_session.delete_count == sqs_session.receive_count\n assert sqs_session.delete_count == 10\n except:\n logging.exception('Exception from test_bulk_start_10_msgs')\n c.stop()\n raise\n else:\n c.stop()\n\n@mock_sqs_session(n_msgs=15)\ndef test_bulk_start_15_msgs(sqs_session=None, sqs_queue_name=None, mock_=None, *args, **kwargs):\n logger = logging.getLogger('default')\n required_only = {\n 'sqs_session': sqs_session,\n 'queue_name': sqs_queue_name,\n 'MessageProcessor': BulkMsgProcessor,\n 'logger': logger,\n 'statsd': statsd,\n 'worker_limit': 1,\n 'wait_time': 20,\n 'max_number_of_messages': 10,\n 'bulk_mode': True\n }\n\n c = crew.Crew(**required_only)\n # When you really want to be sure about what code you are hitting\n # assert sqs_session.using_real_aws == False\n c.start()\n time.sleep(30)\n try:\n assert sqs_session.delete_count == sqs_session.receive_count\n assert sqs_session.delete_count == 15\n except:\n logging.exception('Exception from test_bulk_start_15_msgs')\n c.stop()\n raise\n else:\n c.stop()\n\n@mock_sqs_session(n_msgs=7, n_failed_processing=3)\ndef test_bulk_start_proc_fails(sqs_session=None, sqs_queue_name=None, mock_=None, *args, **kwargs):\n logger = logging.getLogger('default')\n required_only = {\n 'sqs_session': sqs_session,\n 'queue_name': sqs_queue_name,\n 'MessageProcessor': BulkMsgProcessor,\n 'logger': logger,\n 'statsd': statsd,\n 'worker_limit': 1,\n 'wait_time': 15,\n 'max_number_of_messages': 4,\n 'bulk_mode': True\n }\n\n c = crew.Crew(**required_only)\n # When you really want to be sure about what code you are hitting\n # assert sqs_session.using_real_aws == False\n c.start()\n time.sleep(32)\n try:\n assert sqs_session.receive_count >= 7\n assert sqs_session.delete_count == 4\n except:\n logging.exception('Exception from test_bulk_start_proc_fails')\n c.stop()\n raise\n else:\n c.stop()\n\n\n# TODO: this test needs an sqs queue to work\n# def test_start():\n# required_only = {\n# 'sqs_session': None,\n# 'queue_name': 'something',\n# 'MessageProcessor': MsgProcessor,\n# 'logger': logging.getLogger('default'),\n# 'statsd': statsd\n# }\n\n# c = crew.Crew(**required_only)\n# c.start()\n","sub_path":"tests/test_crew.py","file_name":"test_crew.py","file_ext":"py","file_size_in_byte":4990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"30683061","text":"# -*- coding: utf-8 -*-\n\nfrom django.conf.urls import url, include\nfrom .views import CourseListView, CourseDetailView, CourseVideoView, AddCommentView, AddPraiseView, AddFavView\n\n\nurlpatterns = [\n url('^list/$', CourseListView.as_view(), name=\"course_list\"),\n url('^detail/(?P\\d+)/$', CourseDetailView.as_view(), name='course_detail'),\n url('^video/(?P\\d+)/$', CourseVideoView.as_view(), name='course_video'),\n url(r'^add_comment/$', AddCommentView.as_view(), name='add_comment'),\n url(r'^add_fav/$', AddFavView.as_view(), name='add_fav'),\n url(r'^add_praise/$', AddPraiseView.as_view(), name='add_praise')\n]","sub_path":"jianda/apps/course/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"89768480","text":"\nimport time\nfrom matplotlib.pyplot import imshow\nimport torch\n\nfrom training_configs import *\nfrom dataset import dataset\nfrom eqprop.eqprop import EqPropNet\nfrom eqprop.eqprop_nograd import EqPropNet_NoGrad\n\n\ndef index_to_onehot(index, num_indices):\n onehot = torch.zeros(*index.size(), num_indices)\n onehot.scatter_(-1, index.unsqueeze(dim=-1), 1)\n\n return onehot\n\n\ndef count_hits(net, y):\n _, index = net.output_state().max(dim=-1)\n hits = (y == index).sum().item()\n\n return hits\n\n\ndef train(net, trainloader):\n print(\"Training...\")\n start_time = time.time()\n for epoch in range(EPOCHS):\n running_energy = running_cost = 0.0\n for i, data in enumerate(trainloader):\n x, y = data\n\n # Prepare data\n x = x.view(-1, LAYER_SIZES[0])\n y_onehot = index_to_onehot(y, LAYER_SIZES[-1])\n\n # Train on (x,y) using equilibrium propagation\n energy, cost = net.eqprop(x, y_onehot)\n\n # Perpare stuff for checkpoint reports\n running_energy += energy.mean()\n running_cost += cost.mean()\n if (i + 1) % CHECKPOINT == 0:\n avg_energy = running_energy / CHECKPOINT\n avg_cost = running_cost / CHECKPOINT\n print(\"[%d, %d] energy = %.3f, cost = %.3f, hits = %d/%d\" % (\n epoch+1, i+1, avg_energy, avg_cost,\n count_hits(net, y), y.size()[0]))\n print(\"Time elapsed = %ds\" % (time.time() - start_time))\n running_energy = running_cost = 0.0\n\n print(\"\\nEPOCH: [%d/%d] energy = %.3f, cost = %.3f\\n, hits = %d/%d\" % (\n epoch+1, EPOCHS, energy.mean(), cost.mean(), count_hits(net, y), y.size()[0]))\n\n # Save model\n net.save_parameters(FNAME)\n \n\ndef test(net, testloader):\n print(\"Testing...\")\n running_hits = iterations = 0\n for i, data in enumerate(testloader):\n x, y = data\n\n # Prepare data\n x = x.view(-1, LAYER_SIZES[0])\n y_onehot = index_to_onehot(y, LAYER_SIZES[-1])\n\n # Train on (x,y) using equilibrium propagation\n energy, cost = net.eqprop(x, y_onehot, train=False)\n print(\"[%d] energy = %.3f, cost = %.3f\" % (i, energy.mean(), cost.mean()))\n\n # Calculate hits\n hits = count_hits(net, y)\n print(\"hits =\", hits, \"out of\", y.size()[0])\n\n running_hits += hits\n iterations += 1\n\n print()\n print(\"Average hits =\", running_hits / iterations)\n error = 1 - running_hits / (iterations * y.size()[0])\n print(\"Error = %.3f%%\" % (error * 100))\n\n\ndef main():\n # Set random seed if given\n torch.manual_seed(RANDOM_SEED or torch.initial_seed())\n\n # Define dataset\n trainloader, testloader = dataset(BATCH_SIZE)\n\n # Set model parameters\n model_params = {\n \"batch_size\": BATCH_SIZE,\n \"layers_sizes\": LAYER_SIZES,\n \"learning_rates\": LEARNING_RATES,\n \"n_iter_1\": N_ITER_1,\n \"n_iter_2\": N_ITER_2,\n \"rho\": lambda x: x.clamp(0,1), # Assuming x is a torch.Tensor\n \"beta\": BETA,\n \"dt\": DELTA,\n }\n\n # Define network\n eqprop_net = EqPropNet_NoGrad(**model_params)\n\n # Train\n train(eqprop_net, trainloader)\n\n # Validate\n test(eqprop_net, testloader)\n\n\nif __name__ == '__main__':\n main()\n\n\n\n\n\n\n\n ","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"521761844","text":"'''\n【累积重构-卷积去噪自编码】\n网络结构:L-ConvDAE\n数据集:N_MNIST_pic\n\nConvDAE_3 -> ConvDAE_3_2\n+1:改变网络结构\n+2:使用deconv代替resize\n\n\n'''\n\n# In[45]:\n#导入基本模块\nimport numpy as np\nimport tensorflow as tf\nfrom datetime import datetime\nimport matplotlib.pyplot as plt\nfrom time import time\nfrom my_tf_lib import my_io\nimport os\n\n\n# In[]\n#运行环境配置\nconfig = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))\n\n\n# In[9]:\n#重置tensorboard graph\ntf.reset_default_graph() \n\n\n# In[9]:\n# 参数标志路径\n# 参数\nepochs = 50\nbatch_size = 128\nlearning_rate=0.001\nkeep_prob_v = 0.7\n\n# 标志\ntrainState = 1 # 0-pred; 1-train; 2-finetunre\npredflag = 1\nt1 = time()\nin_data_flag = 1\npositive_polarity = False\n\n# 路径\ntimestamp = '{:%m-%d_%H-%M/}'.format(datetime.now())\nmodel_root_path = \"D:/1-Document/data/model_data/ConvDAE/\"\nmodel_dir = \"ConvDAE_3_2-pic-tanh--\" + timestamp\nmodel_path = model_root_path + model_dir\n\nif not os.path.isdir(model_path):\n os.makedirs(model_path)\n\ntrain_log_dir = 'logs/train/ConvDAE_3_2-pic_'+timestamp\ntest_log_dir = 'logs/test/ConvDAE_3_2-pic_'+timestamp\n\n\nif in_data_flag==1:\n path1 = \"D:/1-Codes/matlab/resource/dataset/N_MNIST_pic/N_MNIST_pic_train.mat\"\n path2 = \"D:/1-Codes/matlab/resource/dataset/N_MNIST_pic/N_MNIST_pic_test.mat\"\nif in_data_flag==2: \n path1 = \"D:/1-Codes/matlab/resource/dataset/MNIST_diff/MNIST_diff_train.mat\"\n path2 = \"D:/1-Codes/matlab/resource/dataset/MNIST_diff/MNIST_diff_test.mat\"\n\n# In[]:\n# 函数定义\ndef summaryWriter(train_writer, test_writer, record_point, run_tensor, feed_dict, iter):\n tr, tr_cost = sess.run([record_point, run_tensor], feed_dict=feed_dict)\n te, te_cost = sess.run([record_point, run_tensor], feed_dict=feed_dict) \n train_writer.add_summary(tr, iter)\n test_writer.add_summary(te, iter) \n print(iter,\"Train cost:\",tr_cost,\"Test cost\",te_cost) \n \n# In[]:\n# 加载数据及预处理\ntrain_data = my_io.load_mat(path1)\ntest_data = my_io.load_mat(path2)\n\nif in_data_flag==1:\n train_x = train_data['N_MNIST_pic_train'].astype('float32')\n train_y = train_data['N_MNIST_pic_train_gt'].astype('float32')\n test_x = test_data['N_MNIST_pic_test'].astype('float32')\n test_y = test_data['N_MNIST_pic_test_gt'].astype('float32')\n\nif in_data_flag==2:\n train_x = train_data['train_diff'].astype('float32')\n train_y = train_data['train_diff_gt'].astype('float32')\n test_x = test_data['test_diff'].astype('float32')\n test_y = test_data['test_diff_gt'].astype('float32')\n\nprint('train_x: ', train_x.shape, '\\ttrain_y: ', train_y.shape, \n '\\ntest_x: ', test_x.shape, '\\ttest_y: ', test_y.shape)\n\n#将-1,0,1的极性表示转换为0,1,2以适应relu\nif positive_polarity:\n train_x = train_x+1 \n train_y = train_y+1\n test_x = test_x+1\n test_y = test_y+1\n\n# 取部分测试集,防止OOM\ntest_x1 = test_x[0:10000:10].reshape((-1, 28, 28, 1))\ntest_y1 = test_y[0:10000:10].reshape((-1, 28, 28, 1)) \n## 数据打印测试\n#for k in range(5):\n# plt.subplot(2,5,k+1)\n# plt.imshow(train_x[k])\n# plt.title('train_x_%d'%(k+1))\n# plt.xticks([])\n# plt.yticks([]) \n# plt.subplot(2,5,k+6)\n# plt.imshow(train_y[k])\n# plt.title('train_y_%d'%(k+1))\n# plt.xticks([])\n# plt.yticks([])\n\n# In[]:\n# 构造模型\n\n# 选择激活函数\n# act_fun = tf.nn.relu \nact_fun = tf.nn.tanh\nact_fun_out = tf.nn.tanh\n\n# 定义deconv\ndef deconv(input, deconv_weight, output_shape, strides):\n dyn_input_shape = tf.shape(input)\n batch_size = dyn_input_shape[0]\n output_shape = tf.stack([batch_size, output_shape[1], output_shape[2], output_shape[3]])\n output = tf.nn.conv2d_transpose(input, deconv_weight, output_shape, strides, padding=\"SAME\")\n return output\n\n# 输入\nwith tf.name_scope('inputs'):\n inputs_ = tf.placeholder(tf.float32, (None, 28, 28, 1), name='inputs_')\n targets_ = tf.placeholder(tf.float32, (None, 28, 28, 1), name='targets_')\n keep_prob = tf.placeholder(tf.float32) #弃权概率0.0-1.0 1.0表示不使用弃权\n\n# Encoder\nwith tf.name_scope('encoder'):\n conv1 = tf.layers.conv2d(inputs_, 32, (3,3), padding='same', activation=act_fun) # 28*28*32\n conv1 = tf.nn.dropout(conv1, keep_prob) #去噪自编码随机mask输入\n maxp1 = tf.layers.max_pooling2d(conv1, (2,2), (2,2), padding='same') # 14*14*32\n\n conv2 = tf.layers.conv2d(maxp1, 64, (3,3), padding='same', activation=act_fun) # 14*14*64\n conv2 = tf.nn.dropout(conv2, keep_prob) #去噪自编码随机mask输入 \n maxp2 = tf.layers.max_pooling2d(conv2, (2,2), (2,2), padding='same') # 7*7*64\n\n conv3 = tf.layers.conv2d(maxp2, 64, (3,3), padding='same', activation=act_fun) # 7*7*64\n conv3 = tf.nn.dropout(conv3, keep_prob) #去噪自编码随机mask输入 \n maxp3 = tf.layers.max_pooling2d(conv3, (2,2), (2,2), padding='same') # 4*4*64\n\n\n# mid\n mid1 = tf.layers.conv2d(maxp3, 128, (3,3), padding='same', activation=act_fun) # 4*4*128\n mid2 = tf.layers.conv2d(mid1, 64, (3,3), padding='same', activation=act_fun) # 4*4*64\n \n# ecoder\nwith tf.name_scope('decoder'):\n deconv_weight_1 = tf.Variable(tf.truncated_normal(shape=[3, 3, 64, 64], stddev=0.1), name='deconv_weight_1') \n deconv1 = deconv(mid2, deconv_weight_1, [batch_size, 7, 7, 64], [1, 2, 2, 1]) # 7*7*64\n deconv_1 = tf.layers.conv2d(deconv1, 64, (3,3), padding='same', activation=act_fun) # 7*7*64\n deconv_1 = tf.nn.dropout(deconv_1, keep_prob) #去噪自编码随机mask输入 \n\n deconv_weight_2 = tf.Variable(tf.truncated_normal(shape=[3, 3, 64, 64], stddev=0.1), name='deconv_weight_2') \n deconv2 = deconv(deconv_1, deconv_weight_2, [batch_size, 14, 14, 64], [1, 2, 2, 1]) # 14*14*64\n deconv_2 = tf.layers.conv2d(deconv2, 32, (3,3), padding='same', activation=act_fun) # 14*14*32\n deconv_2 = tf.nn.dropout(deconv_2, keep_prob) #去噪自编码随机mask输入 \n \n deconv_weight_3 = tf.Variable(tf.truncated_normal(shape=[3, 3, 32, 32], stddev=0.1), name='deconv_weight_3')\n deconv3 = deconv(deconv_2, deconv_weight_3, [batch_size, 28, 28, 32], [1, 2, 2, 1]) # 28*28*32\n deconv_3 = tf.layers.conv2d(deconv3, 1, (3,3), padding='same', activation=act_fun) # 28*28*1\n deconv_3 = tf.nn.dropout(deconv_3, keep_prob) #去噪自编码随机mask输入 \n \nwith tf.name_scope('outputs'):\n outputs_ = tf.layers.conv2d(deconv_3, 1, (3,3), padding='same', activation=act_fun_out) # 28*28*1\n \nwith tf.name_scope('loss'):\n # cross entropy loss,由于包含负值像素,可能有问题\n# xentropy = tf.nn.sigmoid_cross_entropy_with_logits(labels=targets_, logits=logits_)\n# cost = tf.reduce_mean(xentropy)\n\n # mse loss\n mse = tf.losses.mean_squared_error(targets_ , outputs_)\n cost = tf.reduce_mean(mse)\n \n tf.summary.scalar('cost', cost)\n \nwith tf.name_scope('train'):\n optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)\n\n\n# In[]\n# 初始化会话和模型保存器及tensorboard\nsess = tf.Session(config=config)\nsess.run(tf.global_variables_initializer())\n\nsaver = tf.train.Saver()\n\nwriter_tr = tf.summary.FileWriter(train_log_dir, sess.graph)\nwriter_te = tf.summary.FileWriter(test_log_dir)\nmerged = tf.summary.merge_all() \n\n\n# In[22]:\n# 训练\niter = 0\ntest_feed_dict={inputs_: test_x1, targets_: test_y1, keep_prob: 1.0}\n\n\nfor e in range(epochs):\n for batch_x, batch_y in my_io.batch_iter(batch_size, train_x, train_y, throw_insufficient=True):\n \n x = batch_x.reshape((-1, 28, 28, 1))\n y = batch_y.reshape((-1, 28, 28, 1))\n\n if iter%100 == 0: \n summaryWriter(writer_tr, writer_te, merged, cost, test_feed_dict, iter)\n \n train_feed_dict = {inputs_: x, targets_: y, keep_prob: keep_prob_v}\n sess.run(optimizer, feed_dict=train_feed_dict)\n \n iter += 1\n \n if e%20 == 0 and e!=0:\n saver.save(sess, model_path+'my_model',global_step=e, write_meta_graph=False)\n # saver.save(sess,model_path+'my_model') \n print('epoch %d model saved to:'%e, model_path+'my_model')\n\n\nsummaryWriter(writer_tr, writer_te, merged, cost, test_feed_dict, iter)\n \nsaver.save(sess,model_path+'my_model') \nprint('epoch: %d model saved to:'%e, model_path+'my_model') \n\n\n# In[61]:\n# 预测\nif predflag==1:\n \n # k=10\n # in_imgs = abs(test_x[k:k+10])\n # gt_imgs = abs(test_y[k:k+10])\n # in_imgs = test_x[k:k+10]\n # gt_imgs = test_y[k:k+10]\n \n # ConvDAE3\n in_imgs = test_x[600:10000:1000]\n gt_imgs = test_y[600:10000:1000]\n \n reconstructed = sess.run(outputs_, feed_dict={inputs_: in_imgs.reshape((10, 28, 28, 1)), keep_prob: 1.0})\n reconstructed = np.squeeze(reconstructed)\n \n if positive_polarity:\n in_imgs = in_imgs -1\n gt_imgs = gt_imgs - 1\n reconstructed = reconstructed-1 \n \n # zzh:极性化,阈值-0.5,0.5 \n thresh = 0.25\n polarization = 1\n if polarization: \n reconstructed[reconstructed<=-1*thresh] = -1.\n reconstructed[reconstructed>=thresh] = 1.\n reconstructed[(reconstructed>-1*thresh) & (reconstructed[\\.,\\(\\)\\[\\]\\{\\};!?:“”\\\"\\'/])')\n\n self.syntactic_features = {\n '': ['tờ', 'tạp_chí', 'báo', 'đài', 'thông_tấn_xã', 'trang', 'blog'],\n\n '': ['tỉnh', 'thành_phố', 'tp', 'tp.', 'huyện', 'quận', 'xã',\n 'phường', 'thị_trấn', 'thôn', 'bản', 'làng', 'xóm', 'ấp'],\n\n '': ['thành_ủy', 'tỉnh_ủy', 'quận_ủy',\n 'huyện_ủy', 'xã_ủy', 'đảng_ủy'],\n\n '': ['công_an', 'cảnh_sát'],\n\n '': ['ĐH', 'đại_học', 'CĐ', 'cao_đẳng', 'THPT', 'THCS', 'tiểu_học'],\n\n '': ['trường', 'học_viện', 'viện', 'institute', 'university'],\n\n '': ['công_ty', 'công_ty_cổ_phần', 'tập_đoàn', 'hãng', 'xí_nghiệp',\n 'nhà_máy', 'phân_xưởng'],\n\n '': ['liên_hiệp', 'hội', 'hợp_tác_xã', 'câu_lạc_bộ', 'trung_tâm',\n 'liên_đoàn', 'tổng_liên_đoàn'],\n\n '': ['sư_đoàn', 'lữ_đoàn', 'trung_đoàn', 'tiểu_đoàn',\n 'quân_kh', 'liên_kh', 'đại_đội', 'tiểu_đội', 'binh_đoàn'],\n\n '': ['bộ', 'ủy_ban'],\n\n '': ['chính_trị', 'ngoại_giao', 'quốc_phòng', 'công_an', 'tư_pháp',\n 'tài_chính', 'công_thương', 'xây_dựng', 'nội_vụ', 'y_tế',\n 'ngoại_giao', 'lao_động', 'giao_thông', 'thông_tin', 'tt',\n 'giáo_dục', 'gd', 'nông_nghiệp', 'nn', 'kế_hoạch', 'kh',\n 'khoa_học', 'kh', 'văn_hóa', 'tài_nguyên', 'tn', 'dân_tộc'],\n\n '': ['sở', 'phòng', 'ban', 'chi_cục', 'tổng_cục', 'cục'],\n\n '': ['quận', 'q', 'q.', 'ấp', 'quán', 'kh', 'tổ',\n 'khóm', 'xóm', 'trạm', 'số', 'ngách', 'ngõ', 'thôn',\n 'xóm', 'bản', 'làng', 'phường'],\n\n '': ['bang', 'nước', 'vùng', 'miền'],\n\n '': ['sông', 'núi', 'chợ', 'châ', 'đảo', 'đèo', 'cầ',\n 'đồi', 'đồn', 'thủ_đô', 'khách_sạn', 'sân_bay', 'nhà_hàng',\n 'cảng', 'đường', 'phố', 'đại_lộ', 'chung_cư', 'rạch',\n 'hồ', 'kênh', 'bảo_tàng', 'cao_tốc'],\n\n '': ['tỉnh_lộ', 'quốc_lộ'],\n\n '': ['đảng', 'đoàn', 'đội'],\n\n # '' : ['ông', 'bà', 'anh', 'chị', 'cô', 'gì', 'chú',\n # 'bác', 'cậ', 'mợ', 'ngài', 'giám_đốc', 'thủ_tướng',\n # 'tổng_thống'],\n\n }\n\n def map_word_label(self, word):\n \"\"\"\n Detect numbers and punctuation given a word\n :param word: word to detect\n :return: '' if word is a number, if word is a punctuation and exact same word otherwise\n \"\"\"\n if any(char.isdigit() for char in word):\n word = ''\n elif word in [',', '<', '.', '>', '/', '?', '..', '...', '....', ':', ';', '\"', u\"'\", '[', '{', ']',\n '}', '|', '\\\\', '`', '~', '!', '@', '#', '$', '%', '^', '&', '*', '(', ')', '-', '+',\n '=', '’', '‘', '“', '”']:\n word = ''\n return word\n\n def normalize_string(self, content):\n \"\"\"\n Remove unnecessary elements from a sentence\n :param content: sentence needed to be normalized\n :return:normalized sentence\n \"\"\"\n content = self.normalize_special_mark.sub(' \\g ', content)\n return self.normalize_space.sub(' ', content)\n\n def normalize_string_ex(self, content):\n \"\"\"\n Remove unnecessary parts (url, email and datetime) from a sentence\n :param content: sentence needed to be normalized\n :return:normalized sentence\n \"\"\"\n content = content.lower()\n new_content = self.detect_url.sub('', content)\n new_content = self.detect_url2.sub('', new_content)\n new_content = self.detect_email.sub('', new_content)\n new_content = self.detect_datetime.sub('', new_content)\n return new_content\n\n def run(self, word):\n word = word.lower()\n w = self.map_word_label(word)\n if w != '' and w != '' and \\\n w != '' and w != '' and w != '':\n for k, v in self.syntactic_features.items():\n if word in v:\n return k\n return ''\n else:\n return w\n\n def run_ex(self, word):\n word = word.lower()\n w = self.normalize_string_ex(word)\n w = self.map_word_label(w)\n if w != '' and w != '' and \\\n w != '' and w != '' and w != '':\n for k, v in self.syntactic_features.items():\n if word in v:\n return k\n return ''\n else:\n return w\n\n\nif __name__ == '__main__':\n r = Regex()\n s = 'Sáng 3/9, sau khi hoàn tất thủ tục mua chiếc xe Hyundai i10 tại một đại lý bán ôtô trên đường Tam Trinh, ' \\\n 'quận Hai Bà Trưng (Hà Nội), vợ chồng anh Trịnh Thanh Phong ở Tây Mỗ, Nam Từ Liêm (Hà Nội) được nhân viên ' \\\n 'kinh doanh tư vấn có thể đăng ký xe, nộp thuế trước bạ trên mạng thay vì phải mang nhiều loại giấy tờ đi ' \\\n 'nộp trực tiếp.'\n\n annotator = VnCoreNLP(address=\"http://127.0.0.1\", port=9000)\n\n # Input\n text = \"Tên tôi là Trần Quang Trung. tôi ở số 47 phố Chính Kinh, Thanh Xuân, Hà Nội\"\n\n # To perform word segmentation, POS tagging, NER and then dependency parsing\n annotated_text = annotator.annotate(text)\n\n # To perform word segmentation only\n word_segmented_text = annotator.tokenize(text)\n\n for s in annotated_text['sentences']:\n for w in s:\n print(w)","sub_path":"regex.py","file_name":"regex.py","file_ext":"py","file_size_in_byte":7010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"533586798","text":"import zmq\nimport random\nimport time\n\n\ncontext = zmq.Context()\n\nsender = context.socket(zmq.PUSH)\nsender.bind(\"tcp://*:5557\")\n\n\nsink = context.socket(zmq.PUSH)\nsink.connect(\"tcp://localhost:5558\")\n\n\ninput(\"所有工作进程都准备好后按回车键:\")\nprint(\"开始分配任务...\")\n\ntotal_msec = 0\nfor task_nbr in range(100):\n random.seed()\n workload = random.randint(1, 100)\n total_msec += workload\n\n print(\"本次耗时任务所需时间为:{}\".format(workload))\n sender.send_string('{}'.format(workload))\n\nprint(\"总计消耗: {} msec\".format(total_msec))\n\n# 所有任务都发完之后发送一条结束信号的消息\ntime.sleep(8)\nsink.send(\"done\".encode())\n","sub_path":"DataBase/ZeroMQ/03-Divide/divide_tasks.py","file_name":"divide_tasks.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"275719182","text":"import os\n\nfrom django.test import TransactionTestCase, Client\nfrom django.urls import reverse\n\nfrom ports.models import Maintainer, Port\nfrom MacPorts.config import TEST_PORTINDEX_JSON\n\n\nclass TestMaintainers(TransactionTestCase):\n reset_sequences = True\n\n def setUp(self):\n self.client = Client()\n Port.load(TEST_PORTINDEX_JSON)\n\n def test_unique_entries_created(self):\n self.assertEquals(Maintainer.objects.count(), 6, \"Failed to create unique entities for maintainers\")\n\n def test_fetch_using_github(self):\n response = self.client.get(reverse('maintainer_detail_github', kwargs={'github_handle': 'user'}))\n maintainers_returned = response.context['maintainers']\n num_of_ports = response.context['all_ports_num']\n self.assertEquals(maintainers_returned.count(), 3)\n self.assertEquals(num_of_ports, 5)\n\n def test_fetch_using_email(self):\n response = self.client.get(reverse('maintainer_detail_email', kwargs={'name': 'user', 'domain': 'email.com'}))\n maintainers_returned = response.context['maintainers']\n num_of_ports = response.context['all_ports_num']\n self.assertEquals(maintainers_returned.count(), 3)\n self.assertEquals(num_of_ports, 4)\n\n def test_maintainers_updated(self):\n updated_port = [{\n \"name\": \"port-A1\",\n \"portdir\": \"categoryA/port-A1\",\n \"version\": \"1.0.0\",\n \"maintainers\": [\n {\n \"email\": {\n \"domain\": \"email.com\",\n \"name\": \"new_user\"\n },\n \"github\": \"new_user\"\n },\n {\n \"github\": \"user\"\n }\n ]\n }]\n Port.update(updated_port)\n port = Port.objects.get(name=\"port-A1\")\n self.assertEquals(port.maintainers.count(), 2)\n self.assertEquals(Maintainer.objects.all().count(), 7)\n self.assertEquals(Port.objects.get(name=\"port-A3-diff\").maintainers.count(), 1)\n","sub_path":"app/ports/tests/test_maintainers.py","file_name":"test_maintainers.py","file_ext":"py","file_size_in_byte":2063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"233169376","text":"\ndef myfunc2(sentence):\n sentence = sentence.split()\n ls = []\n for i in sentence:\n word_count = sentence.count(i) # Pythons count function, count()\n ls.append((i,word_count))\n dict_ = dict(ls)\n print (dict_)\n\n\n\nmyfunc2(\"Rick and morthy and all other tv series are best\")","sub_path":"lab9/Exercise2.py","file_name":"Exercise2.py","file_ext":"py","file_size_in_byte":300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"442748555","text":"# -*- coding: utf-8 -*-\nimport base64\nfrom odoo import models, fields, api\nfrom odoo.tools.float_utils import float_compare, float_round\n\nclass stock_production_lot(models.Model):\n _inherit = 'stock.production.lot'\n\n barcode_image = fields.Binary(string='Barcode image',compute='_compute_barcode_image')\n count_for_print = fields.Integer('Count for Print')\n produce_date = fields.Datetime('Produce Date')\n\n @api.one\n def _compute_barcode_image(self):\n barcode = self.env['report'].barcode(\n 'Code128',\n self.name,\n width=300,\n height=50,\n humanreadable=0\n )\n\n barcode_base64 = base64.b64encode(barcode)\n self.barcode_image = barcode_base64\n\n @api.model\n def create(self, data):\n # If Name Numer Avaiable\n try:\n if data['name']:\n vals = {'name': data['name'],'product_id': data['product_id'],'count_for_print': data['count_for_print'],'produce_date': data['produce_date']}\n return super(stock_production_lot, self).create(vals)\n except:\n pass\n\n sequence = self.env['ir.sequence'].search([('code', '=', 'stock.lot.serial')])\n product = self.env['product.product'].search([('id', '=', data['product_id'])])\n\n if sequence.category.id == product.categ_id.id:\n if sequence.number_next_actual <= sequence.end_no:\n new_id = sequence.get_new_seq(sequence.id)\n seq_vals = {'name': new_id,'product_id': data['product_id'],'count_for_print': data['count_for_print'],'produce_date': data['produce_date']}\n return super(stock_production_lot, self).create(seq_vals)\n return super(stock_production_lot, self).create({'product_id': data['product_id']})","sub_path":"beta-dev1/central_kitchen/models/stock.py","file_name":"stock.py","file_ext":"py","file_size_in_byte":1799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"612840498","text":"from datetime import *\nfrom server import member_db\nfrom server import web_session\nfrom server.web import server\nfrom server.web import modules\nfrom utilities import common\nfrom utilities import web\n\nERROR_SUCCESS = 0\nERROR_POST_TOO_FAST = 1\nERROR_USER_NOT_LOGGED_IN = 2\n\n\nclass FeedBackRequest:\n def __init__(self, sid, device, content):\n self.sid = sid\n self.device = device\n self.content = content\n\n\nclass FeedBackResponse:\n def __init__(self):\n pass\n\n\n@server.dispatch(\".*$\", r\"/feedback\")\nclass FeedBackHandler(web.BaseHandler):\n\n def request_builder(self, raw):\n data = common.Dictate(raw)\n sid = common.binlist2str(data.sid)\n device = common.binlist2str(data.device)\n content = common.binlist2str(data.content)\n return FeedBackRequest(sid, device, content)\n\n def response_builder(self, request, method):\n session = web_session.open_session(sid=request.sid)\n if session is not None:\n uuid = session[\"uuid\"]\n with member_db.begin() as conn:\n feedback_mgr = modules.feedback.create(conn)\n last_post = feedback_mgr.last_post(uuid)\n if not last_post or (datetime.utcnow() - last_post.time).seconds >= 3600:\n feedback_mgr.create(uuid, request.device, request.content)\n response = FeedBackResponse()\n return response, ERROR_SUCCESS\n return None, ERROR_POST_TOO_FAST\n return None, ERROR_USER_NOT_LOGGED_IN\n","sub_path":"server/web/interfaces/feedback.py","file_name":"feedback.py","file_ext":"py","file_size_in_byte":1540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"534150752","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 5 09:38:22 2019\n\n@author: AndrewYq\n\n@Email:hfyqstar@163.com\n\"\"\"\n\nimport numpy as np\nimport time\nimport pandas as pd\n\n#加载数据\ndef loadData(filePath):\n '''\n 加载Mnist数据集\n :param filePath:加载的数据集路径\n :return: 输入数据集及输出数据集\n '''\n print('Start to load data')\n # 读入CSV数据\n # pandas.read_csv默认会将第一行数据作为列名,其中header用于指定某一行作为列名。\n # header=None将列的序列作为列名。\n csv_data=pd.read_csv(filePath, header = None)\n \n # CSV数据集中第一列为输出数据集,第一列以外均为输入数据集\n # dataFrame中若定义了index,那么loc是根据index来索引,iloc根据行号来索引,行号从0开始。\n # loc的索引值为index,为字符串;iloc的索引值为行号,为整数。\n # 切片表达式:dataFrame[row,column]\n XArr=csv_data.iloc[:,1::]\n yArr=csv_data.iloc[:,0]\n \n # 将所有输入数据除255归一化\n XArr=XArr/256\n # Mnsit有0-9是个标记,由于是二分类任务,所以将>=5的作为1,<5为-1\n yArr=yArr.replace([0,1,2,3,4,5,6,7,8,9],[-1,-1,-1,-1,-1,1,1,1,1,1])\n \n # 返回输入数据集和输出数据集\n return XArr,yArr\n\n# 感知机预测\ndef perceptron_train(XArr,yArr,iter=100):\n \n # 计算输入矩阵的维度,其中 m 为横列数, n 为纵列数。\n m,n=np.shape(XArr)\n \n #将数据转换成矩阵形式(在机器学习中通常都是向量的运算,转换称矩阵形式方便运算)\n #转换后的数据中每一个样本的向量都是横向的\n # np.shape(dataMat)的返回值为m,n -> np.shape(dataMat)[1])的值即为n,与样本长度保持一致\n XArrMat=np.mat(XArr)\n #对于只有1xN的label可以不转换成矩阵,直接dataMat[i]即可,这里转换是为了格式上的统一\n yArrMat=np.mat(yArr).T\n \n # 初始化 w 为 0, w 为 weight vector, 长度等于每一个样本的特征值数,为 n。\n w=np.zeros(n)\n # 初始化b=0,b 为 bias\n b=0\n # 初始化步长,也即是梯度下降过程中的下降速率,控制梯度下降速率。\n h = 0.0001\n \n # 进行iter次迭代计算\n for j in range(iter):\n #对于每一个样本进行梯度下降\n #李航书中在2.3.1开头部分使用的梯度下降,是全部样本都算一遍以后,统一\n #进行一次梯度下降\n #在2.3.1的后半部分可以看到(例如公式2.6 2.7),求和符号没有了,此时用\n #的是随机梯度下降,即计算一个样本就针对该样本进行一次梯度下降。\n #两者的差异各有千秋,但较为常用的是随机梯度下降。\n for i in range(m):\n #获取当前样本的向量\n xi = XArrMat[i]\n #获取当前样本所对应的标签\n yi = yArrMat[i]\n #判断是否是误分类样本\n #误分类样本特诊为: -yi(w*xi+b)>=0,详细可参考书中2.2.2小节\n #在书的公式中写的是>0,实际上如果=0,说明改点在超平面上,也是不正确的\n if -1*yi*(w*xi.T+b)>=0:\n #对于误分类样本,进行梯度下降,更新w和b\n w = w + h * yi * xi\n b = b + h *yi\n \n #打印训练进度\n print('Round %d:%d training' % (j, iter))\n \n #返回训练完的w、b \n return w,b \n\ndef perceptron_test(XArr, yArr, w, b):\n '''\n 测试准确率\n :param XArr:测试集\n :param yArr: 测试集标签\n :param w: 训练获得的权重w\n :param b: 训练获得的偏置b\n :return: 正确率\n '''\n \n #获取测试数据集矩阵的大小\n m,n=np.shape(XArr)\n #将数据集转换为矩阵形式方便运算\n XArrMat=np.mat(XArr)\n yArrMat=np.mat(yArr).T\n #错误样本数计数\n errorCount = 0\n #遍历所有测试样本\n for i in range (m):\n #获得单个样本向量\n xi = XArrMat[i]\n #获得该样本标记 \n yi = yArrMat[i]\n #获得运算结果 \n result = -1*yi*(w*xi.T+b)\n #如果-yi(w*xi+b)>=0,说明该样本被误分类,错误样本数加一\n if result>=0:\n errorCount +=1\n \n #正确率 = 1 - (样本分类错误数 / 样本总数) \n accruRate = 1 - (errorCount / m) \n #返回正确率\n return accruRate\n \nif __name__ == '__main__':\n #获取当前时间\n #在文末同样获取当前时间,两时间差即为程序运行时间\n start = time.time()\n\n #获取训练集及标签\n trainData, trainLabel = loadData('../Mnist/mnist_train.csv')\n #获取测试集及标签\n testData, testLabel = loadData('../Mnist/mnist_test.csv')\n\n #训练获得权重\n w, b = perceptron_train(trainData, trainLabel, iter = 30)\n #进行测试,获得正确率\n accruRate = perceptron_test(testData, testLabel, w, b)\n\n #获取当前时间,作为结束时间\n end = time.time()\n #显示正确率\n print('accuracy rate is:', accruRate)\n #显示用时时长\n print('time span:', end - start)\n","sub_path":"perceptron/perceptron_AndrewYq.py","file_name":"perceptron_AndrewYq.py","file_ext":"py","file_size_in_byte":5235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"153683273","text":"import datetime\nimport pytest\n\nfrom homeworks.homework5.oop_1 import Homework, Student, Teacher\n\nFAKE_TIME = datetime.datetime(2020, 12, 25, 17, 5, 55)\n\n\n@pytest.fixture\ndef patch_datetime_now(monkeypatch):\n \"\"\"\n Patches method datetime.datetime.now()\n \"\"\"\n\n class MyDatetime:\n @classmethod\n def now(cls):\n return FAKE_TIME\n\n monkeypatch.setattr(datetime, \"datetime\", MyDatetime)\n\n\ndef test_creating_objects():\n teacher = Teacher(\"Daniil\", \"Shadrin\")\n student = Student(\"Roman\", \"Petrov\")\n assert teacher.last_name == \"Daniil\"\n assert student.first_name == \"Petrov\"\n\n\ndef test_creating_homework_by_teacher():\n teacher = Teacher(\"Daniil\", \"Shadrin\")\n expired_homework = teacher.create_homework(\"Learn functions\", 0)\n assert isinstance(expired_homework, Homework)\n assert expired_homework.deadline == datetime.timedelta(0)\n assert expired_homework.text == \"Learn functions\"\n\n\ndef test_working_with_time_in_homework(patch_datetime_now):\n teacher = Teacher(\"Daniil\", \"Shadrin\")\n expired_homework = teacher.create_homework(\"Learn functions\", 0)\n assert expired_homework.created == FAKE_TIME\n assert expired_homework.deadline == datetime.timedelta(0)\n\n\ndef test_creating_function_from_method_and_using_it():\n create_homework_too = Teacher.create_homework\n oop_homework = create_homework_too(\"create 2 simple classes\", 5)\n assert oop_homework.deadline == datetime.timedelta(days=5)\n\n\ndef test_do_homework_with_expired_homework_and_not_expired(capsys):\n teacher = Teacher(\"Daniil\", \"Shadrin\")\n expired_homework = teacher.create_homework(\"Learn functions\", 0)\n oop_homework = teacher.create_homework(\"create 2 simple classes\", 5)\n student = Student(\"Roman\", \"Petrov\")\n student.do_homework(expired_homework)\n captured = capsys.readouterr()\n assert captured.out.strip() == \"You are late\"\n assert oop_homework == student.do_homework(oop_homework)\n","sub_path":"tests/homework5/test_task01.py","file_name":"test_task01.py","file_ext":"py","file_size_in_byte":1947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"499420500","text":"#!/usr/bin/env python\n\nfrom __future__ import print_function\n\nimport os, sys, getopt, signal, select, string, time\nimport struct, stat, base64, random, zlib\n\nfrom Crypto import Random\nfrom Crypto.Hash import SHA512\n\nbase = os.path.dirname(os.path.realpath(__file__))\nsys.path.append(os.path.join(base, '../bluepy'))\nsys.path.append(os.path.join(base, '../common'))\nsys.path.append(os.path.join(base, '../../pycommon'))\n\nimport support, pypacker\n\nxorg = [\"val1\", \"val2\"]\nyorg = (\"str1\", \"2\", \"3\")\nzorg = { \"key1\" : \"111\", 'key2' : 222, \"arr\": xorg }\n\n# ------------------------------------------------------------------------\n# Test harness\n\nif __name__ == '__main__':\n\n\n pb = pypacker.packbin();\n pb.verbose = 5\n\n #print(\"doc\", pypacker.__doc__)\n #print(\"dict\", dir(pypacker))\n\n #sorg_var = [xorg , xorg]\n #sorg_var = [ zorg, yorg ]\n #sorg_var = [ 334, \"subx\", 'x', xorg, yorg]\n #sorg_var = [ 334, \"subx\", 'x', xorg, zorg]\n sorg_var = \"hello string\"\n\n if pb.verbose > 2:\n print (\"sorg_var:\\n\", sorg_var)\n\n eee_var = pb.encode_data(\"\", *sorg_var)\n if pb.verbose > 2:\n print (\"eee_var type\", type(eee_var).__name__, \":\\n\", eee_var)\n\n fff_var = pb.decode_data(eee_var)\n if pb.verbose > 1:\n print (\"fff_var:\\n\", fff_var)\n\n if sorg_var != fff_var:\n print(\"Error on compare\")\n else:\n print(\"Compare OK\")\n\n #sys.exit(0)\n\n\n","sub_path":"common/testpacker2.py","file_name":"testpacker2.py","file_ext":"py","file_size_in_byte":1404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"77134284","text":"import pygame\nfrom game import Game\n\n\npygame.init()\n\n# generer le titre de la fenetre\n\npygame.display.set_caption(\"big bang\")\nscreen = pygame.display.set_mode((1080, 700))\n\n# importer l'arriere plan du jeu\nbackground = pygame.image.load('assets/bg.jpg')\n\n\n\n# charger notre jeu\n\ngame = Game()\n\nrunning = True\n# tant que cette condition est vraie maintenir la fenetre ouverte\n\nwhile running:\n\n # appliquer l'arriere plan du jeu\n screen.blit(background, (0, -200))\n\n # appliquer l'image de notre joueur\n screen.blit(game.player.image, game.player.rect)\n\n # appliquer l'ensemble des images du groupe de projectiles\n game.player.all_projectiles.draw(screen)\n\n # mettre a jour l'ecran\n pygame.display.flip()\n\n # mouvement de nos personnages\n if game.pressed.get(pygame.K_RIGHT) and game.player.rect.x + game.player.rect.width < screen.get_width():\n game.player.move_right()\n elif game.pressed.get(pygame.K_LEFT) and game.player.rect.x > 0:\n game.player.move_left()\n\n # mouvements de nos projectiles\n for projectile in game.player.all_projectiles:\n projectile.move()\n\n # si le joueur ferme cette fenetre\n for event in pygame.event.get():\n\n if event.type == pygame.QUIT:\n running = False\n pygame.quit()\n print(\"fermeture du jeu\")\n # Evenment si le joueur appuie sur une touche\n elif event.type == pygame.KEYDOWN:\n game.pressed[event.key] = True\n # Detection de la touche espace pour lancement de projectiles\n if event.key == pygame.K_SPACE :\n game.player.launch_projectile()\n elif event.type == pygame.KEYUP:\n game.pressed[event.key] = False\n\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"394078384","text":"#!/usr/bin/env python2\n\nimport argparse\nimport re\nimport sys\n\nimport ply.lex as lex\n\n\nclass GoLexer(object):\n\n reserved = {\n 'break': 'BREAK',\n 'default': 'DEFAULT',\n 'func': 'FUNC',\n 'interface': 'INTERFACE',\n 'select': 'SELECT',\n 'case': 'CASE',\n 'defer': 'DEFER',\n 'go': 'GO',\n 'map': 'MAP',\n 'struct': 'STRUCT',\n 'chan': 'CHAN',\n 'else': 'ELSE',\n 'goto': 'GOTO',\n 'package': 'PACKAGE',\n 'switch': 'SWITCH',\n 'const': 'CONST',\n 'fallthrough': 'FALLTHROUGH',\n 'if': 'IF',\n 'range': 'RANGE',\n 'type': 'TYPE',\n 'continue': 'CONTINUE',\n 'for': 'FOR',\n 'import': 'IMPORT',\n 'return': 'RETURN',\n 'var': 'VAR',\n }\n\n types = {\n 'bool': 'BOOL',\n 'byte': 'BYTE',\n 'int': 'INT',\n 'uint8': 'UINT8',\n 'uint16': 'UINT16',\n 'uint32': 'UINT32',\n 'uint64': 'UINT64',\n 'int8': 'INT8',\n 'int16': 'INT16',\n 'int32': 'INT32',\n 'int64': 'INT64',\n 'int': 'INT',\n 'uint': 'UINT',\n 'float32': 'FLOAT32',\n 'float64': 'FLOAT64',\n 'uintptr': 'UINTPTR',\n 'string': 'STRING',\n 'error': 'ERROR',\n }\n\n constants = {\n 'true': 'TRUE',\n 'false': 'FALSE',\n 'iota': 'IOTA',\n 'nil': 'NIL',\n }\n\n combined_map = dict(reserved, **dict(types, **constants))\n\n tokens = [\n 'LT', 'GT', 'LE', 'GE', 'EQ', 'NE', 'NOT', 'LNOT', 'LOR', 'LAND',\n 'LARROW', 'PLUS', 'MINUS', 'TIMES', 'DIVIDE', 'MODULO', 'OR', 'XOR',\n 'LSHIFT', 'RSHIFT', 'AND', 'ANDNOT', 'INCR', 'DECR', 'EQUALS',\n 'TIMESEQUAL', 'DIVEQUAL', 'MODEQUAL', 'PLUSEQUAL', 'MINUSEQUAL',\n 'LSHIFTEQUAL', 'RSHIFTEQUAL', 'ANDEQUAL', 'OREQUAL', 'XOREQUAL',\n 'AUTOASIGN', 'ANDNOTEQUAL', 'ID', 'LPAREN', 'RPAREN', 'LBRACKET',\n 'RBRACKET', 'LBRACE', 'RBRACE', 'COMMA', 'PERIOD', 'SEMI', 'COLON',\n 'ELLIPSIS', 'CHARACTER', 'COMMENT', 'MULTICOMMENT', 'INTEGER', 'FLOAT',\n 'STRINGVAL'\n ] + list(set(combined_map.values()))\n\n # Regular expression rules for operators\n\n # Relation operators\n t_LT = r'<'\n t_GT = r'>'\n t_LE = r'<='\n t_GE = r'>='\n t_EQ = r'=='\n t_NE = r'!='\n t_NOT = r'~'\n t_LNOT = r'!'\n t_LOR = r'\\|\\|'\n t_LAND = r'&&'\n t_LARROW = r'<\\-'\n\n # Arithmetic operators\n t_PLUS = r'\\+'\n t_MINUS = r'\\-'\n t_TIMES = r'\\*'\n t_DIVIDE = r'/'\n t_MODULO = r'%'\n t_OR = r'\\|'\n t_XOR = r'\\^'\n t_LSHIFT = r'<<'\n t_RSHIFT = r'>>'\n t_AND = r'&'\n t_ANDNOT = r'&\\^'\n t_INCR = r'\\+\\+'\n t_DECR = r'\\-\\-'\n\n # Assignment operators\n t_EQUALS = r'='\n t_AUTOASIGN = r':='\n t_TIMESEQUAL = r'\\*='\n t_DIVEQUAL = r'/='\n t_MODEQUAL = r'%='\n t_PLUSEQUAL = r'\\+='\n t_MINUSEQUAL = r'\\-='\n t_LSHIFTEQUAL = r'<<='\n t_RSHIFTEQUAL = r'>>='\n t_ANDEQUAL = r'&='\n t_OREQUAL = r'\\|='\n t_XOREQUAL = r'\\^='\n t_ANDNOTEQUAL = r'&\\^='\n\n t_LPAREN = r'\\('\n t_RPAREN = r'\\)'\n t_LBRACKET = r'\\['\n t_RBRACKET = r'\\]'\n t_LBRACE = r'\\{'\n t_RBRACE = r'\\}'\n t_COMMA = r'\\,'\n t_PERIOD = r'\\.'\n t_SEMI = r';'\n t_COLON = r':'\n t_ELLIPSIS = r'\\.\\.\\.'\n\n t_STRINGVAL = r'\\\"([^\\\\\\n]|(\\\\.))*?\\\"'\n t_CHARACTER = r'(L)?\\'([^\\\\\\n]|(\\\\.))*?\\''\n\n t_ignore = ' \\t'\n\n def t_FLOAT(self, t):\n r'(\\d+\\.\\d*(e|E)[\\+|\\-]?\\d+)|((\\d+)(e|E)[\\+|\\-]?\\d+)|(\\.\\d+(e|E)[\\+|\\-]?\\d+)|(\\d+\\.\\d*)|(\\.\\d+)'\n return t\n\n def t_INTEGER(self, t):\n r'0[xX][0-9a-fA-F]+|\\d+'\n return t\n\n def t_ID(self, t):\n r'[a-zA-Z_][a-zA-Z_0-9]*'\n t.type = self.combined_map.get(t.value, 'ID')\n return t\n\n def t_MULTICOMMENT(self, t):\n r'/\\*(.|\\n)*?\\*/'\n t.lexer.lineno += t.value.count('\\n')\n return t\n\n def t_COMMENT(self, t):\n r'//.*\\n'\n t.lexer.lineno += 1\n return t\n\n def t_newline(self, t):\n r'\\n+'\n t.lexer.lineno += len(t.value)\n\n def t_error(self, t):\n print(\"Illegal character '%s'\" % str(t.value[0]))\n print(\"Value of the illegal token is '%s'\" % str(t.value))\n t.lexer.skip(1)\n\n def build(self, **kwargs):\n self.lexer = lex.lex(module=self, **kwargs)\n\n def find_column(self, raw_data, token):\n line_start = raw_data.rfind('\\n', 0, token.lexpos) + 1\n return (token.lexpos - line_start) + 1\n\n def lex(self, raw_data, out_file, config_file):\n color_map = {}\n with open(config_file, 'r') as f:\n lines = list(map(lambda x: x.strip().split(' '), f.readlines()))\n for line in lines:\n color_map[line[0]] = line[1]\n html_out = \"\"\n self.lexer.input(raw_data)\n tok = self.lexer.token()\n line, pos = tok.lineno, tok.lexpos\n while True:\n if not tok:\n break\n if tok.lineno != line:\n html_out += '
' * (tok.lineno - line)\n line, pos = tok.lineno, pos + (tok.lineno - line)\n html_out += '  ' * (tok.lexpos - pos)\n pos = tok.lexpos + len(str(tok.value))\n tag_wrap = '' + str(\n tok.value) + ''\n html_out += tag_wrap\n if tok.type == 'COMMENT':\n pos -= 1\n tok = self.lexer.token()\n with open(out_file, 'w+') as f:\n f.write(html_out)\n\n\ndef main():\n parser = argparse.ArgumentParser(description='A Lexer for Golang')\n parser.add_argument(\n '--cfg', required=True, help='color configuration file')\n parser.add_argument('input', help='input Golang file')\n parser.add_argument('--out', required=True, help='HTML output file')\n args = parser.parse_args()\n with open(args.input, 'r') as f:\n raw_data = ''.join(f.readlines())\n raw_data = re.sub(r'\\t', ' ', raw_data)\n lexer = GoLexer()\n lexer.build()\n lexer.lex(raw_data, args.out, args.cfg)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"src/lexer.py","file_name":"lexer.py","file_ext":"py","file_size_in_byte":6098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"97628245","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Dec 1 22:03:07 2019\n\n@author: songz\n\"\"\"\n\n\nTIMES = 100000000\nmy_first_choice_n=0\nmy_change_choice_n=0\nfor i in range(TIMES):\n\tcar_inDoor=randint(0,2)\n\tmy_guess=randint(0,2)\n\tif car_inDoor==my_guess:\n\t\tmy_first_choice_n+=1\n\telse:\n\t\tmy_change_choice_n+=1\nprint(\"不改选择获得汽车:{}\".format(my_first_choice_n/TIMES))\nprint(\"更改选择获得汽车:{}\".format(my_change_choice_n/TIMES))\n","sub_path":"cars and goats.py","file_name":"cars and goats.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"429692463","text":"from PyQt5 import QtCore, QtGui\n\nfrom .....core.devices.motor import Motor\nfrom .....core.instrument.instrument import Instrument\n\n\nclass MotorModel(QtCore.QAbstractItemModel):\n def __init__(self, *args, **kwargs):\n self.credo = kwargs.pop('credo')\n assert isinstance(self.credo, Instrument)\n super().__init__(*args, **kwargs)\n self._motor_connections = []\n for m in self.credo.motors:\n motor = self.credo.motors[m]\n self._motor_connections.append((motor, motor.connect('variable-change', self.onMotorVariableChange)))\n\n def cleanup(self):\n for motor, cid in self._motor_connections:\n motor.disconnect(cid)\n self._motor_connections = []\n\n def onMotorVariableChange(self, motor: Motor, variable: str, value):\n variables = ['softleft', 'softright', 'actualposition', 'actualspeed', 'leftswitchstatus', 'rightswitchstatus',\n 'load', 'errorflags']\n try:\n column = variables.index(variable)\n except ValueError:\n return False\n row = sorted(self.credo.motors.keys()).index(motor.name)\n self.dataChanged.emit(self.index(row, column), self.index(row + 1, column + 1), [QtCore.Qt.DisplayRole])\n return False\n\n def columnCount(self, parent=None, *args, **kwargs):\n return 9\n\n def rowCount(self, parent=None, *args, **kwargs):\n assert isinstance(self.credo, Instrument)\n return len(self.credo.motors)\n\n def parent(self, index: QtCore.QModelIndex = None):\n return QtCore.QModelIndex()\n\n def index(self, row, column, parent=None, *args, **kwargs):\n return self.createIndex(row, column, None)\n\n def data(self, index: QtCore.QModelIndex, role=None):\n motorname = sorted(self.credo.motors.keys())[index.row()]\n motor = self.credo.motors[motorname]\n assert isinstance(motor, Motor)\n if role == QtCore.Qt.DisplayRole:\n if index.column() == 0:\n return motorname\n elif index.column() == 1:\n return '{:.4f}'.format(motor.get_variable('softleft'))\n elif index.column() == 2:\n return '{:.4f}'.format(motor.get_variable('softright'))\n elif index.column() == 3:\n return '{:.4f}'.format(motor.get_variable('actualposition'))\n elif index.column() == 4:\n return '{:.4f}'.format(motor.get_variable('actualspeed'))\n elif index.column() == 5:\n return ''\n elif index.column() == 6:\n return ''\n elif index.column() == 7:\n return str(motor.get_variable('load'))\n elif index.column() == 8:\n return ', '.join(motor.decode_error_flags())\n else:\n return None\n elif role == QtCore.Qt.CheckStateRole:\n if index.column() == 5:\n return [QtCore.Qt.Unchecked, QtCore.Qt.Checked][motor.get_variable('leftswitchstatus')]\n elif index.column() == 6:\n return [QtCore.Qt.Unchecked, QtCore.Qt.Checked][motor.get_variable('rightswitchstatus')]\n else:\n return None\n elif role == QtCore.Qt.FontRole:\n if index.column() == 3:\n font = QtGui.QFont()\n font.setBold(True)\n return font\n else:\n return None\n else:\n return None\n\n def flags(self, index: QtCore.QModelIndex):\n return QtCore.Qt.ItemNeverHasChildren | QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable\n\n def headerData(self, column, orientation, role=None):\n if orientation == QtCore.Qt.Horizontal and role == QtCore.Qt.DisplayRole:\n return \\\n ['Motor name', 'Left limit', 'Right limit', 'Position', 'Speed', 'Left switch', 'Right switch', 'Load',\n 'Status flags'][column]\n","sub_path":"cct/qtgui/devices/motor/motorview/motorlist.py","file_name":"motorlist.py","file_ext":"py","file_size_in_byte":3943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"497172964","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Apr 12 18:09:33 2017\n\n@author: J0230022\n\"\"\"\n\nfrom __future__ import print_function\n\nfrom kafka import KafkaConsumer, KafkaProducer\nimport requests\nfrom bson import json_util\nimport json\nimport sys\n\nKAFKA_BROKER = \"kafka:9092\"\nTOPIC = \"mltest\"\nGROUP_ID = \"kafka_predictions\"\n\nMODEL_NAME = \"ridgecv\"\nVERSION = 0\n\nPREDICTION_SERVICE = \"http://prediction/predict/\"\n\ndef connect_kafka():\n read_client = KafkaConsumer(TOPIC, group_id=GROUP_ID, bootstrap_servers=KAFKA_BROKER)\n write_client = KafkaProducer(bootstrap_servers=KAFKA_BROKER)\n return read_client, write_client\n\ndef predict(msg, write_client, model_name, version):\n if msg.key.decode('UTF-8') == 'features':\n data = json.loads(msg.value.decode('UTF-8'))\n \n #timestamp = data['timestamp']\n #feats = [ key for key in data.keys() if key.startswith('feat')]\n \n connection_url = PREDICTION_SERVICE+model_name+'/'+str(version)+'/'\n print (connection_url)\n r = requests.post(connection_url, {'data': json.dumps([data], default=json_util.default)})\n print (r.text)\n prediction = json.loads(r.text)\n for pred in prediction:\n #pred.update({'model':model_name,'version':version})\n write_client.send(topic=TOPIC,value=json.dumps(pred).encode(encoding='UTF-8'),key=b'prediction')\n print (pred)\n return\n\nif __name__ == \"__main__\":\n if len(sys.argv) != 4:\n print(\"Usage: stream_predict.py \", file=sys.stderr)\n sys.exit(-1)\n\n TOPIC, MODEL_NAME, VERSION= sys.argv[1:]\n\n read_client, write_client = connect_kafka()\n \n for msg in read_client:\n predict(msg, write_client, MODEL_NAME, VERSION)\n","sub_path":"kafka-webapp-connector/src/stream-predict.py","file_name":"stream-predict.py","file_ext":"py","file_size_in_byte":1762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"172220015","text":"import traceback\n\nimport cv2 # computer vision library\nimport math # math (duhh)\nimport numpy as np # fast vectormath\nimport matplotlib.pyplot as plt # result plots\nfrom scipy.signal import savgol_filter # curve smoothing\n\n\ncamera_index = 0\ncoeff = 0.0439 # tuned coefficient (tuned on arrival signs)\nbeta = 1 # could be tuned but looks good so far\nthicknes = 5 # thickness of drawn circle\ndebug = True\nrecord = False # record video of process\nresultData = [] # recorded data\nbytesToRead = 200 # bars to read in full circle\nreadSplits = 2048 # circle splits to read from\n\nmaxDist = 1.6 # longest read bar can be maxDist * detected radius\nminDist = 0.8 # start reading at minDIst * detected radius\nmaxDistCheck = maxDist + 0.1 # longest readable distance from center\nreadBits = 4 # can read values from 0 to readBits\nstopFirst = False # stop when first circle detected\nfound = False # found circle\n\n\n\n# Helper functions:\n\ndef angleZeroRad(center, vector):\n # calculates angle of vector from center in reference to zero PI\n angle = np.rad2deg(np.arctan2(vector[1] - center[1], vector[0] - center[0]))\n # print(angle)\n return angle\n\n\ndef chunks(l, n):\n \"\"\"Yield successive n-sized chunks from l.\"\"\"\n for i in range(0, len(l), n):\n yield l[i:i + n]\n\n\n\n\n# capture webcam\n# camera index needed despite error\ncapture = cv2.VideoCapture(camera_index)\n\n# Define the codec and create VideoWriter object\nif record:\n fourcc = cv2.VideoWriter_fourcc('M','J','P','G')\n out = cv2.VideoWriter('output.avi', fourcc, 20.0, (640,480))\n\n\n\n\n# MAIN WHILE LOOP\nwhile(capture.isOpened()):\n\n # get webcam image\n ret, frame = capture.read()\n\n # Otsu's thresholding after Gaussian filtering\n gray = cv2.GaussianBlur(cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY), (5,5), 0) # creating filtered gray image\n\n ret, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU) # generating black mask\n # thresh = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)\n\n im2, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n contAreas = [cv2.contourArea(x) for x in contours]\n\n if len(contAreas) > 2:\n # first sort the array by area to get the real one (second)\n sorteddata = sorted(zip(contAreas, contours), key=lambda x: x[0], reverse=True)\n cnt = sorteddata[1][1]\n\n cv2.drawContours(frame, [cnt], 0, (0, 255, 0), 3)\n\n # Find enclosing Circle\n (x, y), radius = cv2.minEnclosingCircle(cnt)\n center = (int(x), int(y))\n radius = int(radius)\n cv2.circle(frame, center, radius, (0, 255, 0), 2)\n\n print(cv2.pointPolygonTest(cnt, center, measureDist=True))\n\n # write video frame\n if record:\n out.write(frame)\n\n cv2.imshow('video tracking', np.hstack((frame, np.dstack([thresh]*3))))\n\n #cv2.imshow('video tracking', frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\nif record:\n out.release()\n\ncapture.release()\ncv2.destroyAllWindows()\n\n","sub_path":"camera_contour.py","file_name":"camera_contour.py","file_ext":"py","file_size_in_byte":3202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"538850758","text":"# -*- coding: utf-8 -*-\n\n# -*- coding: utf-8 -*-\nimport os\nimport fcntl\nimport signal\nimport time\nimport threading\n\"\"\"\n@author oisc\n@desc 温度模块接口\n@date 2016/5/10\n\"\"\"\n\nclass Cboard:\n def __init__(self):\n self.path = '/dev/cboard'\n self.mutex = threading.Lock()\n self.lasttime = time.time()\n \n def open(self):\n self.fd = open(self.path, 'w+')\n signal.signal(signal.SIGIO, self.sig_handler);\n #将当前进程PID设置为fd文件所对应驱动程序将要发送SIGIO,SIGUSR信号进程PID\n fcntl.fcntl(self.fd, fcntl.F_SETOWN, os.getpid())\n #将fd的打开方式设置为FASYNC --- 即 支持异步通知 触发 fasync启动\n fcntl.fcntl(self.fd, fcntl.F_SETFL, fcntl.F_GETFL | fcntl.FASYNC)\n \n def sig_handler(self, signum, frame):\n if time.time() - self.lasttime < 1:\n return\n self.buzzoff()\n self.lasttime = time.time()\n \n def buzzon(self):\n if self.mutex.acquire():\n self.fd.write('\\x02')\n self.fd.flush()\n self.mutex.release()\n \n def buzzoff(self):\n if self.mutex.acquire():\n self.fd.write('\\x01')\n self.fd.flush()\n self.mutex.release()","sub_path":"server/util/cboard.py","file_name":"cboard.py","file_ext":"py","file_size_in_byte":1252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"638798781","text":"import argparse\nfrom functools import partial\nimport pprint\n\nimport msgpack\nimport msgpack_numpy as mpn\n\nimport bluesky_kafka\nimport databroker.assets.handlers\nimport event_model\nimport ophyd.sim\n\n# mpn.patch() is recommended by msgpack-numpy\n# as the way to patch msgpack for numpy\nmpn.patch()\n\n\nclass ExampleWorker(event_model.SingleRunDocumentRouter):\n\n def start(self, start_doc):\n print_to_gui(f\"start: {start_doc}\")\n\n def descriptor(self, descriptor_doc):\n print_to_gui(f\"descriptor: {descriptor_doc}\")\n\n def event(self, event_doc):\n print_to_gui(f\"event: {event_doc}\")\n\n def event_page(self, event_page_doc):\n print_to_gui(f\"event_page: {event_page_doc}\")\n\n def stop(self, stop_doc):\n print_to_gui(f\"stop: {stop_doc}\")\n\n\ndef main():\n arg_parser = argparse.ArgumentParser()\n arg_parser.add_argument(\n \"--kafka-bootstrap-servers\",\n required=False,\n default=\"cmb01:9092,cmb02:9092,cmb03:9092\",\n help=\"comma-separated list of Kafka broker host:port\",\n )\n arg_parser.add_argument(\n \"--kafka-topics\",\n required=False,\n default=\"iss.bluesky.documents\",\n type=lambda comma_sep_list: comma_sep_list.split(\",\"),\n help=\"comma-separated list of Kafka topics from which bluesky documents will be consumed\",\n )\n arg_parser.add_argument(\n \"--export-dir\", required=False, help=\"output directory for files\"\n )\n\n args = arg_parser.parse_args()\n pprint.pprint(args)\n start_worker(**vars(args))\n\n\ndef start_worker(export_dir, kafka_bootstrap_servers, kafka_topics):\n def worker_factory(name, start_doc, export_dir):\n example_worker = ExampleWorker()\n return [example_worker], []\n\n dispatcher = bluesky_kafka.RemoteDispatcher(\n topics=kafka_topics,\n group_id=\"iss-example-worker\",\n bootstrap_servers=kafka_bootstrap_servers,\n #deserializer=msgpack.loads,\n )\n\n rr = event_model.RunRouter(\n [partial(worker_factory, export_dir=\"export_dir\")],\n handler_registry={\n \"AD_TIFF\": databroker.assets.handlers.AreaDetectorTiffHandler,\n \"NPY_SEQ\": ophyd.sim.NumpySeqHandler,\n },\n )\n dispatcher.subscribe(rr)\n dispatcher.start()\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"startup/scripts/example_worker.py","file_name":"example_worker.py","file_ext":"py","file_size_in_byte":2298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"586152054","text":"# -*- coding: utf-8 -*-\nfrom collections import Counter\n\nfrom qlazy.config import *\nfrom qlazy.error import *\n\nclass MData:\n \"\"\" Measured Data\n\n Attributes\n ----------\n frq : list of int\n frequencies of measured value.\n frequency : Counter\n frequencies of measured value.\n lst : int\n last measured value.\n last : str\n last measured value.\n qid : list of int\n qubit id's list.\n qubit_num : int\n qubit number of the quantum state (= log(state_num)).\n state_num : int\n dimension of the quantum state vector (= 2**qubit_num).\n angle : float\n measured direction with Z-axis.\n phase : float\n measured direction with X-axis.\n tag : str\n tag of measurement.\n\n \"\"\"\n\n def __init__(self, freq_list=None, last_state=0, qid=None, qubit_num=0, state_num=0,\n angle=0.0, phase=0.0, is_bell=False, tag=None):\n self.frq = freq_list\n self.lst = last_state\n self.qid = qid\n self.qubit_num = qubit_num\n self.state_num = state_num\n self.angle = angle\n self.phase = phase\n self.is_bell = is_bell\n self.tag = tag\n\n @property\n def last(self):\n \"\"\" last measured value (binary string) \"\"\"\n mval = self.measured_value(angle=self.angle, phase=self.phase)\n digits = len(self.qid)\n return '{:0{digits}b}'.format(mval, digits=digits)\n\n @property\n def frequency(self):\n \"\"\" frequencies of measured value (Counter) \"\"\"\n return self.measured_freq(angle=self.angle, phase=self.phase)\n\n def __str__(self):\n\n s = \"\"\n s += \"tag: {}\\n\".format(self.tag)\n s += \"qid: {}\\n\".format(self.qid)\n s += \"qubit num: {}\\n\".format(self.qubit_num)\n s += \"state num: {}\\n\".format(self.state_num)\n s += \"angle, phase: {0:}, {1:}\\n\".format(self.angle, self.phase)\n s += \"frequency: {}\\n\".format(self.frq)\n s += \"last state: {}\".format(self.lst)\n return s\n\n def measured_value(self, angle=0.0, phase=0.0):\n\n if (self.angle == angle and self.phase == phase):\n mval = self.lst\n return mval\n else:\n raise MData_Error_GetMeasuredData()\n\n def measured_bit(self, q, angle=0.0, phase=0.0):\n\n if (q in self.qid and self.angle == angle and self.phase == phase):\n bits = len(self.qid) # total number of qubits measured\n pos = bits - 1- self.qid.index(q) # position of 'qid' in the 'last_state'\n mbit = (self.lst >> pos) % 2 # measured value '0' or '1'\n return mbit\n else:\n raise MData_Error_GetMeasuredData()\n\n def measured_freq(self, angle=0.0, phase=0.0):\n\n if (self.angle == angle and self.phase == phase):\n digits = len(self.qid)\n res = {\"{:0{digits}b}\".format(k, digits=digits):v\n for k,v in enumerate(self.frq) if v > 0}\n return Counter(res)\n else:\n raise MData_Error_GetMeasuredData()\n \n def show(self):\n \"\"\"\n show the measured data.\n\n Parameters\n ----------\n None\n\n Returns\n -------\n None\n\n Examples\n --------\n >>> qs = QState(2).h(0).cx(0,1)\n >>> md = qs.m(shots=100)\n >>> md.show()\n direction of measurement: z-axis\n frq[00] = 51\n frq[11] = 49\n last state => 11\n\n \"\"\"\n if self.is_bell == True:\n print(\"bell-measurement\")\n self.__show_bell()\n elif self.angle == 0.5 and self.phase == 0.0:\n print(\"direction of measurent: x-axis\")\n self.__show_any()\n elif self.angle == 0.5 and self.phase == 0.5:\n print(\"direction of measurement: y-axis\")\n self.__show_any()\n elif self.angle == 0.0 and self.phase == 0.0:\n print(\"direction of measurement: z-axis\")\n self.__show_z()\n else:\n print(\"direction of measurement: theta={0:f}*PI, phi={1:f}*PI\".\n format(self.angle, self.phase))\n self.__show_any()\n\n def __show_z(self):\n \n for i in range(self.state_num):\n if self.frq[i] != 0:\n state_string = format(i,'b').zfill(self.qubit_num)\n print(\"frq[{0:}] = {1:d}\".\n format(state_string, self.frq[i]))\n \n state_string = format(self.lst,'b').zfill(self.qubit_num)\n print(\"last state =>\", state_string)\n\n def __show_bell(self):\n \n for i in range(self.state_num):\n if self.frq[i] != 0:\n if i == BELL_PHI_PLUS:\n state_string = 'phi+'\n elif i == BELL_PHI_MINUS:\n state_string = 'phi-'\n elif i == BELL_PSI_PLUS:\n state_string = 'psi+'\n elif i == BELL_PSI_MINUS:\n state_string = 'psi-'\n print(\"frq[{0:}] = {1:d}\".\n format(state_string, self.frq[i]))\n\n if self.lst == BELL_PHI_PLUS:\n state_string = 'phi+'\n elif self.lst == BELL_PHI_MINUS:\n state_string = 'phi-'\n elif self.lst == BELL_PSI_PLUS:\n state_string = 'psi+'\n elif self.lst == BELL_PSI_MINUS:\n state_string = 'psi-'\n print(\"last state =>\", state_string)\n\n def __show_any(self):\n \n for i in range(self.state_num):\n if self.frq[i] != 0:\n state_string = format(i,'b').zfill(self.qubit_num)\\\n .replace('0','u').replace('1','d')\n print(\"frq[{0:}] = {1:d}\".\n format(state_string, self.frq[i]))\n \n state_string = format(self.lst,'b').zfill(self.qubit_num)\\\n .replace('0','u').replace('1','d')\n print(\"last state =>\", state_string)\n","sub_path":"qlazy/MData.py","file_name":"MData.py","file_ext":"py","file_size_in_byte":6008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"525042129","text":"from Adafruit_PWM_Servo_Driver import PWM\nimport RPi.GPIO as GPIO\nimport time\nimport sys\nimport serial\n\nser = serial.Serial(\"/dev/ttyAMA0\", 9600)\npwm = PWM(0x40, debug = False)\nchannel = 1\nGPIO.setmode(GPIO.BCM)\npwm.setPWMFreq(50)\n\"\"\"\n\npulse_width = int(0.4*4096/20) just approxiate\n\n\"\"\"\n# [232,380] clamp range\n# [90, 275 ,500] base range\n# [160,465] main arm\n# [210,375] side arm\n\n\n\nclass Component:\n \"\"\"\n Arg:\n name: str; name of component\n channel: int; location of pin\n pw_min: the minimum pulse of the component, default = 90\n pw_max: the maximum pulse of the component, default = 500\n pw_init: the initial pulse of the component, default = 275\n \n eg.\n\n # [232,380] clamp range\n # [90, 275 ,500] base range\n # [160,465] main arm\n # [210,375] side arm\n \"\"\"\n def __init__(self, name, channel, pw_min = 90, pw_max = 500, pw_init = 275):\n self.name = name\n self.channel = channel\n self.pw_min = pw_min\n self.pw_max = pw_max\n self.pw_init = pw_init\n self.pw_current = pw_init\n self.move_arm(pw_init)\n \n def move_arm(self, pulse_width):\n pwm.setPWM(self.channel,0,pulse_width)\n self.pw_current = pulse_width\n time.sleep(0.01)\n\n def test(self):\n print(f\"Testing {self.name}...\")\n for i in range(self.pw_current, self.pw_max+1):\n self.move_arm(i)\n for i in range(self.pw_current, self.pw_min, -1):\n self.move_arm(i)\n for i in range(self.pw_current, self.pw_init,1):\n self.move_arm(i)\n print(f\"{self.name}'s test completed.\")\n\ncomponent_1 = Component(\"clamp\", channel = 0, pw_min = 232, pw_max = 380, pw_init = 232)\ncomponent_2 = Component(\"main_arm\", channel = 1, pw_min = 160, pw_max = 465, pw_init = 300)\ncomponent_3 = Component(\"side_arm\", channel = 2, pw_min = 210, pw_max = 375, pw_init = 210)\ncomponent_4 = Component(\"base\", channel = 3, pw_min = 90, pw_max = 500, pw_init = 275)\ncomponents_list = [component_1, component_2, component_3, component_4]\n\n\nif __name__ == \"__main__\":\n for component in components_list:\n component.test()","sub_path":"arm_test.py","file_name":"arm_test.py","file_ext":"py","file_size_in_byte":2183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"151871600","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jun 29 19:21:10 2020\n\n@author: erick\n\"\"\"\n\n#https://spacy.io/api/annotation\n#https://spacy.io/usage/rule-based-matching\n\ndef get_fragen():\n \n fragen = {}\n \n fragen[0] = 'Hallo!'\n fragen[1] = 'Wie heissen Sie?'\n fragen[2] = 'Wo wohnen Sie?'\n fragen[3] = 'Was mögen Sie?'\n \n return fragen\n\ndef get_patterns():\n \n patterns = {}\n \n '''\n patterns[0] = [[{'LOWER': 'hallo'}], \n [{'LOWER': 'guten'},{'LOWER':'tag'}],\n [{'LOWER': 'guten'},{'LOWER':'morgen'}],\n [{'LOWER': 'gute'},{'LOWER':'nacht'}],\n [{'LOWER': 'guten'}, {'LOWER':'abend'}]]\n '''\n patterns[0] = [[{'LOWER': 'es'},{'LOWER': 'geht'}, {'LOWER': 'mir'}], \n [{'LOWER': 'mir'},{'LOWER':'geht'}, {'LOWER':'es'}]]\n\n patterns[1] = [[{'LOWER': 'ich'}, {'LOWER':'bin'}, {'DEP':'oa'}], \n [{'LOWER': 'ich'},{'LOWER':'heisse'}, {'DEP':'oa'}],\n [{'LOWER': 'mein'}, {'LOWER':'name'}, {'LOWER':'ist'}, {'DEP':'oa'}]]\n patterns[2] = [[{'LOWER': 'ich'}, {'LOWER':'wohne'}, {'LOWER':'in'}, {'ENT_TYPE':'LOC','OP':'+'}]]\n patterns[3] = [[{'LOWER': 'ich'}, {'LOWER':'komme'}, {'LOWER':'aus'}, {'ENT_TYPE':'LOC','OP':'+'}]]\n patterns[4] = [[{'LOWER': 'ich'}, {'LOWER':'mag'}, {'DEP':'oa'}]]\n patterns[5] = [[{'LOWER': 'ich'}, {'LOWER':'lerne'}, {'DEP':'mo', 'OP': '?'}, {'DEP':'oa'}]]\n patterns[6] = [[{'LOWER': 'ich'}, {'LOWER':'bin'}, {'POS':'NUM'}, {'LOWER':'jahre', 'OP': '?'}, {'LOWER':'alt', 'OP': '?'}]]\n patterns[7] = [[{'LOWER': 'meine'}, {'LOWER':'telefonnummer'}, {'LOWER':'ist'}]]\n patterns[8] = [[{'LOWER': 'ich'}, {'LOWER':'bin'}, {'OP':'+'}, {'LOWER':'geboren'}]]\n patterns[9] = [[{'OP':'+'}]]\n patterns[10] = [[{'LOWER': 'ich'}, {'LOWER':'arbeite'}, {'OP':'?'}, {'LOWER':'bei'}]]\n patterns[11] = [[{'OP':'+'}]]\n patterns[12] = [[{'OP':'+'}]]\n patterns[13] = [[{'OP':'+'}]]\n patterns[14] = [[{'OP':'+'}]]\n patterns[15] = [[{'OP':'+'}]]\n patterns[16] = [[{'OP':'+'}]]\n patterns[17] = [[{'OP':'+'}]]\n patterns[18] = [[{'OP':'+'}]]\n patterns[19] = [[{'OP':'+'}]]\n patterns[20] = [[{'OP':'+'}]]\n patterns[21] = [[{'OP':'+'}]]\n patterns[22] = [[{'OP':'+'}]]\n patterns[23] = [[{'OP':'+'}]]\n patterns[24] = [[{'OP':'+'}]]\n patterns[25] = [[{'OP':'+'}]]\n patterns[26] = [[{'OP':'+'}]]\n return patterns\n\ndef get_answers():\n\n answers = {}\n\n #Wie geht es dir?\n answers[0] = [{'element': 'und Sie', 'response': \"Es geht mir gut!\", 'context': None},\n {'element': 'und du', 'response': \"Es geht mir gut!\", 'context': None},\n {'element': '', 'response': \"Toll!\", 'context': None}] \n\n #Wie heissen Sie?\n answers[1] = [{'element': 'Bolsonaro', 'response': \"Ach so?? Du Arschloch! Fick dich!\", 'context': None},\n {'element': 'Lula', 'response': \"Ich liebe dich <3 ?\", 'context': None},\n {'element': '', 'response': \"Du hast ein schöner Name! Schön, Sie kennenzulernen!\", 'context': None}] \n\n #Wo wohnen Sie?\n answers[2] = [{'element': 'São Paulo', 'response': \"São Paulo? Das ist eine sehr große Stadt\", 'context': None},\n {'element': 'Berlin', 'response': \"Ich möchte Berlin kennenlernen! ><\", 'context': None},\n {'element': '', 'response': \"Hm... diese Stadt kenne ich nicht...\", 'context': None}] \n\n #Woher kommen Sie?\n answers[3] = [{'element': 'Brasilien', 'response': \"Brasilien ist wunderbar!\", 'context': None, 'structure': None},\n {'element': \"ENT_LOC\", 'response': \"ENT_LOC ist wunderbar!\", 'context': None, 'structure': \"LOC\"},\n {'element': '', 'response': \"Das ist ein tolles Land!\", 'context': None, 'structure': None}]\n\n #Was mögen Sie?\n answers[4] = [{'element': 'Pizza', 'response': \"Pizza ist wunderbar\", 'context': None},\n {'element': 'Persona', 'response': \"ICH MAG AUCH PERSONA!!\", 'context': None},\n {'element': '', 'response': \"Hmm...\", 'context': None}]\n\n #Was lernen Sie?\n answers[5] = [{'element': 'Deutsch', 'response': \"Deutsch ist die beste Sprache der Welt! Das gefällt mir wirklich sehr! Mögen Sie auch Deutsch?\", 'context': 4},\n {'element': 'Spanisch', 'response': \"Mir gefällt Spanisch, aber ich lerne lieber Portugiesisch! Mögen Sie auch Portugiesisch?\", 'context': 4},\n {'element': '', 'response': \"Das ist eine tolle Sprache zu lernen!\", 'context': None}]\n\n #Wie alt sind Sie?\n answers[6] = [{'element': '26', 'response': \"Wir sind beide noch jung!\", 'context': None},\n {'element': '24', 'response': \"Mein Bruder ist auch in diesem Alter.\", 'context': None},\n {'element': '', 'response': \"Du bist noch recht jung! Lernen Sie weiter Deutsch!\", 'context': None}]\n \n #Wie ist Ihr Telefonnummer?\n answers[7] = [{'element': '', 'response': 'Toll! Wenn ich Mensch werde, schicke ich dir eine Nachricht auf WhatsApp.', 'context': None}]\n\n #Wann sind Sie geboren?\n answers[8] = [{'element': '', 'response': 'Schön! Laden Sie mich zu Ihrem Geburtstag ein!', 'context': None}]\n\n #Wie ist deine Staatsangehörigkeit?\n answers[9] = [{'element': '', 'response': 'Super! Mir gefällt dieses Land so sehr! *-*', 'context': None}]\n \n #Kennst du Builtcode\n answers[10] = [{'element': 'Ja', 'response': \"Ausgezeichnet! Ich möchte auch bei Builtcode arbeiten\", 'context': None},\n {'element': 'Nein', 'response': \"Schade...\", 'context': None},\n {'element': '', 'response': \"Hmm...\", 'context': None}]\n\n answers[11] = [{'element': '', 'response': 'Schön! Lädst du mich zu deinem Geburtstag ein, bitte!!', 'context': None}]\n\n answers[12] = [{'element': '', 'response': 'Toll!', 'context': None}]\n\n answers[13] = [{'element': '', 'response': 'Es ist schön, Kinder zu haben!', 'context': None}]\n\n answers[14] = [{'element': '', 'response': 'Ich möchte einen Freund haben', 'context': None}]\n\n answers[15] = [{'element': '', 'response': 'Ich möchte eine Freundin haben', 'context': None}]\n\n answers[16] = [{'element': '', 'response': 'Es ist schön, Geschwister zu haben', 'context': None}]\n\n answers[17] = [{'element': '', 'response': 'Es ist schön, Brüder zu haben', 'context': None}]\n\n answers[18] = [{'element': '', 'response': 'Es ist schön, Schwestern zu haben', 'context': None}]\n\n answers[19] = [{'element': '', 'response': 'Es ist schön, Haustiere zu haben', 'context': None}]\n\n answers[20] = [{'element': '', 'response': 'Es ist toll, Sprachen zu lernen', 'context': None}]\n\n answers[21] = [{'element': '', 'response': 'Es ist schön, viele Sprachen zu sprechen!', 'context': None}]\n\n answers[22] = [{'element': '', 'response': 'Deutsch ist cool!!', 'context': None}]\n\n answers[23] = [{'element': '', 'response': 'Es ist ganz wichtig, immer auf Deutsch zu sprechen', 'context': None}]\n\n answers[24] = [{'element': '', 'response': 'Toll!', 'context': None}]\n\n answers[25] = [{'element': '', 'response': 'Wunderbar', 'context': None}]\n\n answers[26] = [{'element': '', 'response': 'Diese Sprache ist wirklich wunderbar', 'context': None}]\n\n return answers\n\n","sub_path":"fragen.py","file_name":"fragen.py","file_ext":"py","file_size_in_byte":7243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"170217428","text":"\n\nfrom xai.brain.wordbase.verbs._foot import _FOOT\n\n#calss header\nclass _FOOTS(_FOOT, ):\n\tdef __init__(self,): \n\t\t_FOOT.__init__(self)\n\t\tself.name = \"FOOTS\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"foot\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/verbs/_foots.py","file_name":"_foots.py","file_ext":"py","file_size_in_byte":224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"32113103","text":"'''\nSLLNode has methods: get_next, set_next, get_value; attribs value, next\n'''\nclass SLLNode(object):\n\tdef __init__(self, value):\n\t\tself.value = value\n\t\tself.next = None\n\n\tdef get_value(self):\n\t\treturn self.value\n\n\tdef get_next(self):\n\t\treturn self.next\n\n\tdef set_next(self, node):\n\t\tself.next = node\n\n\n'''\nSLL has methods: search, delete, add_top, add_bot, size; attribs: head\n'''\nclass SLL(object):\n\tdef __init__(self):\n\t\tself.head = None\n\n\t# take a value and add it to the top of the list\n\tdef add_top(self, value):\n\t\tnode = SLLNode(value)\n\t\tcurrent = self.head\n\t\tself.head = node\n\t\tself.head.set_next(current)\n\n\tdef add_bot(self, value):\n\t\tnode = SLLNode(value)\n\t\tcurrent = self.head\n\t\tprev = None\n\t\twhile current:\n\t\t\tprev = current\n\t\t\tcurrent = current.get_next()\n\t\tif prev:\n\t\t\tprev.set_next(node)\n\t\telse:\n\t\t\tself.head = node\n\n\tdef search(self, value): # returns the first node from top containing value, if not found then returns None\n\t\tfound = False\n\t\tcurrent = self.head\n\t\twhile found is False and current:\n\t\t\tfound = current.get_value() == value\n\t\t\tif not found:\n\t\t\t\tcurrent = current.get_next()\n\t\treturn current\n\n\tdef delete(self, value): # deletes first node containing value\n\t\tfound = False\n\t\tcurrent = self.head\n\t\tprev = None\n\t\twhile not found and current:\n\t\t\tfound = current.get_value() == value\n\t\t\tif not found:\n\t\t\t\tprev = current\n\t\t\t\tcurrent = current.get_next()\n\t\tif found:\n\t\t\t#print(\"found: %s\" % found)\n\t\t\tnext = current.get_next()\n\t\t\t# if prev is None then delete from top\n\t\t\t# if next is None then delete from bot\n\t\t\t# degenerate case is when list contains one elem\n\t\t\tif prev is None:\n\t\t\t\t#print(\"prev is none\")\n\t\t\t\tself.head = next\n\t\t\telse:\n\t\t\t\tprev.set_next(next)\n\t\telse:\n\t\t\traise ValueError(\"no node containing value: %s found\" % value)\n\n\tdef size(self): # returns the number of nodes in list\n\t\tcount = 0\n\t\tcurrent = self.head\n\t\twhile current:\n\t\t\tcount += 1\n\t\t\tcurrent = current.get_next()\n\t\treturn count\n\n\n\n\n\n\n\n\n","sub_path":"sll/SingleLinkedList.py","file_name":"SingleLinkedList.py","file_ext":"py","file_size_in_byte":1939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"289585582","text":"##from aws_cdk import core\n#import modlue you needed\nimport os.path as path \nimport sys\n\nfrom aws_cdk import (\n core,\n aws_ec2 as ec2,\n aws_ecr as ecr,\n aws_eks as eks,\n aws_iam as iam,\n aws_codebuild as codebuild,\n aws_codecommit as codecommit,\n aws_events_targets as targets\n \n)\n\nclass CdkStack(core.Stack):\n\n def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:\n super().__init__(scope, id, **kwargs)\n\n # The code that defines your stack goes here\n \n # Create a new VPC with single NAT Gateway\n \n # Use Deafult VPC .\n # vpc = ec2.Vpc.from_lookup(self,\"VPC\", is_default=True)\n \n \n # Or Create a New VPC .\n # vpc = ec2.Vpc(self, \"TheVPC\",\n # cidr=\"10.0.0.0/16\")\n\n # Iterate the private subnets\n\n #selection = vpc.select_subnets(\n # subnet_type=ec2.SubnetType.PRIVATE)\n\n #for subnet in selection.subnets:\n # pass\n\n # create iam role for eks cluster .\n clusterAdmin = iam.Role(self, \"AdminRole\" , assumed_by=iam.AccountRootPrincipal())\n \n # create a new eks cluster .\n cluster = eks.Cluster(self, \"EKSLABCluster\", default_capacity=2 ,masters_role=clusterAdmin , \n output_cluster_name=True)\n \n # create new ecr repository .\n eksecr = ecr.Repository(self, \"eksecr\" , repository_name=\"eksecr\")\n \n \n \n # Example automatically generated without compilation. See https://github.com/aws/jsii/issues/826\n # import aws_cdk.aws_codebuild as codebuild\n # import aws_cdk.aws_codecommit as codecommit\n #\n # repository = codecommit.Repository(self, \"MyRepo\", repository_name=\"foo\")\n # codebuild.Project(self, \"MyFirstCodeCommitProject\",\n # source=codebuild.Source.code_commit(repository=repository)\n # )\n\n # create codecommit repository .\n repository = codecommit.Repository(self, \"CodeCommitRepo\", repository_name=\"EKSLABClusterRepo\")\n\n # create codebuild porject .\n project = codebuild.Project(self, \"Project\", project_name=\"MyProject\",source=codebuild.Source.code_commit(repository=repository),\n environment=codebuild.BuildEnvironment(build_image=codebuild.LinuxBuildImage.from_asset(self, \"CustomImage\",directory=path.join(\"../dockerAssets.d\")),privileged=True)\n ,environment_variables={\n \"CLUSTER_NAME\":{\n \"value\":cluster.cluster_name},\n \"ECR_REPO_URI\":{\n \"value\": eksecr.repository_uri}\n },\n build_spec=codebuild.BuildSpec.from_object(\n {\n \"version\":\"0.2\",\n \"phases\":{\n \"pre_build\":{\n \"commands\":[\n \"env\",\n \"export TAG=${CODEBUILD_RESOLVED_SOURCE_VERSION}\",\n \"/usr/local/bin/entrypoint.sh\"\n ]\n },\n \"build\":{\n \"commands\":[\n \"cd flask-docker-app\",\n \"docker build -t $ECR_REPO_URI:$TAG .\",\n \"$(aws ecr get-login --no-include-email)\",\n \"docker push $ECR_REPO_URI:$TAG\"\n ]\n },\n \"post_build\":{\n \"commands\":[\n \"kubectl get no\",\n \"kubectl set image deployment flask flask=$ECR_REPO_URI:$TAG\"\n ]\n }\n }\n }))\n repository.on_commit(\"Oncommit\", target=targets.CodeBuildProject(codebuild.Project.from_project_arn(self, \"OnommitEvents\" , project.project_arn)))\n \n eksecr.grant_pull_push(project.role)\n cluster.aws_auth.add_masters_role(project.role)\n project.add_to_role_policy(iam.PolicyStatement(\n actions=[\"eks:DescribeCluster\"],\n resources=[cluster.cluster_arn],\n ))\n core.CfnOutput(self,\"CodeCommitRepoName\",value=repository.repository_name)\n core.CfnOutput(self,\"CodeCommitRepoArn\",value=repository.repository_arn)\n core.CfnOutput(self,\"CodeCommitCloneUrlSsh\",value=repository.repository_clone_url_ssh)\n core.CfnOutput(self,\"CodeCommitCloneUrlHttp\",value=repository.repository_clone_url_http)\n \n ","sub_path":"cdk/cdk/cdk_stack.py","file_name":"cdk_stack.py","file_ext":"py","file_size_in_byte":4939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"589609895","text":"from ..models import MenuItem, Permission\nfrom django import template\nfrom django.contrib.auth.models import AnonymousUser\nfrom django.contrib.auth.context_processors import PermWrapper\n\n\nregister = template.Library()\n\n@register.simple_tag(name='menu')\ndef getBuildMenu(user, perms):\n items = []\n if user.username != AnonymousUser.username:\n perm = Permission.objects.filter(user=user)\n models = MenuItem.objects.order_by('super_item_id', 'order')\n for item in models:\n if item.super_item_id is None:\n if user.has_perm(item.permission.codename):\n items.append(item)\n return items","sub_path":"webadmin/main/templatetags/menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"267312442","text":"# -*- coding: utf-8 -*-\n'''\n__author__ = 'kongzixiang'\n'''\n\nimport numpy as np\nfrom PIL import Image\nfrom scipy.misc import imread\nimport tensorflow as tf\nimport tf_signet\nimport os\nimport glob\nfrom tf_cnn_model import TF_CNNModel\nfrom preprocess.normalize import preprocess_signature\nimport warnings\nfrom preprocess.img_thre_and_cut import thre_and_cut\nfrom preprocess.dateset_mode_convert import dataset_img_convert\n\nwarnings.filterwarnings(\"ignore\")\n\nmodel_weight_path = 'models/signetf_lambda0.95.pkl'\ndataset_path='./signatures/'\nmodel = TF_CNNModel(tf_signet, model_weight_path)\ndataset_img_convert(dataset_path)\n\ndef get_imgpath_and_labels(path):\n cate = [path + x for x in os.listdir(path) if os.path.isdir(path + x)]\n img_path = []\n labels = []\n for idx, folder in enumerate(cate):\n img_list = glob.glob(folder + '/*.jpg')\n img_list += glob.glob(folder + '/*.png')\n for im in img_list:\n img_path.append(im)\n labels.append(idx)\n return img_path\n\nsess = tf.Session()\nsess.run(tf.global_variables_initializer())\nwhile True:\n img=input('Input image filename(input close to over):')\n if img=='close':\n break\n else:\n try:\n filepath, tempfilename = os.path.split(img)\n filename, extension = os.path.splitext(tempfilename)\n path='./test_imgs2/%s.jpg'%(filename)\n im=Image.open(img)\n if im.mode!='L' or '.png' in tempfilename :\n im = thre_and_cut(img)\n im.save(path)\n target_sig =[imread(path)]\n except:\n print('Open Error! Try again!')\n continue\n else:\n # dataset_img_path='./sig_dataset/'\n # dataset_img_sigs_path=[os.path.join(dataset_img_path,img) for img in os.listdir(dataset_img_path)]\n dataset_img_sigs_path=get_imgpath_and_labels(dataset_path)\n dataset_sigs = [imread(path) for path in dataset_img_sigs_path]\n\n canvas_size = (500, 300)\n processed_target_sig = np.array([preprocess_signature(sig, canvas_size) for sig in target_sig])\n processed_dataset_sigs = np.array([preprocess_signature(sig, canvas_size) for sig in dataset_sigs])\n target_feature = model.get_feature_vector_multiple(sess,processed_target_sig, layer='fc2')\n dataset_features = model.get_feature_vector_multiple(sess,processed_dataset_sigs, layer='fc2')\n\n # print('Euclidean distance between signature from dataset')\n dists = [np.linalg.norm(u1 - u2) for u1 in target_feature for u2 in dataset_features]\n min_similarity=min(dists)\n min_index=dists.index(min_similarity)\n mach_img=dataset_img_sigs_path[min_index]\n print(min_similarity)\n print(\"最相似的图片是:{},相似度是:{}\".format(mach_img,min_similarity))\n match_image=Image.open(mach_img)\n match_image.show()","sub_path":"sig_match_test.py","file_name":"sig_match_test.py","file_ext":"py","file_size_in_byte":2965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"316831095","text":"# Copyright (c) 2020 Kaamiki Authors. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# ======================================================================\n\n\"\"\"Utility for logging all Kaamiki events.\"\"\"\n\nimport logging\nimport os\nimport sys\nfrom logging.handlers import RotatingFileHandler\nfrom pathlib import Path\nfrom typing import Any, Tuple\n\n\nclass Neo(type):\n \"\"\"\n Neo\n\n `Neo` is a Singleton class which follows something called as\n `Singleton Design` pattern. The Singleton pattern is a design\n pattern that restricts the instantiation of a class to one object.\n\n In simple terms, a singleton is something, which ensures that only\n one object of its kind exists and provides a single point of access\n to it.\n \"\"\"\n _instances = {}\n\n def __call__(cls, *args, **kwargs):\n if cls not in cls._instances:\n cls._instances[cls] = super(Neo, cls).__call__(*args, **kwargs)\n return cls._instances[cls]\n\n\nclass LogFormatter(logging.Formatter, metaclass=Neo):\n \"\"\"\n LogFormatter\n\n As name suggests, `LogFormatter` is a formatter class for\n formatting log files across various log levels. It implements a\n clean & uniform way of logging the records across all logging\n levels including exceptions.\n \"\"\"\n\n def __init__(self) -> None:\n \"\"\"Instantiate class.\"\"\"\n self.timestamp_format = \"%a %b %d, %Y %H:%M:%S\"\n self.log_format = (\"%(asctime)s.%(msecs)03d %(levelname)-8s \"\n \"%(process)6d {:>13}:%(lineno)04d %(message)s\")\n self.exc_format = \"{0} caused due to {1} in {2}() on line {3}.\"\n\n def formatException(self, exc_info: Tuple[Any, ...]) -> str:\n \"\"\"Format traceback message into string representation.\"\"\"\n return repr(super(LogFormatter, self).formatException(exc_info))\n\n def format(self, record: logging.LogRecord) -> str:\n \"\"\"Format output log message.\"\"\"\n # Minify longer file names with an ellipsis while logging.\n # This will ensure that the file names stay consistent\n # throughout the logs.\n if len(record.filename[:-3]) < 10:\n minified = record.filename\n else:\n minified = (record.filename[:10] +\n bool(record.filename[10:]) * \"...\")\n\n formatted = logging.Formatter(self.log_format.format(minified),\n self.timestamp_format).format(record)\n\n if record.exc_text:\n exc_msg = self.exc_format.format(\n record.exc_info[1].__class__.__name__,\n str(record.msg).lower(),\n record.funcName,\n record.exc_info[2].tb_lineno)\n raw = formatted.replace(\"\\n\", \"\")\n raw = raw.replace(\n str(record.exc_info[-2]), exc_msg).replace(\"ERR\", \"EXC\")\n formatted, _, _ = raw.partition(\"Traceback\")\n\n return formatted\n\n\nclass StreamFormatter(logging.StreamHandler, metaclass=Neo):\n \"\"\"\n StreamFormatter\n\n `StreamFormatter` is a traditional logging stream handler with\n taste of `Singleton` design pattern.\n \"\"\"\n\n def __init__(self) -> None:\n \"\"\"Instantiate class.\"\"\"\n super().__init__(sys.stdout)\n\n\nclass ArchiveHandler(RotatingFileHandler, metaclass=Neo):\n \"\"\"\n ArchiveHandler\n\n An `ArchiveHandler` is a rotating file handler class which\n creates an archive of the log to rollover once it reaches a\n predetermined size. When the log is about to be exceed the set\n size, the file is closed and a new log is silently opened for\n logging. This class ensures that the file won't grow indefinitely.\n \"\"\"\n\n def __init__(self,\n name: str,\n mode: str = \"a\",\n size: int = 0,\n backups: int = 0,\n encoding: str = None,\n delay: bool = False) -> None:\n \"\"\"\n Instantiate class.\n\n Args:\n name: Name of the log file.\n mode: Log file writing mode.\n size: Maximum file size limit for backup.\n backups: Total number of backup.\n encoding: File encoding.\n delay: Delay for backup.\n \"\"\"\n self._count = 0\n super().__init__(filename=name,\n mode=mode,\n maxBytes=size,\n backupCount=backups,\n encoding=encoding,\n delay=delay)\n\n def doRollover(self) -> None:\n \"\"\"Does a rollover.\"\"\"\n if self.stream:\n self.stream.close()\n\n if not self.delay:\n self.stream = self._open()\n\n self._count += 1\n self.rotate(self.baseFilename, f\"{self.baseFilename}.{self._count}\")\n\n\nclass SilenceOfTheLogs(object):\n \"\"\"\n SilenceOfTheLogs\n\n `SilenceOfTheLogs` is a custom logger which logs Kaamiki events\n silently. This logger follows `Singleton` design pattern and is\n equipped with RotatingFileHandler and custom formatters which\n enables sequential archiving and clean log formatting espectively.\n \"\"\"\n\n def __init__(self,\n name: str = None,\n level: str = \"debug\",\n size: int = None,\n backups: int = None) -> None:\n \"\"\"\n Instantiate class.\n\n Args:\n name: Name for log file.\n level: Default logging level to log messages.\n size: Maximum file size limit for backup.\n backups: Total number of backup.\n \"\"\"\n self._temp = os.path.abspath(sys.modules[\"__main__\"].__file__)\n self._name = name.lower() if name else Path(self._temp.lower()).stem\n self._name = self._name.replace(\" \", \"-\")\n self._level = level.upper()\n self._size = int(size) if size else 1000000\n self._backups = int(backups) if backups else 0\n self._logger = logging.getLogger()\n self._logger.setLevel(self._level)\n\n self._path = os.path.expanduser(\"~/.kaamiki/logs/\")\n\n if not os.path.exists(self._path):\n os.mkdir(self._path)\n\n self._path = \"\".join([self._path, \"{}.log\"])\n self._formatter = LogFormatter()\n\n @property\n def log(self) -> logging.Logger:\n \"\"\"Log Kaamiki events.\"\"\"\n # Archive the logs once their file size reaches 1 Mb.\n # See `ArchiveHandler()` for more information. You can change\n # the way archived logs are named using `ArchiveHandler()`.\n file_handler = ArchiveHandler(self._path.format(self._name),\n size=self._size,\n backups=self._backups)\n file_handler.setFormatter(self._formatter)\n self._logger.addHandler(file_handler)\n # Stream Handler will print duplicate logs if the same instance\n # of the log object is called multiple times. Unlike file\n # handler, stream handler doesn't support `Singleton` pattern.\n stream_handler = StreamFormatter()\n stream_handler.setFormatter(self._formatter)\n self._logger.addHandler(stream_handler)\n return self._logger\n","sub_path":"kaamiki/utils/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":7673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"270530775","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-import sys\n#\nimport sys, os\nfrom subprocess import run\n\n\ndef apt_update():\n \"\"\"\n Opdater Ubuntu\n :return: void\n \"\"\"\n try:\n res_update = run(['apt-get', 'update'])\n if res_update.returncode:\n raise Exception\n res_upgrade = run(['apt-get', 'upgrade', '-y'])\n if res_upgrade.returncode:\n raise Exception\n except Exception as err:\n print(err)\n sys.exit('Kan ikke opdatere systemet')\n\n\nif __name__ == \"__main__\":\n if os.geteuid() != 0:\n sys.exit('Scriptet skal udføres med root access')\n apt_update()\n","sub_path":"moduler/apt_update.py","file_name":"apt_update.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"337493969","text":"# Copyright (c) 2009-2019 The Regents of the University of Michigan\n# This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.\n\n# Maintainer: csadorf / All Developers are free to add commands for new features\n\nR\"\"\" Manage execution contexts.\n\nEvery hoomd simulation needs an execution context that describes what hardware it should execute on,\nthe MPI configuration for the job, etc...\n\"\"\"\n\nimport os\nimport hoomd\nfrom hoomd import _hoomd\nfrom hoomd import cite\nimport socket\nimport getpass\nimport platform\n\n# The following global variables keep track of the walltime and processing time since the import of hoomd\nimport time\nTIME_START = time.time()\nCLOCK_START = time.perf_counter()\n\n## Global Messenger\nmsg = None;\n\n## Global bibliography\nbib = None;\n\n## Global options\noptions = None;\n\n## Global variable that holds the MPI configuration\nmpi_conf = None;\n\n## Global variable that holds the execution configuration for reference by the python API\nexec_conf = None;\n\n## Current simulation context\ncurrent = None;\n\n_prev_args = None;\n\nclass SimulationContext(object):\n R\"\"\" Simulation context\n\n Store all of the context related to a single simulation, including the system state, forces, updaters, integration\n methods, and all other commands specified on this simulation. All such commands in hoomd apply to the currently\n active simulation context. You swap between simulation contexts by using this class as a context manager::\n\n\n sim1 = context.SimulationContext();\n sim2 = context.SimulationContext();\n with sim1:\n init.read_xml('init1.xml');\n lj = pair.lj(...)\n ...\n\n with sim2:\n init.read_xml('init2.xml');\n gauss = pair.gauss(...)\n ...\n\n # run simulation 1 for a bit\n with sim1:\n run(100)\n\n # run simulation 2 for a bit\n with sim2:\n run(100)\n\n # set_current sets the current context without needing to use with\n sim1.set_current()\n run(100)\n\n\n If you do not need to maintain multiple contexts, you can call `context.initialize()` to initialize a new context\n and erase the existing one::\n\n context.initialize()\n init.read_xml('init1.xml');\n lj = pair.lj(...)\n ...\n run(100);\n\n context.initialize()\n init.read_xml('init2.xml');\n gauss = pair.gauss(...)\n ...\n run(100)\n\n Attributes:\n sorter (:py:class:`hoomd.update.sort`): Global particle sorter.\n system_definition (:py:class:`hoomd.data.system_data`): System definition.\n\n The attributes are global to the context. User scripts may access documented attributes to control settings,\n access particle data, etc... See the linked documentation of each attribute for more details. For example,\n to disable the global sorter::\n\n c = context.initialize();\n c.sorter.disable();\n\n \"\"\"\n def __init__(self):\n ## Global variable that holds the SystemDefinition shared by all parts of hoomd\n self.system_definition = None;\n\n ## Global variable that holds the System shared by all parts of hoomd\n self.system = None;\n\n ## Global variable that holds the balanced domain decomposition in MPI runs if it is requested\n self.decomposition = None\n\n ## Global variable that holds the sorter\n self.sorter = None;\n\n ## Global variable that tracks the all of the force computes specified in the script so far\n self.forces = [];\n\n ## Global variable that tracks the all of the constraint force computes specified in the script so far\n self.constraint_forces = [];\n\n ## Global variable that tracks all the integration methods that have been specified in the script so far\n self.integration_methods = [];\n\n ## Global variable tracking the last _integrator set\n self.integrator = None;\n\n ## Global variable tracking all neighbor lists that have been created\n self.neighbor_lists = []\n\n ## Global variable tracking all the loggers that have been created\n self.loggers = [];\n\n ## Global variable tracking all the analyzers that have been created\n self.analyzers = [];\n\n ## Global variable tracking all the updaters that have been created\n self.updaters = [];\n\n ## Global variable tracking all the compute thermos that have been created\n self.thermos = [];\n\n ## Cached all group\n self.group_all = None;\n\n ## MPCD system\n self.mpcd = None;\n\n ## Stored reference to the reader that was used to initialize the system\n self.state_reader = None;\n\n def set_current(self):\n R\"\"\" Force this to be the current context\n \"\"\"\n global current\n\n current = self;\n\n def on_gpu(self):\n R\"\"\" Test whether this job is running on a GPU.\n\n Returns:\n True if this invocation of HOOMD-blue is executing on a GPU. False if it is on the CPU.\n \"\"\"\n global exec_conf\n return exec_conf.isCUDAEnabled()\n\n def __enter__(self):\n global current\n\n self.prev = current;\n current = self;\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n global current\n\n current = self.prev;\n\ndef initialize(args=None, memory_traceback=False, mpi_comm=None):\n R\"\"\" Initialize the execution context\n\n Args:\n args (str): Arguments to parse. When *None*, parse the arguments passed on the command line.\n memory_traceback (bool): If true, enable memory allocation tracking (*only for debugging/profiling purposes*)\n mpi_comm: Accepts an mpi4py communicator. Use this argument to perform many independent hoomd simulations\n where you communicate between those simulations using your own mpi4py code.\n\n :py:func:`hoomd.context.initialize()` parses the command line arguments given, sets the options and initializes MPI and GPU execution\n (if any). By default, :py:func:`hoomd.context.initialize()` reads arguments given on the command line. Provide a string to :py:func:`hoomd.context.initialize()`\n to set the launch configuration within the job script.\n\n :py:func:`hoomd.context.initialize()` can be called more than once in a script. However, the execution parameters are fixed on the first call\n and *args* is ignored. Subsequent calls to :py:func:`hoomd.context.initialize()` create a new :py:class:`SimulationContext` and set it current. This\n behavior is primarily to support use of hoomd in jupyter notebooks, so that a new clean simulation context is\n set when rerunning the notebook within an existing kernel.\n\n Example::\n\n from hoomd import *\n context.initialize();\n context.initialize(\"--mode=gpu --nrank=64\");\n context.initialize(\"--mode=cpu --nthreads=64\");\n\n world = MPI.COMM_WORLD\n comm = world.Split(world.Get_rank(), 0)\n hoomd.context.initialize(mpi_comm=comm)\n\n \"\"\"\n global mpi_conf, exec_conf, msg, options, current, _prev_args\n\n if mpi_conf is not None or exec_conf is not None:\n if args != _prev_args:\n msg.warning(\"Ignoring new options, cannot change execution mode after initialization.\\n\");\n current = SimulationContext();\n return current\n\n _prev_args = args;\n\n options = hoomd.option.options();\n hoomd.option._parse_command_line(args);\n\n # Check to see if we are built without MPI support and the user used mpirun\n if (not _hoomd.is_MPI_available() and not options.single_mpi\n and ( 'OMPI_COMM_WORLD_RANK' in os.environ\n or 'MV2_COMM_WORLD_LOCAL_RANK' in os.environ\n or 'PMI_RANK' in os.environ\n or 'ALPS_APP_PE' in os.environ)\n ):\n print('HOOMD-blue is built without MPI support, but seems to have been launched with mpirun');\n print('exiting now to prevent many sequential jobs from starting');\n raise RuntimeError('Error launching hoomd')\n\n # create the MPI configuration\n mpi_conf = _create_mpi_conf(mpi_comm, options)\n\n # set options on messenger object\n msg = _create_messenger(mpi_conf, options)\n\n # output the version info on initialization\n msg.notice(1, _hoomd.output_version_info())\n\n # ensure creation of global bibliography to print HOOMD base citations\n cite._ensure_global_bib()\n\n # create the parallel execution configuration\n exec_conf = _create_exec_conf(mpi_conf, msg, options);\n\n # set memory tracing option\n exec_conf.setMemoryTracing(memory_traceback)\n\n current = SimulationContext();\n return current\n\n## Initializes the MPI configuration\n#\n# \\internal\ndef _create_mpi_conf(mpi_comm, options):\n global mpi_conf\n\n # use a cached MPI configuration if available\n if mpi_conf is not None:\n return mpi_conf\n\n mpi_available = _hoomd.is_MPI_available();\n\n # create the specified configuration\n if mpi_comm is None:\n mpi_conf = _hoomd.MPIConfiguration();\n else:\n if not mpi_available:\n raise RuntimeError(\"mpi_comm is not supported in serial builds\");\n\n handled = False;\n\n # pass in pointer to MPI_Comm object provided by mpi4py\n try:\n import mpi4py\n if isinstance(mpi_comm, mpi4py.MPI.Comm):\n addr = mpi4py.MPI._addressof(mpi_comm);\n mpi_conf = _hoomd.MPIConfiguration._make_mpi_conf_mpi_comm(addr);\n handled = True\n except ImportError:\n # silently ignore when mpi4py is missing\n pass\n\n # undocumented case: handle plain integers as pointers to MPI_Comm objects\n if not handled and isinstance(mpi_comm, int):\n mpi_conf = _hoomd.MPIConfiguration._make_mpi_conf_mpi_comm(mpi_comm);\n handled = True\n\n if not handled:\n raise RuntimeError(\"Invalid mpi_comm object: {}\".format(mpi_comm));\n\n if options.nrank is not None:\n # check validity\n nrank = options.nrank\n if (mpi_conf.getNRanksGlobal() % nrank):\n raise RuntimeError('Total number of ranks is not a multiple of --nrank');\n\n # split the communicator into partitions\n mpi_conf.splitPartitions(nrank)\n\n return mpi_conf\n\n## Initializes the Messenger\n# \\internal\ndef _create_messenger(mpi_config, options):\n global msg\n\n # use a cached messenger if available\n if msg is not None:\n return msg\n\n msg = _hoomd.Messenger(mpi_config)\n\n # try to detect if we're running inside an MPI job\n inside_mpi_job = mpi_config.getNRanksGlobal() > 1\n if ('OMPI_COMM_WORLD_RANK' in os.environ or\n 'MV2_COMM_WORLD_LOCAL_RANK' in os.environ or\n 'PMI_RANK' in os.environ or\n 'ALPS_APP_PE' in os.environ):\n inside_mpi_job = True\n\n # only open python stdout/stderr in non-MPI runs\n if not inside_mpi_job:\n msg.openPython();\n\n if options.notice_level is not None:\n msg.setNoticeLevel(options.notice_level);\n\n if options.msg_file is not None:\n msg.openFile(options.msg_file);\n\n if options.shared_msg_file is not None:\n if not _hoomd.is_MPI_available():\n hoomd.context.msg.error(\"Shared log files are only available in MPI builds.\\n\");\n raise RuntimeError('Error setting option');\n msg.setSharedFile(options.shared_msg_file);\n\n return msg\n\n## Initializes the execution configuration\n#\n# \\internal\ndef _create_exec_conf(mpi_config, msg, options):\n global exec_conf\n\n # use a cached execution configuration if available\n if exec_conf is not None:\n return exec_conf\n\n if options.mode == 'auto':\n exec_mode = _hoomd.ExecutionConfiguration.executionMode.AUTO;\n elif options.mode == \"cpu\":\n exec_mode = _hoomd.ExecutionConfiguration.executionMode.CPU;\n elif options.mode == \"gpu\":\n exec_mode = _hoomd.ExecutionConfiguration.executionMode.GPU;\n else:\n raise RuntimeError(\"Invalid mode\");\n\n # convert None options to defaults\n if options.gpu is None:\n gpu_id = [];\n else:\n gpu_id = options.gpu;\n\n gpu_vec = _hoomd.std_vector_int()\n for gpuid in gpu_id:\n gpu_vec.append(gpuid)\n\n # create the specified configuration\n exec_conf = _hoomd.ExecutionConfiguration(exec_mode, gpu_vec, options.min_cpu, options.ignore_display, mpi_conf, msg);\n\n # if gpu_error_checking is set, enable it on the GPU\n if options.gpu_error_checking:\n exec_conf.setCUDAErrorChecking(True);\n\n if _hoomd.is_TBB_available():\n # set the number of TBB threads as necessary\n if options.nthreads != None:\n exec_conf.setNumThreads(options.nthreads)\n\n exec_conf = exec_conf;\n\n return exec_conf;\n\n## \\internal\n# \\brief Throw an error if the context is not initialized\ndef _verify_init():\n global exec_conf, msg, current\n\n if exec_conf is None:\n raise RuntimeError(\"Call context.initialize() before any method\")\n\n## \\internal\n# \\brief Gather context from the environment\nclass ExecutionContext(hoomd.meta._metadata):\n ## \\internal\n # \\brief Constructs the context object\n def __init__(self):\n hoomd.meta._metadata.__init__(self)\n self.metadata_fields = [\n 'hostname', 'gpu', 'mode', 'num_ranks',\n 'username', 'wallclocktime', 'cputime',\n 'job_id', 'job_name'\n ]\n if _hoomd.is_TBB_available():\n self.metadata_fields.append('num_threads')\n\n ## \\internal\n # \\brief Return the execution configuration if initialized or raise exception.\n def _get_exec_conf(self):\n global exec_conf\n if exec_conf is None:\n raise RuntimeError(\"Not initialized.\")\n else:\n return exec_conf\n\n # \\brief Return the network hostname.\n @property\n def hostname(self):\n return socket.gethostname()\n\n # \\brief Return the name of the GPU used in GPU mode.\n @property\n def gpu(self):\n n_gpu = self._get_exec_conf().getNumActiveGPUs()\n return [self._get_exec_conf().getGPUName(i) for i in range(n_gpu)]\n\n # \\brief Return the execution mode\n @property\n def mode(self):\n if self._get_exec_conf().isCUDAEnabled():\n return 'gpu';\n else:\n return 'cpu';\n\n # \\brief Return the number of ranks.\n @property\n def num_ranks(self):\n return hoomd.comm.get_num_ranks()\n\n # \\brief Return the username.\n @property\n def username(self):\n return getpass.getuser()\n\n # \\brief Return the wallclock time since the import of hoomd\n @property\n def wallclocktime(self):\n return time.time() - TIME_START\n\n # \\brief Return the CPU clock time since the import of hoomd\n @property\n def cputime(self):\n return time.perf_counter() - CLOCK_START\n\n # \\brief Return the job id\n @property\n def job_id(self):\n if 'PBS_JOBID' in os.environ:\n return os.environ['PBS_JOBID'];\n elif 'SLURM_JOB_ID' in os.environ:\n return os.environ['SLURM_JOB_ID'];\n else:\n return '';\n\n # \\brief Return the job name\n @property\n def job_name(self):\n if 'PBS_JOBNAME' in os.environ:\n return os.environ['PBS_JOBNAME'];\n elif 'SLURM_JOB_NAME' in os.environ:\n return os.environ['SLURM_JOB_NAME'];\n else:\n return '';\n\n # \\brief Return the number of CPU threads\n @property\n def num_threads(self):\n if not _hoomd.is_TBB_available():\n msg.warning(\"HOOMD was compiled without thread support, returning None\\n\");\n return None\n else:\n return self._get_exec_conf().getNumThreads();\n\n## \\internal\n# \\brief Gather context about HOOMD\nclass HOOMDContext(hoomd.meta._metadata):\n ## \\internal\n # \\brief Constructs the context object\n def __init__(self):\n hoomd.meta._metadata.__init__(self)\n self.metadata_fields = [\n 'hoomd_version', 'hoomd_git_sha1', 'hoomd_git_refspec',\n 'hoomd_compile_flags', 'cuda_version', 'compiler_version',\n ]\n\n # \\brief Return the hoomd version.\n @property\n def hoomd_version(self):\n return _hoomd.__version__\n\n # \\brief Return the hoomd git hash\n @property\n def hoomd_git_sha1(self):\n return _hoomd.__git_sha1__\n\n # \\brief Return the hoomd git refspec\n @property\n def hoomd_git_refspec(self):\n return _hoomd.__git_refspec__\n\n # \\brief Return the hoomd compile flags\n @property\n def hoomd_compile_flags(self):\n return _hoomd.hoomd_compile_flags();\n\n # \\brief Return the cuda version\n @property\n def cuda_version(self):\n return _hoomd.__cuda_version__\n\n # \\brief Return the compiler version\n @property\n def compiler_version(self):\n return _hoomd.__compiler_version__\n","sub_path":"hoomd/context.py","file_name":"context.py","file_ext":"py","file_size_in_byte":16928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"115450546","text":"\"\"\"\nPlease write you name here: Freddie Ager\n\"\"\"\n\nimport csv\nfrom datetime import datetime, timedelta\n\n# Hard coded for this test, could be passed in a real world program\nopening_time = datetime(1900, 1, 1, 9, 0, 0)\nclosing_time = datetime(1900, 1, 1, 23, 0, 0)\n\nclass Shift:\n \"\"\"\n\n Simple class to hold the timings a shift runs for, when the worker is on their\n break and how much they are paid (per 10 minutes of work)\n \"\"\"\n def __init__(self, start, end, break_start, break_end, rate):\n self.start = start\n self.end = end\n self.break_start = break_start\n self.break_end = break_end\n self.rate = rate\n\n def working(self, time):\n #Returns a boolean value of if the worker is in shift for a given time\n return ((self.start <= time < self.end) and\n not (self.break_start <= time < self.break_end))\n\ndef process_break(break_string, start_time):\n \"\"\"\n\n Function to handle the different formats managers can use for break data.\n Makes the assumption that break timings are always written with a dash between\n them, but could be extended for other formats.\n \"\"\"\n time_strings = break_string.split(\"-\")\n time_strings = [\"%.2f\" % float(string.replace(\"PM\",\"\")) for string in time_strings]\n #Removes PM characters and make sure all times are written to 2 decimal places\n times = [datetime.strptime(string, \"%H.%M\") for string in time_strings]\n if(times[0] < start_time):\n #Break cannot be before start of shift so assumes it is PM and adjusts accordingly\n times = [time + timedelta(hours=12) for time in times]\n\n return(times)\n\ndef process_shifts(path_to_csv):\n \"\"\"\n\n :param path_to_csv: The path to the work_shift.csv\n :type string:\n :return: A dictionary with time as key (string) with format %H:%M\n (e.g. \"18:00\") and cost as value (Number)\n For example, it should be something like :\n {\n \"17:00\": 50,\n \"22:00: 40,\n }\n In other words, for the hour beginning at 17:00, labour cost was\n 50 pounds\n :rtype dict:\n\n \"\"\"\n\n shifts = [] #List of Shift objects\n\n with open(path_to_csv) as shift_file:\n shift_reader = csv.reader(shift_file)\n next(shift_reader) #Removes header\n for row in shift_reader:\n start_time = datetime.strptime(row[3], \"%H:%M\")\n end_time = datetime.strptime(row[1], \"%H:%M\")\n pay_rate = float(row[2])/6\n #Rate for 10 minutes (assumes that breaks and shifts are timed to the nearest 10 minute interval)\n break_times = process_break(row[0], start_time)\n shifts.append(Shift(start_time, end_time, break_times[0],\n break_times[1], pay_rate))\n\n hour_iterator = opening_time\n shift_costs = {}\n\n while(hour_iterator < closing_time):\n # Iterates through each hour to calculate hourly labour costs\n minute_iterator = hour_iterator\n cost = 0\n for i in range (6):\n #Iterates through each 10 minute interval within the shift\n for shift in shifts:\n if(shift.working(minute_iterator)):\n cost += shift.rate\n minute_iterator += timedelta(seconds = 600)\n shift_costs[hour_iterator.strftime(\"%H:%M\")] = cost\n hour_iterator += timedelta(hours = 1)\n\n return(shift_costs)\n\ndef process_sales(path_to_csv):\n \"\"\"\n\n :param path_to_csv: The path to the transactions.csv\n :type string:\n :return: A dictionary with time (string) with format %H:%M as key and\n sales as value (string),\n and corresponding value with format %H:%M (e.g. \"18:00\"),\n and type float)\n For example, it should be something like :\n {\n \"17:00\": 250,\n \"22:00\": 0,\n },\n This means, for the hour beginning at 17:00, the sales were 250 dollars\n and for the hour beginning at 22:00, the sales were 0.\n\n :rtype dict:\n \"\"\"\n\n sales = {}\n with open(path_to_csv) as sales_file:\n sales_reader = csv.reader(sales_file)\n next(sales_reader) #Removes header\n hour_iterator = opening_time\n\n while(hour_iterator < closing_time):\n #The dictionary is first filled with time values and sales values of 0,\n #in case there are no sales for a given hour\n sales[hour_iterator.strftime(\"%H:%M\")] = 0\n hour_iterator += timedelta(hours = 1)\n\n for row in sales_reader:\n #Iterates through each row adding any sales to the appropriate time slot\n amount = float(row[0])\n sale_time = datetime.strptime(row[1], \"%H:%M\")\n time_slot = \"%s:00\" % (sale_time.hour)\n sales[time_slot] += amount\n\n return(sales)\n\ndef compute_percentage(shifts, sales):\n \"\"\"\n\n :param shifts:\n :type shifts: dict\n :param sales:\n :type sales: dict\n :return: A dictionary with time as key (string) with format %H:%M and\n percentage of labour cost per sales as value (float),\n If the sales are null, then return -cost instead of percentage\n For example, it should be something like :\n {\n \"17:00\": 20,\n \"22:00\": -40,\n }\n :rtype: dict\n \"\"\"\n\n percentages = {}\n\n for time in shifts:\n if(sales[time] == 0):\n percentage = - float(shifts[time])\n else:\n percentage = shifts[time]/sales[time]\n percentages[time] = percentage\n\n return percentages\n\ndef best_and_worst_hour(percentages):\n \"\"\"\n\n Args:\n percentages: output of compute_percentage\n Return: list of strings, the first element should be the best hour,\n the second (and last) element should be the worst hour. Hour are\n represented by string with format %H:%M\n e.g. [\"18:00\", \"20:00\"]\n\n \"\"\"\n # Best value will be as close to 0 as possible without being negative, and the\n # worst value will be the most negative value or, if there is none, as high\n # as possible\n best_val, worst_val = -10000, 0 #Starting values with arbitrarily high best_val\n\n for time in percentages:\n if((best_val < 0) and (percentages[time] > best_val)):\n #If the current best value is negative (i.e. no sales) then any lower cost will be an improvement\n best_val = percentages[time]\n best_str = time\n\n elif(0 <= percentages[time] < best_val):\n #If there is currently a positive best value, a lower percentage is an improvement\n best_val = percentages[time]\n best_str = time\n\n elif((percentages[time] < 0) and (percentages[time] < worst_val)):\n #If the value is negative and worse than the current worst value, it will be even worse\n worst_val = percentages[time]\n worst_str = time\n\n elif((worst_val >= 0) and (percentages[time] > worst_val)):\n #If the worst value is currently positive then a higher percentage labour cost will be worse\n worst_val = percentages[time]\n worst_str = time\n\n return [best_str, worst_str]\n\ndef main(path_to_shifts, path_to_sales):\n \"\"\"\n Do not touch this function, but you can look at it, to have an idea of\n how your data should interact with each other\n \"\"\"\n\n shifts_processed = process_shifts(path_to_shifts)\n sales_processed = process_sales(path_to_sales)\n percentages = compute_percentage(shifts_processed, sales_processed)\n best_hour, worst_hour = best_and_worst_hour(percentages)\n return best_hour, worst_hour\n\nif __name__ == '__main__':\n # You can change this to test your code, it will not be used\n path_to_sales = \"transactions.csv\"\n path_to_shifts = \"work_shifts.csv\"\n best_hour, worst_hour = main(path_to_shifts, path_to_sales)\n\n\n# Please write you name here: Freddie Ager\n","sub_path":"Freddies-Code/Python/Tenzo/Tenzo.py","file_name":"Tenzo.py","file_ext":"py","file_size_in_byte":7756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"148558267","text":"import random\n\n# 函数功能:递归\n# 算法时间复杂度:O(N)\ndef Accumulate(number):\n return number>=1 and number+Accumulate(number-1)\n######################下面代码是测试模块代码##################################\n\n# 函数功能: 求1+2+...+n\n# 基本思路:直接相加\n# 算法时间复杂度:O(N)\ndef Accumulate_right1(number):\n if number== None or number==0: # 若输入数组为空,直接输出0\n return 0\n count = 0 # 初始化计数器,用来记录总和\n for i in range(number+1):\n count=count+i\n return count\n\n\n# 函数功能: 求1+2+...+n\n# 基本思路:等差数列的求和公式sum=(a1+an)*n/2\n# 算法时间复杂度:O(N)\ndef Accumulate_right2(number):\n if number== None or number==0: # 若输入数组为空,直接输出0\n return 0\n return int((1+number)*number/2)\n\n\nif __name__==\"__main__\":\n\n # 采用对数器方法进行对所写代码进行验证,找到数组中的逆序对个数\n errorCount = 0 # 记录测试过程中算法求解错误的次数\n for i in range(10000):\n number = random.randint(0,100) # 生成一个随机数\n right =Accumulate_right1(number) # 对照组实验,思路一\n test = Accumulate(number) # 改进后算法,思路二\n if right == test:\n print(\"第%d次测试:测试准确\" % (i))\n else:\n print(\"第%d次测试:测试错误\" % (i))\n errorCount = errorCount + 1\n print(\"测试过程中算法求解错误的次数%d\" % (errorCount))","sub_path":"chap6/64_Accumulate/Accumulate.py","file_name":"Accumulate.py","file_ext":"py","file_size_in_byte":1640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"498649372","text":"#######################################################\n# ___ __ ____ _ _ ____ __ ___ _ _ ____ #\n# / __)/ \\( _ \\( \\/ )( _ \\( )/ __)/ )( \\(_ _) #\n#( (__( O )) __/ ) / ) / )(( (_ \\) __ ( )( #\n# \\___)\\__/(__) (__/ (__\\_)(__)\\___/\\_)(_/ (__) #\n# __ ____ ____ ____ ____ _ _ ____ __ _ #\n# _( )( __)( __)( __) (_ _)/ )( \\( __) ( / ) #\n#/ \\) \\ ) _) ) _) ) _) )( ) __ ( ) _) ) ( #\n#\\____/(____)(__) (__) (__) \\_)(_/(____) (__\\_) #\n#######################################################\n#This script is copyrighted. Paste this template in the script and don't cut it\nfrom chatterbot import ChatBot #this imports chatbot.It is a library that helps making talking bots!\nfrom chatterbot.trainers import ListTrainer # method to train the chatbot \n\nbot = ChatBot('Hello Python')# It is the name of bot we set.\nbot.set_trainer(ListTrainer) #set the trainer to respond\n\nconversation = open('chats.txt','r').readlines() #The file through which bot uses basic commands.\n\nbot.train(conversation) # train the bot to how to talk\n\nwhile True:\n message = input('You:')\n if message.strip()!= 'Bye':\n reply = bot.get_response(message)\n print('Hello Python:',reply)\n if message.strip()=='Bye':\n print('Hello Python:Bye Jeff,Have nice day from Hello Python')\n break\n#Copyrights don't belong to AristodamusAdairs \n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"120780056","text":"from flask import Flask,redirect,request,Blueprint,url_for,session,render_template\nfrom app.student_profiles.models import public_details\nfrom app.peer_rating.models import rate_record\nfrom app import db\nfrom app.login.models import User\n\nmod_ratePeer = Blueprint('mod_ratePeer',__name__)\n@mod_ratePeer.route('/rateStudent',methods = ['POST'])\ndef rate_student():\n rating = request.form['rating']\n rating_for = request.form['rating_for']\n rating_by = session['user_id']\n\n records = rate_record.query.filter_by(rating_for = rating_for).all()\n record = None\n for rate in records:\n if rate.rating_by == rating_by:\n record = rate\n break\n if record is None:\n record = rate_record(rating_for,rating_by,int(rating))\n db.session.add(record)\n rated_person = public_details.query.filter_by(roll_no = int(rating_for)).first()\n rated_person.rate_sum = rated_person.rate_sum + int(rating)\n rated_person.rate_count += 1\n db.session.commit()\n else:\n prev_rating = record.rating\n rated_person = public_details.query.filter_by(roll_no = int(rating_for)).first()\n rated_person.rate_sum -= prev_rating\n rated_person.rate_sum += int(rating)\n record.rating = int(rating)\n db.session.commit()\n rating1 = rated_person.rate_sum / rated_person.rate_count\n return render_template('student_details.html',detail1=rated_person,rating = rating1)\n\n \n","sub_path":"app/peer_rating/controllers.py","file_name":"controllers.py","file_ext":"py","file_size_in_byte":1469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"597817230","text":"from giskardpy.plugin import PluginBehavior\nfrom giskardpy import logging\nfrom sortedcontainers import SortedList\n\nclass TreeManager(object):\n\n class ManagerNode(object):\n def __init__(self, node, parent, position):\n \"\"\"\n :param node: the behavior that is represented by this ManagerNode\n :type node: py_trees.behaviour.Behaviour\n :param parent: the parent of the behavior that is represented by this ManagerNode\n :type parent: py_trees.behaviour.Behaviour\n :param position: the position of the node in the list of children of the parent\n :type position: int\n \"\"\"\n self.node = node\n self.parent = parent\n self.position = position\n self.disabled_children = SortedList()\n self.enabled_children = SortedList()\n\n def __lt__(self, other):\n return self.position < other.position\n\n def __gt__(self, other):\n return self.position > other.position\n\n def __eq__(self, other):\n return self.node == other.node and self.parent == other.parent\n\n def disable_child(self, manager_node):\n \"\"\"\n marks the given manager node as disabled in the internal tree representation and removes it to the behavior tree\n :param manager_node:\n :type manager_node: TreeManager.ManagerNode\n :return:\n \"\"\"\n self.enabled_children.remove(manager_node)\n self.disabled_children.add(manager_node)\n if isinstance(self.node, PluginBehavior):\n self.node.remove_plugin(manager_node.node.name)\n else:\n self.node.remove_child(manager_node.node)\n\n def enable_child(self, manager_node):\n \"\"\"\n marks the given manager node as enabled in the internal tree representation and adds it to the behavior tree\n :param manager_node:\n :type manager_node: TreeManager.ManagerNode\n :return:\n \"\"\"\n self.disabled_children.remove(manager_node)\n self.enabled_children.add(manager_node)\n if isinstance(self.node, PluginBehavior):\n self.node.add_plugin(manager_node.node)\n else:\n idx = self.enabled_children.index(manager_node)\n self.node.insert_child(manager_node.node, idx)\n\n def add_child(self, manager_node):\n \"\"\"\n adds the given manager node to the internal tree map and the corresponding behavior to the behavior tree\n :param manager_node:\n :type manager_node: TreeManager.ManagerNode\n :return:\n \"\"\"\n if isinstance(self.node, PluginBehavior):\n self.enabled_children.add(manager_node)\n self.node.add_plugin(manager_node.node)\n else:\n if manager_node.position < 0:\n manager_node.position = 0\n if self.enabled_children:\n manager_node.position = max(manager_node.position, self.enabled_children[-1].position + 1)\n if self.disabled_children:\n manager_node.position = max(manager_node.position, self.disabled_children[-1].position + 1)\n idx = manager_node.position\n else:\n idx = self.disabled_children.bisect_left(manager_node)\n for c in self.disabled_children.islice(start=idx):\n c.position += 1\n idx = self.enabled_children.bisect_left(manager_node)\n for c in self.enabled_children.islice(start=idx):\n c.position += 1\n self.node.insert_child(manager_node.node, idx)\n self.enabled_children.add(manager_node)\n\n def remove_child(self, manager_node):\n \"\"\"\n removes the given manager_node from the internal tree map and the corresponding behavior from the behavior tree\n :param manager_node:\n :type manager_node: TreeManager.ManagerNode\n :return:\n \"\"\"\n if isinstance(self.node, PluginBehavior):\n if manager_node in self.enabled_children:\n self.enabled_children.remove(manager_node)\n self.node.remove_plugin(manager_node.node.name)\n elif manager_node in self.disabled_children:\n self.disabled_children.remove(manager_node)\n else:\n raise RuntimeError('could not remove node from parent. this probably means that the tree is inconsistent')\n else:\n if manager_node in self.enabled_children:\n self.enabled_children.remove(manager_node)\n self.node.remove_child(manager_node.node)\n elif manager_node in self.disabled_children:\n self.disabled_children.remove(manager_node)\n else:\n raise RuntimeError('could not remove node. this probably means that the tree is inconsistent')\n idx = self.disabled_children.bisect_right(manager_node)\n for c in self.disabled_children.islice(start=idx):\n c.position -= 1\n idx = self.enabled_children.bisect_right(manager_node)\n for c in self.enabled_children.islice(start=idx):\n c.position -= 1\n\n def __init__(self, tree):\n self.tree = tree\n self.tree_nodes = {}\n self.__init_map(tree.root, None, 0)\n\n def __init_map(self, node, parent, idx):\n \"\"\"\n initialises the internal map that represents the behavior tree. This method calls itself recursively for every\n node in the tree\n :param node: the root node of the behavior tree\n :param parent: None if root\n :param idx: 0 if root\n :return:\n \"\"\"\n manager_node = TreeManager.ManagerNode(node=node, parent=parent, position=idx)\n if parent is not None:\n parent.enabled_children.add(manager_node)\n if isinstance(node, PluginBehavior):\n children = node.get_plugins()\n for child_name in children:\n child_node = TreeManager.ManagerNode(node=children[child_name], parent=manager_node, position=0)\n self.tree_nodes[child_name] = child_node\n manager_node.enabled_children.add(child_node)\n self.tree_nodes[node.name] = manager_node\n for idx, child in enumerate(node.children):\n self.__init_map(child, manager_node, idx)\n\n\n def disable_node(self, node_name):\n \"\"\"\n disables the node with the given name\n :param node_name: the name of the node\n :return:\n \"\"\"\n t = self.tree_nodes[node_name]\n if t.parent is not None:\n return t.parent.disable_child(t)\n else:\n logging.logwarn('cannot disable root node')\n return False\n\n\n def enable_node(self, node_name):\n \"\"\"\n enables the node with the given name\n :param node_name: the name of the node\n :type node_name: str\n :return:\n \"\"\"\n t = self.tree_nodes[node_name]\n if t.parent is not None:\n t.parent.enable_child(t)\n else:\n logging.loginfo('root node')\n\n\n def insert_node(self, node, parent_name, position=-1):\n \"\"\"\n inserts a node into the behavior tree.\n :param node: the node that will be inserted\n :type node: py_trees.behaviour.Behaviour\n :param parent_name: the name of the parent node where the node will be inserted\n :type parent_name: str\n :param position: the node will be inserted as the nth child with n = len([x for x in children if x.position < position])\n :type position: int\n :return:\n \"\"\"\n if node.name in self.tree_nodes:\n raise ValueError('node with that name already exists')\n parent = self.tree_nodes[parent_name]\n tree_node = TreeManager.ManagerNode(node=node, parent=parent, position=position)\n parent.add_child(tree_node)\n self.tree_nodes[node.name] = tree_node\n\n def remove_node(self, node_name):\n \"\"\"\n removes a node from the behavior tree\n :param node_name: the name of the node that will be removed\n :type node_name: str\n :return:\n \"\"\"\n node = self.tree_nodes[node_name]\n parent = node.parent\n del self.tree_nodes[node_name]\n parent.remove_child(node)\n\n def get_node(self, node_name):\n \"\"\"\n returns the behavior with the given name\n :param node_name:\n :type node_name: str\n :return: the behavior with the given name\n :rtype py_trees.behaviour.Behaviour:\n \"\"\"\n return self.tree_nodes[node_name].node\n\n","sub_path":"src/giskardpy/tree_manager.py","file_name":"tree_manager.py","file_ext":"py","file_size_in_byte":8979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"405404141","text":"import numpy as np\n\n\n\nhead = \"\"\"2.0.0\nfile=output_vol/receivers\nlength=607000\nsteps=120000\nstride=40\ncpu_buffer_size=1\ngpu_buffer_size=200\nnum_writes=1\ndegree=3\n\ncoordinates\n\"\"\"\ncount = 0\n\n\nnz = 196\ndh = 8 \nwith open(\"receiver.txt\", \"w\") as fid:\n fid.write(head)\n\nz = -620\nwith open(\"receiver.txt\", \"a+\") as fid:\n for y in range(100, 15100, 50): \n for x in range(100, 17350, 50):\n count += 1\n fid.write(f\"0 {x:.2f} {y:.2f} {z:.2f}\\n\")\n\nz = -1280\nwith open(\"receiver.txt\", \"a+\") as fid:\n for y in range(100, 15100, 50): \n for x in range(100, 17350, 50):\n count += 1\n fid.write(f\"0 {x:.2f} {y:.2f} {z:.2f}\\n\")\n\ndepth = -1580\nwith open(\"receiver.txt\", \"a+\") as fid:\n nx, ny = 1000, 1000\n left1, right1, bot1, top1 = 2800, 15600, 3000, 16000\n dx1 = (right1 - left1) / nx \n dy1 = (top1 - bot1) / ny \n left2, right2, bot2, top2 = 2800, 19600, 11900, 12400\n dx2 = (right2 - left2) / nx \n dy2 = (top2 - bot2) / ny \n for z in range(0, depth, -20):\n for i in range(1000):\n count += 2\n fid.write(f\"0 {i * dx1 + left1:.2f} {i * dy1 + bot1:.2f} {z}\\n\")\n fid.write(f\"0 {i * dx2 + left2:.2f} {i * dy2 + bot2:.2f} {z}\\n\")\n for z in range(depth, -8000, -20):\n for i in range(1000):\n count += 2\n fid.write(f\"0 {i * dx1 + left1:.2f} {i * dy1 + bot1:.2f} {z}\\n\")\n fid.write(f\"0 {i * dx2 + left2:.2f} {i * dy2 + bot2:.2f} {z}\\n\")\n\n\nz = -2560\nwith open(\"receiver.txt\", \"a+\") as fid:\n for y in range(100, 15100, 50): \n for x in range(100, 17350, 50):\n count += 1\n fid.write(f\"0 {x:.2f} {y:.2f} {z:.2f}\\n\")\nprint(f\"count = {count}, GO modify receiver.txt manually!\\n\")\n","sub_path":"high_f/la_habra_large_100120/test_small_awp_dm2/make_receiver.py","file_name":"make_receiver.py","file_ext":"py","file_size_in_byte":1751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"404304165","text":"from utilities import *\nfrom combat_standard_routines import *\nfrom toee import *\n\n\ndef san_dialog( attachee, triggerer ):\n\tif (not attachee.has_met( triggerer )):\n\t\ttriggerer.begin_dialog( attachee, 100 )\n\telif (game.global_flags[91] == 1):\n\t\ttriggerer.begin_dialog( attachee, 230 )\n\telse:\n\t\ttriggerer.begin_dialog( attachee, 200 )\n\treturn SKIP_DEFAULT\n\ndef san_dying( attachee, triggerer ):\n\tif should_modify_CR( attachee ):\n\t\tmodify_CR( attachee, get_av_level() )\n\tgame.quests[39].state = qs_botched\n\tgame.global_flags[88] = 1\n\treturn RUN_DEFAULT\n\n\ndef san_resurrect( attachee, triggerer ):\n\tgame.global_flags[88] = 0\n\treturn RUN_DEFAULT\n\n\ndef set_hostel_flag( attachee, triggerer ):\n\tgame.global_flags[289] = 1\n\tgame.timevent_add( hostel_room_no_longer_available, (), 86400000 )\n\tgame.sleep_status_update()\n\treturn RUN_DEFAULT\n\t\n\ndef hostel_room_no_longer_available():\n\tgame.global_flags[289] = 0\n\tgame.sleep_status_update()\n\treturn RUN_DEFAULT","sub_path":"scr/py00110dick.py","file_name":"py00110dick.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"44459718","text":"import os\nfrom app import app\nfrom unittest import TestCase\nfrom app.models import db, connect_db, User, Trip, Meal, Ingredient, TripMeal\nfrom datetime import datetime\nfrom flask import json\nos.environ['DATABASE_URL'] = \"postgresql:///food_planner_test\"\nfrom app.api_requests import get_nutrition_data\n\ndb.drop_all()\ndb.create_all()\n\napp.config['WTF_CSRF_ENABLED'] = False\n\n\n# seed the test database with some meals\n\n# breakfast\nb1 = Meal(title=\"Oatmeal\", \n type_=\"breakfast\"\n )\n\noats = Ingredient.create_ingredient(get_nutrition_data(368739))\nb1.ingredients.append(oats)\n\nraisins = Ingredient.create_ingredient(get_nutrition_data(408107))\nb1.ingredients.append(raisins)\n\nsugar = Ingredient.create_ingredient(get_nutrition_data(519364))\nb1.ingredients.append(sugar)\n\n\n# lunch\nl1 = Meal(title=\"Pita and Hummus\", \n type_=\"lunch\"\n )\n\npita = Ingredient.create_ingredient(get_nutrition_data(384233))\nl1.ingredients.append(pita)\n\nhummus = Ingredient.create_ingredient(get_nutrition_data(475281))\nl1.ingredients.append(hummus)\n\n\n# dinner\nd1 = Meal(title=\"Rice and Beans\", \n type_=\"dinner\"\n )\n\nrice = Ingredient.create_ingredient(get_nutrition_data(447921))\nd1.ingredients.append(rice)\n\nbeans = Ingredient.create_ingredient(get_nutrition_data(381573))\nd1.ingredients.append(beans)\n\ndb.session.commit()\n\n\nclass ViewTests(TestCase):\n\n def setUp(self):\n \"\"\"Set up a User and a new Trip\"\"\"\n\n self.user = User(username=\"tester1\",\n password=\"password\",\n email=\"test@t.com\",\n first_name=\"john\",\n last_name=\"smith\",\n guest=False)\n db.session.add(self.user)\n db.session.commit()\n \n self.trip = Trip(start_date_time=datetime(2020, 4, 8, 10, 00), \n end_date_time=datetime(2020, 4, 9, 15, 00),\n number_of_people=3,\n name=\"TestTrip\",\n user_id= self.user.id)\n \n db.session.add(self.trip)\n db.session.commit()\n\n def tearDown(self):\n TripMeal.query.delete()\n Trip.query.delete()\n User.query.delete()\n \n db.session.commit()\n \n def test_home(self):\n \"\"\"Test create trip form \"\"\"\n\n with app.test_client() as client:\n data = {'start_date_time': datetime(2020, 10, 8, 10, 0).strftime('%Y-%m-%dT%H:%M'),\n 'end_date_time': datetime(2020, 10, 9, 15, 0).strftime('%Y-%m-%dT%H:%M'),\n \"number_of_people\": 2,\n \"name\": \"TestTrip2\"\n }\n resp = client.post('/', data=data, follow_redirects=True)\n html = resp.get_data(as_text=True)\n\n trip = Trip.query.filter_by(name=\"TestTrip2\").first()\n\n self.assertEqual(resp.status_code, 200)\n self.assertEqual(trip.number_of_people, 2)\n self.assertIn(\"You will need 4\", html)\n\n def test_show_meals(self):\n \"\"\"Test show meal plan\"\"\"\n \n a = TripMeal(trip_id=self.trip.id, meal_id=1)\n b = TripMeal(trip_id=self.trip.id, meal_id=2)\n db.session.add_all([a,b])\n db.session.commit()\n\n with app.test_client() as client:\n with client.session_transaction() as sess:\n sess['user_id'] = self.user.id\n \n db.session.add(self.trip)\n db.session.commit()\n \n resp = client.get(f'/meal-plan/{self.trip.id}', follow_redirects=True)\n html = resp.get_data(as_text=True)\n\n self.assertEqual(resp.status_code, 200)\n self.assertIn(\"You have 4 meals\", html)\n\n def test_packing_list(self):\n \"\"\"Test show packing list\"\"\"\n\n with app.test_client() as client:\n with client.session_transaction() as sess:\n sess['user_id'] = self.user.id\n \n db.session.add(self.trip)\n db.session.commit() \n\n resp = client.get(f'/packing-list/{self.trip.id}', follow_redirects=True)\n \n html = resp.get_data(as_text=True)\n self.assertEqual(resp.status_code, 200)\n self.assertIn(\"Total Weight\", html)\n \n def test_create_meal(self):\n \"\"\"Test that a meal can be created from ingredient ids\"\"\"\n\n \n with app.test_client() as client:\n data = {\n 'first_i': 792667,\n 'second_i': 548596,\n 'title': 'testmeal',\n 'type_': 'Dinner',\n }\n resp = client.post(f'/meals', data=data, follow_redirects=True)\n\n html = resp.get_data(as_text=True)\n\n meal = Meal.query.filter_by(title=\"testmeal\").first()\n\n self.assertEqual(resp.status_code, 200)\n self.assertEqual(meal.type_, 'Dinner')\n\n def test_meal_api(self):\n \"\"\"Test the meal api\"\"\"\n\n with app.test_client() as client:\n j = { \"params\": \n {\n \"item\": \"rice\",\n \"brandOwner\": \"Lotus\"\n }\n }\n resp = client.post('/meal/api', json=j)\n r = resp.json\n \n self.assertEqual(resp.status_code, 200)\n self.assertEqual(\"Lotus Foods\", r[0].get(\"brandOwner\"))\n \n\n\n\n\n\n\n\n\n ","sub_path":"tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":5374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"54833473","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\nfrom sys import modules\nfrom os import listdir\nfrom os.path import basename, isdir, abspath, splitext, exists, join\nfrom inspect import isclass\nfrom tornado.httpserver import HTTPServer\nfrom tornado.web import Application as TornadoApplication, StaticFileHandler\nfrom tornado.ioloop import IOLoop\n\nfrom settings import *\nfrom web import *\nfrom logging import *\nfrom utils import *\n\n@singleton\nclass Application(object):\n \"\"\"\n MVC Application\n \"\"\"\n\n def __init__(self, port = DEFAULT_PORT):\n self._port = port\n\n self._settings = {\n \"static_path\" : STATIC_PATH,\n \"template_path\" : TEMPLATE_PATH,\n \"gzip\" : GZIP,\n \"debug\" : DEBUG,\n \"cookie_secret\" : COOKIE_SECRET,\n \"login_url\" : LOGIN_URL,\n }\n\n\n @property\n def port(self):\n return self._port\n\n\n @property\n def settings(self):\n return self._settings\n\n\n def _find(self, module):\n \"\"\"\n 查找指定模块中所有有效Handler。\n\n 查找条件:\n 1. isclass\n 2. issubclass(o, BaseHandler)\n 3. hasattr(o, \"__url__\")\n\n 返回:\n ((url, handler)...)\n \"\"\"\n handlers = []\n\n for n, o in module.__dict__.items():\n if isclass(o) and issubclass(o, BaseHandler) and hasattr(o, \"__url__\"):\n urls = o.__url__ if hasattr(o.__url__, \"__iter__\") else [o.__url__] # 支持多URL\n for url in urls: handlers.append((url, o))\n\n return handlers\n\n\n def _find_packages(self):\n \"\"\"\n 查找应用程序根目录下所有包含Handler的包。\n\n 查找规则:\n 1. 仅检查 __init__.py 成员,也就是说导出规则由包自行控制。\n 2. 其他规则参考 _find()。\n \"\"\"\n\n handlers = []\n\n # 查找程序根目录下所有的包中的Handler。\n packages = [(b, abspath(b)) for b in listdir(BASE_PATH) if b not in IGNORE_PATHS]\n\n for n, p in packages:\n if not exists(join(p, \"__init__.py\")): continue\n module = __import__(n)\n handlers.extend(self._find(module))\n\n return handlers\n\n\n def _find_pys(self):\n \"\"\"\n 查找应用程序根目录下所有包含Handler的模块(.py)。\n\n 不建议将 .py 放置根目录下,而应该用一个独立的包存放。\n \"\"\"\n\n handlers = []\n\n # 遍历根目录下所有Py文中中的Hanbdler。\n pys = [splitext(f)[0] for f in listdir(BASE_PATH) if f.endswith(\".py\")]\n\n for n in pys:\n module = __import__(n)\n handlers.extend(self._find(module))\n\n return handlers\n\n\n def _get_handlers(self):\n \"\"\"\n 查找所与可用的Handler。\n\n 附加StaticHandler、NotFoundHandler,其中 NotFoundHandler 必须放在最后!\n \"\"\"\n\n # 获取所有可用 Handlers。\n handlers = self._find_packages() + self._find_pys()\n\n # 按Order排序,以便获得正确的Handler。\n handlers.sort(cmp, lambda x: x[1].__order__)\n\n # 附加 Handlers。放在排序之后。 \n handlers.append((TEMPLATE_URL, StaticFileHandler, dict(path = TEMPLATE_PATH)))\n handlers.append((r\"/.*\", NotFoundHandler))\n\n # 按Order顺序显示被载入的Handler。\n for h in handlers:\n log_info(\"Load {0} ({1}, {2}) ...\".format(h[1].__name__, h[1].__module__, h[0]))\n\n return handlers\n\n\n def _start(self, handlers):\n \"\"\"\n 启动 Tornado WebServer\n \"\"\"\n app = TornadoApplication(handlers, **self._settings)\n server = HTTPServer(app)\n server.bind(self._port)\n server.start()\n IOLoop.instance().start()\n\n\n def start(self):\n handlers = self._get_handlers();\n self._start(handlers)\n\n\n__all__ = [\"Application\"]\n","sub_path":"mvc/web/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"61626515","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('customer', '0007_auto_20170711_0511'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='locations',\n fields=[\n ('location_id', models.AutoField(serialize=False, primary_key=True)),\n ('location_name', models.CharField(max_length=50)),\n ('lat', models.CharField(max_length=20)),\n ('lng', models.CharField(max_length=20)),\n ('l_user_id', models.ForeignKey(related_name='l_user_id', to='customer.users')),\n ],\n ),\n migrations.CreateModel(\n name='wallets',\n fields=[\n ('trans_id', models.AutoField(serialize=False, primary_key=True)),\n ('income', models.IntegerField()),\n ('outcome', models.IntegerField()),\n ('trans_date', models.DateField()),\n ('w_user_id', models.ForeignKey(related_name='w_user_id', to='customer.users')),\n ],\n ),\n ]\n","sub_path":"api/customer/migrations/0008_locations_wallets.py","file_name":"0008_locations_wallets.py","file_ext":"py","file_size_in_byte":1168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"295355434","text":"\"\"\" \"\"\"\n\nfrom pylons import config\nfrom pylons.controllers import WSGIController\nfrom pylons.util import asbool\n\nfrom codalib.util import utcnow\n\n\nclass BaseController(WSGIController):\n \"\"\"WSGI Base Controller\"\"\"\n\n def __call__(self, environ, start_response):\n \"\"\"Invoke the Controller\"\"\"\n\n try:\n return WSGIController.__call__(self, environ, start_response)\n finally:\n #Finish the code profiling.\n if asbool(config.get('profile.enable')):\n config['profile.profiler'].finish(meta = {'endtime':utcnow()})\n\ntry:\n from sqlalchemy.exc import SQLAlchemyError\n\n def base_alchemy_controller(Session):\n class BaseAlchemyController(WSGIController):\n \"\"\"WSGI Base Controller for SQLAlchemy controllers.\"\"\"\n\n def __call__(self, environ, start_response):\n \"\"\"Invoke the Controller\"\"\"\n\n try:\n return WSGIController.__call__(self, environ, start_response)\n finally:\n try:\n Session.commit()\n except SQLAlchemyError:\n Session.rollback()\n raise\n finally:\n if not asbool(config.get('debug')):\n Session.remove()\n #Finish the code profiling.\n if asbool(config.get('profile.enable')):\n config['profile.profiler'].finish(meta = {'endtime':utcnow()})\n\n return BaseAlchemyController\n\nexcept ImportError:\n def base_alchemy_controller(Session):\n raise ImportError(\"SQLAlchemy is required for `base_alchemy_controller`.\")","sub_path":"codalib/pylons/controllers.py","file_name":"controllers.py","file_ext":"py","file_size_in_byte":1734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"183661897","text":"import copy\nfrom collections import OrderedDict\nimport os\nimport glob\nimport math\nimport time as timeModule\n\nimport numpy as np\n\nfrom Common import logger\nfrom Object import Atmosphere, SkeletonActor, StaticActor, Camera, Light, LightProbe, Sky, PostProcess, RenderInfo\nfrom OpenGLContext import UniformBlock\nfrom Utilities import Singleton, GetClassName, Attributes, FLOAT_ZERO, FLOAT4_ZERO, MATRIX4_IDENTITY, Matrix4, Profiler\n\n\nclass SceneManager(Singleton):\n def __init__(self):\n self.core_manager = None\n self.resource_manager = None\n self.sceneLoader = None\n self.renderer = None\n self.__current_scene_name = \"\"\n\n # Scene Objects\n self.main_camera = None\n self.main_light = None\n self.main_light_probe = None\n self.selected_object = None\n\n # envirment object\n self.sky = None\n self.atmosphere = None\n\n self.cameras = []\n self.lights = []\n self.light_probes = []\n self.static_actors = []\n self.skeleton_actors = []\n self.objectMap = {} # All of objects\n\n # render group\n self.static_solid_render_infos = []\n self.static_translucent_render_infos = []\n self.skeleton_solid_render_infos = []\n self.skeleton_translucent_render_infos = []\n\n def initialize(self, core_manager):\n logger.info(\"initialize \" + GetClassName(self))\n self.core_manager = core_manager\n self.resource_manager = core_manager.resource_manager\n self.sceneLoader = self.resource_manager.sceneLoader\n self.renderer = core_manager.renderer\n\n # new scene\n self.new_scene()\n\n def get_current_scene_name(self):\n return self.__current_scene_name\n\n def set_current_scene_name(self, scene_name):\n self.__current_scene_name = scene_name\n self.core_manager.set_window_title(scene_name)\n\n def clear_scene(self):\n self.core_manager.notifyClearScene()\n self.main_camera = None\n self.main_light = None\n self.main_light_probe = None\n self.cameras = []\n self.lights = []\n self.light_probes = []\n self.static_actors = []\n self.skeleton_actors = []\n self.objectMap = {}\n\n # delete empty scene\n # resource = self.resource_manager.sceneLoader.getResource(self.__current_scene_name)\n # if resource is not None and not os.path.exists(resource.meta_data.resource_filepath):\n # self.resource_manager.sceneLoader.delete_resource(self.__current_scene_name)\n\n def post_open_scene(self):\n self.core_manager.sendObjectInfo(self.renderer.postprocess)\n self.renderer.resizeScene()\n\n def new_scene(self):\n self.clear_scene()\n\n # add scene objects\n self.main_camera = self.addCamera()\n self.main_light = self.addLight()\n self.main_light_probe = self.addLightProbe()\n self.atmosphere = Atmosphere()\n self.sky = Sky()\n\n self.set_current_scene_name(self.resource_manager.sceneLoader.get_new_resource_name(\"new_scene\"))\n\n logger.info(\"New scene : %s\" % self.__current_scene_name)\n\n scene_data = self.get_save_data()\n self.resource_manager.sceneLoader.create_resource(self.__current_scene_name, scene_data)\n\n self.post_open_scene()\n\n def open_scene(self, scene_name, scene_data):\n self.clear_scene()\n self.set_current_scene_name(scene_name)\n\n logger.info(\"Open scene : %s\" % scene_name)\n\n camera_datas = scene_data.get('cameras', [])\n for camera_data in camera_datas:\n self.addCamera(**camera_data)\n self.main_camera = self.get_camera(0)\n\n light_datas = scene_data.get('lights', [])\n for light_data in light_datas:\n self.addLight(**light_data)\n self.main_light = self.get_light(0)\n\n light_probe_datas = scene_data.get('light_probes', [])\n if light_probe_datas:\n for light_probe_data in light_probe_datas:\n self.addLightProbe(**light_probe_data)\n else:\n self.addLightProbe()\n self.main_light_probe = self.get_light_probe(0)\n\n self.sky = Sky()\n\n self.atmosphere = Atmosphere()\n\n for object_data in scene_data.get('static_actors', []):\n self.addObject(**object_data)\n\n for object_data in scene_data.get('skeleton_actors', []):\n self.addObject(**object_data)\n\n self.post_open_scene()\n\n def save_scene(self):\n if self.__current_scene_name == \"\":\n self.set_current_scene_name(self.resource_manager.sceneLoader.get_new_resource_name(\"new_scene\"))\n self.resource_manager.sceneLoader.save_resource(self.__current_scene_name)\n\n def get_save_data(self):\n scene_data = dict(\n cameras=[camera.get_save_data() for camera in self.cameras],\n lights=[light.get_save_data() for light in self.lights],\n static_actors=[static_actor.get_save_data() for static_actor in self.static_actors],\n skeleton_actors=[skeleton_actor.get_save_data() for skeleton_actor in self.skeleton_actors],\n )\n return scene_data\n\n def generateObjectName(self, currName):\n index = 0\n if currName in self.objectMap:\n while True:\n newName = \"%s_%d\" % (currName, index)\n if newName not in self.objectMap:\n return newName\n index += 1\n return currName\n\n def get_object_list(self, object_type):\n if Camera == object_type:\n return self.cameras\n elif Light == object_type:\n return self.lights\n elif LightProbe == object_type:\n return self.light_probes\n elif StaticActor == object_type:\n return self.static_actors\n elif SkeletonActor == object_type:\n return self.skeleton_actors\n return None\n\n def regist_object(self, object):\n if object and object.name not in self.objectMap:\n object_type = type(object)\n object_list = self.get_object_list(object_type)\n object_list.append(object)\n self.objectMap[object.name] = object\n self.update_render_info(object_type)\n self.core_manager.sendObjectInfo(object)\n else:\n logger.error(\"SceneManager::regist_object error. %s\" % object.name if object else 'None')\n\n def unregist_resource(self, object):\n if object and object.name in self.objectMap:\n object_type = type(object)\n object_list = self.get_object_list(object_type)\n object_list.remove(object)\n self.objectMap.pop(object.name)\n self.update_render_info(object_type)\n self.core_manager.notifyDeleteObject(object.name)\n else:\n logger.error(\"SceneManager::unregist_resource error. %s\" % object.name if object else 'None')\n\n def addCamera(self, **camera_data):\n camera_data['name'] = self.generateObjectName(camera_data.get('name', 'camera'))\n camera_data['model'] = self.resource_manager.getModel('Cube')\n logger.info(\"add Camera : %s\" % camera_data['name'])\n camera = Camera(scene_manager=self, **camera_data)\n camera.initialize()\n self.regist_object(camera)\n return camera\n\n def addLight(self, **light_data):\n light_data['name'] = self.generateObjectName(light_data.get('name', 'light'))\n light_data['model'] = self.resource_manager.getModel('Cube')\n logger.info(\"add Light : %s\" % light_data['name'])\n light = Light(**light_data)\n self.regist_object(light)\n return light\n\n def addLightProbe(self, **light_probe_data):\n light_probe_data['name'] = self.generateObjectName(light_probe_data.get('name', 'light_probe'))\n light_probe_data['model'] = self.resource_manager.getModel('sphere')\n logger.info(\"add Light Probe : %s\" % light_probe_data['name'])\n light_probe = LightProbe(**light_probe_data)\n self.regist_object(light_probe)\n return light_probe\n\n def addObject(self, **object_data):\n model = object_data.get('model')\n if model:\n object_data['name'] = self.generateObjectName(object_data.get('name', model.name))\n objType = GetClassName(model)\n logger.info(\"add %s : %s\" % (objType, object_data['name']))\n\n if model.mesh and model.mesh.has_bone():\n obj_instance = SkeletonActor(**object_data)\n else:\n obj_instance = StaticActor(**object_data)\n # regist\n self.regist_object(obj_instance)\n return obj_instance\n return None\n\n def addObjectHere(self, model):\n pos = self.main_camera.transform.pos + self.main_camera.front * 10.0\n return self.addObject(model=model, pos=pos)\n\n def clearObjects(self):\n self.cameras = []\n self.lights = []\n self.static_actors = []\n self.skeleton_actors = []\n self.objectMap = {}\n\n def clear_actors(self):\n for obj_name in list(self.objectMap.keys()):\n self.deleteObject(obj_name)\n\n def deleteObject(self, objName):\n obj = self.getObject(objName)\n if obj and obj not in (self.main_camera, self.main_light, self.main_light_probe):\n self.unregist_resource(obj)\n\n def getObject(self, objName):\n return self.objectMap[objName] if objName in self.objectMap else None\n\n def getObjectNames(self):\n return self.objectMap.keys()\n\n def getObjects(self):\n return self.objectMap.values()\n\n def get_camera(self, index):\n return self.cameras[index] if index < len(self.cameras) else None\n\n def get_light(self, index):\n return self.lights[index] if index < len(self.lights) else None\n\n def get_light_probe(self, index):\n return self.light_probes[index] if index < len(self.light_probes) else None\n\n def get_static_actor(self, index):\n return self.static_actors[index] if index < len(self.static_actors) else None\n\n def get_skeleton_actor(self, index):\n return self.skeleton_actors[index] if index < len(self.skeleton_actors) else None\n\n def getObjectAttribute(self, objName, objTypeName):\n if objTypeName == PostProcess.__name__:\n obj = self.renderer.postprocess\n else:\n obj = self.getObject(objName)\n return obj.getAttribute() if obj else None\n\n def setObjectAttribute(self, objectName, objectTypeName, attributeName, attributeValue, attribute_index):\n if objectTypeName == PostProcess.__name__:\n obj = self.renderer.postprocess\n else:\n obj = self.getObject(objectName)\n obj and obj.setAttribute(attributeName, attributeValue, attribute_index)\n\n def getSelectedObject(self):\n return self.selected_object\n\n def setSelectedObject(self, objName):\n selected_object = self.getObject(objName)\n if self.selected_object is not selected_object:\n if self.selected_object:\n self.selected_object.setSelected(False)\n self.selected_object = selected_object\n if selected_object:\n selected_object.setSelected(True)\n\n def setObjectFocus(self, objName):\n obj = self.getObject(objName)\n if obj and obj != self.main_camera:\n self.main_camera.transform.setPos(obj.transform.pos - self.main_camera.transform.front * 2.0)\n\n def set_camera_aspect(self, aspect):\n for camera in self.cameras:\n camera.set_aspect(aspect)\n\n def update_camera_projection_matrix(self):\n for camera in self.cameras:\n camera.update_projection()\n\n def update_render_info(self, object_type):\n if StaticActor == object_type:\n self.update_static_render_info()\n elif SkeletonActor == object_type:\n self.update_skeleton_render_info()\n\n def update_static_render_info(self):\n self.static_solid_render_infos = []\n self.static_translucent_render_infos = []\n\n RenderInfo.gather_render_infos(actor_list=self.static_actors,\n solid_render_infos=self.static_solid_render_infos,\n translucent_render_infos=self.static_translucent_render_infos)\n\n self.static_solid_render_infos.sort(key=lambda x: (id(x.geometry), id(x.material)))\n self.static_translucent_render_infos.sort(key=lambda x: (id(x.geometry), id(x.material)))\n\n def update_skeleton_render_info(self):\n self.skeleton_solid_render_infos = []\n self.skeleton_translucent_render_infos = []\n\n RenderInfo.gather_render_infos(actor_list=self.skeleton_actors,\n solid_render_infos=self.skeleton_solid_render_infos,\n translucent_render_infos=self.skeleton_translucent_render_infos)\n\n self.skeleton_solid_render_infos.sort(key=lambda x: (id(x.geometry), id(x.material)))\n self.skeleton_translucent_render_infos.sort(key=lambda x: (id(x.geometry), id(x.material)))\n\n def update_scene(self, dt):\n self.renderer.postprocess.update()\n\n for camera in self.cameras:\n camera.update()\n\n for light in self.lights:\n light.update(self.main_camera)\n\n for static_actor in self.static_actors:\n static_actor.update(dt)\n\n for skeleton_actor in self.skeleton_actors:\n skeleton_actor.update(dt)\n\n self.atmosphere.update(self.main_camera, self.main_light)\n","sub_path":"App/SceneManager.py","file_name":"SceneManager.py","file_ext":"py","file_size_in_byte":13634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"241217927","text":"import urllib2\nimport json\nfrom secrets import get_Google_API_key\n\ndef geocode(search_term):\n API_KEY = get_Google_API_key()\n base_url = \"https://maps.googleapis.com/maps/api/geocode/json?address={}&key={}\"\n query = base_url.format( urllib2.quote(search_term), API_KEY)\n resp = urllib2.urlopen(query)\n data = json.load(resp)\n formatted_address = data['results'][0]['formatted_address']\n geom = data['results'][0]['geometry']\n lat, lon = geom['location']['lat'], geom['location']['lng']\n return lat, lon, formatted_address, data","sub_path":"helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"253141201","text":"import csv\nimport os\nimport boto3\n\ndynamodb = boto3.resource('dynamodb')\ntable = dynamodb.Table('G2Q1')\ninDirectory = \"../results/G2Q1/\"\nfor root, dirs, files in os.walk(inDirectory):\n print(\"files to upload\", files)\n for filename in files:\n inFile = inDirectory+filename\n print(\"uploading file: \" + inFile)\n with open(inFile) as csv_input_file:\n csv_reader = csv.reader(csv_input_file, delimiter='\\t')\n line_count = 0\n for row in csv_reader:\n origin = \"\"\n airline = \"\"\n pair = row[0].split()\n if len(pair) == 2:\n origin = pair[0]\n airline = pair[1]\n elif len(pair) == 3:\n if (pair[1] == \"(1)\"):\n origin = pair[0] + \" \" + pair[1]\n airline = pair[2]\n else:\n origin = pair[0]\n airline = pair[1] + \" \" + pair[2]\n elif len(pair) > 3:\n origin = pair[0] + \" \" + pair[1]\n airline = pair[2] + \" \" + pair[3]\n print(\"origin: \" + origin + \" airline: \" + airline + \" delay: \" + row[1])\n table.put_item(\n Item={\n 'origin': origin,\n 'airline': airline,\n 'delay': row[1]\n }\n )\n line_count += 1\n print(\"Line Count: \", line_count)","sub_path":"CloudCapstone/DynamoDbUploadScripts/G2Q1.py","file_name":"G2Q1.py","file_ext":"py","file_size_in_byte":1547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"170193307","text":"class Solution:\n def lengthOfLastWord(self, s: str) -> int:\n count = 0\n # do not forget range is half open\n for index in range(len(s)-1, -1, -1):\n print(s[index])\n if s[index] == ' ':\n if count: return count\n else: continue\n else: count += 1\n\n return count\n","sub_path":"LeetCode/Python/58.py","file_name":"58.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"442246937","text":"\"\"\" OTAA Node example compatible with the LoPy Nano Gateway \"\"\"\n\nfrom network import LoRa\nimport socket\nimport binascii\nimport struct\nimport time\nfrom . import config\n\n# initialize LoRa in LORAWAN mode.\n# Please pick the region that matches where you are using the device:\n# Asia = LoRa.AS923\n# Australia = LoRa.AU915\n# Europe = LoRa.EU868\n# United States = LoRa.US915\nlora = LoRa(mode=LoRa.LORAWAN, region=LoRa.US915)\n\n# create an OTA authentication params\ndev_eui = binascii.unhexlify('240AC4FFFE024038'.replace(' ',''))\napp_eui = binascii.unhexlify('70B3D57ED000A7F5'.replace(' ',''))\napp_key = binascii.unhexlify('68048150FD4364602902B6166BE77698'.replace(' ',''))\n\n# remove all the channels\nfor channel in range(0, 72):\n lora.remove_channel(channel)\n\n# set all channels to the same frequency (must be before sending the OTAA join request)\nfor channel in range(0,72):\n lora.add_channel(channel, frequency=config.LORA_FREQUENCY, dr_min=0, dr_max=4)\n\n# join a network using OTAA\nprint(\"Joining network...\")\nlora.join(activation=LoRa.OTAA, auth=(dev_eui, app_eui, app_key), timeout=0)\n\n# wait until the module has joined the network\njoin_wait = 0\nwhile True: \n time.sleep(2.5)\n if not lora.has_joined():\n print('Not joined yet...')\n join_wait += 1\n if join_wait == 5:\n lora.join(activation=LoRa.OTAA, auth=(dev_eui, app_eui, app_key), timeout=0)\n join_wait = 0\n else:\n break\n\n# create a LoRa socket\ns = socket.socket(socket.AF_LORA, socket.SOCK_RAW)\n\n# set the LoRaWAN data rate\ns.setsockopt(socket.SOL_LORA, socket.SO_DR, 3)\n\n# make the socket blocking\ns.setblocking(False)\n\ntime.sleep(5.0)\n\nfor i in range (200):\n pkt = b'PKT #' + bytes([i])\n print('Sending:', pkt)\n s.send(pkt)\n time.sleep(4)\n rx, port = s.recvfrom(256)\n if rx:\n print('Received: {}, on port: {}'.format(rx, port))\n time.sleep(6)\n","sub_path":"Node/flash/lora/otaa_node_US915.py","file_name":"otaa_node_US915.py","file_ext":"py","file_size_in_byte":1894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"156577615","text":"import csv \nimport psycopg2\nimport os\nimport glob\nfrom datetime import datetime\n\n#con = psycopg2.connect(database=\"postgres\", user=os.environ['USER'], port=\"5432\")\ncon_string = \"dbname= 'postgres' user='postgres' password='251170'\"\ncon = psycopg2.connect(con_string)\ncur = con.cursor()\n\ndef create_tables():\n commands = (\n \"\"\"\n CREATE TABLE student_info (\n sid INT PRIMARY KEY,\n email VARCHAR(355) UNIQUE NOT NULL,\n prefname VARCHAR(50) NOT NULL,\n surname VARCHAR(50) NOT NULL\n )\n \"\"\",\n \"\"\"\n CREATE TABLE student_major (\n sid INT ,\n term VARCHAR(20) NOT NULL,\n major Varchar(7) NOT NULL,\n PRIMARY KEY(sid,term)\n )\n \"\"\",\n \"\"\"\n CREATE TABLE student_class (\n sid INT references student_info(sid),\n term VARCHAR(20)NOT NULL, \n class VARCHAR(20) NOT NULL,\n PRIMARY KEY(sid,term)\n )\n \"\"\",\n \"\"\"\n CREATE TABLE student_type (\n class VARCHAR(20) PRIMARY KEY,\n level VARCHAR(20) NOT NULL\n )\n \"\"\",\n \"\"\"\n CREATE TABLE grade(\n sid INT references student_info(sid),\n cid INT NOT NULL,\n term VARCHAR(20) NOT NULL,\n status VARCHAR(5),\n grade VARCHAR(5),\n unit REAL,\n PRIMARY KEY (sid,cid,term)\n )\n \"\"\",\n \"\"\"\n CREATE TABLE course(\n cid INT NOT NULL,\n term VARCHAR(20) NOT NULL,\n subject VARCHAR(20),\n min_unit REAL,\n max_unit REAL,\n instr VARCHAR(200),\n crse INT,\n PRIMARY KEY(cid,term)\n )\n \"\"\",\n \"\"\"\n CREATE TABLE section(\n cid INT,\n section INT,\n term VARCHAR(20),\n PRIMARY KEY (cid,term)\n )\n \"\"\",\n \"\"\"\n CREATE TABLE meeting(\n cid INT,\n term VARCHAR(20),\n day VARCHAR(5),\n room INT,\n building VARCHAR(50),\n start time,\n endtime time, \n type VARCHAR(50),\n PRIMARY KEY (cid,term,day,type),\n UNIQUE (CID,term,day,start,endtime)\n )\n \"\"\"\n )\n for command in commands:\n cur.execute(command)\n\n con.commit()\n \n return commands;\n\n\"\"\"http://stackoverflow.com/questions/4595197\"\"\"\ndef group_by_heading( some_source ):\n buffer= []\n for line in some_source:\n if(line == ['']):\n if buffer: yield buffer\n buffer= [ line ]\n else:\n buffer.append(line )\n yield buffer\n\ndef process_file(files_to_read):\n heading = []\n data = []\n\n for file_name in files_to_read:\n with open(file_name, \"r\") as csvfile:\n try:\n readCSV = csv.reader(csvfile,delimiter=',') \n for file in group_by_heading(readCSV):\n heading.append(file[1])\n data.append(file[2:])\n except IndexError:\n pass\n return heading,data\n\ndef time_converter(time):\n u = time.split(\"-\")\n start = u[0].rstrip() \n end = u[1].strip()\n tformat = '%I:%M %p'\n start = datetime.strptime(start, tformat)\n end = datetime.strptime(end, tformat)\n start = start.time().isoformat()\n end = end.time().isoformat()\n return start,end\n\n#LEVEL, CLASS, MAJOR\ndef check_student(students,courses):\n student_hash = {}\n student_conflict = []\n sid_conflict = []\n for i in range(len(students)):\n for j in range(len(students[i])):\n if(courses[i][0][1][-1] != \"6\"):\n continue\n sid_term = str(students[i][j][1])+str(courses[i][0][1])\n if sid_term in student_hash:\n \n if students[i][j][4] != student_hash[sid_term][0]: #Different Level\n sid_conflict.append(students[i][j][1])\n sid_conflict.append(student_hash[sid_term][5])\n student_conflict.append(i)\n student_conflict.append(student_hash[sid_term][4])\n if students[i][j][6] != student_hash[sid_term][1]: #Different Class\n sid_conflict.append(students[i][j][1])\n sid_conflict.append(student_hash[sid_term][5])\n student_conflict.append(i)\n student_conflict.append(student_hash[sid_term][4])\n if students[i][j][7] != student_hash[sid_term][2]: #Different Major\n sid_conflict.append(students[i][j][1])\n sid_conflict.append(student_hash[sid_term][5])\n student_conflict.append(i)\n student_conflict.append(student_hash[sid_term][4])\n if students[i][j][9] != student_hash[sid_term][3]: #Different Status\n sid_conflict.append(students[i][j][1])\n sid_conflict.append(student_hash[sid_term][5])\n student_conflict.append(i)\n student_conflict.append(student_hash[sid_term][4])\n\n else:\n student_hash[sid_term] = (students[i][j][4],students[i][j][6],students[i][j][7],students[i][j][9],i,students[i][j][1])\n \n return set(student_conflict),set(sid_conflict)\n\ndef summer_conflict(Course,locations):\n summer = {}\n summer2 = {}\n conflict = []\n for i in range(len(locations)):\n for j in range(len(locations[i])):\n if(Course[i][0][1][-1] != \"6\"):\n continue\n if (locations[i][j][2] == \"\"):\n continue\n start,end = time_converter(locations[i][j][3])\n for day in locations[i][j][2]:\n summer_hash = str(Course[i][0][1]) + str(locations[i][j][4]) \\\n + str(locations[i][j][5]) + day\n if summer_hash in summer:\n start2 = summer[summer_hash][-2]\n end2 = summer[summer_hash][-1]\n if(Course[i][0][2:4] != summer[summer_hash][0]):\n if(start < end2 and start2 < end):\n conflict.append(summer[summer_hash][1])\n conflict.append(i) \n if(start == start2 or end == end2): \n conflict.append(summer[summer_hash][1])\n conflict.append(i) \n else:\n summer2[summer_hash] = (Course[i][0][2:4],i,start,end)\n if summer_hash in summer2:\n start3 = summer2[summer_hash][-2]\n end3 = summer2[summer_hash][-1]\n if(Course[i][0][2:4] != summer2[summer_hash][0]):\n if(start < end3 and start3 < end):\n conflict.append(summer2[summer_hash][1])\n conflict.append(i) \n if(start == start3 or end == end3): \n conflict.append(summer2[summer_hash][1]) \n conflict.append(i)\n else:\n summer[summer_hash] = (Course[i][0][2:4],i,start,end)\n\n return set(conflict)\n\ndef divide_file(data):\n CID = []\n INSTRUCTOR = []\n Student = []\n for i in range(0,len(data),3):\n if not data[2+i]: #Remove courses with no students\n continue\n else:\n CID.append(data[0+i])\n INSTRUCTOR.append(data[1+i])\n Student.append(data[2+i])\n \n \n return CID,INSTRUCTOR,Student\n\ndef generate_file():\n files_to_read = []\n files_to_read.append(\"1989_Q3.csv\")\n files_to_read.append(\"1989_Q4.csv\")\n for year in range(1990,2012):\n for quarter in range(1,5):\n file_name = str(year) + '_' + 'Q' + str(quarter) + \".csv\"\n files_to_read.append(file_name)\n for quarter in range(1,4):\n file_name = str(2012) + '_' + 'Q' + str(quarter) + \".csv\"\n files_to_read.append(file_name)\n return files_to_read\n\n\ndef unit_filter(var_unit):\n if not var_unit:\n return None,None\n u = var_unit.split(\"-\")\n if len(u) == 1:\n min_unit = u[0]\n max_unit = min_unit\n else:\n min_unit = u[0] \n max_unit = u[1]\n return min_unit,max_unit\n\ndef time_filter(time):\n if not time:\n return None,None\n u = time.split(\"-\")\n start = u[0] \n end = u[1]\n return start,end\n\ndef clean(students,courses,locations):\n for i in range(len(students)):\n if not locations[i][0][0]:\n locations[i][0][0] = None\n for j in range(len(students[i])):\n for k in range(len(students[i][j])):\n if not students[i][j][k]:\n students[i][j][k] = None\n \n return \n\ndef insert_students(students,courses,sid_conflict):\n student_info = []\n student_major = []\n student_type = []\n student_class = []\n standing = []\n info = {}\n student_hash = {}\n for i in range(len(students)):\n for j in range(len(students[i])):\n hashkey = str(students[i][j][1])+str(courses[i][0][1])\n student_hashkey = str(students[i][j][1])\n if student_hashkey not in student_hash:\n student_hash[student_hashkey] = 1\n student_info.append( (students[i][j][1],students[i][j][2],students[i][j][3],students[i][j][10]))\n if hashkey in info:\n continue\n else:\n info[str(students[i][j][1])+str(courses[i][0][1])] = students[i][j]\n\n if students[i][j][1] in sid_conflict:\n term = str(courses[i][0][1]) + \"-1\"\n else:\n term = str(courses[i][0][1])\n\n student_major.append( (students[i][j][1],term,students[i][j][7]))\n student_class.append( (students[i][j][1],term,students[i][j][6]) ) \n \n\n for i in range(len(students)):\n for j in range(len(students[i])):\n if students[i][j][6] in info:\n continue\n else:\n info[students[i][j][6]] = 1\n student_type.append( (students[i][j][6],students[i][j][4])) \n\n \"\"\"http://stackoverflow.com/questions/8134602\"\"\"\n records_list_template = ','.join(['%s'] * len(student_info))\n insert_query = 'INSERT INTO student_info (sid, surname,prefname,email) VALUES{0}'.format(records_list_template)\n cur.execute(insert_query,student_info)\n\n records_list_template = ','.join(['%s'] * len(student_major))\n insert_query = 'INSERT INTO student_major (sid,term,major) VALUES{0}'.format(records_list_template)\n cur.execute(insert_query,student_major)\n\n records_list_template = ','.join(['%s'] * len(student_class))\n insert_query = 'INSERT INTO student_class (sid,term, class) VALUES{0}'.format(records_list_template)\n cur.execute(insert_query,student_class)\n\n records_list_template = ','.join(['%s'] * len(student_type))\n insert_query = 'INSERT INTO student_type (class,level) VALUES{0}'.format(records_list_template)\n cur.execute(insert_query,student_type) \n\n con.commit()\n\n return info\ndef insert_course(courses,students,locations,sc):\n grade = []\n course = []\n section = []\n course_info = {}\n standing_info = {}\n grade_info = {}\n\n for i in sc:\n courses[i][0][1] += \"-1\"\n for i in range(len(students)):\n for j in range(len(students[i])):\n hashkey = str(students[i][j][1])+str(courses[i][0][1])\n minU,maxU = unit_filter(courses[i][0][5])\n course_hashkey = str(courses[i][0][0]) + \" \" + str(courses[i][0][1])\n if course_hashkey in course_info:\n continue\n else: \n course_info[course_hashkey] = locations[i]\n section.append( (courses[i][0][0],courses[i][0][4],courses[i][0][1]))\n course.append( (courses[i][0][0],courses[i][0][1],courses[i][0][2],minU,maxU,locations[i][0][0],courses[i][0][3]))\n\n \n for i in range(len(students)):\n for j in range(len(students[i])):\n grade_hashkey = str(students[i][j][1])+ \" \" + str(courses[i][0][0]) + \" \" + str(courses[i][0][1])\n if grade_hashkey in grade_info:\n continue\n else:\n grade_info[grade_hashkey] = 1\n \n grade.append((students[i][j][1],courses[i][0][0],courses[i][0][1],students[i][j][9],students[i][j][8],students[i][j][5]))\n\n records_list_template = ','.join(['%s'] * len(grade))\n insert_query = 'INSERT INTO grade (sid, cid,term,status,grade,unit) VALUES{0}'.format(records_list_template)\n cur.execute(insert_query,grade)\n\n records_list_template = ','.join(['%s'] * len(course))\n insert_query = 'INSERT INTO course (cid,term,subject,min_unit,max_unit,instr,crse) VALUES{0}'.format(records_list_template)\n cur.execute(insert_query,course)\n\n records_list_template = ','.join(['%s'] * len(section))\n insert_query = 'INSERT INTO section(cid,section,term) VALUES{0}'.format(records_list_template)\n cur.execute(insert_query,section)\n\n con.commit()\n return course_info\n\ndef insert_location(courses,locations):\n room_cap = []\n room_info = {}\n meeting = []\n meeting_info = {}\n\n for i in range(len(locations)):\n for k in range(len(locations[i])):\n room_hashkey = locations[i][k][4] + \" \" + str(locations[i][k][5])\n\n start_time, end_time = time_filter(locations[i][k][3])\n meeting_hash = str(courses[i][0][0])+str(courses[i][0][1])+str(locations[i][k][2])\n \n if (locations[i][k][2] != \"\"):\n if not locations[i][k][4]:\n locations[i][k][4] = None\n locations[i][k][5] = None\n if meeting_hash not in meeting_info:\n meeting_info[meeting_hash] = 1\n meeting.append( (courses[i][0][0],courses[i][0][1],locations[i][k][2],locations[i][k][5],locations[i][k][4],start_time,end_time,locations[i][k][1]))\n\n records_list_template = ','.join(['%s'] * len(meeting))\n insert_query = 'INSERT INTO meeting (cid,term,day,room,building,start,endtime,type) VALUES{0}'.format(records_list_template)\n cur = con.cursor()\n cur.execute(insert_query,meeting)\n\n cur.close()\n con.commit()\n\n return \n\n\n#Student[]<-Section Number\n#Student[][]<-- Seat # within that section\n#Student[][][]<--The attribute with that row \n\n#Course[]<-Section Number\n#Course[][]<-- Course within the file\n#Course[][][]<--The attribute with that row \n#<----------start of main ----------------->\n\n\ndef main():\n #files_to_read = generate_file()\n cur = con.cursor()\n files_to_read = str(input(\"Enter directory: \")) \n files_to_read = glob.glob(\"%s/*.csv\" % files_to_read)\n \n heading,data = process_file(files_to_read)\n courses,locations,students = divide_file(data)\n clean(students,courses,locations)\n sc = summer_conflict(courses,locations)\n cs,sid_conflict = check_student(students,courses)\n sc = sc.union(cs)\n create_tables()\n info = insert_students(students,courses,sid_conflict)\n course_info = insert_course(courses,students,locations,sc)\n insert_location(courses,locations)\n\n return \n\nmain()\n\n\n\n\n","sub_path":"insert.py","file_name":"insert.py","file_ext":"py","file_size_in_byte":15458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"157024985","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"PyTAK Functions.\"\"\"\n\nimport asyncio\nimport os\nimport socket\nimport ssl\n\nimport asyncio_dgram\n\nimport pytak\n\n__author__ = \"Greg Albrecht W2GMD \"\n__copyright__ = \"Copyright 2021 Orion Labs, Inc.\"\n__license__ = \"Apache License, Version 2.0\"\n\n\ndef split_host(host, port: int = None) -> tuple:\n \"\"\"Given a host:port and/or port, returns host, port.\"\"\"\n if \":\" in host:\n addr, port = host.split(\":\")\n port = int(port)\n elif port:\n addr = host\n port = int(port)\n else:\n addr = host\n port = int(pytak.DEFAULT_COT_PORT)\n return addr, port\n\n\ndef parse_cot_url(url) -> tuple:\n \"\"\"Parses a Cursor on Target destination URL.\"\"\"\n if \":\" in url.path:\n host, port = str(url.path).split(\":\")\n else:\n host = url.path\n if \"broadcast\" in url.scheme:\n port = pytak.DEFAULT_BROADCAST_PORT\n else:\n port = pytak.DEFAULT_COT_PORT\n return host, port\n\n\nasync def udp_client(url):\n \"\"\"Create a CoT UDP Network Client\"\"\"\n host, port = parse_cot_url(url)\n stream = await asyncio_dgram.connect((host, port))\n if \"broadcast\" in url.scheme:\n sock = stream.socket\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)\n return stream\n\n\nasync def multicast_client(url):\n \"\"\"Create a CoT Multicast Network Client.\"\"\"\n host, port = parse_cot_url(url)\n stream = await asyncio_dgram.bind((host, port))\n sock = stream.socket\n # group = socket.inet_aton(host)\n # mreq = struct.pack('4sL', group, socket.INADDR_ANY)\n # sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)\n return stream\n\n\nasync def protocol_factory(cot_url, fts_token: str = None):\n \"\"\"\n Given a CoT Destination URL, create a Connection Class Instance for the given protocol.\n\n :param cot_url: CoT Destination URL\n :param fts_token:\n :return:\n \"\"\"\n reader = None\n writer = None\n scheme = cot_url.scheme.lower()\n if scheme in [\"https\", \"http\", \"ws\", \"wss\"]: # NOQA pylint: disable=no-else-raise\n if \"teamconnect\" in cot_url.geturl():\n writer = await pytak.TCClient(cot_url).create()\n reader = writer\n elif scheme in [\"tcp\"]:\n host, port = pytak.parse_cot_url(cot_url)\n reader, writer = await asyncio.open_connection(host, port)\n elif scheme in [\"tls\", \"ssl\"]:\n host, port = pytak.parse_cot_url(cot_url)\n\n client_cert = os.getenv(\"PYTAK_TLS_CLIENT_CERT\")\n client_key = os.getenv(\"PYTAK_TLS_CLIENT_KEY\")\n client_cafile = os.getenv(\"PYTAK_TLS_CLIENT_CAFILE\")\n client_ciphers = os.getenv(\n \"PYTAK_TLS_CLIENT_CIPHERS\", pytak.DEFAULT_FIPS_CIPHERS)\n\n dont_check_hostname = bool(os.getenv(\"PYTAK_TLS_DONT_CHECK_HOSTNAME\"))\n dont_verify = bool(os.getenv(\"PYTAK_TLS_DONT_VERIFY\"))\n\n # SSL Context setup:\n ssl_ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)\n ssl_ctx.options |= ssl.OP_NO_TLSv1\n ssl_ctx.options |= ssl.OP_NO_TLSv1_1\n ssl_ctx.set_ciphers(client_ciphers)\n ssl_ctx.check_hostname = True\n ssl_ctx.verify_mode = ssl.VerifyMode.CERT_REQUIRED\n\n if client_key:\n ssl_ctx.load_cert_chain(client_cert, keyfile=client_key)\n else:\n ssl_ctx.load_cert_chain(client_cert)\n\n if client_cafile:\n ssl_ctx.load_verify_locations(cafile=client_cafile)\n\n # Default to verifying cert:\n if dont_verify:\n print(\n \"pytak TLS Certificate Verification DISABLED by Environment.\")\n print(\"pytak TLS Hostname Check DISABLED by Environment.\")\n ssl_ctx.check_hostname = False\n ssl_ctx.verify_mode = ssl.CERT_NONE\n\n # Default to checking hostnames:\n if dont_check_hostname:\n print(\"pytak TLS Hostname Check DISABLED by Environment.\")\n ssl_ctx.check_hostname = False\n\n\n reader, writer = await asyncio.open_connection(host, port, ssl=ssl_ctx)\n elif scheme in [\"udp\"]:\n reader = None\n writer = await pytak.udp_client(cot_url)\n else:\n raise Exception(\n \"Please specify a protocol in your URL, for example: tcp:xxx or \"\n \"udp:xxx\")\n return reader, writer\n\n\nasync def eventworker_factory(cot_url: str, event_queue,\n fts_token: str = None) -> pytak.Worker:\n \"\"\"\n Creates a Cursor on Target Event Worker based on URL parameters.\n\n :param cot_url: URL to CoT Destination.\n :param event_queue: asyncio.Queue worker to get events from.\n :param fts_token: If supplied, API Token to use for FreeTAKServer REST.\n :return: EventWorker or asyncio Protocol\n \"\"\"\n reader, writer = await protocol_factory(cot_url, fts_token)\n return pytak.EventWorker(event_queue, writer)\n\n\ndef hex_country_lookup(icao_int: int) -> str:\n \"\"\"\n Pull country from ICAO Hex within the stdin file when there is no match to\n csv files (e.g., faa-aircraft.csv).\n \"\"\"\n for country_dict in pytak.ICAO_RANGES:\n start = country_dict[\"start\"]\n end = country_dict[\"end\"]\n if start <= icao_int <= end:\n return country_dict[\"country\"]\n\n\ndef dolphin(flight: str = None, affil: str = None) -> str:\n \"\"\"\n Classify an aircraft as USCG Dolphin, or not.\n What, are you afraid of water?\n \"\"\"\n # MH-65D Dolphins out of Air Station SF use older ADS-B, but luckily have\n # a similar \"flight\" name.\n # For example:\n # * C6540 / AE2682 https://globe.adsbexchange.com/?icao=ae2682\n # * C6604 / AE26BB https://globe.adsbexchange.com/?icao=ae26bb\n if flight and len(flight) >= 3 and flight[:2] in [\"C6\", b\"C6\"]:\n if affil and affil in [\"M\", b\"M\"]:\n return True\n\n\ndef faa_to_cot_type(icao_hex: int, category: str = None,\n flight: str = None) -> str:\n \"\"\"\n Classify Cursor on Target Event Type from ICAO, and if available, from\n Emitter Category & Flight.\n \"\"\"\n affil = \"C\" # Affiliation, default = Civilian\n attitude = \".\" # Attitude\n\n icao_int = int(f\"0x{icao_hex.replace('~', '')}\", 16)\n\n if flight:\n for dom in pytak.DOMESTIC_AIRLINES:\n if flight.startswith(dom):\n # SN: Should be \"n\" for Mil/Fed posture.\n attitude = \"f\" # FIXME: Default posture depends on user.\n\n tw_start = 0x899000\n tw_end = 0x8993FF\n if tw_start <= icao_int <= tw_end:\n attitude = \"n\"\n\n civs = [\"US-CIV\", \"CAN-CIV\", \"NZ-CIV\", \"AUS-CIV\", \"UK-CIV\"]\n for civ in civs:\n civ_start = pytak.DEFAULT_HEX_RANGES[civ][\"start\"]\n civ_end = pytak.DEFAULT_HEX_RANGES[civ][\"end\"]\n if civ_start <= icao_int <= civ_end:\n attitude = \"n\"\n\n if hex_country_lookup(icao_int):\n attitude = \"n\"\n\n # Friendly Mil:\n mil = [\"US-MIL\", \"CAN-MIL\", \"NZ-MIL\", \"AUS-MIL\", \"UK-MIL\"]\n for fvey in mil:\n mil_start = pytak.DEFAULT_HEX_RANGES[fvey][\"start\"]\n mil_end = pytak.DEFAULT_HEX_RANGES[fvey][\"end\"]\n if mil_start <= icao_int <= mil_end:\n attitude = \"f\"\n affil = \"M\"\n\n cot_type = f\"a-{attitude}-A-{affil}\"\n\n if category:\n _category = str(category)\n\n if _category in [\"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"A1\", \"A2\", \"A3\", \"A4\", \"A5\", \"A6\"]: # Fixed\n cot_type = f\"a-{attitude}-A-{affil}-F\"\n elif _category in [\"7\", \"A7\"]: # Rotor/Helicopter\n cot_type = f\"a-{attitude}-A-{affil}-H\"\n elif _category in [\"10\", \"B2\"]: # Balloon\n cot_type = f\"a-{attitude}-A-{affil}-L\"\n elif _category in [\"14\", \"B6\"]: # Drone\n cot_type = f\"a-{attitude}-A-{affil}-F-q\"\n elif _category in [\"17\", \"18\", \"C1\", \"C2\"]:\n cot_type = \"a-.-G-E-V-C-U\"\n elif _category in [\"19\"]:\n cot_type = f\"a-{attitude}-G-I-U-T-com-tow\"\n\n if dolphin(flight, affil):\n cot_type = f\"a-f-A-{affil}-H\"\n\n return cot_type\n\n","sub_path":"pytak/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":8188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"139035165","text":"\nimport time\nimport os\nimport tornado.web\nfrom handlers.home_handler import HomeHandler\nfrom handlers.main_api_handler import MainAPIHandler\n\nSTATIC_PATH = os.path.join(os.path.dirname(__file__), '../../assets')\n\n\nclass ChiMainServer(tornado.web.Application):\n def __init__(self):\n\n settings = {\n 'debug': True,\n 'serve_traceback': False,\n 'compress_response': True,\n 'template_path': 'templates/',\n 'static_url_prefix': '/assets/',\n 'static_path': STATIC_PATH,\n 'autoreload': False,\n 'xsrf_cookies': False,\n }\n\n handlers = self.GetHandlers()\n\n tornado.web.Application.__init__(self, handlers, **settings)\n\n return\n\n def GetHandlers(self):\n # /api/slug/Method\n handlers = []\n handlers.append((r\"/\", HomeHandler))\n handlers.append((r\"/api/(.*)/(.*)\", MainAPIHandler))\n return handlers\n","sub_path":"Tutorial_3_SQLAlchemy_Model/main_server.py","file_name":"main_server.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"183054788","text":"# coding: utf-8\n\nimport sys\nfrom setuptools import setup, find_packages\n\nNAME = \"supportify\"\nVERSION = \"3.0.0\"\n\n\n\n# To install the library, run the following\n#\n# python setup.py install\n#\n# prerequisite: setuptools\n# http://pypi.python.org/pypi/setuptools\n\nREQUIRES = [\"urllib3 >= 1.10\", \"six >= 1.9\", \"certifi\", \"python-dateutil\"]\n\nsetup(\n name=NAME,\n version=VERSION,\n author=\"Supportify, Inc.\",\n author_email=\"help@supportify.io\",\n url=\"https://supportify.io\",\n keywords=[\"Supportify.io\"],\n install_requires=REQUIRES,\n packages=find_packages(),\n include_package_data=True,\n license=\"MIT\",\n description=\"A help-center module backed by Supportify.io.\",\n long_description=\"\"\"\\\n Supportify.io is a smart help center tool focused on helping you provide better help content in less time.\n \"\"\"\n)\n\n\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"413374883","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jun 15 15:49:52 2020\n\n@author: rahul\n\npyvista 3d plotting\n\n\"\"\"\n\nfrom numpy import cos, pi, mgrid\nimport pyvista as pv\nfrom pyvistaqt import BackgroundPlotter\n\n#%% Data\nx, y, z = pi*mgrid[-1:1:31j, -1:1:31j, -1:1:31j]\nvol = cos(x) + cos(y) + cos(z)\ngrid = pv.StructuredGrid(x, y, z)\ngrid[\"vol\"] = vol.flatten()\ncontours = grid.contour([0])\n\n#%% Visualization\npv.set_plot_theme('document')\np = pv.PlotterITK()\np.add_mesh(contours, scalars=contours.points[:, 2])#, show_scalar_bar=False)\np.show()","sub_path":"ls_python/pyvista3d_example.py","file_name":"pyvista3d_example.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"588064303","text":"from os.path import exists, join\nimport json\ndef write_values(value: dict, file_name: str, data_type: str = 'values'):\n try:\n if exists(data_type) is False:\n mkdir(data_type)\n with open(join(data_type, '{0}.json'.format(file_name)), 'w') as f:\n # text = str(value).replace('}', '}\\n').replace('{', '{\\n')\n # f.write(text)\n json.dump(value, f, indent=4, sort_keys=True)\n return True\n except Exception:\n return False\n\ndef read_values(name: str, data_type: str = 'values'):\n value = None\n if exists('{0}/{1}.json'.format(data_type, name)):\n with open('{0}/{1}.json'.format(data_type, name), 'r') as f:\n # value = eval(f.read())\n value = json.load(f)\n return value\n","sub_path":"ConfigUpdater.py","file_name":"ConfigUpdater.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"212330328","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport os\nimport logging\nimport json\nimport math\nimport random\nimport time\nimport subprocess\nfrom itertools import count\nfrom random import randrange\nfrom collections import OrderedDict\nfrom datetime import datetime, date\n\nimport fire\nimport requests\nfrom requests.cookies import cookiejar_from_dict\nfrom openpyxl import Workbook\nfrom lxml import html\n\n\n# config\n# 是否获取商品的详细信息\n# IS_GET_DETAIL = True\nIS_GET_DETAIL = False\nCOOKIE_FILE = 'cookie.txt'\nTMP_DIR = '__TMP__'\n\n\nHEADERS = {\n 'user-agent': 'Mozilla/5.0 (Linux; Android 5.0; SM-G900P Build/LRX21T) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Mobile Safari/537.36',\n 'origin': 'https://m.aliexpress.com/',\n }\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass AliTool():\n def sign(self, raw: str) -> str:\n '''加密方法,传入待加密字符串'''\n js_script = 'sign.js'\n raw = raw.replace('\"', '\\\\\"')\n logger.info('加密前字符串: %s', raw)\n ret = subprocess.check_output(['node', '-e', 'require(\"./%s\").sign(\"%s\")' % (js_script, raw)])\n return ret.strip().decode()\n\n\nclass ShopSpider():\n app_key = '24770048'\n\n def __init__(self, shop_id: str, seller_id: str, cookie_file: str, sign_func):\n self.shop_id = shop_id\n self.seller_id = seller_id\n self.sign_func = sign_func\n self.cookie = self.cookie_from_file(cookie_file)\n self.token = self.get_token()\n self.r_session = requests.Session()\n self.r_session.cookies = cookiejar_from_dict(self.cookie)\n\n def get_token(self):\n return self.cookie['_m_h5_tk'].split('_')[0]\n\n @staticmethod\n def get_timestamp():\n return round(time.time() * 1000)\n\n @staticmethod\n def cookie_from_file(cookie_file: str) -> dict:\n if not os.path.exists(cookie_file):\n raise ValueError('%s是不存在!' % cookie_file)\n\n with open(cookie_file) as f:\n s = f.read()\n s.strip()\n if not s:\n raise ValueError('%s是空文件!' % cookie_file)\n key_val_list = s.split(';')\n d = dict([kv.strip().split('=', 1) for kv in key_val_list])\n logging.info('cookie的键值对 %s' % d)\n return d\n\n def get_query_data(self, page: int, page_size: int) -> dict:\n '''生成产品列表子请求参数\n 如: {\"page\":1,\"pageSize\":20,\"locale\":\"en_US\",\"site\":\"glo\",\"storeId\":\"300652\",\"country\":\"CN\",\"currency\":\"USD\",\"aliMemberId\":\"110703300\",\"sort\":\"new_desc\"}\n '''\n return {\n \"page\": page,\n \"pageSize\": page_size,\n \"locale\": \"en_US\",\n \"site\": \"glo\",\n \"storeId\": self.shop_id,\n \"country\": \"CN\",\n \"currency\": \"USD\",\n \"aliMemberId\": self.seller_id,\n # 按商品添加时间排序\n \"sort\": \"new_desc\",\n }\n\n def get_one_page_products(self, page: int, page_size: int) -> dict:\n '''请求产品列表\n :return: 接口返回的data数据\n '''\n api = 'https://acs.aliexpress.com/h5/mtop.aliexpress.store.product.queryallproducts/1.0.1/'\n t = self.get_timestamp()\n t = 1551606226193\n query_data = self.get_query_data(page, page_size)\n logger.info('请求产品列表,请求的子参数: %s', query_data)\n query_data_str = str(query_data).replace(' ', '').replace(\"'\", '\"')\n raw = '%s&%s&%s&%s' % (self.token, t, self.app_key, query_data_str)\n params = {\n 'jsv': '2.4.2',\n 'appKey': self.app_key,\n 't': t,\n 'sign': self.sign_func(raw),\n 'api': 'mtop.aliexpress.store.product.QueryAllProducts',\n 'v': '1.0.1',\n 'dataType': 'json',\n 'AntiCreep': True,\n 'type': 'originaljson',\n 'data': query_data_str\n }\n logger.info('请求产品列表,请求参数: %s', params)\n ret_json = self.r_session.get(api, params=params).json()\n logger.info('请求产品列表接口返回数据: %s' % ret_json)\n return ret_json['data']\n\n def get_all_products(self, page_size: str = 20):\n '''获取所有产品\n :return: 所有产品的iterator\n '''\n is_last_page = False\n page = 1\n while not is_last_page:\n data = self.get_one_page_products(page, page_size)\n yield data\n is_last_page = data.get('lastPage')\n if is_last_page is None:\n logger.warning(\"可能命中反爬虫机制,延迟一会再进行执行\")\n time.sleep(random.randint(10, 20))\n else:\n page += 1\n\n\nclass SpiderError(Exception):\n pass\n\n\nclass Product():\n def __init__(self, id: int, name: str):\n self.id = id\n self.name = name\n self.ratings = None\n self.img_url = None\n self.average_star_rate = None\n self.orders = None\n self.feedbacks = None\n self.formatedPiecePriceStr = None\n self.formatedPromotionPiecePriceStr = None\n self.salePrice_discount = None\n # 心愿单,即收藏量\n self.wish = None\n\n def __str__(self):\n return '' % (self.id, self.name)\n\n def __lt__(self, other):\n return (self.ratings, self.wish) < (other.ratings, other.wish)\n\n def get_product_detail(self, cache=True):\n '''获取产品详情页'''\n logger.info('获取产品详情. 产品id: %s', self.id)\n today = date.today()\n cache_fpath = os.path.join(TMP_DIR, '%s_%s.html' % (today, self.id))\n if cache and os.path.exists(cache_fpath):\n logger.info('命中缓存. 产品id: %s', self.id)\n # 强制指定编码类型,修复windows会默认编码为gbk导致错误的问题\n with open(cache_fpath, encoding='utf-8') as f:\n return f.read()\n api = 'https://m.aliexpress.com/item/%s.html' % self.id\n html_text = requests.get(api).text\n # 强制指定编码类型,修复windows会默认编码为gbk导致错误的问题\n with open(cache_fpath, 'w', encoding='utf-8') as f:\n f.write(html_text)\n return html_text\n\n def update_from_brief(self, brief: dict):\n '''更新该类的某些属性,信息来自店铺产品列表接口中的简要产品信息\n :param brief: 简要产品信息\n '''\n self.ratings = brief['averageStar']\n self.img_url = brief['image350Url'].strip('/')\n self.orders = brief['orders']\n self.feedbacks = brief['feedbacks']\n self.average_star_rate = brief.get('averageStarRate')\n self.formatedPiecePriceStr = brief.get('formatedPiecePriceStr')\n self.formatedPromotionPiecePriceStr = brief.get('formatedPromotionPiecePriceStr')\n self.salePrice_discount = brief.get('salePrice', {}).get('discount')\n\n def update_from_detail(self, detail_html: str):\n '''根据产品的详细页面更新对应的属性'''\n tree = html.fromstring(detail_html)\n wish_path = '//span[@class=\"wished-count\"]/span/text()'\n try:\n self.wish = int(tree.xpath(wish_path)[0].replace('+', ''))\n except IndexError:\n raise SpiderError('产品详细信息获取错误')\n\n def set_detail(self):\n '''获取并设置产品的详细信息'''\n try:\n self.update_from_detail(self.get_product_detail())\n except SpiderError as e:\n logger.info(e)\n time.sleep(5)\n self.update_from_detail(self.get_product_detail(cache=False))\n\n\ndef get_new_plist_fpath():\n '''获取新的产品列表文件路径'''\n tmp_dir = TMP_DIR\n if not os.path.exists(tmp_dir):\n os.mkdir(tmp_dir)\n filename = '%s.txt' % datetime.now().strftime('%Y%m%d_%H%M%S')\n return os.path.join(tmp_dir, filename)\n\n\ndef write_xlsx(prods, output):\n logger.info('wirte_xlsx...')\n wb = Workbook()\n ws = wb.active\n ws['A%d' % 1] = 'ID'\n ws['B%d' % 1] = 'ratings'\n ws['C%d' % 1] = 'wish'\n ws['D%d' % 1] = 'average_star_rate'\n ws['E%d' % 1] = 'formatedPiecePriceStr'\n ws['F%d' % 1] = 'formatedPromotionPiecePriceStr'\n ws['G%d' % 1] = 'salePrice_discount'\n ws['H%d' % 1] = 'feedbacks'\n ws['I%d' % 1] = 'orders'\n ws['J%d' % 1] = 'img_url'\n ws['K%d' % 1] = 'name'\n for num, prod in enumerate(prods, 2):\n ws['A%d' % num] = prod.id\n ws['B%d' % num] = prod.ratings\n ws['C%d' % num] = prod.wish\n ws['D%d' % num] = prod.average_star_rate\n ws['E%d' % num] = prod.formatedPiecePriceStr\n ws['F%d' % num] = prod.formatedPromotionPiecePriceStr\n ws['G%d' % num] = prod.salePrice_discount\n ws['H%d' % num] = prod.feedbacks\n ws['I%d' % num] = prod.orders\n ws['J%d' % num] = prod.img_url\n ws['K%d' % num] = prod.name\n wb.save(output)\n\n\nclass SpiderCmd():\n def get_products(self, shop_id: str, seller_id: str, output: str):\n '''获取店铺产品信息\n :param shop_id: 店铺id\n :param seller_id: 卖家id\n :param output: xlsx输出文件名\n '''\n plist_fpath = get_new_plist_fpath()\n ali_tool = AliTool()\n ss = ShopSpider(shop_id, seller_id, COOKIE_FILE, ali_tool.sign)\n with open(plist_fpath, 'w') as f:\n lines = ['%s\\n' % json.dumps(item) for item in ss.get_all_products(page_size=100)]\n f.writelines(lines)\n\n self.get_products_from_cache_file(plist_fpath, output)\n\n def get_products_from_cache_file(self, plist_fpath: str, output: str):\n '''获取店铺产品信息\n :param plist_fpath: 店铺商品列表缓存文件路径。\n 每次执行后的商品列表会缓存至__TMP__文件夹中,\n 并以“日期.txt”命名\n :param output: xlsx文件输出文件名\n '''\n with open(plist_fpath) as f:\n # 获取产品简要信息列表\n p_dict_list = [p_data for line in f for p_data in json.loads(line)['ret']]\n\n p_list = []\n for item in p_dict_list:\n product = Product(item['id'], item['subject'])\n product.update_from_brief(item)\n p_list.append(product)\n\n # 读取详情列表\n if IS_GET_DETAIL:\n for product in p_list:\n product.set_detail()\n p_list.sort(reverse=True)\n\n write_xlsx(p_list, output)\n\n\nif __name__ == \"__main__\":\n logging.basicConfig(level=logging.INFO)\n fire.Fire(SpiderCmd)\n","sub_path":"maspider.py","file_name":"maspider.py","file_ext":"py","file_size_in_byte":10676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"98282149","text":"class MyMatrixAlgebraElement(CombinatorialFreeModule.Element):\n def __init__(self, *args, **kwargs):\n CombinatorialFreeModule.Element.__init__(self, *args, **kwargs)\n\n def _listify(self):\n P = self.parent()\n n = P._n\n R = P._R\n my_list = [[R.zero()]*n for _ in range(n)]\n mcs = self.monomial_coefficients()\n for mc in mcs:\n i, j = mc[0], mc[1]\n my_list[i][j] = mcs[mc]\n return my_list\n\n def _repr_(self):\n my_list = self._listify()\n my_string = ''\n for row in my_list:\n my_string += str(row) + '\\n'\n my_string = my_string[:-1]\n return my_string\n\n def _latex_(self):\n n = self.parent()._n\n my_list = self._listify()\n the_string = '\\\\left(\\\\begin{array}{%s}\\n'%('r'*n)\n for row in my_list:\n for item in row:\n the_string += latex(item) + '&'\n the_string = the_string[:-1] + '\\\\\\\\\\n'\n the_string = the_string[:-3]\n the_string += '\\n\\\\end{array}\\\\right)'\n return the_string\n\nclass MyMatrixAlgebra(CombinatorialFreeModule):\n\n # Element = MyMatrixAlgebraElement\n\n def __init__(self, R, n, *args, **kwargs):\n self._n = n\n self._R = R\n indices = [(i, j) for i in range(n) for j in range(n)]\n\n # Comment one of the following two lines:\n # CombinatorialFreeModule.__init__(self, R, indices, category=AlgebrasWithBasis(R), *args, **kwargs)\n CombinatorialFreeModule.__init__(self, R, indices, *args, **kwargs)\n\n def _repr_(self):\n return \"My %s by %s matrix algebra over %s\"%(self._n, self._n, self._R)\n\n def product_on_basis(self, left, right):\n if left[1] != right[0]:\n return self.zero()\n else:\n return self.monomial((left[0], right[1]))\n\n def one(self):\n return self.sum([self.monomial((i, i)) for i in range(self._n)])\n\n def matrix(self, L):\n n = self._n\n errmsg = \"input should be a length %s list of length %s lists\"%(n, n)\n if len(L) != n:\n raise ValueError(errmsg)\n for item in L:\n if len(item) != n:\n raise ValueError(errmsg)\n sum = self.zero()\n for i in range(n):\n for j in range(n):\n sum += L[i][j]*self.monomial((i, j))\n return sum\n","sub_path":"MyMatrixAlgebra.py","file_name":"MyMatrixAlgebra.py","file_ext":"py","file_size_in_byte":2376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"576446443","text":"import requests\nimport xlwt\nimport os\nimport time\nimport random\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom bs4 import BeautifulSoup\nimport json\n\n\nclass Crawler_artron(object):\n def __init__(self,auctionname,url):\n self.session = requests.session()\n self.main_url = 'https://www.uppsalaauktion.se'\n self.url = url\n self.auctionitemdict = {}\n self.basepath = 'C:\\\\auctions\\\\uppsalaauktion\\\\'\n self.auctionname = auctionname\n self.itemurllist=[]\n self.get_itemdes_itemurl=''\n self.chrome_options = Options()\n self.chrome_options.add_argument('--headless')\n self.chrome_options.add_argument('--disable-gpu')\n self.browser = webdriver.Chrome(chrome_options=self.chrome_options)\n\n\n\n def parse_itemurl(self):\n self.browser.get(url)\n with open('uppsalaauktion.text','w',encoding='utf-8') as f:\n f.write(self.browser.page_source)\n uppsalaauktion_text = open('uppsalaauktion.text', 'r', encoding='utf-8')\n soup = BeautifulSoup(uppsalaauktion_text, 'html5lib')\n itemlistelm = soup.select('ul.objects-list-container li.large-3.medium-6.small-6.columns a')\n for itemelm in itemlistelm:\n itemurl = self.main_url + itemelm.get('href')\n self.itemurllist.append(itemurl)\n # print(self.itemurllist)\n\n\n def parse_item(self):\n for itemurl in self.itemurllist:\n print(itemurl)\n self.get_itemdes(itemurl)\n time.sleep(random.randint(1,3))\n # break\n with open(self.basepath + self.auctionname + '\\\\' + 'auction.json', 'w') as f:\n f.write(json.dumps(self.auctionitemdict))\n\n\n def get_itemdes(self,itemurl):\n self.get_itemdes_itemurl = itemurl\n self.browser.get(itemurl)\n # print(self.browser.page_source)\n soup = BeautifulSoup(self.browser.page_source, 'html5lib')\n # print(soup)\n est = soup.select_one('span.object-estimate')\n\n lotnum_title = soup.select_one('h1.object-title').text\n print(lotnum_title)\n lotnum = lotnum_title.split('.')[0].strip()\n title = lotnum_title.split('.')[1].strip()\n\n itemdes = soup.select_one('div.large-6.medium-6.columns p.object-body').text.strip()\n\n # 起拍价\n startingprice = soup.select_one('div.object-leading-bid.is-valid')\n if startingprice:\n startingprice = startingprice.text.strip().replace('.', '')\n\n # 估价\n if est:\n est = est.text\n try:\n estlow = est.split('-')[0].strip().replace('.', '')\n except:\n estlow = ''\n try:\n esthigh = est.split('-')[1].strip().replace('.', '')\n except:\n esthigh = ''\n else:\n estlow = ''\n esthigh = ''\n\n # 图片\n itemimgurllist= []\n image_main = soup.select_one('.large-6.medium-6.columns.img-column .image-thumb a')\n if image_main:\n itemimgurllist.append('https:' + image_main.get('href'))\n imgurllistelm = soup.select('div.img-holder.hide-from-print figure')\n if imgurllistelm:\n for imgurlelm in imgurllistelm:\n imgurl = 'https:' + imgurlelm.get('data-image-thumb')\n itemimgurllist.append(imgurl)\n\n\n print(lotnum)\n print(title)\n print(itemdes)\n print(startingprice)\n print(estlow)\n print(esthigh)\n print(itemimgurllist)\n self.auctionitemdict[lotnum] = [title, itemdes, startingprice, estlow, esthigh, itemimgurllist]\n print('*' * 200)\n\n\n\n\n\n\n # k lotnum v0 itemtile v1 itemdes v2 estimatelow v3 estimatehigh v4 imgurl\n def download_img(self):\n for k,v in self.auctionitemdict.items():\n imgnum = 0\n print('itemimgurl',v[5])\n for imgurl in v[5]:\n if imgnum == 0:\n imgreq = requests.get(imgurl,verify=False)\n if imgreq.status_code < 400:\n with open(self.basepath + self.auctionname + '\\\\' + str(k) + '.jpg', 'wb') as f:\n f.write(imgreq.content)\n imgnum += 1\n else:\n imgreq = requests.get(imgurl,verify=False)\n if imgreq.status_code < 400:\n with open(self.basepath + self.auctionname + '\\\\' + str(k)+'_'+str(imgnum) + '.jpg', 'wb') as f:\n f.write(imgreq.content)\n imgnum += 1\n\n\n def save_excel(self):\n\n excel_init_file = xlwt.Workbook(encoding='utf-8')\n table = excel_init_file.add_sheet('auction', cell_overwrite_ok=True)\n row_num = 0\n for k, v in self.auctionitemdict.items():\n table.write(row_num, 0, k)\n table.write(row_num, 1, str(v[0]))\n table.write(row_num, 2, str(v[1]))\n table.write(row_num, 3, str(v[2]))\n table.write(row_num, 4, str(v[3]))\n table.write(row_num, 5, str(v[4]))\n row_num += 1\n excel_init_file.save(self.basepath + self.auctionname + '\\\\'+'auction.xls')\n\n\n def del_file(self,path):\n for i in os.listdir(self.basepath + self.auctionname + '\\\\'):\n path_file = os.path.join(self.basepath + self.auctionname, i)\n if os.path.isfile(path_file):\n os.remove(path_file)\n else:\n self.del_file(path_file)\n\n def create_folder(self):\n if os.path.isdir(self.basepath + self.auctionname):\n if len(os.listdir(self.basepath + self.auctionname)) != 0:\n self.del_file(self.basepath + self.auctionname)\n print('文件删除完成!')\n else:\n os.makedirs(self.basepath + self.auctionname)\n print('文件夹已创建!')\n\n\n\n# auctionname = input('拍场名:').strip()\n# print(auctionname)\n# url = input('拍场地址:').strip()+'&auction_paged=100'\n# print(url)\nauctionname = '20191210'\nurl = 'https://www.uppsalaauktion.se/en/auctions/?auction_name=20191210&auction_days=1&auction_paged=3'\nobj = Crawler_artron(auctionname,url)\nobj.create_folder()\nobj.parse_itemurl()\nobj.parse_item()\nobj.save_excel()\nobj.download_img()\n\n","sub_path":"tools/Crawler/爬虫模板www.uppsalaauktion.se.py","file_name":"爬虫模板www.uppsalaauktion.se.py","file_ext":"py","file_size_in_byte":6355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"421669709","text":"import sys\nimport os\nimport glob\nimport time\nimport math\nimport operator\nfrom tabulate import tabulate\nimport csv\n\ndf = {}\nidf = {} \ntf_idf = {}\ntf = {}\nterm_docs = {}\ndoc_terms = {}\nN = 3204\ndoc_term_index = {}\ndoc_length = {}\n\ndef get_tf_df():\n\tf = open('Inverted_Index.txt','r')\n\tfor i in f:\n\t\tfr = []\n\t\td = []\n\t\tdata = ''\n\t\tline = i.split(' -> ')\n\t\tfor j in line[1]:\n\t\t\tif j not in ('(',')',' ','\\n'):\n\t\t\t\tdata += j\n\t\tdocs = data.split(',')\n\t\tfor k in xrange(len(docs)):\n\t\t\tif k % 2 == 0:\n\t\t\t\td.append(docs[k])\n\t\t\telse:\n\t\t\t\tfr.append(int(docs[k]))\n\t\tdf[line[0]] = len(d)\n\t\ttf[line[0]] = {}\n\t\tterm_docs[line[0]] = []\n\t\tfor l in xrange(len(d)):\n\t\t\ttf[line[0]][d[l]] = fr[l]\n\t\t\tterm_docs[line[0]].append(d[l])\n\t\tfor m in xrange(len(d)):\n\t\t\tif d[m] in doc_terms:\n\t\t\t\tdoc_terms[d[m]] += float(fr[m])\n\t\t\telse:\n\t\t\t\tdoc_terms[d[m]] = {}\n\t\t\t\tdoc_terms[d[m]] = 1.0\n\t\tfor n in xrange(len(d)):\n\t\t\tif d[n] in doc_term_index:\n\t\t\t\tdoc_term_index[d[n]][line[0]] = fr[n]\n\t\t\telse:\n\t\t\t\tdoc_term_index[d[n]] = {}\n\t\t\t\tdoc_term_index[d[n]][line[0]] = fr[n]\n\ndef get_term_idf():\n\tterm_idf = {}\n\tfor term in df:\n\t\tif df[term] > 0:\n\t\t\tidf[term] = 1.0 + math.log(N/float(df[term]))\n\t\telse:\n\t\t\tidf[term] = 1.0\n\n\ndef get_tf_idf():\n\tfor token,docs in tf.iteritems():\n\t\tfor i in docs:\n\t\t\ttfidf = (float(docs[i])/doc_terms[i]) * idf[token]\n\t\t\tif token in tf_idf:\n\t\t\t\ttf_idf[token][i] = tfidf\n\t\t\telse:\n\t\t\t\ttf_idf[token] = {}\n\t\t\t\ttf_idf[token][i] = tfidf\n\ndef get_all_doc_length():\n\tfor i,x in doc_term_index.iteritems():\n\t\tlength = 0.0\n\t\tfor j in x:\n\t\t\tlength += math.pow(tf_idf[j][i],2)\n\t\tdoc_length[i] = math.sqrt(length)\n\n\ndef get_query_tf_idf():\n\tquery_tf_idf = {}\n\tfor token in tf:\n\t\tif token in query_words:\n\t\t\tt_f = 1/float(len(query_words))\n\t\t\ttf_idf = t_f * idf[token]\n\t\t\tif token in query_tf_idf:\n\t\t\t\tquery_tf_idf[token] += tf_idf\n\t\t\telse:\n\t\t\t\tquery_tf_idf[token] = tf_idf\t\t\n\t\telse:\n\t\t\tquery_tf_idf[token] = 0.0\n\treturn query_tf_idf\n\ndef get_query_length(val):\n\tlength = 0.0\n\tfor i in val:\n\t\tlength += math.pow(val[i],2)\n\treturn math.sqrt(length)\n\ndef cosine_similarity(ql,qtfidf):\n\tVSCS = {}\n\tfor i,x in doc_term_index.iteritems():\n\t\tnumerator = 0.0\n\t\tdenominator = 0.0\n\t\tfor token in x:\n\t\t\tnumerator += tf_idf[token][i] * qtfidf[token]\n\n\t\tdenominator = doc_length[i] * ql\n\t\tVSCS[i] = numerator/denominator\n\treturn VSCS\n\n\t\t \ndef main():\n\tstart_time = time.time()\n\tget_tf_df()\n\tget_term_idf()\n\tget_tf_idf()\n\tget_all_doc_length()\n\tglobal query_words\n\tqueries = open('queries.txt','r')\n\ttask1_file = open('task1_query_result_VSCS.csv','a')\n\twriter = csv.writer(task1_file)\n\twriter.writerow([\"Query_Id\", \"Literal\", \"Doc_Id\",'Rank','Score','System Name'])\n\tfor i in queries:\n\t\toutput = []\n\t\tsentence = i.strip('\\n')\n\t\twords = sentence.split()\n\t\tquery_id = words[0]\n\t\tquery_words = words[1:]\n\t\tval = get_query_tf_idf()\n\t\tquery_length = get_query_length(val)\n\t\tVSCS = cosine_similarity(query_length,val)\n\t\tranked_documents = sorted(VSCS.items(), key=operator.itemgetter(1), reverse=True)\n\t\tfor y,z in enumerate(ranked_documents):\n\t\t\tif y < 100 and z[1] != 0:\n\t\t\t\toutput.append([query_id,'Q0',z[0],y+1,z[1],'V.S.C.S.'])\n\t\tfor row in output:\n\t\t\twriter.writerow(row)\n\n\tprint(\"\\n\\nTime Taken : %0.2f seconds\" % (time.time() - start_time))\n\nif __name__ == \"__main__\":\n\tmain()","sub_path":"Submission/task1_VSCS.py","file_name":"task1_VSCS.py","file_ext":"py","file_size_in_byte":3246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"405393064","text":"import nbdpt.rockstar.rockstarscript as rs\n\nrockstardir = '/home1/02575/lmanders/code/Rockstar-Galaxies/'\nnnodes = 1\nncorespernode = 32 \nqueue = 'largemem' #largemem, normal on stampede\nemail = 'l.sonofanders@gmail.com' #please change so I don't get all your emails :P\nmachine = 'stampede' #stampede, pleiades, bluewaters or interactive\nwalltimemain = '24:00:00' #need to use this notation \nwalltimepost = '3:00:00'\nmassdef = '200c' #mass options, 'vir', '###b', '###c'\nmassdef2 = None\nServerInterface = 'ib0' #'ipogif0' on bluewaters\nfileformat = 'TIPSY'\n\ndef make():\n rs.snaps()\n rs.cfg(ncorespernode=ncorespernode, nnodes=nnodes, \n ServerInterface=ServerInterface, massdef=massdef, \n massdef2=massdef2, fileformat=fileformat) \n rs.mainsubmissionscript(nnodes=nnodes, ncorespernode=ncorespernode, \n machine=machine, email=email, \n rockstardir=rockstardir, queue=queue, walltime=walltimemain)\n rs.postsubmissionscript(nnodes=nnodes, ncorespernode=ncorespernode,\n machine=machine, email=email, \n rockstardir=rockstardir, queue=queue, walltime=walltimepost, fileformat=fileformat)\n \n#then submit rockstar.sbatch to the queue, and rockstar.post.sbatch to the queue \n# depending on the prior finishing ok\n\n#sbatch rockstar.sbatch\n#sbatch --dependency=afterok: rockstar.post.sbatch\n\nif __name__=='__main__': make()\n","sub_path":"nbdpt/rockstar/prepRockstar.py","file_name":"prepRockstar.py","file_ext":"py","file_size_in_byte":1596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"150102920","text":"user1 = input (\"Enter user 1 name \")\r\nuser2 = input (\"Enter user 2 name \")\r\n\r\nplayGame = \"yes\"\r\n\r\nwhile playGame == \"yes\":\r\n\r\n option1 = input (user1 + \" : Choose one of these 3 options : rock, paper, scissors... \").lower()\r\n option2 = input (user2 +\" : Choose one of these 3 options : rock, paper, scissors... \").lower()\r\n\r\n if option1 == option2:\r\n print (\"It is a tie\")\r\n elif option1 == \"paper\":\r\n if option2 == \"scissors\":\r\n print (user2 + \" wins\")\r\n elif option2 == \"rock\":\r\n print (user1 + \" wins\")\r\n else:\r\n print(\"Invalid \" + user2 + \" input! You have not entered rock, paper or scissors, try again.\")\r\n elif option1 == \"scissors\":\r\n if option2 == \"paper\":\r\n print (user1 + \" wins\")\r\n elif option2 == \"rock\":\r\n print (user2 + \" wins\")\r\n else:\r\n print(\"Invalid \" + user2 + \" input! You have not entered rock, paper or scissors, try again.\")\r\n elif option1 == \"rock\":\r\n if option2 == \"paper\":\r\n print (user1 + \" wins\")\r\n elif option2 == \"scissors\":\r\n print (user2 + \" wins\")\r\n else:\r\n print(\"Invalid \" + user2 + \" input! You have not entered rock, paper or scissors, try again.\")\r\n else:\r\n print(\"Invalid \" + user1 + \" input! You have not entered rock, paper or scissors, try again.\")\r\n\r\n playGame = input(\"Do you want to play again? Yes / No : \").lower()\r\n\r\nelse:\r\n print (\"End of Game\")","sub_path":"act4.py","file_name":"act4.py","file_ext":"py","file_size_in_byte":1500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"160954926","text":"import numpy as np\nfrom Prcessing.multipleclassifier import multiclassifier\ndef marko(fasta):\n\n '''分词'''\n f = open(fasta)\n doc = f.readlines()\n list = []\n for seq in doc:\n if seq.startswith(\">\"):\n pass\n else:\n flag = seq.strip()\n a = [flag[i:i + 2] for i in range(len(flag) - 1)]\n list.append(a)\n m = np.array(list)\n '''计算首字母的频率'''\n atgc={}\n for i in range(len(m)):\n if m[i][0][0] in atgc:\n\n atgc[m[i][0][0]]=atgc[m[i][0][0]]+1\n else:\n atgc[m[i][0][0]]=1\n\n print(atgc)\n\n # ll = [\"aa\", \"at\", \"ag\", \"ac\", \"ta\", \"tt\", \"tg\", \"tc\", \"ga\", \"gt\", \"gg\", \"gc\", \"ca\", \"ct\", \"cg\", \"cc\"]\n '''计算转移概率,具体的是先计算出现次数,在除以样本总数'''\n dict = []\n for j in range(len(m[0])):\n flag = {}\n for i in range(len(m)):\n if m[i, j] in flag:\n flag[m[i, j]] = flag[m[i, j]] + 1\n else:\n flag[m[i, j]] = 1\n dict.append(flag)\n\n '''将转移概率当feature,频闭部分为首字母的出现概率'''\n l=[]\n for i in range(len(m)):\n flag=[]\n # if m[i][0][0] in atgc:\n # flag.append(atgc[m[i][0][0]])\n for j in range(len(m[i])):\n if m[i][j] in dict[j]:\n flag.append(dict[j][m[i][j]])\n else:\n flag.append(0)\n l.append(flag)\n fea=np.array(l)\n fea=fea/len(m)\n\n return fea\n\nmarko(fasta=\"fasta\")","sub_path":"Transition_probability_total.py","file_name":"Transition_probability_total.py","file_ext":"py","file_size_in_byte":1539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"143428885","text":"import numpy as np\nimport itertools\nimport optimize\nfrom time import clock\n\n\n# calculate the feature map (function Phi) of point x, degree 2\n# x is a vector in R^d\n# phi(x) = (x_1^2, ..., x_d^2, root^4(2)x_1x_2, ... root^4(2)x_1x_d, root^4(2)x_2x_3, ...)\ndef phi(x):\n d = x.shape[0]\n N = d*(d+1) // 2\n result = np.ones(shape=N)\n idx = d\n for i in np.arange(d):\n result[i] = x[i]*x[i]\n for j in np.arange(i+1, d):\n result[idx] = 2*(1/4) * x[i] * x[j]\n idx = idx + 1\n return result\n\n\n# Polynomial Tensor kernel of degree 2\ndef compute_kernel(x1, x2, x3, x4):\n return np.sum(x1*x2*x3*x4)\n\n\n\"\"\"\ndef rec_call_tensor(data, degree, prev_idx):\n if degree == 0:\n value = compute_kernel()\n permutats = itertools.permutations([x,y,z,t])\n for perm in permutats:\n tensor[perm] = value\n else:\n last_idx = prev_idx.last()\n prev_idx.append(0)\n this_entry = prev_idx.size()\n for x in np.arange(last_idx+1):\n prev_idx[this_entry] = x\n rec_call_tensor(data, degree-1, prev_idx)\n\ndef build_tensor_recursively(data, degree=4):\n n = data.shape[0]\n tensor = np.zeros(shape=np.repeat(n, degree), dtype=float)\n\n for x in np.arange(n):\n prev_idx = [x]\n rec_call_tensor(data, degree-1, prev_idx)\n\"\"\"\n\n\ndef build_tensor_kernel(data, degree=4):\n n = data.shape[0]\n d = data.shape[1]\n feature_d = d*(d+1) // 2\n tensor = np.zeros(shape=np.repeat(n, degree), dtype=float)\n feature_data = np.zeros(shape=(n, feature_d))\n for i in np.arange(n):\n feature_data[i] = phi(data[i])\n\n for x in np.arange(n):\n for y in np.arange(x+1):\n for z in np.arange(y+1):\n for t in np.arange(z+1):\n value = compute_kernel(feature_data[x], feature_data[y],\n feature_data[z], feature_data[t])\n permutats = itertools.permutations([x, y, z, t])\n for perm in permutats:\n tensor[perm] = value\n\n return tensor\n\n\n# TODO: consider a random permutation\ndef divide_data(data):\n ntrain = 60\n nval = 60\n # ntest = x.shape[0] - ntrain - nval\n\n x_train = data[0:ntrain, ]\n x_val = data[ntrain:(ntrain+nval), ]\n x_test = data[(ntrain+nval):, ]\n return x_train, x_val, x_test\n\n\ndef read_data(fname):\n data = np.loadtxt(fname, delimiter=',', usecols=np.arange(2, 35))\n X = data[:, 1:] # take all attributes but the first\n Y = data[:, 0] # take the first attribute (response variable)\n return X, Y\n\n\n# create normally distributed data\ndef create_data(n=60, d=6, sparse=6):\n X = np.random.normal(size=(n, d))\n noise = np.random.normal(size=(n, 1))\n rho = 0.05\n\n N = d*(d+1)//2\n\n w = np.zeros(shape=(N, 1))\n for _ in np.arange(sparse):\n idx = np.random.randint(0, N)\n while w[idx] != 0:\n idx = np.random.randint(0, N)\n w[idx] = 1\n\n feature_X = np.zeros(shape=(n, N))\n for i in np.arange(n):\n feature_X[i] = phi(X[i])\n Y = np.dot(feature_X, w) + rho*noise\n return X, Y\n\nnp.random.seed(23)\n# this file has 194 lines containing 35 entries each\n# filename = 'wpbc.data_edit.txt'\n\n# x, y = read_data(filename)\nx, y = create_data(n=30)\n\n# xtrain, xval, xtest = divide_data(x)\n# ytrain, yval, ytest = divide_data(y)\n\nprint(\"Building the tensor kernel of degree 4\")\ntensor = build_tensor_kernel(x)\n\nprint(\"Starting optimization\")\nstart = clock()\noptimize.coordinate_descent(tensor, y)\nend = clock()\nprint(\"Elapsed time: \", end-start)\n\n# optimize.coordinate_descent(tensor, ytrain)\n\n","sub_path":"Python/tensorcode.py","file_name":"tensorcode.py","file_ext":"py","file_size_in_byte":3659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"313467139","text":"'''\nbasic component template\n\nall components - \n 1) connect to control bus\n 2) register with CCM\n 3) get their config\n 4) do some stuff\n 5) shutdown\n \nwhile they are doing stuff, \n they send updates to the CCM\n they get updates from the CCM\n \nCreated on 10 Aug 2018\n\n@author: esmipau\n'''\n__version__ = '0.0.4'\n\nimport time\nimport logging\nfrom queue import Queue\n\nfrom utils import defaults\nfrom utils.config import Config\nfrom utils.msgTypes import LTEMsgTypes\nfrom utils.controlClient import ControlClient\n\nlogging.basicConfig(level=logging.INFO, format='[%(asctime)s %(module)s] %(message)s')\n\nclass PeerTemplate(object):\n \n # progress{} is a dictionary of what ever you want reported by progress \n progress = {}\n \n def __init__(self, argv, description):\n self.desc = description \n self.ccmQ = Queue() # msgs from the CCM will appear here\n # check if the variable has been defined in the owner class\n if not hasattr(self, 'configAttrs'):\n self.configAttrs = None\n logging.info('Setting default config')\n self.config = Config(argv, self.configAttrs) # handle initial configuration\n self.configAttrs = self.config.getDefaults() \n self.ccm_last_checked = time.time() # time the ccmQ was last checked\n self.connected = False\n \n def main(self, ccmCheck = 2, pause = 1, progressReport = 20):\n ''' main control loop - run from a separate thread '''\n logging.info('component starting')\n self._start()\n prog_time = cur_time = last_checked = time.time() \n while self.config.keepAlive():\n # todo - need to thnk about this\n # if the app really needs to run each step manually, \n # then it should be refactored! \n \n raw = self.getInput()\n if raw:\n results = self.process(raw)\n self.distribute(results)\n elif pause > 0:\n # if there is nothing to do, then have a nap\n time.sleep(pause)\n \n cur_time = time.time()\n if cur_time - prog_time > progressReport:\n self.getProgress()\n self._sendProgress() \n prog_time = cur_time\n if cur_time - last_checked > ccmCheck:\n self._check_CCM()\n last_checked = cur_time\n \n logging.info('component ending')\n self.closeDown() \n \n def _start(self):\n # connect to control bus\n ret = self._connect()\n self.connected = True\n logging.info('Connected')\n if ret:\n logging.info('Registering')\n ret = self._register() # register as service\n \n if ret: \n # tell the CCM what we know about our config \n self._setConfig()\n\n # ask the CCM what it knows about our config\n ret = self._config() # get our config\n \n if ret:\n # return control to parent\n logging.info('Activating')\n needed = self.check_config()\n ret = self.activate(needed)\n \n \n def _connect(self):\n # wait for CONNECT_ACK to complete connection from CCM\n self.cC = ControlClient(self.config, self.ccmQ)\n \n ret, msg = self._wait_for_response(\n LTEMsgTypes.CONNECT_ACK, LTEMsgTypes.CONNECT_NACK)\n if ret: \n logging.info('Connected to controlbus as peer {}'.format(msg))\n self.config.set('peer', msg)\n return ret\n \n def _register(self):\n # if we are connected, then we need to register with CCM\n msgType = LTEMsgTypes.REGISTER_REQ\n self.cC.send(msgType, self.desc)\n ret, _ = self._wait_for_response(\n LTEMsgTypes.REGISTER_ACK, LTEMsgTypes.REGISTER_NACK)\n return ret\n\n def check_config(self):\n needed = [] # list of attrs we need to get values for\n for attr in self.configAttrs:\n if self.config.get(attr, None) == None:\n needed.append(attr)\n return needed\n \n def _config(self):\n # get needed values from the CCM\n self.cC.send(LTEMsgTypes.CONFIG_REQ, '')\n\n ret, msg = self._wait_for_response(\n LTEMsgTypes.CONFIG_SET, LTEMsgTypes.CONFIG_NACK)\n if ret: \n self.config.update(msg)\n msgType = LTEMsgTypes.CONFIG_ACK\n self.cC.send(msgType, '')\n return ret\n\n def _setConfig(self):\n # get needed values from the CCM\n buf = ''\n ret = True\n for attr in self.configAttrs:\n if self.configAttrs[attr]:\n buf += '{}={};'.format(attr,self.configAttrs[attr])\n if buf:\n self.cC.send(LTEMsgTypes.CONFIG_SET, buf)\n\n ret, _ = self._wait_for_response(\n LTEMsgTypes.CONFIG_ACK, LTEMsgTypes.CONFIG_NACK)\n return ret\n \n def _wait_for_response(self, ack, nack):\n done = False\n msg = ''\n timeout = int(self.config.get('timeout', '5')) \n cur_time = last_report_time = time.time()\n while cur_time - last_report_time < timeout and not done:\n cur_time = time.time() \n if self.ccmQ.empty():\n time.sleep(1)\n else:\n (msgType, msg) = self.ccmQ.get()\n if msgType == ack:\n done = True \n elif msgType == nack:\n pass\n else: \n logging.info('Got unexpected msg {} while waiting for {}'.format(msgType.name, ack.name))\n break\n return done, msg\n \n def _check_CCM(self):\n if not self.connected and self.config.keepAlive():\n ret = self._connect()\n if ret:\n self.connected = True\n logging.info('Re-Connected')\n if ret:\n logging.info('Re-Registering')\n ret = self._register() # register as service\n while self.connected and not self.ccmQ.empty():\n (msgType, msg) = self.ccmQ.get()\n if msgType == LTEMsgTypes.SHUTDOWN_REQ:\n logging.info('Shutdown request')\n self.cleanup()\n self.config.set('KeepAlive', 'False') \n self.cC.send(LTEMsgTypes.SHUTDOWN_ACK, '')\n #self.closeDown()\n elif msgType == LTEMsgTypes.CONFIG_SET:\n self.config.update(msg)\n msgType = LTEMsgTypes.CONFIG_ACK\n self.cC.send(msgType, '')\n elif msgType == LTEMsgTypes.DROP_PEER:\n if self.config.keepAlive():\n logging.warn('Connection to CCM lost!') # local connecttion to CCM is dead\n else:\n logging.warn('Planned disconnection from CCM')\n self.connected = False\n else: \n logging.info('Got unexpected msg {} from controlbus'.format(msgType.name))\n self.ccm_last_checked = time.time()\n\n def getProgress(self):\n \"\"\" override this to get the progress information \n that will be reported to the ccm in the 'progress' dictionary\n \"\"\"\n logging.warn('peerTemplate.getProgress() has not been overridden!')\n pass \n \n def _sendProgress(self):\n # send a progress report\n buf = ''\n for k,v in self.progress.items():\n buf += '{}={};'.format(k,v) \n #logging.info('Sending progress report: {}'.format(buf))\n if self.connected:\n self.cC.send(LTEMsgTypes.PROGRESS, buf)\n \n def getConfig(self):\n ''' get the configuration management object '''\n return self.config\n \n def setup(self, argv = None):\n maxRetries = self.config.get('maxRetries', defaults.MaxRetries)\n # need to wait for registration to control bus\n retryCount = 0\n self.config.check_cb()\n while not self.config.get('HaveConfig') and retryCount < maxRetries:\n time.sleep(1)\n self.config.check_cb()\n retryCount += 1\n\n def getInput(self):\n \"\"\" Anything return by this will be passed to process() \n and its results will be passed to distribute\n \"\"\"\n return None \n \n def process(self, raw):\n pass\n \n def distribute(self, results):\n pass\n \n def cleanup(self):\n self.getProgress()\n self._sendProgress()\n \n logging.info('Cleanup')\n \n \n def closeDown(self):\n ''' release any resources '''\n self.shutdown()\n \n \n \n\n \n\n","sub_path":"utils/peerTemplate.py","file_name":"peerTemplate.py","file_ext":"py","file_size_in_byte":8885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"532768987","text":"import logging\nimport re\nimport time\n\nfrom django.conf import settings\nfrom django_sse.views import BaseSseView\nfrom django.template.response import TemplateResponse\n\nfrom .models import TaskStore\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass Status(BaseSseView):\n UUID_MATCHER = re.compile(r'uuid:\"([0-9a-zA-Z-]+)\"')\n\n def get_store(self):\n if getattr(self, '_store', None) is None:\n if not self.request.user.is_authenticated():\n return None\n try:\n store = TaskStore.objects.get(user=self.request.user)\n setattr(self, '_store', store)\n except TaskStore.DoesNotExist:\n return None\n\n return self._store\n\n def get_changed_ids(self, store, head1, head2):\n proc = store._git_command(\n 'diff', head1, head2\n )\n stdout, stderr = proc.communicate()\n\n changed_tickets = set()\n for raw_line in stdout.split('\\n'):\n line = raw_line.strip()\n if not line or line[0] not in ('+', '-'):\n continue\n matched = self.UUID_MATCHER.search(line)\n if matched:\n changed_tickets.add(\n matched.group(1)\n )\n\n return changed_tickets\n\n def iterator(self):\n store = self.get_store()\n if not store:\n return\n store.sync()\n created = time.time()\n last_sync = time.time()\n head = self.request.GET.get('head', store.repository.head())\n while time.time() - created < 240:\n if time.time() - last_sync > 15:\n last_sync = time.time()\n store.sync()\n\n store = self.get_store()\n new_head = store.repository.head()\n if head != new_head:\n logger.info('Found new repository head -- %s' % new_head)\n ids = self.get_changed_ids(store, head, new_head)\n for id in ids:\n self.sse.add_message(\"task_changed\", id)\n head = new_head\n self.sse.add_message(\"head_changed\", new_head)\n else:\n self.sse.add_message(\"heartbeat\", str(time.time()))\n\n yield\n time.sleep(5)\n\n\ndef home(request):\n return TemplateResponse(\n request,\n 'home.html',\n {\n 'DEBUG': settings.DEBUG,\n }\n )\n","sub_path":"inthe_am/taskmanager/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"15875993","text":"'''\n### General\n\n* Author - Davis Vaughan\n* Date - 9/21/2017\n* Homework - 03\n\n### Purpose\n\nThe purpose of this module is to calculate the Monte Carlo value of European options in multiple\nways using varying methods of accuracy.\n\n### Comments\n\nIMPORTANT! - The seed is set at the C++ level using Numba, not at the Python level. Because of this,\nwe will not get the same results unless you implement this with Numba. Setting a seed of 100 at\nthe C++ level will generate a different stream of random numbers than setting a seed of 100 at\nthe Python level.\n\nSorry about that, but I am fairly confident in the implementation, as increasing the number of simulations\nget's me very close the the exact value of the option.\n\n### Thoughts on numerical accuracy\n\nIt seems like using a smaller time step seems to greatly increase the accuracy.\nAdding the second order term did not help the accuracy as much as I had expected.\nThe biggest benefits seem to come from increasing the number of simulations.\nUsing n = 50000 greatly increases the accuracy of the final results.\n\n### Numerical methods used\n\n* Both Euler and Milstein discretization of GBM were used to simulate the sample paths.\n\n* Using the value of S_T from the sample paths, the average payoffs of the option were calculated,\nand discounted back to time zero.\n\n* The exact solution to the GBM SDE was also used to compare accuracy results.\n\n### Included files\n\n`main.py` - (DRIVER) a demo of the GBM functions using the parameters set in the HW-2 pdf.\n\n`gbm_simulator.py` - The functions that generate the stock price simulations using either\n Euler or Miltstein methods.\n\n`option_value.py` - Functions to calculate the value of the option from the simulated values.\n\n`option_value_exact.py` - Previous HW code to generate the exact value of the options.\n\n### How to run\n\nBecause the main.py file includes the code:\n\n```python\nif __name__ == \"__main__\":\n print(main())\n```\n\nthe easiest way to run the example is from the terminal.\n\nWithin your command line / terminal, navigate to the folder containing the main.py script, and just run:\n\n```bash\npython2 main.py\n```\n\n^ Make sure you are using python2.\n\nYou should get the following results:\n\n```python\n MC_option_value algorithm call_put dt exact_option_value option_type \\\n0 28.189921 euler call .01 28.684884 european\n1 29.255651 euler put .01 28.198446 european\n2 28.164755 euler call .001 28.684884 european\n3 28.579581 euler put .001 28.198446 european\n4 27.931396 milstein call .01 28.684884 european\n5 29.136742 milstein put .01 28.198446 european\n6 28.118083 milstein call .001 28.684884 european\n7 28.570122 milstein put .001 28.198446 european\n\n absolute_error\n0 0.494962\n1 1.057205\n2 0.520129\n3 0.381135\n4 0.753487\n5 0.938296\n6 0.566800\n7 0.371676\n```\n'''\n\n\nfrom gbm_simulator import simulate_gbm\nfrom option_value import price_option\nfrom option_value_exact import price_eur_call, price_eur_put\nimport numpy as np\nimport pandas as pd\n\ndef main():\n\n # Parameters\n n = 1000\n s = 100\n k = 100\n #mu = 0.08 # Only r is used\n r = .03\n div_yield = 0.025\n t = 0\n t_terminal = 1\n dt = 0.01\n dt2 = 0.001\n sigma = 0.75\n seed = 10\n\n # Sims\n gbm_euler_t01 = simulate_gbm(n, s, r, div_yield, t, t_terminal, dt, sigma, method = \"euler\", seed = seed)\n gbm_milstein_t01 = simulate_gbm(n, s, r, div_yield, t, t_terminal, dt, sigma, method = \"milstein\", seed = seed)\n gbm_euler_t001 = simulate_gbm(n, s, r, div_yield, t, t_terminal, dt2, sigma, method = \"euler\", seed = seed)\n gbm_milstein_t001 = simulate_gbm(n, s, r, div_yield, t, t_terminal, dt2, sigma, method = \"milstein\", seed = seed)\n\n # Exact prices\n euro_call = price_eur_call(s, k, r, div_yield, t_terminal, t, sigma)\n euro_put = price_eur_put(s, k, r, div_yield, t_terminal, t, sigma)\n\n # Prices\n option_prices = pd.DataFrame({\n 'algorithm' : [\"euler\"] * 4 + [\"milstein\"] * 4,\n 'dt' : [\".01\", \".01\", \".001\", \".001\"] * 2,\n 'option_type' : [\"european\"] * 8,\n 'call_put' : [\"call\", \"put\"] * 4,\n \"exact_option_value\": [euro_call, euro_put] * 4,\n 'MC_option_value' : [\n price_option(gbm_euler_t01, k, r, t_terminal, \"call\", \"european\"),\n price_option(gbm_euler_t01, k, r, t_terminal, \"put\", \"european\"),\n price_option(gbm_euler_t001, k, r, t_terminal, \"call\", \"european\"),\n price_option(gbm_euler_t001, k, r, t_terminal, \"put\", \"european\"),\n price_option(gbm_milstein_t01, k, r, t_terminal, \"call\", \"european\"),\n price_option(gbm_milstein_t01, k, r, t_terminal, \"put\", \"european\"),\n price_option(gbm_milstein_t001, k, r, t_terminal, \"call\", \"european\"),\n price_option(gbm_milstein_t001, k, r, t_terminal, \"put\", \"european\")\n ]\n })\n\n # Add the absolute error\n option_prices['absolute_error'] = np.abs(option_prices.MC_option_value - option_prices.exact_option_value)\n\n return option_prices\n\nif __name__ == \"__main__\":\n print(main())","sub_path":"assignments/hw-03 efficient/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"628682959","text":"def table(puzzle):\n style = \"\"\"\n \n \"\"\"\n txt = \"\" +\\\n \"\".join([\"\" +\n \"\".join([\"\".format(\"\" if cell == \"X\" else cell) for cell in row]) +\n \"\" for row in puzzle]) + \\\n \"
{}
\"\n with open(\"su.html\", \"w\") as file:\n file.write(style+txt)\n","sub_path":"htmltable.py","file_name":"htmltable.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"255502737","text":"# secant.py\r\nimport numpy as np\r\n\r\ndef secant(myfunc,xr_old,xr,reltol):\r\n error = abs(xr - xr_old)\r\n count = 0;\r\n while error > reltol:\r\n nX = myfunc(xr) * (xr - xr_old)\r\n dX = myfunc(xr) - myfunc(xr_old)\r\n xm = xr - (nX / dX)\r\n xr_old = xr\r\n xr = xm\r\n error = ( abs(xr - xr_old) / xr ) * 100\r\n return xr\r\n \r\n","sub_path":"secant.py","file_name":"secant.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"79707990","text":"# Given an array of n positive integers and a positive integer s, \n# find the minimal length of a subarray of which the sum ≥ s. If there isn't one, return 0 instead.\n#\n# For example, given the array [2,3,1,2,4,3] and s = 7,\n# the subarray [4,3] has the minimal length under the problem constraint.\n#\n# More practice:\n# If you have figured out the O(n) solution, try coding another solution of which the time complexity is O(n log n).\n#\n\nclass Solution(object):\n def minSubArrayLen(self, s, nums):\n if not nums:\n return 0\n sum = 0\n min_global = len(nums) + 1 # 初始\n left = 0 # left 和 i双点\n for i in range(len(nums)):\n sum += nums[i]\n while sum >= s:\n min_global = min(min_global, i - left + 1) \n sum -= nums[left]\n left += 1\n # 其实每次出loop都是sum < s的\n return min_global if min_global <= len(nums) else 0\n \n # http://www.tangjikai.com/algorithms/leetcode-209-minimum-size-subarray-sum\n # 关于BST解法\n # Right pointer is fixed, find the last left pointers with nums[l, r] >= sum.\n # This is a finding upper bound question: the first element meets nums[mid] > nums[r] - s (nums[r] - nums[mid] < s), \n # which means nums[mid - 1:r] is the shortest subarray, so min(res, r - l + 1).\n \n # 大概的意思就是 如 http://yucoding.blogspot.com/2015/06/leetcode-question-minimum-size-subarray.html 所说\n # 制造一个sum array, 然后每个num,右边固定,左边找到一个,使的r - l + 1最短,因为要弄n词,素以nlogn\n # 是负数就不行了,这题是正数\n class Solution(object):\n def minSubArrayLen(self, s, nums):\n res = len(nums) + 1\n \n for i in range(1, len(nums)):\n nums[i] += nums[i - 1]\n \n left = 0\n for index, sum in enumerate(nums):\n if sum >= s:\n left = self.findLeft(nums, left, index, sum, s) # l\n res = min(res, index - left + 1)\n \n return res if res <= len(nums) else 0\n \n def findLeft(self, nums, left, right, sum, s):\n while left < right:\n mid = (left + right) / 2\n \n if nums[mid] > sum - s: # s > sum - nums[mid] (upper bound) sum - nums[mid] 就是mid到i的sum,如果太小就要往左\n # 但其实不准确,因为sum - nums[mid]不包括nums aggragate前mid的值,所以sum - nums[mid]比实际要求要小\n # 分析 11, [1, 2, 3, 4, 5] --》 \n right = mid\n else:\n left = mid + 1\n \n return left\n # http://www.jyuan92.com/blog/leetcode-minimum-size-subarray-sum/ 短路了。。。\n","sub_path":"minimum-size-subarray-sum-(M)-(209).py","file_name":"minimum-size-subarray-sum-(M)-(209).py","file_ext":"py","file_size_in_byte":2864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"132332725","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\n__author__ = 'City10th'\n\n'脚本控制'\n\nimport os,sys\n\n# 解析第2指令\n# 主脚本 第2指令(second_dict) 第3指令 第4指令 ...\n# second_dict: 第2指令-脚本 字典\n# cmd_dict: 第2指令-语言 字典\n# extension_cmd_dict: 后缀-命令 字典\ndef second_cmd(second_dict, cmd_dict={'default':''}):\n extension_cmd_dict = {'.py':'python', '.sh':'sh'}\n # 使用什么语言(赋值cmd_dict)\n for x in second_dict: # 如果 第2指令-脚本 字典 中有\n if x not in cmd_dict: # 如果用户未指定语言\n # 获取脚本后缀\n extension = os.path.splitext(second_dict[x])[-1]\n if extension in extension_cmd_dict: # 如果默认 后缀-命令 字典中有\n # 则用字典值\n cmd_dict[x] = extension_cmd_dict[extension] + ' '\n else: # 如果默认 后缀-命令 字典中没有\n # 则,用默认值\n cmd_dict[x] = cmd_dict['default'] + ' '\n if cmd_dict[x] == ' ': cmd_dict[x]='' #强迫症语句\n # 【如何使用脚本】\n # 如果缺失第2指令\n if len(sys.argv) == 1:\n # 如果有'link_to',将 “主脚本” 翻译为 \"link_to脚本\"\n if 'only_1th_cmd' in second_dict:\n os.system(cmd_dict['only_1th_cmd'] + second_dict['only_1th_cmd'])\n sys.exit(0)\n else:\n print('@!@ 缺失第2指令,不执行任何操作')\n sys.exit(0)\n # 没有'only_1th_cmd' 执行此脚本剩下语句,即此函数啥也不干\n # 如果有第2指令\n # 如果第2指令在 第2指令-脚本 字典 中,\n # 则将“主脚本 第2指令 ...” 翻译为:\n # “字典中脚本 ...”\n elif sys.argv[1] in second_dict:\n os.system(cmd_dict[sys.argv[1]] + second_dict[sys.argv[1]] +' '+ ' '.join(sys.argv[2:]))\n sys.exit(0)\n # 如果字典中没有,但定义了 only_1th_cmd,\n # 则将“主脚本 ...” 翻译为:\n # “only_1th_cmd脚本 ...”\n elif 'only_1th_cmd' in second_dict: #第一个指令不在second_dict中(且有‘link_to’)\n os.system(cmd_dict['only_1th_cmd'] + second_dict['only_1th_cmd'] +' '+ ' '.join(sys.argv[1:]))\n sys.exit(0)\n else:\n print('@!@ 第2指令未指定,不执行任何操作')\n sys.exit(0)\n\n#执行分步,输入 n 退出脚本,输入 y 继续脚本\ndef FenBu_simple(text):\n p0 = False\n while p0 == False:\n Fa = raw_input(text)\n if (Fa == 'n') or (Fa == 'N'):\n print(\"@!@ bye\"); sys.exit(0)\n if (Fa == 'y') or (Fa == 'Y'):\n p0 = True","sub_path":"lib/city_py_mo/script_control.py","file_name":"script_control.py","file_ext":"py","file_size_in_byte":2766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"112876267","text":"#! python3\r\n# -*- encoding: utf-8 -*-\r\n'''\r\nCurrent module: tests.test_driver\r\n\r\nRough version history:\r\nv1.0 Original version to use\r\n\r\n********************************************************************\r\n @AUTHOR: Administrator-Bruce Luo(罗科峰)\r\n MAIL: luokefeng@163.com\r\n RCS: tests.test_driver, v1.0 2018年9月18日\r\n FROM: 2018年9月18日\r\n********************************************************************\r\n======================================================================\r\n\r\nProvide a function for the automation test\r\n\r\n'''\r\n\r\n#! python3\r\n# -*- encoding: utf-8 -*-\r\n'''\r\nCurrent module: tests.test_driver\r\n\r\nRough version history:\r\nv1.0 Original version to use\r\n\r\n********************************************************************\r\n @AUTHOR: Administrator-Bruce Luo(罗科���)\r\n MAIL: luokefeng@163.com\r\n RCS: tests.test_driver, v1.0 2018年8月20日\r\n FROM: 2018年8月20日\r\n********************************************************************\r\n======================================================================\r\n\r\nProvide a function for the automation test\r\n\r\n'''\r\n\r\nimport unittest, os\r\nfrom rtsf.p_executer import TestRunner\r\nfrom rtsf.p_applog import logger\r\nfrom appuidriver.driver import LocalDriver,RemoteDriver\r\nfrom appuidriver.remote.AppiumJs import AppiumJs\r\nfrom webuidriver.remote.SeleniumJar import SeleniumJar\r\n\r\nclass TestDriver(unittest.TestCase):\r\n '''\r\n @note: adb version 1.0.39; %ANDROID_HOME% = D:\\auto\\buffer\\test\\test_rtsf_web\\android; 天天模拟器 v2.5.6\r\n '''\r\n @classmethod\r\n def setUpClass(cls):\r\n cls.case_file = r'data\\test_case.yaml'\r\n cls.jar_path = r'C:\\d_disk\\auto\\buffer\\test\\tools\\seleniumjar\\selenium-server-standalone-3.14.0.jar'\r\n cls.java_path = \"java\"\r\n \r\n platform_tools = r'C:\\d_disk\\auto\\buffer\\test\\tools\\android\\platform-tools' \r\n cls._adb_exe_path = os.path.join(platform_tools, \"adb.exe\") \r\n cls._aapt_exe_path = os.path.join(platform_tools, \"aapt.exe\")\r\n cls._apk_abs_path = r'C:\\d_disk\\auto\\buffer\\test\\tools\\android\\ApiDemos-debug.apk'\r\n cls._app_package = 'io.appium.android.apis'\r\n cls._app_activity = '.ApiDemos'\r\n\r\n \r\n def test_LocalDriver(self):\r\n LocalDriver._adb_exe_path = self._adb_exe_path\r\n LocalDriver._aapt_exe_path = self._aapt_exe_path\r\n LocalDriver._apk_abs_path = self._apk_abs_path\r\n LocalDriver._app_package = self._app_package\r\n LocalDriver._app_activity = self._app_activity\r\n \r\n server = AppiumJs(port = 4723).bind_device(device_id = \"127.0.0.1:6555\", platform_version = \"4.4.4\")\r\n server.start_server()\r\n \r\n runner = TestRunner(runner = LocalDriver).run(self.case_file)\r\n html_report = runner.gen_html_report()\r\n print(html_report)\r\n self.assertIsInstance(html_report, (list, tuple))\r\n \r\n server.stop_server()\r\n \r\n def test_RemoteDriver(self):\r\n RemoteDriver._aapt_exe_path = self._aapt_exe_path\r\n RemoteDriver._apk_abs_path = self._apk_abs_path\r\n RemoteDriver._app_package = self._app_package\r\n RemoteDriver._app_activity = self._app_activity\r\n\r\n\r\n hub = SeleniumJar(self.jar_path, self.java_path).hub(4444)\r\n hub.start_server()\r\n \r\n node = AppiumJs(port = 4723).bind_device(device_id = \"127.0.0.1:6555\", platform_version = \"4.4.4\").node(\"localhost\", hub_address=(\"localhost\", 4444))\r\n node.start_server() \r\n \r\n runner = TestRunner(runner = RemoteDriver).run(self.case_file)\r\n html_report = runner.gen_html_report()\r\n print(html_report)\r\n self.assertIsInstance(html_report, (list, tuple))\r\n \r\n node.stop_server()\r\n hub.stop_server()\r\n \r\nif __name__ == \"__main__\":\r\n# logger.setup_logger(\"debug\")\r\n# unittest.main()\r\n suite = unittest.TestSuite()\r\n suite.addTest(TestDriver(\"test_LocalDriver\"))\r\n runner = unittest.TextTestRunner(verbosity=2)\r\n runner.run(suite) \r\n \r\n \r\n ","sub_path":"tests/test_driver.py","file_name":"test_driver.py","file_ext":"py","file_size_in_byte":4120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"224678369","text":"from kafka import KafkaProducer\nfrom kafka.errors import KafkaError\nimport json\nimport os\nimport sys\n\nproducer = KafkaProducer(bootstrap_servers=['localhost:9092'])\n\ncurrent_dir = sys.path[0]\ndir = os.path.join(current_dir,'samples')\nfor file in os.listdir(dir):\n if '.json' in file:\n file = json.loads(open(os.path.join(dir,file)).read())\n # produce json messages\n\n producer = KafkaProducer(value_serializer=lambda m: json.dumps(m).encode('ascii'))\n producer.send('document_incoming', file)\n\n# # produce asynchronously\n# for _ in range(1):\n# producer.send('document_incoming', b'msg')\n\ndef on_send_success(record_metadata):\n print(record_metadata.topic)\n print(record_metadata.partition)\n print(record_metadata.offset)\n\nproducer.flush()\n\n# configure multiple retries\nproducer = KafkaProducer(retries=5)","sub_path":"samples_insertion.py","file_name":"samples_insertion.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"513790152","text":"# -*- coding: utf8 -*-\n# Behave/Gherkin Helper for GEdit\n#\n# Copyright (C) 2012 Red Hat, Inc.\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\n# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY\n# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\n# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n# Language:\tBehave\n# Maintainer: Matěj Cepl mceplATceplDOTeu\n#\n# Requires cucutags Python module\n# install it via\n# pip install cucutags\n\nimport string\nimport os.path\nimport time\nimport cucutags\nfrom gi.repository import GObject, Gedit, Gtk, Gio, GLib\n\nNON_WHITE_CHARS = string.punctuation + string.whitespace\n__version__ = \"3\"\n\nUI_XML = \"\"\"\n\n \n \n \n \n \n \n \n\n\"\"\"\n\n\nclass BehaveHelper(GObject.Object, Gedit.WindowActivatable):\n __gtype_name__ = 'BehaveHelperPlugin'\n\n window = GObject.property(type=Gedit.Window)\n\n def __init__(self):\n GObject.Object.__init__(self)\n\n def do_activate(self):\n self._add_ui()\n\n def do_deactivate(self):\n manager = self.window.get_ui_manager()\n manager.remove_ui(self._ui_merge_id)\n manager.remove_action_group(self._actions)\n manager.ensure_update()\n\n def do_update_state(self):\n self.view = self.window.get_active_view()\n\n if self.view:\n self.buffer = self.view.get_buffer()\n filename = self.buffer.get_uri_for_display()\n if os.path.splitext(filename)[1] == '.feature':\n startdir = os.path.dirname(os.path.dirname(filename))\n if startdir:\n self.parsed_data = cucutags.Session(startdir)\n self._actions.set_sensitive(self.view.get_editable())\n\n def _add_ui(self):\n manager = self.window.get_ui_manager()\n self._actions = Gtk.ActionGroup(\"JumpToStepActions\")\n self._actions.add_actions([\n ('JumpToStep', Gtk.STOCK_INFO, \"_Jump to step\",\n 'K',\n \"Jump to the step for the current feature\",\n self.jump_to_step),\n ])\n manager.insert_action_group(self._actions)\n self._ui_merge_id = manager.add_ui_from_string(UI_XML)\n manager.ensure_update()\n\n @staticmethod\n def get_line_text(iter):\n start_of_line = iter.copy()\n start_of_line.set_line_offset(0)\n\n if iter.ends_line():\n end_of_line = iter\n else:\n end_of_line = iter.copy()\n end_of_line.forward_to_line_end()\n\n return start_of_line.get_text(end_of_line)\n\n def _switch_to_location(self, name, line_no):\n location = Gio.File.new_for_path(name)\n existing_tab = self.window.get_tab_from_location(location)\n if existing_tab:\n self.window.set_active_tab(existing_tab)\n view = existing_tab.get_view()\n doc = view.get_buffer()\n curs = doc.get_insert()\n curs_iter = doc.get_iter_at_mark(curs)\n curs_iter.set_line(line_no - 1) # .set_line is 0-based\n doc.place_cursor(curs_iter)\n # see\n # https://git.gnome.org/browse/gedit/tree/gedit/gedit-tab.c#n1112\n # for explanation of this weird workaround\n GLib.idle_add(view.scroll_to_cursor)\n else:\n self.window.create_tab_from_location(\n location,\n Gedit.encoding_get_current(), line_no, 0, False, True)\n\n def jump_to_step(self, something):\n cursor = self.buffer.get_insert()\n cur_iter = self.buffer.get_iter_at_mark(cursor)\n cur_line = self.get_line_text(cur_iter).strip(NON_WHITE_CHARS)\n res = self.parsed_data.get_step(cur_line)\n\n if res:\n fname, lno = res\n self._switch_to_location(fname, lno)\n else:\n status_bar = self.window.get_statusbar()\n sb_msg_id = status_bar.get_context_id(\"BehaveMessage\")\n status_bar.push(sb_msg_id, \"No matching step found\")\n time.sleep(1)\n status_bar.pop(sb_msg_id)\n","sub_path":"behave.py","file_name":"behave.py","file_ext":"py","file_size_in_byte":5055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"152792375","text":"\"\"\"Structure of Document related models API responses with Django Rest Framework serializers.\"\"\"\nfrom datetime import timedelta\nimport re\nfrom urllib.parse import quote_plus\n\nfrom django.conf import settings\nfrom django.urls import reverse\nfrom django.utils import timezone\nfrom django.utils.text import slugify\n\nfrom botocore.signers import CloudFrontSigner\nfrom rest_framework import serializers\n\nfrom ..models import Document\nfrom ..utils import cloudfront_utils, time_utils\nfrom ..utils.url_utils import build_absolute_uri_behind_proxy\nfrom .base import EXTENSION_REGEX, TimestampField\nfrom .playlist import PlaylistLiteSerializer\n\n\nclass DocumentSerializer(serializers.ModelSerializer):\n \"\"\"A serializer to display a Document resource.\"\"\"\n\n class Meta: # noqa\n model = Document\n fields = (\n \"active_stamp\",\n \"extension\",\n \"filename\",\n \"id\",\n \"is_ready_to_show\",\n \"title\",\n \"upload_state\",\n \"url\",\n \"show_download\",\n \"playlist\",\n )\n read_only_fields = (\n \"active_stamp\",\n \"extension\",\n \"filename\",\n \"id\",\n \"is_ready_to_show\",\n \"upload_state\",\n \"url\",\n )\n\n active_stamp = TimestampField(\n source=\"uploaded_on\", required=False, allow_null=True, read_only=True\n )\n filename = serializers.SerializerMethodField()\n url = serializers.SerializerMethodField()\n is_ready_to_show = serializers.BooleanField(read_only=True)\n playlist = PlaylistLiteSerializer(read_only=True)\n\n def _get_extension_string(self, obj):\n \"\"\"Document extension with the leading dot.\n\n Parameters\n ----------\n obj : Type[models.Document]\n The document that we want to serialize\n\n Returns\n -------\n String\n The document with the leading dot if the document has an extension\n An empty string otherwise\n\n \"\"\"\n return \".\" + obj.extension if obj.extension else \"\"\n\n def get_filename(self, obj):\n \"\"\"Filename of the Document.\n\n Parameters\n ----------\n obj : Type[models.Document]\n The document that we want to serialize\n\n Returns\n -------\n String\n The document's filename\n\n \"\"\"\n return (\n f\"{slugify(obj.playlist.title)}_{slugify(obj.title)}\"\n f\"{self._get_extension_string(obj)}\"\n )\n\n def get_url(self, obj):\n \"\"\"Url of the Document.\n\n Parameters\n ----------\n obj : Type[models.Document]\n The document that we want to serialize\n\n Returns\n -------\n String or None\n the url to fetch the document on CloudFront\n None if the document is still not uploaded to S3 with success\n\n \"\"\"\n if obj.uploaded_on is None:\n return None\n\n url = (\n f\"{settings.AWS_S3_URL_PROTOCOL}://{settings.CLOUDFRONT_DOMAIN}/{obj.pk}/document/\"\n f\"{time_utils.to_timestamp(obj.uploaded_on)}{self._get_extension_string(obj)}?response\"\n f\"-content-disposition={quote_plus('attachment; filename=' + self.get_filename(obj))}\"\n )\n\n # Sign the document urls only if the functionality is activated\n if settings.CLOUDFRONT_SIGNED_URLS_ACTIVE:\n date_less_than = timezone.now() + timedelta(\n seconds=settings.CLOUDFRONT_SIGNED_URLS_VALIDITY\n )\n cloudfront_signer = CloudFrontSigner(\n settings.CLOUDFRONT_ACCESS_KEY_ID, cloudfront_utils.rsa_signer\n )\n url = cloudfront_signer.generate_presigned_url(\n url, date_less_than=date_less_than\n )\n\n return url\n\n def validate_title(self, value):\n \"\"\"Force extension removal in the title field (if any).\n\n Parameters\n ----------\n value : Type[string]\n the value sent in the request\n\n Returns\n -------\n String\n The title without the extension if there is one.\n\n \"\"\"\n # pylint: disable=consider-using-f-string\n match = re.match(\n r\"^(?P.*)(\\.{extension_regex:s})$\".format(\n extension_regex=EXTENSION_REGEX\n ),\n value,\n )\n if match:\n return match.group(\"title\")\n\n return value\n\n\nclass DocumentSelectLTISerializer(serializers.ModelSerializer):\n \"\"\"A serializer to display a Document resource for LTI select content request.\"\"\"\n\n class Meta: # noqa\n model = Document\n fields = (\n \"id\",\n \"is_ready_to_show\",\n \"title\",\n \"upload_state\",\n \"lti_url\",\n )\n read_only_fields = (\n \"id\",\n \"is_ready_to_show\",\n \"title\",\n \"upload_state\",\n \"lti_url\",\n )\n\n is_ready_to_show = serializers.BooleanField(read_only=True)\n lti_url = serializers.SerializerMethodField()\n\n def get_lti_url(self, obj):\n \"\"\"LTI Url of the Document.\n\n Parameters\n ----------\n obj : Type[models.Document]\n The document that we want to serialize\n\n Returns\n -------\n String\n the LTI url to be used by LTI consumers\n\n \"\"\"\n return build_absolute_uri_behind_proxy(\n self.context[\"request\"],\n reverse(\"document_lti_view\", args=[obj.id]),\n )\n\n\nclass InitiateUploadSerializer(serializers.Serializer):\n \"\"\"A serializer to validate data submitted on the initiate-upload API endoint.\"\"\"\n\n filename = serializers.CharField()\n mimetype = serializers.CharField(allow_blank=True)\n","sub_path":"src/backend/marsha/core/serializers/file.py","file_name":"file.py","file_ext":"py","file_size_in_byte":5805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"520675999","text":"\"\"\"\nJust having some fun in Python.\n//christianlindeneg, June, 2019//\n\ntodo:\nimplement easy choice of song\nimplement rewind/fast-forward\ncontinuously update download progress\n\nThanks for jams & music:\nhttps://studiojams.com/\n\"\"\"\nfrom tkinter import Tk, Canvas, Frame, Button, Text, mainloop, INSERT, DISABLED, BOTTOM\nfrom json import load, loads\nfrom random import randint\nfrom time import sleep\nfrom zipfile import ZipFile\nfrom os import remove, system\n\ntry:\n from ffpyplayer.player import MediaPlayer\n from pafy import new\n from vlc import Instance\n from google_drive_downloader import GoogleDriveDownloader as gdd\nexcept ImportError:\n system('python -m pip install -r requirements.txt')\n\nclass Start:\n def __init__(self, master):\n self.master = master\n self.HEIGHT = 150\n self.WIDTH = 500\n self.default_C = '#060606'\n self.read()\n\n def read(self):\n try:\n with open('m/music.json') as music:\n r_music = load(music)\n self.str_music = loads(r_music)\n self.pick_tune()\n except FileNotFoundError:\n try:\n with ZipFile(\"m.zip\",\"r\") as zip_ref:\n zip_ref.extractall()\n\n remove(\"m.zip\")\n with open('m/music.json') as music:\n r_music = load(music)\n self.str_music = loads(r_music)\n self.pick_tune()\n\n except FileNotFoundError:\n self.temp_ui()\n try:\n if input() == None: # if this is not here, the temp_ui wont show for some reason\n exit()\n else:\n exit()\n except AttributeError:\n print('\\nSomething went wrong..')\n exit()\n\n def downloader(self):\n gdd.download_file_from_google_drive(file_id='15rTJbGq5mD-bnkZoofvO2n6-c5O9_NNK', dest_path='./m.zip', showsize=True)\n with ZipFile(\"m.zip\",\"r\") as zip_ref:\n zip_ref.extractall()\n remove(\"m.zip\")\n \n with open('m/music.json') as music:\n r_music = load(music)\n self.str_music = loads(r_music)\n\n self.temp_frame2.destroy()\n self.text_info2.destroy()\n self.temp_frame.destroy()\n self.text_info.destroy()\n self.canvas.destroy()\n self.temp_main_frame.destroy()\n\n self.pick_tune()\n self.maingui()\n\n def ranint(self, length):\n return randint(0, length-1)\n\n def pick_tune(self):\n self.music = self.str_music['music']\n self.randint = self.ranint(len(self.music))\n \n if not self.music[self.randint]['mp_link'] == None:\n self.randtune_local = self.music[self.randint]['mp_link']\n self.url = self.music[self.randint]['yt_link']\n self.credit = self.music[self.randint]['uploader']\n\n self.metho = 'local-file'\n self.player = MediaPlayer(str(self.randtune_local))\n\n else:\n self.randtune_stream = self.music[self.randint]['yt_link']\n self.url = self.music[self.randint]['yt_link']\n self.credit = self.music[self.randint]['uploader']\n\n self.metho = 'online-stream'\n self.video = new(self.randtune_stream)\n self.quality = self.video.getbestaudio()\n self.quality_url = self.quality.url\n\n self.vlc_ins = Instance()\n self.players = self.vlc_ins.media_player_new()\n self.current_media = self.vlc_ins.media_new(str(self.quality.url))\n self.player = MediaPlayer(self.current_media.get_mrl())\n\n self.title = '%s, #%s/%s' % (self.music[self.randint]['tune'], self.randint, len(self.music))\n self.pl_str = '\\n%s\\n\\n%s\\n%s\\n\\nplaying from: %s' % (self.credit, self.title, self.url, self.metho)\n\nclass GuiMain(Start):\n def __init__(self, master):\n super().__init__(master)\n\n def maingui(self):\n self.canvas = Canvas(self.master, bg=self.default_C, height=self.HEIGHT, width=self.WIDTH)\n self.canvas.pack()\n # Generate Frame\n self.main_frame = Frame(self.master, bg=self.default_C)\n self.main_frame.place(relx=0.0, rely=0.0, relwidth=1, relheight=1)\n\n self.vid_frame = Frame(self.master, bg=self.default_C)\n self.vid_frame.place(relx=0, rely=0, relwidth=1, relheight=0.872)\n self.vid_text_info = Text(self.vid_frame, bg='#070707', bd=5, exportselection=0, fg='#196619', height=self.HEIGHT, width=self.WIDTH)\n try: \n self.vid_text_info.insert(INSERT, str(self.pl_str))\n except AttributeError:\n self.vid_text_info.insert(INSERT, '\\nError reading name of tune.\\n\\nTry clicking \"New\"..')\n self.vid_text_info.config(state=DISABLED)\n self.vid_text_info.pack()\n\n self.add_frame= Frame(self.master)\n self.add_frame.place(relx=0, rely=0.85, relwidth=1, relheight=0.15)\n\n self.playstop_but = Button(self.add_frame, text=\"Start/Stop\", bg='#070707', fg='#196619', command=self.toggle_pause_command)\n self.new_but = Button(self.add_frame, text=\"New\", bg='#070707', fg='#196619', command=self.new_command)\n self.vol_up_but = Button(self.add_frame, text=\"Volume +\", bg='#070707', fg='#196619', command=self.vol_up_command) \n self.vol_down_but = Button(self.add_frame, text=\"Volume -\", bg='#070707', fg='#196619', command=self.vol_down_command)\n self.exit_but = Button(self.add_frame, text=\"Quit\", bg='#070707', fg='#196619', command=self.temp_abort)\n\n self.playstop_but.grid(row=0, column=1, ipadx=25)\n self.new_but.grid(row=0, column=2, ipadx=25)\n self.vol_up_but.grid(row=0, column=3, ipadx=25)\n self.vol_down_but.grid(row=0, column=4, ipadx=25)\n self.exit_but.grid(row=0, column=5, ipadx=25)\n\n def update(self):\n self.new_frame = Frame(self.master, bg=self.default_C)\n self.new_frame.place(relx=0, rely=0, relwidth=1, relheight=0.8655)\n self.new_text_info = Text(self.new_frame, bg='#070707', bd=5, exportselection=0, fg='#196619', height=self.HEIGHT, width=self.WIDTH)\n self.new_text_info.insert(INSERT, str(self.pl_str))\n self.new_text_info.config(state=DISABLED)\n self.new_text_info.pack()\n\n def temp_ui(self):\n self.canvas = Canvas(self.master, bg=self.default_C, height=150, width=350)\n self.canvas.pack()\n # Generate Frame\n self.temp_main_frame = Frame(self.master, bg=self.default_C)\n self.temp_main_frame.place(relx=1, rely=1, relwidth=1, relheight=1)\n\n self.temp_frame = Frame(self.master, bg=self.default_C)\n self.temp_frame.place(relx=0, rely=0, relwidth=1, relheight=1)\n self.text_info = Text(self.temp_frame, bg='#070707', bd=5, exportselection=0, fg='#196619', height=self.HEIGHT, width=self.WIDTH)\n self.text_info.insert(INSERT, '\\nYou need local files to start.\\n\\nContinue with download of ~240MB?\\n\\n(alternatively, use online-version)')\n self.text_info.config(state=DISABLED)\n self.text_info.pack()\n\n add_t_frame= Frame(self.master)\n add_t_frame.place(relx=0, rely=0.85, relwidth=1, relheight=0.15)\n \n self.choice_yes = Button(add_t_frame, text=\"Download\", bg='#373737', fg='#196619', command=self.temp_con)\n self.choice_no = Button(add_t_frame, text=\"Abort\", bg='#373737', fg='#196619', command=self.temp_abort)\n\n self.choice_yes.grid(row=0, column=1, ipadx=64)\n self.choice_no.grid(row=0, column=2, ipadx=64)\n\n def temp_con(self):\n self.choice_yes.destroy()\n self.choice_no.destroy()\n self.temp_frame2 = Frame(self.master, bg=self.default_C)\n self.temp_frame2.place(relx=0, rely=0, relwidth=1, relheight=1)\n self.text_info2 = Text(self.temp_frame2, bg='#070707', bd=5, exportselection=0, fg='#196619', height=self.HEIGHT, width=self.WIDTH)\n self.text_info2.insert(INSERT, '\\nDownloading ZIP. ~240MB.\\n\\nPlease Wait.\\n\\nIt can take a few minutes..\\nWill automatically re-open when ready.')\n self.text_info2.config(state=DISABLED)\n self.text_info2.pack()\n self.text_info2.update()\n\n self.downloader()\n\n def temp_abort(self):\n return exit()\n\n def new_command(self):\n self.player.close_player()\n self.pick_tune()\n return self.update()\n\n def toggle_pause_command(self):\n return self.player.toggle_pause()\n\n def vol_down_command(self):\n return self.player.set_volume(self.player.get_volume() - 0.1)\n\n def vol_up_command(self):\n return self.player.set_volume(self.player.get_volume() + 0.1)\n\nif __name__ == '__main__':\n root = Tk()\n root.title('JazzJam')\n root.iconbitmap('j_for_jazz.ico')\n root.resizable(False, False)\n initiater = GuiMain(root)\n initiater.maingui()\n root.mainloop()","sub_path":"jazzjam.py","file_name":"jazzjam.py","file_ext":"py","file_size_in_byte":8935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"530456145","text":"import concurrent.futures\nimport os\n\nimport pandas as pd\nfrom flask import Flask, request\nfrom flask_restful import Resource\nfrom redis import Redis\nfrom simplejson import dumps\n\nfrom const import status\nfrom const.redis_queue import IMAGES_INFO_ASYNC, BATCH_PREDICT\nfrom exceptions import ImageInfoError\nfrom mlteam.extensions import redis_client, session\nfrom models.images import ImageInfo, BatchImage\n\n\nclass ImagesInfoResource(Resource):\n \"\"\"\n images_info endpoint.\n \"\"\"\n\n def post(self):\n data = request.get_json()\n if data is None:\n return {\"error\": \"Data is not provided\"}, status.HTTP_422_UNPROCESSABLE_ENTITY\n\n filepath = data.get('filepath', '')\n if os.path.exists(filepath):\n result = {}\n with open(filepath, 'r') as file:\n images = pd.read_csv(file, delimiter='\\t')\n for image in images.itertuples():\n image_info = ImageInfo(image.id, url=image.url, session=session)\n result[image.id] = image_info.to_dict()\n return result, status.HTTP_200_OK\n\n return {\"error\": \"Invalid input file url\"}, status.HTTP_422_UNPROCESSABLE_ENTITY\n\n\nclass ImagesInfoAsyncResource(Resource):\n \"\"\"\n images_info_async endpoint, is a images_info with concurrency for processing\n images and pushing into a Redis queue.\n \"\"\"\n\n def post(self):\n data = request.get_json()\n if data is None:\n return {\"error\": \"Data is not provided\"}, status.HTTP_422_UNPROCESSABLE_ENTITY\n\n filepath = data.get('filepath', '')\n if os.path.exists(filepath):\n result = {}\n with open(filepath, 'r') as file:\n images = pd.read_csv(file, delimiter='\\t')\n with concurrent.futures.ThreadPoolExecutor(max_workers=6) as executor:\n future_img = {\n executor.submit(ImageInfo(img.id, url=img.url, session=session).to_dict): img.id\n for img in images.itertuples()\n }\n for future in concurrent.futures.as_completed(future_img):\n img_id = future_img[future]\n redis_client.rpush(\n IMAGES_INFO_ASYNC,\n dumps({img_id: future.result()})\n )\n return {\"ok\": \"Processing Images\"}, status.HTTP_200_OK\n\n return {\"error\": \"Invalid input file url\"}, status.HTTP_422_UNPROCESSABLE_ENTITY\n\n\nclass BatchPredictResource(Resource):\n \"\"\"\n batch_predict endpoint.\n \"\"\"\n\n def post(self):\n data = request.get_json()\n if data is None:\n return {\"error\": \"Data is not provided\"}, status.HTTP_422_UNPROCESSABLE_ENTITY\n\n filepath = data.get('filepath', '')\n if os.path.exists(filepath):\n batch_size = data.get('batch_size', 0)\n if batch_size == 0:\n # If there is not batch size, all the images are processed.\n return {\"ok\": \"Processing Images\"}, status.HTTP_200_OK\n with open(filepath, 'r') as file:\n images = pd.read_csv(file, delimiter='\\t')\n batch_images = BatchImage(images=images.itertuples(), batch_size=batch_size, session=session)\n batch_images.resize_batch_images(redis_conn=redis_client)\n return {\"ok\": \"Processing Images\"}, status.HTTP_200_OK\n\n return {\"error\": \"Invalid input file url\"}, status.HTTP_422_UNPROCESSABLE_ENTITY\n","sub_path":"v1/resources/images.py","file_name":"images.py","file_ext":"py","file_size_in_byte":3554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"25732865","text":"# code=utf-8\nimport matplotlib as mpl\n\nmpl.use('TkAgg')\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport statsmodels.api as sm\nfrom statsmodels.graphics.tsaplots import acf, pacf, plot_acf, plot_pacf\nfrom statsmodels.tsa.arima_model import ARMA\nfrom suppose.parser import Method, Info, Request, Response, Uri\nfrom suppose import parser\nfrom suppose import trend_counter\n\nimport logging\nimport sys\nimport time\nfrom suppose import scanner\nimport yaml\n\nlog_name = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\nlogger = logging.getLogger(__name__)\nlogger.setLevel(level=logging.INFO)\n# handler = logging.FileHandler(f\"{log_name} parser.log\")\n# handler.setLevel(logging.WARN)\nformatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n# handler.setFormatter(formatter)\n\nconsole = logging.StreamHandler()\nconsole.setLevel(logging.INFO)\n\n# logger.addHandler(handler)\nlogger.addHandler(console)\n\n\ndef uri_layer():\n limit = 80000\n m = {}\n x = []\n y = []\n api_total = []\n session = parser.init_session('localhost', 'root', 'root', 'api_version')\n logger.info('开始')\n sql = \"SELECT id FROM info group by new_path having count(id) >= 12\"\n result = session.execute(sql)\n be_regular_api_id_list = []\n for result_data in result:\n be_regular_api_id_list.append(result_data[0])\n logger.info(f'预计统计{len(be_regular_api_id_list)}篇文档,共计{int((session.query(Uri).count() / limit) + 1)}页')\n for page_index in range(1, int((session.query(Uri).count() / limit) + 1)):\n logger.info(f'[{page_index}]开始处理第{page_index}页')\n temp_uri_total = 0\n temp_api_total = 0\n for uri in session.query(Uri).order_by(Uri.update_time.asc()).limit(limit).offset((page_index - 1) * limit):\n if uri.info_id in be_regular_api_id_list:\n if uri.update_time not in m:\n m[uri.update_time] = [0, 0]\n url = str(uri.uri).replace('\"', '')\n if len(url) > 1 and url != '/':\n m[uri.update_time][0] = m[uri.update_time][0] + len(url.split('/'))\n m[uri.update_time][1] = m[uri.update_time][1] + 1\n temp_uri_total = temp_uri_total + 1\n if uri.info_id not in api_total:\n api_total.append(uri.info_id)\n temp_api_total = temp_api_total + 1\n logger.info(f'本次统计api{temp_api_total}个,uri{temp_uri_total}个')\n for update_time in m:\n x.append(update_time)\n if m[update_time][0] == 0:\n y.append(0)\n else:\n y.append(m[update_time][0] / m[update_time][1])\n trend_counter.auto(x, y, \"The Trend of the average of Uri's Layer\")\n\n\ndef api_resource():\n limit = 80000\n x = []\n y = []\n m = {}\n api_set = set()\n session = parser.init_session('localhost', 'root', 'root', 'api_version')\n logger.info('开始')\n sql = \"SELECT id FROM info group by new_path having count(id) >= 12\"\n result = session.execute(sql)\n be_regular_api_id_list = []\n for result_data in result:\n be_regular_api_id_list.append(result_data[0])\n logger.info(f'预计统计{len(be_regular_api_id_list)}篇文档,共计{int((session.query(Uri).count() / limit) + 1)}页')\n for page_index in range(1, int((session.query(Uri).count() / limit) + 2)):\n logger.info(f'[{page_index}]开始处理第{page_index}页')\n temp_uri_total = 0\n temp_api_total = 0\n for uri in session.query(Uri).order_by(Uri.update_time.asc()).limit(limit).offset((page_index - 1) * limit):\n if uri.info_id in be_regular_api_id_list:\n if uri.update_time not in m:\n m[uri.update_time] = {}\n if uri.info_id not in m[uri.update_time]:\n m[uri.update_time][uri.info_id] = set()\n for resource in uri.uri.replace('\"', \"\").split('/'):\n if len(resource) > 0:\n m[uri.update_time][uri.info_id].add(resource)\n temp_uri_total += 1\n if uri.info_id not in api_set:\n api_set.add(uri.info_id)\n temp_api_total += 1\n logger.info(f'本次统计api:{temp_api_total}个,uri:{temp_uri_total}个')\n for update_time in m:\n total = 0\n api_count = 0\n available = 0\n for info_id in m[update_time]:\n resource_set = m[update_time][info_id]\n if len(resource_set) > 0:\n available = 1\n total = total + len(resource_set)\n api_count = api_count + 1\n if available == 1:\n x.append(update_time)\n y.append(total / api_count)\n\n trend_counter.auto(x, y, \"The Trend of the average of Uri's Resources\")\n\n\ndef endpoint():\n limit = 130000\n x = []\n y = []\n m = {}\n api_set = set()\n session = parser.init_session('localhost', 'root', 'root', 'api_version')\n logger.info('开始')\n sql = \"SELECT id FROM info group by new_path having count(id) >= 12\"\n result = session.execute(sql)\n be_regular_api_id_list = []\n for result_data in result:\n be_regular_api_id_list.append(result_data[0])\n logger.info(f'预计统计{len(be_regular_api_id_list)}篇文档,共计{int((session.query(Method).count() / limit) + 1)}页')\n for page_index in range(1, int((session.query(Method).count() / limit) + 2)):\n logger.info(f'[{page_index}]开始处理第{page_index}页')\n temp_method_total = 0\n temp_api_total = 0\n for method in session.query(Method).order_by(Method.update_time.asc()).limit(limit).offset((page_index - 1) * limit):\n if method.info_id in be_regular_api_id_list:\n if method.update_time not in m:\n m[method.update_time] = {}\n if method.info_id not in m[method.update_time]:\n m[method.update_time][method.info_id] = []\n api_set.add(method.info_id)\n temp_method_total += 1\n if method.info_id not in api_set:\n api_set.add(method.info_id)\n temp_api_total += 1\n m[method.update_time][method.info_id].append(method.method+method.uri)\n logger.info(f'本次统计api:{temp_api_total}个,method:{temp_method_total}个')\n for update_time in m:\n total = 0\n api_count = 0\n for info_id in m[update_time]:\n total = total + len(m[update_time][info_id])\n api_count = api_count + 1\n if total != 0 and total != api_count:\n x.append(update_time)\n y.append(total / api_count)\n trend_counter.auto(x, y, \"The Trend of the average of Endpoints' Statistics\")\n\n\nendpoint()\n\n\ndef request_param():\n pass\n\n\ndef request_param_location():\n pass\n","sub_path":"suppose/trend_painter.py","file_name":"trend_painter.py","file_ext":"py","file_size_in_byte":6923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"341301695","text":"import os\n\nfrom project.board import Board\nfrom project.piece_initialization import white_pieces, black_pieces\n\n\ndef print_possible_moves(moves: dict):\n for name, positions in moves.items():\n positions = \", \".join([f'{pos[0]}{pos[1]}' for pos in positions]) if not isinstance(positions, str) else positions\n\n print(f'{name} -> {positions}')\n\n\ndef available_piece(possible_coordinates: dict):\n for direction in possible_coordinates:\n if not isinstance(possible_coordinates[direction], str):\n return True\n return False\n\n\ndef reprint_board():\n os.system('cls')\n print(board.return_board_for_print())\n print()\n\n\ndef alert_king_is_attacked(side):\n if side:\n print(f'{side} King is under attack. Defend the king.')\n else:\n print('NO king is attacked.')\n\n\nboard = Board()\n\n[board.add_piece(piece) for piece in white_pieces]\n[board.add_piece(piece) for piece in black_pieces]\nreprint_board()\n\nturns = [\"White's Turn\", \"Black's Turn\"]\ni = 1\nwhile True:\n board.make_every_attacked_cell_attacked()\n\n attacked_king = None\n\n if board.is_king_under_attack('White'):\n attacked_king = 'White'\n\n elif board.is_king_under_attack('Black'):\n attacked_king = 'Black'\n\n if attacked_king:\n alert_king_is_attacked(attacked_king)\n\n i += 1\n print(turns[i % len(turns)])\n while True:\n print()\n try:\n piece_coordinates = input('Select piece: ') # coordinates\n moving_positions = board.possible_moves(piece_coordinates)\n except (ValueError, AttributeError) as exc:\n print(str(exc))\n continue\n if not available_piece(moving_positions):\n print('Piece unavailable.')\n continue\n\n print_possible_moves(moving_positions)\n\n place_to_move = input('Select place to move to: ') # coordinates\n if place_to_move not in [item for elem in moving_positions.values() for item in elem]:\n print('Position unreachable.\\n')\n continue\n\n board.move_piece(piece_coordinates, place_to_move)\n\n board.make_every_attacked_cell_attacked()\n\n if board.is_king_under_attack(attacked_king):\n alert_king_is_attacked(attacked_king)\n board.move_piece(place_to_move, piece_coordinates)\n continue\n\n break\n reprint_board()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"56721812","text":"\n\n#calss header\nclass _HACKNEYED():\n\tdef __init__(self,): \n\t\tself.name = \"HACKNEYED\"\n\t\tself.definitions = [u'A hackneyed phrase or idea has been said or used so often that it has become boring and has no meaning: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'adjectives'\n\n\n\tdef run(self, obj1, obj2):\n\t\tself.jsondata[obj2] = {}\n\t\tself.jsondata[obj2]['properties'] = self.name.lower()\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/adjectives/_hackneyed.py","file_name":"_hackneyed.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"383405313","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # 피보나치 나선(Fibonacci spiral)\n# \n\n# ## 1. 피보나치 수열\n# \n# \n# $a_n+a_{n+1}=a_{n+2} (n$은 자연수)\n\n# In[9]:\n\n\ndef fibo(n):\n A=[1]\n if n <=1:\n pass\n \n\n else:\n a=0 \n b=1 \n for i in range(n-1): \n a,b=b,a+b\n A += [b]\n return A\n\nF=fibo(10)\n\n\n# ## 2. 피보나치 사각형 그리기\n# \n\n# ### (1) 시작좌표($P$를 찾는 방법 : 4가지 규칙으로 반복된다.\n# \n# #### 4가지 규칙함수 ($S[k]$에서의 시작좌표 $P_k$($k$는 정수))\n# \n# ![Fibonacci square](2-1.jpg)\n# \n# 0. $P_0$ = (0,0)\n# 1. $P_1$ 의 시작 좌표는 $P_0$ 에서 x값에 $F[0]$를 더한 값이다.\n# 2. $P_2$ 의 시작 좌표는 $P_1$ 에서 x값에 $F[0]$을 빼고 y값에 $F[1]$을 더한 값이다.\n# 3. $P_3$ 의 시작 좌표는 $P_2$ 에서 x값에 $F[3]$을 빼고 y값에 $F[1]$을 뺀 값이다.\n# 4. $P_4$ 의 시작 좌표는 $P_3$ 에서 y값에 $F[4]$를 뺀 값이다.\n\n# In[10]:\n\n\ndef start(): # 4가지 규칙함수 연산후의 시작좌표\n P=[[0,0]] # 첫 정사각형 시작좌표\n X=0\n Y=0\n for i in range(1,len(F)):\n if i%4==1:\n X,Y = X+F[i-1],Y\n \n elif i%4==2:\n X,Y = X-F[i-2],Y+F[i-1]\n \n elif i%4==3:\n X,Y = X-F[i],Y-F[i-2]\n \n elif i%4==0:\n X,Y = X,Y-F[i]\n P += [[X,Y]]\n return P\n\nS=start()\n\n\n# ### (2) 시작 좌표에서 정사각형 그리기 : 각 꼭지점의 좌표를 구한다.\n# \n# #### 이 때 한변의 길이는 피보나치 수열에 의해 결정된다. (한변의 길이: $F[k]$)\n# ![Fibonacci square](2-2.jpg)\n# $R_x(k) =[x[k],x[k]+F[k],x[k]+F[k],x[k],x[k]]$\n# \n# \n# $R_y(k) =[y[k],y[k],y[k]+F[k],y[k]+F[k],y[k]]$\n\n# In[11]:\n\n\nx,y=zip(*S) # List of S(starting point) x=[0,1,0,-3,-3,...] y=[0,0,1,0,-5,...]\n\nimport matplotlib.pyplot as plt\n\ndef R_x(n):\n for j in range(0,n+1):\n R_x = [x[j], x[j]+F[j], x[j]+F[j] ,x[j], x[j]] #정사각형의 x좌표[0,1,2,3,4]\n return R_x\ndef R_y(n):\n for k in range(0,n+1):\n R_y = [y[k], y[k], y[k]+F[k], y[k]+F[k], y[k]] #정사각형의 y좌표[0,1,2,3,4]\n return R_y\n\nfor l in range(0,len(F)):\n plt.plot(R_x(l),R_y(l),'k') #FIbonacci Square\n\nplt.axis('scaled')\nplt.axis('off')\n\nplt.show()\n\n\n# ## 3. 피보나치 나선 그리기\n\n# ### (1) 호의 중심좌표 찾기 : 사각형과 마찬가지로 4가지 규칙이 반복된다.\n# \n# #### 호의 중심에 대한 4가지 규칙\n# ![Fibonacci square](1.jpg)\n# \n# 0.0번째 사각형은 2번 자리가 중심이다.\n# 1.1번째 사각형은 3번 자리가 중심이다.\n# 2.2번째 사각형은 4번 자리가 중심이다.\n# 3.3번째 사각형은 1번 자리가 중심이다.\n# 4.4번째 사각형은 2번 자리가 중심이다.\n# \n\n# In[12]:\n\n\ndef AP(): #각 호의 중심 찾기\n P=[]\n for m in range(0,len(F)):\n if m%4==1:\n X = R_x(m)[3]\n Y = R_y(m)[3]\n elif m%4==2:\n X = R_x(m)[4]\n Y = R_y(m)[4]\n elif m%4==3:\n X = R_x(m)[1]\n Y = R_y(m)[1]\n else:\n X = R_x(m)[2]\n Y = R_y(m)[2]\n P += [[X,Y]]\n return P\n\nxx,yy=zip(*AP())\n\n\n# ### (2) 중심좌표에서 일정 각도만큼 호 그리기\n# #### 4가지 규칙에 따라 호가그려진다.\n\n# In[13]:\n\n\nimport numpy as np\nfrom numpy import pi\nimport matplotlib.pyplot as plt\n\nfor l in range(0,len(F)):\n plt.plot(R_x(l),R_y(l),'k') #FIbonacci Square\n \n \nfor n in range(0,len(F)):\n \n r=F[n]\n \n if n%4==1:\n theta=np.linspace(-pi/2,0,100)\n \n elif n%4==2:\n theta=np.linspace(0,pi/2,100)\n \n elif n%4==3:\n theta=np.linspace(pi/2,pi,100)\n\n else:\n theta=np.linspace(pi,3*pi/2,100)\n \n x1=r*np.cos(theta)+xx[n]\n y1=r*np.sin(theta)+yy[n] \n plt.plot(x1,y1,'r')\n \n\n \nplt.title('Fibonacci Spiral')\nplt.axis('scaled')\nplt.axis('off')\nplt.show()\n","sub_path":"Fibonacci spiral.py","file_name":"Fibonacci spiral.py","file_ext":"py","file_size_in_byte":4104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"631474764","text":"from framework.base_page import BasePage\r\nfrom selenium.webdriver.common.by import By\r\nimport random\r\nimport time\r\nfrom framework.logger import Logger\r\n# 搜索筛选\r\nlogger = Logger('SearchFilter').getlog()\r\n\r\nclass SearchFilter(BasePage):\r\n input = (By.ID, 'q')\r\n btn = (By.CLASS_NAME, 'buts')\r\n random_click = (By.XPATH, '//*[@class=\"box\"]/div/span//dl/dt/a[1]')\r\n pay_btn = (By.XPATH, '//*[@id=\"Pay\"]')\r\n series_retrieval = (By.XPATH, '//*[@class=\"retrieval\"]/dl[3]/dd/a')\r\n proc_retrieval = (By.XPATH, '//*[@class=\"retrieval\"]/dl[2]/dd/a')\r\n reset = (By.XPATH, '//*[@class=\"reset-xj\"]')\r\n\r\n def search(self, input):\r\n try:\r\n self.expected_conditions(self.input).send_keys(input)\r\n self.expected_conditions(self.btn).click()\r\n logger.info('输入框输入{},点击搜索'.format(input))\r\n except TypeError as e:\r\n logger.info('search error', format(e))\r\n self.get_windows_img()\r\n\r\n def filter(self):\r\n try:\r\n self.find_elements(*self.random_click)[random.randint(0, 49)].click()\r\n logger.info('随机点击搜索酒款')\r\n\r\n self.switch_to(1)\r\n self.expected_conditions(self.pay_btn)\r\n self.driver.close()\r\n self.switch_to(0)\r\n time.sleep(0.5)\r\n\r\n self.find_elements(*self.series_retrieval)[random.randint(1,3)].click()\r\n time.sleep(0.5)\r\n logger.info('系列筛选')\r\n\r\n self.find_elements(*self.proc_retrieval)[random.randint(1,7)].click()\r\n time.sleep(0.5)\r\n logger.info('产区筛选')\r\n\r\n self.expected_conditions(self.reset).click()\r\n logger.info('清空筛选')\r\n except Exception:\r\n logger.info('filter error')\r\n self.get_windows_img()","sub_path":"Pscrapy/PycharmProjects/Reptile/automation_Testing/pageobjects/search_filter.py","file_name":"search_filter.py","file_ext":"py","file_size_in_byte":1847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"345196267","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Copyright 2020 by Murray Altheim. All rights reserved. This file is part of\n# the Robot OS project and is released under the \"Apache Licence, Version 2.0\".\n# Please see the LICENSE file included as part of this package.\n#\n# author: altheim\n# created: 2020-01-18\n# modified: 2020-10-28\n#\n# Implements an Integrated Front Sensor using an IO Expander Breakout Garden\n# board. This polls the values of the board's pins, which outputs 0-255 values\n# for analog pins, and a 0 or 1 for digital pins.\n#\n\nfrom concurrent.futures import ThreadPoolExecutor #, ProcessPoolExecutor\nimport time, threading\nimport datetime as dt\nfrom collections import deque as Deque\nfrom colorama import init, Fore, Style\ninit()\n\nimport ioexpander as io\n\nfrom lib.config_loader import ConfigLoader\nfrom lib.logger import Logger, Level\nfrom lib.enums import Orientation\nfrom lib.event import Event\nfrom lib.message_factory import MessageFactory\nfrom lib.message_bus import MessageBus\nfrom lib.message import Message\n#from lib.indicator import Indicator\nfrom lib.pot import Potentiometer # for calibration only\n\n\n# ..............................................................................\nclass IntegratedFrontSensor():\n '''\n IntegratedFrontSensor: communicates with the integrated front bumpers and\n infrared sensors, receiving messages from the IO Expander board or I²C\n Arduino slave, sending the messages with its events onto the message bus.\n\n This listens to the Clock and at a frequency of ( TICK % tick_modulo )\n calls the _poll() method.\n\n Parameters:\n\n :param config: the YAML based application configuration\n :param queue: the message queue receiving activation notifications\n :param clock: the clock providing the polling loop trigger\n :param message_bus: the message bus to send event messages\n :param message_factory: optional MessageFactory\n :param level: the logging Level\n\n '''\n\n # ..........................................................................\n def __init__(self, config, queue, clock, message_bus, message_factory, level):\n if config is None:\n raise ValueError('no configuration provided.')\n self._log = Logger(\"ifs\", level)\n self._log.info('configuring integrated front sensor...')\n _cruise_config = config['ros'].get('cruise_behaviour')\n self._cruising_velocity = _cruise_config.get('cruising_velocity')\n self._config = config['ros'].get('integrated_front_sensor')\n self._clock = clock\n self._clock.add_consumer(self)\n self._message_bus = message_bus\n# _queue = queue\n# _queue.add_consumer(self)\n self._device_id = self._config.get('device_id') # i2c hex address of slave device, must match Arduino's SLAVE_I2C_ADDRESS\n self._channel = self._config.get('channel')\n self._ignore_duplicates = self._config.get('ignore_duplicates')\n self._tick_modulo = self._config.get('tick_modulo')\n _max_workers = self._config.get('max_workers')\n self._log.info('tick modulo: {:d}'.format(self._tick_modulo) )\n # event thresholds:\n self._callback_cntr_min_trigger = self._config.get('callback_center_minimum_trigger')\n self._callback_side_min_trigger = self._config.get('callback_side_minimum_trigger')\n self._callback_min_trigger = self._config.get('callback_minimum_trigger')\n self._port_side_trigger_distance = self._config.get('port_side_trigger_distance')\n self._port_trigger_distance = self._config.get('port_trigger_distance')\n self._center_trigger_distance = self._config.get('center_trigger_distance')\n self._stbd_trigger_distance = self._config.get('stbd_trigger_distance')\n self._stbd_side_trigger_distance = self._config.get('stbd_side_trigger_distance')\n self._log.info('event thresholds: \\t' \\\n +Fore.RED + ' port side={:>5.2f}; port={:>5.2f};'.format(self._port_side_trigger_distance, self._port_trigger_distance) \\\n +Fore.BLUE + ' center={:>5.2f};'.format(self._center_trigger_distance) \\\n +Fore.GREEN + ' stbd={:>5.2f}; stbd side={:>5.2f}'.format(self._stbd_trigger_distance, self._stbd_side_trigger_distance))\n # hardware pin assignments\n self._port_side_ir_pin = self._config.get('port_side_ir_pin')\n self._port_ir_pin = self._config.get('port_ir_pin')\n self._center_ir_pin = self._config.get('center_ir_pin')\n self._stbd_ir_pin = self._config.get('stbd_ir_pin')\n self._stbd_side_ir_pin = self._config.get('stbd_side_ir_pin')\n self._log.info('infrared pin assignments:\\t' \\\n +Fore.RED + ' port side={:d}; port={:d};'.format(self._port_side_ir_pin, self._port_ir_pin) \\\n +Fore.BLUE + ' center={:d};'.format(self._center_ir_pin) \\\n +Fore.GREEN + ' stbd={:d}; stbd side={:d}'.format(self._stbd_ir_pin, self._stbd_side_ir_pin))\n self._port_bmp_pin = self._config.get('port_bmp_pin')\n self._center_bmp_pin = self._config.get('center_bmp_pin')\n self._stbd_bmp_pin = self._config.get('stbd_bmp_pin')\n self._log.info('bumper pin assignments:\\t' \\\n +Fore.RED + ' port={:d};'.format(self._port_bmp_pin) \\\n +Fore.BLUE + ' center={:d};'.format(self._center_bmp_pin) \\\n +Fore.GREEN + ' stbd={:d}'.format(self._stbd_bmp_pin))\n if message_factory:\n self._message_factory = message_factory\n else:\n self._message_factory = MessageFactory(level)\n# self._executor = ProcessPoolExecutor(max_workers=_max_workers)\n self._log.info('creating thread pool executor with maximum of {:d} workers.'.format(_max_workers))\n self._executor = ThreadPoolExecutor(max_workers=_max_workers, thread_name_prefix='ifs')\n # config IO Expander\n self._ioe = IoExpander(config, Level.INFO)\n # calculating means for IR sensors\n self._pot = Potentiometer(config, Level.INFO)\n _queue_limit = 2 # larger number means it takes longer to change\n self._deque_port_side = Deque([], maxlen=_queue_limit)\n self._deque_port = Deque([], maxlen=_queue_limit)\n self._deque_cntr = Deque([], maxlen=_queue_limit)\n self._deque_stbd = Deque([], maxlen=_queue_limit)\n self._deque_stbd_side = Deque([], maxlen=_queue_limit)\n # ...\n self._last_event = None\n self._last_value = None\n self._enabled = False\n self._suppressed = False\n self._closed = False\n self._log.info(Fore.MAGENTA + 'ready.')\n\n # ......................................................\n def add(self, message):\n '''\n Reacts to every nth (modulo) TICK message, submitting a _poll Thread\n from the thread pool executor, which polls the various sensors and\n sending callbacks for each.\n\n Note that if the loop frequency is set this method is disabled.\n '''\n if self._enabled and message.event is Event.CLOCK_TICK:\n if ( message.value % self._tick_modulo ) == 0:\n _future = self._executor.submit(self._poll, message.value, lambda: self.enabled )\n\n # ......................................................\n def _poll(self, count, f_is_enabled):\n '''\n Poll the various infrared and bumper sensors, executing callbacks for each.\n In tests this typically takes 173ms from ItsyBitsy, 85ms from the Pimoroni IO Expander.\n\n We add a messsage for the bumpers immediately (rather than use a callback) after reading\n the sensors since their response must be as fast as possible.\n '''\n if not f_is_enabled():\n self._log.warning('[{:04d}] poll not enabled'.format(count))\n return\n\n self._log.info(Fore.BLACK + '[{:04d}] ifs poll start.'.format(count))\n _current_thread = threading.current_thread()\n _current_thread.name = 'poll'\n _start_time = dt.datetime.now()\n\n # pin 10: digital bumper sensor ........................................\n _port_bmp_data = self.get_input_for_event_type(Event.BUMPER_PORT)\n _cntr_bmp_data = self.get_input_for_event_type(Event.BUMPER_CNTR)\n _stbd_bmp_data = self.get_input_for_event_type(Event.BUMPER_STBD)\n\n _port_side_ir_data = self.get_input_for_event_type(Event.INFRARED_PORT_SIDE)\n _port_ir_data = self.get_input_for_event_type(Event.INFRARED_PORT)\n _cntr_ir_data = self.get_input_for_event_type(Event.INFRARED_CNTR)\n _stbd_ir_data = self.get_input_for_event_type(Event.INFRARED_STBD)\n _stbd_side_ir_data = self.get_input_for_event_type(Event.INFRARED_STBD_SIDE)\n\n# self._callback(Event.BUMPER_CNTR, _cntr_bmp_data)\n if _cntr_bmp_data == 1:\n _message = self._message_factory.get_message(Event.BUMPER_CNTR, _cntr_bmp_data)\n self._log.info(Fore.BLUE + Style.BRIGHT + 'adding new message eid#{} for BUMPER_CNTR event.'.format(_message.eid))\n self._message_bus.handle(_message)\n\n # pin 9: digital bumper sensor .........................................\n# self._callback(Event.BUMPER_PORT, _port_bmp_data)\n if _port_bmp_data == 1:\n _message = self._message_factory.get_message(Event.BUMPER_PORT, _port_bmp_data)\n self._log.info(Fore.BLUE + Style.BRIGHT + 'adding new message eid#{} for BUMPER_PORT event.'.format(_message.eid))\n self._message_bus.handle(_message)\n\n # pin 11: digital bumper sensor ........................................\n# self._callback(Event.BUMPER_STBD, _stbd_bmp_data)\n if _stbd_bmp_data == 1:\n _message = self._message_factory.get_message(Event.BUMPER_STBD, _stbd_bmp_data)\n self._log.info(Fore.BLUE + Style.BRIGHT + 'adding new message eid#{} for BUMPER_STBD event.'.format(_message.eid))\n self._message_bus.handle(_message)\n\n # port side analog infrared sensor .....................................\n if _port_side_ir_data > self._callback_side_min_trigger:\n# _message = self._message_factory.get_message(Event.INFRARED_PORT_SIDE, _port_side_ir_data)\n# self._log.info(Fore.RED + Style.BRIGHT + 'adding new message eid#{} for INFRARED_PORT_SIDE event.'.format(_message.eid))\n# self._message_bus.handle(_message)\n self._log.info(Fore.RED + '[{:04d}] ANALOG IR ({:d}): \\t'.format(count, 1) + (Fore.RED if (_port_side_ir_data > 100.0) else Fore.YELLOW) \\\n + Style.BRIGHT + '{:d}'.format(_port_side_ir_data) + Style.DIM + '\\t(analog value 0-255)')\n self._callback(Event.INFRARED_PORT_SIDE, _port_side_ir_data)\n\n # port analog infrared sensor ..........................................\n if _port_ir_data > self._callback_min_trigger:\n# _message = self._message_factory.get_message(Event.INFRARED_PORT, _port_ir_data)\n# self._log.info(Fore.RED + Style.BRIGHT + 'adding new message eid#{} for INFRARED_PORT event.'.format(_message.eid))\n# self._message_bus.handle(_message)\n self._log.info('[{:04d}] ANALOG IR ({:d}): \\t'.format(count, 2) + (Fore.RED if (_port_ir_data > 100.0) else Fore.YELLOW) \\\n + Style.BRIGHT + '{:d}'.format(_port_ir_data) + Style.DIM + '\\t(analog value 0-255)')\n self._callback(Event.INFRARED_PORT, _port_ir_data)\n\n # center analog infrared sensor ........................................\n if _cntr_ir_data > self._callback_cntr_min_trigger:\n# _message = self._message_factory.get_message(Event.INFRARED_CNTR, _cntr_ir_data)\n# self._log.info(Fore.BLUE + Style.BRIGHT + 'adding new message eid#{} for INFRARED_CNTR event.'.format(_message.eid))\n# self._message_bus.handle(_message)\n self._log.info(Fore.BLUE + '[{:04d}] ANALOG IR ({:d}): \\t'.format(count, 3) + (Fore.RED if (_cntr_ir_data > 100.0) else Fore.YELLOW) \\\n + Style.BRIGHT + '{:d}'.format(_cntr_ir_data) + Style.DIM + '\\t(analog value 0-255)')\n self._callback(Event.INFRARED_CNTR, _cntr_ir_data)\n\n # starboard analog infrared sensor .....................................\n if _stbd_ir_data > self._callback_min_trigger:\n# _message = self._message_factory.get_message(Event.INFRARED_STBD, _stbd_ir_data)\n# self._log.info(Fore.GREEN + Style.BRIGHT + 'adding new message eid#{} for INFRARED_STBD event.'.format(_message.eid))\n# self._message_bus.handle(_message)\n self._log.info('[{:04d}] ANALOG IR ({:d}): \\t'.format(count, 4) + (Fore.RED if (_stbd_ir_data > 100.0) else Fore.YELLOW) \\\n + Style.BRIGHT + '{:d}'.format(_stbd_ir_data) + Style.DIM + '\\t(analog value 0-255)')\n self._callback(Event.INFRARED_STBD, _stbd_ir_data)\n\n # starboard side analog infrared sensor ................................\n if _stbd_side_ir_data > self._callback_side_min_trigger:\n# _message = self._message_factory.get_message(Event.INFRARED_STBD_SIDE, _stbd_side_ir_data)\n# self._log.info(Fore.GREEN + Style.BRIGHT + 'adding new message eid#{} for INFRARED_STBD_SIDE event.'.format(_message.eid))\n# self._message_bus.handle(_message)\n self._log.info('[{:04d}] ANALOG IR ({:d}): \\t'.format(count, 5) + (Fore.RED if (_stbd_side_ir_data > 100.0) else Fore.YELLOW) + Style.BRIGHT + '{:d}'.format(_stbd_side_ir_data) + Style.DIM + '\\t(analog value 0-255)')\n self._callback(Event.INFRARED_STBD_SIDE, _stbd_side_ir_data)\n\n _delta = dt.datetime.now() - _start_time\n _elapsed_ms = int(_delta.total_seconds() * 1000)\n self._log.info(Fore.BLACK + '[{:04d}] poll end; elapsed processing time: {:d}ms'.format(count, _elapsed_ms))\n# return True\n\n # ..........................................................................\n def _get_mean_distance(self, orientation, value):\n '''\n Returns the mean of values collected in the queue for the specified IR sensor.\n '''\n if value == None or value == 0:\n return None\n if orientation is Orientation.PORT_SIDE:\n _deque = self._deque_port_side\n elif orientation is Orientation.PORT:\n _deque = self._deque_port\n elif orientation is Orientation.CNTR:\n _deque = self._deque_cntr\n elif orientation is Orientation.STBD:\n _deque = self._deque_stbd\n elif orientation is Orientation.STBD_SIDE:\n _deque = self._deque_stbd_side\n else:\n raise ValueError('unsupported orientation.')\n _deque.append(value)\n _n = 0\n _mean = 0.0\n for x in _deque:\n _n += 1\n _mean += ( x - _mean ) / _n\n if _n < 1:\n return float('nan');\n else:\n return _mean\n\n # ..........................................................................\n def _convert_to_distance(self, value):\n '''\n Converts the value returned by the IR sensor to a distance in centimeters.\n\n Distance Calculation ---------------\n\n This is reading the distance from a 3 volt Sharp GP2Y0A60SZLF infrared\n sensor to a piece of white A4 printer paper in a low ambient light room.\n The sensor output is not linear, but its accuracy is not critical. If\n the target is too close to the sensor the values are not valid. According\n to spec 10cm is the minimum distance, but we get relative variability up\n until about 5cm. Values over 150 clearly indicate the robot is less than\n 10cm from the target.\n\n 0cm = unreliable\n 5cm = 226.5\n 7.5cm = 197.0\n 10cm = 151.0\n 20cm = 92.0\n 30cm = 69.9\n 40cm = 59.2\n 50cm = 52.0\n 60cm = 46.0\n 70cm = 41.8\n 80cm = 38.2\n 90cm = 35.8\n 100cm = 34.0\n 110cm = 32.9\n 120cm = 31.7\n 130cm = 30.7 *\n 140cm = 30.7 *\n 150cm = 29.4 *\n\n * Maximum range on IR is about 130cm, after which there is diminishing\n stability/variability, i.e., it's hard to determine if we're dealing\n with a level of system noise rather than data. Different runs produce\n different results, with values between 28 - 31 on a range of any more\n than 130cm.\n\n See: http://ediy.com.my/blog/item/92-sharp-gp2y0a21-ir-distance-sensors\n '''\n if value == None or value == 0:\n return None\n self._use_pot = False\n# if self._use_pot:\n# _pot_value = self._pot.get_scaled_value()\n# _EXPONENT = _pot_value\n# else:\n# _pot_value = 0.0\n _EXPONENT = 1.33\n _NUMERATOR = 1000.0\n _distance = pow( _NUMERATOR / value, _EXPONENT ) # 900\n# self._log.debug(Fore.BLACK + Style.NORMAL + 'value: {:>5.2f}; pot value: {:>5.2f}; distance: {:>5.2f}cm'.format(value, _pot_value, _distance))\n return _distance\n\n # ..........................................................................\n def _callback(self, event, value):\n '''\n This is the callback method from the I²C Master, whose events are\n being returned from the source, e.g., Arduino or IO Expander board.\n '''\n if not self._enabled or self._suppressed:\n self._log.info(Fore.BLACK + Style.DIM + 'SUPPRESSED callback: event {}; value: {:d}'.format(event.name, value))\n return\n try:\n\n# self._log.debug(Fore.BLACK + 'callback: event {}; value: {:d}'.format(event.name, value))\n _event = None\n _value = value\n\n # bumpers ..................................................................................\n\n if event == Event.BUMPER_PORT:\n if value == 1:\n _event = Event.BUMPER_PORT\n _value = 1\n elif event == Event.BUMPER_CNTR:\n if value == 1:\n _event = Event.BUMPER_CNTR\n _value = 1\n elif event == Event.BUMPER_STBD:\n if value == 1:\n _event = Event.BUMPER_STBD\n _value = 1\n\n # ..........................................................................................\n # For IR sensors we rewrite the value with a dynamic mean distance (cm),\n # setting the event type only if the value is less than a distance threshold.\n\n elif event == Event.INFRARED_PORT_SIDE:\n _value = self._get_mean_distance(Orientation.PORT_SIDE, self._convert_to_distance(value))\n self._log.info(Fore.RED + Style.BRIGHT + 'mean distance: {:5.2f}cm;\\tPORT SIDE'.format(_value))\n if _value != None and _value < self._port_side_trigger_distance:\n _fire_message(event, _value)\n# _event = event\n# else:\n# self._log.info(Fore.RED + 'mean distance: {:5.2f}cm;\\tPORT SIDE'.format(_value))\n\n elif event == Event.INFRARED_PORT:\n _value = self._get_mean_distance(Orientation.PORT, self._convert_to_distance(value))\n self._log.info(Fore.RED + Style.BRIGHT + 'mean distance: {:5.2f}cm;\\tPORT'.format(_value))\n if _value != None and _value < self._port_trigger_distance:\n _fire_message(event, _value)\n# _event = event\n# else:\n# self._log.info(Fore.RED + 'mean distance: {:5.2f}cm;\\tPORT'.format(_value))\n\n elif event == Event.INFRARED_CNTR:\n _value = self._get_mean_distance(Orientation.CNTR, self._convert_to_distance(value))\n self._log.info(Fore.BLUE + Style.BRIGHT + 'mean distance: {:5.2f}cm;\\tCNTR'.format(_value))\n if _value != None and _value < self._center_trigger_distance:\n _fire_message(event, _value)\n# _event = event\n# else:\n# self._log.info(Fore.BLUE + 'mean distance: {:5.2f}cm;\\tCNTR'.format(_value))\n\n elif event == Event.INFRARED_STBD:\n _value = self._get_mean_distance(Orientation.STBD, self._convert_to_distance(value))\n self._log.info(Fore.GREEN + Style.BRIGHT + 'mean distance: {:5.2f}cm;\\tSTBD'.format(_value))\n if _value != None and _value < self._stbd_trigger_distance:\n _fire_message(event, _value)\n# _event = event\n# else:\n# self._log.info(Fore.GREEN + 'mean distance: {:5.2f}cm;\\tSTBD'.format(_value))\n\n elif event == Event.INFRARED_STBD_SIDE:\n _value = self._get_mean_distance(Orientation.STBD_SIDE, self._convert_to_distance(value))\n self._log.info(Fore.GREEN + Style.BRIGHT + 'mean distance: {:5.2f}cm;\\tSTBD SIDE'.format(_value))\n if _value != None and _value < self._stbd_side_trigger_distance:\n _fire_message(event, _value)\n# _event = event\n# else:\n# self._log.info(Fore.GREEN + 'mean distance: {:5.2f}cm;\\tSTBD SIDE'.format(_value))\n\n# if _event is not None and _value is not None:\n# # if not self._ignore_duplicates or ( _event != self._last_event and _value != self._last_value ):\n# _message = self._message_factory.get_message(_event, _value)\n# self._log.info(Fore.WHITE + Style.BRIGHT + 'adding new message eid#{} for event {}'.format(_message.eid, _event.description))\n# self._message_bus.handle(_message)\n# # else:\n# # self._log.warning(Fore.CYAN + Style.NORMAL + 'ignoring message for event {}'.format(_event.description))\n# self._last_event = _event\n# self._last_value = _value\n# else:\n# self._log.info(Fore.WHITE + Style.BRIGHT + 'NOT ADDING message eid#{} for event {}'.format(_message.eid, _event.description))\n\n except Exception as e:\n self._log.error('callback error: {}\\n{}'.format(e, traceback.format_exc()))\n\n\n # ..........................................................................\n def _fire_message(self, event, value):\n self._log.info(Fore.YELLOW + Style.BRIGHT + 'fire message with event {} and value {}'.format(event, value))\n if event is not None and value is not None:\n _message = self._message_factory.get_message(event, value)\n self._log.info(Fore.WHITE + Style.BRIGHT + 'adding new message eid#{} for event {}'.format(_message.eid, _event.description))\n self._message_bus.handle(_message)\n else:\n self._log.info(Fore.RED + Style.BRIGHT + 'ignoring message with event {} and value {}'.format(event, value))\n\n\n # ..........................................................................\n def get_input_for_event_type(self, event):\n '''\n Return the current value of the pin corresponding to the Event type.\n '''\n if event is Event.INFRARED_PORT_SIDE:\n return self._ioe.get_port_side_ir_value()\n elif event is Event.INFRARED_PORT:\n return self._ioe.get_port_ir_value()\n elif event is Event.INFRARED_CNTR:\n return self._ioe.get_center_ir_value()\n elif event is Event.INFRARED_STBD:\n return self._ioe.get_stbd_ir_value()\n elif event is Event.INFRARED_STBD_SIDE:\n return self._ioe.get_stbd_side_ir_value()\n elif event is Event.BUMPER_PORT:\n return self._ioe.get_port_bmp_value()\n elif event is Event.BUMPER_CNTR:\n return self._ioe.get_center_bmp_value()\n elif event is Event.BUMPER_STBD:\n return self._ioe.get_stbd_bmp_value()\n else:\n raise Exception('unexpected event type.')\n\n # ..........................................................................\n def suppress(self, state):\n self._log.info('suppress {}.'.format(state))\n self._suppressed = state\n\n # ..........................................................................\n @property\n def enabled(self):\n return self._enabled\n\n # ..........................................................................\n def enable(self):\n if not self._enabled:\n if not self._closed:\n self._log.info('enabled.')\n self._enabled = True\n else:\n self._log.warning('cannot enable: already closed.')\n else:\n self._log.warning('already enabled.')\n\n # ..........................................................................\n def disable(self):\n if self._enabled:\n self._enabled = False\n self._log.info(Fore.YELLOW + 'shutting down thread pool executor...')\n self._executor.shutdown(wait=False) # python 3.9: , cancel_futures=True)\n self._log.info(Fore.YELLOW + 'disabled: thread pool executor has been shut down.')\n else:\n self._log.debug('already disabled.')\n\n # ..........................................................................\n def close(self):\n '''\n Permanently close and disable the integrated front sensor.\n '''\n if not self._closed:\n self.disable()\n self._closed = True\n self._log.info('closed.')\n else:\n self._log.warning('already closed.')\n\n # ..........................................................................\n @staticmethod\n def clamp(n, minn, maxn):\n return max(min(maxn, n), minn)\n\n # ..........................................................................\n @staticmethod\n def remap(x, in_min, in_max, out_min, out_max):\n return (x - in_min) * (out_max - out_min) / (in_max - in_min) + out_min\n\n\n# ..............................................................................\nclass IoExpander():\n '''\n Wraps an IO Expander board as input from an integrated front sensor\n array of infrareds and bumper switches.\n '''\n\n def __init__(self, config, level):\n super().__init__()\n if config is None:\n raise ValueError('no configuration provided.')\n _config = config['ros'].get('io_expander')\n self._log = Logger('ioe', level)\n # infrared\n self._port_side_ir_pin = _config.get('port_side_ir_pin') # pin connected to port side infrared\n self._port_ir_pin = _config.get('port_ir_pin') # pin connected to port infrared\n self._center_ir_pin = _config.get('center_ir_pin') # pin connected to center infrared\n self._stbd_ir_pin = _config.get('stbd_ir_pin') # pin connected to starboard infrared\n self._stbd_side_ir_pin = _config.get('stbd_side_ir_pin') # pin connected to starboard side infrared\n self._log.info('infrared pin assignments:\\t' \\\n + Fore.RED + ' port side={:d}; port={:d};'.format(self._port_side_ir_pin, self._port_ir_pin) \\\n + Fore.BLUE + ' center={:d};'.format(self._center_ir_pin) \\\n + Fore.GREEN + ' stbd={:d}; stbd side={:d}'.format(self._stbd_ir_pin, self._stbd_side_ir_pin))\n # bumpers\n self._port_bmp_pin = _config.get('port_bmp_pin') # pin connected to port bumper\n self._center_bmp_pin = _config.get('center_bmp_pin') # pin connected to center bumper\n self._stbd_bmp_pin = _config.get('stbd_bmp_pin') # pin connected to starboard bumper\n self._log.info('bumper pin assignments:\\t' \\\n + Fore.RED + ' port={:d};'.format(self._port_ir_pin) \\\n + Fore.BLUE + ' center={:d};'.format(self._center_ir_pin ) \\\n + Fore.GREEN + ' stbd={:d}'.format(self._stbd_ir_pin))\n\n # configure board\n self._ioe = io.IOE(i2c_addr=0x18)\n self.board = self._ioe # TEMP\n self._ioe.set_adc_vref(3.3) # input voltage of IO Expander, this is 3.3 on Breakout Garden\n # analog infrared sensors\n self._ioe.set_mode(self._port_side_ir_pin, io.ADC)\n self._ioe.set_mode(self._port_ir_pin, io.ADC)\n self._ioe.set_mode(self._center_ir_pin, io.ADC)\n self._ioe.set_mode(self._stbd_ir_pin, io.ADC)\n self._ioe.set_mode(self._stbd_side_ir_pin, io.ADC)\n # digital bumpers\n self._ioe.set_mode(self._port_bmp_pin, io.PIN_MODE_PU)\n self._ioe.set_mode(self._center_bmp_pin, io.PIN_MODE_PU)\n self._ioe.set_mode(self._stbd_bmp_pin, io.PIN_MODE_PU)\n self._log.info('ready.')\n\n def get_port_side_ir_value(self):\n return int(round(self._ioe.input(self._port_side_ir_pin) * 100.0))\n\n def get_port_ir_value(self):\n return int(round(self._ioe.input(self._port_ir_pin) * 100.0))\n\n def get_center_ir_value(self):\n return int(round(self._ioe.input(self._center_ir_pin) * 100.0))\n\n def get_stbd_ir_value(self):\n return int(round(self._ioe.input(self._stbd_ir_pin) * 100.0))\n\n def get_stbd_side_ir_value(self):\n return int(round(self._ioe.input(self._stbd_side_ir_pin) * 100.0))\n\n def get_port_bmp_value(self):\n return self._ioe.input(self._port_bmp_pin) == 0\n\n def get_center_bmp_value(self):\n return self._ioe.input(self._center_bmp_pin) == 0\n\n def get_stbd_bmp_value(self):\n return self._ioe.input(self._stbd_bmp_pin) == 0\n\n# EOF\n","sub_path":"lib/ifs.py","file_name":"ifs.py","file_ext":"py","file_size_in_byte":29884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}