diff --git "a/377.jsonl" "b/377.jsonl" new file mode 100644--- /dev/null +++ "b/377.jsonl" @@ -0,0 +1,818 @@ +{"seq_id":"22570024979","text":"import pytest\n\nfrom solution import is_triangular\n\n\ndata_true = [\n (1, True),\n (3, True),\n (6, True),\n (10, True),\n (15, True),\n (21, True),\n (28, True),\n]\n\n\n@pytest.mark.parametrize(\n \"t, result\", data_true\n)\ndef test_returns_true_when_t_is_a_triangular_numer(t, result):\n assert is_triangular(t) == result\n\n\ndata_false = [\n (2, False),\n (7, False),\n (14, False),\n (27, False),\n]\n\n\n@pytest.mark.parametrize(\n \"t, result\", data_false\n)\ndef test_returns_false_when_t_is_not_a_triangular_number(t, result):\n assert is_triangular(t) == result\n","repo_name":"estraviz/codewars","sub_path":"7_kyu/Beginner Series 5 Triangular Numbers/python/test_solution.py","file_name":"test_solution.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"85"} +{"seq_id":"30954321794","text":"import h5py\nimport re\n#h5py.run_tests()\nPATH = './data/das-merania/sweep_p1intensity_2021-08-31T17_22_39+0100/'\nFILENAME = 'sweep_p1intensity_2021-08-31T162239Z.h5'\n\n# with h5py.File(f'{PATH}{FILENAME}', 'r', driver=None) as f:\nf = h5py.File(f'{PATH}{FILENAME}', 'r', driver=None)\nprint(f.__dict__)\nprint(f.name)\nprint(f.filename)\nif f:\n print(\"file is open\")\nelse:\n print(\"file is closed\")\n\ndataset = f.get('/Acquisition/Raw[0]/RawData')\n# def get_keys():\n# f.keys() # prints keys\n# if match := re.match(\"^.+'(.+)'.+\", str(f.keys())):\n# print(match[1]) # 0 whole string 1 just the match group\n# print(f[match[1]])\n\n","repo_name":"jsenlas/optoviz","sub_path":"hdf5reader.py","file_name":"hdf5reader.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"71045914837","text":"import pygame\r\nimport game_functions as gf\r\nfrom settings import Settings\r\nfrom ship import Ship\r\nfrom pygame.sprite import Group\r\nfrom game_stats import GameStats\r\nfrom button import Button\r\nfrom scoreboard import ScoreBoard\r\n\r\n\r\ndef run_game():\r\n # 初始化并创建一个屏幕对象\r\n pygame.init()\r\n ai_setting = Settings()\r\n screen = pygame.display.set_mode(\r\n (ai_setting.screen_width, ai_setting.screen_height))\r\n pygame.display.set_caption('Alien Invasion')\r\n\r\n # 创建一艘飞船\r\n ship = Ship(ai_setting, screen)\r\n # 创建一个储存子弹的编组\r\n bullets = Group()\r\n # 创建一个储存外星人的编组\r\n aliens = Group()\r\n # 创建外星人群\r\n gf.creat_fleet(ai_setting, screen, ship, aliens)\r\n # 创建游戏统计信息和记分牌\r\n stats = GameStats(ai_setting)\r\n sb = ScoreBoard(ai_setting, screen, stats)\r\n # 创建按钮\r\n play_button = Button(ai_setting, screen, \"Play\")\r\n\r\n # 开始游戏主循环\r\n while True:\r\n # 检查鼠标键盘事件\r\n gf.check_events(ai_setting, screen, stats, sb, play_button, ship,\r\n aliens, bullets)\r\n # 判断游戏是否失败\r\n if stats.game_active:\r\n # 更新飞船位置\r\n ship.update()\r\n # 更新子弹位置\r\n gf.update_bullets(ai_setting, screen, stats, sb,\r\n ship, bullets, aliens)\r\n # 更新外星人位置\r\n gf.update_aliens(ai_setting, stats, sb, screen, ship,\r\n aliens, bullets)\r\n # 检查子弹与外星人的碰撞\r\n gf.check_bullet_alien_collisions(ai_setting, screen, stats, sb,\r\n ship, bullets, aliens)\r\n # 检查飞船与外星人的碰撞\r\n gf.check_alien_condition(ship, aliens)\r\n # 刷新屏幕\r\n gf.update_screen(ai_setting, screen, stats, sb, ship, bullets,\r\n aliens, 
play_button)\r\n\r\n\r\nrun_game()\r\n","repo_name":"ZhaoYangMeng/demo","sub_path":"alien_invasion.py","file_name":"alien_invasion.py","file_ext":"py","file_size_in_byte":2052,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"12974106339","text":"import os\nimport json\nimport numpy as np\nimport abipy.core.abinit_units as abu\nimport abipy.flowtk as flowtk\n\nfrom monty.string import list_strings, is_string\nfrom monty.collections import AttrDict, dict2namedtuple\nfrom pymatgen.core.periodic_table import Element\nfrom abipy.core.structure import Structure\nfrom abipy.flowtk.works import Work\nfrom abipy.flowtk.flows import Flow\nfrom abipy.flowtk.pseudos import Pseudo\nfrom abipy.abio.inputs import AbinitInput\nfrom abipy.tools.plotting import add_fig_kwargs, get_ax_fig_plt\n#from deltafactor.eosfit import BM\nfrom pseudo_dojo.util.dojo_eos import EOS\nfrom pseudo_dojo.core.dojoreport import DojoReport\nfrom pseudo_dojo.refdata.deltafactor import df_compute\n\n\n\n#AE_FCC_A0_BOHR = {\n#\"85_At\"\t: 7.191602243,\n#\"87_Fr\"\t: 10.385617352,\n#\"88_Ra\"\t: 8.843815553,\n#\"89_Ac\"\t: 7.593414710,\n#\"90_Th\"\t: 6.758367409,\n#\"91_Pa\"\t: 6.220412926,\n#\"92_U\"\t: 5.903319246,\n#\"93_Np\"\t: None,\n#\"94_Pu\"\t:6.460780896,\n#\"95_Am\"\t:6.923665533,\n#\"96_Cm\"\t:6.661556739,\n#\"97_Bk\"\t: None,\n#\"98_Cf\"\t:6.203849952,\n#\"99_Es\"\t: None,\n#\"100_Fm\":6.673581066,\n#\"101_Md\": None,\n#\"102_No\" :7.218033842,\n#\"103_Lr\" :6.607962216,\n#\"104_Rf\" :6.177085761,\n#\"105_Db\" :5.878951057,\n#\"106_Sg\" :5.655914411,\n#\"107_Bh\" :5.507431070,\n#\"108_Hs\" :5.424292569,\n#\"109_Mt\" :5.441005307,\n#\"110_Ds\" :5.574234778,\n#\"111_Rg\" :5.856726158,\n#\"112_Cn\" :7.440280753,\n#\"113_Nh\" :7.052079983,\n#\"114_Fl\" :7.089273573,\n#\"115_Mc\": None,\n#\"116_Lv\" :7.134838649,\n#\"117_Ts\": 7.455648006,\n#\"118_Og\": 9.610763498,\n#\"119_Uue\": 9.926738935,\n#\"120_Ubn\": 9.078847165,\n#}\n\n\n_AEDF_Z = None\n\n\ndef get_aedf_z():\n global _AEDF_Z\n if _AEDF_Z is not None:\n return _AEDF_Z\n\n _AEDF_Z = AeDfZ()\n return _AEDF_Z\n\n\nclass AeDfZ(dict):\n\n def __init__(self):\n super().__init__()\n\n def parse_ae(path):\n try:\n data = np.loadtxt(path)\n data[:,2] *= abu.Ha_to_eV\n #volumes_ang = data[:,1]\n #v0 = volumes_ang[3]\n #for i in range(len(volumes_ang) - 1):\n # delta = 100 * (volumes_ang[i+1] - volumes_ang[i]) / v0\n # print(delta)\n\n d = dict(alist_ang=data[:,0], volumes_ang=data[:,1], etotals_ev=data[:,2])\n num_sites = 1\n eos_fit = EOS.DeltaFactor().fit(d[\"volumes_ang\"] / num_sites, d[\"etotals_ev\"] / num_sites)\n for k in (\"e0\", \"v0\", \"b0\", \"b0_GPa\", \"b1\"):\n d[k] = getattr(eos_fit, k)\n return AttrDict(**d)\n\n except Exception as exc:\n print(\"Error in path\", path)\n raise exc\n\n root = os.path.join(os.path.dirname(__file__), \"AE_calcs\")\n\n mag_file = os.path.join(root, \"..\", \"Magnetization.txt\")\n lines = open(mag_file, \"rt\").readlines()\n i = lines.index(\"\\n\")\n json_string = \" \".join([l for l in lines[:i] if not l.startswith(\"#\")])\n #print(json_string)\n mag = json.loads(json_string)\n mag_z = {}\n for k in mag:\n z = int(k.split(\"_\")[0])\n mag_z[z] = mag[k]\n #print(mag_z)\n\n # For these elements, we use the non-magnetic configuration with suffix `_ae_NM.txt`.\n black_list = {\n #\"94_ae.txt\",\n #\"96_ae.txt\",\n \"94_ae_NM.txt\",\n \"96_ae_NM.txt\",\n \"99_ae.txt\",\n \"100_ae.txt\",\n \"101_ae.txt\",\n # New\n \"91_ae.txt\",\n \"92_ae.txt\",\n \"93_ae.txt\",\n 
#\"98_ae.txt\",\n \"98_ae_NM.txt\", # Use magnetic config for 98_Cf\n \"113_ae.txt\",\n \"117_ae.txt\",\n \"118_ae.txt\",\n }\n\n for basename in os.listdir(root):\n path = os.path.join(root, basename)\n if not path.endswith(\".txt\"): continue\n if basename in black_list: continue\n\n # Use z instead of element because pymatgen element does not support z >= 120.\n z = int(basename.split(\"_\")[0])\n #Element.from_Z(z)\n\n if (path.endswith(\"_ae.txt\") or path.endswith(\"_ae_NM.txt\")) and not path.endswith(\"_ox_ae.txt\"):\n if z in self:\n raise ValueError(f\"Found multiple files for z: {z}\")\n\n self[z] = parse_ae(path)\n self[z][\"mag\"] = mag_z.get(z, 0.0)\n #if z == 94:\n # print(\"Parsing path\", path)\n # print(self[z])\n\n\n#if path.endswith(\"_ox_ae.txt\"):\n# try:\n# data = parse_ox_ae(path)\n# except Exception as exc:\n# print(\"Error in path\", path)\n# raise exc\n# self.unary_z[z] = data\n#\n# # volumes in A^3/atom and energies in eV/atom,\n#\n# natom = 1\n# array = np.stack((data[\"volumes_ang\"], data[\"etotals_ev\"] ), axis=-1) / natom\n# volume, bulk_modulus, bulk_deriv, residuals = BM(array)\n#\n#\n#\n\n#def parse_ox_ae(path):\n# a_ang, c_ang, vols_ang, etotals_ev = [], [], [], []\n# with open(path, \"rt\") as fh:\n# for i, line in enumerate(fh):\n# print(line)\n# if line.startswith(\"a\"): continue\n# tokens = line.split()\n# if len(tokens) == 4:\n# a, c, v, e = map(float, tokens)\n# elif len(tokens) == 3:\n# a, v, e = map(float, tokens)\n# c = a\n# else:\n# raise ValueError(f\"Wrong line: {line} in path: {path}\")\n# a_ang.append(a)\n# c_ang.append(c)\n# vols_ang.append(v)\n# etotals_ev.append(e)\n#\n# d = dict(a_ang=a_ang, c_ang=c_ang, vols_ang=vols_ang, etotals_ev=etotals_ev)\n# for k in d:\n# d[k] = np.array(d[k])\n# return d\n\n\ndef _dojo_dfact_results(pseudo, num_sites, volumes, etotals):\n \"\"\"\n This function computes the deltafactor and returns the dictionary to be inserted\n in the dojoreport file.\n\n Args:\n pseudo: Pseudopotential object.\n num_sites: Number of sites in unit cell\n volumes: List with unit cell volumes in Ang**3\n etotals: List of total energies in eV.\n\n Return:\n (dojo_entry, eos_fit)\n where dojo_entry is the Dictionary with results to be inserted in the djrepo file.\n eos_fit is the object storing the results of the EOS fit.\n \"\"\"\n nan = float('NaN')\n\n dojo_entry = dict(\n etotals=list(etotals),\n volumes=list(volumes),\n num_sites=num_sites,\n dfact_meV=nan,\n dfactprime_meV=nan,\n v0=nan,\n b0=nan,\n b0_GPa=nan,\n b1=nan,\n )\n\n volumes, etotals = np.asarray(volumes), np.asarray(etotals)\n eos_fit = None\n try:\n # Use same fit as the one employed for the deltafactor.\n eos_fit = EOS.DeltaFactor().fit(volumes/num_sites, etotals/num_sites)\n\n # Get AE reference results (Wien2K).\n #wien2k = df_database(pseudo.xc).get_entry(pseudo.symbol)\n ae = get_aedf_z()[pseudo.Z]\n\n # Compute deltafactor estimator.\n dfact = df_compute(ae.v0, ae.b0_GPa, ae.b1,\n eos_fit.v0, eos_fit.b0_GPa, eos_fit.b1, b0_GPa=True)\n\n dfactprime_meV = dfact * (30 * 100) / (eos_fit.v0 * eos_fit.b0_GPa)\n\n dfres = {\n \"dfact_meV\": dfact,\n \"dfactprime_meV\": dfactprime_meV,\n \"v0\": eos_fit.v0,\n \"b0\": eos_fit.b0,\n \"b0_GPa\": eos_fit.b0_GPa,\n \"b1\": eos_fit.b1,\n }\n\n for k, v in dfres.items():\n v = v if not isinstance(v, complex) else nan\n dfres[k] = v\n\n dojo_entry.update(dfres)\n\n except EOS.Error as exc:\n dojo_entry[\"_exceptions\"] = str(exc)\n\n return dojo_entry, eos_fit\n\n\nclass DeltaUnaryWork(Work):\n\n @classmethod\n def 
from_pseudo_ecut(cls, pseudo, ecut):\n work = cls()\n work.dojo_pseudo = pseudo\n work.ecut = float(ecut)\n symbol, z = pseudo.symbol, pseudo.Z\n #key = f\"{z}_{symbol}\"\n #a_ang = AE_FCC_A0_BOHR[key] * abu.Bohr_Ang\n\n ae = get_aedf_z()[pseudo.Z]\n\n #connect = True\n connect = False\n\n for a_ang in ae.alist_ang:\n #print(\"a_ang\", a_ang)\n scf_inp = make_input_unary(pseudo, a_ang, ae[\"mag\"], do_relax=False, ecut=ecut)\n if connect: scf_inp[\"prtwf\"] = 1\n work.register_scf_task(scf_inp)\n\n if connect:\n middle = len(work) // 2\n filetype = \"WFK\"\n for i, task in enumerate(work[:middle]):\n #task.add_deps({work[i + 1]: filetype})\n task.add_deps({work[middle]: filetype})\n\n for i, task in enumerate(work[middle+1:]):\n #task.add_deps({work[middle + i]: filetype})\n task.add_deps({work[middle]: filetype})\n\n return work\n\n def get_deltafactor_entry(self):\n #etotals = self.read_etotals(unit=\"eV\")\n etotals, mag_list = [], []\n\n for task in self:\n with task.open_gsr() as gsr:\n etot = gsr.reader.read_value(\"etotal\") * abu.Ha_eV\n etotals.append(etot)\n mag_list.append(gsr.ebands.get_collinear_mag())\n\n num_sites = 1\n volumes = [task.input.structure.volume for task in self]\n\n d, eos_fit = _dojo_dfact_results(self.dojo_pseudo, num_sites, volumes, etotals)\n print(\"[%s]\" % self.dojo_pseudo.symbol, \"eos_fit:\", eos_fit)\n print(\"Ecut %.1f, dfact = %.3f meV, dfactprime %.3f meV\" % (self.ecut, d[\"dfact_meV\"], d[\"dfactprime_meV\"]))\n #print(\"mag_list:\", mag_list)\n d[\"mag_list\"] = mag_list\n\n dojo_ecut = \"%.1f\" % self.ecut\n return {dojo_ecut: d}\n\n\nclass DfEcutFlow(Flow):\n\n @classmethod\n def from_pseudo(cls, pseudo):\n\n #print(pseudo)\n root = os.path.dirname(pseudo.filepath)\n workdir = os.path.join(root, os.path.basename(pseudo.filepath) + \"_flow\")\n flow = cls(workdir=workdir)\n\n # Get initial hints from djrepo file.\n djrepo_path, _ = os.path.splitext(pseudo.filepath)\n flow.djrepo_path = djrepo_path + \".djrepo\"\n with open(flow.djrepo_path) as fh:\n d = json.load(fh)\n ppgen_hints = d[\"ppgen_hints\"]\n flow.ecut_list = d[\"ecuts\"]\n\n #print(f\"running with ecut: {ecut}\")\n #symbol, z = pseudo.symbol, pseudo.Z\n #key = f\"{z}_{symbol}\"\n #a_ang = AE_FCC_A0_BOHR[key] * abu.Bohr_Ang\n #relax_inp = make_input_unary(pseudo, a_ang, mag, do_relax=True)\n #work = flowtk.Work()\n #for ecut in flow.ecut_list:\n # work.register_relax_task(relax_inp.new_with_vars(ecut=ecut))\n #flow.register_work(work)\n\n for ecut in flow.ecut_list:\n flow.register_work(DeltaUnaryWork.from_pseudo_ecut(pseudo, ecut))\n\n return flow\n\n def on_all_ok(self):\n\n with open(self.djrepo_path, \"r\") as fh:\n in_data = json.load(fh)\n\n # Get relaxed lattice parameters as a function of ecut\n #etotals_ev, pressures_gpa = [], []\n #for task in self.works[0]:\n # with task.open_gsr() as gsr:\n # etotals_ev.append(float(gsr.energy))\n # pressures_gpa.append(float(gsr.pressure))\n\n #in_data[\"relax\"] = dict(\n # #ecut_list=self.ecut_list,\n # etotals_ev=etotals_ev,\n # pressure_gpa=pressures_gpa,\n #)\n\n # Compute deltafactor as a function of ecut\n in_data[\"deltafactor\"] = out = {}\n for work in self.works:\n entry = work.get_deltafactor_entry()\n out.update(entry)\n\n # Update djrepo file.\n with open(self.djrepo_path, \"w\") as fh:\n from monty.json import MontyEncoder\n json.dump(in_data, fh, indent=-1, sort_keys=True, cls=MontyEncoder)\n\n return True\n\n\ndef make_input_unary(pseudo, a_ang, mag, do_relax=False, ecut=None):\n\n lattice = float(a_ang) * np.array([\n 
0, 1, 1,\n 1, 0, 1,\n 1, 1, 0]) / np.sqrt(2.0)\n\n coords = [[0, 0, 0]]\n\n structure = Structure(lattice, species=[pseudo.symbol], coords=coords)\n #print(structure.volue, float(a_ang) **3 * 2**(-1/2))\n\n # Initialize the input\n inp = AbinitInput(structure, pseudos=pseudo)\n\n #if pseudo.symbol in (\"Ra\", \"Fm\", \"Cn\", \"Ts\", \"Og\"):\n # print(f\"Setting mag to None for {pseudo.symbol=}\")\n # mag = None\n\n if mag == 0.0:\n nsppol, spinat = 1, None\n #nsppol, spinat = 2, [0, 0, 8]\n else:\n nsppol = 2\n if mag is None:\n #spinat = [0, 0, 6]\n spinat = [0, 0, 8]\n else:\n spinat = [0, 0, mag]\n #spinat = [0, 0, 8]\n\n print(f\"Using nsppol: {nsppol} with spinat {spinat}\")\n\n nband = inp.num_valence_electrons // 2\n nband = max(np.ceil(nband * 1.2), nband + 10)\n\n ngkpt = [15, 15, 15]\n #if pseudo.symbol in (\"Bk\", \"Fm\", \"Md\", \"Nh\"):\n # # Calculations done by BANDS developers with densified sampling.\n # ngkpt = [17, 17, 17]\n # print(\"Using densified ngkpt\", ngkpt, \"for symbol:\", pseudo.symbol)\n\n inp.set_vars(\n paral_kgb=0,\n #rmm_diis=1,\n nband=nband,\n # Occupation\n occopt=3, # Fermi-Dirac\n tsmear=0.001,\n #smdelta 2,\n ecutsm=0.5,\n # SCF procedure\n iscf=17,\n nstep=1000,\n nsppol=nsppol,\n spinat=spinat,\n # k-point grid\n ngkpt=ngkpt,\n nshiftk=1,\n shiftk=[0.0, 0.0, 0.0],\n prtwf=0,\n )\n\n if do_relax:\n inp.set_vars(\n # optimization parameters\n optcell=2,\n ionmov=2,\n tolmxf=1.0e-6,\n tolvrs=1.0e-12,\n dilatmx=1.1,\n )\n else:\n inp.set_vars(\n toldfe=1.0e-10,\n )\n\n if ecut is not None:\n inp[\"ecut\"] = ecut\n\n return inp\n\n\nclass MyDojoReport(DojoReport):\n\n @add_fig_kwargs\n def plot_deltafactor_convergence(self, xc, code=\"WIEN2k\", with_soc=False, what=None, ax_list=None, **kwargs):\n \"\"\"\n Plot the convergence of the deltafactor parameters wrt ecut.\n\n Args:\n xc: String or XcFunc object specifying the XC functional. 
E.g \"PBE\" or XcFunc.from_name(\"PBE\")\n code: Reference code.\n with_soc: If True, the results obtained with SOC are plotted (if available).\n what:\n ax_list: List of matplotlib Axes, if ax_list is None a new figure is created\n\n Returns:\n `matplotlib` figure or None if the deltafactor test is not present\n \"\"\"\n trial = \"deltafactor\" if not with_soc else \"deltafactor_soc\"\n if trial not in self:\n cprint(\"dojo report does not contain trial: %s\" % str(trial), \"red\")\n return None\n\n all_keys = [\"dfact_meV\", \"dfactprime_meV\", \"v0\", \"b0_GPa\", \"b1\"]\n if what is None:\n keys = all_keys\n else:\n what = list_strings(what)\n if what[0].startswith(\"-\"):\n # Exclude keys\n what = [w[1:] for w in what]\n keys = [k for k in all_keys if k not in what]\n else:\n keys = what\n\n # Get reference entry\n #reference = df_database(xc=xc).get_entry(symbol=self.symbol, code=code)\n element = Element[self.symbol]\n reference = get_aedf_z()[element.Z]\n #print(\"Reference data:\", reference)\n\n # Get DataFrame.\n frame = self.get_pdframe(trial, *keys)\n ecuts = np.array(frame[\"ecut\"])\n\n import matplotlib.pyplot as plt\n if ax_list is None:\n fig, ax_list = plt.subplots(nrows=len(keys), ncols=1, sharex=True, squeeze=False)\n ax_list = ax_list.ravel()\n else:\n fig = plt.gcf()\n\n if len(keys) != len(ax_list):\n raise ValueError(\"len(keys)=%s != len(ax_list)=%s\" % (len(keys), len(ax_list)))\n\n for i, (ax, key) in enumerate(zip(ax_list, keys)):\n values = np.array(frame[key])\n refval = getattr(reference, key)\n # Plot difference pseudo - ref.\n #print(\"ecuts\", ecuts, \"values\", values)\n psmae_diff = values - refval\n ax.plot(ecuts, psmae_diff, \"o-\")\n\n # Add vertical lines at hints.\n if self.has_hints:\n vmin, vmax = psmae_diff.min(), psmae_diff.max()\n for acc in self.ALL_ACCURACIES:\n ax.vlines(self[\"hints\"][acc][\"ecut\"], vmin, vmax,\n colors=self.ACC2COLOR[acc], linestyles=\"dashed\")\n\n ax.grid(True)\n ax.set_ylabel(r\"$\\Delta$\" + key)\n if i == len(keys) - 1: ax.set_xlabel(\"Ecut [Ha]\")\n\n xmin, xmax = min(ecuts), max(ecuts)\n if key == \"dfactprime_meV\":\n # Add horizontal lines (used to find hints for ecut).\n last = values[-1]\n for pad, acc in zip(self.ATOLS, self.ALL_ACCURACIES):\n color = self.ACC2COLOR[acc]\n ax.hlines(y=last + pad, xmin=xmin, xmax=xmax, colors=color, linewidth=1.5, linestyles='dashed')\n ax.hlines(y=last - pad, xmin=xmin, xmax=xmax, colors=color, linewidth=1.5, linestyles='dashed')\n # Set proper limits so that we focus on the relevant region.\n #ax.set_ylim(last - 1.1*self.ATOLS[0], last + 1.1*self.ATOLS[0])\n else:\n ax.hlines(y=0., xmin=xmin, xmax=xmax, colors=\"black\", linewidth=2, linestyles='dashed')\n\n plt.tight_layout()\n\n return fig\n\n @add_fig_kwargs\n def plot_ae_eos(self, ax=None, text=None, cmap=\"jet\", **kwargs):\n\n ax, fig, plt = get_ax_fig_plt(ax)\n cmap = plt.get_cmap(cmap)\n\n ppgen_ecuts = set([self[\"ppgen_hints\"][acc][\"ecut\"] for acc in (\"low\", \"normal\", \"high\")])\n\n # Get DataFrame.\n trial = \"deltafactor\" #if not with_soc else \"deltafactor_soc\"\n frame = self.get_pdframe(trial, \"num_sites\", \"volumes\", \"etotals\")\n ecuts = frame[\"ecut\"]\n num_sites = np.array(frame[\"num_sites\"])\n assert np.all(num_sites == num_sites[0])\n num_sites = num_sites[0]\n\n # Get reference entry\n #reference = df_database(xc=xc).get_entry(symbol=self.symbol, code=code)\n element = Element[self.symbol]\n reference = get_aedf_z()[element.Z]\n #print(\"Reference data:\", reference)\n\n ys = 
reference.etotals_ev - np.min(reference.etotals_ev)\n #ax.plot(reference.volumes_ang, ys, label=\"AE1\")\n\n # Use same fit as the one employed for the deltafactor.\n eos_fit = EOS.DeltaFactor().fit(reference.volumes_ang/num_sites, ys/num_sites)\n eos_fit.plot(ax=ax, text=False, label=\"AE\", color=\"k\", marker=\"^\", alpha=1, show=False)\n\n for i, ecut in enumerate(ecuts):\n #if ecut not in ppgen_ecuts: continue\n #if i not in (0, len(ecuts) -1): continue\n if i not in (2, len(ecuts) -1): continue\n\n # Subframe with this value of ecut.\n ecut_frame = frame.loc[frame[\"ecut\"] == ecut]\n assert ecut_frame.shape[0] == 1\n # Extract volumes and energies for this ecut.\n volumes = (np.array(list(ecut_frame[\"volumes\"].values), dtype=float)).flatten()\n etotals = (np.array(list(ecut_frame[\"etotals\"].values), dtype=float)).flatten()\n\n ys = etotals - etotals.min()\n #ax.plot(volumes, ys)\n\n # Use same fit as the one employed for the deltafactor.\n eos_fit = EOS.DeltaFactor().fit(volumes/num_sites, ys/num_sites)\n eos_fit.plot(ax=ax, text=False, label=\"ecut %.1f\" % ecut, color=cmap(i/len(ecuts), alpha=0.8), show=False)\n\n ax.grid(True)\n if text is not None:\n ax.set_title(text)\n ax.legend(loc='best', shadow=True, frameon=True) #fancybox=True)\n\n return fig\n\n\ndef check_data(z, data, verbose=0):\n from pymatgen.core.lattice import Lattice\n tol = 1e-4\n if verbose: print(f\"Testing volume for z: {z} with tol: {tol}\")\n for a_ang, vol in zip(data[\"alist_ang\"], data[\"volumes_ang\"]):\n lattice = float(a_ang) * np.array([\n 0, 1, 1,\n 1, 0, 1,\n 1, 1, 0]) / np.sqrt(2.0)\n lattice = Lattice(lattice)\n\n # V = l**3 2 * (-1/2)\n #print(lattice.volume, float(a_ang) **3 * 2**(-1/2))\n #print(lattice.volume * np.sqrt(2), float(a_ang) ** 3)\n #print(lattice.volume ** (1/3) * (2 ** (1/6)), float(a_ang))\n #print(float(a_ang) / lattice.volume ** 1/3)\n\n adiff = abs(vol - lattice.volume)\n print(\"adiff:\", adiff)\n if adiff > tol:\n print(f\"Inexact a/vol for z: {z}: volume from file:\", vol, \", volume from a\", lattice.volume, \"adiff\", adiff)\n\nif __name__ == \"__main__\":\n from pprint import pprint, pformat\n aedf_z = get_aedf_z()\n for z, data in aedf_z.items():\n print(\"Checking z:\", z) #, pformat(data))\n check_data(z, data, verbose=0)\n","repo_name":"gmatteo/pseudos_ac_she","sub_path":"tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":20965,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"27213016000","text":"from bson import ObjectId\nimport pika\nfrom RabbitMQ_Mongo.contact_model import Contact\nfrom mongoengine import connect\n\n# Connect to RabbitMQ\ncredentials = pika.PlainCredentials('guest', 'guest')\nconnection = pika.BlockingConnection(\n pika.ConnectionParameters(host='localhost', port=5672, credentials=credentials))\nchannel = connection.channel()\n\n# Declare the queue\nchannel.queue_declare(queue='contact_ids_sms')\n\n# This function simulates sending an smsm.\ndef send_sms(phone_num, message):\n print(f\"Simulating SMS sent to {phone_num}: {message}\")\n\n# This function is intended to be called when a message is received \n# from the RabbitMQ queue, and it processes the message by sending \n# an email to the contact specified in the message.\ndef callback(ch, method, properties, body):\n contact_id = body.decode('utf-8')\n contact = Contact.objects(id=ObjectId(contact_id)).first()\n if contact and not contact.message_sent:\n send_sms(contact.phone, \"Your message content goes 
here.\")\n contact.message_sent = True\n contact.save()\n print(f\"Message sent for contact {contact_id}\")\n\n# Set up the consumer and contains main logic\ndef main():\n channel.basic_consume(queue='contact_ids_sms', on_message_callback=callback, auto_ack=True)\n print('Waiting for messages. To exit press CTRL+C')\n channel.start_consuming()\n\nif __name__ == '__main__':\n # Connect to Atlas MongoDB\n uri = \"mongodb+srv://tsubanolga:O1904@cluster0.yiefpq6.mongodb.net/Cluster0?retryWrites=true&w=majority\"\n connect(host=uri) \n main()\n","repo_name":"OlgaTsuban/Practice_with_web","sub_path":"MongoDB_Redis_RabbitMQ/RabbitMQ_Mongo/consumer_sms.py","file_name":"consumer_sms.py","file_ext":"py","file_size_in_byte":1565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"71556299157","text":"import pandas as pd\nimport plotly.express as px\nfrom sys import argv\n\ndef main():\n df = pd.read_csv(argv[1])\n print(df.head())\n fig = px.scatter_3d(df, x='x1', y='x2', z='x3', color='y')\n fig.show()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"MatyManzur/sia-all-tps","sub_path":"sia-tp3/plots/data_plot.py","file_name":"data_plot.py","file_ext":"py","file_size_in_byte":251,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"42412696902","text":"import requests\r\n\r\nurl = \"https://tcgbusfs.blob.core.windows.net/blobyoubike/YouBikeTP.json\"\r\n\r\ndef getYouBikeData():\r\n # get ubike data from ubike json\r\n try:\r\n res = requests.get(url)\r\n except requests.ConnectionError:\r\n return \"Connection Error\" \r\n data = res.json()\r\n global ubike_data\r\n ubike_data = data[\"retVal\"]\r\n\r\n return ubike_data","repo_name":"ShihPingLin/Taipei_Find_YouBike_Website","sub_path":"getYouBikeData.py","file_name":"getYouBikeData.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"38695800258","text":"import numpy as np\nfrom scipy import interpolate\n\nfrom .checkarrays import checkarrays, checkarrays_tvd, checkarrays_monotonic_tvd\n\ndef resample_deviation(md, inc, azi, md_step=1):\n \"\"\"Resample a well deviation to a given step.\n\n Parameters\n ----------\n md : float\n measured depth\n inc : float\n well inclination in degrees from vertical\n azi : float\n well azimuth in degrees from North\n md_step : int or float\n md increment to interpolate to\n\n Notes\n -----\n This function should not be used before md->tvd conversion.\n \n The input arrays must not contain NaN values.\n\n Returns\n -------\n md : array_like of float\n inc : array_like of float\n azi : array_like of float\n \"\"\"\n\n md, inc, azi = checkarrays(md, inc, azi)\n\n for input_array in [md, inc, azi]:\n if np.isnan(input_array).any():\n raise ValueError('md, inc and azi cannot contain NaN values.')\n\n try:\n new_md = np.arange(md.min(), md.max() + md_step, md_step)\n new_md[-1] = md.max()\n except TypeError:\n raise TypeError('md_step must be int or float')\n\n f_inc = interpolate.interp1d(md, inc)\n new_inc = f_inc(new_md)\n f_azi = interpolate.interp1d(md, azi)\n new_azi = f_azi(new_md)\n\n return new_md, new_inc, new_azi\n\ndef resample_position(tvd, easting, northing, tvd_step=1):\n \"\"\"\n Resample a well positional log to a given step.\n\n Parameters\n ----------\n tvd : float\n true verical depth\n northing : float\n north-offset from zero reference point\n easting : float\n east-offset 
from zero reference point\n tvd_step : int or float\n tvd increment to resample to\n\n Notes\n -----\n This function should not be used before tvd->md conversion.\n\n The input arrays must not contain NaN values.\n\n The tvd values must be strictly increasing, i.e. this\n method will not work on horizontal wells, use\n `resample_deviation` for those wells.\n\n The units should be the same as the input deviation or the results will be wrong.\n\n Returns\n -------\n tvd : array_like of float\n true vertical depth\n northing : array_like of float\n easting : array_like of float\n \"\"\"\n tvd, easting, northing = checkarrays_monotonic_tvd(tvd, easting, northing)\n\n for input_array in [tvd, northing, easting]:\n if np.isnan(input_array).any():\n raise ValueError('tvd, northing and easting cannot contain NaN values.')\n\n try:\n new_tvd = np.arange(tvd[0], tvd[-1] + tvd_step, tvd_step)\n new_tvd[-1] = tvd[-1]\n except TypeError:\n raise TypeError('tvd_step must be int or float')\n\n f_easting = interpolate.interp1d(tvd, easting)\n new_easting = f_easting(new_tvd)\n f_northing = interpolate.interp1d(tvd, northing)\n new_northing = f_northing(new_tvd)\n\n return new_tvd, new_northing, new_easting","repo_name":"scuervo91/wellpathpy","sub_path":"wellpathpy/interpolate.py","file_name":"interpolate.py","file_ext":"py","file_size_in_byte":2909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"85"} +{"seq_id":"4028936356","text":"import numpy as np\n\nfrom word_cloud.wordcloud import make_wordcloud\n\nfrom config import (\n WIDTH, HEIGHT,\n CLOUD_IMAGE_FILENAME)\n\n# get word counts\ncount = {}\nwith open('names.txt') as f:\n for line in f:\n username = line[:-1]\n count[username.decode('utf8')] = count.get(username, 0) + 1\nwords = np.array(count.keys())\ncounts = np.array(count.values())\n\nmake_wordcloud(\n words, counts,\n CLOUD_IMAGE_FILENAME,\n width=WIDTH,\n height=HEIGHT,\n font_path=\"OldSansBlack.ttf\", redraw_in_color=False)\n","repo_name":"pliablematter/flickr-favorite-attribution","sub_path":"plot_word_cloud.py","file_name":"plot_word_cloud.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"40886875032","text":"from django.test import Client, TestCase\nfrom django.urls import reverse\nfrom blog.models import Post\nfrom django.contrib.auth.models import User\n\nclass TestViews(TestCase):\n\n def setUp(self):\n self.client = Client()\n self.home_url = reverse('home')\n self.author = User.objects.create(username='testuser', password='testpass')\n self.post = Post.objects.create(\n author = self.author,\n title = 'testpost',\n content = 'testcontent',\n subcontent = 'testsubcontent',\n active = True,\n featured = True,\n slug = 'testpost'\n )\n self.deatil_url = reverse('post', kwargs={'slug': self.post.slug})\n self.posts_url = reverse('posts')\n\n def test_post_home_GET(self):\n\n response = self.client.get(self.home_url)\n\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'index.html')\n self.assertNotContains(response, 'i am not in index page')\n\n\n def test_post_detail_GET(self):\n response = self.client.get(self.deatil_url)\n\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'post.html')\n self.assertNotContains(response, 'i am not in detail page')\n\n def test_posts_page_GET(self):\n response = self.client.get(self.posts_url)\n\n self.assertEqual(response.status_code, 200)\n 
self.assertTemplateUsed(response, 'posts.html')\n self.assertNotContains(response, 'hi i am alexander cabanel')","repo_name":"DevOckha/BasicBlog","sub_path":"blog/tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":1551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"20129990450","text":"# a = 6A09E667F3BCC908\r\n# e = 510E527FADE682D1 \r\n# b = BB67AE8584CAA73B \r\n# f = 9B05688C2B3E6C1F \r\n# c = 3C6EF372FE94F82B \r\n# g = 1F83D9ABFB41BD6B \r\n# d = A54FF53A5F1D36F1 \r\n# h = 5BE0CD19137E2179\r\n\r\nprint(0x3C6EF372FE94F82C)\r\n\r\n# a =2,\r\n# b= 3,\r\n# c= 5,\r\n# d =7,\r\n# e = 11,\r\n# f =13,\r\n# g = 7,\r\n# h =19\r\nimport math\r\nimport fractions\r\na = 2\r\n# a = int(input(\"enter the number :\"))\r\n# sqrt_A= math.sqrt(a)\r\n# print(f\"we are printing the sqrt of {a} ==> \",end = \"\")\r\n# print(sqrt_A)\r\nsqrt_A = 1.7320508075688772935\r\nfr = fractions.Fraction(sqrt_A)\r\nprint(f\"printing the fractions of {a} ===> \",end=\"\")\r\nprint(fr)\r\nfr = str(fr).split(\"/\")\r\nfractions_num = []\r\nfractions_num.append(fr[0])\r\nfractions_dem = []\r\nfractions_dem.append(fr[1])\r\nf_num = fractions_num[0]\r\nf_dem = fractions_dem[0]\r\ndef normal():\r\n print(\"Normal \")\r\n print(\"normal hex val :\")\r\n fractions_num = hex(int(f_num))\r\n fractions_dem = hex(int(f_dem))\r\n print(f\"printing the hexadecimal values of numerator :{fractions_num},demoniator : {fractions_dem}\")\r\nnormal()\r\n# big_endian()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n# def big_endian():\r\n# f_nu = f_num[::-1]\r\n# f_de = f_dem[::-1]\r\n# # fractions_dem_r = fractions_dem[::-1]\r\n# print(f\"big endian formats :numerator:{fractions_num}={f_num} , denominator :{fractions_dem} = {f_dem}\")\r\n# fractions_num = hex(int(f_nu))\r\n# fractions_dem = hex(int(f_de))\r\n# print(f\"printing the big endian hexadecimal values of numerator :{fractions_num},demoniator : {fractions_dem}\")\r\n","repo_name":"jeykarlokes/complete-reference-to-python3-programs","sub_path":"python/dg2v.py","file_name":"dg2v.py","file_ext":"py","file_size_in_byte":1528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"32209895436","text":"from collections import deque\nm, n = map(int, __import__('sys').stdin.readline().split())\na = [[int(_) for _ in __import__('sys').stdin.readline().split()] for _ in range(n)]\nvisited = [[False if a[i][j] == 0 else True for j in range(m)] for i in range(n)]\n\nday = -1\nq = deque()\nfor i in range(n):\n for j in range(m):\n if a[i][j] == 1:\n q.append((i, j))\nwhile q:\n l = len(q)\n for _ in range(l):\n i, j = q.popleft()\n for x, y in (i-1, j), (i, j+1), (i+1, j), (i, j-1):\n if not(0 <= x < n and 0 <= y < m):\n continue\n else:\n if not visited[x][y]:\n visited[x][y] = True\n q.append((x, y))\n day += 1\n\nchk = True\nfor x in visited:\n if False in x:\n chk = False\n break\n\nif chk: print(day)\nelse: print(-1)\n","repo_name":"INYEONGKIM/BOJ","sub_path":"BOJ7576.py","file_name":"BOJ7576.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"15643807510","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, print_function, unicode_literals\n\nimport os\n\nimport numpy as np\nfrom sklearn.preprocessing import LabelEncoder, LabelBinarizer\nfrom machine import lsi\nfrom utils.wrapper import 
time_consumed\nfrom machine import svm, logistic, xgb, linear_svm\nfrom datetime import datetime\nfrom machine.get_project_config import get_project_config\n\n\n@time_consumed\ndef train(project_info):\n start_time = datetime.now()\n\n name, dict_config, topic_method, topic_config, train_method, train_config = get_project_config(project_info)\n\n data = lsi.compute_lsi_lda(project_info, recover_d=True,\n recover_m=True, recover_tfidf=True)\n\n targets = [x[1] for x in data]\n\n if train_method == 'xgb':\n le = LabelBinarizer()\n else:\n le = LabelEncoder()\n\n le.fit(targets)\n\n X_data = []\n Y_data = []\n for i in range(len(data)):\n x = data[i][0]\n assert np.shape(x)[1] == topic_config['num_topics']\n\n y = [targets[i]] * len(x)\n\n y = le.transform(y)\n\n X_data.append(x)\n Y_data.append(y)\n\n data = np.concatenate(X_data, axis=0)\n target = np.concatenate(Y_data, axis=0)\n\n train_score = None\n\n if train_method == 'svm':\n train_score, test_score = svm.clf(\n project_info, data, target, le, is_save=True)\n\n # TODO\n elif train_method == 'xgb':\n pass\n \n elif train_method == 'logistic':\n train_score, test_score = logistic.clf(project_info, data, target, le, is_save=True)\n # train_score = logistic.logistic_clf(data, target, le, is_save=True)\n # cross_scores = logistic.logistic_cross(data, target)\n elif train_method == 'lsvm':\n train_score, test_score = linear_svm.clf(\n project_info, data, target, le, is_save=True)\n\n base_dir = './data/gensim/{}'.format(name)\n base_dir = os.path.join(base_dir, 'train')\n\n if not os.path.exists(base_dir):\n os.mkdir(base_dir)\n\n end_time = datetime.now()\n interval = str(end_time - start_time)\n\n train_date = start_time.strftime('%Y-%m-%d')\n res_name = '{0}-{1}-{2}-{3}-train.txt'.format(\n name, train_method, topic_method, train_date)\n\n res_path = os.path.join(base_dir, res_name)\n\n with open(res_path, 'a') as f:\n f.write('time:\\n')\n f.write('\\tstart => %s\\n' % start_time)\n f.write('\\tend => %s\\n' % end_time)\n f.write('\\tinterval => %s\\n\\n' % interval)\n\n project_info = ['\\t' + str(x) + ' => ' + str(y)\n for x, y in project_info.items() if isinstance(y, str)]\n project_info = '\\n'.join(project_info)\n f.write('project-info:\\n')\n f.write(project_info)\n f.write('\\n\\n')\n\n dict_config = ['\\t' + str(x) + ' => ' + str(y)\n for x, y in dict_config.items()]\n dict_config = '\\n'.join(dict_config)\n f.write('dictionary-config:\\n')\n f.write(dict_config)\n f.write('\\n\\n')\n\n topic_config = ['\\t' + str(x) + ' => ' + str(y)\n for x, y in topic_config.items()]\n topic_config = '\\n'.join(topic_config)\n f.write('topic-config:\\n')\n f.write(topic_config)\n f.write('\\n\\n')\n\n\n train_config = ['\\t' + str(x) + ' => ' + str(y) for x, y in train_config.items()]\n train_config = '\\n'.join(train_config)\n f.write('train-config:\\n')\n f.write(train_config)\n f.write('\\n\\n')\n\n if train_score:\n f.write('score:\\n')\n f.write('\\ttrain_score => %s\\n' % str(train_score))\n if test_score:\n f.write('\\ttest_score => %s\\n' % str(test_score))\n f.write('\\n\\n')\n\n f.write('*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#* \\n')\n f.write('\\n')\n","repo_name":"XuHewen/sync_work","sub_path":"machine/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3858,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"4057149082","text":"import logging\n\nfrom faker import Faker\n\nfrom tests.factory.entities.listing_factory import 
ListingFactory\n\nlogger = logging.getLogger(__name__)\n\nif __name__ == \"__main__\":\n fake_fr = Faker(locale=\"fr-FR\")\n fake_de = Faker(locale=\"de-DE\")\n fake_be = Faker(locale=\"nl-BE\")\n\n for i in range(10):\n faker = Faker()\n locale = faker.random_element([\"fr-FR\", \"de-DE\", \"nl-BE\"])\n listing = ListingFactory(locale).build()\n print(listing.json(ensure_ascii=False))\n\n logger.info(\"done\")\n","repo_name":"hatimali/aviv","sub_path":"python-flask/factory.py","file_name":"factory.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"43346078485","text":"import psycopg2\r\nimport psycopg2.extensions\r\nfrom flask import Flask\r\nfrom flask_login import LoginManager, UserMixin\r\nfrom flask_migrate import Migrate\r\nfrom flask_sqlalchemy import SQLAlchemy\r\nconn = psycopg2.connect(\r\n host=\"localhost\",\r\n database=\"ten_jen\",\r\n user=\"postgres\",\r\n password=\"762341Aa\",\r\n port=5432)\r\n\r\npsycopg2.extensions.register_type(psycopg2.extensions.UNICODE)\r\npsycopg2.extensions.register_type(psycopg2.extensions.UNICODEARRAY)\r\n\r\nUPLOAD_FOLDER = r'/home/Armianin/Work/jen_ten'\r\nALLOWED_EXTENSIONS = {'jpeg', 'jpg', 'png'}\r\napp = Flask(__name__)\r\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\r\napp.config['RECAPTCHA_USE_SSL']= False\r\napp.config['RECAPTCHA_PUBLIC_KEY'] ='6LeBCfIZAAAAAO39_L4Gd7f6uCM0PfP_N3XjHxkW'\r\napp.config['RECAPTCHA_PRIVATE_KEY'] ='6LeBCfIZAAAAAJTjq0Xz_ndAW9LByCo1nJJKy'\r\napp.config['RECAPTCHA_OPTIONS'] = {'theme':'black'}\r\nlogin_manager = LoginManager()\r\nlogin_manager.init_app(app)\r\nlogin_manager.login_view = 'login'\r\ndb_cursor = conn.cursor()\r\n","repo_name":"Qazqazqaz2/ten_jen","sub_path":"connect.py","file_name":"connect.py","file_ext":"py","file_size_in_byte":1007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"15729605147","text":"import datetime\nimport copy\nimport os\nimport openpyxl\nimport openpyxl.utils\nimport logging\nimport numpy as np\nfrom modules.utilities import toolbox\n\nclass InvoicesFromTemplate: # todo refactor into multiple classes\n\n def __init__(self, times, clients, invoices, params):\n self.times = times\n self.clients = clients\n self.invoices = invoices\n self.params = params\n\n self.logger = logging.getLogger('main.' + __name__)\n\n try:\n self.template_workbook = openpyxl.load_workbook(params.invoice_template_filename)\n self.worksheet_name = self.template_workbook.sheetnames[0]\n self.template_worksheet = self.template_workbook.get_sheet_by_name(self.worksheet_name)\n self._generate_cell_addresses_from_template_dict()\n self._determine_working_day_rows()\n except FileNotFoundError:\n self.logger.error('Template file was not found at {}. No invoices can be generated without one. 
Please make a template file.'.format(params.invoice_template_path))\n\n def _generate_cell_addresses_from_template_dict(self):\n cell_coordinate_attribute_get_info_structure_df_itertuples_method_tuple_list = [('times_cell_coordinate_dict', 'get_times_info_structure_df_itertuples'),\n ('clients_cell_coordinate_dict', 'get_clients_info_structure_df_itertuples'),\n ('invoices_cell_coordinate_dict', 'get_invoices_info_structure_df_itertuples'),\n ('calculated_cell_coordinate_dict', 'get_calculations_info_structure_df_itertuples')]\n\n for cell_coordinate_attribute, get_info_structure_df_itertuples_method in cell_coordinate_attribute_get_info_structure_df_itertuples_method_tuple_list:\n self._generate_cell_coordinate_dict_from_info_structure_df(cell_coordinate_attribute, get_info_structure_df_itertuples_method)\n\n def _generate_cell_coordinate_dict_from_info_structure_df(self, cell_coordinate_attribute, get_info_structure_df_itertuples_method):\n setattr(self, cell_coordinate_attribute, dict())\n for info_structure_row in getattr(self.params, get_info_structure_df_itertuples_method)():\n info_name = info_structure_row.info_name\n invoice_template_tag = info_structure_row.invoice_template_tag\n if isinstance(invoice_template_tag, str):\n for row in range(1, self.template_worksheet.max_row + 1):\n for col in range(1, self.template_worksheet.max_column + 1):\n cell = self.template_worksheet.cell(row=row, column=col)\n if cell.value == invoice_template_tag:\n getattr(self, cell_coordinate_attribute)[info_name] = cell.coordinate\n\n def _determine_working_day_rows(self):\n self.working_day_start_row, _ = openpyxl.utils.cell.coordinate_to_tuple(self.times_cell_coordinate_dict['Type of hours'])\n self.working_day_stop_row, _ = openpyxl.utils.cell.coordinate_to_tuple(self.calculated_cell_coordinate_dict['Calculated amount for working day'])\n self.row_increment_between_two_working_days = self.working_day_stop_row - self.working_day_start_row + 2\n\n def generate_invoice(self, invoice_info_dict):\n invoice_workbook = self._initialize_workbook()\n client_info_dict, invoice_times_df = self._create_client_info_dict_and_invoice_times_df(invoice_info_dict)\n\n self._copy_and_fill_in_workbook_begin(client_info_dict, invoice_info_dict)\n self._copy_and_fill_in_working_days(invoice_times_df)\n self._copy_and_fill_in_workbook_end(invoice_times_df)\n\n self._save_invoice_workbook(invoice_workbook, invoice_info_dict)\n\n def _initialize_workbook(self):\n invoice_workbook = openpyxl.Workbook()\n blank_worksheet_name = invoice_workbook.sheetnames[0]\n blank_worksheet = invoice_workbook.get_sheet_by_name(blank_worksheet_name)\n invoice_workbook.remove_sheet(blank_worksheet)\n self.invoice_worksheet = invoice_workbook.create_sheet(self.worksheet_name) # todo get somewhere else\n return invoice_workbook\n\n def _create_client_info_dict_and_invoice_times_df(self, invoice_info_dict):\n client_name = invoice_info_dict['Client name']\n client_info_dict = self.clients.get_client_info_dict_based_on_client_name(client_name)\n\n invoice_month = invoice_info_dict['Month']\n invoice_year = invoice_info_dict['Year']\n\n start_date, stop_date = self._extract_start_and_stop_date_from_month_and_year(invoice_month, invoice_year)\n full_times_df = self.times.get_times_df()\n\n valid_row_indices = (start_date <= full_times_df['Date']) & (full_times_df['Date'] < stop_date) & (full_times_df['Client name'] == client_name)\n invoice_times_df = full_times_df.loc[valid_row_indices]\n invoice_times_df = 
invoice_times_df.sort_values(by='Date', ignore_index=True)\n return client_info_dict, invoice_times_df\n\n def _extract_start_and_stop_date_from_month_and_year(self, month, year):\n date_string = month + ' ' + str(year)\n start_date = datetime.datetime.strptime(date_string, '%B %Y')\n\n stop_date = datetime.datetime.strptime(date_string, '%B %Y')\n stop_date_month = stop_date.month\n stop_date_year = stop_date.year\n if stop_date_month == 12:\n stop_date_month = 1\n stop_date_year = stop_date_year + 1\n else:\n stop_date_month = stop_date_month + 1\n stop_date = datetime.datetime(year=stop_date_year, month=stop_date_month, day=1, hour=0, minute=0, second=0)\n return start_date, stop_date\n\n\n def _copy_and_fill_in_workbook_begin(self, client_info_dict, invoice_info_dict):\n self._copy_workbook_begin()\n self._fill_in_invoice_info(invoice_info_dict)\n self._fill_in_client_info(client_info_dict)\n\n def _copy_workbook_begin(self):\n for df_row in range(1, self.working_day_start_row):\n for col in range(1, self.template_worksheet.max_column + 1):\n template_cell = self.template_worksheet.cell(row=df_row, column=col)\n invoice_cell = self.invoice_worksheet.cell(row=df_row, column=col)\n InvoicesFromTemplate.copy_cell(template_cell, invoice_cell)\n\n def _fill_in_invoice_info(self, invoice_info_dict):\n for key, val in invoice_info_dict.items():\n try:\n coordinate = self.invoices_cell_coordinate_dict[key]\n template_cell = self.invoice_worksheet[coordinate]\n template_cell.value = val\n except KeyError:\n pass\n\n def _fill_in_client_info(self, client_info_dict):\n for key, val in client_info_dict.items():\n try:\n coordinate = self.clients_cell_coordinate_dict[key]\n template_cell = self.invoice_worksheet[coordinate]\n template_cell.value = val\n except KeyError:\n pass\n\n def _copy_and_fill_in_working_days(self, invoice_times_df):\n for index, df_row in invoice_times_df.iterrows():\n row_offset = index * self.row_increment_between_two_working_days\n self._copy_working_day(row_offset)\n self._fill_in_working_day(df_row, row_offset)\n self._fill_in_calculated_hours(row_offset)\n\n if df_row['Type of hours'] == 'during day':\n self._fill_in_calculated_amount_for_hours_during_day(row_offset)\n elif df_row['Type of hours'] == 'shift':\n self._fill_in_calculated_amount_for_hours_in_shift(row_offset)\n else:\n self.logger.error('Invalid type of hours in times')\n\n self._fill_in_calculated_amount_for_commute(row_offset)\n self._fill_in_calculated_amount_for_distance_during_work(row_offset)\n self._fill_in_calculated_amount_for_working_day(row_offset)\n\n def _copy_working_day(self, row_offset):\n for row in range(self.working_day_start_row, self.working_day_stop_row + 1):\n for col in range(1, self.template_worksheet.max_column + 1):\n template_cell = self.template_worksheet.cell(row=row, column=col)\n invoice_row = row + row_offset\n invoice_cell = self.invoice_worksheet.cell(row=invoice_row, column=col)\n InvoicesFromTemplate.copy_cell(template_cell, invoice_cell)\n\n def _fill_in_working_day(self, df_row, row_offset): # todo clean up\n for key, val in df_row.iteritems():\n try:\n if key == 'Type of hours':\n if val == 'during day':\n self.set_times_value(key, row_offset, self.params.translation_during_day)\n elif val == 'shift':\n self.set_times_value(key, row_offset, self.params.translation_shift)\n else:\n self.logger.error('Invalid type of hours in times')\n else:\n self.set_times_value(key, row_offset, val)\n except KeyError:\n pass\n\n def set_times_value(self, info_name, row_offset, 
value):\n coordinate = self.times_cell_coordinate_dict[info_name]\n initial_row, initial_col = openpyxl.utils.cell.coordinate_to_tuple(coordinate)\n used_row = initial_row + row_offset\n cell = self.invoice_worksheet.cell(row=used_row, column=initial_col)\n cell.value = value\n\n def _fill_in_calculated_hours(self, row_offset):\n initial_start_time_row, start_time_col = openpyxl.utils.cell.coordinate_to_tuple(self.times_cell_coordinate_dict['Start time'])\n initial_stop_time_row, stop_time_col = openpyxl.utils.cell.coordinate_to_tuple(self.times_cell_coordinate_dict['Stop time'])\n\n start_time_row = initial_start_time_row + row_offset\n stop_time_row = initial_stop_time_row + row_offset\n\n start_time_coordinate = InvoicesFromTemplate.tuple_to_coordinate(start_time_row, start_time_col)\n stop_time_coordinate = InvoicesFromTemplate.tuple_to_coordinate(stop_time_row, stop_time_col)\n\n self.set_calculated_value('Calculated hours', row_offset, '=24*({}-{})', [stop_time_coordinate, start_time_coordinate])\n\n def _fill_in_calculated_amount_for_hours_during_day(self, row_offset):\n multiplication_coordinate = self.invoices_cell_coordinate_dict['Rate during day (euro/h)']\n quantity_coordinate = self.calculated_cell_coordinate_dict['Calculated hours']\n self._fill_in_calculated_amount('Calculated amount for hours', row_offset, multiplication_coordinate, quantity_coordinate)\n\n def _fill_in_calculated_amount_for_hours_in_shift(self, row_offset):\n multiplication_coordinate = self.invoices_cell_coordinate_dict['Rate for shifts (euro/h)']\n quantity_coordinate = self.calculated_cell_coordinate_dict['Calculated hours']\n self._fill_in_calculated_amount('Calculated amount for hours', row_offset, multiplication_coordinate, quantity_coordinate)\n\n def _fill_in_calculated_amount_for_commute(self, row_offset):\n multiplication_coordinate = self.invoices_cell_coordinate_dict['Compensation for commute (euro/km)']\n quantity_coordinate = self.times_cell_coordinate_dict['Commute (km)']\n self._fill_in_calculated_amount('Calculated amount for commute', row_offset, multiplication_coordinate, quantity_coordinate)\n\n def _fill_in_calculated_amount_for_distance_during_work(self, row_offset):\n multiplication_coordinate = self.invoices_cell_coordinate_dict['Compensation for driving during work (euro/km)']\n quantity_coordinate = self.times_cell_coordinate_dict['Distance during work (km)']\n self._fill_in_calculated_amount('Calculated amount for distance during work', row_offset, multiplication_coordinate, quantity_coordinate)\n\n def _fill_in_calculated_amount(self, calculated_amount, row_offset, multiplication_coordinate, quantity_coordinate):\n new_multiplication_coordinate = openpyxl.utils.cell.absolute_coordinate(multiplication_coordinate)\n new_quantity_coordinate = InvoicesFromTemplate._increment_coordinate_by_row_offset(quantity_coordinate, row_offset)\n self.set_calculated_value(calculated_amount, row_offset, '={}*{}', [new_multiplication_coordinate, new_quantity_coordinate])\n\n def _fill_in_calculated_amount_for_working_day(self, row_offset):\n coordinate_list = [self.calculated_cell_coordinate_dict['Calculated amount for hours'], self.calculated_cell_coordinate_dict['Calculated amount for commute'], self.calculated_cell_coordinate_dict['Calculated amount for distance during work']]\n new_coordinate_list = [InvoicesFromTemplate._increment_coordinate_by_row_offset(coordinate, row_offset) for coordinate in coordinate_list]\n string_format = '=' + '+'.join(['{}']*len(coordinate_list)) # should give e.g. 
'={}+{}' if there are two coordinates\n self.set_calculated_value('Calculated amount for working day', row_offset, string_format, new_coordinate_list)\n\n def _copy_and_fill_in_workbook_end(self, invoice_times_df):\n row_offset_for_end = (len(invoice_times_df) - 1) * self.row_increment_between_two_working_days\n self._copy_workbook_end(row_offset_for_end)\n self._fill_in_calculated_total_amount(invoice_times_df, row_offset_for_end)\n\n def _copy_workbook_end(self, row_offset_for_end):\n for row in range(self.working_day_stop_row + 2, self.template_worksheet.max_row + 1):\n for col in range(1, self.template_worksheet.max_column + 1):\n template_cell = self.template_worksheet.cell(row=row, column=col)\n invoice_row = row + row_offset_for_end\n invoice_cell = self.invoice_worksheet.cell(row=invoice_row, column=col)\n InvoicesFromTemplate.copy_cell(template_cell, invoice_cell)\n\n def _fill_in_calculated_total_amount(self, invoice_times_df, row_offset):\n number_of_working_days = len(invoice_times_df)\n initial_calculated_amount_for_working_day_coordinate = self.calculated_cell_coordinate_dict['Calculated amount for working day']\n new_coordinate_list = [InvoicesFromTemplate._increment_coordinate_by_row_offset(initial_calculated_amount_for_working_day_coordinate, index*self.row_increment_between_two_working_days) for index in range(number_of_working_days)]\n string_format = '=' + '+'.join(['{}']*number_of_working_days) # should give e.g. '={}+{}' if there are two working days\n self.set_calculated_value('Calculated total amount', row_offset, string_format, new_coordinate_list)\n\n def _save_invoice_workbook(self, invoice_workbook, invoice_dict):\n invoice_path = invoice_dict['File path']\n invoice_workbook.save(invoice_path)\n \n def set_calculated_value(self, info_name, row_offset, string_format, string_parameter_list):\n coordinate = self.calculated_cell_coordinate_dict[info_name]\n initial_row, initial_col = openpyxl.utils.cell.coordinate_to_tuple(coordinate)\n used_row = initial_row + row_offset\n cell = self.invoice_worksheet.cell(row=used_row, column=initial_col)\n value = string_format.format(*string_parameter_list)\n cell.value = value\n\n @staticmethod\n def _increment_coordinate_by_row_offset(initial_coordinate, row_offset):\n initial_row, col = openpyxl.utils.cell.coordinate_to_tuple(initial_coordinate)\n new_row = initial_row + row_offset\n new_coordinate = InvoicesFromTemplate.tuple_to_coordinate(new_row, col)\n return new_coordinate\n\n @staticmethod\n def copy_cell(cell, new_cell):\n new_cell.value = cell.value\n if cell.has_style:\n new_cell.font = copy.copy(cell.font)\n new_cell.border = copy.copy(cell.border)\n new_cell.fill = copy.copy(cell.fill)\n new_cell.number_format = copy.copy(cell.number_format)\n new_cell.protection = copy.copy(cell.protection)\n new_cell.alignment = copy.copy(cell.alignment)\n\n @staticmethod\n def tuple_to_coordinate(row, col):\n col_letter = openpyxl.utils.cell.get_column_letter(col)\n coordinate = '{}{}'.format(col_letter, row)\n return coordinate\n\n","repo_name":"haroldmeerwaldt/a_simple_accounting_program","sub_path":"modules/worker_thread/invoice_generation.py","file_name":"invoice_generation.py","file_ext":"py","file_size_in_byte":16465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"3481074730","text":"\"\"\"Tests for cals.py\"\"\"\nimport pytest\nfrom io import StringIO\nimport cals\nimport sqlite3\nimport datetime\n\n\n@pytest.fixture\ndef memory_db():\n \"\"\"Fixture to set 
up an in-memory test db\"\"\"\n db = sqlite3.connect(':memory:')\n cursor = db.cursor()\n yield db, cursor\n\n\ndef test_Entry():\n \"\"\"Verify that Entry class and add method behave as expected\"\"\"\n data = ['a', 1, (1, 2), -1, .5]\n test = cals.Entry()\n for item in data:\n test.add(item)\n assert len(test.content) == len(data)\n for i in range(0, len(data)-1):\n assert test.content[i] == data[i]\n\n\ndef test_CalEntry():\n \"\"\"Test class CalEntry\"\"\"\n def test_add(data):\n \"\"\"Add data to test obj and verify\"\"\"\n test = cals.CalEntry()\n i = 0\n for item in data:\n test.add(item)\n assert item == test.content[i]\n i += 1\n data = [['egg', 60, 6], [-2, -1, -0]]\n for i in range(len(data)):\n test_add(data[i])\n # TODO test validate method, commit, rm\n\n\ndef test_WeightEntry(memory_db):\n \"\"\"Test class WeightEntry\"\"\"\n def test_add(data):\n \"\"\"Add data to test obj and verify\"\"\"\n test = cals.WeightEntry()\n test.add(data)\n assert data == test.content[0]\n try:\n test.validate()\n except ValueError:\n pass\n check = 'fail' if type(data) is str else '?'\n assert check == 'fail'\n\n data = [130, -333, '', 'fish']\n for i in range(len(data)):\n test_add(data[i])\n\n\n# def test_create_table(memory_db, table):\n# \"\"\"Test table creation\"\"\"\n# db, cursor = memory_db\n# tables = ['calorie_table', 'weight_table',\n# 'profile_table']\n# bogus = [42, 'nope', (0, 1, 2)]\n\n# def create_table(table):\n# \"\"\"Create table and return results\"\"\"\n# cals.create_table(db, cursor, f'{table}')\n# with db:\n# check = list(cursor.execute(\n# f\"\"\"SELECT * FROM {table}\"\"\").fetchall())\n# return check\n\n# for table in tables:\n# check = create_table(table)\n# assert check == []\n# for table in bogus:\n# try:\n# check = create_table(table)\n# except sqlite3.OperationalError:\n# check = 'fail'\n# assert check == 'fail'\n\n\ndef test_append_timestamp():\n \"\"\"Verify that append_timestamp appends as expected to array\"\"\"\n def append_timestamp(arr):\n arr = cals.append_timestamp(arr)\n return arr\n arrs = [[0], [0, 2], ['str', 4, ['nest']]]\n for init_arr in arrs:\n init_len = len(init_arr)\n arr = append_timestamp(init_arr)\n assert len(arr) == init_len + 2\n assert type(arr[-2]) is str\n assert type(arr[-1]) is datetime.date\n\n\ndef test_to_metric():\n \"\"\"Verify that to_metric converts imperial units to metric\"\"\"\n h, w = cals.to_metric(5, 140)\n assert h, w == 152.4\n 63.50\n\n h, w = cals.to_metric(7.4, 333)\n assert h, w == 223.52\n 151.05\n\n h, w = cals.to_metric(-1, -1)\n assert h, w == -30.48\n -.45\n\n\ndef test_validate_input(monkeypatch):\n \"\"\"Verify that validate_input returns expected datatypes\"\"\"\n fake_input = StringIO('5.1\\n')\n monkeypatch.setattr('sys.stdin', fake_input)\n test = cals.validate_input('Float: ', float)\n assert type(test) == float\n\n fake_input = StringIO('1\\n')\n monkeypatch.setattr('sys.stdin', fake_input)\n test = cals.validate_input('Int: ', int)\n assert type(test) == int\n\n\ndef test_harris_benedict(monkeypatch):\n \"\"\"Verify that harris_benedict method returns expected BMR values\"\"\"\n data = (19, 'm', 170, 68, 2)\n fake_input = StringIO('1\\n')\n monkeypatch.setattr('sys.stdin', fake_input)\n test_prof = cals.Profile(*data)\n assert round(test_prof.bmr, 0) == 1707\n\n data = (77, 'f', 120, 85, 2)\n fake_input = StringIO('1\\n')\n monkeypatch.setattr('sys.stdin', fake_input)\n test_prof = cals.Profile(*data)\n assert round(test_prof.bmr, 0) == 1272\n\n\ndef test_calc_tdee(monkeypatch):\n \"\"\"Verify that 
calc_tdee method returns expected TDEE values\"\"\"\n    data = (33, 'm', 99.06, 14.97, 3)\n    fake_input = StringIO('1\\n')\n    monkeypatch.setattr('sys.stdin', fake_input)\n    test_prof = cals.Profile(*data)\n    assert round(test_prof.tdee, 0) == 692\n\n    data = (33, 'm', 99.06, 14.97, 2)\n    fake_input = StringIO('3\\n')\n    monkeypatch.setattr('sys.stdin', fake_input)\n    test_prof = cals.Profile(*data)\n    assert round(test_prof.tdee, 0) == 894\n\n\ndef test_calc_goal(monkeypatch):\n    \"\"\"Verify that calc_goal method returns expected caloric deficit values\"\"\"\n    data = (33, 'm', 99.06, 14.97, 2)\n    fake_input = StringIO('3\\n')\n    monkeypatch.setattr('sys.stdin', fake_input)\n    test_prof = cals.Profile(*data)\n    diet = cals.Diet(test_prof.tdee, test_prof.lose)\n    assert int(diet.calories) == -105\n","repo_name":"dch42/calcount","sub_path":"test_cals.py","file_name":"test_cals.py","file_ext":"py","file_size_in_byte":4854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"31893199045","text":"'''\nhigh-level interface to google drive\n\nDesign:\n    per user authentication setup\n    look inside $HOME for application data\n'''\nimport io\nimport os\nimport glob\nimport mimetypes\nimport pickle\nfrom sys import exit\n\nfrom googleapiclient import errors\nfrom googleapiclient.discovery import build\n# retry handling\nfrom google.api_core.retry import Retry\n# file management\nfrom googleapiclient.http import MediaFileUpload\nfrom googleapiclient.http import MediaInMemoryUpload\nfrom googleapiclient.http import MediaIoBaseDownload\n# oauth related\nfrom httplib2 import Http\nfrom oauth2client import file, client, tools\n\ndef get_authenticated(SCOPES, credential_file='credentials.json',\n                      token_file='token.json', service_name='drive',\n                      api_version='v3'):\n    # The file token.json stores the user's access and refresh tokens, and is\n    # created automatically when the authorization flow completes for the first\n    # time.\n    store = file.Storage(token_file)\n    creds = store.get()\n    if not creds or creds.invalid:\n        flow = client.flow_from_clientsecrets(credential_file, SCOPES)\n        creds = tools.run_flow(flow, store)\n    service = build(service_name, api_version, http=creds.authorize(Http()),\n                    cache_discovery=False)\n    return service\n\n################################################################################\n# module initialization\n################################################################################\n# initialize mimetypes\nmimetypes.init()\nmimetypes.add_type('application/python-pickle', '.pickle')\nmimetypes.add_type('application/numpy-npy', '.npy')\nmimetypes.add_type('application/numpy-npz', '.npz')\n\n# google drive root folder\nrootdir = 'octopus'\n\n# If modifying these scopes, delete the file token.pickle.\nSCOPES = ['https://www.googleapis.com/auth/drive']\n\n# look for user local data\ndirname = os.path.join(os.environ['HOME'], '.octopus')\n\ntoken_name = 'token.json'\ntoken_name = os.path.join(dirname, token_name)\ncred_name = 'credentials.json'\ncred_name = os.path.join(dirname, cred_name)\n\nassert os.path.exists(cred_name), f'missing credentials in {dirname}'\n#assert os.path.exists(token_name), f'missing token storage in {dirname}'\n\n# module level object\n_service = None\n\ndef get_service():\n    # declare the module-level cache; the assignment below would otherwise\n    # make _service a local variable and raise UnboundLocalError\n    global _service\n    if not _service:\n        _service = get_authenticated(SCOPES,\n                                     credential_file=cred_name,\n                                     token_file=token_name)\n    return _service\n\n################################################################################\n# support 
functions\n################################################################################\ndef split_all(path):\n dirs = []\n while True:\n parts = os.path.split(path)\n if parts[0] == path: # sentinel for absolute paths\n dirs.insert(0, parts[0])\n break\n elif parts[1] == path: # sentinel for relative paths\n dirs.insert(0, parts[1])\n break\n else:\n path = parts[0]\n dirs.insert(0, parts[1])\n return dirs\n\n################################################################################\n# functions\n################################################################################\ndef get_folder(fname, parent):\n '''\n check if folder exists, if so returns folder id,\n else return None\n '''\n folders_only_query = \"mimeType='application/vnd.google-apps.folder'\"\n not_trash_query = \"trashed=false\"\n root_folder_query = f\"'{parent}' in parents\"\n folder_name_query = f\"name='{fname}'\"\n\n query = f\"{root_folder_query} and {folders_only_query} and {folder_name_query} and {not_trash_query}\"\n fields = 'nextPageToken, files(id, name)'\n\n response = get_service().files().list(q=query,\n spaces='drive',\n fields=fields).execute()\n\n #for file in response.get('files', []):\n # print (f\"Found folder: {file.get('name')}, {file.get('id')}\")\n\n file_list = [ (file.get('name'), file. get('id'))\n for file in response.get('files', []) ]\n\n if file_list:\n assert len(file_list) == 1, 'more than one folder found'\n folder_id = file_list[0][1]\n return folder_id\n else:\n return None\n\ndef make_dirs(fnames, exist_ok=True):\n '''\n make all folders specified in folders\n if they do not already exist\n '''\n parent = 'root'\n for fname in fnames:\n folder = get_folder(fname, parent)\n if not folder:\n folder = create_folder(fname, parent)\n # folder is valid at this point\n parent = folder\n\n # return folder id\n return folder\n\ndef create_folder(fname, parent):\n ''' create folder '''\n\n file_metadata = {\n 'name': fname,\n 'parents': [parent],\n 'mimeType': 'application/vnd.google-apps.folder',\n }\n\n file = get_service().files().create(body=file_metadata,\n fields='id').execute()\n\n folder_id = file.get('id')\n #print(f\"Folder ID: {folder_id}\")\n\n return folder_id\n\ndef get_file(fname, parent):\n '''\n return file_id if it exists,\n otherwise return None\n '''\n assert parent, 'parent not set'\n not_trash_query = \"trashed=false\"\n folder_query = f\"'{parent}' in parents\"\n name_query = f\"name='{fname}'\"\n\n query = f\"{folder_query} and {name_query} and {not_trash_query}\"\n fields = 'nextPageToken, files(id, name)'\n\n response = get_service().files().list(q=query,\n spaces='drive',\n fields=fields).execute()\n\n #for file in response.get('files', []):\n # print (f\"Found file: {file.get('name')}, {file.get('id')}\")\n\n file_list = [ (file.get('name'), file. 
get('id'))\n                  for file in response.get('files', []) ]\n\n    if file_list:\n        assert len(file_list) == 1, 'more than one file found'\n        file_id = file_list[0][1]\n        return file_id\n    else:\n        return None\n\ndef create_file(filepath, parent=None, mime_type=None):\n    '''\n    create file at filepath\n\n    assumptions:\n        Infer mime type if not specified.\n        filepath should be a valid file\n        write to root directory if parent not given\n    '''\n    assert os.path.isfile(filepath), f'{filepath} must be a valid file'\n    if not parent: parent='root'\n\n    fname = os.path.basename(filepath)\n    file_ext = os.path.splitext(fname)[1]\n\n    # lookup mime type\n    if not mime_type:\n        mime_type = mimetypes.types_map.get(file_ext, 'application/octet-stream')\n\n    metadata = {}\n    metadata['name'] = fname\n    metadata['parents'] = [parent]\n\n    media = MediaFileUpload(filepath, mimetype=mime_type, resumable=True)\n\n    file = get_service().files().create(body=metadata,\n                                        media_body=media,\n                                        fields='id').execute()\n    #print(f'File ID: {file[\"id\"]}')\n    return file['id']\n\ndef update_file(filepath, file_id, mime_type=None):\n    '''\n    update file with file_id, using contents from filepath\n\n    assumptions:\n        Infer mime type if not specified.\n        filepath should be a valid file\n        file_id must be specified\n    '''\n    assert os.path.isfile(filepath), f'{filepath} must be a valid file'\n    assert file_id, 'must have valid file_id'\n\n    fname = os.path.basename(filepath)\n    file_ext = os.path.splitext(fname)[1]\n\n    # lookup mime type\n    if not mime_type:\n        mime_type = mimetypes.types_map.get(file_ext, 'application/octet-stream')\n\n    metadata = {}\n    metadata['name'] = fname\n\n    media = MediaFileUpload(filepath, mimetype=mime_type, resumable=True)\n\n    file = get_service().files().update(body=metadata,\n                                        media_body=media,\n                                        fileId=file_id).execute()\n    return file_id\n\ndef create_bytes(buf, filepath, parent=None, text=True):\n    '''\n    create filepath and fill content with buf.\n\n    assumptions:\n        write to root directory if parent not given\n    '''\n    if not parent: parent='root'\n\n    fname = os.path.basename(filepath)\n    #file_ext = os.path.splitext(fname)[1]\n\n    # lookup mime type\n    #mimetypes.init()\n    #mime_type = mimetypes.types_map.get(file_ext, 'application/octet-stream')\n    mime_type = 'text/plain' if text else 'application/octet-stream'\n\n    metadata = {}\n    metadata['name'] = fname\n    metadata['parents'] = [parent]\n\n    media = MediaInMemoryUpload(buf, mimetype=mime_type, resumable=True)\n\n    file = get_service().files().create(body=metadata,\n                                        media_body=media,\n                                        fields='id').execute()\n    #print(f'File ID: {file[\"id\"]}')\n    return file['id']\n\ndef update_bytes(buf, filepath, file_id, text=True):\n    '''\n    update filepath content with buf\n\n    assumptions:\n        file_id must be specified\n    '''\n    assert file_id, 'must have valid file_id'\n\n    fname = os.path.basename(filepath)\n    #file_ext = os.path.splitext(fname)[1]\n\n    # lookup mime type\n    #mimetypes.init()\n    #mime_type = mimetypes.types_map.get(file_ext, 'application/octet-stream')\n    mime_type = 'text/plain' if text else 'application/octet-stream'\n\n    metadata = {}\n    metadata['name'] = fname\n\n    media = MediaInMemoryUpload(buf, mimetype=mime_type, resumable=True)\n\n    file = get_service().files().update(body=metadata,\n                                        media_body=media,\n                                        fileId=file_id).execute()\n\n    return file_id\n\ndef download_bytes(filepath, file_id, text=True):\n    '''\n    download the content of file_id and return it as a buffer [not verified]\n    '''\n\n    request = get_service().files().get_media(fileId=file_id)\n\n    #fh = io.FileIO(filepath, mode='w')\n    fh = io.BytesIO()\n    
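# MediaIoBaseDownload writes raw bytes into the buffer, so a bytes buffer\n    # is required here; a text buffer would raise TypeError on the first chunk\n    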
#downloader = MediaIoBaseDownload(fh, request, chunksize=1024*1024)\n    downloader = MediaIoBaseDownload(fh, request)\n\n    done = False\n    while done is False:\n        status, done = downloader.next_chunk()\n        if status:\n            print(\"Download {}%\".format( int(status.progress()*100) ) )\n    print(\"Download Complete!\")\n\n    # return the buffer contents; decode when the caller asked for text\n    fh.seek(0) # reset file handle\n    buf = fh.read()\n    if text: buf = buf.decode()\n\n    return buf\n\n################################################################################\n# user API functions\n################################################################################\n@Retry()\ndef save_to_file(dst_filepath, buf, text=True):\n    ''' save buf to a remote file directly '''\n    assert not os.path.isabs(dst_filepath), 'support relative path only'\n    dst_filepath = os.path.join(rootdir, dst_filepath)\n    print(f'saving buffer to gdrive: {dst_filepath}')\n\n    dst_basename = os.path.basename(dst_filepath)\n    dst_dirname = os.path.dirname(dst_filepath)\n    dst_dirname = os.path.normpath(dst_dirname)\n\n    dirs = split_all(dst_dirname)\n    #print(dirs)\n    folder = make_dirs(dirs)\n\n    if text: buf = buf.encode()\n\n    file_id = get_file(dst_basename, folder)\n    if file_id:\n        update_bytes(buf, dst_basename, file_id, text=text)\n    else:\n        create_bytes(buf, dst_basename, folder, text=text)\n\n@Retry()\ndef load_from_file(dst_filepath, text=True):\n    ''' return content of remote file [not verified]'''\n    assert not os.path.isabs(dst_filepath), 'support relative path only'\n    # remote files live under rootdir, mirroring save_to_file above\n    dst_filepath = os.path.join(rootdir, dst_filepath)\n    print(f'loading from file: {dst_filepath}')\n\n    dst_basename = os.path.basename(dst_filepath)\n    dst_dirname = os.path.dirname(dst_filepath)\n    dst_dirname = os.path.normpath(dst_dirname)\n\n    dirs = split_all(dst_dirname)\n    #print(dirs)\n    folder = make_dirs(dirs)\n\n    file_id = get_file(dst_basename, folder)\n    if file_id:\n        buf = download_bytes(dst_basename, file_id, text=text)\n    else:\n        raise RuntimeError('file does not exist')\n\n    return buf\n\n@Retry()\ndef save_file(src_filepath, dst_filepath=None, mime_type=None):\n    ''' upload file to google drive '''\n    if not dst_filepath: dst_filepath = src_filepath\n    assert not os.path.isabs(dst_filepath), 'support relative path only'\n    assert os.path.isfile(src_filepath), f'{src_filepath} must be a valid file'\n    # append root directory to dst_filepath\n    dst_filepath = os.path.join(rootdir, dst_filepath)\n    print(f'saving file to gdrive: {dst_filepath}')\n\n    dst_basename = os.path.basename(dst_filepath)\n    dst_dirname = os.path.dirname(dst_filepath)\n    dst_dirname = os.path.normpath(dst_dirname)\n\n    dirs = split_all(dst_dirname)\n    #print(dirs)\n    folder = make_dirs(dirs)\n\n    file_id = get_file(dst_basename, folder)\n    if file_id:\n        update_file(src_filepath, file_id, mime_type)\n    else:\n        create_file(src_filepath, folder, mime_type)\n\n@Retry()\ndef save_folder(src_filepath, dst_filepath=None, recursive=True):\n    ''' recursively save the contents of filepath '''\n    if not dst_filepath: dst_filepath = src_filepath\n    assert os.path.isdir(src_filepath), f'{src_filepath} must be a folder'\n    assert not (dst_filepath.startswith('.') or dst_filepath.startswith('..')), \\\n        \"dst_filepath cannot contain '.' 
or '..'\"\n\n # recurse thru all files in src folder\n paths = glob.glob(f\"{src_filepath}/**\", recursive=True)\n for src_path in paths:\n dst_path = src_path.replace(src_filepath, dst_filepath, 1)\n print(f'copying {src_path} --> {dst_path}')\n\n if os.path.isdir(src_path):\n # skip, this will be taken care of in save_file()\n print('folder, skipping')\n pass\n elif os.path.isfile(src_path):\n save_file(src_path, dst_path)\n else:\n raise RuntimeError(f'cannot handle special files: {src_path}')\n\n","repo_name":"DavidKWH/CommLib","sub_path":"comm_ai/drive.py","file_name":"drive.py","file_ext":"py","file_size_in_byte":13630,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"36796517524","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Room',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('roomsn', models.CharField(unique=True, max_length=64, db_index=True)),\n ('name', models.CharField(max_length=64)),\n ('is_active', models.BooleanField(default=False)),\n ('is_add', models.BooleanField(default=False)),\n ],\n ),\n migrations.CreateModel(\n name='RoomTemporary',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('roomsn', models.CharField(unique=True, max_length=64, db_index=True)),\n ],\n ),\n ]\n","repo_name":"xiaohutushen30/website","sub_path":"RoomManage/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"41489897710","text":"import os\n\nfrom PyQt5 import QtGui, QtWidgets, QtCore\nfrom demos.twip_widget import TWIPWidget\nfrom demos.plot_widget import RollingPlotWidget\n\nfrom twip.model.robot import load_robot_json\n\n\nclass MainWindow(QtWidgets.QMainWindow):\n ''' Realtime TWIP viewer program\n '''\n\n def __init__(self):\n super(MainWindow, self).__init__()\n\n # Create TWIP model\n self.twip = load_robot_json( os.path.dirname(os.path.realpath(__file__)) + \"/../docs/robot_generic.json\")\n self.twip_widget = TWIPWidget(self, self.twip)\n\n self.resize(1900, 1000)\n # Add layout to put twip_widget in\n wid = QtWidgets.QWidget(self)\n self.setCentralWidget(wid)\n\n # create layouts\n self.view_layout = QtGui.QHBoxLayout()\n self.twip_layout = QtGui.QHBoxLayout()\n self.plot_layout = QtGui.QVBoxLayout()\n\n wid.setLayout(self.view_layout)\n self.view_layout.addLayout(self.twip_layout, 2)\n self.view_layout.addLayout(self.plot_layout, 1)\n\n self.twip_layout.addWidget(self.twip_widget)\n\n self.tilt_widget = RollingPlotWidget(1, 300)\n self.yaw_widget = RollingPlotWidget(1, 300)\n self.motor_plot_widget = RollingPlotWidget(2, 300)\n\n self.tilt_widget.set_pen(0, 'r')\n self.tilt_widget.setLabel('left', 'Tilt', units='degrees')\n self.tilt_widget.setLabel('bottom', 'Sample Number')\n self.tilt_widget.showGrid(True, True, 0.5)\n\n self.yaw_widget.set_pen(0, 'c')\n self.yaw_widget.setLabel('left', 'Yaw', units='degrees')\n self.yaw_widget.setLabel('bottom', 'Sample Number')\n self.yaw_widget.showGrid(True, True, 0.5)\n\n self.motor_plot_widget.set_pen(0, 'g')\n self.motor_plot_widget.set_pen(1, 'w')\n self.motor_plot_widget.setLabel('left', 'Motor Torque', 
units='N m')\n self.motor_plot_widget.setLabel('bottom', 'Sample Number')\n self.motor_plot_widget.showGrid(True, True, 0.5)\n\n self.plot_layout.addWidget(self.tilt_widget)\n self.plot_layout.addWidget(self.yaw_widget)\n self.plot_layout.addWidget(self.motor_plot_widget)\n\n # wid.setLayout(mainLayout)\n\n # Setup twip initial state\n dt = 1 / 30\n self.twip.set_IC([0, 0, 0.4, -0.1, 0, 0])\n self.twip.update_current_state(dt, [1 / dt * 0.5, 1 / dt * 0.4, 0, 0])\n self.dt = dt\n\n def update_twip(self):\n ''' program mainloop method\n '''\n self.twip.update_current_state(self.dt, [0, 0, 0, 0])\n\n m_l = self.twip.motor_l.get_position_coordinates()[0]\n m_r = self.twip.motor_r.get_position_coordinates()[0]\n self.motor_plot_widget.push_data([m_l, m_r])\n\n coords = self.twip.get_position_coordinates()\n y = coords[2] * 180 / 3.1415\n t = coords[5] * 180 / 3.1415\n self.tilt_widget.push_data([t])\n self.yaw_widget.push_data([y])\n\n def update_plot(self):\n # pass\n self.motor_plot_widget.update_plot()\n self.tilt_widget.update_plot()\n self.yaw_widget.update_plot()\n self.twip_widget.draw_twip()\n\n\napp = QtWidgets.QApplication(['TWIP Viewer'])\nwindow = MainWindow()\n\nsim_timer = QtCore.QTimer()\nsim_timer.timeout.connect(window.update_twip)\nsim_timer.start(1 / 60 * 1000)\n\nplot_timer = QtCore.QTimer()\nplot_timer.timeout.connect(window.update_plot)\nplot_timer.start(1 / 40 * 1000)\n\nwindow.show()\n\napp.exec_()\n","repo_name":"EthanJamesLew/TWIP-Sim","sub_path":"demos/twip_sim.py","file_name":"twip_sim.py","file_ext":"py","file_size_in_byte":3411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"11688619730","text":"\"\"\"\nMódulo para facilitar o registro de inspeções não-invasivas.\n\nPermite registrar rapidamente análise de imagem efetuada pelo COV, seja com Ficha a\nencaminhar para verificação física, ou registrando e logo em seguida encerrando.\n\n\"\"\"\nfrom ajna_commons.flask.log import logger\nfrom bson import ObjectId\nfrom flask import render_template, flash, url_for\nfrom flask_login import login_required, current_user\nfrom werkzeug.utils import redirect\n\nfrom bhadrasana.models.ovr import OVR\nfrom bhadrasana.models.ovrmanager import cadastra_ovr, atribui_responsavel_ovr\nfrom bhadrasana.models.rvfmanager import programa_rvf_container, lista_rvfovr, gera_evento_rvf, get_rvf\n\n\ndef assistenteini_app(app):\n @app.route('/nova_inspecaoni/<_id>', methods=['GET'])\n @login_required\n def nova_inspecaonaoinvasiva(_id):\n title_page = 'Assistente de Inspeção Não Invasiva'\n mongodb = app.config['mongodb']\n mongo_risco = app.config['mongo_risco']\n session = app.config.get('dbsession')\n try:\n # raise Exception('Não implementado!!!')\n grid_data = mongodb['fs.files'].find_one({'_id': ObjectId(_id)})\n meta = grid_data['metadata']\n print(meta)\n xmldoc = meta.get('xml')\n if xmldoc is None:\n alerta = False\n else:\n alerta = xmldoc.get('alerta')\n container = meta.get('numeroinformado')\n metacarga = meta.get('carga')\n tipooperacao = 2\n if metacarga is None:\n conhecimento = ''\n descricao = ''\n else:\n if metacarga.get('vazio'):\n conhecimento = metacarga.get('manifesto')\n if isinstance(conhecimento, list):\n conhecimento = conhecimento[0]\n conhecimento = conhecimento.get('manifesto')\n descricao = 'Manifesto'\n else:\n conhecimento = metacarga.get('conhecimento')\n if isinstance(conhecimento, list):\n conhecimento = conhecimento[0]\n descricao = 'Conhecimento'\n tipo = 
conhecimento.get('trafego').lower()\n if tipo == 'lci':\n tipooperacao = 1\n conhecimento = conhecimento.get('conhecimento')\n ovr_data = {'numeroCEmercante': conhecimento,\n 'tipooperacao': tipooperacao,\n 'observacoes': f'Inspeção não invasiva {descricao} ' \\\n f'{conhecimento} automaticamente registrada.'\n f'Análise do contêiner {container}'\n }\n ovr = None\n if conhecimento:\n ovr = session.query(OVR).filter(OVR.numeroCEmercante == conhecimento). \\\n order_by(OVR.id.desc()).first()\n if ovr is None:\n ovr = cadastra_ovr(session,\n params=ovr_data,\n user_name=current_user.name)\n atribui_responsavel_ovr(session, ovr.id, current_user.name, current_user.name)\n rvf = None\n rvfs = lista_rvfovr(session, ovr.id)\n if len(rvfs) > 0:\n for umarvf in rvfs:\n if umarvf.numerolote == container:\n rvf = get_rvf(session, umarvf.id)\n break\n if rvf is None:\n # imagens = get_imagens_dict_container_id(mongodb, ovr.numeroCEmercante, '')\n rvf = programa_rvf_container(\n mongodb, mongo_risco, session,\n ovr, container, _id\n )\n if not rvf.descricao:\n rvf.descricao = 'Análise de imagem de escaneamento, por rotina do COV.'\n if alerta:\n rvf.descricao = rvf.descricao + '\\n Contêiner com alerta Operador.'\n rvf.inspecaonaoinvasiva = True\n try:\n session.add(rvf)\n session.commit()\n except Exception as err:\n session.rollback()\n raise err\n gera_evento_rvf(session, rvf, user_name=current_user.name)\n return redirect(url_for('ovr', id=ovr.id))\n except Exception as err:\n logger.error(err, exc_info=True)\n flash('Erro! Detalhes no log da aplicação.')\n flash(str(type(err)))\n flash(str(err))\n return render_template('index.html',\n title_page=title_page)\n","repo_name":"IvanBrasilico/bhadrasana2","sub_path":"bhadrasana/routes/assistente_ini.py","file_name":"assistente_ini.py","file_ext":"py","file_size_in_byte":4708,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"73357204118","text":"#Nome: Vinícius Neves Corrêa\n#RM: 98392\n\n#Nome: João Pedro Lima Rodrigues\n#RM: 99521\n\n#Nome: Daniel Neves França\n#RM: 550912\n\n#Nome: Danilo Chichinato Yoshihara\n#RM: 98736\n\ndef calcular_valor_total(ano_uso, valor_maco, quant_macos_dia):\n dias_uso = ano_uso * 12 * 30\n valor_dia_maco = valor_maco * quant_macos_dia\n\n return dias_uso * valor_dia_maco\n\ndef retornar_mensagem(valor):\n\n if(valor < 20000):\n return \"Com o valor de R$ {0:.2f}, você poderia ter dado entrada em um carro.\".format(valor)\n elif(valor >= 20000 and valor <= 50000):\n return \"Com o valor de R$ {0:.2f}, você poderia ter comprado um carro popular usado.\".format(valor)\n else:\n return \"Com o valor de R$ {0:.2f}, você poderia ter comprado um carro zero.\".format(valor)\n\nif __name__ == '__main__':\n anos_uso = float(input(\"Tempo como fumante (em anos).....:\"))\n valor_maco = int(input(\"Valor do maço....................:\"))\n quant_macos_dia = float(input(\"Quantidade de maços por dia............:\"))\n\n valor_total = calcular_valor_total(anos_uso, valor_maco, quant_macos_dia)\n\n mensagem = retornar_mensagem(valor_total)\n\n print(mensagem)","repo_name":"Zacoff/Python-Fiap-Cap7","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1170,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"30617132985","text":"import fcntl\nimport os\nimport sys\nimport time\n\nfrom mailprocessing import signals\n\nfrom mailprocessing.util import safe_write\n\n\nclass MailProcessor(object):\n 
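# Abstract base shared by the concrete processors (e.g. the maildir/IMAP\n    # ones): subclasses are expected to provide __iter__() and create_folder(),\n    # both of which raise NotImplementedError below, while logging, sendmail\n    # settings and rcfile bookkeeping are handled here.\n    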
def __init__(\n self, rcfile, log_fp, **kwargs):\n\n defaults = {'log_level': 1,\n 'dry_run': False,\n 'run_once': False,\n 'auto_reload_rcfile': False}\n\n for key in defaults:\n if key not in kwargs:\n kwargs[key] = defaults[key]\n\n self._rcfile = rcfile\n self._log_fp = log_fp\n print(\"setting log level to %s\" % kwargs['log_level'])\n self._log_level = kwargs['log_level']\n self._run_once = kwargs['run_once'] or kwargs['dry_run']\n self._auto_reload_rcfile = kwargs['auto_reload_rcfile']\n self._deliveries = 0\n self._sendmail = \"/usr/sbin/sendmail\"\n self._sendmail_flags = \"-i\"\n self.rcfile_modified = False\n self._previous_rcfile_mtime = self._get_previous_rcfile_mtime()\n\n def get_auto_reload_rcfile(self):\n return self._auto_reload_rcfile\n\n def set_auto_reload_rcfile(self, value):\n self._auto_reload_rcfile = value\n\n auto_reload_rcfile = property(\n get_auto_reload_rcfile, set_auto_reload_rcfile)\n\n def set_logfile(self, path_or_fp):\n if isinstance(path_or_fp, str):\n self._log_fp = open(\n os.path.expanduser(path_or_fp),\n \"a\",\n errors=\"backslashreplace\")\n lock_acquired = False\n while not lock_acquired:\n try:\n fcntl.flock(self._log_fp, fcntl.LOCK_EX | fcntl.LOCK_NB)\n lock_acquired = True\n except OSError:\n print(\"Couldn't acquire lock on log file %s, sleeping \"\n \"for 5s\" % self._log_fp.name, file=sys.stderr)\n time.sleep(5)\n else:\n self._log_fp = path_or_fp\n\n logfile = property(fset=set_logfile)\n\n def reopen_logfile(self):\n # log file is a stream, so no need to reopen\n if self._log_fp.fileno() < 3:\n return\n filename = self._log_fp.name\n self._log_fp.close()\n self.set_logfile(filename)\n\n @property\n def rcfile(self):\n return self._rcfile\n\n def get_sendmail(self):\n return self._sendmail\n\n def set_sendmail(self, sendmail):\n self._sendmail = sendmail\n\n sendmail = property(get_sendmail, set_sendmail)\n\n def get_sendmail_flags(self):\n return self._sendmail_flags\n\n def set_sendmail_flags(self, sendmail_flags):\n self._sendmail_flags = sendmail_flags\n\n sendmail_flags = property(get_sendmail_flags, set_sendmail_flags)\n\n def __iter__(self):\n \"\"\"\n Iterator method used to invoke the processor from default.rc.\n\n The user must be able to treat your processor class as an iterable\n object that provides every message as a mailprocessing.mail.base.MailBase\n subclass.\n \"\"\"\n\n message = (\"You need to implement an __iter__ method in your \"\n \"MailProcessor subclass.\")\n raise NotImplementedError(message)\n\n def create_folder(self, folder, **kwargs):\n \"\"\"\n Creates a new folder.\n\n This method is used to create a folder to store emails in. Depending on\n the `parents` parameter it also creates the folders parent folders if\n they do not exist. This should be the default behaviour.\n\n folder may either be a path name separated by the appropriate path\n component separator or a list of path name components. 
In the latter\n case it is the create_folder() implementation's responsibility to join\n the path components.\n \"\"\"\n\n message = (\"You need to implement a create_folder() method in your \"\n \"MailProcessor subclass.\")\n raise NotImplementedError(message)\n\n def log(self, text, level=1):\n if level <= self._log_level:\n safe_write(self._log_fp, text)\n self._log_fp.flush()\n\n def log_debug(self, text):\n if signals.hup_received():\n self.reopen_logfile()\n self.log(text, 2)\n\n def log_error(self, text):\n if signals.hup_received():\n self.reopen_logfile()\n try:\n self.log(text, 0)\n except:\n # Make sure the message gets out even if writing to the log file\n # fails.\n safe_write(sys.stderr, text)\n\n def log_info(self, text):\n if signals.hup_received():\n self.reopen_logfile()\n self.log(text, 1)\n\n def fatal_error(self, text):\n if signals.hup_received():\n self.reopen_logfile()\n try:\n self.log_error(text)\n except:\n # Make sure the message gets out even if writing to the log file\n # fails.\n pass\n safe_write(sys.stderr, text)\n sys.exit(1)\n\n def path_ensure_prefix(self, path, sep='.'):\n \"\"\"\n Converts path to list form and returns path prepended with the\n processor's path prefix. If its leading component is already the prefix\n or there is no prefix set, the original path in list form is returned.\n \"\"\"\n\n path = self.path_list(path)\n if len(path) == 0:\n return path\n\n if sep == '/':\n return path\n # Special case (mostly relevant for maildirs):\n # Return an empty first component if path and\n # prefix are identical.\n if self.prefix == self.separator:\n if path[0] == '':\n return path\n else:\n ret = ['']\n ret.extend(path)\n return ret\n\n if self.prefix != '':\n if path[0] == self.prefix:\n return path\n ret = [self.prefix]\n ret.extend(path)\n return ret\n return path\n\n def path_list(self, path, sep='/'):\n \"\"\"\n Leaves a list of path components unchanged or converts a path name in\n string form to a list of path components.\n \"\"\"\n\n if type(path) is list:\n return path\n\n return path.split(sep)\n\n def list_path(self, path, sep='/'):\n \"\"\"\n Leaves a path name in string form unchanged and converts a list of path\n components into a path in string form.\n \"\"\"\n\n if type(path) is list:\n return sep.join(path)\n return path\n\n # ----------------------------------------------------------------\n # Private methods:\n\n def _get_previous_rcfile_mtime(self):\n if self.rcfile == \"-\":\n return None\n else:\n try:\n return os.path.getmtime(self.rcfile)\n except OSError:\n # File does not exist.\n return None\n","repo_name":"mailprocessing/mailprocessing","sub_path":"mailprocessing/processor/generic.py","file_name":"generic.py","file_ext":"py","file_size_in_byte":6909,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"85"} +{"seq_id":"1700092903","text":"class Solution(object):\n def removeDuplicates(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n if len(nums) <= 2: return len(nums)\n fast, slow = 2, 2\n while fast < len(nums):\n nums[slow] = nums[fast]\n if nums[slow] == nums[slow-1] and nums[slow-1] == nums[slow-2]:\n while fast < len(nums) and nums[fast] == nums[slow]: fast += 1\n if fast == len(nums): return slow\n nums[slow] = nums[fast]\n slow, fast = slow+1, fast+1\n return slow","repo_name":"coolmich/py-leetcode","sub_path":"solu/80|Remove Dup from Sorted Array II.py","file_name":"80|Remove Dup from Sorted Array 
II.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"85"} +{"seq_id":"71168270999","text":"## https://leetcode.com/problems/shortest-completing-word/description/\nclass Solution(object):\n def shortestCompletingWord(self, licensePlate, words):\n \"\"\"\n :type licensePlate: str\n :type words: List[str]\n :rtype: str\n \"\"\"\n ## idea is to iterate through licensePlate and add the number of occurances \n ## of each letter in dictionary. Then iterate through all the words, and \n ## update the answer according to the length of the smallest word that \n ## contains all the letter from licence plate.\n ans = \"\"\n d = collections.defaultdict(int)\n for c in licensePlate:\n if c.isalpha():\n d[c.lower()] += 1\n for w in words:\n for k, v in d.items():\n if w.count(k) < v:\n break\n else:\n if not ans:\n ans = w\n elif len(w) < len(ans):\n ans = w\n return ans\n \n","repo_name":"himanshutyagi36/AlgorithmPractice","sub_path":"Leetcode/shortestCompletingWord.py","file_name":"shortestCompletingWord.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"31616763882","text":"import numpy as np\nimport networkx as nx\n\nimport random\nimport itertools\nimport math\n\n\"\"\"\nData loading\n\"\"\"\n\n\ndef load_G(data_name):\n if data_name[-4:] != '.edg':\n data_name += '.edg'\n G = nx.read_edgelist('data_set/' + data_name, nodetype=int, create_using=nx.Graph())\n\n # G.remove_edges_from(G.selfloop_edges())\n\n # remap the node names from 0,1,2,....,|V|-1\n mapping = {}\n nodes = sorted(nx.nodes(G))\n for i in range(len(nodes)):\n mapping[nodes[i]] = i\n G = nx.relabel_nodes(G, mapping)\n\n return G\n\n\n\"\"\"\nBasic functions\n\"\"\"\n\n\ndef ln(x):\n return math.log(x)\n\n\ndef boundVk(n, delta, k):\n # n = len(G)\n # delta = max([nx.degree(G, n) for n in nx.nodes(G)])\n return math.factorial(k - 1) * delta ** (k - 1) * n\n\n\ndef binom(n, r):\n if r == 0 or n == r:\n return 1\n x = n\n for i in range(max(r, n - r) + 1, n):\n x *= i\n x /= math.factorial(min(r, n - r))\n return x\n\n\ndef choose_one(l):\n return l[np.random.randint(0, len(l))]\n\n\ndef diff(x, y):\n \"\"\"\n :return one element that is in x but not in y\n :param x:\n :param y:\n :return:\n \"\"\"\n for u in x:\n if not u in y:\n return u\n\n\ndef state_merge(x, y):\n \"\"\"\n return a tuple of union of elements in x and y\n :param x:\n :param y:\n :return: tuple\n \"\"\"\n l = set(x).union(set(y))\n return tuple(sorted(l))\n\n\ndef num_edges_yields(x, y, neighbor_of_x):\n \"\"\"\n number of edges that yield the state (x U y)\n :param x:\n :param y:\n :param neighbor_of_x:\n :return:\n \"\"\"\n\n df = diff(y, x)\n m = 1\n for an in neighbor_of_x:\n if df in an:\n m += 1\n\n # return m * (m - 1) / 2\n return binom(m, 2)\n\n\n\"\"\"\nSubgraph sampling utilities\n\"\"\"\n\n\ndef neighbor_nodes(G, s) -> set:\n \"\"\"\n return the set of nodes that is adjacent to at least one node in s\n :param G: nx.Graph\n :param s: tuple of nodes\n :return: set of nodes\n \"\"\"\n\n n = len(G)\n\n nb = set()\n for v in s:\n vn = nx.neighbors(G, v)\n nb = nb.union(set(vn))\n if len(nb) == n:\n break\n\n nb = nb.difference(set(s))\n return nb\n\n\ndef __removable(G, s):\n \"\"\"\n return removable nodes do no think about the adding node\n :param G:\n :param s:\n :return:\n \"\"\"\n rem = set()\n news = set(s)\n for y in s:\n news.discard(y)\n H = G.subgraph(news)\n if 
nx.is_connected(H):\n rem.add(y)\n news.add(y)\n unrem = news.difference(rem)\n return rem, unrem\n\n\ndef degree(G, s):\n d = 0\n news = set(s)\n do_remove, mynot_remove = __removable(G, s)\n\n for x in neighbor_nodes(G, s):\n\n # check if\n nei_x = set(nx.neighbors(G, x))\n node_connected2x = news.intersection(nei_x)\n\n news.add(x)\n for y in s:\n if len(node_connected2x) == 1 and (y in node_connected2x or y in mynot_remove):\n continue\n if y in do_remove:\n d += 1\n continue\n\n news.discard(y)\n H = G.subgraph(news)\n if nx.is_connected(H):\n d += 1\n news.add(y)\n news.discard(x)\n return d\n\n\ndef neighbor_states(G, s):\n \"\"\"\n :param s: tuple return list of tuple\n :return: [next_s] : next states\n \"\"\"\n states = []\n news = set(s)\n do_remove, mynot_remove = __removable(G, s)\n\n for x in neighbor_nodes(G, s):\n\n # check if\n nei_x = set(nx.neighbors(G, x))\n node_connected2x = news.intersection(nei_x)\n\n news.add(x)\n for y in s:\n if len(node_connected2x) == 1 and (y in node_connected2x or y in mynot_remove):\n continue\n if y in do_remove:\n news.discard(y)\n states.append(tuple(sorted(news)))\n news.add(y)\n continue\n\n news.discard(y)\n H = G.subgraph(news)\n if nx.is_connected(H):\n states.append(tuple(sorted(news)))\n news.add(y)\n news.discard(x)\n return states\n\n\ndef random_next_state(neighbor_states):\n next_state = choose_one(neighbor_states)\n return next_state\n\n\ndef neighbor_edges(G, s):\n \"\"\"\n return the set of nodes that is adjacent a node in s\n :param G:\n :param s:\n :return:\n \"\"\"\n nb = []\n for v in s:\n vn = nx.neighbors(G, v)\n for n in vn:\n if n in s:\n continue\n nb.append((v, n))\n return nb\n\n\ndef RVE(G, k):\n \"\"\"\n Random Vertex Expansion\n Sampling k-subgraph with some bias\n :param G:\n :param k:\n :return:\n \"\"\"\n e = choose_one(list(G.edges()))\n\n s = [e[0], e[1]]\n\n while len(s) < k:\n ne = neighbor_edges(G, s)\n e = choose_one(ne)\n s.append(e[1])\n\n s = sorted(s)\n return s\n\n\ndef RVE2(G, k):\n \"\"\"\n A variant of Random Vertex Expansion\n Sampling k-subgraph with some bias\n :param G:\n :param k:\n :return:\n \"\"\"\n s = [choose_one(list(G.nodes()))]\n while len(s) < k:\n nei = list(neighbor_nodes(G, s))\n s.append(choose_one(nei))\n return tuple(sorted(s))\n\n\ndef gen_all_ksub(G, k):\n \"\"\"\n\n :param G:\n :param k:\n :return: list of touples\n \"\"\"\n if k == 1:\n return [(n,) for n in G.nodes()]\n if k == 2:\n return [tuple(e if e[0] < e[1] else (e[1], e[0])) for e in nx.edges(G)]\n\n N = len(G)\n ite = itertools.combinations(np.arange(N), k)\n S = []\n nodes = np.array(G.nodes(), dtype=int)\n for v in ite:\n x = nodes[np.array(v)]\n H = G.subgraph(x)\n if nx.is_connected(H):\n S.append(tuple(sorted(x)))\n return S\n\n\ndef gen_gm(G, k):\n if k == 1:\n return G\n all_ksub = gen_all_ksub(G, k)\n all_set = [set(v) for v in all_ksub]\n N_M = len(all_ksub)\n\n edges = []\n\n for i in range(N_M - 1):\n t1 = all_set[i]\n for j in range(i + 1, N_M):\n t2 = all_set[j]\n\n if len(t1.intersection(t2)) == k - 1:\n edges.append((all_ksub[i], all_ksub[j]))\n\n G_M = nx.Graph()\n G_M.add_nodes_from(all_ksub)\n G_M.add_edges_from(edges)\n\n return G_M\n","repo_name":"ryutamatsuno/RSS","sub_path":"sampling_util.py","file_name":"sampling_util.py","file_ext":"py","file_size_in_byte":6186,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"27211401921","text":"from urllib.parse import urlparse\nfrom django.urls import URLPattern, path\nfrom .views import 
*\n\n\nurlpatterns = [\n path('book/', book_room, name = \"bookroom\"),\n path('food/', food_buy, name = \"foodbuy\"),\n path('cart/', cart, name = \"cart\"),\n path('payment/', payment, name = \"payment\"),\n path('products/', product_list, name = \"productlist\"),\n # path('products/', product_detail), รับได้ทุกอย่าง \n path('products/', product_detail),\n]","repo_name":"niboon39/WEB_DEV","sub_path":"web_programming/store/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"11092561460","text":"# -*- coding: utf-8; -*-\n#\n# (c) 2004-2007 Linbox / Free&ALter Soft, http://linbox.com\n# (c) 2007-2009 Mandriva, http://www.mandriva.com/\n#\n# $Id$\n#\n# This file is part of Pulse 2, http://pulse2.mandriva.org\n#\n# Pulse 2 is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# Pulse 2 is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Pulse 2; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,\n# MA 02110-1301, USA.\n\n\"\"\"\nProvides access to MSC database\n\"\"\"\n\n# standard modules\nimport time\n\n# SqlAlchemy\nfrom sqlalchemy import and_, create_engine, MetaData, Table, Column, String, \\\n Integer, ForeignKey, select, asc, or_, desc, func, not_, distinct\nfrom sqlalchemy.orm import create_session, mapper, relation\nfrom sqlalchemy.exc import NoSuchTableError, TimeoutError\nfrom sqlalchemy.orm.exc import NoResultFound\n\n# ORM mappings\nfrom pulse2.database.msc.orm.commands import Commands\nfrom pulse2.database.msc.orm.commands_on_host import CommandsOnHost\nfrom pulse2.database.msc.orm.commands_on_host_phase import CommandsOnHostPhase\nfrom pulse2.database.msc.orm.commands_history import CommandsHistory\nfrom pulse2.database.msc.orm.target import Target\nfrom pulse2.database.msc.orm.pull_targets import PullTargets\nfrom pulse2.database.msc.orm.bundle import Bundle\nfrom mmc.database.database_helper import DatabaseHelper\n\n# Pulse 2 stuff\nfrom pulse2.managers.location import ComputerLocationManager\n\n# Imported last\nimport logging\n\nNB_DB_CONN_TRY = 2\n\n# TODO need to check for useless function (there should be many unused one...)\n\nclass MscDatabase(DatabaseHelper):\n \"\"\"\n Singleton Class to query the msc database.\n\n \"\"\"\n\n def db_check(self):\n self.my_name = \"msc\"\n self.configfile = \"msc.ini\"\n return DatabaseHelper.db_check(self)\n\n def activate(self, config):\n self.logger = logging.getLogger()\n if self.is_activated:\n return None\n\n self.logger.info(\"Msc database is connecting\")\n self.config = config\n self.db = create_engine(self.makeConnectionPath(), pool_recycle = self.config.dbpoolrecycle, \\\n pool_size = self.config.dbpoolsize, pool_timeout = self.config.dbpooltimeout, convert_unicode = True)\n if not self.db_check():\n return False\n self.metadata = MetaData(self.db)\n if not self.initTables():\n return False\n if not self.initMappersCatchException():\n return False\n self.metadata.create_all()\n # FIXME: should be removed\n 
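# (a single module-wide session is not thread-safe, which is why the\n        # query methods below each open their own short-lived create_session())\n        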
self.session = create_session()\n self.is_activated = True\n self.logger.debug(\"Msc database connected\")\n return True\n\n def initTables(self):\n \"\"\"\n Initialize all SQLalchemy tables\n \"\"\"\n try:\n # commands\n self.commands = Table(\"commands\", self.metadata,\n Column('dispatched', String(32), default='YES'),\n Column('fk_bundle', Integer, ForeignKey('bundle.id')),\n autoload = True)\n # commands_history\n self.commands_history = Table(\n \"commands_history\",\n self.metadata,\n Column('fk_commands_on_host', Integer, ForeignKey('commands_on_host.id')),\n autoload = True\n )\n # target\n self.target = Table(\n \"target\",\n self.metadata,\n autoload = True\n )\n # pull_targets\n self.pull_targets = Table(\n \"pull_targets\",\n self.metadata,\n autoload = True\n )\n # bundle\n self.bundle = Table(\n \"bundle\",\n self.metadata,\n autoload = True\n )\n # commands_on_host_phase\n self.commands_on_host_phase = Table(\n \"phase\",\n self.metadata,\n Column('fk_commands_on_host', Integer, ForeignKey('commands_on_host.id')),\n autoload = True\n )\n # commands_on_host\n self.commands_on_host = Table(\n \"commands_on_host\",\n self.metadata,\n Column('fk_commands', Integer, ForeignKey('commands.id')),\n Column('fk_target', Integer, ForeignKey('target.id')),\n autoload = True\n )\n # version\n self.version = Table(\n \"version\",\n self.metadata,\n autoload = True\n )\n except NoSuchTableError as e:\n self.logger.error(\"Cant load the msc database : table '%s' does not exists\"%(str(e.args[0])))\n return False\n return True\n\n def initMappers(self):\n \"\"\"\n Initialize all SQLalchemy mappers needed for the msc database\n \"\"\"\n mapper(CommandsHistory, self.commands_history)\n mapper(CommandsOnHostPhase, self.commands_on_host_phase)\n mapper(PullTargets, self.pull_targets)\n mapper(CommandsOnHost, self.commands_on_host, properties = {\n 'historys' : relation(CommandsHistory),\n }\n )\n mapper(Target, self.target, properties = {\n 'commandsonhosts' : relation(CommandsOnHost)\n }\n )\n mapper(Bundle, self.bundle, properties = {})\n mapper(Commands, self.commands, properties = {\n 'commandsonhosts' : relation(CommandsOnHost),\n 'bundle' : relation(Bundle),\n }\n )\n # FIXME: Version is missing\n\n ####################################\n\n def getIdCommandOnHost(self, ctx, id):\n session = create_session()\n query = session.query(CommandsOnHost).select_from(self.commands_on_host.join(self.commands)).filter(self.commands.c.id == id)\n query = self.__queryUsersFilter(ctx, query)\n query = query.all()\n if type(query) != list:\n ret = query.id\n elif len(query) > 0:\n ret = []\n for q in query:\n ret.append(q.id)\n else:\n ret = -1\n session.close()\n return ret\n\n def createBundle(self, title = '', session = None):\n \"\"\"\n Return a new Bundle\n \"\"\"\n if session is None:\n session = create_session()\n bdl = Bundle()\n bdl.title = title\n bdl.do_reboot = 'disable'\n session.add(bdl)\n session.flush()\n return bdl\n\n def createCommand(self, session, package_id, start_file, parameters, files,\n start_script, clean_on_success, start_date, end_date, connect_as,\n creator, title,\n next_connection_delay,\n max_connection_attempt,\n maxbw, deployment_intervals,\n fk_bundle, order_in_bundle, proxies, proxy_mode,\n state, sum_running, cmd_type=0):\n \"\"\"\n Return a Command object\n \"\"\"\n if type(files) == list:\n files = \"\\n\".join(files)\n\n cmd = Commands()\n cmd.creation_date = time.strftime(\"%Y-%m-%d %H:%M:%S\")\n cmd.package_id = package_id\n cmd.start_file = start_file\n 
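# field-by-field copy into the commands table row; the commented-out\n        # do_halt/do_reboot/do_wol flags appear to be handled per-phase now\n        # (see _createPhases below)\n        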
cmd.parameters = parameters\n cmd.files = files\n cmd.start_script = start_script\n cmd.clean_on_success = clean_on_success\n cmd.start_date = start_date\n cmd.end_date = end_date\n cmd.connect_as = connect_as\n cmd.creator = creator\n cmd.title = title\n #cmd.do_halt = ','.join(do_halt)\n #cmd.do_reboot = do_reboot\n #cmd.do_wol = do_wol\n #cmd.do_imaging_menu = do_wol_with_imaging\n cmd.next_connection_delay = next_connection_delay\n cmd.max_connection_attempt = max_connection_attempt\n #cmd.do_inventory = do_inventory\n cmd.maxbw = maxbw\n cmd.deployment_intervals = deployment_intervals\n cmd.fk_bundle = fk_bundle\n cmd.order_in_bundle = order_in_bundle\n cmd.proxy_mode = proxy_mode # FIXME: we may add some code to check everything is OK\n cmd.state = state\n cmd.sum_running = sum_running\n cmd.type= cmd_type\n cmd.ready = False\n session.add(cmd)\n session.flush()\n return cmd\n\n @DatabaseHelper._session\n def _force_command_type(self, session, cmd_id, type):\n \"\"\"\n Force type of command cmd_id, usually used for reschedule\n convergence commands\n \"\"\"\n cmd = session.query(Commands).get(cmd_id)\n if cmd:\n self.logger.debug('Force command %s to type %s' % (cmd_id, type))\n cmd.type = type\n session.add(cmd)\n session.flush()\n return True\n self.logger.warn('Failed to set command %s to type %s' % (cmd, type))\n return False\n\n @DatabaseHelper._session\n def _set_command_ready(self, session, cmd_id):\n \"\"\"\n Set command as ready, usually used for reschedule\n convergence commands\n \"\"\"\n cmd = session.query(Commands).get(cmd_id)\n if cmd:\n self.logger.debug('Set command %s as ready' % (cmd_id))\n cmd.ready = 1\n session.add(cmd)\n session.flush()\n return True\n self.logger.warn('Failed to set command %s as ready' % (cmd))\n return False\n\n\n def deleteBundle(self, bundle_id):\n \"\"\"\n Deletes a bundle with all related sub-elements.\n\n @param bundle_id: id of bundle\n @type bundle_id: int\n \"\"\"\n session = create_session()\n session.begin()\n try:\n\n bundle = session.query(Bundle).get(bundle_id)\n if not bundle:\n self.logger.warn(\"Unable to find bundle (id=%s)\" % bundle_id)\n return False\n\n cmds = session.query(Commands)\n cmds = cmds.select_from(self.commands)\n cmds = cmds.filter(self.commands.c.fk_bundle == bundle_id)\n #self.logger.warn(\"Commands : %s)\" % cmds.all())\n\n ok = self._deleteCommands(session, cmds)\n if ok:\n session.delete(bundle)\n session.flush()\n session.commit()\n session.close()\n return True\n else:\n return False\n\n except Exception as exc:\n self.logger.error(\"Delete of bundle (id=%s) failed: %s\" % (bundle_id, str(exc)))\n session.rollback()\n session.close()\n return False\n\n\n\n\n def deleteCommand(self, cmd_id):\n \"\"\"\n Deletes a command with all related sub-elements.\n\n @param cmd_id: Commands id\n @type cmd_id: int\n \"\"\"\n session = create_session()\n session.begin()\n try:\n cmds = session.query(Commands)\n cmds = cmds.select_from(self.commands)\n cmds = cmds.filter(self.commands.c.id == cmd_id)\n\n ok = self._deleteCommands(session, cmds)\n if ok:\n session.commit()\n session.close()\n return True\n else:\n session.rollback()\n session.close()\n return False\n\n except Exception as exc:\n self.logger.error(\"Delete of command (id=%s) failed: %s\" % (cmd_id, str(exc)))\n session.rollback()\n session.close()\n return False\n\n def deleteCommandOnHost(self, coh_id):\n \"\"\"\n Deletes a command with all related sub-elements.\n\n @param cmd_id: Commands id\n @type cmd_id: int\n \"\"\"\n session = create_session()\n 
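# explicit transaction: the bundle, its commands and their commands_on_host\n        # rows are removed together or, on error, rolled back as one unit\n        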
session.begin()\n try:\n cohs = session.query(CommandsOnHost)\n cohs = cohs.select_from(self.commands_on_host)\n cohs = cohs.filter(self.commands_on_host.c.id == coh_id)\n\n ok = self._deleteCommandsOnHost(session, cohs)\n if ok:\n session.commit()\n session.close()\n return True\n else:\n session.rollback()\n session.close()\n return False\n\n\n except Exception as exc:\n self.logger.error(\"Delete of command on host(id=%s) failed: %s\" % (coh_id, str(exc)))\n session.rollback()\n session.close()\n return False\n\n\n\n def _deleteCommands(self, session, cmds):\n \"\"\"\n Deletes a command with all related sub-elements.\n\n @param cmd_id: Commands id\n @type cmd_id: int\n \"\"\"\n for cmd in cmds.all():\n cohs = session.query(CommandsOnHost)\n cohs = cohs.select_from(self.commands_on_host)\n cohs = cohs.filter(self.commands_on_host.c.fk_commands == cmd.id)\n\n ok = self._deleteCommandsOnHost(session, cohs)\n if ok:\n session.delete(cmd)\n session.flush()\n self.logger.info(\"Command (id=%s) successfully deleted\" % (cmd.id))\n\n else:\n self.logger.warn(\"Unable to delete commands on host of command (id=%s)\" % cmd.id)\n return False\n\n return True\n\n\n def _deleteCommandsOnHost(self, session, cohs):\n \"\"\"\n Deletes a command with all related sub-elements.\n\n @param cohs: Commands hon Host\n @type cohs: query list\n \"\"\"\n for coh in cohs.all():\n session.delete(coh)\n session.flush()\n\n targets = session.query(Target)\n targets = targets.select_from(self.target)\n targets = targets.filter(self.target.c.id == coh.fk_target)\n\n for target in targets.all():\n session.delete(target)\n session.flush()\n\n\n phases = session.query(CommandsOnHostPhase)\n phases = phases.select_from(self.commands_on_host_phase)\n phases = phases.filter(self.commands_on_host_phase.c.fk_commands_on_host == coh.id)\n\n for phase in phases.all():\n session.delete(phase)\n session.flush()\n\n\n hists = session.query(CommandsHistory)\n hists = hists.select_from(self.commands_history)\n hists = hists.filter(self.commands_history.c.fk_commands_on_host == coh.id)\n\n for hist in hists.all():\n session.delete(hist)\n session.flush()\n\n\n session.delete(coh)\n session.flush()\n\n return True\n\n\n\n\n\n\n\n\n\n def extendCommand(self, cmd_id, start_date, end_date):\n \"\"\"\n Custom command re-scheduling.\n\n @param cmd_id: Commands id\n @type cmd_id: int\n\n @param start_date: new start date of command\n @type start_date: str\n\n @param end_date: new end date of command\n @type end_date: str\n \"\"\"\n session = create_session()\n cmd = session.query(Commands).get(cmd_id)\n if cmd :\n cmd.start_date = start_date\n cmd.end_date = end_date\n cmd.sum_running = cmd.sum_failed\n cmd.sum_failed = 0\n session.add(cmd)\n session.flush()\n\n self._extendCommandsOnHost(session, cmd_id, start_date, end_date)\n self.logger.info(\"msc: re-scheduling command id = <%s> from %s to %s\" % (cmd_id, start_date, end_date))\n\n\n session.close()\n\n def _extendCommandsOnHost(self, session, cmd_id, start_date, end_date):\n \"\"\"\n Update of all commands on host attached on updated command.\n\n @param cmd_id: Commands id\n @type cmd_id: int\n\n @param start_date: new start date of command_on_host\n @type start_date: str\n\n @param end_date: new end date of command_on_host\n @type end_date: str\n \"\"\"\n query = session.query(CommandsOnHost)\n query = query.select_from(self.commands_on_host)\n query = query.filter(self.commands_on_host.c.fk_commands == cmd_id)\n query = query.filter(self.commands_on_host.c.current_state != 
\"done\")\n for coh in query.all():\n coh.start_date = start_date\n coh.end_date = end_date\n coh.next_launch_date = start_date\n coh.attempts_failed = 0\n coh.current_state = \"scheduled\"\n session.add(coh)\n session.flush()\n\n\n\n\n def _createPhases(self,\n session,\n cohs,\n do_imaging_menu,\n do_wol,\n files,\n start_script,\n clean_on_success,\n do_inventory,\n do_halt,\n do_reboot,\n do_windows_update,\n is_quick_action=False):\n wf_list = [\"pre_menu\",\n \"wol\",\n \"post_menu\",\n \"upload\",\n \"execute\",\n \"wu_parse\",\n \"delete\",\n \"inventory\",\n \"reboot\",\n \"halt\",\n \"done\",\n ]\n\n if isinstance(cohs, int):\n cohs = [cohs]\n elif isinstance(cohs, list):\n pass\n else :\n raise TypeError(\"list or int type required\")\n phases_values = []\n for coh in cohs :\n order = 0\n\n for name in wf_list:\n if name == \"pre_menu\" and do_imaging_menu == \"disable\" :\n continue\n if name == \"post_menu\" and do_imaging_menu == \"disable\" :\n continue\n if name == \"wol\" and do_wol == \"disable\" :\n continue\n if name == \"upload\" and len(files) == 0:\n continue\n if name == \"execute\" and (start_script == \"disable\" \\\n or is_quick_action) and do_windows_update == \"disable\":\n continue\n if name == \"wu_parse\" and do_windows_update == \"disable\":\n continue\n if name == \"delete\" and (clean_on_success == \"disable\" or is_quick_action):\n continue\n if name == \"inventory\" and do_inventory == \"disable\" :\n continue\n if name == \"reboot\" and do_reboot == \"disable\" :\n continue\n if name == \"halt\" and do_halt == \"disable\" :\n continue\n\n phases_values.append({\"fk_commands_on_host\": coh.id,\n \"phase_order\" : order,\n \"name\" : name})\n\n order += 1\n\n session.execute(self.commands_on_host_phase.insert(), phases_values)\n\n\n\n\n\n\n\n\n\n def createCommandsOnHost(self, command, target, target_id,\n target_name, cmd_max_connection_attempt,\n start_date, end_date, scheduler = None,\n order_in_proxy = None, max_clients_per_proxy = 0):\n logging.getLogger().debug(\"Create new command on host '%s'\" % target_name)\n return {\n \"host\" : target_name,\n \"start_date\" : start_date,\n \"end_date\" : end_date,\n \"next_launch_date\" : time.strftime(\"%Y-%m-%d %H:%M:%S\"),\n \"current_state\" : \"scheduled\",\n \"uploaded\" : \"TODO\",\n \"executed\" : \"TODO\",\n \"deleted\" : \"TODO\",\n \"attempts_left\" : cmd_max_connection_attempt,\n \"next_attempt_date_time\" : 0,\n \"scheduler\" : scheduler,\n \"order_in_proxy\" : order_in_proxy,\n \"max_clients_per_proxy\": max_clients_per_proxy,\n \"fk_target\" : target_id,\n \"fk_commands\" : command\n }\n\n def createTarget(self, targetName, targetUuid, targetIp, targetMac, targetBCast, targetNetmask, mirror, groupID = None):\n \"\"\"\n Inject a new Target object in our MSC database\n Return the corresponding Target object\n \"\"\"\n target = { \"target_name\" : targetName,\n \"target_uuid\" : targetUuid,\n \"target_ipaddr\" : targetIp,\n \"target_macaddr\" : targetMac,\n \"target_bcast\" : targetBCast,\n \"target_network\" : targetNetmask,\n \"mirrors\" : mirror,\n \"id_group\" : groupID }\n return target\n\n def getCommandsonhostsAndSchedulersOnBundle(self, fk_bundle):\n \"\"\"\n \"\"\"\n conn = self.getDbConnection()\n c_ids = select([self.commands.c.id], self.commands.c.fk_bundle == fk_bundle).execute()\n c_ids = [x[0] for x in c_ids]\n result = select([self.commands_on_host.c.id, self.commands_on_host.c.scheduler], self.commands_on_host.c.fk_commands.in_(c_ids)).execute()\n schedulers = {}\n for row in 
result:\n coh, scheduler = row\n if scheduler in schedulers:\n schedulers[scheduler].append(coh)\n else:\n schedulers[scheduler] = [coh]\n conn.close()\n return schedulers\n\n def getCommandsonhostsAndSchedulers(self, c_id):\n \"\"\"\n For a given command id, returns a dict with:\n - keys: a scheduler id (e.g. scheduler_01)\n - values: the related commands_on_host for each scheduler\n \"\"\"\n conn = self.getDbConnection()\n result = select([self.commands_on_host.c.id, self.commands_on_host.c.scheduler], self.commands_on_host.c.fk_commands == c_id).execute()\n schedulers = {}\n for row in result:\n coh, scheduler = row\n if scheduler in schedulers:\n schedulers[scheduler].append(coh)\n else:\n schedulers[scheduler] = [coh]\n conn.close()\n return schedulers\n\n def __queryUsersFilterBis(self, ctx):\n \"\"\"\n Build a part of a query for commands, that add user filtering\n \"\"\"\n if ctx.filterType == \"mine\":\n # User just want to get her/his commands\n return self.commands.c.creator == ctx.userid\n elif ctx.filterType == \"all\":\n # User want to get all commands she/he has the right to see\n if ctx.userid == \"root\":\n # root can see everything, so no filter for root\n return True\n elif ctx.locationsCount not in [None, 0, 1] and ctx.userids:\n # We have multiple locations, and a list of userids sharing the\n # same locations of the current user\n userids = ctx.userids\n # If show root commands is activated, we add it\n if self.config.show_root_commands:\n userids.append('root')\n return self.commands.c.creator.in_(userids)\n else:\n # Unknown filter type\n self.logger.debug(\"Unknown filter type when querying commands\")\n if ctx.locationsCount not in [None, 0, 1]:\n # We have multiple locations (entities) in database, so we\n # filter the results using the current userid\n return self.commands.c.creator == ctx.userid\n return True\n\n def __queryUsersFilter(self, ctx, q):\n \"\"\"\n Build a part of a query for commands, that add user filtering\n \"\"\"\n # should use return q.filter(self.__queryUsersFilterBis(ctx))\n if ctx.filterType == \"mine\":\n # User just want to get her/his commands\n q = q.filter(self.commands.c.creator == ctx.userid)\n elif ctx.filterType == \"all\":\n # User want to get all commands she/he has the right to see\n if ctx.userid == \"root\":\n # root can see everything, so no filter for root\n pass\n elif ctx.locationsCount not in [None, 0, 1] and ctx.userids:\n # We have multiple locations, and a list of userids sharing the\n # same locations of the current user\n userids = ctx.userids\n # If show root commands is activated, we add it\n if self.config.show_root_commands:\n userids.append('root')\n q = q.filter(self.commands.c.creator.in_(userids))\n # else if we have just one location, we don't apply any filter. 
The\n # user can see the commands of all users\n\n else:\n # Unknown filter type\n self.logger.debug(\"Unknown filter type when querying commands\")\n if ctx.locationsCount not in [None, 0, 1]:\n # We have multiple locations (entities) in database, so we\n # filter the results using the current userid\n q = q.filter(self.commands.c.creator == ctx.userid)\n return q\n\n def __queryAllCommandsonhostBy(self, session, ctx):\n \"\"\"\n Built a part of the query for the *AllCommandsonhost* methods\n \"\"\"\n\n join = self.commands_on_host.join(self.commands).join(self.target).outerjoin(self.bundle)\n q = session.query(CommandsOnHost, Commands, Target, Bundle)\n q = q.select_from(join)\n q = self.__queryUsersFilter(ctx, q)\n return q\n\n def getAllCommandsonhostCurrentstate(self, ctx): # TODO use ComputerLocationManager().doesUserHaveAccessToMachine\n session = create_session()\n ret = self.__queryAllCommandsonhostBy(session, ctx)\n ret = ret.add_column(self.commands.c.max_connection_attempt).filter(self.commands_on_host.c.current_state != ''). \\\n group_by(self.commands_on_host.c.current_state). \\\n group_by(self.commands_on_host.c.attempts_left). \\\n group_by(self.commands.c.max_connection_attempt). \\\n order_by(asc(self.commands_on_host.c.next_launch_date))\n # x[0] contains a commands_on_host object x[1] contains commands\n l = []\n for x in ret.all(): # patch to have rescheduled as a \"state\" ... must be emulated\n if x[0].current_state == 'scheduled' and x[0].attempts_left != x[1].max_connection_attempt and not 'rescheduled' in l:\n l.append('rescheduled')\n elif not x[0].current_state in l:\n l.append(x[0].current_state)\n session.close()\n return l\n\n def countAllCommandsonhostByCurrentstate(self, ctx, current_state, filt = ''): # TODO use ComputerLocationManager().doesUserHaveAccessToMachine\n session = create_session()\n ret = self.__queryAllCommandsonhostBy(session, ctx)\n if current_state == 'rescheduled': # patch to have rescheduled as a \"state\" ... must be emulated\n ret = ret.filter(and_(self.commands.c.max_connection_attempt != self.commands_on_host.c.attempts_left, self.commands_on_host.c.current_state == 'scheduled'))\n elif current_state == 'scheduled':\n ret = ret.filter(and_(self.commands.c.max_connection_attempt == self.commands_on_host.c.attempts_left, self.commands_on_host.c.current_state == 'scheduled'))\n else:\n ret = ret.filter(self.commands_on_host.c.current_state == current_state)\n # the join in itself is useless here, but we want to have exactly\n # the same result as in getAllCommandsonhostByCurrentstate\n if filt != '':\n ret = ret.filter(or_(self.commands_on_host.c.host.like('%'+filt+'%'), self.commands.c.title.like('%'+filt+'%')))\n c = ret.count()\n session.close()\n return c\n\n def getAllCommandsonhostByCurrentstate(self, ctx, current_state, min = 0, max = 10, filt = ''): # TODO use ComputerLocationManager().doesUserHaveAccessToMachine\n session = create_session()\n ret = self.__queryAllCommandsonhostBy(session, ctx)\n if current_state == 'rescheduled': # patch to have rescheduled as a \"state\" ... 
must be emulated\n ret = ret.filter(and_(self.commands.c.max_connection_attempt != self.commands_on_host.c.attempts_left, self.commands_on_host.c.current_state == 'scheduled'))\n elif current_state == 'scheduled':\n ret = ret.filter(and_(self.commands.c.max_connection_attempt == self.commands_on_host.c.attempts_left, self.commands_on_host.c.current_state == 'scheduled'))\n else:\n ret = ret.filter(self.commands_on_host.c.current_state == current_state)\n if filt != '':\n ret = ret.filter(or_(self.commands_on_host.c.host.like('%'+filt+'%'), self.commands.c.title.like('%'+filt+'%')))\n ret = ret.order_by(desc(self.commands_on_host.c.id))\n ret = ret.offset(int(min))\n ret = ret.limit(int(max)-int(min))\n l = []\n for x in ret.all():\n bundle = x[3]\n if bundle != None:\n bundle = bundle.toH()\n l.append([x[0].toH(), x[1].toH(), x[2].toH(), bundle])\n session.close()\n return l\n\n def countAllCommandsonhostByType(self, ctx, type, filt = ''): # TODO use ComputerLocationManager().doesUserHaveAccessToMachine\n session = create_session()\n ret = self.__queryAllCommandsonhostBy(session, ctx)\n if filt != '':\n ret = ret.filter(or_(self.commands_on_host.c.host.like('%'+filt+'%'), self.commands.c.title.like('%'+filt+'%')))\n if int(type) == 0: # all\n pass\n elif int(type) == 1: # pending\n ret = ret.filter(self.commands_on_host.c.current_state.in_( ('upload_failed', 'execution_failed', 'delete_failed', 'inventory_failed', 'not_reachable', 'pause', 'stop', 'stopped', 'scheduled') ))\n elif int(type) == 2: # running\n ret = ret.filter(self.commands_on_host.c.current_state.in_( ('upload_in_progress', 'upload_done', 'execution_in_progress', 'execution_done', 'delete_in_progress', 'delete_done', 'inventory_in_progress', 'inventory_done') ))\n elif int(type) == 3: # finished\n ret = ret.filter(self.commands_on_host.c.current_state.in_( ('done', 'failed', 'over_timed') ))\n c = ret.count()\n session.close()\n return c\n\n def getAllCommandsonhostByType(self, ctx, type, min, max, filt = ''): # TODO use ComputerLocationManager().doesUserHaveAccessToMachine\n session = create_session()\n ret = self.__queryAllCommandsonhostBy(session, ctx)\n if filt != '':\n ret = ret.filter(or_(self.commands_on_host.c.host.like('%'+filt+'%'), self.commands.c.title.like('%'+filt+'%')))\n if int(type) == 0: # all\n pass\n elif int(type) == 1: # pending\n ret = ret.filter(self.commands_on_host.c.current_state.in_( ('upload_failed', 'execution_failed', 'delete_failed', 'inventory_failed', 'not_reachable', 'pause', 'stop', 'stopped', 'scheduled') ))\n elif int(type) == 2: # running\n ret = ret.filter(self.commands_on_host.c.current_state.in_( ('upload_in_progress', 'upload_done', 'execution_in_progress', 'execution_done', 'delete_in_progress', 'delete_done', 'inventory_in_progress', 'inventory_done') ))\n elif int(type) == 3: # finished\n ret = ret.filter(self.commands_on_host.c.current_state.in_( ('done', 'failed', 'over_timed') ))\n ret = ret.order_by(desc(self.commands_on_host.c.id))\n ret = ret.offset(int(min))\n ret = ret.limit(int(max)-int(min))\n l = []\n for x in ret.all():\n bundle = x[3]\n if bundle != None:\n bundle = bundle.toH()\n l.append([x[0].toH(), x[1].toH(), x[2].toH(), bundle])\n session.close()\n return l\n\n def countAllCommandsOnHostBundle(self, ctx, uuid, fk_bundle, filt, history): # TODO use ComputerLocationManager().doesUserHaveAccessToMachine\n session = create_session()\n ret = 
session.query(CommandsOnHost).select_from(self.commands_on_host.join(self.commands).join(self.target)).filter(self.target.c.target_uuid == uuid).filter(self.commands.c.creator == ctx.userid).filter(self.commands.c.fk_bundle == fk_bundle)\n# ret = ret.filter(self.commands_on_host.c.id == self.target.c.fk_commands_on_host)\n if filt != '':\n ret = ret.filter(self.commands.c.title.like('%'+filt+'%'))\n if history:\n ret = ret.filter(self.commands_on_host.c.current_state == 'done')\n else:\n ret = ret.filter(self.commands_on_host.c.current_state != 'done')\n c = ret.count()\n session.close()\n return c\n\n def countAllCommandsOnHost(self, ctx, uuid, filt):\n if ComputerLocationManager().doesUserHaveAccessToMachine(ctx, uuid):\n session = create_session()\n ret = session.query(CommandsOnHost).select_from(self.commands_on_host.join(self.commands).join(self.target)).filter(self.target.c.target_uuid == uuid)\n #.filter(self.commands.c.creator == ctx.userid)\n if filt != '':\n ret = ret.filter(self.commands.c.title.like('%'+filt+'%'))\n c = ret.count()\n session.close()\n return c\n self.logger.warn(\"User %s does not have sufficient permissions to access '%s'\" % (ctx.userid, uuid))\n return False\n\n def getAllCommandsOnHost(self, ctx, uuid, min, max, filt):\n if ComputerLocationManager().doesUserHaveAccessToMachine(ctx, uuid):\n session = create_session()\n query = session.query(Commands).add_column(self.commands_on_host.c.id).add_column(self.commands_on_host.c.current_state)\n query = query.select_from(self.commands.join(self.commands_on_host).join(self.target)).filter(self.target.c.target_uuid == uuid)\n #.filter(self.commands.c.creator == ctx.userid)\n if filt != '':\n query = query.filter(self.commands.c.title.like('%'+filt+'%'))\n query = query.order_by(asc(self.commands_on_host.c.next_launch_date))\n query = query.offset(int(min))\n query = query.limit(int(max)-int(min))\n ret = query.all()\n session.close()\n return [(x[0].toH(), x[1], x[2]) for x in ret]\n self.logger.warn(\"User %s does not have sufficient permissions to access '%s'\" % (ctx.userid, uuid))\n return []\n\n def getAllCommandsConsult(self, ctx, min, max, filt, expired = True):\n\n session = create_session()\n\n # ====== GENERATING FILTERS ============================\n\n # User context filter\n filters = self.__queryUsersFilterBis(ctx)\n # Search text filtering\n\n if filt:\n filters = and_(filters, or_(self.commands.c.title.like('%%%s%%'%(filt)),\n self.commands.c.creator.like('%%%s%%'%(filt)),\n self.commands.c.start_date.like('%%%s%%'%(filt)),\n self.bundle.c.title.like('%%%s%%'%(filt)),\n self.target.c.target_name.like('%%%s%%'%(filt))))\n\n # Bundle join filtering\n #filters = filters & (self.commands.c.fk_bundle == self.bundle.c.id)\n\n if expired:\n filters = and_(filters, (self.commands.c.end_date <= func.now()))\n else:\n filters = and_(filters, (self.commands.c.end_date > func.now()))\n\n # Adding command type filtering\n # Show default commands type=0 and convergence commands type=2\n filters = and_(filters, (self.commands.c.type.in_([0, 2])))\n\n # ====== CALCULATING COUNT ============================\n\n query = session.query(func.count(distinct(Commands.id)))\n query = query.select_from(self.commands.join(self.commands_on_host, self.commands_on_host.c.fk_commands == self.commands.c.id)\\\n .join(self.target, self.commands_on_host.c.fk_target == self.target.c.id)\\\n .outerjoin(self.bundle, self.commands.c.fk_bundle == self.bundle.c.id))\n # Filtering on filters\n query = query.filter(filters)\n # Grouping bundle commands 
by fk_bundle only if fk_bundle is not null\n # So we generate a random md5 hash for commands that have a null fk_bundle\n query = query.group_by(func.ifnull(self.commands.c.fk_bundle, func.md5(self.commands.c.id)))\n size = len(query.all())\n\n # ====== MAIN QUERY ============================\n query = session.query(Commands)\n query = query.add_column(self.commands.c.fk_bundle).add_column(self.commands_on_host.c.host).add_column(self.commands_on_host.c.id)\n query = query.add_column(self.target.c.id_group).add_column(self.bundle.c.title).add_column(self.target.c.target_uuid)\n query = query.add_column(self.pull_targets.c.target_uuid)\n query = query.select_from(self.commands.join(self.commands_on_host, self.commands_on_host.c.fk_commands == self.commands.c.id)\\\n .join(self.target, self.commands_on_host.c.fk_target == self.target.c.id)\\\n .outerjoin(self.bundle, self.commands.c.fk_bundle == self.bundle.c.id)) \\\n .outerjoin(self.pull_targets, self.target.c.target_uuid == self.pull_targets.c.target_uuid)\n # Filtering on filters\n query = query.filter(filters)\n # Grouping bundle commands by fk_bundle only if fk_bundle is not null\n # So we generate a random md5 hash for commands that have a null fk_bundle\n query = query.group_by(func.ifnull(self.commands.c.fk_bundle, func.md5(self.commands.c.id))) #.group_by(self.commands.c.id)\n query = query.order_by(desc(self.commands.c.id))\n # Limit result\n cmds = query.offset(int(min)).limit(int(max)-int(min)).all()\n\n session.close()\n\n ret = []\n for cmd, bid, target_name, cohid, gid, btitle, target_uuid, machine_pull in cmds:\n if bid != None: # we are in a bundle\n if gid != None and gid != '':\n ret.append({\n 'title':btitle,\n 'creator':cmd.creator,\n 'creation_date':cmd.creation_date,\n 'start_date':cmd.start_date,\n 'end_date':cmd.end_date,\n 'sum_running':cmd.sum_running,\n 'sum_failed':cmd.sum_failed,\n 'sum_done':cmd.sum_done,\n 'sum_stopped':cmd.sum_stopped,\n 'sum_overtimed':cmd.sum_overtimed,\n 'bid':bid,\n 'cmdid':'',\n 'target':'group %s'%gid,\n 'gid':gid,\n 'uuid':'',\n 'machine_pull': machine_pull,\n 'deployment_intervals': cmd.deployment_intervals\n })\n else:\n ret.append({\n 'title':btitle,\n 'creator':cmd.creator,\n 'creation_date':cmd.creation_date,\n 'start_date':cmd.start_date,\n 'end_date':cmd.end_date,\n 'sum_running':cmd.sum_running,\n 'sum_failed':cmd.sum_failed,\n 'sum_done':cmd.sum_done,\n 'sum_stopped':cmd.sum_stopped,\n 'sum_overtimed':cmd.sum_overtimed,\n 'bid':bid,\n 'cmdid':'',\n 'target':target_name,\n 'uuid':target_uuid,\n 'machine_pull': machine_pull,\n 'gid':'',\n 'deployment_intervals': cmd.deployment_intervals\n })\n else: # we are not in a bundle\n if gid != None and gid != '':\n ret.append({\n 'title':cmd.title,\n 'creator':cmd.creator,\n 'creation_date':cmd.creation_date,\n 'start_date':cmd.start_date,\n 'end_date':cmd.end_date,\n 'sum_running':cmd.sum_running,\n 'sum_failed':cmd.sum_failed,\n 'sum_done':cmd.sum_done,\n 'sum_stopped':cmd.sum_stopped,\n 'sum_overtimed':cmd.sum_overtimed,\n 'bid':'',\n 'cmdid':cmd.id,\n 'target':'group %s'%gid,\n 'gid':gid,\n 'uuid':'',\n 'machine_pull': machine_pull,\n 'deployment_intervals': cmd.deployment_intervals,\n 'type': cmd.type\n })\n else:\n ret.append({\n 'title':cmd.title,\n 'creator':cmd.creator,\n 'creation_date':cmd.creation_date,\n 'start_date':cmd.start_date,\n 'end_date':cmd.end_date,\n 'sum_running':cmd.sum_running,\n 'sum_failed':cmd.sum_failed,\n 'sum_done':cmd.sum_done,\n 'sum_stopped':cmd.sum_stopped,\n 'sum_overtimed':cmd.sum_overtimed,\n 'bid':'',\n 
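# non-bundle, single-target case: the record also carries the coh id and a status placeholder\n 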
'cmdid':cmd.id,\n 'cohid':cohid,\n 'target':target_name,\n 'uuid':target_uuid,\n 'machine_pull': machine_pull,\n 'gid':'',\n 'status':{},\n 'deployment_intervals': cmd.deployment_intervals,\n 'type': cmd.type\n })\n\n return [size, ret]\n\n ###################\n def __displayLogsQuery(self, ctx, params, session):\n query = session.query(Commands).select_from(self.commands.join(self.commands_on_host).join(self.target))\n if params['gid'] != None:\n query = query.filter(self.target.c.id_group == params['gid'])\n if params['uuid'] != None:\n query = query.filter(self.target.c.target_uuid == params['uuid'])\n if params['filt'] != None:\n query = query.filter(self.commands.c.title.like('%'+params['filt']+'%'))\n #if params['finished']:\n # query = query.filter(self.commands_on_host.c.current_state.in_(['done', 'failed', 'over_timed']))\n else:\n # If we are querying on a bundle, we also want to display the\n # commands_on_host flagged as done\n #if params['b_id'] == None:\n # query = query.filter(not_(self.commands_on_host.c.current_state.in_(['done', 'failed', 'over_timed'])))\n pass\n query = self.__queryUsersFilter(ctx, query)\n\n # Finished param\n if 'finished' in params and params['finished'] == '1':\n query = query.filter(self.commands.c.end_date <= func.now())\n elif 'finished' in params and params['finished'] == '0':\n query = query.filter(self.commands.c.end_date > func.now())\n\n return query.group_by(self.commands.c.id).order_by(desc(params['order_by']))\n\n def __doneBundle(self, params, session):\n query = session.query(Commands).select_from(self.commands.join(self.commands_on_host))\n filter = []\n if params['b_id'] != None:\n filter = [self.commands.c.fk_bundle == params['b_id']]\n elif params['cmd_id'] != None:\n filter = [self.commands.c.id == params['cmd_id']]\n #filter.append(not_(self.commands_on_host.c.current_state.in_(['done', 'failed', 'over_timed'])))\n query = query.filter(and_(*filter))\n how_much = query.count()\n if how_much > 0:\n return False\n return True\n\n def __displayLogsQuery2(self, ctx, params, session, count = False):\n filter = []\n group_by = False\n group_clause = False\n\n # Get query parts\n if count:\n query = session.query(func.count('*')).select_from(self.commands.join(self.commands_on_host).join(self.target))\n else:\n query = session.query(Commands).select_from(self.commands.join(self.commands_on_host).join(self.target).outerjoin(self.pull_targets, self.pull_targets.c.target_uuid == self.target.c.target_uuid))\n query = query.add_column(self.commands_on_host.c.id).add_column(self.commands_on_host.c.current_state).add_column(PullTargets.target_uuid)\n\n if params['cmd_id'] != None: # COH\n filter = [self.commands.c.id == params['cmd_id']]\n if params['b_id'] != None:\n filter.append(self.commands.c.fk_bundle == params['b_id'])\n else: # CMD\n if params['b_id'] != None:\n filter = [self.commands.c.fk_bundle == params['b_id']]\n group_by = True\n group_clause = self.commands.c.id\n\n if params['gid'] != None: # Filter on a machine group id\n filter.append(self.target.c.id_group == params['gid'])\n\n if params['uuid'] != None: # Filter on a machine uuid\n filter.append(self.target.c.target_uuid == params['uuid'])\n\n if params['filt'] != None: # Filter on command names\n filter.append(self.commands.c.title.like('%s%s%s' % ('%', params['filt'], '%')) | self.target.c.target_name.like('%s%s%s' % ('%', params['filt'], '%')) )\n\n # Finished param\n if 'finished' in params and params['finished'] == '1':\n filter.append(self.commands.c.end_date 
<= func.now())\n elif 'finished' in params and params['finished'] == '0':\n filter.append(self.commands.c.end_date > func.now())\n\n # Filtering on COH State\n if 'state' in params and params['state']:\n filter.append(self.commands_on_host.c.current_state.in_(params['state']))\n\n #if params['b_id'] == None:\n # is_done = self.__doneBundle(params, session)\n #if params['finished'] and not is_done: # Filter on finished commands only\n # filter.append(1 == 0) # send nothing\n #elif not params['finished'] and is_done:\n # If we are querying on a bundle, we also want to display the\n # commands_on_host flagged as done\n # filter.append(1 == 0) # send nothing\n# else:\n# is_done = self.__doneBundle(params, session)\n# self.logger.debug(\"is the bundle done ? %s\"%(str(is_done)))\n\n query = self.__queryUsersFilter(ctx, query)\n query = query.filter(and_(*filter))\n\n if group_by:\n query = query.group_by(group_clause)\n\n if not count:\n return query\n else:\n return query.all()[0][0]\n\n def __displayLogsQueryGetIds(self, cmds, min = 0, max = -1, params = {}):\n i = 0\n min = int(min)\n max = int(max)\n ids = []\n defined = {}\n for cmd in cmds:\n id, fk_bundle = cmd\n if max != -1 and max-1 < i:\n break\n if i < min:\n if fk_bundle != 'NULL' and fk_bundle != None and fk_bundle not in defined:\n defined[fk_bundle] = id\n i += 1\n elif fk_bundle == 'NULL' or fk_bundle == None:\n i += 1\n continue\n if fk_bundle != 'NULL' and fk_bundle != None and fk_bundle not in defined:\n defined[fk_bundle] = id\n ids.append(id)\n i += 1\n elif fk_bundle == 'NULL' or fk_bundle == None:\n ids.append(id)\n i += 1\n return ids\n\n def __displayLogReturn(self, ctx, list):\n # list is: cmd, cohid, cohstate\n cohids = [x[1] for x in list]\n cohs = self.getCommandsOnHosts(ctx, cohids)\n ret = []\n for element in list:\n if element[1] in cohs:\n if len(element) == 4:\n ret.append((element[0].toH(), element[1], element[2], cohs[element[1]].toH(), element[3]))\n else:\n ret.append((element[0].toH(), element[1], element[2], cohs[element[1]].toH()))\n else:\n ret.append((element[0].toH(), element[1], element[2], False))\n return ret\n\n def checkLightPullCommands(self, uuid):\n \"\"\"\n Returns all coh ids to re-execute.\n\n @param uuid: uuid of checked computer\n @type uuid: str\n\n @return: coh ids to start\n @rtype: list\n \"\"\"\n session = create_session()\n\n query = session.query(CommandsOnHost)\n query = query.select_from(self.commands.join(self.commands_on_host).join(self.target))\n query = query.filter(self.target.c.target_uuid == uuid)\n query = query.filter(self.commands_on_host.c.current_state == \"scheduled\")\n\n ret = [q.id for q in query.all()]\n\n session.close()\n\n return ret\n\n def displayLogs(self, ctx, params = None): # TODO USE ctx\n if params is None: # do not change the default value!\n params = {}\n session = create_session()\n for i in ('b_id', 'cmd_id', 'coh_id', 'gid', 'uuid', 'filt'):\n if i not in params or params[i] == '':\n params[i] = None\n if 'min' not in params:\n params['min'] = 0\n if 'max' not in params:\n params['max'] = -1\n #if not params.has_key('finished') or params['finished'] == '':\n # params['finished'] = False\n try:\n params['order_by'] = getattr(self.commands_on_host.c, params['order_by'])\n except:\n params['order_by'] = getattr(self.commands_on_host.c, 'id')\n\n size = 0\n\n# msc.displayLogs({'max': 10, 'finished': '', 'filt': '', 'uuid': 'UUID1620', 'min': 0},)\n if params['gid'] or params['uuid']: # we want information about one group / host\n if 
params['cmd_id']: # we want information about one command on one group / host\n # Using min/max, we get a range of commands, but we always want\n # the total count of commands.\n ret = self.__displayLogsQuery2(ctx, params, session).offset(int(params['min'])).limit(int(params['max'])-int(params['min'])).all()\n size = self.__displayLogsQuery2(ctx, params, session, True)\n session.close()\n return size, self.__displayLogReturn(ctx, ret)\n elif params['b_id']: # we want information about one bundle on one group / host\n # Using min/max, we get a range of commands, but we always want\n # the total count of commands.\n ret = self.__displayLogsQuery2(ctx, params, session).order_by(self.commands.c.order_in_bundle).offset(int(params['min'])).limit(int(params['max'])-int(params['min'])).all()\n size = self.__displayLogsQuery2(ctx, params, session, True)\n session.close()\n return size, self.__displayLogReturn(ctx, ret)\n else: # we want all information about one group / host\n # Get all commands related to the given computer UUID or group\n # id\n ret = self.__displayLogsQuery(ctx, params, session).order_by(asc(params['order_by'])).all()\n cmds = []\n for c in ret:\n cmds.append((c.id, c.fk_bundle))\n\n size = []\n size.extend(cmds)\n size = len(self.__displayLogsQueryGetIds(size, params = params))\n\n ids = self.__displayLogsQueryGetIds(cmds, params['min'], params['max'], params)\n\n query = session.query(Commands).select_from(self.commands.join(self.commands_on_host).join(self.target))\n query = query.add_column(self.commands_on_host.c.id).add_column(self.commands_on_host.c.current_state)\n query = query.filter(self.commands.c.id.in_(ids))\n if params['uuid']:\n # Filter target according to the given UUID\n query = query.filter(self.target.c.target_uuid == params['uuid'])\n query = query.order_by(desc(params['order_by']))\n ret = query.group_by(self.commands.c.id).all()\n\n session.close()\n return size, self.__displayLogReturn(ctx, ret)\n else: # we want all information\n if params['cmd_id']: # we want all information about one command\n ret = self.__displayLogsQuery2(ctx, params, session).all()\n # FIXME: using distinct, size will always return 1 ...\n size = self.__displayLogsQuery2(ctx, params, session, True)\n session.close()\n return size, self.__displayLogReturn(ctx, ret)\n elif params['b_id']: # we want all information about one bundle\n ret = self.__displayLogsQuery2(ctx, params, session).order_by(self.commands.c.order_in_bundle).all()\n # FIXME: using distinct, size will always return 1 ...\n size = self.__displayLogsQuery2(ctx, params, session, True)\n session.close()\n return size, self.__displayLogReturn(ctx, ret)\n else: # we want all information about everything\n ret = self.__displayLogsQuery(ctx, params, session).order_by(asc(params['order_by'])).all()\n cmds = [(c.id, c.fk_bundle) for c in ret]\n\n size = []\n size.extend(cmds)\n size = len(self.__displayLogsQueryGetIds(size))\n\n ids = self.__displayLogsQueryGetIds(cmds, params['min'], params['max'], params = params)\n\n query = session.query(Commands).select_from(self.commands.join(self.commands_on_host).join(self.target))\n query = query.add_column(self.commands_on_host.c.id).add_column(self.commands_on_host.c.current_state)\n query = query.filter(self.commands.c.id.in_(ids))\n query = query.order_by(desc(params['order_by']))\n ret = query.group_by(self.commands.c.id).all()\n\n session.close()\n return size, self.__displayLogReturn(ctx, ret)\n\n ###################\n def getCommandsOnHosts(self, ctx, coh_ids):\n 
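# Batch version of getCommandsOnHost: map each coh id to its CommandsOnHost\n # (phases loaded), only when the user can access all the target machines.\n 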
session = create_session()\n cohs = session.query(CommandsOnHost).add_column(self.commands_on_host.c.id).filter(self.commands_on_host.c.id.in_(coh_ids)).all()\n session.close()\n targets = self.getTargetsForCoh(ctx, coh_ids)\n if ComputerLocationManager().doesUserHaveAccessToMachines(ctx, [t.target_uuid for t in targets], False):\n ret = {}\n session = create_session()\n for e in cohs:\n # Loading coh phases\n e[0].phases = session.query(CommandsOnHostPhase).filter_by(fk_commands_on_host = e[1]).all()\n e[0].phases = [phase.toDict() for phase in e[0].phases]\n ret[e[1]] = e[0]\n session.close()\n return ret\n return {}\n\n def getCommandsOnHost(self, ctx, coh_id):\n session = create_session()\n coh = session.query(CommandsOnHost).get(coh_id)\n if coh == None:\n self.logger.warn(\"User %s tried to access a coh that doesn't exist '%s'\" % (ctx.userid, coh_id))\n return False\n coh.phases = session.query(CommandsOnHostPhase).filter_by(fk_commands_on_host = coh_id).all()\n coh.phases = [phase.toDict() for phase in coh.phases]\n session.close()\n target = self.getTargetForCoh(ctx, coh_id)\n if ComputerLocationManager().doesUserHaveAccessToMachine(ctx, target.target_uuid):\n return coh\n self.logger.warn(\"User %s does not have the right permissions to access '%s'\" % (ctx.userid, target.target_name))\n return False\n\n def getTargetsForCoh(self, ctx, coh_ids): # FIXME should we use the ctx\n session = create_session()\n targets = session.query(Target).select_from(self.target.join(self.commands_on_host)).filter(self.commands_on_host.c.id.in_(coh_ids)).all()\n session.close()\n return targets\n\n def getTargetForCoh(self, ctx, coh_id): # FIXME should we use the ctx\n # TODO use ComputerLocationManager().doesUserHaveAccessToMachine\n session = create_session()\n target = session.query(Target).select_from(self.target.join(self.commands_on_host)).filter(self.commands_on_host.c.id == coh_id).first()\n session.close()\n return target\n\n def getCommandsHistory(self, ctx, coh_id): # FIXME should we use the ctx\n # TODO use ComputerLocationManager().doesUserHaveAccessToMachine\n session = create_session()\n ret = session.query(CommandsHistory).filter(self.commands_history.c.fk_commands_on_host == coh_id).all()\n session.close()\n return [x.toH() for x in ret]\n\n def getBundle(self, ctx, fk_bundle):\n session = create_session()\n try:\n ret = session.query(Bundle).filter(self.bundle.c.id == fk_bundle).first().toH()\n except:\n self.logger.info(\"Bundle '%s' can't be retrieved by '%s'\"%(fk_bundle, ctx.userid))\n return [None, []]\n try:\n cmds = [a.toH() for a in session.query(Commands).filter(self.commands.c.fk_bundle == fk_bundle).order_by(self.commands.c.order_in_bundle).all()]\n except:\n self.logger.info(\"Commands for bundle '%s' can't be retrieved by '%s'\"%(fk_bundle, ctx.userid))\n return [ret, []]\n session.close()\n try:\n ret['creation_date'] = cmds[0]['creation_date']\n except:\n ret['creation_date'] = ''\n return [ret, cmds]\n\n @DatabaseHelper._session\n def getCommands(self, session, ctx, cmd_id):\n if cmd_id == None or cmd_id == '':\n return False\n a_targets = [target[0] for target in self.getTargets(cmd_id, True)]\n if ComputerLocationManager().doesUserHaveAccessToMachines(ctx, a_targets):\n def _update_command(command, phases):\n \"\"\"\n The new scheduler introduces a phase table, and some statuses are no longer\n updated in the command table, but in the phase table\n So, put these missing results back in the returned command\n \"\"\"\n __statuses = {\n 'do_wol': 'wol',\n 'clean_on_success': 'delete',\n 'do_inventory': 
'inventory',\n 'do_reboot': 'reboot',\n 'do_halt': 'halt',\n 'do_windows_update': 'windows_update',\n }\n #for step in ['do_wol', 'clean_on_success', 'do_inventory', 'do_reboot', 'do_halt']:\n for step in list(__statuses.keys()):\n setattr(command, step, 'enable' if __statuses[step] in phases else 'disable')\n return command\n\n command, coh = session.query(Commands).filter_by(id=cmd_id) \\\n .add_entity(CommandsOnHost) \\\n .outerjoin((CommandsOnHost, Commands.id == CommandsOnHost.fk_commands)).first()\n if coh is not None:\n phases = session.query(CommandsOnHostPhase).filter_by(fk_commands_on_host = coh.id).all()\n phases = [phase.toDict()['name'] for phase in phases]\n # _update_command call for missing statuses\n return _update_command(command, phases)\n else:\n return command\n\n self.logger.warn(\"User %s does not have sufficient permissions to access command '%s'\" % (ctx.userid, str(cmd_id)))\n return False\n\n def getCommandsByGroup(self, gid):# TODO use ComputerLocationManager().doesUserHaveAccessToMachine\n session = create_session()\n ret = session.query(Commands).select_from(self.commands.join(self.commands_on_host).join(self.target)).filter(self.target.c.id_group == gid)\n ret = ret.order_by(desc(self.commands.c.start_date)).all()\n session.close()\n return ret\n\n def getTargetsByGroup(self, gid):# TODO use ComputerLocationManager().doesUserHaveAccessToMachine\n session = create_session()\n ret = session.query(Target).filter(self.target.c.id_group == gid).all()\n session.close()\n return ret\n\n @DatabaseHelper._session\n def isPullTarget(self, session, uuid):\n try:\n session.query(PullTargets).filter(PullTargets.target_uuid == uuid).one()\n return True\n except NoResultFound:\n return False\n\n @DatabaseHelper._session\n def getPullTargets(self, session):\n query = session.query(PullTargets)\n return [uuid.target_uuid for uuid in query]\n\n @DatabaseHelper._session\n def removePullTargets(self, session, uuids):\n query = session.query(PullTargets).filter(\n PullTargets.target_uuid.in_(uuids)\n )\n query.delete(synchronize_session='fetch')\n return True\n\n def getTargets(self, cmd_id, onlyId = False):# TODO use ComputerLocationManager().doesUserHaveAccessToMachine\n if onlyId:\n connection = self.getDbConnection()\n ret = connection.execute(select([self.target.c.target_uuid], and_(self.commands_on_host.c.fk_commands == cmd_id, self.target.c.id == self.commands_on_host.c.fk_target))).fetchall()\n else:\n session = create_session()\n ret = session.query(Target).select_from(self.target.join(self.commands_on_host)).filter(self.commands_on_host.c.fk_commands == cmd_id).all()\n session.close()\n return ret\n\n def getCommandOnHostCurrentState(self, ctx, cmd_id):\n session = create_session()\n ret = session.query(Commands).add_column(self.commands_on_host.c.current_state).select_from(self.commands.join(self.commands_on_host)).filter(self.commands.c.id == cmd_id).first()\n session.close()\n return ret[1]\n\n def getCommandOnHostTitle(self, ctx, cmd_id):\n session = create_session()\n ret = session.query(Commands).select_from(self.commands.join(self.commands_on_host)).filter(self.commands.c.id == cmd_id).first()\n session.close()\n return ret.title\n\n def getCommandOnHostInCommands(self, ctx, cmd_id):\n session = create_session()\n ret = session.query(CommandsOnHost).filter(self.commands_on_host.c.fk_commands == cmd_id).all()\n session.close()\n return [c.id for c in ret]\n\n def getCommandOnGroupByState(self, ctx, cmd_id, state, min = 0, max = -1):\n session = create_session()\n query = 
session.query(CommandsOnHost).add_column(self.target.c.target_uuid).select_from(self.commands_on_host.join(self.commands).join(self.target)).filter(self.commands.c.id == cmd_id).order_by(self.commands_on_host.c.host)\n ret = self.__filterOnStatus(ctx, query, state)\n session.close()\n if max != -1: ret = ret[min:max]\n return [{'coh_id':coh.id, 'uuid':coh.target_uuid, 'host':coh.host, 'start_date':coh.start_date, 'end_date':coh.end_date, 'current_state':coh.current_state} for coh in ret]\n\n def getCommandOnGroupStatus(self, ctx, cmd_id):# TODO use ComputerLocationManager().doesUserHaveAccessToMachine\n session = create_session()\n query = session.query(func.count(self.commands_on_host.c.id), CommandsOnHost).select_from(self.commands_on_host.join(self.commands)).filter(self.commands.c.id == cmd_id)\n ret = self.__getStatus(ctx, query)\n session.close()\n return ret\n\n def getMachineNamesOnGroupStatus(self, ctx, cmd_id, state, limit):\n session = create_session()\n query = session.query(CommandsOnHost).add_column(self.target.c.target_uuid).select_from(self.commands_on_host.join(self.commands).join(self.target)).filter(self.commands.c.id == cmd_id)\n if state in ['success', 'paused', 'stopped', 'running', 'failure']: # Global statuses\n query = query.filter(self.commands_on_host.c.current_state.in_(self.__getAllStatus()[state]))\n # Treat failed statuses\n elif state == \"fail_up\":\n query = query.filter(self.commands_on_host.c.uploaded == 'FAILED')\n elif state == \"fail_ex\":\n query = query.filter(self.commands_on_host.c.executed == 'FAILED')\n elif state == \"fail_rm\":\n query = query.filter(self.commands_on_host.c.deleted == 'FAILED')\n elif state == \"fail_inv\":\n query = query.filter(self.commands_on_host.c.inventoried == 'FAILED')\n elif state == \"fail_wol\":\n query = query.filter(self.commands_on_host.c.awoken == 'FAILED')\n elif state == \"fail_reboot\":\n query = query.filter(self.commands_on_host.c.rebooted == 'FAILED')\n elif state == \"fail_halt\":\n query = query.filter(self.commands_on_host.c.halted == 'FAILED')\n elif state == \"over_timed\":\n query = query.filter(self.commands_on_host.c.current_state == 'over_timed')\n\n # Limit list according to max_elements_for_static_list param in dyngroup.ini\n query = query.limit(limit)\n ret = [{'hostname': machine[0].host, 'target_uuid': machine[1]} for machine in query]\n session.close()\n return ret\n\n def getMachineNamesOnBundleStatus(self, ctx, fk_bundle, state, limit):\n session = create_session()\n query = session.query(CommandsOnHost).add_column(self.target.c.target_uuid).select_from(self.commands_on_host.join(self.commands).join(self.target)).filter(self.commands.c.fk_bundle == fk_bundle)\n if state in ['success', 'paused', 'stopped', 'running', 'failure']: # Global statuses\n query = query.filter(self.commands_on_host.c.current_state.in_(self.__getAllStatus()[state]))\n # Treat failed statuses\n elif state == \"fail_up\":\n query = query.filter(self.commands_on_host.c.uploaded == 'FAILED')\n elif state == \"fail_ex\":\n query = query.filter(self.commands_on_host.c.executed == 'FAILED')\n elif state == \"fail_rm\":\n query = query.filter(self.commands_on_host.c.deleted == 'FAILED')\n elif state == \"fail_inv\":\n query = query.filter(self.commands_on_host.c.inventoried == 'FAILED')\n elif state == \"fail_wol\":\n query = query.filter(self.commands_on_host.c.awoken == 'FAILED')\n elif state == \"fail_reboot\":\n query = query.filter(self.commands_on_host.c.rebooted == 'FAILED')\n elif state == \"fail_halt\":\n query = 
query.filter(self.commands_on_host.c.halted == 'FAILED')\n elif state == \"over_timed\":\n query = query.filter(self.commands_on_host.c.current_state == 'over_timed')\n\n # Limit list according to max_elements_for_static_list param in dyngroup.ini\n query = query.limit(limit)\n ret = [{'hostname': machine[0].host, 'target_uuid': machine[1]} for machine in query]\n session.close()\n return ret\n\n def getCommandOnBundleByState(self, ctx, fk_bundle, state, min = 0, max = -1):\n session = create_session()\n query = session.query(CommandsOnHost).add_column(self.target.c.target_uuid).select_from(self.commands_on_host.join(self.commands).join(self.target)).filter(self.commands.c.fk_bundle == fk_bundle).order_by(self.commands_on_host.c.host)\n ret = self.__filterOnStatus(ctx, query, state)\n session.close()\n if max != -1: ret = ret[min:max]\n return [{'coh_id': coh.id, 'uuid':coh.target_uuid, 'host':coh.host, 'start_date':coh.start_date, 'end_date':coh.end_date, 'current_state':coh.current_state} for coh in ret]\n\n def getCommandOnBundleStatus(self, ctx, fk_bundle):\n session = create_session()\n query = session.query(func.count(self.commands_on_host.c.id), CommandsOnHost).select_from(self.commands_on_host.join(self.commands)).filter(self.commands.c.fk_bundle == fk_bundle)\n ret = self.__getStatus(ctx, query)\n session.close()\n return ret\n\n def __putUUIDInCOH(self, coh, uuid):\n setattr(coh, 'target_uuid', uuid)\n return coh\n\n def __filterOnStatus(self, ctx, query, state):\n query = [self.__putUUIDInCOH(x[0], x[1]) for x in query]\n ret = self.__getStatus(ctx, query, True)\n if state in ret:\n return ret[state]['total'][1]\n for l1 in ret:\n if state in ret[l1]:\n return ret[l1][state][1]\n return None\n\n def getStateCoh(self, query, filter):\n \"\"\"\n Add filters to query and return the matching commands_on_host rows\n @param query: the query\n @type query: sqlalchemy query object\n @param filter: a list formatted like this: [[field, state], [field, state], ...]\n field is name of field in commands_on_host table\n state is a list of possible states to filter on\n @type filter: list\n\n @return: the first column of each matching row\n @rtype: list\n \"\"\"\n for f in filter:\n if isinstance(f[1], str): # f[1] must be a list\n f[1] = [f[1]]\n if len(f) == 3 and not f[2]:\n query = query.filter(not_(getattr(self.commands_on_host.c, f[0]).in_(f[1])))\n else:\n query = query.filter(getattr(self.commands_on_host.c, f[0]).in_(f[1]))\n\n return [machine[0] for machine in query]\n\n def getStateLen(self, query, filter):\n \"\"\"\n Add filters to query and return a SQL count() of this query\n @param query: the query\n @type query: sqlalchemy query object\n @param filter: a list formatted like this: [[field, state], [field, state], ...]\n field is name of field in commands_on_host table\n state is a list of possible states to filter on\n @type filter: list\n\n @return: SQL count()\n @rtype: int\n \"\"\"\n try:\n for f in filter:\n if isinstance(f[1], str): # f[1] must be a list\n f[1] = [f[1]]\n if len(f) == 3:\n if isinstance(f[2], bool):\n if f[2]:\n query = query.filter(getattr(self.commands_on_host.c, f[0]).in_(f[1]))\n else:\n query = query.filter(not_(getattr(self.commands_on_host.c, f[0]).in_(f[1])))\n elif f[2] == '<=':\n query = query.filter(getattr(self.commands_on_host.c, f[0]) <= f[1][0])\n elif f[2] == '>=':\n query = query.filter(getattr(self.commands_on_host.c, f[0]) >= f[1][0])\n else:\n query = query.filter(getattr(self.commands_on_host.c, f[0]).in_(f[1]))\n return int(query.scalar())\n except:\n return 0\n\n def 
__getAllStatus(self):\n \"\"\"\n return global statuses (success, paused, stopped, running, failure) by commands_on_host's current_state\n \"\"\"\n return {\n 'success': ['done'],\n 'paused': ['paused', 'pause'],\n 'stopped': ['stopped', 'stop'],\n 'running': ['wol_in_progress', 'upload_in_progress', 'upload_done', 'execution_in_progress', 'execution_done', 'delete_in_progress', 'delete_done', \\\n 'inventory_in_progress', 'inventory_done', 'reboot_in_progress', 'reboot_done', 'scheduled', 're_scheduled', \\\n 'halt_in_progress', 'halt_done'],\n 'failure': ['failed', 'upload_failed', 'execution_failed', 'delete_failed', 'inventory_failed', 'reboot_failed', 'halt_failed', \\\n 'not_reachable'],\n }\n\n def __getStatus(self, ctx, query, verbose = False):\n running = self.__getAllStatus()['running']\n failure = self.__getAllStatus()['failure']\n stopped = self.__getAllStatus()['stopped']\n paused = self.__getAllStatus()['paused']\n success = self.__getAllStatus()['success']\n now = time.strftime(\"%Y-%m-%d %H:%M:%S\")\n\n sec_up = self.getStateLen(query, [\n [\"current_state\", [\"over_timed\"], False],\n [\"end_date\", [now], '>='],\n [\"current_state\", paused, False],\n [\"current_state\", stopped, False],\n [\"attempts_left\", [0], False],\n [\"uploaded\", [\"FAILED\"]],\n ])\n sec_ex = self.getStateLen(query, [\n [\"current_state\", [\"over_timed\"], False],\n [\"end_date\", [now], '>='],\n [\"current_state\", paused, False],\n [\"current_state\", stopped, False],\n [\"attempts_left\", [0], False],\n [\"executed\", [\"FAILED\"]],\n ])\n sec_rm = self.getStateLen(query, [\n [\"current_state\", [\"over_timed\"], False],\n [\"end_date\", [now], '>='],\n [\"current_state\", paused, False],\n [\"current_state\", stopped, False],\n [\"attempts_left\", [0], False],\n [\"deleted\", [\"FAILED\"]],\n ])\n sec_inv = self.getStateLen(query, [\n [\"current_state\", [\"over_timed\"], False],\n [\"end_date\", [now], '>='],\n [\"current_state\", paused, False],\n [\"current_state\", stopped, False],\n [\"attempts_left\", [0], False],\n [\"inventoried\", [\"FAILED\"]],\n ])\n\n success_total = self.getStateLen(query, [[\"current_state\", success]])\n stopped_total = self.getStateLen(query, [[\"current_state\", stopped]])\n paused_total = self.getStateLen(query, [[\"current_state\", paused]])\n running_total = self.getStateLen(query, [[\"current_state\", running]]) + self.getStateLen(query, [[\"current_state\", failure], [\"end_date\", now, '>='], [\"attempts_left\", [0], False]])\n failure_total = self.getStateLen(query, [[\"current_state\", failure], [\"attempts_left\", [0]]]) \\\n + self.getStateLen(query, [[\"current_state\", [\"over_timed\"]]]) \\\n + self.getStateLen(query, [[\"current_state\", failure], [\"attempts_left\", [0], False], [\"end_date\", now, '<=']])\n\n try:\n total = int(query.scalar())\n except:\n total = 0\n\n ret = {\n 'total': total,\n 'success':{\n 'total':[success_total, []],\n },\n 'stopped':{\n 'total':[stopped_total, []],\n },\n 'paused':{\n 'total':[paused_total, []],\n },\n 'running':{\n 'total': [running_total, []],\n 'wait_up': [sum([sec_up,\n self.getStateLen(query,\n [\n [\"current_state\", [\"over_timed\"], False],\n [\"current_state\", paused, False],\n [\"current_state\", stopped, False],\n [\"uploaded\", [\"TODO\"]],\n ]\n )]),[]],\n 'run_up':[self.getStateLen(query,\n [\n [\"current_state\", \"upload_in_progress\"],\n ]), []],\n 'sec_up': [sec_up, []],\n 'wait_ex': [sum([sec_ex,\n self.getStateLen(query,\n [\n [\"current_state\", [\"over_timed\"], False],\n 
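# waiting to execute: not over_timed, not paused/stopped, upload finished, execute still TODO\n 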
[\"current_state\", paused, False],\n [\"current_state\", stopped, False],\n [\"uploaded\", [\"TODO\", \"FAILED\", \"WORK_IN_PROGRESS\"], False],\n [\"executed\", [\"TODO\"]],\n ]\n )]),[]],\n 'run_ex':[self.getStateLen(query,\n [\n [\"current_state\", [\"execution_in_progress\"]],\n ]), []],\n 'sec_ex': [sec_ex, []],\n 'wait_rm': [sum([sec_rm,\n self.getStateLen(query,\n [\n [\"current_state\", [\"over_timed\"], False],\n [\"current_state\", paused, False],\n [\"current_state\", stopped, False],\n [\"executed\", [\"TODO\", \"FAILED\", \"WORK_IN_PROGRESS\"], False],\n [\"deleted\", [\"TODO\"]],\n ]\n )]),[]],\n 'run_rm':[self.getStateLen(query,\n [\n [\"current_state\", [\"delete_in_progress\"]],\n ]), []],\n 'sec_rm': [sec_rm, []],\n 'wait_inv': [sum([sec_inv,\n self.getStateLen(query,\n [\n [\"current_state\", [\"over_timed\"], False],\n [\"current_state\", paused, False],\n [\"current_state\", stopped, False],\n [\"deleted\", [\"TODO\", \"FAILED\", \"WORK_IN_PROGRESS\"], False],\n [\"inventoried\", [\"TODO\"]],\n ]\n )]),[]],\n 'run_inv':[self.getStateLen(query,\n [\n [\"current_state\", [\"inventory_in_progress\"]],\n ]), []],\n 'sec_inv': [sec_inv, []],\n },\n 'failure':{\n 'total':[failure_total, []],\n 'fail_up': [self.getStateLen(query, [[\"current_state\", [\"failed\"]], [\"uploaded\", [\"FAILED\"]]]), []],\n 'conn_up': [self.getStateLen(query, [[\"attempts_left\", [0]], [\"uploaded\", [\"FAILED\"]], [\"current_state\", [\"not_reachable\"]]]), []],\n 'fail_ex': [self.getStateLen(query, [[\"current_state\", [\"failed\"]], [\"executed\", [\"FAILED\"]]]), []],\n 'conn_ex': [self.getStateLen(query, [[\"attempts_left\", [0]], [\"executed\", [\"FAILED\"]], [\"current_state\", [\"not_reachable\"]]]), []],\n 'fail_rm': [self.getStateLen(query, [[\"current_state\", [\"failed\"]], [\"deleted\", [\"FAILED\"]]]), []],\n 'conn_rm': [self.getStateLen(query, [[\"attempts_left\", [0]], [\"deleted\", [\"FAILED\"]], [\"current_state\", [\"not_reachable\"]]]), []],\n 'fail_inv': [self.getStateLen(query, [[\"current_state\", [\"failed\"]], [\"inventoried\", [\"FAILED\"]]]), []],\n 'conn_inv': [self.getStateLen(query, [[\"attempts_left\", [0]], [\"inventoried\", [\"FAILED\"]], [\"current_state\", [\"not_reachable\"]]]), []],\n 'over_timed':[self.getStateLen(query, [[\"current_state\", [\"over_timed\"]]]), []],\n 'fail_wol': [self.getStateLen(query, [[\"current_state\", [\"failed\"]], [\"awoken\", [\"FAILED\"]]]), []],\n 'fail_reboot': [self.getStateLen(query, [[\"current_state\", [\"failed\"]], [\"rebooted\", [\"FAILED\"]]]), []],\n 'fail_halt': [self.getStateLen(query, [[\"current_state\", [\"failed\"]], [\"halted\", [\"FAILED\"]]]), []],\n\n }\n }\n\n if verbose: # used for CSV generation\n for coh in query:\n if coh.current_state == 'done': # success\n if verbose: ret['success']['total'][1].append(coh)\n elif coh.current_state == 'stop' or coh.current_state == 'stopped': # stopped coh\n if verbose: ret['stopped']['total'][1].append(coh)\n elif coh.current_state == 'pause':\n if verbose: ret['paused']['total'][1].append(coh)\n elif coh.current_state == 'over_timed': # out of the valid period of execution (= failed)\n if verbose: ret['failure']['total'][1].append(coh)\n if verbose: ret['failure']['over_timed'][1].append(coh)\n elif coh.attempts_left == 0 and (coh.uploaded == 'FAILED' or coh.executed == 'FAILED' or coh.deleted == 'FAILED'): # failure\n if verbose: ret['failure']['total'][1].append(coh)\n if coh.uploaded == 'FAILED':\n if verbose: ret['failure']['fail_up'][1].append(coh)\n if 
coh.current_state == 'not_reachable':\n if verbose: ret['failure']['conn_up'][1].append(coh)\n elif coh.executed == 'FAILED':\n if verbose: ret['failure']['fail_ex'][1].append(coh)\n if coh.current_state == 'not_reachable':\n if verbose: ret['failure']['conn_ex'][1].append(coh)\n elif coh.deleted == 'FAILED':\n if verbose: ret['failure']['fail_rm'][1].append(coh)\n if coh.current_state == 'not_reachable':\n if verbose: ret['failure']['conn_rm'][1].append(coh)\n elif coh.attempts_left != 0 and (coh.uploaded == 'FAILED' or coh.executed == 'FAILED' or coh.deleted == 'FAILED'): # fail but can still try again\n if verbose: ret['running']['total'][1].append(coh)\n if coh.uploaded == 'FAILED':\n if verbose: ret['running']['wait_up'][1].append(coh)\n if verbose: ret['running']['sec_up'][1].append(coh)\n elif coh.executed == 'FAILED':\n if verbose: ret['running']['wait_ex'][1].append(coh)\n if verbose: ret['running']['sec_ex'][1].append(coh)\n elif coh.deleted == 'FAILED':\n ret['running']['wait_rm'][0] += 1\n ret['running']['sec_rm'][0] += 1\n else: # running\n if verbose and coh.deleted != 'DONE' and coh.deleted != 'IGNORED': ret['running']['total'][1].append(coh)\n if coh.deleted == 'DONE' or coh.deleted == 'IGNORED': # done\n if verbose: ret['success']['total'][1].append(coh)\n elif coh.executed == 'DONE' or coh.executed == 'IGNORED': # delete running\n if coh.deleted == 'WORK_IN_PROGRESS':\n if verbose: ret['running']['run_rm'][1].append(coh)\n else:\n if verbose: ret['running']['wait_rm'][1].append(coh)\n elif coh.uploaded == 'DONE' or coh.uploaded == 'IGNORED': # exec running\n if coh.executed == 'WORK_IN_PROGRESS':\n if verbose: ret['running']['run_ex'][1].append(coh)\n else:\n if verbose: ret['running']['wait_ex'][1].append(coh)\n else: # upload running\n if coh.uploaded == 'WORK_IN_PROGRESS':\n if verbose: ret['running']['run_up'][1].append(coh)\n else:\n if verbose: ret['running']['wait_up'][1].append(coh)\n\n return ret\n\n def antiPoolOverflowErrorback(self, reason):\n \"\"\"\n an errback which handles QueuePool-like errors by:\n - intercepting all exceptions\n - trapping only SA \"TimeoutError\" exceptions\n - if the exception is identified as a TimeoutError, recreating the pool\n - then raising the error anew\n \"\"\"\n reason.trap(TimeoutError)\n if self.db.pool._max_overflow > -1 and self.db.pool._overflow >= self.db.pool._max_overflow:\n logging.getLogger().error('Timeout then overflow (%d vs. %d) detected in SQL pool: check your network connectivity!' 
% (self.db.pool._overflow, self.db.pool._max_overflow))\n self.db.pool.dispose()\n self.db.pool = self.db.pool.recreate()\n return reason\n","repo_name":"mandriva-management-console/mmc","sub_path":"pulse2/services/pulse2/database/msc/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":85495,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"85"} +{"seq_id":"42506077655","text":"import os\nimport nester\nimport pickle\n\"\"\"A test program to test exceptions\"\"\"\nman = []\nother = []\ntry:\n data = open('/home/lgt/Program/pythonlearn/excep/sketch.txt')\n for each_line in data:\n try:\n if each_line.find(':') != -1:\n (role, line_spoken) = each_line.split(':', 1)\n line_spoken = line_spoken.strip()\n if role == 'Man':\n man.append(line_spoken)\n elif role == 'Other Man':\n other.append(line_spoken)\n except ValueError:\n pass\n data.close()\nexcept IOError:\n print(\"The datafile is missing\")\ntry:\n with open('man_data.txt', 'wb') as man_data, open('other_data.txt', 'wb') as other_data:\n pickle.dump(man, man_data)\n pickle.dump(other, other_data)\nexcept IOError as err:\n print(\"Can't write to file: \" + str(err))\nexcept pickle.PickleError as perr:\n print('Pickling Error: ' + str(perr))\n# no finally block needed: the with statement closes both files\n","repo_name":"liguitong/pythonlearn","sub_path":"excep/exce.py","file_name":"exce.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"27621863716","text":"# Import the libraries\nimport random\nimport requests\n\n# Define the chatbot class\nclass ScriptBot:\n\n # Initialize the chatbot with a name and a greeting\n def __init__(self, name, greeting):\n self.name = name\n self.greeting = greeting\n\n # Define the method to generate a response\n def respond(self, user_input):\n\n # If the user input is empty, return the greeting\n if not user_input:\n return self.greeting\n\n # If the user input is \"bye\", end the conversation\n elif user_input.lower() == \"bye\":\n return \"Bye, have a nice day!\"\n\n # If the user input is \"what is your name?\", return the name\n elif user_input.lower() == \"what is your name?\":\n return f\"My name is {self.name}.\"\n\n # If the user input is \"what can you do?\", return the capabilities\n elif user_input.lower() == \"what can you do?\":\n return f\"I can generate a few scripts for you. Just tell me what kind of script you want.\"\n\n # If the user input is \"generate a script for X\", try to generate a script for X\n elif user_input.lower().startswith(\"generate a script for \"):\n\n # Extract the script type from the user input\n script_type = user_input.lower().replace(\"generate a script for \", \"\")\n\n # Check if the script type is valid\n if script_type in [\"a poem\", \"a story\", \"a song\", \"a joke\"]:\n\n # Call the API to generate a script for the script type\n url = f\"https://api.scriptbot.com/{script_type}\"\n response = requests.get(url)\n\n # Check if the API call was successful\n if response.status_code == 200:\n\n # Extract the script from the response\n script = response.text\n\n # Return the script\n return f\"Here's your {script_type}:\\n{script}\"\n\n # If the API call was not successful, return an error message\n else:\n return f\"Sorry, something went wrong. Please try again later.\"\n\n # If the script type is not valid, return an error message\n else:\n return f\"Sorry, I can't generate a script for {script_type}. 
Please choose one of these options: a poem, a story, a song, or a joke.\"\n\n # If the user input is anything else, return a default message\n else:\n return \"I'm sorry, I don't understand. Please ask me something else.\"\n\n\n# Create an instance of the chatbot with a name and a greeting\nbot = ScriptBot(\"Scripty\", \"Hello, I'm Scripty. I can generate a few scripts for you.\")\n\n# Start the conversation loop\nwhile True:\n\n # Get the user input\n user_input = input(\"You: \")\n\n # Generate the bot response\n bot_response = bot.respond(user_input)\n\n # Print the bot response\n print(f\"{bot.name}: {bot_response}\")\n\n # Break the loop if the user input is \"bye\"\n if user_input.lower() == \"bye\":\n break\n","repo_name":"AR-DEV-1/A.I","sub_path":"A.I.py","file_name":"A.I.py","file_ext":"py","file_size_in_byte":2757,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"85"} +{"seq_id":"17653030304","text":"# Global Variables\nimport os\nimport random\nimport math\nimport pygame\nfrom os import listdir\nfrom os.path import isfile, join \npygame.init()\n\npygame.display.set_caption(\"Platformer\")\n\nWIDTH, HEIGHT = 1000, 800\nFPS = 100\nFPS2 = 60\nPLAYER_VEL1 = 5\nPLAYER_VEL2 = 5\nyellow = (255, 255, 0)\nred = (255, 0, 0)\n\nwindow = pygame.display.set_mode((WIDTH, HEIGHT))\n\ndef flip(sprites):\n return [pygame.transform.flip(sprite, True, False) for sprite in sprites]\n\ndef load_sprite_sheets(dir1, dir2, width, height, direction = False):\n path = join(dir1, dir2)\n images = [f for f in listdir(path) if isfile(join(path, f))]\n\n all_sprites = {}\n\n for image in images:\n sprite_sheets = pygame.image.load(join(path, image)).convert_alpha()\n\n sprites = []\n for i in range(sprite_sheets.get_width() // width):\n surface = pygame.Surface((width,height), pygame.SRCALPHA, 32)\n rect = pygame.Rect(i * width, 0, width, height)\n surface.blit(sprite_sheets, (0, 0), rect)\n sprites.append(pygame.transform.scale2x(surface))\n\n if direction:\n all_sprites[image.replace(\".png\", \"\") + \"_right\"] = sprites\n all_sprites[image.replace(\".png\", \"\") + \"_left\"] = flip(sprites)\n else:\n all_sprites[image.replace(\".png\", \"\")] = sprites\n\n return all_sprites\n\n\ndef get_block(size):\n path = join(\"Terrain\", \"Terrain.png\")\n image = pygame.image.load(path).convert_alpha()\n surface = pygame.Surface((size, size), pygame.SRCALPHA, 32)\n rect = pygame.Rect(96, 64, size, size) # 96, 0, size, size\n surface.blit(image, (0, 0), rect)\n return pygame.transform.scale2x(surface)\n\n# def get_door(size):\n# path = join(\"Other\", \"door.png\")\n# image = pygame.image.load(path).convert_alpha()\n# surface = pygame.Surface((size, size), pygame.SRCALPHA, 32)\n# rect = pygame.Rect(0, 0, size, size)\n# surface.blit(image, (0, 0), rect)\n# return pygame.transform.scale2x(surface)\n\n# Player One\nclass Duck(pygame.sprite.Sprite):\n HEALTH = 100\n COLOR = (255, 0, 255)\n GRAVITY = 1\n SPRITES = load_sprite_sheets(\"MainCharacters\", \"MaskDude\", 32, 32, True)\n ANIMATION_DELAY = 3\n\n def __init__ (self, x, y, width, height):\n super().__init__()\n self.rect = pygame.Rect(x, y, width, height)\n self.x_vel = 0\n self.y_vel = 0\n self.mask = None\n self.direction = \"left\"\n self.animation_count = 0\n self.fall_count = 0\n self.jump_count = 0\n self.hit = False\n self.hit_count = 0\n\n def jump(self):\n self.y_vel = -self.GRAVITY * 8\n self.animation_count = 0\n self.jump_count += 1\n if self.jump_count == 2:\n self.fall_count = 0\n\n def move(self, dx, 
dy):\n self.rect.x += dx\n self.rect.y += dy\n\n def make_hit(self):\n self.hit = True\n self.hit_count = 0 \n # self.HEALTH = self.HEALTH - self.hit_count\n # if self.HEALTH == 0:\n # self.x_vel = 0\n # self.y_vel = 0\n\n def move_left(self, vel):\n self.x_vel = -vel\n if self.direction != \"left\":\n self.direction = \"left\"\n self.animation_count = 0\n \n def move_right (self,vel):\n self.x_vel = vel\n if self.direction != \"right\":\n self.direction = \"right\"\n self.animation_count = 0\n\n def loop(self, fps):\n self.y_vel += min(1, (self.fall_count / fps) * self.GRAVITY)\n self.move(self.x_vel, self.y_vel)\n\n if self.hit:\n self.hit_count += 1\n if self.hit_count > fps * 2:\n self.hit = False\n self.hit_count = 0\n\n self.fall_count += 1\n self.update_sprite()\n\n def landed(self):\n self.fall_count = 0\n self.y_vel = 0\n self.jump_count = 0\n\n def hit_head(self):\n self.count = 0\n self.y_vel *= -1\n\n # def door(self):\n # self.rect.x = 1000\n\n def update_sprite(self):\n sprite_sheet = \"idlemodif\"\n if self.hit:\n sprite_sheet = \"hitmodif2\"\n elif self.y_vel < 0:\n if self.jump_count == 1:\n sprite_sheet=\"jumpmodif\"\n elif self.jump_count == 2 or self.jump_count == 3:\n sprite_sheet=\"double_jumpmodif\"\n elif self.y_vel > self.GRAVITY * 2:\n sprite_sheet = \"fallmodif\"\n\n elif self.x_vel != 0:\n sprite_sheet = \"runmodif\"\n \n sprite_sheet_name = sprite_sheet + \"_\" + self.direction\n sprites = self.SPRITES[sprite_sheet_name]\n sprite_index = (self.animation_count // self.ANIMATION_DELAY) % len(sprites)\n self.sprite = sprites[sprite_index]\n self.animation_count += 1\n self.update()\n\n def update(self):\n self.rect = self.sprite.get_rect(topleft = (self.rect.x, self.rect.y))\n self.mask = pygame.mask.from_surface(self.sprite)\n \n def draw(self, win, offset_x):\n win.blit(self.sprite, (self.rect.x - offset_x, self.rect.y))\n\n\n# Player Two\nclass Goose(pygame.sprite.Sprite):\n COLOR = (255, 0, 255)\n GRAVITY = 1\n SPRITES = load_sprite_sheets(\"MainCharacters\", \"NinjaFrog\", 32, 32, True)\n ANIMATION_DELAY = 3\n\n def __init__ (self, x, y, width, height):\n super().__init__()\n self.rect = pygame.Rect(x, y, width, height)\n self.x_vel = 0\n self.y_vel = 0\n self.mask = None\n self.direction = \"left\"\n self.animation_count = 0\n self.fall_count = 0\n self.jump_count = 0\n self.hit = False\n self.hit_count = 0\n\n def jump(self):\n self.y_vel = -self.GRAVITY * 8\n self.animation_count = 0\n self.jump_count += 1\n if self.jump_count == 2:\n self.fall_count = 0\n\n def move(self, dx, dy):\n self.rect.x += dx\n self.rect.y += dy\n\n def make_hit(self):\n self.hit = True\n self.hit_count = 0 \n\n def move_left(self, vel):\n self.x_vel = -vel\n if self.direction != \"left\":\n self.direction = \"left\"\n self.animation_count = 0\n \n def move_right (self,vel):\n self.x_vel = vel\n if self.direction != \"right\":\n self.direction = \"right\"\n self.animation_count = 0\n\n def loop(self, fps):\n self.y_vel += min(1, (self.fall_count / fps) * self.GRAVITY)\n self.move(self.x_vel * 1.25, self.y_vel * 1.25)\n\n if self.hit:\n self.hit_count += 1\n if self.hit_count > fps * 2:\n self.hit = False\n self.hit_count = 0\n\n self.fall_count += 1\n self.update_sprite()\n\n def landed(self):\n self.fall_count = 0\n self.y_vel = 0\n self.jump_count = 0\n\n def hit_head(self):\n self.count = 0\n self.y_vel *= -1\n\n def update_sprite(self):\n sprite_sheet = \"idlemodif\"\n if self.hit:\n sprite_sheet = \"hitmodif\"\n elif self.y_vel < 0:\n if self.jump_count == 1:\n 
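# a single jump in flight uses the plain jump animation\n 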
sprite_sheet=\"jumpmodif\"\n elif self.jump_count == 2:\n sprite_sheet=\"double_jumpmodif\"\n elif self.y_vel > self.GRAVITY * 2:\n sprite_sheet = \"fallmodif\"\n\n elif self.x_vel != 0:\n sprite_sheet = \"runmodif\"\n \n sprite_sheet_namee = sprite_sheet + \"_\" + self.direction\n spritestwo = self.SPRITES[sprite_sheet_namee]\n sprite_index = (self.animation_count // self.ANIMATION_DELAY) % len(spritestwo)\n self.spritea = spritestwo[sprite_index]\n self.animation_count += 1\n self.update()\n\n def update(self):\n self.rect = self.spritea.get_rect(topleft = (self.rect.x, self.rect.y))\n self.mask = pygame.mask.from_surface(self.spritea)\n \n def draw(self, win, offset_x):\n win.blit(self.spritea, (self.rect.x - offset_x, self.rect.y))\n\n\nclass Object(pygame.sprite.Sprite):\n def __init__(self, x, y, width, height, name=None):\n super().__init__()\n self.rect = pygame.Rect(x, y, width, height)\n self.image = pygame.Surface((width, height), pygame.SRCALPHA)\n self.width = width\n self.height = height\n self.name = name\n\n def draw(self, win, offset_x):\n win.blit(self.image, (self.rect.x - offset_x, self.rect.y))\n\nclass Block(Object):\n def __init__(self, x, y, size):\n super().__init__(x, y, size, size)\n block = get_block(size)\n self.image.blit(block, (0,0))\n self.mask = pygame.mask.from_surface(self.image)\n\nclass Fire(Object):\n ANIMATION_DELAY = 3\n\n def __init__(self, x, y, width, height):\n super().__init__(x, y, width, height, \"fire\")\n self.fire = load_sprite_sheets(\"Traps\", \"Fire\", width, height)\n self.image = self.fire[\"off\"][0]\n self.mask = pygame.mask.from_surface(self.image)\n self.animation_count = 0\n self.animation_name = \"off\"\n\n def on(self):\n self.animation_name = \"on\"\n\n def off(self):\n self.animation_name = \"off\"\n\n def loop(self):\n sprites = self.fire[self.animation_name]\n sprite_index = (self.animation_count // self.ANIMATION_DELAY) % len(sprites)\n\n self.image = sprites[sprite_index]\n self.animation_count += 1\n\n self.rect = self.image.get_rect(topleft = (self.rect.x, self.rect.y))\n self.mask = pygame.mask.from_surface(self.image)\n\n if self.animation_count // self.ANIMATION_DELAY > len(sprites):\n self.animation_count = 0\n\n# class Door(Object):\n# def __init__(self, x, y, size):\n# super().__init__(x, y, size, size)\n# door = get_door(size)\n# self.image.blit(door, (0,0))\n# self.mask = pygame.mask.from_surface(self.image)\n\n# Background\ndef get_background(name):\n image = pygame.image.load(join(\"Background\", name))\n _, _, width, height = image.get_rect() # The first two underscores are x and y. 
We don't need them in this game\n    tiles = []\n\n    for i in range(WIDTH // width + 1):\n        for j in range(HEIGHT // height + 1):\n            pos = (i * width, j * height)\n            tiles.append(pos)\n\n    return tiles, image \n\ndef draw(window, background, bg_image, duck, goose, objects, offset_x):\n    for tile in background:\n        window.blit(bg_image, tile)\n\n    for obj in objects:\n        obj.draw(window, offset_x)\n\n    duck.draw(window, offset_x)\n    goose.draw(window, offset_x)\n\n    pygame.display.update()\n\ndef handle_vertical_collision(duck, goose, objects, dyOne, dyTwo):\n    collided_objects = []\n    for obj in objects:\n        if pygame.sprite.collide_mask(duck, obj):\n            if dyOne > 0:\n                duck.rect.bottom = obj.rect.top\n                duck.landed()\n            elif dyOne < 0:\n                duck.rect.top = obj.rect.bottom\n                duck.hit_head()\n\n            collided_objects.append(obj)\n\n        if pygame.sprite.collide_mask(goose, obj):\n            if dyTwo > 0:\n                goose.rect.bottom = obj.rect.top\n                goose.landed()\n            elif dyTwo < 0:\n                goose.rect.top = obj.rect.bottom\n                goose.hit_head()\n            \n            collided_objects.append(obj)\n\n    return collided_objects\n\ndef collide(player, objects, dx):\n    player.move(dx, 0)\n    player.update()\n    collided_object = None\n    for obj in objects:\n        if pygame.sprite.collide_mask(player, obj):\n            collided_object = obj \n            break\n\n    player.move(-dx, 0)\n    player.update()\n    return collided_object\n\n\ndef handle_move(duck, goose, objects):\n    keys = pygame.key.get_pressed()\n\n    duck.x_vel = 0\n    goose.x_vel = 0\n    # each player needs its own collision probes; reusing one pair of names\n    # would discard the duck's results\n    duck_collide_left = collide(duck, objects, -PLAYER_VEL1 * 2)\n    duck_collide_right = collide(duck, objects, PLAYER_VEL1 * 2)\n    goose_collide_left = collide(goose, objects, -PLAYER_VEL2 * 2)\n    goose_collide_right = collide(goose, objects, PLAYER_VEL2 * 2)\n\n    if keys[pygame.K_a] and not duck_collide_left:\n        duck.move_left(PLAYER_VEL1)\n    elif keys[pygame.K_d] and not duck_collide_right:\n        duck.move_right(PLAYER_VEL1)\n    if keys[pygame.K_LEFT] and not goose_collide_left:\n        goose.move_left(PLAYER_VEL2)\n    elif keys[pygame.K_RIGHT] and not goose_collide_right:\n        goose.move_right(PLAYER_VEL2)\n\n\n    # display_surface = pygame.display.set_mode((WIDTH, HEIGHT))\n    # font = pygame.font.Font('freesansbold.ttf', 32)\n    # text = font.render('GeeksForGeeks', True, red, yellow)\n    # textRect = text.get_rect()\n\n    vertical_collide = handle_vertical_collision(duck, goose, objects, duck.y_vel, goose.y_vel)\n    to_check = [duck_collide_left, duck_collide_right, goose_collide_left, goose_collide_right, *vertical_collide]\n    for obj in to_check:\n        if obj and obj.name == \"fire\":\n            duck.make_hit()\n            goose.make_hit()\n    if goose.rect.x == duck.rect.x and goose.rect.y == duck.rect.y:\n        duck.make_hit()\n\n    # if obj and obj.name == \"door\":\n    #     display_surface.blit(text, textRect)\n\n# Main Function\ndef main(window):\n    # Event Loop:\n    clock = pygame.time.Clock()\n    background, bg_image = get_background(\"Yellow.png\")\n\n    block_size = 96\n    \n    duck = Duck(200, 400, 50, 50)\n    goose = Goose(300, 400, 50, 50)\n    floor = [Block(i * block_size, HEIGHT - block_size, block_size) for i in range(-WIDTH // block_size, WIDTH * 2 // block_size)]\n    # blocks = [Block(0, HEIGHT - block_size, block_size)]\n    fire = Fire(100, HEIGHT - block_size - 64, 16, 32)\n    fire.on()\n    # door = Door(200, HEIGHT - block_size - 64, 100)\n    objects = [\n        *floor, \n        Block(0, HEIGHT - block_size * 2, block_size), \n        Block(block_size * 4, HEIGHT - block_size * 4, block_size), \n        Block(block_size * 3, HEIGHT - block_size * 7, block_size),\n        Block(block_size * 4, HEIGHT - block_size * 7, block_size),\n        Block(block_size * 5, HEIGHT - block_size * 7, block_size),\n        Block(block_size * 3, HEIGHT - block_size * 7, block_size),\n        Block(block_size * 
2, HEIGHT - block_size * 7, block_size),\n Block(block_size, HEIGHT - block_size * 7, block_size),\n Block(block_size % 2, HEIGHT - block_size * 7, block_size),\n fire,\n Block(block_size * 11, HEIGHT - block_size * 7, block_size),\n Block(block_size * 12, HEIGHT - block_size * 7, block_size),\n Block(block_size * 13, HEIGHT - block_size * 7, block_size),\n Block(block_size * 14, HEIGHT - block_size * 7, block_size)\n #, door\n ]\n\n offset_x = 0\n offset_y = 0\n scroll_area_width = 150\n scroll_area_height = 100\n\n run = True\n while run:\n clock.tick(FPS)\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n run = False\n break\n \n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_SPACE and duck.jump_count < 4:\n duck.jump()\n if event.key == pygame.K_UP and goose.jump_count < 2:\n goose.jump()\n \n # collideleft = collide(Duck, objects, -PLAYER_VEL1 * 2)\n # collideright = collide(Duck, objects, PLAYER_VEL1 * 2)\n # verticalcollide = handle_vertical_collision(Duck, goose, objects, Duck.y_vel, goose.y_vel)\n # to_check = [collideleft, collideright, *verticalcollide]\n # for obj in to_check:\n # if obj and obj.name == \"door\":\n # run = False\n # break\n\n duck.loop(FPS2)\n goose.loop(FPS)\n fire.loop()\n handle_move(duck, goose, objects)\n draw(window, background, bg_image, duck, goose, objects, offset_x)\n\n keys = pygame.key.get_pressed()\n bool1 = keys[pygame.K_t]\n bool2 = keys[pygame.K_y]\n\n if bool1 == True:\n if (((duck.rect.right - offset_x >= WIDTH - scroll_area_width) and duck.x_vel > 0) or ((duck.rect.left - offset_x <= scroll_area_width) and duck.x_vel < 0)):\n offset_x += duck.x_vel\n if (((duck.rect.bottom - offset_y >= HEIGHT - scroll_area_height) and duck.y_vel > 0) or ((duck.rect.top - offset_y <= scroll_area_height) and duck.y_vel < 0)):\n offset_y += (duck.y_vel * 1)\n\n elif bool2 == True:\n if (((goose.rect.right - offset_x >= WIDTH - scroll_area_width) and goose.x_vel > 0) or ((goose.rect.left - offset_x <= scroll_area_width) and goose.x_vel < 0)):\n offset_x += (goose.x_vel * 1.25)\n if (((goose.rect.bottom - offset_y >= HEIGHT - scroll_area_height) and goose.y_vel > 0) or ((goose.rect.top - offset_y <= scroll_area_height) and goose.y_vel < 0)):\n offset_y += (goose.y_vel * 1)\n\n else:\n if (((duck.rect.right - offset_x >= WIDTH - scroll_area_width) and duck.x_vel > 0) or ((duck.rect.left - offset_x <= scroll_area_width) and duck.x_vel < 0)):\n offset_x += duck.x_vel\n if (((duck.rect.bottom - offset_y >= HEIGHT - scroll_area_height) and duck.y_vel > 0) or ((duck.rect.top - offset_y <= scroll_area_height) and duck.y_vel < 0)):\n offset_y += (duck.y_vel * 1)\n \n\n pygame.quit()\n quit()\n\nif __name__ == \"__main__\":\n main(window)\n","repo_name":"NicolasThaddeusL/PlatformerPygame1","sub_path":"version2.py","file_name":"version2.py","file_ext":"py","file_size_in_byte":17169,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"17019293197","text":"from node import Node\n\n\nclass LinkedList:\n\n def __init__(self):\n self.head = None\n\n def insert(self, value, currentNode=None):\n if self.head is None:\n self.head = Node(value)\n return self\n elif currentNode is None:\n currentNode = self.head\n\n if currentNode.nextNode is None:\n currentNode.nextNode = Node(value)\n else:\n self.insert(value, currentNode.nextNode)\n return self\n\n def printList(self, currentNode=None):\n if self.head is None:\n print(\"No nodes in the list\")\n return self\n elif currentNode is None:\n 
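# A sketch, not part of the original file: the recursive insert above adds
# one stack frame per node, so very long lists can hit Python's default
# recursion limit (~1000).  The same chaining API works iteratively
# (IterNode mirrors the node.py class, with value/nextNode attributes):
class IterNode:
    def __init__(self, value):
        self.value = value
        self.nextNode = None

class IterativeLinkedList:
    def __init__(self):
        self.head = None

    def insert(self, value):
        if self.head is None:
            self.head = IterNode(value)
            return self
        current = self.head
        while current.nextNode is not None:   # walk to the tail in a loop
            current = current.nextNode
        current.nextNode = IterNode(value)
        return self                           # keep the fluent chaining

IterativeLinkedList().insert(1).insert(2).insert(3)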
currentNode = self.head\n\n print(\"Node(\", currentNode.value, \")\", end=\"\")\n\n if currentNode.nextNode is not None:\n print(\" -> \", end=\"\")\n self.printList(currentNode.nextNode)\n else:\n print(\"\")\n return self\n\n\nLinkedList().insert(1).insert(2).insert(3).insert(4).printList()\n","repo_name":"manuabhijit/competitive-programming-practice","sub_path":"data-structures/modules/linked-list.py","file_name":"linked-list.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"23589002265","text":"import os\nimport re\n\ndef removeVideo(file_path):\n os.remove(file_path)\n\n\nimport pytube\nfrom pydub import AudioSegment\n\ndef mp4towav(video_path, base_path):\n mp4_file_path = video_path\n wav_file_path = os.path.join(base_path, 'youtube.wav')\n print('mp4 path', mp4_file_path)\n print('wav path', wav_file_path)\n \n audio = AudioSegment.from_file(mp4_file_path, format='mp4')\n audio.export(wav_file_path, format='wav')\n\n return wav_file_path\n\ndef saveVideo(url, base_path):\n data = pytube.YouTube(url)\n title = data.streams[0].title\n length = data.length\n video = data.streams.filter(file_extension='mp4').first() # video를 위한 mp4 download\n video_path = video.download(base_path) # file name 설정 필요\n\n # video rename\n base, ext = os.path.split(video_path)\n new_video_path = os.path.join(base,'youtube_original.mp4')\n #os.rename(video_path, new_video_path)\n os.replace(video_path, new_video_path)\n\n audio_path = mp4towav(new_video_path, base_path)\n\n print(audio_path)\n\n \n\n return new_video_path, audio_path, title, length\n\n\ndef return_file_name_dict(key):\n base_path = os.getcwd()\n base_path = os.path.join(base_path, \"web_app\", \"static\", key)\n\n list=os.listdir(base_path)\n\n cont={'cut_youtube':[],'scene_gif':[],'scene_youtube':[],'sum_gif':[],'voice_image':[],'voice_youtube':[],}\n\n for i in list:\n if i.startswith('cut_youtube'):\n cont['cut_youtube'].append(i)\n elif i.startswith('scene_gif'):\n cont['scene_gif'].append(i)\n elif i.startswith('scene_youtube'):\n cont['scene_youtube'].append(i)\n elif i.startswith('sum_gif'):\n cont['sum_gif'].append(i)\n elif i.startswith('voice_image'):\n cont['voice_image'].append(i)\n elif i.startswith('voice_youtube'):\n cont['voice_youtube'].append(i)\n\n for i in cont.keys():\n cont[i].sort(key=lambda f: int(re.sub('\\D', '', f)))\n\n return cont","repo_name":"JJooKim/ai_dev_teamB4","sub_path":"final_proj/web_app/Youtube/pre_processing.py","file_name":"pre_processing.py","file_ext":"py","file_size_in_byte":1999,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"42304088157","text":"import random\nimport string\n\n\ndef _alphabetical():\n letters = string.ascii_letters\n length = random.randint(3, 15)\n mystr = ''.join(random.choices(letters, k=length))\n return mystr\n\n\ndef _real():\n return random.uniform(10, 1000000)\n\n\ndef _integers():\n return random.randint(1, 1000000)\n\n\ndef _alphanumerics():\n letters_digits = string.ascii_letters + string.digits\n length = random.randint(5, 20)\n prefix_length = random.randint(0, 10)\n suffix_length = random.randint(0, 10)\n prefix = ''.join([' ' for i in range(prefix_length)])\n suffix = ''.join([' ' for i in range(suffix_length)])\n mystr = prefix + ''.join(random.choices(letters_digits, k=length)) + suffix\n return mystr\n\n\ndef generate_string(default_type='alphabetical'):\n default_type = default_type.strip()\n 
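# A sketch, not part of the original file: the if-chain in generate_string
# below can be table-driven.  The lambdas compress the generators defined
# above and are illustrative, not the module's real helpers.
import random
import string

_GENERATORS = {
    'alphabetical': lambda: ''.join(random.choices(string.ascii_letters, k=8)),
    'real': lambda: random.uniform(10, 1000000),
    'int': lambda: random.randint(1, 1000000),
}

def generate_value(kind='alphabetical'):
    try:
        return _GENERATORS[kind.strip()]()   # one dict lookup replaces four branches
    except KeyError:
        raise ValueError("Does not support type {}".format(kind))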
if default_type.__eq__('alphabetical'):\n return _alphabetical()\n if default_type.__eq__('real'):\n return _real()\n if default_type.__eq__('int'):\n return _integers()\n if default_type.__eq__('alphanumeric'):\n return _alphanumerics()\n raise Exception(\"Does not support type {}\".format(default_type))\n\n\nif __name__ == '__main__':\n print(generate_string())\n print(generate_string(default_type='real'))\n print(generate_string(default_type='int'))\n print(generate_string(default_type='alphanumeric'))\n","repo_name":"ladin157/Omnilytics","sub_path":"generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":1360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"31396581928","text":"import sys\n\nsys.stdin = open('input.txt', 'r')\n\nt = int(input())\nfor tc in range(t):\n n, m = map(int, input().split())\n arr = [list(map(int, input().split())) for _ in range(n)]\n print(arr)\n\n mx = 0\n sm = 0\n for i in range(n):\n for j in range(n):\n for di, dj in ((0,0), (1, 0), (0, 1), (1,1)):\n ni, nj = i + di, j + dj\n\n if 0<=nivar props=({\"activeInvestors|investor\".*),locationnal='''\n\n def get_content_list(self, html):\n json_content = json.loads(html)\n content_list = [{'b_id':i['id'],'title':i['title'],'img':i['cover']} for i in json_content['data']['items']]\n return content_list\n\n def save_content(self, content_list):\n with open('3kr.txt', 'a',encoding='utf-8') as f:\n for content in content_list:\n f.write(content['title'])\n f.write('\\n')\n f.write(content['img'])\n f.write('\\n')\n f.write(\"http://36kr.com/p/{}.html\".format(content['b_id']))\n f.write('\\n')\n self.b_id = str(content['b_id']) if content['b_id'] else None\n print(content)\n\n def run(self):\n # 1.start_url\n next_url = self.start_url\n while next_url is not None:\n # 2.发送请求,获取响应\n html = parse_url(next_url)\n # 3.提取数据,id\n content_list = self.get_content_list(html) if html is not None else []\n # 4.保存数据\n self.save_content(content_list)\n next_url = \"http://36kr.com/api/info-flow/main_site/posts?b_id=\" + self.b_id + \"&per_page=20\" if self.b_id else None\n\n\nif __name__ == '__main__':\n kr = KrSpider()\n kr.run()\n\n","repo_name":"jyongforever/Spider_test","sub_path":"36kr/try_36kr.py","file_name":"try_36kr.py","file_ext":"py","file_size_in_byte":1667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"23727230609","text":"from botocore.vendored import requests\n\nfrom smartthings_bridge.aws_iot.device_shadow import update_shadow\nfrom smartthings_bridge.aws_iot.mqtt import publish\nfrom smartthings_bridge.aws_iot.thing_registration import register_thing\nfrom smartthings_bridge import device_registry\n\n\nDEVICE_EVENT_TOPIC = 'house/device_event'\n\n\ndef register_things_for_capability(event, context):\n auth_token = event['auth_token']\n capability = event['capability']\n\n devices = requests.get(\n 'https://api.smartthings.com/v1/devices?capability={}'.format(capability),\n headers={\n 'Authorization': 'Bearer ' + auth_token,\n 'Content-type': 'application/json',\n },\n ).json()['items']\n\n for device in devices:\n register_thing(\n device_id=device['deviceId'],\n device_name=device['label'],\n capability=capability,\n )\n\n device_registry.upsert_device(\n device_id=device['deviceId'],\n device_name=device['label'],\n )\n\n\ndef device_event_handler(device_id, capability, attribute, value, timestamp):\n update_shadow(\n device_id,\n {attribute: value}\n )\n\n 
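# A sketch, not part of the original file: the 36kr spider above pages an
# API by threading the last item's id (b_id) into the next request URL.
# The generic cursor pattern, with fetch_json as a placeholder for e.g.
# lambda url: requests.get(url).json():
def paginate(fetch_json, base_url):
    """Yield items page by page until the API returns an empty page."""
    url = base_url
    while url:
        items = fetch_json(url).get("items", [])
        if not items:
            return                      # no more pages
        yield from items
        url = "{}&b_id={}".format(base_url, items[-1]["id"])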
publish(\n DEVICE_EVENT_TOPIC,\n {\n 'device_id': device_id,\n 'capability': capability,\n 'attribute': attribute,\n 'value': value,\n 'timestamp': timestamp,\n },\n )\n","repo_name":"jimmingcheng/smartthings_bridge","sub_path":"smartthings_bridge/bridge.py","file_name":"bridge.py","file_ext":"py","file_size_in_byte":1422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"10446558127","text":"import os\nfrom jdatetime import datetime, date\nimport re\nfrom app import app\nfrom flask import redirect, url_for, flash\nfrom flask_login import current_user\nfrom functools import wraps\nfrom werkzeug.utils import secure_filename\nfrom persiantools.jdatetime import JalaliDateTime\n\n\n\n#=================================== CREATE EMAIL ADMIN VALID ===============================================\n# regex = re.compile(r\"([-!#-'*+/-9=?A-Z^-~]+(\\.[-!#-'*+/-9=?A-Z^-~]+)*|\\\"([]!#-[^-~ \\t]|(\\\\[\\t -~]))+\\\")@([-!#-'*+/-9=?A-Z^-~]+(\\.[-!#-'*+/-9=?A-Z^-~]+)*|\\[[\\t -Z^-~]*])\")\nregex = re.compile(r\"admin@([-!#-'*+/-9=?A-Z^-~]+(\\.[-!#-'*+/-9=?A-Z^-~]+)*|\\[[\\t -Z^-~]*])\")\n\ndef isvalid_email_admin(email):\n return False if re.fullmatch(regex, email) else True\n\n#========================================== END =============================================================\n\n\n\n#====================================== CREATE ADMIN REQUIRED ===============================================\ndef admin_required(f):\n @wraps(f)\n def wrap(*args, **kwargs):\n if current_user.role == 0:\n return f(*args, **kwargs)\n else:\n flash(\"You need to be an admin to view this page.\", 'danger')\n return redirect(url_for('auth.logout'))\n return wrap\n\n#========================================== END =============================================================\n\n\ndef allow_extension(filename):\n ext = filename[-3:]\n extension = {'png', 'jpg'}\n if not ext in extension:\n return False\n return True\n\n\n#====================================== UPLOADING IMAGE ===============================================\n\ndef save_image(image, app_name, url, form):\n if image.filename == '':\n flash('you not select a image properly, please try again', 'warning')\n return redirect(url_for(url, form=form))\n if image:\n print('image is -----> ', image)\n filename = image.filename\n file_secure = secure_filename(filename)\n if not allow_extension(file_secure):\n flash('this extension for image file is not allowed', 'warning')\n return redirect(url_for(url, form=form))\n folder = os.path.join(app.config['UPLOAD_DIR'], app_name, str(date.today()))\n print('folder is -----> ', folder)\n try:\n os.makedirs(folder)\n except Exception as e:\n # flash(f'error {e} is happened, please try again', 'warning')\n pass\n finally:\n file = os.path.join(folder, file_secure)\n print('file is ----> ', file)\n image.save(file)\n flash('your image is uploaded successfully', 'success')\n return True\n return False\n\n\ndef save_avatar(avatar, obj):\n if avatar:\n filename = avatar.filename\n file_secure = secure_filename(filename)\n if not allow_extension(filename):\n flash('your image file is not allowed', 'warning')\n return False\n folder = os.path.join(app.config['UPLOAD_DIR'], f'{current_user.id}')\n try:\n os.makedirs(folder)\n except FileExistsError:\n pass\n # os.makedirs(folder, exist_ok=True)\n file = os.path.join(folder, file_secure)\n avatar.save(file)\n obj.avatar = f'uploads/{current_user.id}/{filename}'\n return True\n return False\n\n\n\ndef jdt_from_pdp(strdt):\n 
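# A sketch, not part of the original file: jdt_from_pdp below pulls the six
# date/time fields out one split at a time; tuple unpacking with map does
# the same parse in three lines (it assumes the exact 'YYYY-MM-DD HH:MM:SS'
# shape the function already expects):
def parse_jalali_parts(strdt):
    date_part, time_part = strdt.split(' ')
    year, month, day = map(int, date_part.split('-'))
    hour, minute, second = map(int, time_part.split(':'))
    return year, month, day, hour, minute, second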
publish_date = strdt.split(' ')[0]\n    publish_time = strdt.split(' ')[1]\n    year = int(publish_date.split('-')[0])\n    month = int(publish_date.split('-')[1])\n    day = int(publish_date.split('-')[2])\n    hour = int(publish_time.split(':')[0])\n    minute = int(publish_time.split(':')[1])\n    second = int(publish_time.split(':')[2])\n    fa_datetime = datetime(year,month,day,hour,minute,second, locale='fa_IR')\n    return fa_datetime\n\n\ndef jdt_to_gregorian(strdt):\n    date = strdt.split(' ')[0] if strdt is not None else ''\n    try:\n        time = strdt.split(' ')[1] if strdt not in (None, '') else ''\n    except IndexError:\n        time = ''\n    year = int(date.split('-')[0])\n    month = int(date.split('-')[1])\n    day = int(date.split('-')[2])\n    if time != '':\n        hour = int(time.split(':')[0])\n        minute = int(time.split(':')[1])\n        second = int(time.split(':')[2])\n        en_datetime = JalaliDateTime(year,month,day,hour,minute,second).to_gregorian()\n    else:\n        en_datetime = JalaliDateTime(year,month,day).to_gregorian()\n    return en_datetime","repo_name":"mosialgorithm/admin","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"25555717851","text":"# Example: takes the month number and returns its name\r\n\r\n# Python does not have a switch statement like this one:\r\n#switch(num):\r\n# case 1:\r\n# break\r\n# case 2:\r\n# break\r\n\r\n# It is emulated by using a dictionary instead:\r\ndef dameMes(num):\r\n    meses = {\r\n        1 : \"Enero\", \r\n        2 : \"Febrero\",\r\n        3 : \"Marzo\",\r\n        4 : \"Abril\",\r\n        5 : \"Mayo\",\r\n        6 : \"Junio\",\r\n        7 : \"Julio\",\r\n        8 : \"Agosto\",\r\n        9 : \"Septiembre\",\r\n        10 : \"Octubre\",\r\n        11 : \"Noviembre\",\r\n        12 : \"Diciembre\"\r\n    } \r\n    return meses.get(num, \"Mes no válido\")\r\n\r\nmes = int(input(\"Introducir un número del 1 al 12 para saber el mes: \"))\r\nprint(dameMes(mes))","repo_name":"lucho-capomolla/Python","sub_path":"Funciones y Tipos de Datos/ejemploSwitch.py","file_name":"ejemploSwitch.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"9006092628","text":"from random import randint\n\n\nclass Link:\n    def __init__(self, infra, dataset, link):\n        self.infra = infra\n        self.dataset = dataset\n        self.link = link\n\n        self.transfers = {} # data_id -> bytes left to transfer\n        self.incoming = {} # data_id -> time the transfer started\n\n        self.clock = 0 # Time when the data was updated last\n\n    def get_min(self):\n        \"\"\"\n        Gets the entry for the first data_staging job that gets over.\n        :return: Entry\n        \"\"\"\n        if len(self.transfers) == 0:\n            return None\n\n        min_data_entry = list(self.transfers.keys())[0]\n        min_data_time = self.infra.get_time_for_link_transfer(\n            self.link, self.transfers[min_data_entry], len(self.transfers)\n        )\n\n        for data_id in self.transfers:\n            time = self.infra.get_time_for_link_transfer(\n                self.link, self.transfers[data_id], len(self.transfers)\n            )\n\n            if time < min_data_time:\n                min_data_entry = data_id\n                min_data_time = time\n\n        # Second element is random to prevent collisions\n        return min_data_time + self.clock, randint(0, 1000000000), self.link[0], \\\n               self.link[1], 'POP', min_data_entry, self.clock\n\n    def update(self, time):\n        \"\"\"\n        Updates the whole object to the time.\n        :param time: time to update the system to\n        :return: 0, if update failed (self.clock is more than this or get_min returned more than time) else 1\n        \"\"\"\n        if len(self.transfers) == 0:\n            self.clock = time\n            return 0\n        if self.get_min()[0] < time or self.clock > time:\n            self.clock = time\n            
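# A sketch, not part of the original file: the ejemploSwitch record above
# emulates a switch with dict.get.  For month names specifically, the
# standard library already ships the lookup table (English names):
import calendar

def month_name(num):
    # calendar.month_name[1..12] holds the names; index 0 is the empty string
    return calendar.month_name[num] if 1 <= num <= 12 else "Invalid month"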
return 0\n\n        diff = time - self.clock  # elapsed time since the last update\n        size_downloaded = self.infra.get_size_for_link_time(self.link, diff, len(self.transfers))\n\n        for data_id in self.transfers:\n            self.transfers[data_id] -= size_downloaded\n\n        self.clock = time\n        return 1\n\n    def complete_transfer(self, job):\n        \"\"\"\n        Completes the transfer for the given pop job\n        :param job: the job as pushed inside the heap\n        :return: a pop job for minimum time and the data_id, time_required to complete\n        \"\"\"\n        if job[5] not in self.transfers:\n            return None, None\n        # Stale entry, ignore it\n        if job[6] < self.clock:\n            return None, None\n        else:\n            data_id = job[5]\n            now = job[0]\n\n            total_time = now - self.incoming[data_id]\n\n            # Update before deleting\n            self.update(now)\n\n            # Delete\n            del self.transfers[data_id]\n            del self.incoming[data_id]\n\n            # print(\"Duration : {}, Quantity : {}, Src : {}, Dest : {}\".format(\n            #     total_time, self.dataset.get_size(job[5])/1000000000, job[2], job[3]\n            # ))\n            # print(str(total_time) + \" \" + str(job))\n\n            # Return next min\n            return self.get_min(), (data_id, total_time)\n\n    def add_transfer(self, data_id, now):\n        \"\"\"\n        Add the data_id to transfers.\n        Return the event when the first transfer from the transfers is over\n        :param data_id: The data_id\n        :param now: The time of the job pop\n        :return: None if the data_id is already transferred, else the event for the next transfer to finish\n        \"\"\"\n        if data_id in self.transfers:\n            return None\n\n        # Update before inserting\n        self.update(now)\n\n        self.transfers[data_id] = self.dataset.get_size(data_id)\n        self.incoming[data_id] = now\n\n        # print(\"ADD: time: {}, size: {}, min_end: {}, data_id: {}, expected_duration: {}, calculated_duration: {}, transfers: {}, src: {}, dest: {}, bw: {}\".format(\n        #     now, self.dataset.get_size(data_id)/1000000000, self.get_min()[0], data_id,\n        #     self.dataset.get_size(data_id)/(self.infra.get_network_for_link(self.link)[0]*1000000),\n        #     self.infra.get_time_for_link_transfer(self.link, self.dataset.get_size(data_id), len(self.transfers)),\n        #     len(self.transfers),\n        #     self.link[0], self.link[1], self.infra.get_network_for_link(self.link)[0]\n        # ))\n        return self.get_min()\n\n    def get_bandwidth(self):\n        \"\"\"\n        Get the bandwidth of this link given to a file if this file is scheduled\n        :return: bw\n        \"\"\"\n        return self.infra.get_network_for_link(self.link)[0]/(len(self.transfers) + 1)\n","repo_name":"AdwaitB/greco-p2p-analysis","sub_path":"link.py","file_name":"link.py","file_ext":"py","file_size_in_byte":4519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"17399395654","text":"#\n# @lc app=leetcode.cn id=24 lang=python3\n#\n# [24] Swap Nodes in Pairs\n#\n\n# @lc code=start\n# Definition for singly-linked list.\n# class ListNode:\n#     def __init__(self, val=0, next=None):\n#         self.val = val\n#         self.next = next\nclass Solution:\n    def swapPairs(self, head: ListNode) -> ListNode:\n        dummyNode = ListNode(0, head)\n        preNode = dummyNode\n        while head:\n            nextNode = head.next\n            if not nextNode:\n                break\n            preNode.next, head.next, nextNode.next = nextNode, nextNode.next, head\n            preNode = head\n            head = head.next\n        return dummyNode.next \n# @lc code=end\n\n","repo_name":"gnaixx/happycode","sub_path":"python3/linked-list/medium/24.两两交换链表中的节点.py","file_name":"24.两两交换链表中的节点.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"33142104173","text":"from django import forms\nfrom .models import SubmitAssignment,Assignment\n\nclass 
SubmitForm(forms.Form):\n    description = forms.CharField(label='Description', required=False, widget=forms.Textarea(attrs=\n    {\n        'class':'form-control',\n        'placeholder' : 'Describe your submission (If required)',\n    }))\n    link = forms.URLField(label='Link',required=False, widget=forms.TextInput(attrs=\n    {\n        'class':'form-control',\n        'placeholder' : 'Link to your Assignment (If required)',\n    }))\n    files = forms.FileField(label='File',required=False)\n    \n\nclass CreateAssignmentForm(forms.ModelForm): \n\n    class Meta:\n        model=Assignment\n        exclude=[\n            \"user\",\n            \"semester\"\n        ]\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self.fields['last_date'].widget=forms.DateTimeInput()\n        for field in iter(self.fields):\n            if self.fields[field].widget.__class__.__name__ in ('AdminTextInputWidget' , 'Textarea' ,'TextInput','URLInput', 'NumberInput' , 'AdminURLFieldWidget', 'Select'): \n                self.fields[field].widget.attrs.update({ 'class': 'form-control' })","repo_name":"naveenkrnl/latest_remote","sub_path":"dondler/assignments/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1379,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"10202666589","text":"#Simple Python script for deleting files and folders under a directory\r\nimport os\r\nimport shutil\r\nfilelist=[]\r\nfilelist2=[]\r\nfilelist3=[]\r\nfilelist4=[]\r\n\r\nrootdir4 = r\"D:\\\\WMH\\\\static\\\\\"\r\nrootdir3 = r\"D:\\\\WMH\\\\png\\\\\"\r\nrootdir2 = r\"D:\\\\Project\\\\UNET-ZOO-master\\\\CT\\\\val\\\\\"\r\nrootdir=r\"D:\\\\Project\\\\UNET-ZOO-master\\\\saved_predict\\\\resnet34_unet\\\\6\\\\40\\\\esophagus\\\\\" #path of the folder to clear; the end result deletes the img folder\r\nfilelist=os.listdir(rootdir) #list all file names in this directory\r\nfor f in filelist:\r\n    filepath = os.path.join( rootdir, f ) #map the file name to an absolute path\r\n    if os.path.isfile(filepath): #check whether it is a file or a folder\r\n        os.remove(filepath) #if it is a file, delete it directly\r\n        print(str(filepath)+\" removed!\")\r\n    elif os.path.isdir(filepath):\r\n        shutil.rmtree(filepath,True) #if it is a folder, delete it and all files inside\r\n        print(\"dir \"+str(filepath)+\" removed!\")\r\nfilelist2=os.listdir(rootdir2) #list all file names in this directory\r\nfor f in filelist2:\r\n    filepath2 = os.path.join( rootdir2, f ) #map the file name to an absolute path\r\n    if os.path.isfile(filepath2): #check whether it is a file or a folder\r\n        os.remove(filepath2) #if it is a file, delete it directly\r\n        print(str(filepath2)+\" removed!\")\r\n    elif os.path.isdir(filepath2):\r\n        shutil.rmtree(filepath2,True) #if it is a folder, delete it and all files inside\r\n        print(\"dir \"+str(filepath2)+\" removed!\")\r\nfilelist3=os.listdir(rootdir3) #list all file names in this directory\r\nfor f in filelist3:\r\n    filepath3 = os.path.join( rootdir3, f ) #map the file name to an absolute path\r\n    if os.path.isfile(filepath3): #check whether it is a file or a folder\r\n        os.remove(filepath3) #if it is a file, delete it directly\r\n        print(str(filepath3)+\" removed!\")\r\n    elif os.path.isdir(filepath3):\r\n        shutil.rmtree(filepath3,True) #if it is a folder, delete it and all files inside\r\n        print(\"dir \"+str(filepath3)+\" removed!\")\r\nfilelist4=os.listdir(rootdir4) #list all file names in this directory\r\nfor f in filelist4:\r\n    filepath4 = os.path.join( rootdir4, f ) #map the file name to an absolute path\r\n    if os.path.isfile(filepath4): #check whether it is a file or a folder\r\n        os.remove(filepath4) #if it is a file, delete it directly\r\n        print(str(filepath4)+\" removed!\")\r\n    elif os.path.isdir(filepath4):\r\n        shutil.rmtree(filepath4,True) #if it is a folder, delete it and all files inside\r\n        print(\"dir \"+str(filepath4)+\" removed!\")","repo_name":"wls860707495/Code_paper","sub_path":"UNET-ZOO-master/delete.py","file_name":"delete.py","file_ext":"py","file_size_in_byte":2823,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"36699214914","text":"\"\"\"\nDay 14\nHigher Lower 
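# A sketch, not part of the original file: delete.py above repeats the same
# clear-directory block four times; a loop over the roots keeps the
# behaviour in one place (paths shortened for illustration):
import os
import shutil

def clear_directory(rootdir):
    for name in os.listdir(rootdir):
        path = os.path.join(rootdir, name)
        if os.path.isfile(path):
            os.remove(path)              # plain file: delete it directly
        elif os.path.isdir(path):
            shutil.rmtree(path, True)    # folder: delete it and its contents
        print(path, "removed!")

for rootdir in (r"D:\WMH\static", r"D:\WMH\png"):
    clear_directory(rootdir)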
Game\n\"\"\"\n\nimport random\nimport os\n\nfrom day_014_data import data as d14_data\n\n\ndef format_data(account):\n\t\"\"\"Return the account data in printable format: name, a description, from country\"\"\"\n\treturn f\"{account['name']}, a {account['description']}, from {account['country']}\"\n\n\ndef compare_follower_count(usr_a, usr_b, choice):\n\t\"\"\"Compare the number of followers and return the user with the most followers\"\"\"\n\tif usr_a['follower_count'] == usr_b['follower_count']:\n\t\tif choice == 'a':\n\t\t\treturn usr_a\n\t\telse:\n\t\t\treturn usr_b\n\telif usr_a['follower_count'] > usr_b['follower_count']:\n\t\treturn usr_a\n\telse:\n\t\treturn usr_b\n\n\ndef higher_lower(last_choice={}, number_correct=0):\n\tif last_choice:\n\t\tuser_a = last_choice\n\t\tnumber_correct += 1\n\telse:\n\t\tuser_a = random.choice(list(d14_data))\n\t\n\tuser_b = random.choice(list(d14_data))\n\t\n\twhile user_a == user_b:\n\t\tuser_b = random.choice(list(d14_data))\n\t\t\n\tos.system('clear')\n\tif number_correct > 0:\n\t\tprint(f\"That's right! Your current score is {number_correct}\\n\")\n\tprint(f\"Compare a: {format_data(user_a)}.\")\n\tprint(f\"Compare b: {format_data(user_b)}.\")\n\tanswer = input(\"\\nWho has more followers? Type 'a' or 'b': \")\n\n\tif answer == 'a':\n\t\tselected_user = user_a\n\telse:\n\t\tselected_user = user_b\n\t\t\n\thas_more_followers = compare_follower_count(user_a, user_b, answer)\n\n\tif selected_user == has_more_followers:\n\t\thigher_lower(selected_user, number_correct)\n\telse:\n\t\tprint(f\"\\nSorry, that's wrong! Your final score is {number_correct}\\n\")\n\n\t\ndef run():\n\tprint(\"Day 14 Exercise: Higher Lower\\n\")\n\thigher_lower()","repo_name":"Ascendant73/100-Days-of-Code-2022","sub_path":"day_014.py","file_name":"day_014.py","file_ext":"py","file_size_in_byte":1574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"3524857260","text":"# -*- coding: utf-8 -*-\n\nimport chardet as cd\nfrom read import Reader\nclass Tool:\n def __init__(self):\n pass\n\t\n# def codetransition(self,path):\n# file = open(path,'rb')\n# data = file.read()\n# file.close()\n# eencode = cd.detect(data)\n# print (eencode)\n# if eencode['encoding'] == None:\n# data = data.decode('utf-8').encode('utf-8')\n# data = data.decode('utf-8')\n# elif eencode['encoding'] == 'ascii':\n# data = data.decode('ascii').encode('utf-8')\n# data = data.decode('utf-8')\n# elif eencode['encoding'] == 'UTF-8-SIG':\n# data = data.decode('UTF-8-SIG').encode('utf-8')\n# data = data.decode('utf-8')\n# elif eencode['encoding'] == 'GB2312':\n# data = data.decode('GB2312').encode('utf-8')\n# data = data.decode('utf-8')\n# elif eencode['encoding'] == 'UTF-8-SIG':\n# data = data.decode('ISO-8859-1').encode('utf-8')\n# data = data.decode('utf-8')\n# print (data)\n# file = open(path,'w',encoding = 'utf-8')\n# file.write(data)\n# file.close()\n\t\n def findmaxFscore(self,path):\n count = 1\n tmp = ''\n F_score = []\n try: \n file = open(path,'r',encoding = 'utf-8')\n except IOError:\n print ('文件读取错误')\n else: \n for line in file.readlines():\n if (count - 2) % 8 == 0 :\n for i in range(len(line)):\n if line[i] == 'F':\n for j in range(i+2,len(line)): \n tmp += line[j] \n F_score.append(float(tmp))\n tmp = ''\n break\n count += 1\n count = 2\n mmax = F_score[0]\n for i in range(1,len(F_score)):\n if F_score[i] > mmax:\n mmax = F_score[i]\n count =2+8* i\n return mmax,count\n def getAEOcount(self,path):\n a = 0\n e = 0\n o = 0\n reader = 
Reader()\n        Inst = reader.readfiles(path)\n        for i in Inst:\n            for j in range(len(i.labels)): \n                if len(i.labels[j]) == 3:\n                    if i.labels[j][2] == 'A':\n                        a = a+1\n                    elif i.labels[j][2] == 'E':\n                        e = e+1\n                    elif i.labels[j][2] == 'O':\n                        o += 1\n        return a,e,o\npath = 'E:/20140517敖天宇/程序/now_model/PRF.txt'\ntool = Tool()\ntool1 = Tool()\nmmax,count=tool.findmaxFscore(path)\nprint (mmax,count)\n#a,e,o=tool1.getAEOcount(path)\n#print (a,e,o)\n\n\n","repo_name":"mayeeeeeeee/python_learning","sub_path":"ao/tool.py","file_name":"tool.py","file_ext":"py","file_size_in_byte":2742,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"19473985507","text":"from random import randint\r\nimport pygame\r\n\r\n\r\npygame.init()\r\nwin = pygame.display.set_mode((0, 0), pygame.FULLSCREEN)\r\nclock = pygame.time.Clock()\r\n\r\nRED = (255, 0, 0)\r\nGREEN = (0, 255, 0)\r\nGREEN2 = (0, 255, 127)\r\nBLUE = (0, 0, 255)\r\nBLUE2 = (65, 105, 225)\r\nBLACK = (0, 0, 0)\r\nAMARELO = (255, 255, 0)\r\nBRANCO = (255, 255, 255)\r\n\r\nx = 1920\r\ny = 1080\r\npos_jogador1 = [x / 2, y / 2 - 200]\r\npos_jogador2 = [x / 2, y / 2 + 200]\r\ntamanho_jogadores = 50\r\nvelocidade_jogadores = 10\r\ntamanho_ponto = 25\r\nponto = pygame.Surface((tamanho_ponto, tamanho_ponto))\r\nponto.fill(RED)\r\npontuacao_p1 = 0\r\npontuacao_p2 = 0\r\n\r\nFonte = pygame.font.SysFont(\"monospace\", 40)\r\nFonte2 = pygame.font.SysFont(\"monospace\", 150)\r\nFonte3 = pygame.font.SysFont(\"monospace\", 30)\r\nFonte4 = pygame.font.SysFont(\"monospace\", 60)\r\n\r\nganhou = 'ganhou!'\r\n\r\n\r\nclass Player1:\r\n\r\n    def __init__(self, x, y, color, tamanho):\r\n        self.x = x/2\r\n        self.y = y/2\r\n        self.color = color\r\n        self.tamanho = tamanho\r\n\r\n    @staticmethod\r\n    def mexer():\r\n\r\n        comandos = pygame.key.get_pressed()\r\n        if comandos[pygame.K_a]:\r\n            pos_jogador1[0] -= velocidade_jogadores\r\n        if comandos[pygame.K_d]:\r\n            pos_jogador1[0] += velocidade_jogadores\r\n        if comandos[pygame.K_w]:\r\n            pos_jogador1[1] -= velocidade_jogadores\r\n        if comandos[pygame.K_s]:\r\n            pos_jogador1[1] += velocidade_jogadores\r\n\r\n\r\np1 = Player1(x, y, GREEN, tamanho_jogadores)\r\n\r\n\r\nclass Player2:\r\n    def __init__(self, x, y, color, tamanho):\r\n        self.x = x / 2\r\n        self.y = y / 2\r\n        self.color = color\r\n        self.tamanho = tamanho\r\n\r\n    @staticmethod\r\n    def mexer():\r\n        comandos = pygame.key.get_pressed()\r\n        if comandos[pygame.K_LEFT]:\r\n            pos_jogador2[0] -= velocidade_jogadores\r\n        if comandos[pygame.K_RIGHT]:\r\n            pos_jogador2[0] += velocidade_jogadores\r\n        if comandos[pygame.K_UP]:\r\n            pos_jogador2[1] -= velocidade_jogadores\r\n        if comandos[pygame.K_DOWN]:\r\n            pos_jogador2[1] += velocidade_jogadores\r\n\r\n\r\np2 = Player2(x, y, BLUE, tamanho_jogadores)\r\n\r\n\r\ndef criar_pontos():\r\n    pos_x = randint(0, 1920 - tamanho_ponto)\r\n    pos_y = randint(0, 1080 - tamanho_ponto)\r\n    return pos_x, pos_y\r\n\r\n\r\npos_ponto = criar_pontos()\r\n\r\n\r\ndef pontuacao1(pos_jogador1, pos_ponto):\r\n    pos_x1 = pos_jogador1[0]\r\n    pos_y1 = pos_jogador1[1]\r\n\r\n    pos_xp = pos_ponto[0]\r\n    pos_yp = pos_ponto[1]\r\n\r\n    if (pos_xp >= pos_x1 and pos_xp < (pos_x1 + tamanho_jogadores)) or (pos_x1 >= pos_xp and pos_x1 < (pos_xp + tamanho_ponto)):\r\n        if (pos_yp >= pos_y1 and pos_yp < (pos_y1 + tamanho_jogadores)) or (pos_y1 >= pos_yp and (pos_y1 < (pos_yp + tamanho_ponto))):\r\n\r\n            return True\r\n\r\n\r\ndef pontuacao2(pos_jogador2, pos_ponto):\r\n    pos_x2 = pos_jogador2[0]\r\n    pos_y2 = pos_jogador2[1]\r\n\r\n    pos_xp = 
pos_ponto[0]\r\n    pos_yp = pos_ponto[1]\r\n\r\n    if (pos_xp >= pos_x2 and pos_xp < (pos_x2 + tamanho_jogadores)) or (pos_x2 >= pos_xp and pos_x2 < (pos_xp + tamanho_ponto)):\r\n        if (pos_yp >= pos_y2 and pos_yp < (pos_y2 + tamanho_jogadores)) or (pos_y2 >= pos_yp and (pos_y2 < (pos_yp + tamanho_ponto))):\r\n\r\n            return True\r\n\r\n\r\ndef resetar_posicao_jogar(pos_jogador1, pos_jogador2):\r\n    if pos_jogador1[1] <= 0:\r\n        pos_jogador1[1] = 0\r\n\r\n    if pos_jogador1[1] >= 1080 - tamanho_jogadores:\r\n        pos_jogador1[1] = y - tamanho_jogadores\r\n\r\n    if pos_jogador1[0] <= 0:\r\n        pos_jogador1[0] = 0\r\n\r\n    if pos_jogador1[0] >= 1920 - tamanho_jogadores:\r\n        pos_jogador1[0] = 1920 - tamanho_jogadores\r\n\r\n    if pos_jogador2[1] <= 0:\r\n        pos_jogador2[1] = 0\r\n\r\n    if pos_jogador2[1] >= 1080 - tamanho_jogadores:\r\n        pos_jogador2[1] = y - tamanho_jogadores\r\n\r\n    if pos_jogador2[0] <= 0:\r\n        pos_jogador2[0] = 0\r\n\r\n    if pos_jogador2[0] >= 1920 - tamanho_jogadores:\r\n        pos_jogador2[0] = 1920 - tamanho_jogadores\r\n\r\n\r\nrunning = True\r\nwhile running:\r\n    for event in pygame.event.get():\r\n        if event.type == pygame.QUIT:\r\n            running = False\r\n        comandos = pygame.key.get_pressed()\r\n        if comandos[pygame.K_ESCAPE]:\r\n            running = False\r\n\r\n    clock.tick(60)\r\n    pygame.time.delay(10)\r\n\r\n    win.fill(BLACK)\r\n    pygame.draw.rect(win, RED, (pos_ponto[0], pos_ponto[1], tamanho_ponto, tamanho_ponto))\r\n\r\n    p1.mexer()\r\n    p2.mexer()\r\n    resetar_posicao_jogar(pos_jogador1, pos_jogador2)\r\n\r\n    if pontuacao1(pos_jogador1, pos_ponto):\r\n        pos_ponto = criar_pontos()\r\n        pygame.draw.rect(win, RED, (pos_ponto[0], pos_ponto[1], tamanho_ponto, tamanho_ponto))\r\n        pontuacao_p1 += 1\r\n\r\n    if pontuacao2(pos_jogador2, pos_ponto):\r\n        pos_ponto = criar_pontos()\r\n        pygame.draw.rect(win, RED, (pos_ponto[0], pos_ponto[1], tamanho_ponto, tamanho_ponto))\r\n        pontuacao_p2 += 1\r\n\r\n    text1 = 'Pontuação:' + str(pontuacao_p1)\r\n    label1 = Fonte.render(text1, 1, GREEN2)\r\n    win.blit(label1, (0 + 10, 0 + 10))\r\n\r\n    text2 = 'Pontuação:' + str(pontuacao_p2)\r\n    label2 = Fonte.render(text2, 1, BLUE2)\r\n    win.blit(label2, (x - 340, y - 50))\r\n\r\n    if pontuacao_p1 >= 10 or pontuacao_p2 >= 10:\r\n        game_over = 'Game Over'\r\n        game_over_label = Fonte2.render(game_over, 1, AMARELO)\r\n        win.blit(game_over_label, (x / 2 - 400, y / 2 - 200))\r\n\r\n        r_to_restart = 'Aperte \"R\" para recomeçar'\r\n        r_to_restart_label = Fonte3.render(r_to_restart, 1, AMARELO)\r\n        win.blit(r_to_restart_label, (x / 2 - 225, y - 100))\r\n\r\n        if pontuacao_p1 >= 10:\r\n            verde = 'Verde'\r\n            pontu_label = Fonte4.render(verde, 1, GREEN2)\r\n            pontu_label2 = Fonte4.render(ganhou, 1, BRANCO)\r\n            win.blit(pontu_label, (x / 2 - 200, y / 2))\r\n            win.blit(pontu_label2, (x / 2, y / 2))\r\n\r\n        elif pontuacao_p2 >= 10:\r\n            azul = 'Azul'\r\n            pontu_label = Fonte4.render(azul, 1, BLUE2)\r\n            pontu_label2 = Fonte4.render(ganhou, 1, BRANCO)\r\n            win.blit(pontu_label, (x / 2 - 180, y / 2))\r\n            win.blit(pontu_label2, (x / 2, y / 2))\r\n\r\n        comandos = pygame.key.get_pressed()\r\n        if comandos[pygame.K_r]:\r\n            pos_jogador1 = [x / 2, y / 2 - 200]\r\n            pos_jogador2 = [x / 2, y / 2 + 200]\r\n            pontuacao_p1 = 0\r\n            pontuacao_p2 = 0\r\n            pos_ponto = criar_pontos()\r\n            pygame.draw.rect(win, RED, (pos_ponto[0], pos_ponto[1], tamanho_ponto, tamanho_ponto))\r\n\r\n    pygame.draw.rect(win, GREEN, (pos_jogador1[0], pos_jogador1[1], tamanho_jogadores, tamanho_jogadores))\r\n    pygame.draw.rect(win, BLUE, (pos_jogador2[0], pos_jogador2[1], tamanho_jogadores, tamanho_jogadores))\r\n\r\n    
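# A sketch, not part of the original file: pontuacao1/pontuacao2 above test
# axis-aligned overlap by hand; pygame.Rect implements the same test, so the
# two functions collapse into one (sizes mirror the globals above):
import pygame

def touches(player_pos, point_pos, player_size=50, point_size=25):
    player_rect = pygame.Rect(player_pos[0], player_pos[1], player_size, player_size)
    point_rect = pygame.Rect(point_pos[0], point_pos[1], point_size, point_size)
    return player_rect.colliderect(point_rect)   # True on any overlap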
pygame.display.update()\r\n","repo_name":"BrunoCG18/Catch-the-Squares","sub_path":"SquareGame.py","file_name":"SquareGame.py","file_ext":"py","file_size_in_byte":6749,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"16486347853","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nwill train a new network on a dataset and save the model as a checkpoint\npython train.py data_directory\nPrints out training loss, validation loss, and validation accuracy as the network trains\npython train.py data_dir --save_dir save_directory\npython train.py data_dir --arch \"vgg13\"\npython train.py data_dir --learning_rate 0.01 --hidden_units 512 --epochs 20\npython train.py data_dir \n\"\"\"\nimport argparse #used to get arguments from user \nimport os #used to get current working directory \nimport torch #used to identify available device CPU or GPU \nimport torchvision.transforms as transforms #used in transformations\nfrom torchvision import datasets # used to load the datasets with ImageFolder\nimport torchvision.models as models #imports models, pretrained VGG16 is used \nimport torch.nn as nn #Relu Linear Dropout used in classifier\nfrom collections import OrderedDict #Used to construct classiefier sequence of steps\nimport torch.optim as optim #SGD \nimport torch.nn.functional as F #used for softmax function\nimport numpy as np #used in train function to initialize tracker \n\ndef get_input_args():\n \"\"\"\n Retrieves and parses command line arguments provided by the user when\n they run the program from a terminal. This function uses Python's \n argparse module to create and define these command line arguments \n If the user fails to provide some or all of the 3 arguments, then the \n missing arguments.\n 1. save directory -- dir where checkpoint is saved with default value 'cur_dir' \n 2. CNN Model Architecure as -- arch with defualt value 'vgg'\n 3. Hyperparameter learning_rate --arch\n 4. Hyperparameter hidden_units --learning_rate \n 5. Hyperparameter epochs --hidden_units \n 5. 
Hyperparameter epochs --epochs \n \"\"\" \n #cur_dir will contain the path of where the script is executing \n cur_dir = os.path.dirname(os.path.realpath(__file__))\n #Create Argument Parser object named parser \n parser = argparse.ArgumentParser()\n #Argument: path to a folder\n parser.add_argument('--data_dir', type=str, default='flowers/', \\\n help='Directory containing data for training, validation, and testing')\n #Argument: path to a folder\n parser.add_argument('--save_dir', type=str, default='.', \\\n help='Path to directory where checkpoint will be stored')\n #Argument: architecture type \n parser.add_argument('--arch', type=str, default='vgg16', \\\n help='Neural network architecure, vgg16, alexnet, densenet161, mobilenet_v2')\n #Argument: hyperparameter learning_rate\n parser.add_argument('--learning_rate', type=float, default='.02', \\\n help='Learning rate for the neural network')\n #Argument: hyperparameter hidden_units\n parser.add_argument('--hidden_units', type=int, default='4096', \\\n help='Number of hidden units in the classifier')\n #Argument: hyperparameter hidden_units\n parser.add_argument('--output_units', type=int, default='102', \\\n help='Number of output units in the classifier')\n #Argument: hyperparameter epochs\n parser.add_argument('--epochs', type=int, default='3', \\\n help='Number of epochs')\n #Assign variables in_args tp parse_args()\n in_args = parser.parse_args()\n #access values of Arguments by printing it \n #print(\"Arguments: \", in_args) #DEBUG\n\n if in_args.hidden_units % 2 != 0:\n raise ValueError('Please choose an even number for hidden units')\n elif in_args.hidden_units < (2*in_args.output_units):\n raise ValueError('Please choose a number twice the size of output units, default=', in_args.output_units)\n\n\n return in_args\n\ndef load_loaders(data_dir):\n \"\"\"load transformed training data, validation data and testing data \n \n Extended description of function. 
\n \n Parameters: \n arg1 (str): Data director with seperate folders for train valid and test\n \n Returns: \n int: return loaders dict \n \"\"\"\n #data_dir = 'flowers'\n train_dir = data_dir + '/train'\n valid_dir = data_dir + '/valid'\n test_dir = data_dir + '/test'\n \n #define dataloader parmeters\n batch_size = 32\n num_workers = 0\n \n #normalize the means and standard deveiations \n #of the images to what the netword expects\n standard_normalization = transforms.Normalize([0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225])\n \n # TODO: Define your transforms for the training, validation, and testing sets\n train_transforms = transforms.Compose([transforms.RandomRotation(30),\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n standard_normalization])\n \n valid_transforms = transforms.Compose([transforms.Resize(256), \n transforms.CenterCrop(224),\n transforms.ToTensor(),\n standard_normalization])\n \n test_transforms = transforms.Compose([transforms.Resize(size=(224, 224)),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n standard_normalization])\n \n # TODO: Load the datasets with ImageFolder\n train_data = datasets.ImageFolder(train_dir, transform = train_transforms)\n valid_data = datasets.ImageFolder(valid_dir, transform = valid_transforms)\n test_data = datasets.ImageFolder(test_dir, transform = test_transforms)\n \n # TODO: Using the image datasets and the trainforms, define the dataloaders\n train_loader = torch.utils.data.DataLoader(train_data,\n batch_size = batch_size,\n num_workers = num_workers,\n shuffle = True)\n #print(len(train_loader))#DEBUG\n valid_loader = torch.utils.data.DataLoader(valid_data, \n batch_size=batch_size,\n num_workers=num_workers,\n shuffle = True)\n #print(len(valid_loader))#DEBUG\n test_loader = torch.utils.data.DataLoader(test_data, \n batch_size = batch_size,\n num_workers = num_workers,\n shuffle = True)\n #print(len(test_loader))#DEBUG\n loaders= {\n 'train': train_loader,\n 'valid': valid_loader,\n 'test': test_loader\n }\n l_data= {\n 'train': train_data,\n 'valid': valid_data,\n 'test': test_data\n }\n \n return loaders, l_data\n \ndef load_arch(arch):\n \"\"\"load pretrained model. 
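# A standalone sketch of the pattern the load_arch function here implements:
# freeze the pretrained features, then attach a trainable head.  Weights
# download on first use; the 512 hidden width is illustrative (25088 is
# vgg16's classifier input size).
import torch.nn as nn
import torchvision.models as models

backbone = models.vgg16(pretrained=True)
for param in backbone.parameters():
    param.requires_grad = False          # the convolutional features stay fixed

backbone.classifier = nn.Sequential(     # only this head will be trained
    nn.Linear(25088, 512),
    nn.ReLU(inplace=True),
    nn.Dropout(p=0.5),
    nn.Linear(512, 102),
    nn.LogSoftmax(dim=1),
)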
\n    \n    Parameters: \n    arch (str): desired architecture input from user \n    \n    Returns: \n    tuple: the pretrained model and its classifier input size \n    \"\"\"\n    if arch == 'vgg16':\n        #Implementation Model Architecture\n        #Initialized model is saved into model_transfer\n        #Model Architecture pre trained VGG16\n        model_transfer = models.vgg16(pretrained=True)\n        classifier_input = model_transfer.classifier[0].in_features \n        #print('vgg',classifier_input)\n    elif arch == 'alexnet':\n        model_transfer = models.alexnet(pretrained=True)\n        classifier_input = model_transfer.classifier[1].in_features\n        #print('alexnet',classifier_input)\n    elif arch == 'densenet161':\n        model_transfer = models.densenet161(pretrained=True)\n        classifier_input = model_transfer.classifier.in_features\n        #print('densenet161',classifier_input)\n    elif arch == 'mobilenet_v2':\n        model_transfer = models.mobilenet_v2(pretrained=True)\n        classifier_input = model_transfer.classifier[1].in_features\n        #print('mobilenet_v2',classifier_input)\n\n    else:\n        raise ValueError('Please only select vgg16, alexnet, densenet161, mobilenet_v2')\n    \n    #Freeze feature parameters, so the net acts as a fixed feature extractor \n    #and we only backprop through the new classifier and not the feature extractor\n    for param in model_transfer.parameters():\n        param.requires_grad = False\n\n    return model_transfer, classifier_input\n\ndef create_new_classifier(learning_rate, model_transfer, classifier_input, hidden_units, output_units):\n    \"\"\"Creates a new classifier with 102 outputs matching flower types. \n    \n    Parameters: \n    learning_rate (float):\n    model_transfer (nn.Module): \n\n    Returns: \n    str: criterion_transfer\n    str: optimizer_transfer\n    \"\"\"\n    #Untrained feed-forward network as a classifier, \n    #using ReLU activations and dropout as specified above\n    #The classifier follows the original VGG16 model \n    #but modifies the output layer to reflect flower categories \n    classifier = nn.Sequential(OrderedDict([\n        ('fc1', nn.Linear(in_features = classifier_input, out_features = hidden_units, bias = True)),\n        ('relu', nn.ReLU(inplace = True)),\n        ('dropout', nn.Dropout(p= 0.5, inplace = False)),\n        ('fc2', nn.Linear(in_features = hidden_units, out_features = int(hidden_units/2), bias = True)),\n        ('relu', nn.ReLU(inplace = True)),\n        ('dropout', nn.Dropout(p = 0.5, inplace = False)),\n        ('Linear', nn.Linear(in_features =int(hidden_units/2), out_features = output_units, bias = True)),\n        ('softmax', nn.LogSoftmax(dim=1)) #this is needed to output the probabilities\n    ]))\n    model_transfer.classifier = classifier\n    #Specifies a loss function and optimizer.\n    criterion_transfer = nn.NLLLoss() #pairs with the LogSoftmax output above; CrossEntropyLoss would apply log-softmax a second time\n    optimizer_transfer = optim.SGD(model_transfer.classifier.parameters(), lr=learning_rate)\n    return criterion_transfer, optimizer_transfer\n\n\ndef train(n_epochs, loaders, model, optimizer, criterion, device):\n    \"\"\"Trains and validates the classifier portion of the model. \n    \n    The train function consists of one main loop with two parts: \n    the first iterates over batches of 32\n    and pauses every 10th step, i.e. 32*10 = 320 images, 
\n at which point the model is validated and metrics are printed\n \n Parameters:\n n_epochs (int): number of epochs \n loaders (dict): loaders see load_loaders above \n model (dict): \n optimizer \n criterion \n device (str): GPU or CPU\n \n Returns: \n dict: trained model \n \"\"\"\n #print('in train',len(loaders['train']))\n \"\"\"return trained model\"\"\"\n #initialize tracker for minimum validation loss\n model.to(device)\n valid_loss_min = np.Inf\n step = 0\n #iterate over epochs\n for epoch in range(1, n_epochs+1):\n #initialize variables to monitor training and validation loss\n valid_loss = 0.0\n train_loss = 0.0\n accuracy = 0.0\n #train the model\n model.train()\n for batch_idx, (data, target) in enumerate(loaders['train']):\n step += 1 #accumulate steps \n #move tensors to GPU or CPU depending on device variable\n data, target = data.to(device), target.to(device)\n ##find the loss and update the model parameters accordingly\n \n #initialize weight clearing gradients of optimized variables\n optimizer.zero_grad()\n #forward pass: computing prediction by passing inputs to model and getting log propabilites\n output = model.forward(data)\n #calculate batch loss with log propability and labels\n loss = criterion(output, target)\n #back prop: computing gradient of the loss\n loss.backward()\n #parameter update for single optimization step\n optimizer.step()\n ##record/update the average training loss, using\n train_loss += loss.data \n \n #COMMENT Print every 10\n #if batch_idx % 100 == 0:\n #print(batch_idx)\n if step % 10 == 0:\n valid_loss = 0.0\n accuracy = 0.0\n \n #validate model\n model.eval()\n for batch_idx, (data, target) in enumerate(loaders['valid']):\n # move to GPU\n data, target = data.to(device), target.to(device)\n ## ## TODO: update the average validation loss\n #forward pass: computing predictions by passing inputs to model\n output = model.forward(data)\n #calculate batch loss\n loss = criterion(output, target)\n #update average validation loss\n valid_loss = valid_loss + loss.data \n \n # Class with highest probability is our predicted class, compare with true label \n # Calculate accuracy\n # Model's output is log-softmax, take exponential to get the probabilities\n # ps = propbality\n ps = torch.exp(output)\n # the topk method reutrns the highest k probabilities and the indices of those \n #probabilities corresponding to the classes, k = 1\n # check for equality with labels \n equality = (target.data == ps.max(dim=1)[1])\n # Accuracy is number of correct predictions divided by all predictions; mode \n # using the equals we can update our accuracy\n # once its changed to a float tensor the mean function can be ran\n accuracy += equality.type(torch.FloatTensor).mean()\n \n # print training/validation statistics \n #running_loss/print_every takes average of training loss \n #so everytime its printed the average is takes\n #len of valid_loader tells us how many batches are actually in our test data set \n #that we are getting from test loader, since we are summing up the batches above\n #we take the total loss and divide it by the number of batches\n #taking the total loss and dividing it by the number of batches \n print('Epoch: {} \\tTraining Loss: {:.6f} \\tValidation Loss: {:.6f} \\tAccuracy: {:.6f}'.format(\n epoch, \n #train_loss/len(train_loader),\n #valid_loss/len(valid_loader),\n #accuracy/len(valid_loader)\n train_loss/len(loaders['train']),\n valid_loss/len(loaders['valid']),\n accuracy/len(loaders['valid'])\n ))\n #put the model back in training mode 
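# A standalone sketch of the accuracy bookkeeping above: exp() turns the
# model's log-softmax output back into probabilities and max(dim=1) picks
# the predicted class (dummy batch, 102 classes as in the flower task):
import torch

output = torch.log_softmax(torch.randn(4, 102), dim=1)  # stand-in model output
target = torch.randint(0, 102, (4,))                    # stand-in labels
predictions = torch.exp(output).max(dim=1)[1]           # argmax class per row
accuracy = (predictions == target).float().mean()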
enabling dropout and grads \n model.train()\n train_loss = 0 #comment \n #return trained model\n return model\n\ndef test(loaders, model, criterion, device):\n \"\"\"Tests the trained model by measuring its performance on the train dataset \n \n Parameters:\n loaders (dict): \n model (dict): \n criterion ():\n device (str): GPU or CPU\n \n Returns: \n NONE \n \"\"\"\n #monitor test loss and accuracy\n test_loss = 0.\n correct = 0.\n total = 0.\n \n model.eval()\n for batch_idx, (data, target) in enumerate(loaders['test']):\n # move to GPU\n data, target = data.to(device), target.to(device)\n # forward pass: compute predicted outputs by passing inputs to the model\n output = model(data)\n # calculate the loss\n loss = criterion(output, target)\n # update average test loss \n test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss))\n # convert output probabilities to predicted class\n pred = output.data.max(1, keepdim=True)[1]\n # compare predictions to true label\n correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy())\n total += data.size(0)\n \n print('Test Loss: {:.6f}\\n'.format(test_loss))\n\n print('\\nTest Accuracy: %2d%% (%2d/%2d)' % (\n 100. * correct / total, correct, total))\n\n\ndef save_checkpoint(save_dir, model_transfer, optimizer_transfer, epochs, arch, loaders, l_data):\n \"\"\"Saves the state of the model that has the new classifier that have been trained. \n \n Parameters: \n arg1 (int): Description of arg1 \n \n Returns: \n int: Description of return value \n \"\"\"\n #print(save_dir, model, data, optimizer, epochs, arch)\n #attach the mapping of classes to indices to the model \n #as an attribute which makes inference easier later on\n #model_transfer.class_to_idx = train_data.class_to_idx\n #model_transfer.class_to_idx = train_data.class_to_idx\n model_transfer.class_to_idx = l_data['train'].class_to_idx\n\n checkpoint = {'architecture': 'VGG16',\n 'classifier': model_transfer.classifier,\n 'state_dict': model_transfer.state_dict(),\n 'optimizer_dict': optimizer_transfer.state_dict,\n #'class_to_idx': train_data.class_to_idx,\n 'class_to_idx': l_data['train'].class_to_idx,\n }\n torch.save(checkpoint, 'checkpoint_model_transfer.pth')\n\ndef main():\n \"\"\"\n Main \n \"\"\"\n #print('python train.py -h')\n in_args = get_input_args()\n print('Arguments', in_args)\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n #print('device:', device) #DEBUG\n print('Loading Loaders')\n loaders, l_data = load_loaders(in_args.data_dir) \n #for i in loaders:\n # print(i, loaders[i])\n #print('len of loaders train: ', len(loaders['train']))\n #print('len of loaders train: ', len(loaders['valid']))\n #print('len of loaders train: ', len(loaders['test']))\n #print('Num test images: ', len(loaders.test_data))\n \n print('Loading Model Architecture') \n model_transfer, classifier_input = load_arch(in_args.arch)\n #print('Model',model_transfer)#DEBUG\n\n print('Creating Classifier') \n criterion_transfer, optimizer_transfer = create_new_classifier(in_args.learning_rate, model_transfer, classifier_input, in_args.hidden_units, in_args.output_units)\n #print('Model',model_transfer)#DEBUG\n\n print('Training Model, go grab a coffee this will take a while :-)') \n model_transfer = train(in_args.epochs, loaders, model_transfer, optimizer_transfer, criterion_transfer, device)\n\n print('Testing Model') \n test(loaders, model_transfer, criterion_transfer, device)\n \n print('Saving Model') \n save_checkpoint(in_args.save_dir, 
\n\ndef main():\n \"\"\"\n Main entry point: parse arguments, build and train the model, then test and save it. \n \"\"\"\n in_args = get_input_args()\n print('Arguments', in_args)\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n print('Loading Loaders')\n loaders, l_data = load_loaders(in_args.data_dir) \n \n print('Loading Model Architecture') \n model_transfer, classifier_input = load_arch(in_args.arch)\n\n print('Creating Classifier') \n criterion_transfer, optimizer_transfer = create_new_classifier(in_args.learning_rate, model_transfer, classifier_input, in_args.hidden_units, in_args.output_units)\n\n print('Training Model, go grab a coffee, this will take a while :-)') \n model_transfer = train(in_args.epochs, loaders, model_transfer, optimizer_transfer, criterion_transfer, device)\n\n print('Testing Model') \n test(loaders, model_transfer, criterion_transfer, device)\n \n print('Saving Model') \n save_checkpoint(in_args.save_dir, model_transfer, optimizer_transfer, in_args.epochs, in_args.arch, loaders, l_data)\n\nif __name__ == '__main__':\n main() #called if the script is executed on its own \n","repo_name":"atkatchev/image-classification-application","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":19242,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"70787061391","text":"from machine import Pin\nfrom time import sleep\n\n# LEDs on GPIO 0-2\nled0 = Pin(0, Pin.OUT)\nled1 = Pin(1, Pin.OUT)\nled2 = Pin(2, Pin.OUT)\n\n# inputs on the lower left of the Pico using built-in pull-down resistors to keep the values from floating\nmiddle_switch = Pin(7, Pin.IN, Pin.PULL_DOWN) \nright_switch = Pin(28, Pin.IN, Pin.PULL_DOWN)\nleft_switch = Pin(27, Pin.IN, Pin.PULL_DOWN)
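\n\n# NOTE (illustrative, not in the original file): with Pin.PULL_DOWN the pin reads\n# 0 until the switch connects it to 3.3 V, so value() == 1 means the switch is\n# pressed, not that the value changed.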
\n\nwhile True:\n if middle_switch.value(): # switch pressed (pin reads high)\n led0.on()\n print('middle')\n else: led0.off()\n\n if right_switch.value(): # switch pressed\n led1.on()\n print('right')\n else: led1.off()\n \n if left_switch.value(): # switch pressed\n led2.on()\n print('left')\n else: led2.off()\n sleep(.1)","repo_name":"CoderDojoTC/micropython","sub_path":"src/kits/maker-pi-rp2040-robots/switch-bot/switch-test.py","file_name":"switch-test.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"83"} +{"seq_id":"4789574187","text":"import os\nimport random\nfrom datetime import datetime\nimport better_profanity\nfrom discord.ext import commands\nfrom dotenv import load_dotenv\nfrom Vendor import Vendor\nfrom VendorDictionary import VendorDictionary\nfrom xur_quotes import who_is_xur, who_are_the_nine, bad_word, bad_word_at_xur\nfrom helpers import Emoji\n\nregular_profanity = better_profanity.Profanity()\nhate_speech_check = better_profanity.Profanity()\nhate_speech_check.load_censor_words_from_file('hate_speech.txt')\n\nload_dotenv()\nTOKEN = os.getenv('DISCORD_TOKEN')\nGUILD = os.getenv('DISCORD_GUILD')\nXur = Vendor(name='Xur')\nVendor_Dictionary = VendorDictionary()\nclient = commands.Bot(command_prefix=\"!\")\n\n\n@client.event\nasync def on_command_error(ctx, error):\n if isinstance(error, commands.CommandNotFound):\n await ctx.send(\"I do not understand\")\n\n\n@client.command()\nasync def xur(ctx):\n # Sends an embedded message containing Xur's current inventory, or if he\n # is not currently present, returns a message telling the user when he'll arrive next\n await client.wait_until_ready()\n message = Xur.message()\n if Xur.embedded:\n await ctx.send(embed=message)\n else:\n await ctx.send(message)\n\n\n@client.command()\nasync def bounties(ctx, *args):\n # Sends an embedded message containing the requested vendor's bounties. If they\n # are not currently present or the user requests an unidentifiable vendor, sends an informational message\n if len(args) == 0:\n await ctx.send(\"!bounties requires an argument '[vendor_name]', try !bounties [vendor_name]\")\n return\n try:\n vendor = Vendor_Dictionary.search(name=\" \".join(args[:]))\n await client.wait_until_ready()\n await ctx.send(embed=vendor.message())\n except (RuntimeError, AttributeError):\n await client.wait_until_ready()\n await ctx.send(\"That vendor does not exist in my files or doesn't sell bounties\")\n\n\n@client.event\nasync def on_ready(): # Confirmation in the terminal to let you know the bot has activated successfully\n print(f'{client.user.name} has connected to Discord!')\n\n\n@client.event\nasync def on_message(message):\n # if the message is from xur_bot, ignore it\n if message.author == client.user:\n return\n # Check what kind of message it is\n if (\n ('who is xur' in message.content.lower()) |\n ('what is xur' in message.content.lower())\n ):\n response = random.choice(who_is_xur)\n await message.channel.send(response)\n elif hate_speech_check.contains_profanity(message.content):\n await message.channel.purge(limit=1)\n\n elif (\n (regular_profanity.contains_profanity(message.content)) |\n ('i\\'m salty' in message.content.lower()) |\n ('im salty' in message.content.lower()) |\n ('i am salty' in message.content.lower())\n ):\n if 'xur' in message.content.lower():\n await message.add_reaction(emoji=Emoji.ULDREN_THUMBS_DOWN.value)\n response = random.choice(bad_word_at_xur)\n else:\n response = random.choice(bad_word)\n await message.channel.send(response)\n\n elif (\n ('who are the nine' in message.content.lower()) |\n ('what are the nine' in message.content.lower()) |\n ('who is the nine' in message.content.lower()) |\n ('what is the nine' in message.content.lower())\n ):\n response = random.choice(who_are_the_nine)\n await message.channel.send(response)\n\n await client.process_commands(message)\n\n\n@client.event\nasync def on_error(event, *args, **kwargs):\n with open('err.log', 'a') as f:\n if event == 'on_message':\n message = args[0]\n f.write(\n f'\\nERROR on {datetime.now().strftime(\"%m/%d/%Y at %H:%M:%S\")}\\nServer: {message.guild}\\nChannel: {message.channel}\\nUser: {message.author}\\nUnhandled message: {message.content}\\n')\n else:\n raise\n\n\nclient.run(TOKEN)\n","repo_name":"Hayden-J-C/xur_bot","sub_path":"xur_bot.py","file_name":"xur_bot.py","file_ext":"py","file_size_in_byte":3985,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"27037897062","text":"import itertools\r\n\r\nf = open('something.txt')\r\nfile_stuff = f.read()\r\nargs = [p.strip() for p in file_stuff.splitlines()]\r\nf.close()\r\n\r\nnumber = int(args[0])\r\nallowed_mismatches = int(args[1])\r\ndna_s = args[2:]\r\n\r\npossible_kmers = [''.join(a) for a in itertools.product('ATCG', repeat=number)]\r\n\r\ndef h_distance(pat, st):\r\n n = 0\r\n if len(pat) == len(st):\r\n for x, i in zip(st, pat):\r\n if x != i:\r\n n += 1\r\n if n > allowed_mismatches:\r\n return False\r\n return True
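\r\n# NOTE (illustrative, not in the original file): h_distance is really an\r\n# 'is within allowed_mismatches' check - it bails out early and returns a bool\r\n# rather than the actual Hamming distance.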
\r\n\r\ndef score(dna, mo, k):\r\n for r in dna:\r\n found = False\r\n for i in range(len(r)-k+1):\r\n if h_distance(mo, r[i:i+k]):\r\n found = True\r\n break\r\n if not found:\r\n return False\r\n return True\r\n\r\n\r\ndef get_m(kmer, d):\r\n miss = set()\r\n for i in itertools.combinations(range(len(kmer)), d):\r\n for r in possible_kmers:\r\n mismatches = list(kmer)\r\n for index, replace in zip(i, r):\r\n mismatches[index] = replace\r\n miss.add(''.join(mismatches))\r\n return miss\r\n\r\n\r\ndef get_kmer(dna, k, k_set):\r\n for d in dna:\r\n for x in range(len(d)-k+1):\r\n w = d[x:x+k]\r\n k_set.add(w)\r\n\r\n\r\ndef motifEnumeration(dna, k, d):\r\n patterns = []\r\n kmers = set()\r\n get_kmer(dna, k, kmers)\r\n for kmer in list(kmers):\r\n patterns.extend(get_m(kmer, d))\r\n\r\n patterns = list(set(patterns))\r\n\r\n top = []\r\n for mo in sorted(patterns):\r\n if score(dna, mo, k):\r\n top.append(mo)\r\n return top\r\n\r\n\r\na = motifEnumeration(dna_s, number, allowed_mismatches)\r\nprint(' '.join(a))\r\n","repo_name":"saleisha57/DesktopClean","sub_path":"Bioinformatics/HW_3/motif_enumeration.py","file_name":"motif_enumeration.py","file_ext":"py","file_size_in_byte":1707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"21045693411","text":"#Project Number 5\r\n#James Draney\r\n#jamesdraney99@gmail.com\r\n#Lets get it\r\n#Sentence Program\r\n#yktv\r\n\r\n\r\ndef main():\r\n\r\n sentence=input(\"Write a Sentence \")\r\n print(\"Number of Characters: \", len(sentence))\r\n words=len(sentence.split())\r\n print(\"Number of Words: \" ,words)\r\n\r\n #average word length: letters (spaces removed) divided by number of words\r\n x=len(sentence.replace(\" \", \"\"))/words\r\n \r\n print(\"Average Word Length \",(x))\r\n\r\nmain()\r\n\r\n\r\n#divide number of letters by number of words\r\n","repo_name":"Marist-CMPT120-FA19/James-Draney-Project-5","sub_path":"James Draney Project 5.py","file_name":"James Draney Project 5.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"42765937013","text":"from onesignal_sdk.client import Client\nfrom config import (\n ONESIGNAL_APP_ID,\n ONESIGNAL_REST_API_KEY,\n ONESIGNAL_USER_AUTH_KEY\n)\n\nclient = Client(\n app_id=ONESIGNAL_APP_ID,\n rest_api_key=ONESIGNAL_REST_API_KEY,\n user_auth_key=ONESIGNAL_USER_AUTH_KEY\n)\n\ndef sendNotificationToTokens(tokens, title, message):\n notification_body = {\n 'include_player_ids': tokens,\n 'contents': {\n 'en': message\n },\n 'headings': {\n 'en': title\n }\n }\n response = client.send_notification(notification_body)\n return response\n\n\ndef sendNotificationToTopic(topics, title, message):\n notification_body = {\n 'included_segments': topics,\n 'contents': {\n 'en': message\n },\n 'headings': {\n 'en': title\n }\n }\n response = client.send_notification(notification_body)\n return response\n","repo_name":"FazilCherukad/django-backend","sub_path":"core/utils/onesignal.py","file_name":"onesignal.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"29576409714","text":"# coding: utf-8\n\nimport xarray as xr\nimport numpy as np\nimport os\nimport pandas as pd\nimport sys\nsys.path.append('../lib')\nimport meelib\n\n# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> pre-defined information\nvarList = [\"AQI\", \"PM2.5\", \"PM2.5_24h\", \"PM10\", \"PM10_24h\", \"SO2\", \"SO2_24h\", \"NO2\", \"NO2_24h\", \"O3\", \"O3_24h\", \"O3_8h\", \"O3_8h_24h\", \"CO\", \"CO_24h\"]\nunits = ['ug/m3' for _ in varList]\nunits[0] = ''\nunits[-1] = 'mg/m3'\nunits[-2] = 'mg/m3'
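\n# For reference (illustrative note, not in the original): the lines above give\n# AQI a blank unit (dimensionless), CO and CO_24h 'mg/m3', and every other\n# pollutant 'ug/m3'.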
\n\n# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> key ctrl variables\nstart_YYYYMM, end_YYYYMM = '201405', '202207'\noutDir = \"ncdata\"\ncsi_file = '../1-csi/csi.csv'\n\nechoLevel = 1 # print control\n\n# ================= dependent\nyms = meelib.render_ym_series(start_YYYYMM, end_YYYYMM)\nos.makedirs(outDir, exist_ok = True)\n\n\ndf_csi = pd.read_csv(csi_file)\ndf_csi.set_index(['code'], inplace = True)\n\n\n\ndef init_1m(ym):\n\n with open(f'CS-Lists/sites.{ym}.txt') as f:\n siteCodes = f.read().splitlines()\n with open(f'CS-Lists/cities.{ym}.txt', encoding = \"utf-8\") as f:\n cityNames = f.read().splitlines()\n\n ym_days = meelib.get_ndays_of_ym(ym)\n ymdhs = meelib.render_dh_series(f\"{ym}0100\", f\"{ym}{ym_days}23\")\n\n arrS = np.full((len(ymdhs), len(siteCodes)), np.nan)\n arrC = np.full((len(ymdhs), len(cityNames)), np.nan)\n\n ds = xr.Dataset()\n\n ds.coords['site'] = (\"site\", siteCodes)\n ds.coords['city'] = ('city', cityNames)\n ds.coords['ymdh'] = (\"ymdh\", ymdhs)\n\n\n\n ds['site_name'] = (\"site\", df_csi.loc[siteCodes, :].name.values)\n ds['site_city'] = (\"site\", df_csi.loc[siteCodes, :].city.values)\n ds['site_prov'] = (\"site\", df_csi.loc[siteCodes, :].prov.values)\n ds['site_nation'] = (\"site\", df_csi.loc[siteCodes, :].nation.values)\n ds['site_lat'] = (\"site\", df_csi.loc[siteCodes, :].lat.values)\n ds['site_lon'] = (\"site\", df_csi.loc[siteCodes, :].lon.values)\n ds['site_risk'] = (\"site\", df_csi.loc[siteCodes, :].risk.values)\n\n for i, v in enumerate(varList):\n ds[v] = ((\"ymdh\", \"site\"), arrS)\n if units[i]:\n ds[v].attrs['units'] = units[i]\n for i, v in enumerate(varList):\n ds[f\"{v}_city\"] = ((\"ymdh\", \"city\"), arrC)\n if units[i]:\n ds[f\"{v}_city\"].attrs['units'] = units[i]\n\n ds.attrs['source'] = 'https://quotsoft.net/air/'\n\n # encoding = {v : {\"_FillValue\" : None, \"zlib\" : True, \"complevel\" : 1} for v in varList}\n encoding = {v : {\"_FillValue\" : None, 'dtype' : 'float32'} for v in varList}\n encoding.update({f'{v}_city' : {\"_FillValue\" : None, 'dtype' : 'float32'} for v in varList})\n\n\n\n ds.to_netcdf(f'ncdata/CNMEE.aqo.site-city.{ym}.nc', encoding=encoding, format='NETCDF4')\n\n\nfor ym in yms:\n meelib.logT(f\"processing {ym}\", 1, echoLevel)\n init_1m(ym)\n # break\n","repo_name":"Roadelse/AirQualityObs-CNMEE","sub_path":"2-csv2nc/2-init.py","file_name":"2-init.py","file_ext":"py","file_size_in_byte":2737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"7854745634","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# -----------------------------------------------------------\n# REST API mlapi entry point \n# based on: https://github.com/rodrigo-arenas/fast-ml-deploy/blob/master/app.py\n#\n# (C) 2021 Statistics Canada\n# Author: Andres Solis Montero\n# -----------------------------------------------------------\nfrom fastapi import FastAPI\nfrom ml.classifier import IrisClassifier\nfrom pydantic import BaseModel, conlist\nfrom typing import List, Any\n\napp = FastAPI(title=\"MLAPI Template\", description=\"API for ml model\", version=\"1.0\")\n\"\"\"FastAPI global app instance\"\"\"\n\nclass IrisPredictionInput(BaseModel):\n \"\"\"\n Request data description containing a list of lists, each inner list holding four \n features (sepal length, sepal width, petal length, petal width), all \n expressed in centimeters. \n\n Examples:\n ```\n instance = IrisPredictionInput(data=[[0, 1, 2, 3]])\n ```\n \"\"\"\n data: List[conlist(float, min_items=4, max_items=4)]
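\n\n# NOTE (illustrative, not in the original): conlist enforces exactly four floats\n# per row, so FastAPI rejects a payload such as {'data': [[5.1, 3.5, 1.4]]} with a\n# 422 validation error before it ever reaches the model.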
\n\nclass IrisPredictionResponse(BaseModel):\n \"\"\"\n Prediction response with the predicted classes and class probabilities. \n\n Examples:\n ```\n IrisPredictionResponse(prediction=[0], probability=[1.0, 0, 0])\n ```\n \"\"\"\n prediction: List[int]\n probability: List[Any]\n\nclf = IrisClassifier.load()\n\"\"\"Global classifier instance\"\"\"\n\n@app.get(\"/\")\ndef read_root():\n \"\"\"\n FastAPI GET route '/'\n\n Returns:\n dict:\n A dummy Hello World! dictionary\n \"\"\"\n return {\"Hello\":\"World!\"}\n\n@app.post(\"/predict\",response_model=IrisPredictionResponse)\nasync def predict(iris: IrisPredictionInput):\n \"\"\"\n FastAPI POST route '/predict' endpoint for a prediction request\n \n Args:\n iris: IrisPredictionInput\n A list of lists, each containing 4 features e.g., [[1,0,1,1]]\n\n Returns:\n IrisPredictionResponse:\n A prediction response object\n \"\"\"\n return clf.predict(iris.data)\n\n@app.post(\"/train\")\nasync def train(gradient_boosting: bool = False) -> bool:\n \"\"\"\n FastAPI POST route '/train' endpoint to train our model\n \n Args:\n gradient_boosting: bool\n A boolean flag to switch between a DTreeClassifier or GradientBoostClassifier\n\n Returns:\n bool:\n A boolean value identifying if training was successful.\n \"\"\"\n data = clf.dataset()\n return clf.train(data['X'], data['y'], gradient_boosting)\n\n","repo_name":"StatCan/mlapi","sub_path":"src/mlapi/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2427,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"83"} +{"seq_id":"38780236554","text":"# %%\nimport os\n\n\ndef remove_non_windows_friendly_characters_from_image_names():\n city_fps = os.listdir(\"images\")\n for city_name in city_fps:\n images = os.listdir(os.path.join(\"images\", city_name))\n image_names = [os.path.join(\"images\", city_name, fp)\n for fp in images]\n for image_name in image_names:\n new_name = image_name.replace('|', '')\n print()\n print(image_name)\n print(new_name)\n os.rename(image_name, new_name)
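\n\n# NOTE (illustrative, not in the original file): '|' is one of the characters\n# Windows forbids in file names, which is why it is stripped before renaming.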
\n\nif __name__ == \"__main__\":\n remove_non_windows_friendly_characters_from_image_names()\n\n# %%\n","repo_name":"rhodesy76-2/City-Life-2","sub_path":"migration.py","file_name":"migration.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"71400991950","text":"import pymongo # Python needs a MongoDB driver to access the MongoDB database; the driver used here is pymongo\r\n\r\nmdb_obj = pymongo.MongoClient(\"mongodb://localhost:27017/\") # MongoDB client object; 27017 is the default MongoDB port\r\n\r\nmydb = mdb_obj[\"info\"] # name of the database is info\r\n\r\ntab = mydb[\"user_info\"] # name of the collection (a table in MySQL terms) is user_info \r\n\r\ndoc = { \"_id\":1,\"name\": \"John\", \"address\": \"Highway 37\" } # a document in MongoDB corresponds to a record in MySQL\r\n\r\nx = tab.insert_one(doc)\r\n\r\ndblist = mdb_obj.list_database_names()\r\nif \"info\" in dblist:\r\n print(\"The database exists.\")\r\n\r\ncollist = mydb.list_collection_names()\r\nif \"user_info\" in collist:\r\n print(\"The collection exists.\")\r\n\r\nfor x in tab.find():\r\n print(x)","repo_name":"zsetri/API","sub_path":"API/sample.py","file_name":"sample.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"16305394243","text":"from pathlib import Path\n\nBASE_DIR = Path(__file__).resolve().parent\n\nSECRET_KEY = 'django-insecure-1!@=*3w7^$_5fjgi8(4xe*=-7__9d(q6zgh^d=u8y3x*wi*d-i'\n\nINSTALLED_APPS = [\n 'userdata',\n]\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': BASE_DIR / 'db.sqlite3',\n }\n}\n\nUSE_TZ = False\n\nDEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'\n","repo_name":"MishkaGAMII/simulator-of-whatsapp-creation","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"25775302526","text":"\"\"\"\nExtract a minimal tree that includes a set of taxa, and their closest common ancestors.\n\"\"\"\n\n\"\"\"\ne.g. if called on Panthera_tigris, Panthera_leo, Canis_lupus, Crocodylus niloticus, it returns (without the formatting):\n(\n Crocodylus_niloticus_ott35864:6.819304991155585,\n (\n Canis_lupus_ott247341:1.123457,\n (\n Panthera_leo_ott563151:4.654321,\n Panthera_tigris_ott42314:4.814815\n )Panthera_ott563154:0.080247\n )CARNIVORA_ott44565:13.561728\n)Amniota_ott229560:20.0;\n\nThis is not used directly by OneZoom, but is a useful general purpose utility.\n\"\"\"\n\nimport argparse\nimport logging\nimport sys\nfrom typing import Set\n\nfrom .newick_parser import parse_tree\n\n__author__ = \"David Ebbo\"\n\n\ndef extract_minimal_tree(newick_tree, target_taxa: Set[str]):\n # We build the node list as we find and process the nodes\n node_list = []\n\n # Clone the taxa set so we don't modify the original\n target_taxa = set(target_taxa)\n\n for node in parse_tree(newick_tree):\n taxon = node[\"taxon\"]\n ott = node[\"ott\"]\n node_start_index = node[\"start\"]\n node_end_index = node[\"end\"]\n\n # This is a bit hacky. Ideally, the parser would give us the child count\n is_parent_node = newick_tree[node_start_index] == \"(\"\n\n if taxon in target_taxa or ott in target_taxa:\n # We've found a target taxon, so remove it from the target list\n target_taxa.remove(taxon if taxon in target_taxa else ott)\n found_target_taxon = True\n else:\n found_target_taxon = False\n\n # If this is a target taxon or a potential ancestor, it may belong on the nodes list\n if found_target_taxon or is_parent_node:\n # Any node with higher depth must be a child of this one\n children = [n for n in node_list if n[\"depth\"] > node[\"depth\"]]\n\n # Assert that all the children have depth exactly one greater than this node's. 
This is\n # because any deeper nodes would have been bubbled up\n assert all(\n [child_node[\"depth\"] == node[\"depth\"] + 1 for child_node in children]\n )\n\n # Reduce the depth of the children to bubble them up\n for child_node in children:\n child_node[\"depth\"] -= 1\n\n # If we found a taxon, or there are multiple children, we need to add a node to the list\n if found_target_taxon or len(children) > 1:\n # Remove the children from the search list\n node_list = [n for n in node_list if n not in children]\n\n # Full name including the edge length\n tree_string = newick_tree[\n node[\"full_name_start_index\"] : node_end_index\n ]\n if children:\n # Add the children to the tree string\n tree_string = f\"({','.join([child_node['tree_string'] for child_node in children])}){tree_string}\"\n\n node_list.append(\n {\n \"name\": taxon,\n \"ott\": ott,\n \"tree_string\": tree_string,\n \"depth\": node[\"depth\"],\n }\n )\n\n # If we've found all the target taxa, we're done\n if not target_taxa and len(node_list) <= 1:\n break\n\n if target_taxa:\n logging.warning(f'Could not find the following taxa: {\", \".join(target_taxa)}')\n\n # Return the tree, if any\n assert len(node_list) <= 1\n return node_list[0][\"tree_string\"] if len(node_list) > 0 else None\n\n\ndef main():\n parser = argparse.ArgumentParser(description=__doc__)\n parser.add_argument(\n \"treefile\",\n type=argparse.FileType(\"r\"),\n nargs=\"?\",\n default=sys.stdin,\n help=\"The tree file in newick form\",\n )\n parser.add_argument(\n \"outfile\",\n type=argparse.FileType(\"w\"),\n nargs=\"?\",\n default=sys.stdout,\n help=\"The output tree file\",\n )\n parser.add_argument(\n \"--taxa\", \"-t\", nargs=\"+\", required=True, help=\"the taxa to search for\"\n )\n args = parser.parse_args()\n\n target_taxa = set(args.taxa)\n\n # Read the whole file as a string. 
This is not ideal, but it's still\n # very fast even with the full OpenTree tree.\n # This could be optimized to read in chunks, at the cost of more complexity\n tree = args.treefile.read()\n\n result = extract_minimal_tree(tree, target_taxa)\n if result:\n args.outfile.write(result + \";\\n\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"OneZoom/tree-build","sub_path":"oz_tree_build/newick/extract_minimal_tree.py","file_name":"extract_minimal_tree.py","file_ext":"py","file_size_in_byte":4617,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"35735457926","text":"import sys\ninput = lambda : sys.stdin.readline().rstrip()\nsys.setrecursionlimit(100001)\nN=int(input())\n\nconnect=[[] for i in range(N+1)] # adjacency lists for nodes 1..N\n\nfor i in range(N-1):\n a=list(map(int,input().split()))\n connect[a[0]].append(a[1])\n connect[a[1]].append(a[0])\n\nparent=[0]*(N+1)\nvisit=[0]*(N+1)\n\ndef dfs(i): \n visit[i]=1 # mark as visited\n for j in connect[i]:\n if visit[j]!=1: \n if parent[j]==0:\n parent[j]=i\n dfs(j)\n\ndfs(1)\n\nfor i in range(2,N+1):\n print (parent[i])","repo_name":"KIMTHE/algorithm-study","sub_path":"TheSim_ps/자료구조/11725_python.py","file_name":"11725_python.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"83"} +{"seq_id":"12121766363","text":"import numpy as np\r\nimport sys\r\nimport os\r\nimport matplotlib.pyplot as plt\r\nfrom PyQt5.QtWidgets import QWidget\r\nfrom PyQt5.QtWidgets import QLabel\r\nfrom PyQt5.QtWidgets import QApplication\r\nfrom PyQt5.QtWidgets import QPushButton\r\nfrom PyQt5.QtWidgets import QLineEdit\r\nfrom PyQt5.QtWidgets import QMessageBox\r\nfrom PyQt5.QtWidgets import QInputDialog\r\nfrom PyQt5.QtWidgets import QFileDialog\r\nfrom PyQt5.QtWidgets import QCheckBox\r\nfrom PyQt5.QtGui import QIcon, QFont\r\nfrom PyQt5 import QtCore, QtGui\r\n\r\nclass MainWindow(QWidget):\r\n\r\n def __init__(self, parent=None):\r\n super().__init__(parent)\r\n self.initUI()\r\n self.save_results = False\r\n self.route = None\r\n\r\n def initUI(self):\r\n \r\n reg_int = QtCore.QRegExp(\"[0-9]{1,8}\")\r\n validator_int = QtGui.QRegExpValidator(reg_int)\r\n reg = QtCore.QRegExp(\"^[-]?[0-9]{1,4}(\\.[0-9]{1,5})?\")\r\n validator = QtGui.QRegExpValidator(reg)\r\n \r\n self.t_0_label = QLabel(self)\r\n self.t_0_label.move(10, 12)\r\n self.t_0_label.setFont(QFont(\"Arial\", 11))\r\n self.t_0_label.setText(\"Введіть початок відрізку розбиття \"+\r\n \"t_0: \")\r\n \r\n self.t_0 = QLineEdit(self)\r\n self.t_0.move(337, 10)\r\n self.t_0.resize(80, 20)\r\n self.t_0.setValidator(validator)\r\n \r\n self.t_n_label = QLabel(self)\r\n self.t_n_label.move(10, 40)\r\n self.t_n_label.setFont(QFont(\"Arial\", 11))\r\n self.t_n_label.setText(\"Введіть кінець відрізку розбиття \"+\r\n \"t_n: \")\r\n \r\n self.t_n = QLineEdit(self)\r\n self.t_n.move(337, 38)\r\n self.t_n.resize(80, 20)\r\n self.t_n.setValidator(validator)\r\n \r\n self.N_label = QLabel(self)\r\n self.N_label.move(10, 68)\r\n self.N_label.setFont(QFont(\"Arial\", 11))\r\n self.N_label.setText(\"Введіть кількість розбиттів N: \")\r\n \r\n self.N = QLineEdit(self)\r\n self.N.move(337, 66)\r\n self.N.resize(80, 20)\r\n self.N.setValidator(validator_int)\r\n \r\n self.K_label = QLabel(self)\r\n self.K_label.move(10, 96)\r\n self.K_label.setFont(QFont(\"Arial\", 11))\r\n self.K_label.setText(\"Введіть кількість гармонік K: \")\r\n \r\n self.K = QLineEdit(self)\r\n 
self.K.move(337, 94)\r\n self.K.resize(80, 20)\r\n self.K.setValidator(validator_int)\r\n \r\n self.cb = QCheckBox('Зберегти результати у файли', self)\r\n self.cb.move(10, 135)\r\n self.cb.setFont(QFont(\"Arial\", 11))\r\n self.cb.stateChanged.connect(self.save)\r\n\r\n but1 = QPushButton(\"Ввести значення\\nв точках розбиття\", self)\r\n but1.move(150, 180)\r\n but1.resize(130, 40)\r\n but1.clicked.connect(self.manual_values)\r\n \r\n but2 = QPushButton(\"Згенерувати випадкові\\nзначення\", self)\r\n but2.move(300, 180)\r\n but2.resize(130, 40)\r\n but2.clicked.connect(self.random_values)\r\n \r\n self.text_label = QLabel(self)\r\n self.text_label.move(450, 20)\r\n self.text_label.setFont(QFont(\"Arial\", 11))\r\n self.text_label.setText(\"Довідка:\")\r\n \r\n self.text_label = QLabel(self)\r\n self.text_label.move(450, 50)\r\n self.text_label.setFont(QFont(\"Arial\", 11))\r\n self.text_label.setText(\"1) Період T = t_n - t_0\")\r\n \r\n self.text_label = QLabel(self)\r\n self.text_label.move(450, 80)\r\n self.text_label.setFont(QFont(\"Arial\", 11))\r\n self.text_label.setText(\"2) Для більш точного результату\")\r\n \r\n self.text_label = QLabel(self)\r\n self.text_label.move(466, 100)\r\n self.text_label.setFont(QFont(\"Arial\", 11))\r\n self.text_label.setText(\"варто обрати N = 2K+1\")\r\n\r\n self.setGeometry(150, 150, 680, 250)\r\n self.setWindowTitle(\"Апроксимація періодичного сигналу рядами Фур\\'є\")\r\n self.setWindowIcon(QIcon('icon.jpg'))\r\n self.show()\r\n \r\n def save(self, state):\r\n \r\n if state == QtCore.Qt.Checked:\r\n self.save_results = True\r\n if self.route is None:\r\n self.route = str(QFileDialog.getExistingDirectory(self,\r\n \"Виберіть робочу директорію для збереження\",\r\n \"/\", QFileDialog.ShowDirsOnly))\r\n if self.route != \"\":\r\n self.route = self.route+\"/\"\r\n else:\r\n self.save_results = False\r\n \r\n def manual_values(self):\r\n \r\n if (not self.isEmpty() or not self.t_0_less_than_t_n() or \r\n not self.N_more_than_2K_1() or not self.positive_N() or\r\n not self.positive_K()):\r\n \r\n QMessageBox.warning(self, 'Warning',\r\n '1) Всі поля мають бути заповнені\\n'+\r\n '2) Значення t_0 має бути меншим, ніж значення t_n\\n'+\r\n '3) Значення N має бути більшим, ніж 2\\n'+\r\n '4) Значення N має бути не меншим, ніж 2K+1\\n'+\r\n '5) Значення K має бути більшим, ніж 0')\r\n else:\r\n N = int(self.N.text())\r\n t_0 = float(self.t_0.text())\r\n t_n = float(self.t_n.text())\r\n K = int(self.K.text())\r\n T = t_n - t_0\r\n L = T/2\r\n t = np.arange(t_0, t_n+T/(2*N), T/N)\r\n y = []\r\n \r\n for i in range(N):\r\n value, ok = QInputDialog.getDouble(self, 'Input Dialog',\r\n 'Введіть значення в точці t['+str(i)+'] = '+\r\n str(round(t[i],3)), decimals=5)\r\n if ok:\r\n y.append(value)\r\n else:\r\n break\r\n \r\n y = np.array(y)\r\n if y.shape[0] == N:\r\n a, b = self.fourier_coeffs(y, t[:N], K, L)\r\n t_f = np.arange(t_0, t_n+0.0005, 0.001)\r\n f_approx = self.fourier_approximation(a, b, t_f, K, L)\r\n \r\n y = np.append(y, y[0])\r\n \r\n if self.save_results == True:\r\n \r\n dir_name = 'manual/'\r\n \r\n self.save_results_function(dir_name, a, b, t_f, f_approx)\r\n \r\n self.plot_res(t, y, t_f, f_approx)\r\n \r\n def random_values(self):\r\n \r\n if (not self.isEmpty() or not self.t_0_less_than_t_n() or \r\n not self.N_more_than_2K_1() or not self.positive_N() or\r\n not self.positive_K()):\r\n \r\n QMessageBox.warning(self, 'Warning',\r\n '1) Всі поля мають бути заповнені\\n'+\r\n '2) Значення t_0 має бути меншим, ніж значення t_n\\n'+\r\n '3) 
Значення N має бути більшим, ніж 2\\n'+\r\n '4) Значення N має бути не меншим, ніж 2K+1\\n'+\r\n '5) Значення K має бути більшим, ніж 0')\r\n else:\r\n N = int(self.N.text())\r\n t_0 = float(self.t_0.text())\r\n t_n = float(self.t_n.text())\r\n K = int(self.K.text())\r\n \r\n T = t_n - t_0\r\n L = T/2\r\n \r\n t = np.arange(t_0, t_n+T/(2*N), T/N)\r\n y = np.random.randn(N)\r\n \r\n a, b = self.fourier_coeffs(y, t[:N], K, L)\r\n t_f = np.arange(t_0, t_n+0.0005, 0.001)\r\n f_approx = self.fourier_approximation(a, b, t_f, K, L)\r\n \r\n y = np.append(y, y[0])\r\n \r\n if self.save_results:\r\n \r\n dir_name = 'random/'\r\n \r\n self.save_results_function(dir_name, a, b, t_f, f_approx)\r\n \r\n self.plot_res(t, y, t_f, f_approx)\r\n \r\n def save_results_function(self, dir_name, a, b, t_f, f_approx):\r\n \r\n i = 1\r\n while os.path.exists(self.route+dir_name+'model_'+str(i)+'/') == True:\r\n i += 1\r\n \r\n dir_name = self.route+dir_name+'model_'+str(i)+'/'\r\n os.makedirs(os.path.dirname(dir_name))\r\n \r\n with open(dir_name+'result.txt', 'w') as file:\r\n \r\n file.write('Точки розбиття Значення функції\\n\\n')\r\n \r\n for k in range(t_f.shape[0]-1):\r\n file.write(str(round(t_f[k], 3))+': '+\r\n str(f_approx[k])+'\\n')\r\n \r\n file.write(str(round(t_f[t_f.shape[0]-1], 3))+\r\n ': '+str(f_approx[t_f.shape[0]-1]))\r\n \r\n with open(dir_name+'fourier_coeffs.txt', 'w') as file:\r\n \r\n file.write('Коефіцієнти ряду Фур\\'є\\n\\n')\r\n file.write('a[0] = '+str(a[0])+'\\n\\n')\r\n \r\n for k in range(1, a.shape[0]-1):\r\n file.write('a['+str(k)+'] = '+str(a[k])+'\\n')\r\n \r\n file.write('a['+str(a.shape[0]-1)+'] = '+\r\n str(a[a.shape[0]-1])+'\\n\\n')\r\n \r\n for k in range(b.shape[0]-1):\r\n file.write('b['+str(k+1)+'] = '+str(b[k])+'\\n')\r\n \r\n file.write('b['+str(b.shape[0])+'] = '+str(b[b.shape[0]-1]))\r\n \r\n def isEmpty(self):\r\n \r\n if (self.N.text() == '' or self.t_0.text() == '' or\r\n self.t_n.text() == '' or self.K.text() == ''):\r\n return False\r\n else:\r\n return True\r\n \r\n def t_0_less_than_t_n(self):\r\n \r\n if float(self.t_0.text()) < float(self.t_n.text()):\r\n return True\r\n else:\r\n return False\r\n \r\n def positive_N(self):\r\n \r\n if int(self.N.text()) >= 3:\r\n return True\r\n else:\r\n return False\r\n \r\n def positive_K(self):\r\n \r\n if int(self.K.text()) > 0:\r\n return True\r\n else:\r\n return False\r\n \r\n def N_more_than_2K_1(self):\r\n \r\n if int(self.N.text()) >= 2*int(self.K.text())+1:\r\n return True\r\n else:\r\n return False\r\n \r\n def fourier_coeffs(self, y, t, K, L):\r\n \r\n a = [2*np.mean(y)]\r\n b = []\r\n \r\n for i in range(1, K+1):\r\n a.append(2*np.mean(y*np.cos(i*t*np.pi/L)))\r\n b.append(2*np.mean(y*np.sin(i*t*np.pi/L)))\r\n \r\n return np.array(a), np.array(b)\r\n \r\n def fourier_approximation(self, a, b, t, K, L):\r\n \r\n f = a[0]/2\r\n for i in range(1, K+1):\r\n f += a[i]*np.cos(i*t*np.pi/L)\r\n f += b[i-1]*np.sin(i*t*np.pi/L)\r\n \r\n return f\r\n \r\n def plot_res(self, t, y, t_f, f_approx):\r\n \r\n plt.figure()\r\n plt.plot(t_f, f_approx)\r\n plt.scatter(t, y, c='r')\r\n plt.grid(True)\r\n plt.show()\r\n\r\nif __name__ == '__main__':\r\n app = QApplication(sys.argv)\r\n w = MainWindow()\r\n sys.exit(app.exec_())","repo_name":"mHaleta/ASKM-Labs","sub_path":"ASKM-course-work/ASKM-course-work.py","file_name":"ASKM-course-work.py","file_ext":"py","file_size_in_byte":11766,"program_lang":"python","lang":"uk","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"12437965014","text":"# KNN 3\r\n# 
KNN 5\r\n# Naive Bayes\r\n# NN\r\n# Decision Tree\r\n# SVM linear\r\n# SVM poly\r\n# SVM sigmoid\r\n\r\nimport os, re, time\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nfrom sklearn.naive_bayes import GaussianNB\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom sklearn.tree import DecisionTreeClassifier\r\nfrom sklearn.model_selection import cross_val_score\r\nfrom sklearn.svm import SVC\r\nfrom sklearn.model_selection import KFold\r\nfrom sklearn.feature_extraction.text import CountVectorizer\r\n\r\nfrom util import classify\r\n\r\nDIR = \"Classification\"\r\nDATASET_FILENAME = DIR+os.sep+\"Rest_Mex_2022_Sentiment_Analysis_Track_Train.xlsx\"\r\nCHECKPOINT_FILENAME_1 = DIR+os.sep+\"lemmas_adj.npy\"\r\nCHECKPOINT_FILENAME_2_x = DIR+os.sep+\"Rest_Mex_Preprocessed-x.npy\"\r\nCHECKPOINT_FILENAME_2_y = DIR+os.sep+\"Rest_Mex_Preprocessed-y.npy\"\r\n\r\nCROSS_VALIDATION_TIMES = 2\t#5\r\n\r\n\r\n\"\"\"\tRest_Mex_2022_Sentiment_Analysis_Track_Train.xlsx \r\n\r\n\tSamples: #30,213\r\n\tFeatures: #2\r\n\tType of Classifications: #2\t\r\n\r\n\tStructure:\r\n\t\t- Title\t\t\t-> str\r\n\t\t- Opinion\t\t-> str\r\n\t\t- Polarity\t\t-> int {1,2,3,4,5}\t\r\n\t\t\t\t\t\t\t1 - Very Negative\r\n\t\t\t\t\t\t\t2 - Negative\r\n\t\t\t\t\t\t\t3 - Neutral\r\n\t\t\t\t\t\t\t4 - Positive\r\n\t\t\t\t\t\t\t5 - Very Positive\r\n\t\t- Attraction\t-> str {Hotel, Restaurant, Attractive}\r\n\"\"\"\r\ntarget_names = [\"Very Negative\", \"Negative\", \"Neutral\", \"Positive\", \"Very Positive\"]\r\n\r\ndef dataset_resume(X:np.ndarray, y:np.ndarray, title:str=\"Dataset Resume\"):\r\n\ttarget_names, target_counts = np.unique(y, return_counts=True)\r\n\tcount_tot = target_counts.sum()\r\n\r\n\ty_stats = pd.DataFrame(data={\r\n\t\t\"Name\":target_names,\r\n\t\t\"Count\":target_counts, \r\n\t\t\"Percent\":np.array([c*100/count_tot for c in target_counts])\r\n\t})\r\n\r\n\tprint(\"\\n\")\r\n\tprint(\"*\"*(len(title)+32))\r\n\tprint(f\"{'*'*15} {title} {'*'*15}\")\r\n\tprint(f\"\\n\\t - Statistics of Classification -\")\r\n\tprint(f\"{y_stats}\")\r\n\tprint(f\"\\n\\t - Data -\")\r\n\tprint(X)\r\n\tprint(\"*\"*(len(title)+32))\r\n\tprint(\"\\n\")\r\n\r\n\r\ndef oversampling(X:np.ndarray, y:np.ndarray):\r\n\tfrom imblearn.over_sampling import RandomOverSampler\r\n\r\n\tX, y = RandomOverSampler(random_state=0).fit_resample(X,y)\r\n\treturn X, np.reshape(y, (y.shape[0],1))
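\r\n\r\n# NOTE (illustrative, not in the original): RandomOverSampler only duplicates\r\n# minority-class rows until every class reaches the majority count; it does not\r\n# synthesize new samples the way SMOTE would.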
\".join([re.sub(r\"[\\t ]+\", \" \", re.sub(r\"(^\\s|\\s$)\", \"\", s, 0, re.MULTILINE), 0, re.MULTILINE) for s in row])\r\n\t\t\tdoc = nlp(text)\r\n\t\t\tlemmas = \" \".join([token.lemma_ for token in doc if token.pos_ in [\"ADJ\"]])\r\n\t\t\tX_preprocessed.append(lemmas)\r\n\t\tX_preprocessed = np.array(X_preprocessed)\r\n\r\n\t\tnp.save(CHECKPOINT_FILENAME_1, X_preprocessed)\r\n\t\tprint(f\"CHECKPOINT '{CHECKPOINT_FILENAME_1}' SAVED\")\r\n\telse:\r\n\t\tX_preprocessed = np.load(CHECKPOINT_FILENAME_1)\r\n\t\tprint(f\"CHECKPOINT '{CHECKPOINT_FILENAME_1}' LOADED\")\r\n\r\n\tfreq_count_vectorizer = CountVectorizer()\r\n\tX_preprocessed = freq_count_vectorizer.fit_transform(X_preprocessed).toarray()\r\n\tvocabulary = freq_count_vectorizer.get_feature_names_out()\r\n\r\n\treturn X_preprocessed, vocabulary\r\n\r\nif __name__==\"__main__\":\r\n\t### BEGIN DATASET LOADING ###\r\n\tif os.path.exists(CHECKPOINT_FILENAME_2_x):\r\n\t\trestmex_X = np.load(CHECKPOINT_FILENAME_2_x)\r\n\t\trestmex_Y = np.load(CHECKPOINT_FILENAME_2_y)\r\n\t\tprint(f\"CHECKPOINT '{CHECKPOINT_FILENAME_2_x}' LOADED\")\r\n\telse:\r\n\t#\tRest Mex Dataset\r\n\t\trestmex_dataset = pd.read_excel(DATASET_FILENAME, dtype=str)\\\r\n\t\t\t.replace(to_replace=np.NaN,value=\"\")\r\n\r\n\t\trestmex_X = np.array([\". \".join(patter) for patter in restmex_dataset.drop([\"Polarity\", \"Attraction\"], axis=1).values.tolist()])\r\n\t\trestmex_Y = restmex_dataset.drop([\"Title\", \"Opinion\", \"Attraction\"], axis=1)\\\r\n\t\t\t.astype(int).values\r\n\t\t\r\n\t#\tPLN Preprocessing\r\n\t\trestmex_X, vocabulary = pln_preprocessed(restmex_X)\r\n\t\tdataset_resume(restmex_X, restmex_Y, \"Rest Mex Sentiment Analysis Dataset\")\r\n\r\n\t#\tOverSampling Preprocessing\r\n\t\trestmex_X, restmex_Y = oversapling(restmex_X, restmex_Y)\r\n\t\t#dataset_resume(restmex_X, restmex_Y, \"Rest Mex Sentiment Analysis Dataset W/ Oversampling\")\r\n\r\n\t\tnp.save(CHECKPOINT_FILENAME_2_x, restmex_X)\r\n\t\tnp.save(CHECKPOINT_FILENAME_2_y, restmex_Y)\r\n\t\tprint(f\"CHECKPOINT '{CHECKPOINT_FILENAME_2_x}' SAVED\")\r\n\t### END OF DATASET LOADING ###\r\n\tdataset_resume(restmex_X, restmex_Y, \"Rest Mex Sentiment Analysis Dataset W/ Oversampling\")\r\n\trestmex_Y = np.reshape(restmex_Y, (restmex_Y.shape[0],))\r\n\r\n\t### BEGIN CLASSIFICATION ###\r\n\tprint(\"CLASSIFICATION\")\r\n\r\n#\tKNN\r\n\t#\t-\t1NN\r\n\tknn = KNeighborsClassifier(n_neighbors=1, weights='distance')\r\n\tclassify(knn, restmex_X, restmex_Y, labels=target_names, title=\"KNN - 1NN\", by_iterations=True)\r\n\r\n\t#\t-\t3NN\r\n\tknn = KNeighborsClassifier(n_neighbors=3, weights='distance')\r\n\tclassify(knn, restmex_X, restmex_Y, labels=target_names, title=\"KNN - 3NN\", by_iterations=True)\r\n\r\n\t#\t-\t5NN\r\n\tknn = KNeighborsClassifier(n_neighbors=5, weights='distance')\r\n\tclassify(knn, restmex_X, restmex_Y, labels=target_names, title=\"KNN - 5NN\", by_iterations=True)\r\n\r\n\r\n#\tNaive Bayes\r\n\tgnb = GaussianNB()\r\n\tclassify(gnb, restmex_X, restmex_Y, labels=target_names, title=\"Naive Bayes\", by_iterations=True)\r\n\t\r\n\r\n#\tDecision Tree\r\n\tdt = DecisionTreeClassifier(random_state=0)\r\n\tclassify(dt, restmex_X, restmex_Y, labels=target_names, title=\"Decision Tree\", by_iterations=True)\r\n\r\n\r\n#\tSVM\r\n\t#\t-\tLinear Kernel\r\n\tsvm = SVC(kernel=\"linear\", C=1)\r\n\tclassify(svm, restmex_X, restmex_Y, labels=target_names, title=\"SVM - Linear Kernel\", by_iterations=True)\r\n\r\n\t#\t-\tPolynomial Kernel\r\n\tsvm = SVC(kernel=\"poly\", C=1)\r\n\tclassify(svm, restmex_X, 
\r\n\r\n#\tNaive Bayes\r\n\tgnb = GaussianNB()\r\n\tclassify(gnb, restmex_X, restmex_Y, labels=target_names, title=\"Naive Bayes\", by_iterations=True)\r\n\t\r\n\r\n#\tDecision Tree\r\n\tdt = DecisionTreeClassifier(random_state=0)\r\n\tclassify(dt, restmex_X, restmex_Y, labels=target_names, title=\"Decision Tree\", by_iterations=True)\r\n\r\n\r\n#\tSVM\r\n\t#\t-\tLinear Kernel\r\n\tsvm = SVC(kernel=\"linear\", C=1)\r\n\tclassify(svm, restmex_X, restmex_Y, labels=target_names, title=\"SVM - Linear Kernel\", by_iterations=True)\r\n\r\n\t#\t-\tPolynomial Kernel\r\n\tsvm = SVC(kernel=\"poly\", C=1)\r\n\tclassify(svm, restmex_X, restmex_Y, labels=target_names, title=\"SVM - Polynomial Kernel\", by_iterations=True)\r\n\r\n\t#\t-\tSigmoid Kernel\r\n\tsvm = SVC(kernel=\"sigmoid\", C=1)\r\n\tclassify(svm, restmex_X, restmex_Y, labels=target_names, title=\"SVM - Sigmoid Kernel\", by_iterations=True)\r\n\r\n# \tNeural Network\r\n\timport torch as t\r\n\tfrom torch import nn, optim\r\n\trestmex_Y = np.reshape(restmex_Y, (restmex_Y.shape[0],))\r\n\trestmex_X = t.FloatTensor(restmex_X)\r\n\trestmex_Y = t.LongTensor(restmex_Y)-1\r\n\r\n\tdef getNN(\r\n\t\thidden_layers:tuple, \r\n\t\tlearning_rate:float=0.1, \r\n\t\tin_num:int=100, \r\n\t\tout_num:int=5, \r\n\t\tactivation_hidden:nn.Module=nn.ReLU(), \r\n\t\tactivation_output:nn.Module=nn.Sigmoid(), \r\n\t\tbackpropagation_fun:optim.Optimizer=optim.Adam\r\n\t):\r\n\t\tlayers = []\r\n\t\tfor i in range(0, hidden_layers[0]+1):\r\n\t\t\tactivation = activation_hidden\r\n\t\t\tif i==hidden_layers[0]:\r\n\t\t\t\tactivation = activation_output\r\n\r\n\t\t\tprint(f'({in_num if i==0 else hidden_layers[1]},{out_num if i==hidden_layers[0] else hidden_layers[1]})', end='\\n' if i==hidden_layers[0] else ' -> ')\r\n\r\n\t\t\tlayer = nn.Linear(\r\n\t\t\t\tin_num if i==0 else hidden_layers[1], \r\n\t\t\t\tout_num if i==hidden_layers[0] else hidden_layers[1],\r\n\t\t\t\tFalse\r\n\t\t\t)\r\n\t\t\t\r\n\t\t\tlayers.append((f'layer{i}', layer))\r\n\t\t\tlayers.append((f'fun{i}', activation))\r\n\r\n\t\tfrom collections import OrderedDict\r\n\t\tmodel = nn.Sequential(OrderedDict(layers))\r\n\t\toptimizer = backpropagation_fun(model.parameters(), lr=learning_rate)\r\n\t\treturn model, optimizer\r\n\r\n\r\n\tnnClassifier, opt = getNN(\r\n\t\thidden_layers = (2, restmex_X.shape[1]//3),\r\n\t\tlearning_rate=0.1,\r\n\t\tin_num = restmex_X.shape[1],\r\n\t\tout_num = 5,\r\n\t\tactivation_hidden = nn.ReLU(),\r\n\t\t# NOTE: CrossEntropyLoss below expects raw logits, so the Softmax here is redundant and can slow convergence\r\n\t\tactivation_output = nn.Softmax(dim=1), #lambda x: t.heaviside(x, t.tensor([0.]))\r\n\t\tbackpropagation_fun = optim.SGD\r\n\t)\r\n\r\n\tdef nn_fit(X, y):\r\n\t\ttime_batch_start = time.time()\r\n\t\ttime_batch_end = time.time()\r\n\t\tepochs = 50 # 8 mins per epoch\r\n\t\tbs = 512\r\n\t\tn = X.shape[0]\r\n\t\tcriterion = nn.CrossEntropyLoss(reduction='mean')\r\n\t\tloss = t.tensor(0.0)\r\n\t\t\r\n\t\tfor epoch in range(epochs):\r\n\t\t\tfor i in range((n-1)//bs+1):\r\n\t\t\t\tprint(f'\\rLoss {loss:.4f} Epoch {epoch+1}/{epochs} Batch {i+1}/{(n-1)//bs+1} ({(time_batch_end-time_batch_start):.2f} seconds)', end=' '*6)\r\n\t\t\t\ttime_batch_start = time.time()\r\n\r\n\t\t\t\topt.zero_grad()\r\n\r\n\t\t\t\tstart_i = i * bs\r\n\t\t\t\tend_i = start_i + bs\r\n\t\t\t\txb = X[start_i:end_i]\r\n\t\t\t\tyb = y[start_i:end_i]\r\n\r\n\t\t\t\tyb_probs = nnClassifier.forward(xb)\r\n\r\n\t\t\t\tloss = criterion(yb_probs, yb)\r\n\t\t\t\tloss.backward()\r\n\t\t\t\topt.step()\r\n\r\n\t\t\t\ttime_batch_end = time.time()\r\n\t\t\t# if epoch%2==0:\r\n\t\t\t# \tview_classify(yb_probs[0], yb[0])\r\n\r\n\tdef nn_predict(X):\r\n\t\twith t.no_grad():\r\n\t\t\ty_probs = nnClassifier.forward(X)\r\n\t\t\ty_pred = t.argmax(y_probs, dim=1)\r\n\t\t\treturn y_pred\r\n\r\n\tdef nn_score(X, y):\r\n\t\tfrom sklearn.metrics import accuracy_score\r\n\t\ty_pred = nn_predict(X)\r\n\t\treturn accuracy_score(y, y_pred)
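\r\n\r\n#\tNOTE (illustrative, not in the original): the labels were shifted to 0..4 above\r\n#\tvia t.LongTensor(restmex_Y)-1 to suit CrossEntropyLoss, so a raw prediction maps\r\n#\tback to the original polarity as y_pred + 1.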
\r\n\r\n\tclassify(None, restmex_X, restmex_Y, labels=target_names, title=\"Neural Network\", by_iterations=True, n_splits=1, fit_fun=nn_fit, predict_fun=nn_predict, score_fun=nn_score)\r\n\r\n\r\n\t### END OF CLASSIFICATION ###\r\n","repo_name":"M1ndBlast/PatternRecognition","sub_path":"Classification/sklearn_classification.py","file_name":"sklearn_classification.py","file_ext":"py","file_size_in_byte":8655,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"8741520272","text":"import requests\nfrom bs4 import BeautifulSoup\n\nresponse = requests.get('https://g1.globo.com/')\ncontent = response.content\n\n# Convert the site content (bytes) into a BeautifulSoup object\nsite = BeautifulSoup(content, 'html.parser')\n\n# Print the site as formatted HTML\n# print(site.prettify())\n\n# HTML of the news item\nnoticia = site.find('div', attrs={'class': 'feed-post-body'})\n\n# News title\ntitulo = noticia.find('a', attrs={'class': 'feed-post-link'})\nprint(titulo)\n\n# Subtitle\nsubtitulo = noticia.find('div', attrs={'class': 'feed-post-body-resumo'})\nprint(subtitulo.text)","repo_name":"caiorr1/UsandoPython","sub_path":"Estudos/Requests/news.py","file_name":"news.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"7369023170","text":"class Solution:\n def findMaxConsecutiveOnes(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n c = 0\n ans = 0\n for val in nums:\n c = (c+val)*val\n ans = max(ans, c)\n return ans\n\ns = Solution()\ninp = [1,0,1,1,0]\nprint(s.findMaxConsecutiveOnes(inp))
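\n\n# How the update works (illustrative note, not in the original): while val == 1\n# the streak grows via (c+1)*1; a zero multiplies the running count away and\n# resets c to 0, so for [1,0,1,1,0] the maximum ends at 2.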
\n","repo_name":"Rohithyeravothula/leetcode","sub_path":"max_ones.py","file_name":"max_ones.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"83"} +{"seq_id":"69928623951","text":"import pytest\n\nfrom torrent_client.download.file_loading.torrent_loader import TorrentLoader\nfrom torrent_client.download.part import Part\nfrom torrent_client.download.test.fakes.fake_file_loader import FakeFileLoader\n\n\nclass TestUnitTorrentLoaderFileToParts:\n def test_one_file_one_part_full_pieces(self):\n file = FakeFileLoader(\n [\n Part(None, 10, 0),\n ]\n )\n\n t_loader = TorrentLoader(files_loaders=[file])\n pieces = t_loader.get_files_parts_in_pieces(10)\n assert len(pieces) == 1\n assert isinstance(pieces[0], list)\n assert file.start_part_size == 10\n assert file.normal_part_size == 10\n\n @pytest.mark.parametrize(\"number_of_parts\", [1, 2, 3])\n def test_one_file_combine_n_to_piece(self, number_of_parts):\n piece_size = 6\n part_size = piece_size//number_of_parts\n file = FakeFileLoader(\n [\n Part(None, part_size, part_ind*part_size) for part_ind in range(number_of_parts)\n ]\n )\n t_loader = TorrentLoader(files_loaders=[file])\n pieces = t_loader.get_files_parts_in_pieces(piece_size)\n assert len(pieces) == 1\n assert isinstance(pieces[0], list)\n assert file.start_part_size == piece_size\n assert file.normal_part_size == piece_size\n assert sum(piece.size for piece in pieces[0]) == piece_size\n\n @pytest.mark.parametrize(\"number_of_files\", [1, 2, 3])\n def test_combine_n_files_one_part(self, number_of_files: int):\n piece_size = 6\n block_size = piece_size//number_of_files\n files = [FakeFileLoader([Part(None, block_size, file_ind*block_size)]) for file_ind in range(number_of_files)]\n t_loader = TorrentLoader(files_loaders=files)\n pieces = t_loader.get_files_parts_in_pieces(piece_size)\n assert len(pieces) == 1\n assert isinstance(pieces[0], list)\n for ind, file in enumerate(files):\n assert file.start_part_size == piece_size-ind*block_size\n assert file.normal_part_size == piece_size\n\n def test_half_part_at_end(self):\n piece_size = 6\n number_of_blocks = 3\n block_size = piece_size//2\n file = FakeFileLoader(\n [\n Part(None, block_size, block_ind*block_size) for block_ind in range(number_of_blocks)\n ]\n )\n t_loader = TorrentLoader(files_loaders=[file])\n pieces = t_loader.get_files_parts_in_pieces(piece_size)\n assert len(pieces) == 2\n assert isinstance(pieces[0], list)\n assert isinstance(pieces[1], list)\n assert sum(part.size for part in pieces[0]) == piece_size\n assert sum(piece.size for piece in pieces[1]) == piece_size/2\n\n\n","repo_name":"israelWert/torrent_client","sub_path":"torrent_client/download/test/unit/test_unit_torrent_loader_files_to_parts.py","file_name":"test_unit_torrent_loader_files_to_parts.py","file_ext":"py","file_size_in_byte":2773,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"12112504606","text":"from xonsh.tools import color_style\nfrom xonsh.style_tools import Token\n\n# setting terminal colors\n# - \\033]4;N;#RRGGBB\\033\\\\ - set color N to #RRGGBB\n# - \\033]S;#RRGGBB\\033\\\\ - set special color to #RRGGBB where S is:\n# -- 10 - foreground - Token.Terminal.Foreground\n# -- 11 - background - Token.Terminal.Background\n# -- 12 - cursor - Token.Terminal.Cursor\n# more info: https://www.xfree86.org/current/ctlseqs.html => Operating System Controls\n\n\n__all__ = ()\n\n_TC_COLOR_NAMES = [\n \"BLACK\",\n \"RED\",\n \"GREEN\",\n \"YELLOW\",\n \"BLUE\",\n \"PURPLE\",\n \"CYAN\",\n \"WHITE\",\n \"INTENSE_BLACK\",\n \"INTENSE_RED\",\n \"INTENSE_GREEN\",\n \"INTENSE_YELLOW\",\n \"INTENSE_BLUE\",\n \"INTENSE_PURPLE\",\n \"INTENSE_CYAN\",\n \"INTENSE_WHITE\",\n]\n\n_TC_SPECIAL_NAMES = [\n \"Foreground\",\n \"Background\",\n \"Cursor\",\n]\n\n\ndef _tc_set_color(idx, color):\n print(f\"\\x1b]4;{idx};{color}\\x1b\\\\\", end=\"\")\n\n\ndef _tc_set_special(idx, color):\n print(f\"\\x1b]{10+idx};{color}\\x1b\\\\\", end=\"\")\n\n\ndef _tc_set_term_colors():\n style = color_style()\n for idx, key in enumerate(_TC_COLOR_NAMES):\n token = getattr(Token.Color, key)\n try:\n color = style.get(token)\n _tc_set_color(idx, color)\n except Exception:\n pass\n\n\ndef _tc_set_spec_colors():\n style = color_style()\n for idx, key in enumerate(_TC_SPECIAL_NAMES):\n token = getattr(Token.Terminal, key)\n try:\n color = style.get(token)\n _tc_set_special(idx, color)\n except Exception:\n pass\n\n\ndef _tc_show_colors():\n collist = []\n for line in range(0, 2):\n for col in range(0, 8):\n collist.append(f\"\\x1b[48;5;{line * 8 + col}m \")\n collist.append(\"\\n\")\n return \"\".join(collist)\n\n\ndef _tc_set_colors(args=None, stdin=None):\n _tc_set_term_colors()\n _tc_set_spec_colors()\n\n if __xonsh__.env.get('XONTRIB_TERMCOLORS_DEBUG', False):\n return _tc_show_colors()\n\n\n@events.on_post_init\ndef on_post_init(**_):\n _tc_set_colors()\n\n\naliases[\"termcolors\"] = _tc_set_colors\n","repo_name":"dyuri/xontrib-termcolors","sub_path":"xontrib/termcolors.py","file_name":"termcolors.py","file_ext":"py","file_size_in_byte":2054,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"83"} +{"seq_id":"71343020111","text":"from tqdm import tqdm\r\nimport time\r\n\r\n# Number of iterations for the processing animation\r\nnum_iterations = 100\r\n\r\n# Create a progress bar using tqdm with leave=True\r\nprogress_bar = tqdm(total=num_iterations, unit='iteration', desc='Processing', bar_format='{desc}: {percentage:3.0f}% {bar}', leave=True)
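\r\n# NOTE (illustrative, not in the original file): this bar_format drops the\r\n# count/ETA fields and shows only the description, percentage and bar;\r\n# leave=True keeps the finished bar on screen after close().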
\r\n\r\n# First loop\r\nfor _ in range(num_iterations // 2):\r\n # Perform some processing here\r\n time.sleep(0.1) # Simulate a small delay\r\n \r\n # Update the progress bar\r\n progress_bar.update(1)\r\n\r\n# Reset the progress bar to 0 so the second loop reuses the same bar\r\nprogress_bar.reset(total=num_iterations)\r\n\r\n# Second loop\r\nfor _ in range(num_iterations // 2):\r\n # Perform some processing here\r\n time.sleep(0.1) # Simulate a small delay\r\n \r\n # Update the progress bar\r\n progress_bar.update(1)\r\n\r\n# Close the progress bar\r\nprogress_bar.close()\r\n","repo_name":"leonardodgomes/python","sub_path":"scripts/utils/progressing_animation.py","file_name":"progressing_animation.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"25105153582","text":"'''\nWrite a script that takes in two numbers from the user and calculates the quotient. Using a try/except,\nthe script should handle:\n\n- if the user enters a string instead of a number\n- if the user enters a zero as the divisor\n\nTest it and make sure it does not crash when you enter incorrect values.\n\n'''\n\n\ntry:\n x = int(input(\"Please write your first number :\"))\n y = int(input(\"Please write your second number :\"))\n division = x/y\nexcept ZeroDivisionError:\n print(\"You can't divide by zero !\")\nexcept ValueError:\n print(\"You have to enter a number !\")\nelse:\n print(\"The quotient is :\", division)\n\n","repo_name":"Dansultan/python_fundamentals-master","sub_path":"09_exceptions/09_02_calculator.py","file_name":"09_02_calculator.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"41271588052","text":"import func\n\nOPERATOR = 'operator'\nFUNC = 'func'\nREF = 'ref'\nLITERAL = 'literal'\nARGS = 'args'\n\noperator_symbols = {\n 'and': 'and',\n 'or': 'or',\n 'eq': '==',\n 'not': 'not',\n 'gt': '>',\n 'lt': '<',\n 'gte': '>=',\n 'lte': '<=',\n}\n\n\ndef compile(exp_tree: dict) -> str:\n if OPERATOR in exp_tree:\n op = exp_tree[OPERATOR]\n args = exp_tree[ARGS]\n compiled_args = [compile(arg) for arg in args]\n return '_{}({})'.format(op, ', '.join(compiled_args))\n elif FUNC in exp_tree:\n func = exp_tree[FUNC]\n args = exp_tree[ARGS]\n compiled_args = [compile(arg) for arg in args]\n return '_{}({})'.format(func, ', '.join(compiled_args))\n elif REF in exp_tree:\n ref = exp_tree[REF]\n tokens = ref.split('.')\n ref = tokens[0]\n if len(tokens) > 1:\n ref += \"['\" + \"']['\".join(tokens[1:]) + \"']\"\n return ref\n elif LITERAL in exp_tree:\n literal = exp_tree[LITERAL]\n if isinstance(literal, str):\n return \"'{}'\".format(literal)\n else:\n return str(literal)\n else:\n raise Exception('InvalidSegmentExpressionSyntax')\n\n\ndef compiles(exp_tree: dict, operators: dict = operator_symbols) -> str:\n if OPERATOR in exp_tree:\n op = exp_tree[OPERATOR]\n args = exp_tree[ARGS]\n compiled_args = [compiles(arg) for arg in args]\n return '(' + (' {} '.format(operators[op])).join(compiled_args) + ')'\n elif FUNC in exp_tree:\n func = exp_tree[FUNC]\n args = exp_tree[ARGS]\n compiled_args = [compiles(arg) for arg in args]\n return '{}({})'.format(func, ', '.join(compiled_args))\n elif REF in exp_tree:\n ref = exp_tree[REF]\n return ref\n elif LITERAL in exp_tree:\n literal = exp_tree[LITERAL]\n if isinstance(literal, str):\n return \"'{}'\".format(literal)\n else:\n return str(literal)\n else:\n raise Exception('InvalidSegmentExpressionSyntax')\n\n\ndef evaluate(exp_tree: dict, variables: dict) -> bool:\n g = 
{}\n g['locals'] = None\n g['globals'] = None\n g['__name__'] = None\n g['__file__'] = None\n g['__builtins__'] = None\n\n context = {\n '_and': func._and,\n '_or': func._or,\n '_eq': func._eq,\n '_not': func._not,\n '_gt': func._gt,\n '_lt': func._lt,\n '_gte': func._gte,\n '_lte': func._lte,\n '_count': func._count,\n '_sum': func._sum,\n '_max': func._max,\n '_min': func._min,\n '_distinct': func._distinct,\n '_has': func._has,\n '_fileter': func._fileter,\n '_select': func._select,\n }\n\n context.update(variables)\n expression = compile(exp_tree)\n return eval(expression, g, context)\n","repo_name":"okada-datasign/segment-expression","sub_path":"expression.py","file_name":"expression.py","file_ext":"py","file_size_in_byte":2789,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"111322471","text":"from pathlib import Path\nimport sys\n\nDEBUG = True\n\nBASE_DIR = Path(__file__).parent\n\nHOST = \"localhost\"\nPORT = 9999\nRETRIES = 5\n\nLOG_FILE = BASE_DIR / \"logs/client.audit\"\nERROR_FILE = BASE_DIR / \"logs/client.error\"\nLOGGERS = {\n \"version\": 1,\n \"handlers\": {\n \"console\": {\n \"class\": \"logging.StreamHandler\",\n \"stream\": sys.stderr,\n \"formatter\": \"basic\",\n },\n \"audit_file\": {\n \"class\": \"logging.handlers.RotatingFileHandler\",\n \"maxBytes\": 5000000,\n \"backupCount\": 1,\n \"filename\": LOG_FILE,\n \"encoding\": \"utf-8\",\n \"formatter\": \"basic\",\n },\n \"error_file\": {\n \"class\": \"logging.handlers.RotatingFileHandler\",\n \"maxBytes\": 5000000,\n \"backupCount\": 1,\n \"filename\": ERROR_FILE,\n \"encoding\": \"utf-8\",\n \"formatter\": \"basic\",\n },\n },\n \"formatters\": {\n \"basic\": {\n \"style\": \"{\",\n \"format\": \"{asctime:s} [{levelname:s}] -- {name:s}: {message:s}\",\n }\n },\n \"loggers\": {\n \"user_info\": {\n \"handlers\": (\"console\",),\n \"level\": \"INFO\" if DEBUG is False else \"DEBUG\",\n },\n \"error\": {\"handlers\": (\"error_file\",), \"level\": \"ERROR\"},\n \"audit\": {\"handlers\": (\"audit_file\",), \"level\": \"DEBUG\"},\n },\n}\n","repo_name":"Moist-Cat/loadbal","sub_path":"src/loadbal/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"75071118032","text":"import datetime as dt\n\nfrom pydantic import BaseModel\n\n\nclass NoteSummary(BaseModel):\n minutes: int\n payment: int | None = None\n\n class Config:\n from_attributes = True\n\n\nclass NoteBase(BaseModel):\n minutes: int | None = None\n text: str | None = None\n\n\nclass NoteCreate(NoteBase):\n pass\n\n\nclass Note(NoteBase):\n user_id: int\n date: dt.date\n\n class Config:\n from_attributes = True\n\n\nclass NoteUpdate(BaseModel):\n minutes: int | None = None\n text: str | None = None\n","repo_name":"prostoLavr/TimeManager","sub_path":"time_manager/schemas/note.py","file_name":"note.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"40777157981","text":"from django.core.files import File\nfrom django.test import tag\n\nfrom channel_management.models import ChannelOwner\nfrom channels.models import Channel, ChannelContent, ContentFile\nfrom utility.tests import GhasedTestCase, APIClientCheckingDecorator, open_file_for_test\n\n\n@tag('unit_test')\nclass TestGetChannelContents(GhasedTestCase):\n _contents_url = None\n _contents_url_template = 
'/api/channels/{pk}/contents/'.format\n _one_content_url = None\n _one_content_url_template = '/api/channels/contents/{pk}/'.format\n\n @property\n def contents_url(self):\n return self._contents_url\n\n @contents_url.setter\n def contents_url(self, pk):\n self._contents_url = self._contents_url_template(pk=str(pk))\n\n @property\n def one_content_url(self):\n return self._one_content_url\n\n @one_content_url.setter\n def one_content_url(self, pk):\n self._one_content_url = self._one_content_url_template(pk=str(pk))\n\n def setUp(self) -> None:\n super().setUp()\n self.client = APIClientCheckingDecorator(self.client)\n self.channel = Channel.objects.create(name='unit+test')\n self.owner = ChannelOwner.objects.create(channel=self.channel, ghased=self.create_random_ghased())\n\n def get_content_from_response(self, data, id_):\n for content in data['results']:\n if str(content['id']) == str(id_):\n return content\n return None\n\n @tag('unit_api')\n @open_file_for_test('resources/sample.jpg')\n def test_get_contents(self):\n self.contents_url = self.channel.id\n free_content = ChannelContent.objects.create(\n channel=self.channel,\n title='first',\n summary='hello',\n text='hello world',\n )\n ContentFile.objects.create(\n content=free_content,\n file=File(self._opened_files[0]),\n file_type=ContentFile.ContentFileTypes.IMAGE,\n )\n free_content_with_no_file = ChannelContent.objects.create(\n channel=self.channel,\n title='second',\n summary='hello',\n text='hello world',\n )\n premium_content = ChannelContent.objects.create(\n channel=self.channel,\n title='third',\n summary='hello',\n text='hello world',\n price=100_000,\n )\n ContentFile.objects.create(\n content=premium_content,\n file=File(self._opened_files[0]),\n file_type=ContentFile.ContentFileTypes.IMAGE,\n )\n response = self.client.get(\n self.contents_url,\n HTTP_AUTHORIZATION=self.jwt_token,\n format='json',\n assert_status_code=200,\n )\n data = response.json()\n self.assertIsNotNone(self.get_content_from_response(data, free_content.id)['complete_content']['file'])\n self.assertIsNotNone(self.get_content_from_response(data, free_content_with_no_file.id)['complete_content']['text'])\n self.assertIsNone(self.get_content_from_response(data, free_content_with_no_file.id)['complete_content']['file'])\n self.assertIsNone(self.get_content_from_response(data, premium_content.id)['complete_content'])\n\n @tag('unit_api')\n @open_file_for_test('resources/sample.jpg')\n def test_get_one_content(self):\n premium_content = ChannelContent.objects.create(\n channel=self.channel,\n title='third',\n summary='hello',\n text='hello world',\n price=100_000,\n )\n ContentFile.objects.create(\n content=premium_content,\n file=File(self._opened_files[0]),\n file_type=ContentFile.ContentFileTypes.IMAGE,\n )\n self.one_content_url = premium_content.id\n response = self.client.get(\n self.one_content_url,\n HTTP_AUTHORIZATION=self.jwt_token,\n format='json',\n assert_status_code=200,\n )\n data = response.json()\n self.assertIsNotNone(data['title'])\n self.assertIsNone(data['complete_content'])\n","repo_name":"OOD-Ghasedak/Backend","sub_path":"channels/tests/test_get_channel_contents.py","file_name":"test_get_channel_contents.py","file_ext":"py","file_size_in_byte":4106,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"14759386684","text":"# Copied from \n# https://github.com/ppb/pursuedpybear/blob/master/examples/animated_sprites/animated_sprite.py\n\nimport math\nimport random\n\nimport 
ppb\nfrom ppb.features.animation import Animation\nimport ppb.events as events\n\ndef random_pos():\n return ppb.Vector(random.random() * 4 - 2, random.random() * 4 -2)\n\nclass Blob(ppb.BaseSprite):\n image = Animation(\"resources/blob_{0..6}.png\", 10)\n target = ppb.Vector(0, 0)\n speed = 1\n repulsion = 0.5 \n\n def on_mouse_motion(self, event: events.MouseMotion, signal):\n self.target = event.position\n\n def on_update(self, event: events.Update, signal):\n intent_vector = self.target - self.position\n if intent_vector:\n self.position += intent_vector.scale(self.speed * event.time_delta)\n self.rotation = math.degrees(math.atan2(intent_vector.y, intent_vector.x)) - 90\n for sib in self.siblings(event):\n repulsion_vector = sib.position - self.position\n if repulsion_vector:\n self.position -= repulsion_vector.scale(self.repulsion * event.time_delta)\n\n\n def siblings(self, event):\n for sprite in event.scene.get(kind=Blob):\n if sprite != self:\n yield sprite\n\n\ndef setup(scene):\n scene.add(Blob(position = random_pos()))\n scene.add(Blob(position = random_pos()))\n scene.add(Blob(position = random_pos()))\n\n\nppb.run(setup)\n","repo_name":"dayton-dynamic/make-it-dayton-game-demo","sub_path":"animated_sprites/siblings.py","file_name":"siblings.py","file_ext":"py","file_size_in_byte":1412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"16058906695","text":"\nfrom inspect import isfunction, ismethod, getcallargs\n\nimport rospy\nimport actionlib\nfrom actionlib.action_client import CommState\nfrom actionlib_msgs.msg import GoalStatus\n\nfrom lifecycle_msgs.msg import LifecycleGoal, LifecycleAction, Lifecycle\nfrom lifecycle.lifecycle_model import LifecycleModel\n\nLIFECYCLE_ACTION_NAME = \"lifecycle\"\nLIFECYCLE_STATE_TOPIC = \"lifecycle_state\"\n\n\nclass LifecycleAPIException(Exception):\n \"\"\"\n Thrown on bad use of the lifecycle API\n \"\"\"\n def __init__(self, msg):\n Exception.__init__(self, msg)\n\n\ndef check_args(fn, *args):\n if not isfunction(fn) and not ismethod(fn):\n return False\n try:\n getcallargs(fn, *args)\n return True\n except TypeError as e:\n return False\n\n\nclass LifecycleTransitionSequence(object):\n def __init__(self, client, events, completion_cb):\n for event in events:\n if event not in LifecycleModel.KNOWN_EVENTS:\n raise LifecycleAPIException(\"Event %s is not a life-cycle event\" % event)\n # make sure the completion_callback works, or we'll get an exception much later\n if completion_cb is None or not check_args(completion_cb, True):\n raise LifecycleAPIException(\"Invalid completion_cb, got '%s', but function with 1 unbound \"\n \"argument is required\" % completion_cb)\n\n self._client = client\n self._events = list(events)\n self._current_handle = None\n self._cancelled = False\n self._completion_cb = completion_cb\n self._result = None\n\n def go(self):\n self._step()\n\n def cancel(self):\n self._cancelled = True\n if self._current_handle is not None:\n self._current_handle.cancel()\n self._current_handle = None\n\n def is_done(self):\n \"\"\"\n Returns true when the sequence has completed processing, either because\n it was consumed, or because it was cancelled. 
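A typical polling loop, for illustration only ('events' is a list of\n        life-cycle events and 'done_cb' a one-argument callable; both are\n        placeholders, while the other names are defined in this module):\n\n            seq = LifecycleTransitionSequence(client, events, done_cb)\n            seq.go()\n            while not seq.is_done():\n                rospy.sleep(0.1)\n\n        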
Use has_succeeded to determine the status\n \"\"\"\n return self._cancelled or len(self._events) == 0\n\n def get_result(self):\n \"\"\"\n Gets the last result from the server.\n :return:\n \"\"\"\n return self._result\n\n def has_succeeded(self):\n \"\"\"\n Returns true when the sequence was successfully executed\n :return:\n \"\"\"\n return not self._cancelled and self._result == GoalStatus.SUCCEEDED\n\n def _step(self):\n if self._cancelled or len(self._events) == 0:\n self._completion_cb(self.has_succeeded())\n else:\n goal = LifecycleGoal()\n goal.transition = self._events[0]\n self._current_handle = self._client.send_goal(goal, self._transition_cb)\n\n def _transition_cb(self, client_goal_handle):\n # if we completed the current transition, invoke the next\n # TODO check for errors\n if client_goal_handle.get_comm_state() == CommState.DONE:\n self._result = client_goal_handle.get_goal_status()\n # on success, we remove the current event only\n if self.has_succeeded():\n self._events.pop(0)\n # on failure, we remove everything\n elif self._result == GoalStatus.ABORTED:\n self._events = []\n else:\n print(self._result)\n\n self._step()\n\n\nclass LifecycleClient(object):\n def __init__(self, client):\n \"\"\"\n ONLY USE DIRECTLY DURING TESTING -- Use \"create_client\" for normal creation.\n\n Creates a life-cycle client that uses the given action-client to communicate. Also exposes a state_cb\n for registration with state-listeners (create_client does that correctly)\n :param client:\n :return:\n \"\"\"\n self._client = client\n self._server_state = LifecycleGoal.PSTATE_UNCONFIGURED\n self._handle = None\n\n def go_to_state(self, target_state, completion_cb):\n \"\"\"\n Sends the necessary events to go the given target state, based on the current state. Invokes \"completion_cb\"\n when done. 
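For example (PSTATE_ACTIVE is an assumed constant here; only\n        PSTATE_UNCONFIGURED is actually referenced in this file):\n\n            lc_client.go_to_state(LifecycleGoal.PSTATE_ACTIVE, on_done)\n\n        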
completion_cb should take a single boolean to indicate success.\n\n :param target_state:\n :return:\n \"\"\"\n self.completion_cb_ = completion_cb\n \n # if we're already in the target state, do nothing\n if target_state == self._server_state:\n completion_cb(True)\n return\n\n # cancel current sequence if there is one\n if self._handle is not None:\n self.cancel()\n\n # otherwise start the sequence of events to get to the target state\n events = LifecycleModel.EVENTS.get((self._server_state, target_state), None)\n if events is not None:\n self._handle = LifecycleTransitionSequence(self._client, events, self._transition_completion_cb)\n self._handle.go()\n\n def cancel(self):\n if self._handle is not None:\n self._handle.cancel()\n self._handle = None\n\n def state_cb(self, msg):\n \"\"\"\n Stores the last server state internally\n :param msg:\n :type msg: Lifecycle\n :return:\n \"\"\"\n self._server_state = msg.end_state\n \n def _transition_completion_cb(self, result):\n self._handle = None\n self.completion_cb_(result)\n\n\ndef create_client(component_fqn):\n action_client = actionlib.action_client.ActionClient(component_fqn + \"/\" + LIFECYCLE_ACTION_NAME,\n LifecycleAction)\n action_client.wait_for_server(timeout=rospy.Duration(1.0))\n lc_client = LifecycleClient(action_client)\n rospy.Subscriber(component_fqn + \"/\" + LIFECYCLE_STATE_TOPIC, Lifecycle, lc_client.state_cb)\n return lc_client\n","repo_name":"boschresearch/ros1_lifecycle","sub_path":"lifecycle_python/src/lifecycle/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":5773,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"83"} +{"seq_id":"27761213768","text":"import time\n\nfrom flask import Blueprint, render_template, request, flash, redirect, url_for\nimport json\n\nimport config\nfrom model import model_mysql\nfrom model import model_mysql_query\nfrom utils import get_cookie_check, delete_file, GetName\n\n\n# Blueprint\nbacktest = Blueprint('backtest', __name__, static_folder='static', template_folder='templates')\n\nBUCKET_NAME = config.BUCKET_NAME\nOBJECT_PATH = config.OBJECT_PATH\n\nget_name = GetName()\n\n\ndef add_key(form):\n form['category_name'] = get_name.category(form['category'])\n form['strategy_line_name'] = get_name.strategy_line(form['strategy_line'])\n form['strategy_in_name'] = get_name.strategy_in(form['strategy_in'])\n form['strategy_out_name'] = get_name.strategy_out(form['strategy_out'])\n form['strategy_sentiment_name'] = get_name.strategy_sentiment(form['strategy_sentiment'])\n form['source_name'] = get_name.source(form['source'])\n return form\n\n@backtest.route('/backtest.html', methods=['GET'])\ndef backtest_page():\n token = request.cookies.get('token')\n uid = get_cookie_check()\n if isinstance(uid, int) is False:\n return render_template('login.html')\n\n try:\n db_mysql = model_mysql.DbWrapperMysqlDict()\n sql_backtest = model_mysql_query.sql_backtest\n strategy_backtest_dict_list = db_mysql.query_tb_all(sql_backtest, (uid,))\n strategy_backtest_dict_list_length = int(len(strategy_backtest_dict_list))\n return render_template('backtest.html', strategy_backtest_dict_list=strategy_backtest_dict_list, strategy_backtest_dict_list_length=strategy_backtest_dict_list_length, token=token)\n\n except:\n flash('請先建立策略做回測', 'info')\n return redirect(url_for('strategy.strategy_page'))\n\n\n@backtest.route('/api/1.0/send_backtest', methods=['POST'])\ndef send_backtest():\n token = request.cookies.get('token')\n uid = get_cookie_check()\n if 
isinstance(uid, int) is False:\n return render_template('login.html')\n else:\n # send parameter\n send_backtest_strategy_id = request.form.to_dict()['send_backtest']\n db_mysql = model_mysql.DbWrapperMysqlDict()\n sql_fetch_strategy_backtest = model_mysql_query.sql_fetch_strategy_backtest\n send_backtest = db_mysql.query_tb_one(sql_fetch_strategy_backtest, (send_backtest_strategy_id,))\n send_backtest = add_key(send_backtest)\n\n # fetch sample strategy\n sql_sample_strategy = model_mysql_query.sql_sample_strategy\n sample_strategy_form = db_mysql.query_tb_all(sql_sample_strategy)\n sample_strategy_form_length = int(len(sample_strategy_form))\n sample_strategy_form = [add_key(sample_strategy) for sample_strategy in sample_strategy_form]\n\n return render_template('strategy.html', send_backtest=send_backtest, sample_strategy_form=sample_strategy_form, sample_strategy_form_length=sample_strategy_form_length, token=token)\n\n\n@backtest.route('/api/1.0/remove_strategy', methods=['POST'])\ndef remove_strategy():\n\n form = json.loads(list(request.form.keys())[0])\n strategy_id = form['strategy_id']\n file_path = form['file_path']\n user_id = form['user_id']\n\n sql_delete_strategy = \"DELETE FROM `strategy_backtest` WHERE `id` = '{}'\".format(strategy_id)\n db_mysql = model_mysql.DbWrapperMysql()\n db_mysql.delete_row(sql_delete_strategy)\n print(f\"strategy {strategy_id} is deleted\")\n\n file_name = file_path.split(\"/\")[-1]\n delete_file(BUCKET_NAME, OBJECT_PATH, user_id, file_name)\n resp = redirect(url_for('backtest.backtest_page'))\n flash(\"已刪除策略\", 'success')\n return resp\n","repo_name":"menghsin-2021/sentimentrader","sub_path":"flask_app/controller/backtest.py","file_name":"backtest.py","file_ext":"py","file_size_in_byte":3609,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"83"} +{"seq_id":"10411832893","text":"import requests\n\n# Faz uma chamada de API e armazena a resposta.\nurl = 'https://api.github.com/search/repositories?q=language:python&sort=stars' # Armazenamos o URL da chamada da API.\nr = requests.get(url) # Usamos requests para fazer a chamada.\nprint('Status Code:', r.status_code)\n''' O objeto com a resposta tem um atributo chamado status_code, que nos informa se a resposta\n foi bem sucedida. 
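For example, a status code of 200 means the request succeeded:\n\n        if r.status_code == 200:\n            data = r.json()\n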
'''\n\n# Armazena a resposta da API em uma variável.\nresponse_dict = r.json()\n\n# Processa o resultado.\nprint(response_dict.keys())\n","repo_name":"portelaoliveira/CSV_e_JSON","sub_path":"APIS/repos.py","file_name":"repos.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"21469787967","text":"# A simple little program that computes the determinant of a square matrix with the following parameters:\r\n# matrix size - an integer entered by the user\r\n# matrix elements - random numbers in the range from 1 to 10\r\nimport random\r\n\r\ndef gauss(matr):\r\n    \"\"\"\r\n    Forward elimination pass of Gaussian elimination.\r\n\r\n    The function reduces a square n * n matrix to upper triangular form\r\n    (without normalizing the elements on the main diagonal).\r\n\r\n    Arguments:\r\n    matr - a two-dimensional array of size N * N\r\n    \"\"\"\r\n\r\n    for i in range(len(matr)):\r\n        if (matr[i][i] == 0): continue\r\n        for j in range(i + 1, len(matr[i])):\r\n            c = matr[j][i] / matr[i][i]\r\n            for k in range(i, len(matr[i])):\r\n                matr[j][k] -= c * matr[i][k]\r\n    return matr\r\n\r\ndef determinant(matr):\r\n    \"\"\"\r\n    Computes the determinant of an N * N matrix via Gaussian elimination.\r\n\r\n    Arguments:\r\n    matr - a two-dimensional array of size N * N\r\n\r\n    Raises:\r\n    ValueError if an array with mismatched row/column lengths is passed.\r\n    \"\"\"\r\n\r\n    for i in range(len(matr)):\r\n        if (len(matr[i]) != len(matr)):\r\n            raise ValueError('The matrix is not square!')\r\n    result = 1.0\r\n    gauss(matr)\r\n    for i in range(len(matr)):\r\n        result *= matr[i][i]\r\n    return round(result)\r\n\r\nsize = int(input())\r\nif (size < 1):\r\n    size = random.randint(1, 10)\r\nmas = [[0] * size for i in range(size)]\r\n\r\nfor i in range(len(mas)):\r\n    for j in range(len(mas[i])):\r\n        mas[i][j] = random.randint(1, 10)\r\nprint(\"Matrix:\", mas)\r\ntry:\r\n    print(\"Determinant is\", determinant(mas))\r\nexcept ValueError as ve:\r\n    print(ve.args[0])\r\n","repo_name":"icestormikk/ideal-potato","sub_path":"homework1/determinant_prog.py","file_name":"determinant_prog.py","file_ext":"py","file_size_in_byte":2132,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"4288712163","text":"from __future__ import annotations\n\nimport inspect\nimport logging\nimport re\nfrom typing import TYPE_CHECKING, Any, Iterable, Tuple\n\nimport aiohttp_cors\nimport attrs\nimport graphene\nimport trafaret as t\nfrom aiohttp import web\nfrom graphql.error import GraphQLError, format_error # pants: no-infer-dep\nfrom graphql.execution import ExecutionResult # pants: no-infer-dep\nfrom graphql.execution.executors.asyncio import AsyncioExecutor # pants: no-infer-dep\n\nfrom ai.backend.common import validators as tx\nfrom ai.backend.common.logging import BraceStyleAdapter\n\nfrom ..models.base import DataLoaderManager\nfrom ..models.gql import GQLMutationPrivilegeCheckMiddleware, GraphQueryContext, Mutations, Queries\nfrom .auth import auth_required\nfrom .exceptions import GraphQLError as BackendGQLError\nfrom .manager import GQLMutationUnfrozenRequiredMiddleware\nfrom .types import CORSOptions, WebMiddleware\nfrom .utils import check_api_params\n\nif TYPE_CHECKING:\n    from .context import RootContext\n\nlog = BraceStyleAdapter(logging.getLogger(__spec__.name)) # type: ignore[name-defined]\n\n_rx_mutation_hdr = re.compile(r\"^mutation(\\s+\\w+)?\\s*(\\(|{|@)\", re.M)\n\n\nclass GQLLoggingMiddleware:\n    def resolve(self, next, 
root, info: graphene.ResolveInfo, **args) -> Any:\n graph_ctx: GraphQueryContext = info.context\n if len(info.path) == 1:\n log.info(\n \"ADMIN.GQL (ak:{}, {}:{}, op:{})\",\n graph_ctx.access_key,\n info.operation.operation,\n info.field_name,\n info.operation.name,\n )\n return next(root, info, **args)\n\n\nasync def _handle_gql_common(request: web.Request, params: Any) -> ExecutionResult:\n root_ctx: RootContext = request.app[\"_root.context\"]\n app_ctx: PrivateContext = request.app[\"admin.context\"]\n manager_status = await root_ctx.shared_config.get_manager_status()\n known_slot_types = await root_ctx.shared_config.get_resource_slots()\n\n gql_ctx = GraphQueryContext(\n schema=app_ctx.gql_schema,\n dataloader_manager=DataLoaderManager(),\n local_config=root_ctx.local_config,\n shared_config=root_ctx.shared_config,\n etcd=root_ctx.shared_config.etcd,\n user=request[\"user\"],\n access_key=request[\"keypair\"][\"access_key\"],\n db=root_ctx.db,\n redis_stat=root_ctx.redis_stat,\n redis_image=root_ctx.redis_image,\n redis_live=root_ctx.redis_live,\n manager_status=manager_status,\n known_slot_types=known_slot_types,\n background_task_manager=root_ctx.background_task_manager,\n storage_manager=root_ctx.storage_manager,\n registry=root_ctx.registry,\n idle_checker_host=root_ctx.idle_checker_host,\n )\n result = app_ctx.gql_schema.execute(\n params[\"query\"],\n app_ctx.gql_executor,\n variable_values=params[\"variables\"],\n operation_name=params[\"operation_name\"],\n context_value=gql_ctx,\n middleware=[\n GQLLoggingMiddleware(),\n GQLMutationUnfrozenRequiredMiddleware(),\n GQLMutationPrivilegeCheckMiddleware(),\n ],\n return_promise=True,\n )\n if inspect.isawaitable(result):\n result = await result\n return result\n\n\n@auth_required\n@check_api_params(\n t.Dict(\n {\n t.Key(\"query\"): t.String,\n t.Key(\"variables\", default=None): t.Null | t.Mapping(t.String, t.Any),\n tx.AliasedKey([\"operation_name\", \"operationName\"], default=None): t.Null | t.String,\n }\n )\n)\nasync def handle_gql(request: web.Request, params: Any) -> web.Response:\n result = await _handle_gql_common(request, params)\n return web.json_response(result.to_dict(), status=200)\n\n\n@auth_required\n@check_api_params(\n t.Dict(\n {\n t.Key(\"query\"): t.String,\n t.Key(\"variables\", default=None): t.Null | t.Mapping(t.String, t.Any),\n tx.AliasedKey([\"operation_name\", \"operationName\"], default=None): t.Null | t.String,\n }\n )\n)\nasync def handle_gql_legacy(request: web.Request, params: Any) -> web.Response:\n # FIXME: remove in v21.09\n result = await _handle_gql_common(request, params)\n if result.errors:\n errors = []\n for e in result.errors:\n if isinstance(e, GraphQLError):\n errmsg = format_error(e)\n errors.append(errmsg)\n else:\n errmsg = {\"message\": str(e)}\n errors.append(errmsg)\n log.error(\"ADMIN.GQL Exception: {}\", errmsg)\n raise BackendGQLError(extra_data=errors)\n return web.json_response(result.data, status=200)\n\n\n@attrs.define(auto_attribs=True, slots=True, init=False)\nclass PrivateContext:\n gql_executor: AsyncioExecutor\n gql_schema: graphene.Schema\n\n\nasync def init(app: web.Application) -> None:\n app_ctx: PrivateContext = app[\"admin.context\"]\n app_ctx.gql_executor = AsyncioExecutor()\n app_ctx.gql_schema = graphene.Schema(\n query=Queries,\n mutation=Mutations,\n auto_camelcase=False,\n )\n\n\nasync def shutdown(app: web.Application) -> None:\n pass\n\n\ndef create_app(\n default_cors_options: CORSOptions,\n) -> Tuple[web.Application, Iterable[WebMiddleware]]:\n app = 
web.Application()\n app.on_startup.append(init)\n app.on_shutdown.append(shutdown)\n app[\"admin.context\"] = PrivateContext()\n cors = aiohttp_cors.setup(app, defaults=default_cors_options)\n cors.add(app.router.add_route(\"POST\", r\"/graphql\", handle_gql_legacy))\n cors.add(app.router.add_route(\"POST\", r\"/gql\", handle_gql))\n return app, []\n\n\nif __name__ == \"__main__\":\n # If executed as a main program, print all GraphQL schemas.\n # (graphene transforms our object model into a textual representation)\n # This is useful for writing documentation!\n schema = graphene.Schema(query=Queries, mutation=Mutations, auto_camelcase=False)\n print(\"======== GraphQL API Schema ========\")\n print(str(schema))\n","repo_name":"lablup/backend.ai","sub_path":"src/ai/backend/manager/api/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":5981,"program_lang":"python","lang":"en","doc_type":"code","stars":440,"dataset":"github-code","pt":"83"} +{"seq_id":"5135370907","text":"# The code used for problem 3\n# in the second mandatory assignment\n# in MAT1120 at the University of Oslo\n\nfrom assignment2 import *\nimport numpy as np\n# Initializing matrices and t values\n\nn = 8\nC = np.zeros((n, n))\nS = np.zeros((n, n))\nt = np.pi/16 + (np.linspace(1, 8, 8)-1)*np.pi/8\n\nfor i in range(n):\n for j in range(n):\n C[i][j] = np.cos((j)*t[i])\n S[i][j] = np.sin((j+1)*t[i])\n\n# Verifying that the matrices C and S are semi-orthogonal:\n\nprint(\"Is C semi-orthogonal? \" , is_semi_orthogonal(C))\nprint(\"Is S semi-orthogonal? \" , is_semi_orthogonal(S))\n\n# Finding the inverse matrices of C and S:\n\nprint(\"Inverse of C: \")\nprint_matrix(find_inverse(C))\nprint(\"Inverse of S: \")\nprint_matrix(find_inverse(S))\n\nmatrix_to_tex(find_inverse(C), \"C_matrix\")\nmatrix_to_tex(find_inverse(S), \"S_matrix\")\n","repo_name":"qTipTip/MAT1120","sub_path":"assignments/assignment-2/src/problem3.py","file_name":"problem3.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"30412822085","text":"#Emmanuel Galeana\r\n#10/09/2020\r\n#Primer examen\r\n\r\ndef convierte_C_a_K(valori,numero,incremento): \r\n centigrados = valori\r\n x = 0\r\n print('\\t # | centigrados | Kelvin |\\t')\r\n for i in range(numero):\r\n kelvin = centigrados + 273.15\r\n x = x+1\r\n print(\"\\t|{:>2}|\\t{:>11.3f}|\\t{:>4.3f}|\\t\".format(x,centigrados,kelvin))\r\n centigrados=incremento+centigrados\r\n\r\n\r\ndef serie_secuencia (num):\r\n x=7\r\n i= 0\r\n contador=4\r\n if num>1:\r\n if num%2 != 0:\r\n print(x, end=' ')\r\n for i in range ((num//2)): \r\n x=x+2\r\n b=x+contador\r\n contador+=1\r\n print(x,b ,end=' ')\r\n x=b\r\n elif num%2 == 0:\r\n print(x, end=' ')\r\n for i in range ((num//2)): \r\n x=x+2\r\n b=x+contador\r\n contador+=1\r\n print(x,b ,end=' ')\r\n x=b \r\n \r\n\r\ndef main ():\r\n opcion = 1\r\n while opcion != 0:\r\n opcion = int(input('\\nExamen de Emmanuel \\n'+ ' \\n' + 'Menú \\n' + '0. Salir \\n' + '1. convierte_C_a_K \\n' + '2. 
serie_secuencia \\n'))\r\n if opcion == 1:\r\n valori = float(input('Valor inicial de los grados centrigrados a convertir: '))\r\n numero = int(input('Numero n de conversiones que se haran: '))\r\n incremento = float(input('Incremento entre los valores centigrados: '))\r\n convierte_C_a_K(valori,numero,incremento)\r\n elif opcion == 2:\r\n num = int(input('Ingresa rango: ')) \r\n serie_secuencia(num)\r\n else:\r\n if opcion == 0:\r\n print ('Adios')\r\n break\r\n \r\n \r\nmain()","repo_name":"SnipperPlayer/Codes-Python","sub_path":"Examen/Examen_Emmanuel.py","file_name":"Examen_Emmanuel.py","file_ext":"py","file_size_in_byte":1750,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"11760448166","text":"# -------- please select standard source parameter -----\n\nOBJECT = 'OriKL'\nON_X = 83.806\nON_Y = -5.374\nON_COORD = 'J2000'\nOFF_NAME = 'OriKL'\nOFF_X = 82.559\nOFF_Y = -5.668\nDATA_PATH = '/home/amigos/NECST/data/ps/'\n\nparams={}\n#==========================\nparams['object'] = OBJECT\nparams['on_x'] = ON_X\nparams['on_y'] = ON_Y\nparams['on_coord'] = ON_COORD\nparams['off_name'] = OFF_NAME\nparams['path'] = DATA_PATH\nparams['offset_on_x'] = 0.0\nparams['offset_on_y'] = 0.0\nparams['offset_on_dcos'] = 0\nparams['offset_on_coord'] = 'HORIZONTAL'\nparams['repeat'] = 1\nparams['exposure'] = 20.0\nparams['r_interval'] = 10.0\nparams['hosei'] = '/home/amigos/NECST/soft/server/hosei_230.txt'\n\nparams['planet'] = None\n\nimport core.observer\n\nobs = core.observer.observer()\nobs.operate_ps(params)\n","repo_name":"controlmanagement/script","sub_path":"ps.py","file_name":"ps.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"6146270217","text":"#-------------------------------------------------------------------------------\r\n# Name: Annotation Verifyer\r\n#\r\n# Purpose: Verify the annotation done using the annotation.txt file. The\r\n# format supported is (filename, xmin, ymin, xmax, ymax, class_name)\r\n#\r\n# Author: Aniket\r\n#\r\n# Created: 26-09-2020\r\n#-------------------------------------------------------------------------------\r\n\r\n\r\nimport imutils\r\nimport numpy as np\r\nimport os\r\nimport cv2\r\n\r\npath = \"D:/variance.ai/faster-rcnn/\"\r\ntext_path = path + \"pose_all.txt\"\r\ncount = 0\r\nwith open(text_path,'r') as f:\r\n print(\"Reading Text file\")\r\n for line in f:\r\n line_split = line.strip().split(',')\r\n (filename,x1,y1,x2,y2,class_name) = line_split\r\n if filename[:1] == '\"':\r\n filename = filename[26:]\r\n class_name = class_name[:-1]\r\n else:\r\n filename = filename[25:]\r\n #print(filename)920\r\n try:\r\n im=cv2.imread(path+\"Val/\"+filename)\r\n\r\n cv2.rectangle(im, (int(x1), int(y1)), (int(x2), int(y2)),(0, 255, 0), 2)\r\n cv2.putText(im, class_name, (int(x1),int(y1)-10), cv2.FONT_HERSHEY_SIMPLEX,0.75, (25, 155, 205), 2)\r\n im = imutils.resize(im, width=400) ## just to fit in the screen. 
can be changed according to your screen size\r\n            cv2.imshow(\"data\",im)\r\n            cv2.waitKey(0)\r\n            count+=1\r\n            #break\r\n        except Exception:\r\n            pass\r\nprint(count,\": Data Done\")\r\ncv2.destroyAllWindows()","repo_name":"aniketzz/Annotation-verify","sub_path":"annotate verify.py","file_name":"annotate verify.py","file_ext":"py","file_size_in_byte":1528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"22268578171","text":"import os\nfrom pytube import YouTube\nfrom spleeter.separator import Separator\nfrom spleeter.audio.adapter import AudioAdapter\nimport tkinter as tk\n\nclass App(tk.Tk):\n    def __init__(self):\n        super().__init__()\n        #root\n        self.title(\"VocalRemover\")\n        self.geometry(\"600x100\")\n        self.resizable(False, False)\n        #label\n        self.label = tk.Label(self, text=\"Type in url\", font=35, fg=\"black\")\n        self.label.pack()\n        #textBox\n        self.textBox = tk.Text(self, font = 50, width=48, height=1)\n        self.textBox.pack()\n        #button\n        self.button = tk.Button(self, text=\"Download\", width=8, command=self.initiate_downloading)\n        self.button.pack()\n\n    def initiate_downloading(self):\n        url = self.textBox.get(\"1.0\",'end-1c')\n        audio_file_name = self.download_mp3(url)\n        if audio_file_name is not None:\n            self.separate(audio_file_name)\n            #deleting original mp3 file\n            os.remove(audio_file_name)\n\n    def download_mp3(self, url):\n        try:\n            yt = YouTube(url)\n            self.label.config(text = \"Downloading...\")\n            self.update()\n            self.after(200)\n            stream = yt.streams.get_audio_only()\n            stream.download()\n        except Exception as e:\n            self.label.config(text = type(e).__name__)\n        else:\n            self.label.config(text = \"Downloaded!\")\n            self.update()\n            self.after(200)\n            return stream.default_filename\n\n    def separate(self, audio_file_name):\n        self.label.config(text = \"Separating...\")\n        self.update()\n        self.after(200)\n        separator = Separator('spleeter:2stems')\n        separator.separate_to_file(audio_file_name, './audio')\n        self.label.config(text = \"Audio separated\")\n\n\nif __name__ == '__main__':\n    app = App()\n    app.mainloop()\n\n","repo_name":"melchiorr1/VocalRemover","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1900,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"27760086746","text":"import Stack\n\n\n# Reverse a string and return it as a list\ndef reverse(text):\n    textA = str(text)\n    textA = textA[::-1]\n    return list(textA)\n\n# Push a string onto the stack in reverse order\ndef pushStack(stack,text):\n    text = reverse(text)\n    for i in text:\n        # print(i)\n        stack.push(i)\n    return stack\n\n# Check whether the symbol is a terminal\ndef isTerminator(text):\n    listT = 'adbe#'\n    listT = list(listT)\n    # print(listT)\n    if text in listT:\n        return True\n    else:\n        return False\n\n# Get the row of the 2-D table for the given content\ndef getLine(text):\n    text = str(text)\n    text = text.strip()# strip spaces\n    listT = 'SHMA'\n    listT = list(listT)\n    j = 0\n    for i in listT:\n        j+=1\n        if (i == text):\n            return j\n\n# Get the column of the 2-D table for the given content\ndef getColumn(text):\n    text = str(text)\n    text = text.strip()\n    listT = 'adbe#'\n    listT = list(listT)\n    j = 0\n    for i in listT:\n        j+=1\n        if (i == text):\n            return j\n\n\n\n\n# print(isTerminator('a'))\n# print(isTerminator('d'))\n# print(isTerminator('b'))\n# print(isTerminator('e'))\n# print(isTerminator('#'))\n# print(isTerminator('c'))\n\n\n# a=rever\n# for i in a:\n#     print(i)\n\n# stack1 = Stack.Stack()\n# print(stack1.is_empty())\n\n# stack1.push(1)\n# stack1.push(2)\n# print(stack1)\n# print(stack1.pop())\n# print(stack1.pop())\n# print(stack1.pop())\n\n# stack1 = 
pushStack(stack1,'123456')\n# print(stack1.is_empty())\n# print(stack1.pop())\n# print(stack1.pop())\n# print(stack1.pop())\n# print(stack1.pop())","repo_name":"MrWQ/Parser","sub_path":"Analysis/AnalysisClass.py","file_name":"AnalysisClass.py","file_ext":"py","file_size_in_byte":1496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"40115001103","text":"#!/usr/bin/env python\n\nimport sys\nfrom glob import glob\nimport argparse\n\ndef main():\n parser = argparse.ArgumentParser(description='binning and grabbing ps of interest, MUST RUN IN FOLDER WITH PS AND CT')\n parser.add_argument('condition',default=None,help = '<.txt> biological condition')\n\n args = parser.parse_args()\n\n ps = glob(\"./*.ps\")\n\n cond = args.condition\n\n for item in ps:\n with open(item,\"r+\") as f:\n lines = f.readlines()\n\n for item in lines:\n insert = []\n contents = item.split(\" \")\n\n if len(contents) == 9:\n sub_item = contents[7].split(\".\")\n if len(sub_item) > 1:\n q = 0\n index = lines.index(item)\n while q < len(sub_item):\n insert.append(sub_item[q])\n if q == 0:\n insert.append(cond)\n q += 1\n insert = \".\".join(insert)\n\n contents[7] = insert\n\n lines[index] = contents\n\n\n\n f.seek(0)\n f.truncate()\n for item in lines:\n if not isinstance(item, basestring):\n f.write(\" \".join(item))\n else:\n f.write(item)\n\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"zfmandell/the-workshop","sub_path":"ps_editor.py","file_name":"ps_editor.py","file_ext":"py","file_size_in_byte":1438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"37669344656","text":"def duplicate_removal (filename): \n with open(filename, 'r') as f:\n lines = f.readlines()\n unique_lines = set()\n duplicate_lines = set()\n for line in lines:\n if line in unique_lines:\n duplicate_lines.add(line)\n else:\n unique_lines.add(line)\n with open('result.txt', 'w') as f:\n for line in lines:\n if line not in duplicate_lines:\n f.write(line)\n print(f'Файл {filename} перезаписан в файл result.txt')","repo_name":"mothersjacke7/text-handler","sub_path":"modules/duplicate_removal.py","file_name":"duplicate_removal.py","file_ext":"py","file_size_in_byte":516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"43321101745","text":"from __future__ import print_function\n\nfrom collections import OrderedDict\nimport numpy as np\nfrom kernel_tuner import tune_kernel, run_kernel\n\nfrom context import get_kernel_path\nfrom km3net.util import generate_input_data\n\ndef tune_correlate_full_kernel(kernel_name):\n\n with open(get_kernel_path()+'correlate_full.cu', 'r') as f:\n kernel_string = f.read()\n\n N = np.int32(1e6)\n sliding_window_width = np.int32(1500)\n problem_size = (N, 1)\n\n #generate input data with an expected density of correlated hits\n x,y,z,ct = generate_input_data(N, factor=1750.0)\n\n #setup kernel arguments\n row_idx = np.zeros(10).astype(np.int32) #not used in first kernel\n col_idx = np.zeros(10).astype(np.int32) #not used in first kernel\n prefix_sums = np.zeros(10).astype(np.int32) #not used in first kernel\n sums = np.zeros(N).astype(np.int32)\n args = [row_idx, col_idx, prefix_sums, sums, N, sliding_window_width, x, y, z, ct]\n\n #run the sums kernel once\n params = {\"block_size_x\": 256, \"write_sums\": 1}\n answer = run_kernel(kernel_name, kernel_string, problem_size, args, params)\n reference = [None for _ in range(len(args))]\n 
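# Note (added): run_kernel returns the post-run contents of every kernel\n    # argument, so index 3 below is the per-hit correlation count array\n    # ('sums'), which the second tuning stage uses to size the sparse output.\n    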
reference[3] = answer[3]\n sums = reference[3].astype(np.int32)\n\n #setup tuning parameters\n tune_params = OrderedDict()\n tune_params[\"block_size_x\"] = [32*i for i in range(1,33)] #multiples of 32\n tune_params[\"write_sums\"] = [1]\n tune_params[\"write_spm\"] = [0]\n\n kernel_1 = tune_kernel(kernel_name, kernel_string, problem_size, args, tune_params, verbose=True)\n\n #tune kernel #2\n total_correlated_hits = sums.sum()\n print(\"total_correlated_hits\", total_correlated_hits)\n print(\"density\", total_correlated_hits/(float(N)*sliding_window_width))\n\n col_idx = np.zeros(total_correlated_hits).astype(np.int32)\n row_idx = np.zeros(total_correlated_hits).astype(np.int32)\n prefix_sums = np.cumsum(sums).astype(np.int32)\n args = [row_idx, col_idx, prefix_sums, sums, N, sliding_window_width, x, y, z, ct]\n\n tune_params[\"write_sums\"] = [0]\n tune_params[\"write_spm\"] = [1]\n\n kernel_2 = tune_kernel(kernel_name, kernel_string, problem_size, args, tune_params, verbose=True)\n\n return kernel_1, kernel_2\n\n","repo_name":"nlesc-km3net/KM3NeT","sub_path":"tuning/correlate_full.py","file_name":"correlate_full.py","file_ext":"py","file_size_in_byte":2235,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"73355576912","text":"from django.db import models\nfrom authapp.models import User\n\n\nclass Project(models.Model):\n name = models.CharField(max_length=128)\n link = models.CharField(max_length=128, blank=True, null=True)\n user = models.ManyToManyField(User, related_name='project')\n\n\nclass ToDo(models.Model):\n project = models.ForeignKey(Project, on_delete=models.CASCADE, related_name='todo')\n user = models.ForeignKey(User, on_delete=models.SET_NULL, null=True)\n text = models.TextField()\n created = models.DateTimeField(auto_now_add=True)\n updated = models.DateTimeField(auto_now=True)\n is_active = models.BooleanField(default=True)\n","repo_name":"Chernickk/todo","sub_path":"todoapp/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"29618397923","text":"import os\nimport sys\nimport argparse\nimport signal\nimport configparser\nfrom functools import reduce\n\nimport socket\n# disable buffering\n#socket._fileobject.default_bufsize = 0\n\nimport http.client\nhttp.client.HTTPConnection.debuglevel = 1\n\nimport anyjson\nimport logbook\nimport MySQLdb\n\n# this is for consuming the streaming API\nimport tweepy\nimport tweetsclient\nimport politwoops\n\n\n_script_ = (os.path.basename(__file__)\n if __name__ == \"__main__\"\n else __name__)\nlog = logbook.Logger(_script_)\n\nclass DataRecord(object):\n def __init__(self, *args, **kwargs):\n object.__setattr__(self, '_dict', {})\n self._dict.update(((arg, None) for arg in args))\n self._dict.update(kwargs)\n\n def __getattr__(self, attr):\n if attr not in self._dict:\n raise AttributeError(\"{cls!r} has no attribute {attr!r}\".format(cls=self.__class__.__name__,\n attr=attr))\n return self._dict[attr]\n\n def __setattr__(self, attr, value):\n raise AttributeError(\"All attributes of DataRecord objects are read-only\")\n\nclass Usage(Exception):\n def __init__(self, msg):\n self.msg = msg\n\ndef dict_mget(thedict, keylist, default=None):\n result = reduce(lambda d, k: None if d is None else d.get(k), keylist, thedict)\n return result if result is not None else default\n\nclass TweetListener(tweepy.streaming.StreamListener):\n def 
__init__(self, queue, *args, **kwargs):\n super(TweetListener, self).__init__(*args, **kwargs)\n self.queue = queue\n self.config = tweetsclient.Config().get()\n self.database = MySQLdb.connect(\n host=self.config.get('database', 'host'),\n port=int(self.config.get('database', 'port')),\n db=self.config.get('database', 'database'),\n user=self.config.get('database', 'username'),\n passwd=self.config.get('database', 'password'),\n charset=\"utf8mb4\",\n use_unicode=True\n )\n self.database.autocommit(True) # needed if you're using InnoDB\n self.database.cursor().execute('SET NAMES UTF8MB4')\n self.users = self.get_users()\n\n def get_users(self):\n cursor = self.database.cursor()\n q = \"SELECT `twitter_id`, `user_name`, `id` FROM `politicians` where status IN (1,2)\"\n cursor.execute(q)\n ids = {}\n for t in cursor.fetchall():\n ids[t[0]] = t[2]\n return ids\n\n def on_data(self, data):\n try:\n tweet = anyjson.deserialize(data)\n if 'delete' in tweet:\n status = dict_mget(tweet, ['delete', 'status'])\n if status is not None:\n self.queue.put(anyjson.serialize(tweet))\n log.notice(u\"Queued delete notification for user {0} for tweet {1}\".format(status.get('user_id_str'), status.get('id_str')))\n elif 'user' in tweet:\n if 'retweeted_status' in tweet and tweet['user']['id'] in self.users:\n self.queue.put(anyjson.serialize(tweet))\n log.notice(u\"Queued RT for user {0} for tweet {1}\".format(dict_mget(tweet, ['user', 'screen_name']), tweet.get('id_str')))\n elif tweet['in_reply_to_status_id'] == None and tweet['user']['id'] in self.users:\n self.queue.put(anyjson.serialize(tweet))\n log.notice(u\"Queued tweet for user {0} for tweet {1}\".format(dict_mget(tweet, ['user', 'screen_name']), tweet.get('id_str')))\n elif tweet['in_reply_to_status_id'] != None and tweet['user']['id'] in self.users:\n self.queue.put(anyjson.serialize(tweet))\n log.notice(u\"Queued reply tweet for user {0} for tweet {1}\".format(dict_mget(tweet, ['user', 'screen_name']), tweet.get('id_str')))\n\n else:\n log.notice(u\"Did not queue tweet: {0}\".format(tweet))\n\n except Exception as e:\n log.error(u\"TweetListener.on_data() caught exception: {0}\".format(e))\n return False # Closes connection, stops streaming\n\n def on_timeout(self):\n log.error(u\"TweetListener connection timed out.\")\n\n def on_error(self, status_code):\n log.error(u\"TweetListener got bad status code: {0}\".format(status_code))\n\nclass TweetStreamClient(object):\n def __init__(self):\n self.config = tweetsclient.Config().get()\n consumer_key = self.config.get('tweets-client', 'consumer_key')\n consumer_secret = self.config.get('tweets-client', 'consumer_secret')\n access_token = self.config.get('tweets-client', 'access_token')\n access_token_secret = self.config.get('tweets-client', 'access_token_secret')\n log.debug(\"Consumer credentials: {key}, {secret}\",\n key=consumer_key,\n secret=consumer_secret)\n log.debug(\"Access credentials: {token}, {secret}\",\n token=access_token,\n secret=access_token_secret)\n self.twitter_auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n self.twitter_auth.set_access_token(access_token, access_token_secret)\n try:\n username = self.twitter_auth.get_username()\n log.notice(\"Authenticated as {user}\".format(user=username))\n except tweepy.error.TweepError as e:\n log.error(e)\n\n def get_config_default(self, section, key, default = None):\n try:\n return self.config.get(section, key)\n except configparser.NoOptionError:\n return default\n\n def load_plugin(self, plugin_module, plugin_class):\n 
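# __import__('a.b.c') returns the top-level package 'a', so the loop\n        # below walks the dotted path down to the leaf module before fetching\n        # the plugin class from it by name.\n        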
pluginModule = __import__(plugin_module)\n components = plugin_module.split('.')\n for comp in components[1:]:\n pluginModule = getattr(pluginModule, comp)\n pluginClass = getattr(pluginModule, plugin_class)\n return pluginClass\n\n def init_beanstalk(self):\n tweets_tube = self.config.get('beanstalk', 'tweets_tube')\n\n log.info(\"Initiating beanstalk connection. Queueing tweets to {use}...\", use=tweets_tube)\n\n self.beanstalk = politwoops.utils.beanstalk(host=self.config.get('beanstalk', 'host'),\n port=int(self.config.get('beanstalk', 'port')),\n watch=None,\n use=tweets_tube)\n\n def stream_forever(self):\n track_module = self.get_config_default('tweets-client', 'track-module', 'tweetsclient.config_track')\n track_class = self.get_config_default('tweets-client', 'track-class', 'ConfigTrackPlugin')\n log.debug(\"Loading track plugin: {module} - {klass}\",\n module=track_module, klass=track_class)\n\n pluginClass = self.load_plugin(track_module, track_class)\n self.track = pluginClass()\n stream_type = self.track.get_type()\n log.debug(\"Initializing a {0} stream of tweets.\", stream_type)\n track_items = self.track.get_items()\n log.debug(str(track_items))\n\n stream = None\n if stream_type == 'users':\n tweet_listener = TweetListener(self.beanstalk)\n stream = tweepy.Stream(self.twitter_auth, tweet_listener, secure=True)\n stream.filter(follow=track_items)\n elif stream_type == 'words':\n raise Exception('The words stream type is no longer supported.')\n else:\n raise Exception('Unrecognized stream type: {0}'.format(stream_type))\n\n def run(self):\n self.init_beanstalk()\n with politwoops.utils.Heart() as heart:\n politwoops.utils.start_heartbeat_thread(heart)\n politwoops.utils.start_watchdog_thread(heart)\n self.stream_forever()\n\n self.beanstalk.close()\n return 0\n\ndef main(args):\n signal.signal(signal.SIGHUP, politwoops.utils.restart_process)\n\n log_handler = politwoops.utils.configure_log_handler(_script_, args.loglevel, args.output)\n with logbook.NullHandler():\n with log_handler.applicationbound():\n log.debug(\"Starting tweets-client.py\")\n try:\n app = TweetStreamClient()\n if args.authtest:\n return\n if args.restart:\n return politwoops.utils.run_with_restart(app.run)\n else:\n return app.run()\n except KeyboardInterrupt:\n log.notice(\"Killed by CTRL-C\")\n\nif __name__ == \"__main__\":\n args_parser = argparse.ArgumentParser(description=__doc__)\n args_parser.add_argument('--loglevel', metavar='LEVEL', type=str,\n help='Logging level (default: notice)',\n default='notice',\n choices=('debug', 'info', 'notice', 'warning',\n 'error', 'critical'))\n args_parser.add_argument('--output', metavar='DEST', type=str,\n default='-',\n help='Destination for log output (-, syslog, or filename)')\n args_parser.add_argument('--restart', default=False, action='store_true',\n help='Restart when an error cannot be handled.')\n args_parser.add_argument('--authtest', default=False, action='store_true',\n help='Authenticate against Twitter and exit.')\n args = args_parser.parse_args()\n sys.exit(main(args))\n","repo_name":"propublica/politwoops-tweet-collector","sub_path":"bin/tweets-client.py","file_name":"tweets-client.py","file_ext":"py","file_size_in_byte":9517,"program_lang":"python","lang":"en","doc_type":"code","stars":119,"dataset":"github-code","pt":"83"} +{"seq_id":"43035092872","text":"from transceiver import *\nreceiver=rx(27, \"pi4b\", repeat=10)\nwhile True:\n\ttry:\n\t\tfor x in receiver.recv():\n\t\t\tprint(\"recv.py: \", x)\n\texcept Exception as 
e:\n\t\tprint(e)\n\t\treceiver.cleanup()\n\t\texit(0)\n","repo_name":"Terraminator/divy","sub_path":"recv.py","file_name":"recv.py","file_ext":"py","file_size_in_byte":198,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"18535038318","text":"import simulation_helpers\nimport numpy as np\nimport itertools\nimport Bee\nimport random\nimport matplotlib.pyplot as plt\nimport math\nimport statistics\nfrom matplotlib.animation import FuncAnimation\nimport collections\n\n\nclass Simulation:\n def __init__(self, num_bees, side_length, step_count, thetastar, r_or_u=\"uniform\",\n use_periodic_boundary_conditions=True, initially_fed_percentage=0.1):\n self.total_bees = num_bees\n self.n = side_length\n self.steps = step_count\n self.bee_array = []\n self.r_or_u = r_or_u\n self.tstar_seed = thetastar\n thetastars = [np.linspace(-thetastar, thetastar, simulation_helpers.TSTAR_RANGE)]\n self.thetastar = list(thetastars[random.randint(0, len(thetastars) - 1)])\n self.has_run = False\n\n # simulation parameters\n self.food_variance_threshold = 100\n self.min_food_level = 5.0\n self.r = 2.0\n\n self.food_variance_vs_time = collections.OrderedDict()\n self.max_donations_over_time = collections.OrderedDict()\n self.largest_cluster_over_time = collections.OrderedDict()\n self.cluster_count_over_time = collections.OrderedDict()\n\n for i in range(0, self.total_bees):\n self.bee_array.append(Bee.Bee(i, total=self.total_bees, tstar=self.thetastar,\n tstar_range=simulation_helpers.TSTAR_RANGE,\n initially_fed_percentage=initially_fed_percentage,\n n=self.n, steps=self.steps, r_or_u=self.r_or_u,\n use_periodic_boundary_conditions=use_periodic_boundary_conditions))\n if all(bee.food_level == 0 for bee in self.bee_array):\n self.bee_array[0].set_food_level(100)\n self.init_stats()\n\n def init_stats(self):\n food_level_values = []\n donation_values = []\n for bee in self.bee_array:\n food_level_values.append(bee.food_level)\n donation_values.append(len(bee.food_out_edges))\n\n self.food_variance_vs_time[0] = statistics.variance(food_level_values)\n self.max_donations_over_time[0] = max(donation_values)\n self.largest_cluster_over_time[0] = 1\n\n def run(self, food_donation_percent=0.50, food_transition_rate=1):\n for step in range(1, self.steps):\n food_level_values = []\n donation_values = []\n cluster_sizes = []\n clusters = set()\n\n for bee in self.bee_array:\n bee.move(step)\n food_level_values.append(bee.food_level)\n donation_values.append(len(bee.food_out_edges))\n\n self.food_variance_vs_time[step] = statistics.variance(food_level_values)\n self.max_donations_over_time[step] = max(donation_values)\n for bee_1, bee_2 in itertools.combinations(self.bee_array, 2):\n dist = ((bee_1.positionx[step] - bee_2.positionx[step]) ** 2 +\n (bee_1.positiony[step] - bee_2.positiony[step]) ** 2) ** 0.5\n simulation_helpers.populate_clusters(step, dist, bee_1, bee_2, self.r)\n simulation_helpers.adjust_direction_for_attraction(step, dist, bee_1, bee_2, self.r)\n simulation_helpers.setup_trophallaxis(step, dist, bee_1, bee_2,\n food_donation_percent, food_transition_rate)\n\n if all([bee.food_level > self.min_food_level for bee in self.bee_array]):\n print(\"Convergence due to min food level reached after {} steps\".format(step))\n break\n\n if self.food_variance_vs_time[step] <= self.food_variance_threshold:\n print(\"Convergence due to variance reached after {} steps\".format(step))\n break\n\n for bee in self.bee_array:\n 
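# Gather per-bee cluster statistics for this step; bee.cluster and\n                # bee.cluster_size are presumably populated by the\n                # simulation_helpers.populate_clusters() call in the pair loop above.\n                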
cluster_sizes.append(bee.cluster_size[step])\n clusters.add(frozenset(bee.cluster))\n self.cluster_count_over_time[step] = len(clusters)\n self.largest_cluster_over_time[step] = max(cluster_sizes)\n\n self.has_run = True\n\n def animate_walk(self):\n assert self.has_run, \"Animation cannot render until the simulation has been run!\"\n plt.style.use('seaborn-pastel')\n fig = plt.figure()\n ax = plt.axes(xlim=(0, self.n), ylim=(0, self.n))\n xdatas = {n: [] for n in range(0, self.total_bees)}\n ydatas = {n: [] for n in range(0, self.total_bees)}\n\n bee_paths = [ax.plot([], [], '*')[0] for _ in self.bee_array]\n r_set = set(np.linspace(0, 1, num=self.total_bees))\n g_set = set(np.linspace(0, 1, num=self.total_bees))\n b_set = set(np.linspace(0, 1, num=self.total_bees))\n for line in bee_paths:\n r = random.sample(r_set, 1)[0]\n g = random.sample(g_set, 1)[0]\n b = random.sample(b_set, 1)[0]\n line.set_color((r, g, b))\n\n def animate(i, bees, lines):\n for line, bee in zip(lines, bees):\n xdatas[bee.number].append(bee.trace.get(i)[0])\n ydatas[bee.number].append(bee.trace.get(i)[1])\n line.set_data(xdatas[bee.number][0], ydatas[bee.number][0])\n xdatas[bee.number].pop(0)\n ydatas[bee.number].pop(0)\n return lines\n\n ax.set_xlim([0.0, self.n])\n ax.set_xlabel('X')\n\n ax.set_ylim([0.0, self.n])\n ax.set_ylabel('Y')\n\n ax.set_title('2D Walk Test')\n\n anim = FuncAnimation(fig, animate, frames=self.steps, fargs=(self.bee_array, bee_paths),\n interval=1000, blit=False)\n\n # anim.save('bee_paths.gif', writer='pillow')\n plt.show()\n\n def get_timeseries_stats(self, identifier=\"variance\"):\n plot_dict = None\n secondary_plot_dict = None\n if identifier == \"variance\":\n plot_dict = self.food_variance_vs_time\n if identifier == \"num_clusters\":\n plot_dict = self.cluster_count_over_time\n secondary_plot_dict = self.largest_cluster_over_time\n if identifier == \"stdev\":\n plot_dict = self.food_variance_vs_time\n for key, value in plot_dict.items():\n plot_dict[key] = math.sqrt(value)\n if identifier == \"max_donations\":\n plot_dict = self.max_donations_over_time\n\n return plot_dict, secondary_plot_dict\n","repo_name":"owingit/b-e-e-s","sub_path":"Simulation.py","file_name":"Simulation.py","file_ext":"py","file_size_in_byte":6472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"39417981284","text":"import os\nimport fnmatch\n\nfrom .folder_types import (\n Static,\n ListField,\n Entity,\n Project,\n UserWorkspace,\n ShotgunStep,\n ShotgunTask,\n)\n\nfrom ..errors import TankError, TankUnreadableFileError\nfrom ..util import yaml_cache\n\n\ndef read_ignore_files(schema_config_path):\n \"\"\"\n Reads ignore_files from root of schema if it exists.\n Returns a list of patterns to ignore.\n \"\"\"\n ignore_files = []\n file_path = os.path.join(schema_config_path, \"ignore_files\")\n if os.path.exists(file_path):\n open_file = open(file_path, \"r\")\n try:\n for line in open_file.readlines():\n # skip comments\n if \"#\" in line:\n line = line[: line.index(\"#\")]\n line = line.strip()\n if line:\n ignore_files.append(line)\n finally:\n open_file.close()\n return ignore_files\n\n\nclass FolderConfiguration(object):\n \"\"\"\n Class that loads the schema from disk and constructs folder objects.\n \"\"\"\n\n def __init__(self, tk, schema_config_path):\n \"\"\"\n Constructor\n \"\"\"\n self._tk = tk\n\n # access shotgun nodes by their entity_type\n self._entity_nodes_by_type = {}\n\n # maintain a list of all Step nodes for special 
introspection\n self._step_fields = []\n\n # read skip files config\n self._ignore_files = read_ignore_files(schema_config_path)\n\n # load schema\n self._load_schema(schema_config_path)\n\n ##########################################################################################\n # public methods\n\n def get_folder_objs_for_entity_type(self, entity_type):\n \"\"\"\n Returns all the nodes representing a particular sg entity type\n \"\"\"\n return self._entity_nodes_by_type.get(entity_type, [])\n\n def get_task_step_nodes(self):\n \"\"\"\n Returns all step nodes in the configuration\n \"\"\"\n return self._step_fields\n\n ####################################################################################\n # utility methods\n\n def _get_sub_directories(self, parent_path):\n \"\"\"\n Returns all the directories for a given path\n \"\"\"\n directory_paths = []\n for file_name in os.listdir(parent_path):\n\n # check our ignore list\n if any(fnmatch.fnmatch(file_name, p) for p in self._ignore_files):\n continue\n\n full_path = os.path.join(parent_path, file_name)\n if os.path.isdir(full_path):\n directory_paths.append(full_path)\n\n return directory_paths\n\n def _get_files_in_folder(self, parent_path):\n \"\"\"\n Returns all the files for a given path except yml files\n Also ignores any files mentioned in the ignore files list\n \"\"\"\n file_paths = []\n items_in_folder = os.listdir(parent_path)\n\n folders = [\n f for f in items_in_folder if os.path.isdir(os.path.join(parent_path, f))\n ]\n\n for file_name in items_in_folder:\n\n full_path = os.path.join(parent_path, file_name)\n\n if not os.path.isfile(full_path):\n # not a file path!\n continue\n\n if any(fnmatch.fnmatch(file_name, p) for p in self._ignore_files):\n # don't process files matching ignore pattern(s)\n continue\n\n if file_name.endswith(\".yml\") and os.path.splitext(file_name)[0] in folders:\n # this is a foo.yml and we have a folder called foo\n # this means that this is a config file!\n continue\n\n if file_name.endswith(\"symlink.yml\"):\n # this is symlink schema component and not a normal file, so\n # don't include it in the files enumeration\n continue\n\n # by now should be left with regular non-system files only\n file_paths.append(full_path)\n\n return file_paths\n\n def _get_symlinks_in_folder(self, parent_path):\n \"\"\"\n Returns all xxx.symlink.yml files in a location.\n\n :param parent_path: file system folder to scan\n :returns: list of (name, target_expression, full_metadata) where name is the name of the symlink\n and target_expression is a target expression to be passed into the folder creation.\n For example, if the file in the schema location is called \"foo_bar.symlink.yml\",\n the name parameter will be 'foo_bar'.\n \"\"\"\n SYMLINK_SUFFIX = \".symlink.yml\"\n\n data = []\n\n items_in_folder = os.listdir(parent_path)\n symlinks = [f for f in items_in_folder if f.endswith(SYMLINK_SUFFIX)]\n\n for file_name in symlinks:\n\n full_path = os.path.join(parent_path, file_name)\n\n try:\n metadata = (\n yaml_cache.g_yaml_cache.get(full_path, deepcopy_data=False) or {}\n )\n except Exception as error:\n raise TankError(\n \"Cannot load config file '%s'. 
Error: %s\" % (full_path, error)\n )\n\n if \"target\" not in metadata:\n raise TankError(\n \"Did not find required 'target' parameter in \"\n \"symlink definition file '%s'\" % full_path\n )\n\n target_expression = metadata[\"target\"]\n\n symlink_name = file_name[: -len(SYMLINK_SUFFIX)]\n\n # this is a file path and it\n data.append((symlink_name, target_expression, metadata))\n\n return data\n\n def _read_metadata(self, full_path):\n \"\"\"\n Reads metadata file.\n\n :param full_path: Absolute path without extension\n :returns: Dictionary of file contents or None\n \"\"\"\n metadata = None\n # check if there is a yml file with the same name\n yml_file = \"%s.yml\" % full_path\n try:\n metadata = yaml_cache.g_yaml_cache.get(yml_file, deepcopy_data=False)\n except TankUnreadableFileError:\n pass\n except Exception as error:\n raise TankError(\n \"Cannot load config file '%s'. Error: %s\" % (yml_file, error)\n )\n\n return metadata\n\n ##########################################################################################\n # internal stuff\n\n def _load_schema(self, schema_config_path):\n \"\"\"\n Scan the config and build objects structure\n \"\"\"\n\n project_folders = self._get_sub_directories(schema_config_path)\n\n # make some space in our obj/entity type mapping\n self._entity_nodes_by_type[\"Project\"] = []\n\n for project_folder in project_folders:\n\n # read metadata to determine root path\n metadata = self._read_metadata(project_folder)\n\n if metadata is None:\n if os.path.basename(project_folder) == \"project\":\n\n # get the default root name from the config\n default_root = (\n self._tk.pipeline_configuration.get_primary_data_root_name()\n )\n\n if not default_root:\n raise TankError(\n \"Unable to identify a default storage root to use \"\n \"while loading the project schema. Check your \"\n \"config's roots.yml file to ensure at least one \"\n \"storage root is defined. 
You can specify the \"\n \"default root by adding a `default: true` \"\n \"key/value to a root's definition.\"\n )\n\n metadata = {\"type\": \"project\", \"root_name\": default_root}\n else:\n raise TankError(\n \"Project directory missing required yml file: %s.yml\"\n % project_folder\n )\n\n if metadata.get(\"type\") != \"project\":\n raise TankError(\n \"Only items of type 'project' are allowed at the root level: %s\"\n % project_folder\n )\n\n project_obj = Project.create(self._tk, project_folder, metadata)\n\n # store it in our lookup tables\n self._entity_nodes_by_type[\"Project\"].append(project_obj)\n\n # recursively process the rest\n self._process_config_r(project_obj, project_folder)\n\n def _process_config_r(self, parent_node, parent_path):\n \"\"\"\n Recursively scan the file system and construct an object\n hierarchy.\n\n Factory method for Folder objects.\n \"\"\"\n for full_path in self._get_sub_directories(parent_path):\n # check for metadata (non-static folder)\n metadata = self._read_metadata(full_path)\n if metadata:\n node_type = metadata.get(\"type\", \"undefined\")\n\n if node_type == \"shotgun_entity\":\n cur_node = Entity.create(self._tk, parent_node, full_path, metadata)\n\n # put it into our list where we group entity nodes by entity type\n et = cur_node.get_entity_type()\n if et not in self._entity_nodes_by_type:\n self._entity_nodes_by_type[et] = []\n self._entity_nodes_by_type[et].append(cur_node)\n\n elif node_type == \"shotgun_list_field\":\n cur_node = ListField.create(\n self._tk, parent_node, full_path, metadata\n )\n\n elif node_type == \"static\":\n cur_node = Static.create(self._tk, parent_node, full_path, metadata)\n\n elif node_type == \"user_workspace\":\n cur_node = UserWorkspace.create(\n self._tk, parent_node, full_path, metadata\n )\n\n elif node_type == \"shotgun_step\":\n cur_node = ShotgunStep.create(\n self._tk, parent_node, full_path, metadata\n )\n self._step_fields.append(cur_node)\n\n elif node_type == \"shotgun_task\":\n cur_node = ShotgunTask.create(\n self._tk, parent_node, full_path, metadata\n )\n\n else:\n # don't know this metadata\n raise TankError(\n \"Error in %s. 
Unknown metadata type '%s'\"\n % (full_path, node_type)\n )\n else:\n # no metadata - so this is just a static folder!\n # specify the type in the metadata chunk for completeness\n # since we are passing this into the hook later\n cur_node = Static.create(\n self._tk, parent_node, full_path, {\"type\": \"static\"}\n )\n\n # and process children\n self._process_config_r(cur_node, full_path)\n\n # process symlinks\n for (path, target, metadata) in self._get_symlinks_in_folder(parent_path):\n parent_node.add_symlink(path, target, metadata)\n\n # now process all files and add them to the parent_node token\n for f in self._get_files_in_folder(parent_path):\n parent_node.add_file(f)\n","repo_name":"shotgunsoftware/tk-core","sub_path":"python/tank/folder/configuration.py","file_name":"configuration.py","file_ext":"py","file_size_in_byte":11494,"program_lang":"python","lang":"en","doc_type":"code","stars":82,"dataset":"github-code","pt":"63"} +{"seq_id":"8593314327","text":"import asyncio\nfrom playwright.async_api import async_playwright\nimport nest_asyncio\nnest_asyncio.apply()\n\nglobal currentCommand\ncurrentCommand = \"idle\"\n\nasync def launch():\n global currentCommand\n with open('info.dat') as extract:\n file = extract.readlines()\n username = file[2]\n code = file[0]\n password = file[1]\n async with async_playwright() as p:\n browser = await p.chromium.launch(headless=False, args=['--start-maximized', '--use-fake-ui-for-media-stream'])\n page = await browser.new_page(no_viewport=True)\n context = await browser.new_context()\n await context.grant_permissions(permissions=['microphone'])\n await context.grant_permissions(permissions=['camera'])\n await page.goto('https://zoom.com/wc/join/'+code)\n await asyncio.sleep(0.1)\n await page.locator('#inputname').fill(username)\n await asyncio.sleep(0.1)\n await page.locator('#joinBtn').click()\n await asyncio.sleep(0.1)\n await page.locator('#inputpasscode').fill(password)\n await asyncio.sleep(0.1)\n await page.locator('#joinBtn').click()\n while True:\n await asyncio.sleep(0.1)\n if currentCommand == \"leave\":\n print(currentCommand)\n currentCommand = \"idle\"\n await browser.close()\n return\n if currentCommand == \"mic\":\n print(currentCommand)\n await page.evaluate(\"()=>{var a=document.querySelector('#wc-footer');a.classList.remove('footer--hidden');}\")\n await page.click('//*[@id=\"foot-bar\"]/div[1]/div[2]/button')\n currentCommand = \"idle\"\n if currentCommand == \"cam\":\n print(currentCommand)\n await page.evaluate(\"()=>{var a=document.querySelector('#wc-footer');a.classList.remove('footer--hidden');}\")\n await page.click('//*[@id=\"foot-bar\"]/div[1]/div[2]/button')\n currentCommand = \"idle\"","repo_name":"ambroseling/LINKUPSS","sub_path":"zoompythonscript/playwrightdemotest.py","file_name":"playwrightdemotest.py","file_ext":"py","file_size_in_byte":2023,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"201186743","text":"from fltk import *\n\n\ndef closewin(widget):\n print(\"shutting down\")\n w.hide()\n\ndef filecb(widget):\n fl_message(\"this is the filecb\")\n\ndef editcb(widget):\n fl_message(\"this is the editcb\")\n\ndef helpcb(widget):\n fl_message(\"this is the helpcb\")\n\ndef opencb(widget):\n fl_message(\"this is the opencb\")\n\nw = Fl_Window(600, 50, 300, 300, \"my gui\")\nw.begin()\nmenu=Fl_Menu_Bar(0,0, w.w(),25)\nmenu.add(\"File/Open\",0,opencb)\nmenu.add(\"File/Exit\",FL_F+5,closewin)\nmenu.add(\"Edit/Undo \",FL_F+2,editcb)\nmenu.add(\"Edit/Redo 
\",FL_F+2,editcb)\nmenu.add(\"Edit/Preferences \",FL_F+2,editcb)\n\nmenu.add(\"Hel&p/A&bout\",ord('h'),helpcb)\nmenu.add(\"Hel&p/S&oftware/Hi&story\",0,helpcb)\nmenu.add(\"Hel&p/S&oftware/Li&cense\",0,helpcb)\n\nw.end()\nw.callback(closewin)\nw.resizable()\nFl.scheme(\"plastic\")\nw.show()\nFl.run()\n","repo_name":"CLRPain/CS-2","sub_path":"kyle/menubar.py","file_name":"menubar.py","file_ext":"py","file_size_in_byte":829,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"11290219481","text":"import re\n'''\n从一段文字中提取手机号,手机号可能位于文字的任何地方\n也有可能有其他的数字存在。\n'''\n\ndef main():\n # 创建正则表达式对象,使用前瞻和回顾来保证手机号前后不应该出现数字\n # (?<=exp) 表示匹配exp后边的位置!这里表示的是匹配非数字后边的内容\n # (?=exp) 表示匹配exp前面的位置,这里表示匹配非数字前面的内容\n pattern = re.compile(r'(?<=\\D)1[345678]\\d{9}(?=\\D)')\n sentence = '''\n 重要的事情说8130123456789遍,我的手机号是13512346789这个靓号,\n 不是15600998765,也是110或119,王大锤的手机号才是15600998765。\n '''\n # 查找所有匹配并保存到一个列表中\n mylist = re.findall(pattern, sentence)\n print(mylist)\n print('----------分割线-----------')\n # 通过迭代器取出匹配对象并获得匹配的内容\n # 返回三个match对象,放在了迭代器中\n for temp in pattern.finditer(sentence):\n # 对match对象使用group()方法,返回match对象的字符串形式\n print(temp.group())\n print('---------分割线---------')\n # 通过search函数指定搜索位置找出所有匹配\n # search()是匹配到一个就会返回\n # 通过循环指定位置来匹配所有,而match对象的end()方法,就是返回\n # match对象的最后一个字符在原字符串的位置index\n m = pattern.search(sentence)\n while m:\n print(m.group())\n m = pattern.search(sentence, m.end())\nif __name__ == '__main__':\n main()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"WWEISONG/Python_Project_100d","sub_path":"Day09-正则表达式/练习02-提取手机号.py","file_name":"练习02-提取手机号.py","file_ext":"py","file_size_in_byte":1606,"program_lang":"python","lang":"zh","doc_type":"code","stars":2,"dataset":"github-code","pt":"63"} +{"seq_id":"16288852794","text":"import functools\nimport sys\nfrom pathlib import Path\n\n\n@functools.lru_cache()\ndef get_exe_path():\n return Path(sys.argv[0])\n\n\nASSISTANT_FOLDER = Path(\"assistant\")\nASSISTANT_EXE_NAME = \"rivals_workshop_assistant.exe\"\nASSISTANT_TMP_EXE_NAME = \"rivals_workshop_assistant.exe_\"\n\nSPRITES_FOLDER = Path(\"sprites\")\nSCRIPTS_FOLDER = Path(\"scripts\")\nATTACKS_FOLDER = SCRIPTS_FOLDER / Path(\"attacks\")\nANIMS_FOLDER = Path(\"anims\")\nLOGS_FOLDER = Path(\"logs\")\n\nREPO_OWNER = \"Rivals-Workshop-Community-Projects\"\nASSISTANT_REPO_NAME = \"rivals-workshop-assistant\"\nLIBRARY_REPO_NAME = \"injector-library\"\nINJECT_FOLDER = ASSISTANT_FOLDER / Path(\".inject\")\nUSER_INJECT_FOLDER = ASSISTANT_FOLDER / Path(\"user_inject\")\n\nBACKUP_FOLDER = ASSISTANT_FOLDER / Path(\"backups\")\n\nLOCKFILE_PATH = ASSISTANT_FOLDER / Path(\".lock\")\n\nPATHS_TO_BACK_UP = [SPRITES_FOLDER, SCRIPTS_FOLDER, ANIMS_FOLDER]\n\nASEPRITE_LUA_SCRIPTS_FOLDER = Path(\"lua_scripts\")\n","repo_name":"Rivals-Workshop-Community-Projects/rivals-workshop-assistant","sub_path":"rivals_workshop_assistant/paths.py","file_name":"paths.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"63"} +{"seq_id":"6135447719","text":"import numpy as np\nimport math\n\n\n# Dimensions du robot [m]\nrobotRadius = 0.08\nwheelRadius = 0.032\n\n# Activer les obstacles\nenableObstacles = True\n\n# Mouvement automatique de l'objectif\nautoMovingGoal = False\ngoTo = True\n\n# Param�tres du champ de potentiel\nkr = 10.0 # repulsive potential gain\nka = 5.0 # attractive potential gain\npmax = 4.0\n\n\ndef updateWheels(t, robotPos, robotYaw, goalPos, 
obstaclesPos):\n \"\"\"Appelée par le simulateur pour mettre à jour les vitesses du robot\n\n Arguments:\n t {float} -- temps écoulé depuis le début [s]\n speed {float[3]} -- les vitesses entrées dans les curseurs [m/s], [m/s], [rad/s]\n robotPos {float[2]} -- position du robot dans le repère monde [m], [m]\n robotYaw {float} -- orientation du robot dans le repère monde [rad]\n goalPos {float[2]} -- position cible du robot dans le repère monde\n\n Returns:\n float[3] -- les vitesses des roues [rad/s]\n \"\"\"\n p = path(robotPos, goalPos, obstaclesPos)\n print(p)\n \"\"\"if (goTo or autoMovingGoal):\n return goToPosition(robotPos, robotYaw, goalPos)\"\"\"\n\n #speedVect = np.matrix(speed).T\n\n #return np.asarray(kinematicMatrix() * speedVect).flatten()\n return [0,0,0]\n\n\"\"\" ********** Matrice de cinématique *********** \"\"\"\ndef kinematicMatrix():\n\n matrice = np.matrix([[-math.cos(np.pi/6), -math.sin(np.pi/6), robotRadius],\n [math.cos(np.pi/6), -math.sin(np.pi/6), robotRadius],\n [0, 1, robotRadius]]) / wheelRadius\n print(\"Kinematic matrix\", matrice)\n return matrice\n\n\"\"\" ********** Matrice de rotation *********** \"\"\"\ndef rotationMatrix(theta):\n\n matrice = np.matrix([[math.cos(theta), -math.sin(theta), 0],\n [math.sin(theta), math.cos(theta), 0],\n [0, 0, 1]])\n print(\"Rotation matrix\", matrice)\n return matrice\n\n\"\"\" ******** Matrice de translation ********* \"\"\"\ndef translationMatrix(u):\n\n matrice = np.matrix([[1, 0, u[0]],\n [0, 1, u[1]],\n [0, 0, 1]])\n print(\"Translation matrix\", matrice)\n return matrice\n\n\"\"\" ******** Matrice de transformation ********* \"\"\"\ndef transformationMatrix(theta, u):\n\n matrice = rotationMatrix(theta) * translationMatrix(u)\n print(\"Transformation matrix\", matrice)\n return matrice\n\n\"\"\" ********** Move to specific position *********** \"\"\"\ndef goToPosition(robotPos, robotYaw, goalPos):\n distance = getDistance(robotPos, goalPos)\n robotSpeed = (1 / distance) / 3\n distanceX = goalPos[0] - robotPos[0]\n distanceY = goalPos[1] - robotPos[1]\n speed = [robotSpeed * distanceX, robotSpeed * distanceY, 0]\n s = np.matrix(speed).T\n return np.asarray(kinematicMatrix() * s).flatten()\n\n\"\"\" ************ Get distance between two positions ************ \"\"\"\ndef getDistance(p1, p2):\n return math.sqrt(math.pow((p1[0] - p2[0]), 2) + math.pow((p1[1] - p2[1]), 2))\n\n\"\"\" ************ Get attractive potential ************ \"\"\"\ndef attractivePotential(robotPos, goalPos):\n return (ka * np.hypot(robotPos[0] - goalPos[0], robotPos[1] - goalPos[1]) ** 2) / 2\n\n\"\"\" ************ Get repulsive potential ************ \"\"\"\ndef repulsivePotential(robotPos, obstaclesPos):\n maxDistance = np.hypot(robotPos[0] - obstaclesPos[0], robotPos[1] - obstaclesPos[1])\n if (maxDistance < pmax):\n \treturn kr / (maxDistance ** 2) / 2\n return 0\n\n\"\"\" ************ Get gradient attractive potential ************ \"\"\"\ndef attractivePotentialGradient(robotPos, goalPos):\n return ka * round(np.hypot(robotPos[0] - goalPos[0], robotPos[1] - goalPos[1]))\n\n\"\"\" ************ Get gradient repulsive potential ************ \"\"\"\ndef repulsivePotentialGradient(robotPos, obstaclesPos):\n maxDistance = np.hypot(robotPos[0] - obstaclesPos[0], robotPos[1] - obstaclesPos[1])\n if (maxDistance < pmax):\n \treturn kr * (obstaclesPos - robotPos)/ math.pow(maxDistance, 4)\n return 0\n\n\"\"\" *********** Calculate potential field *********** \"\"\"\ndef potentialField(robotPos, goalPos, obstaclesPos):\n repulsion = 
[repulsivePotentialGradient(robotPos, o) for o in obstaclesPos]\n attraction = attractivePotentialGradient(robotPos, goalPos)\n return repulsion + attraction\n\ndef path(robotPos, goalPos, obstaclesPos):\n s = 0.01\n n = 0.0\n lastPos = robotPos\n #print(np.hypot(robotPos[0] - goalPos[0], robotPos[1] - goalPos[1]))\n seuil = np.hypot(robotPos[0] - goalPos[0], robotPos[1] - goalPos[1])\n p = [robotPos]\n if(seuil > 0.1):\n gradient = np.gradient(potentialField(lastPos, goalPos, obstaclesPos))\n print (\"gradient = \", gradient)\n norm = np.linalg.norm(np.gradient(potentialField(lastPos, goalPos, obstaclesPos)))\n print (\"norm = \", norm)\n lastPos = s * gradient / -norm\n p.append(lastPos)\n return p\n","repo_name":"bbouchra28/Robotic","sub_path":"holo_obstacles/obstacle/control.py","file_name":"control.py","file_ext":"py","file_size_in_byte":4854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"71673071881","text":"from typing import Any\n\nfrom ...error import GraphQLError\nfrom ...language import SchemaDefinitionNode\nfrom . import SDLValidationContext, SDLValidationRule\n\n\n__all__ = [\"LoneSchemaDefinitionRule\"]\n\n\nclass LoneSchemaDefinitionRule(SDLValidationRule):\n \"\"\"Lone Schema definition\n\n A GraphQL document is only valid if it contains only one schema definition.\n \"\"\"\n\n def __init__(self, context: SDLValidationContext):\n super().__init__(context)\n old_schema = context.schema\n self.already_defined = old_schema and (\n old_schema.ast_node\n or old_schema.query_type\n or old_schema.mutation_type\n or old_schema.subscription_type\n )\n self.schema_definitions_count = 0\n\n def enter_schema_definition(self, node: SchemaDefinitionNode, *_args: Any) -> None:\n if self.already_defined:\n self.report_error(\n GraphQLError(\n \"Cannot define a new schema within a schema extension.\", node\n )\n )\n else:\n if self.schema_definitions_count:\n self.report_error(\n GraphQLError(\"Must provide only one schema definition.\", node)\n )\n self.schema_definitions_count += 1\n","repo_name":"graphql-python/graphql-core","sub_path":"src/graphql/validation/rules/lone_schema_definition.py","file_name":"lone_schema_definition.py","file_ext":"py","file_size_in_byte":1289,"program_lang":"python","lang":"en","doc_type":"code","stars":488,"dataset":"github-code","pt":"63"} +{"seq_id":"10802030204","text":"import sys\nimport os\nimport shutil\nimport functools\nsys.path.insert(0, 'scripts')\nsys.path.insert(0, 'tools/families')\nsys.path.insert(0, 'tools/trees')\nimport experiments as exp\nimport fam\nfrom ete3 import Tree\n\n# download data from https://github.com/chaoszhang/A-pro_data\n\n\ndef extract(gene_trees, species_tree, datadir, cut_underscore = False):\n fam.init_top_directories(datadir)\n index = 0\n if (species_tree != None):\n shutil.copyfile(species_tree, fam.get_species_tree(datadir))\n print(\"\\tCopied species tree into \" + fam.get_species_tree(datadir))\n for line in open(gene_trees).readlines():\n if (line[0] == \"#\"):\n continue\n family = \"family_\" + str(index)\n tree_str = line.replace(\"[&U]\", \"\")\n tree = Tree(tree_str)\n leaves = tree.get_leaves()\n if (len(leaves) < 4):\n continue\n leaves_dict = {}\n species_to_genes = {}\n for leaf in leaves:\n species = leaf.name\n if (cut_underscore):\n species = species.split(\"_\")[0]\n if (not species in species_to_genes):\n species_to_genes[species] = []\n genes = species_to_genes[species]\n leaf.name = species + \"_\" + str(len(genes))\n 
genes.append(leaf.name)\n fam.init_family_directories(datadir, family)\n mapping_file = fam.get_mappings(datadir, family)\n fam.write_phyldog_mapping(species_to_genes, mapping_file)\n with open(fam.get_true_tree(datadir, family), \"w\") as writer:\n writer.write(tree.write())\n index += 1\n print(\"\\tPostprocessing data...\")\n fam.postprocess_datadir(datadir)\n\ndef extract_fungi(inputdir, datadir):\n gene_trees_path = os.path.join(inputdir, \"pep.ml.renamed.trees\")\n species_tree = os.path.join(inputdir, \"fungi-ref-nonewline.tre\")\n print(\"Extracting fungi astral-pro data...\")\n extract(gene_trees_path, species_tree, datadir)\n\ndef extract_plants(inputdir, datadir):\n gene_trees_path = os.path.join(inputdir, \"1kp-c12-genetrees.tre\")\n astral_species_tree = os.path.join(inputdir, \"single-copy-astral-localPP-recomputed-originalformula.tre\")\n print(\"Extracting 1kplant astral-pro data...\")\n extract(gene_trees_path, astral_species_tree, datadir)\n\ndef extract_tom(datadir):\n inputdir = os.path.join(exp.benoit_datasets_root, \"raw_data\", \"life92\")\n gene_trees_path = os.path.join(inputdir, \"gene_trees.txt\")\n species_tree = None\n extract(gene_trees_path, species_tree, datadir)\n\ndef extract_bigcyano36(datadir):\n inputdir = os.path.join(exp.benoit_datasets_root, \"raw_data\", \"bigcyano\")\n gene_trees_path = os.path.join(inputdir, \"gene_trees.txt\")\n species_tree = None\n extract(gene_trees_path, species_tree, datadir, True)\n\n\ndef extract_aprodata(rawdatadir):\n families_dir = os.path.join(exp.benoit_datasets_root, \"families\")\n fungi_input_dir = os.path.join(rawdatadir, \"fungi\")\n fungi_output_dir = os.path.join(families_dir, \"apro_fungi\")\n extract_fungi(fungi_input_dir, fungi_output_dir)\n plants_input_dir = os.path.join(rawdatadir, \"1kp\")\n plants_output_dir = os.path.join(families_dir, \"apro_plants\")\n #extract_plants(plants_input_dir, plants_output_dir)\n\n\n\nif (__name__ == \"__main__\"): \n if (len(sys.argv) < 2): \n print(\"Syntax: python \" + os.path.basename(__file__) + \" astralprodata_repository_path\")\n exit(1)\n #extract_aprodata(sys.argv[1])\n extract_tom(sys.argv[1])\n","repo_name":"BenoitMorel/phd_experiments","sub_path":"tools/families/generate_families_with_aprodata.py","file_name":"generate_families_with_aprodata.py","file_ext":"py","file_size_in_byte":3281,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"63"} +{"seq_id":"35611520918","text":"\"\"\"\nModule responsible for translating sequence annotation data\ninto GA4GH native objects.\n\"\"\"\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport json\nimport random\n\nimport ga4gh.protocol as protocol\nimport ga4gh.datamodel as datamodel\nimport ga4gh.sqliteBackend as sqliteBackend\nimport ga4gh.exceptions as exceptions\n\n# Note to self: There's the Feature ID as understood in a GFF3 file,\n# the Feature ID that is its server-assigned compoundId, and the\n# ID of the feature's row in the DB FEATURE table.\n# I need to be careful about which one is which.\n\n\"\"\"\nFor this implementation, `featureSetId` is required, while `parentId`\nis optional, and filters the features within the requested `featureSetId`\nby their parent.\n\nOnly return features on the reference with this name. 
Genomic positions\nare non-negative integers less than reference length.\nRequests spanning the join of circular genomes are represented as two\nrequests one on each side of the join (position 0) end is also required\nIf specified, this query matches only annotations which match one of the\nprovided feature types.\nFor now do not use the features array in search\n\nGFF3 data is represented by rows in a single table, named FEATURE.\nThe columns of the FEATURE table correspond to the columns of a GFF3,\nwith three additional columns prepended representing the ID\nof this feature, the ID of its parent (if any), and a whitespace\nseparated array of its child IDs.\n\n_featureColumns pairs represent the ordered (column_name, column_type).\n\"\"\"\n_featureColumns = [\n ('id', 'TEXT'), # a synthetic principal key generated on ETL\n ('parent_id', 'TEXT'),\n ('child_ids', 'TEXT'),\n ('reference_name', 'TEXT'),\n ('source', 'TEXT'),\n ('type', 'TEXT'), # corresponds to featureType, an ontology term\n ('start', 'INT'),\n ('end', 'INT'),\n ('score', 'REAL'),\n ('strand', 'TEXT'), # limited to one of '+'/'-' or none\n ('name', 'TEXT'), # the \"ID\" as found in GFF3, or '' if none\n ('gene_name', 'TEXT'), # as found in GFF3 attributes\n ('transcript_name', 'TEXT'), # as found in GFF3 attributes\n ('attributes', 'TEXT')] # JSON encoding of attributes dict\n\n\nclass Gff3DbBackend(sqliteBackend.SqliteBackedDataSource):\n \"\"\"\n Notes about the current implementation:\n For this implementation, `featureSetId` is required, while `parentId`\n is optional, and filters the features within the requested `featureSetId`\n by their parent.\n\n Genomic positions are non-negative integers less than reference length.\n Requests spanning the join of circular genomes are represented as two\n requests one on each side of the join (position 0)\n \"\"\"\n\n def __init__(self, dbFile):\n super(Gff3DbBackend, self).__init__(dbFile)\n self.featureColumnNames = [f[0] for f in _featureColumns]\n self.featureColumnTypes = [f[1] for f in _featureColumns]\n\n def countFeaturesSearchInDb(\n self, referenceName=None, start=0, end=0,\n parentId=None, featureTypes=None):\n \"\"\"\n Same parameters as searchFeaturesInDb,\n except without the pagetoken/size.\n \"\"\"\n # TODO: Refactor out common bits of this and the below search query.\n sql = (\"SELECT COUNT(*) FROM FEATURE WHERE \"\n \"reference_name = ? \"\n \"AND end > ? \" # compare this to query start\n \"AND start < ? \" # and this to query end\n )\n # TODO: Optimize by refactoring out string concatenation\n sql_args = (referenceName, start, end)\n if parentId is not None:\n sql += \"AND parent_id = ? 
\"\n sql_args += (parentId,)\n if featureTypes is not None and len(featureTypes) > 0:\n sql += \"AND type IN (\"\n sql += \", \".join([\"?\", ] * len(featureTypes))\n sql += \") \"\n sql_args += tuple(featureTypes)\n query = self._dbconn.execute(sql, sql_args)\n return (query.fetchone())[0]\n\n def searchFeaturesInDb(\n self, pageToken=0, pageSize=None,\n referenceName=None, start=0, end=0,\n parentId=None, featureTypes=None):\n \"\"\"\n Perform a full features query in database.\n\n :param pageToken: int representing first record to return\n :param pageSize: int representing number of records to return\n :param referenceName: string representing reference name, ex 'chr1'\n :param start: int position on reference to start search\n :param end: int position on reference to end search >= start\n :param parentId: string restrict search by id of parent node.\n :return an array of dictionaries, representing the returned data.\n \"\"\"\n # TODO: Refactor out common bits of this and the above count query.\n sql = (\n \"SELECT * FROM FEATURE WHERE \"\n \"reference_name = ? \"\n \"AND end > ? \" # compare this to query start\n \"AND start < ? \") # and this to query end\n # TODO: Optimize by refactoring out string concatenation\n sql_args = (referenceName, start, end)\n if parentId is not None:\n sql += \"AND parent_id = ? \"\n sql_args += (parentId,)\n if featureTypes is not None and len(featureTypes) > 0:\n sql += \"AND type IN (\"\n sql += \", \".join([\"?\", ] * len(featureTypes))\n sql += \") \"\n sql_args += tuple(featureTypes)\n sql += \"ORDER BY reference_name, start, end ASC \"\n sql += sqliteBackend.limitsSql(pageToken, pageSize)\n query = self._dbconn.execute(sql, sql_args)\n return sqliteBackend.sqliteRows2dicts(query.fetchall())\n\n def getFeatureById(self, featureId):\n \"\"\"\n Fetch feature by featureID.\n\n :param featureId: the FeatureID as found in GFF3 records\n :return: dictionary representing a feature object,\n or None if no match is found.\n \"\"\"\n sql = \"SELECT * FROM FEATURE WHERE id = ?\"\n query = self._dbconn.execute(sql, (featureId,))\n ret = query.fetchone()\n if ret is None:\n return None\n return sqliteBackend.sqliteRow2Dict(ret)\n\n\nclass AbstractFeatureSet(datamodel.DatamodelObject):\n \"\"\"\n A set of sequence features annotations\n \"\"\"\n compoundIdClass = datamodel.FeatureSetCompoundId\n\n def __init__(self, parentContainer, localId):\n super(AbstractFeatureSet, self).__init__(parentContainer, localId)\n self._name = localId\n self._sourceUri = \"\"\n self._referenceSet = None\n self._attributes = protocol.Attributes()\n\n def getReferenceSet(self):\n \"\"\"\n Returns the reference set associated with this FeatureSet.\n \"\"\"\n return self._referenceSet\n\n def setReferenceSet(self, referenceSet):\n \"\"\"\n Sets the reference set associated with this FeatureSet to the\n specified value.\n \"\"\"\n self._referenceSet = referenceSet\n\n def toProtocolElement(self):\n \"\"\"\n Returns the representation of this FeatureSet as the corresponding\n ProtocolElement.\n \"\"\"\n gaFeatureSet = protocol.FeatureSet()\n gaFeatureSet.id = self.getId()\n gaFeatureSet.datasetId = self.getParentContainer().getId()\n gaFeatureSet.referenceSetId = self._referenceSet.getId()\n gaFeatureSet.name = self._name\n gaFeatureSet.sourceUri = self._sourceUri\n gaFeatureSet.attributes = self._attributes\n return gaFeatureSet\n\n def getCompoundIdForFeatureId(self, featureId):\n \"\"\"\n Returns server-style compound ID for an internal featureId.\n\n :param long featureId: id of 
feature in database\n :return: string representing ID for the specified GA4GH protocol\n Feature object in this FeatureSet.\n \"\"\"\n if featureId is not None and featureId != \"\":\n compoundId = datamodel.FeatureCompoundId(\n self.getCompoundId(), str(featureId))\n else:\n compoundId = \"\"\n return str(compoundId)\n\n\nclass SimulatedFeatureSet(AbstractFeatureSet):\n \"\"\"\n Simulated data backend for FeatureSet, used for internal testing.\n \"\"\"\n def __init__(self, parentContainer, localId, randomSeed=1):\n self._randomSeed = randomSeed\n super(SimulatedFeatureSet, self).__init__(parentContainer, localId)\n\n def _getRandomfeatureType(self, randomNumberGenerator):\n ontologyTuples = [\n (\"gene\", \"SO:0000704\"),\n (\"exon\", \"SO:0000147\")]\n term = protocol.OntologyTerm()\n ontologyTuple = randomNumberGenerator.choice(ontologyTuples)\n term.term, term.id = ontologyTuple[0], ontologyTuple[1]\n term.sourceName = \"sequenceOntology\"\n term.sourceVersion = \"0\"\n return term\n\n def _generateSimulatedFeature(self, randomNumberGenerator):\n feature = protocol.Feature()\n feature.featureSetId = self.getId()\n feature.start = randomNumberGenerator.randint(1000, 2000)\n feature.end = feature.start + randomNumberGenerator.randint(1, 100)\n feature.featureType = self._getRandomfeatureType(\n randomNumberGenerator)\n references = [\"chr1\", \"chr2\", \"chrX\"]\n feature.referenceName = randomNumberGenerator.choice(references)\n strands = [protocol.Strand.POS_STRAND, protocol.Strand.NEG_STRAND]\n feature.strand = randomNumberGenerator.choice(strands)\n feature.attributes = protocol.Attributes()\n feature.attributes.vals = {\n \"gene_name\": [\"Frances\", ],\n \"gene_type\": [\"mRNA\", ],\n \"gene_status\": [\"UNKNOWN\", ]}\n return feature\n\n def getFeature(self, compoundId):\n \"\"\"\n Fetches a simulated feature by ID.\n\n :param compoundId: any non-null string\n :return: A simulated feature with id set to the same value as the\n passed-in compoundId.\n :raises: exceptions.ObjectWithIdNotFoundException if None is passed\n in for the compoundId.\n \"\"\"\n if compoundId is None:\n raise exceptions.ObjectWithIdNotFoundException(compoundId)\n randomNumberGenerator = random.Random()\n randomNumberGenerator.seed(self._randomSeed)\n feature = self._generateSimulatedFeature(randomNumberGenerator)\n feature.id = str(compoundId)\n feature.parentId = \"\" # TODO: Test with nonempty parentIDs?\n return feature\n\n def getFeatures(\n self, referenceName, start, end,\n pageToken, pageSize,\n featureTypes=[], parentId=None, numFeatures=10):\n \"\"\"\n Returns a set number of simulated features.\n\n :param referenceName: name of reference to \"search\" on\n :param start: start coordinate of query\n :param end: end coordinate of query\n :param pageToken: None or int\n :param pageSize: None or int\n :param featureTypes: optional list of ontology terms to limit query\n :param parentId: optional parentId to limit query.\n :param numFeatures: number of features to generate in the return.\n 10 is a reasonable (if arbitrary) default.\n :return: Yields feature, nextPageToken pairs.\n nextPageToken is None if last feature was yielded.\n \"\"\"\n randomNumberGenerator = random.Random()\n randomNumberGenerator.seed(self._randomSeed)\n if pageToken is not None:\n nextPageToken = int(pageToken)\n else:\n nextPageToken = 0\n for featureId in range(numFeatures):\n gaFeature = self._generateSimulatedFeature(randomNumberGenerator)\n gaFeature.id = self.getCompoundIdForFeatureId(featureId)\n match = (\n 
gaFeature.start < end and\n gaFeature.end > start and\n gaFeature.referenceName == referenceName and (\n featureTypes is None or len(featureTypes) == 0 or\n gaFeature.featureType in featureTypes))\n if match:\n gaFeature.parentId = \"\" # TODO: Test nonempty parentIDs?\n if nextPageToken < numFeatures - 1:\n nextPageToken += 1\n else:\n nextPageToken = None\n yield gaFeature, (\n str(nextPageToken)\n if nextPageToken is not None else None)\n\n\nclass Gff3DbFeatureSet(AbstractFeatureSet):\n \"\"\"\n Stub class to directly read sequence annotation features from GFF3 files.\n Tests basic access, not to be used in production.\n \"\"\"\n def __init__(self, parentContainer, localId):\n super(Gff3DbFeatureSet, self).__init__(parentContainer, localId)\n self._sequenceOntologyTermMap = None\n self._dbFilePath = None\n self._db = None\n\n def setSequenceOntologyTermMap(self, sequenceOntologyTermMap):\n \"\"\"\n Sets the OntologyTermMap instance used by this FeatureSet to the\n specified value.\n \"\"\"\n self._sequenceOntologyTermMap = sequenceOntologyTermMap\n\n def populateFromFile(self, dataUrl):\n \"\"\"\n Populates the instance variables of this FeatureSet from the specified\n data URL.\n \"\"\"\n self._dbFilePath = dataUrl\n self._db = Gff3DbBackend(self._dbFilePath)\n\n def populateFromRow(self, row):\n \"\"\"\n Populates the instance variables of this FeatureSet from the specified\n DB row.\n \"\"\"\n self._dbFilePath = row[b'dataUrl']\n self._db = Gff3DbBackend(self._dbFilePath)\n\n def getDataUrl(self):\n \"\"\"\n Returns the URL providing the data source for this FeatureSet.\n \"\"\"\n return self._dbFilePath\n\n def getFeature(self, compoundId):\n \"\"\"\n Returns a protocol.Feature object corresponding to a compoundId\n :param compoundId: a datamodel.FeatureCompoundId object\n :return: a Feature object.\n :raises: exceptions.ObjectWithIdNotFoundException if invalid\n compoundId is provided.\n \"\"\"\n featureId = long(compoundId.featureId)\n with self._db as dataSource:\n featureReturned = dataSource.getFeatureById(featureId)\n\n if featureReturned is None:\n raise exceptions.ObjectWithIdNotFoundException(compoundId)\n else:\n gaFeature = self._gaFeatureForFeatureDbRecord(featureReturned)\n return gaFeature\n\n def _gaFeatureForFeatureDbRecord(self, feature):\n \"\"\"\n :param feature: The DB Row representing a feature\n :return: the corresponding GA4GH protocol.Feature object\n \"\"\"\n gaFeature = protocol.Feature()\n gaFeature.id = self.getCompoundIdForFeatureId(feature['id'])\n if feature.get('parent_id'):\n gaFeature.parentId = self.getCompoundIdForFeatureId(\n feature['parent_id'])\n else:\n gaFeature.parentId = \"\"\n gaFeature.featureSetId = self.getId()\n gaFeature.referenceName = feature['reference_name']\n gaFeature.start = int(feature['start'])\n gaFeature.end = int(feature['end'])\n if feature.get('strand', '') == '-':\n gaFeature.strand = protocol.Strand.NEG_STRAND\n else:\n # default to positive strand\n gaFeature.strand = protocol.Strand.POS_STRAND\n gaFeature.childIds = map(\n self.getCompoundIdForFeatureId,\n json.loads(feature['child_ids']))\n gaFeature.featureType = self._sequenceOntologyTermMap.getGaTermByName(\n feature['type'])\n gaFeature.attributes = protocol.Attributes()\n gaFeature.attributes.vals = json.loads(feature['attributes'])\n return gaFeature\n\n def getFeatures(self, referenceName, start, end,\n pageToken, pageSize,\n featureTypes=None, parentId=None):\n \"\"\"\n method passed to runSearchRequest to fulfill the request\n :param str referenceName: 
name of reference (ex: \"chr1\")\n :param start: castable to int, start position on reference\n :param end: castable to int, end position on reference\n :param pageToken: none or castable to int\n :param pageSize: none or castable to int\n :param featureTypes: array of str\n :param parentId: none or featureID of parent\n :return: yields a protocol.Feature at a time, together with\n the corresponding nextPageToken (which is null for the last\n feature served out).\n \"\"\"\n # parse out the various query parameters from the request.\n start = int(start)\n end = int(end)\n\n with self._db as dataSource:\n # featuresCount is needed to ensure that once the\n # request is fulfilled, no nextPageTokens past the\n # end of the actual dataset range are returned.\n featuresCount = dataSource.countFeaturesSearchInDb(\n referenceName=referenceName,\n start=start, end=end,\n parentId=parentId, featureTypes=featureTypes)\n featuresReturned = dataSource.searchFeaturesInDb(\n pageToken, pageSize,\n referenceName=referenceName,\n start=start, end=end,\n parentId=parentId, featureTypes=featureTypes)\n\n # pagination logic: None if last feature was returned,\n # else 1 + row number being returned (starting at row 0).\n if pageToken is not None:\n nextPageToken = int(pageToken)\n else:\n nextPageToken = 0\n for featureRecord in featuresReturned:\n gaFeature = self._gaFeatureForFeatureDbRecord(featureRecord)\n if nextPageToken < featuresCount - 1:\n nextPageToken += 1\n else:\n nextPageToken = None\n yield gaFeature, (\n str(nextPageToken)\n if nextPageToken is not None else None)\n","repo_name":"achave11-ucsc/server","sub_path":"ga4gh/datamodel/sequenceAnnotations.py","file_name":"sequenceAnnotations.py","file_ext":"py","file_size_in_byte":18028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"63"} +{"seq_id":"2456649456","text":"from typing import NamedTuple, Optional\n\nfrom bs4 import BeautifulSoup\nfrom yarl import URL\n\nimport libkol\n\nfrom .request import Request\n\n\nclass Response(NamedTuple):\n server_url: str\n challenge: Optional[str]\n\n\nclass homepage(Request[Response]):\n \"\"\"\n This request is most often used before logging in. It allows the KoL servers to assign a\n particular server number to the user. 
In addition, it gives us the user's login challenge\n so that we might login to the server in a more secure fashion.\n \"\"\"\n\n def __init__(self, session: \"libkol.Session\", server_number: int = 0) -> None:\n super().__init__(session)\n\n if server_number > 0:\n url = \"https://www{}.kingdomofloathing.com/main.php\".format(server_number)\n else:\n url = \"https://www.kingdomofloathing.com/\"\n\n self.request = session.request(url)\n\n @staticmethod\n async def parser(content: str, **kwargs) -> Response:\n url = kwargs[\"url\"] # type: URL\n\n soup = BeautifulSoup(content, \"html.parser\")\n\n challenge_input = soup.find(\"input\", attrs={\"name\": \"challenge\"})\n challenge = str(challenge_input[\"value\"]) if challenge_input else None\n\n return Response(str(url.origin()), challenge)\n","repo_name":"python-kol/libkol","sub_path":"libkol/request/homepage.py","file_name":"homepage.py","file_ext":"py","file_size_in_byte":1254,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"63"} +{"seq_id":"201146393","text":"key = input()\nmess = input()\nnew = ''\nfor x in mess:\n if x.isalpha():\n new+= x\n \nmess = '' \nfor let in range(len(new)):\n char = ord(new[let])\n num = ord(key[let%3])-65\n a = char+num\n if char + num > 90:\n a = 64 + (char+num-90)\n mess += chr(a)\n \nprint(mess)","repo_name":"CLRPain/CS-2","sub_path":"kyle/encrypt.py","file_name":"encrypt.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"1080375590","text":"# ----------------------------------------------------------------\n# Sliding window solution O(N^2)\n# ----------------------------------------------------------------\nclass Solution:\n def subarraySum(self, nums: List[int], k: int) -> int:\n counter = 0\n # sliding window\n # iterate and calculate the sum total at the same time\n # increment the counter everytime the condition is satisfied\n for i in range (len(nums)):\n total = 0\n for j in range (i, len(nums)):\n total += nums[j]\n if(total == k):\n counter +=1\n return counter\n# ----------------------------------------------------------------\n# Dictionary solution O(N)\n# ----------------------------------------------------------------\nclass Solution:\n def subarraySum(self, nums: List[int], k: int) -> int:\n counter = 0\n total = 0\n dict = {}\n # total 0 = 1\n dict[0] = 1\n # sum[i] - sum[j] = k => sum[j] = sum[i] - k\n # sliding window\n for i in range (len(nums)):\n total += nums[i]\n if((total-k) in dict):\n counter += dict[total-k]\n # save cumulative sums with the count of instances\n if((total) in dict):\n dict[total] += 1\n else:\n dict[total] = 1\n return counter","repo_name":"ayoubbensakhria/davinci","sub_path":"sliding window/subarraySum.py","file_name":"subarraySum.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"} +{"seq_id":"29163768460","text":"from rest_framework.test import APITestCase\r\nfrom rest_framework import status\r\nfrom product.models import Product, ProductDetail\r\nfrom product.views import ProductViewSet, ProductDetailViewSet\r\nfrom user.models import Merchant\r\nfrom django.contrib.auth.models import User\r\nfrom django.urls import reverse\r\n\r\n\r\n\r\n\r\nclass ProductViewSetTestCase(APITestCase):\r\n\r\n def setUp(self):\r\n self.product1 = Product.objects.create(name='product1')\r\n self.product2 = 
Product.objects.create(name='product2')\r\n\r\n def test_list_products(self):\r\n url = reverse('products-list')\r\n response = self.client.get(url)\r\n self.assertEqual(response.status_code, status.HTTP_200_OK)\r\n self.assertEqual(len(response.data), 2)\r\n\r\n\r\nclass ProductDetailViewSetTestCase(APITestCase):\r\n\r\n def setUp(self):\r\n self.user = User.objects.create(username='testuser',\r\n first_name='firstname',\r\n last_name='lastname',\r\n email='test@email.com',\r\n is_staff=False,\r\n is_active=True)\r\n self.merchant = Merchant.objects.create(name='Test Merchant',\r\n address='Test address',\r\n user=self.user)\r\n self.product1 = Product.objects.create(name='product1')\r\n self.product_detail1 = ProductDetail.objects.create(product_id=self.product1,\r\n product_detail='description',\r\n merchant_id=self.merchant)\r\n\r\n def test_retrieve_product_detail(self):\r\n url = reverse('detail-detail', args=[self.product1.id])\r\n response = self.client.get(url)\r\n self.assertEqual(response.status_code, status.HTTP_200_OK)\r\n self.assertEqual(response.data['product_detail'], 'description')","repo_name":"Byronvvvv5/Assignment_API","sub_path":"product/tests/view_test.py","file_name":"view_test.py","file_ext":"py","file_size_in_byte":2028,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"31537300799","text":"\"\"\"education_platform_online URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.0/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.urls import path, include, re_path\nfrom django.views.generic import TemplateView\nfrom django.views.static import serve\n\nimport xadmin\nfrom education_platform_online.settings import MEDIA_ROOT, STATIC_ROOT\nfrom organization.views import OrgView\nfrom users import views\nfrom users.views import LoginView, RegisterView, ActiveUserView, ForgetPwdView, ResetView, ModifyPwdView, LogoutView, \\\n IndexView\n\nurlpatterns = [\n path('xadmin/', xadmin.site.urls),\n # path('', TemplateView.as_view(template_name='index.html'), name='index'),\n path('', IndexView.as_view(), name='index'),\n # path('login/', views.user_login, name='login'),\n path('login/', LoginView.as_view(), name='login'),\n path('logout/', LogoutView.as_view(), name=\"logout\"),\n # path('login/', TemplateView.as_view(template_name='login.html'), name='login'),\n path('register/', RegisterView.as_view(), name='register'),\n path('captcha/', include('captcha.urls')),\n re_path('active/(?P.*)/', ActiveUserView.as_view(), name='user_active'),\n path('forget/', ForgetPwdView.as_view(), name='forget_pwd'),\n re_path('reset/(?P.*)/', ResetView.as_view(), name='reset_pwd'),\n path('modify_pwd/', ModifyPwdView.as_view(), name='modify_pwd'),\n # path('org_list/', OrgView.as_view(), name='org_list'),\n path(\"org/\", include('organization.urls', namespace=\"org\")),\n # 处理图片显示的url,使用Django自带serve,传入参数告诉它去哪个路径找,我们有配置好的路径MEDIAROOT\n re_path('^media/(?P.*)', serve, {\"document_root\": MEDIA_ROOT}),\n path('course/', include('course.urls', namespace='course')),\n path('users/', include('users.urls', namespace='users')),\n # 富文本编辑器url\n path('ueditor/', include('DjangoUeditor.urls')),\n # 404和500,生成环境汇总,必须设置debug = False\n # 一旦debug改为false,django就不会代管你的静态文件,所以这里要设置一个url处理静态文件\n # re_path(r'^static/(?P.*)', serve, {\"document_root\": STATIC_ROOT}),\n]\n\n# 全局404页面配置,当django的settings中DEBUG = False时后会自动调用,需要重新配置静态文件,DEBUG = True时的静态文件会失效\nhandler404 = 'users.views.pag_not_found'\n\nhandler500 = 'users.views.page_error'\n","repo_name":"haochengdu/education_platform_online","sub_path":"education_platform_online/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2964,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"12160555454","text":"# -*- coding=utf-8 -*-\n\nimport os\nimport telebot\nimport logging\n# from src.queries_helper import read_queries\nfrom src.service import read_file\n\n# logging.basicConfig(filename=\"sample.log\", level=logging.INFO)\nLOG = logging.getLogger(\"ex\")\n\nAPP_NAME = os.environ.get('APP_NAME')\n\n# Аутентификация в Postgres\nDB_NAME = os.environ.get('DB_NAME')\nDB_HOST = os.environ.get('DB_HOST')\nDB_PORT = os.environ.get('DB_PORT')\nDB_USER = os.environ.get('DB_USER')\nDB_PASS = os.environ.get('DB_PASS')\nDB_APP_NAME = os.environ.get('DB_APP_NAME')\n\n# Подключение к телеграм\nTELEGRAM_TOKEN = os.environ.get('TELEGRAM_TOKEN')\nBOT = telebot.TeleBot(TELEGRAM_TOKEN)\n\nSETTINGS = {}\nADMINS = {}\nSEARCHES = {}\nEXTENSIONS = {}\nSNAPSHOTS = {}\n\nQUERY_EXTENSION = '.sql'\n\n# Запросы для логирования\nPATH_LOGGING = 'src/queries/logging/'\nINCOMING_LOG_QUERY = read_file(PATH_LOGGING + 'incoming' + QUERY_EXTENSION)\nOUTGOING_LOG_QUERY = read_file(PATH_LOGGING + 'outgoing' + QUERY_EXTENSION)\nERROR_LOG_QUERY = read_file(PATH_LOGGING + 'error' + QUERY_EXTENSION)\n\n\n# Запросы по сбору данных из задания\nPATH_EXECUTION = 
'src/queries/execution/'\nEXECUTION_LOG_QUERY = read_file(PATH_EXECUTION + 'log' + QUERY_EXTENSION)\nEXECUTION_MAIN_QUERY = read_file(PATH_EXECUTION + 'main' + QUERY_EXTENSION)\nEXECUTION_MAP_QUERY = read_file(PATH_EXECUTION + 'map' + QUERY_EXTENSION)\n","repo_name":"arutunyan-gv/telegram_bot","sub_path":"prefs/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"22612671502","text":"'''\nPresents basic tensor math operations. Adding, multiplication, matrix multiplication etc.\n\n'''\nfrom __future__ import print_function\nimport torch\nimport time\n\nx = torch.empty(5, 3)\nprint(x)\n\n\nr1, c1 = (150, 50)\nr2, c2 = (150, 50)\n\na = torch.rand(r1, c1)\nb = torch.rand(r2, c2)\n\nt1 = time.perf_counter()\n\nc = a*b\n\nt2 = time.perf_counter()\nprint('cpu time={} result={}'.format(t2-t1, c.sum()))\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\nif torch.cuda.is_available():\n device = torch.device(\"cuda\") # a CUDA device object\n print(device)\n\n print(torch.cuda.get_device_properties(0))\n a = a.cuda()\n b = b.cuda()\n torch.cuda.synchronize()\n torch.cuda.synchronize()\n\n t1 = time.perf_counter()\n\n c = a*b\n torch.cuda.synchronize()\n t2 = time.perf_counter()\n print('gpu time={} result={}'.format(t2-t1, c.sum()))\nelse:\n print('GPU not enabled')\n\n\n# batch matrix matrix mulltiplication\nimport numpy as np\n\n# b1 = torch.zeros(1, 1, 4).fill_(2) # [ [ [2,2,2,2] ] ]\n# b2 = torch.zeros(1, 4, 3).fill_(1) # [ [ [1,1,1,1] ] ]\n\nna = np.array([[1000, 100, 10, 1]], dtype=np.float32)\nb1 = torch.from_numpy(na)\nb1 = b1.unsqueeze(0) # add 3 dim (batch dim)\n\n\nna = np.arange(1, 13, dtype=np.float32)\nna = na.reshape([4, 3])\nb2 = torch.from_numpy(na)\nb2 = b2.unsqueeze(0) # add 3 dim (batch dim)\n\nprint(b1, b2)\nbatch_mul = torch.bmm(b1, b2)\nprint(batch_mul)\n\n","repo_name":"ksopyla/pytorch_fundamentals","sub_path":"tensors_math.py","file_name":"tensors_math.py","file_ext":"py","file_size_in_byte":1419,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"63"} +{"seq_id":"30404326439","text":"# #Exercise \n# # input num, *nums (list or tuple)\n# # return list with power num\n# #Example to_power(3,[1,2,3])\n# # output list [1**3, 8, 27]\n# # Use list comprehension\n\n# args can take any number of variables \n\ndef to_power(num, *args):\n print(num)\n print(args)\n if not args:\n return \"Hey, you did not enter any args\"\n else: \n return [numbers**num for numbers in args]\n\n\nnums = [1,2,3,4]\n\nprint(to_power(3,*nums))\n\n","repo_name":"tripaak/python","sub_path":"Practice_Files/143_args_exercise.py","file_name":"143_args_exercise.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"9748577867","text":"import logging\n\nfrom mock import Mock\n\nfrom slippinj.cli.scripts.valet import Valet\n\n\nclass TestValet:\n def test_script_can_be_configured(self):\n mocked_args_parser = Mock()\n mocked_args_parser.add_parser = Mock(return_value=mocked_args_parser)\n mocked_args_parser.add_argument = Mock(return_value=True)\n\n Valet(mocked_args_parser).configure()\n\n assert 3 == mocked_args_parser.add_argument.call_count\n\n def test_script_is_executable(self):\n mocked_ansible_client = Mock()\n mocked_ansible_client.run_playbook = Mock(return_value=True)\n\n logger = 
logging.getLogger('test')\n logger.addHandler(logging.NullHandler())\n\n mocked_injector = Mock()\n mocked_injector.get = Mock('ansible_client', side_effect=[logger, mocked_ansible_client])\n\n mocked_args = Mock()\n mocked_args.playbook = 'test'\n mocked_args.cluster_id = 'test'\n\n assert None == Valet(Mock()).run(mocked_args, mocked_injector)\n","repo_name":"scm-spain/slippin-jimmy","sub_path":"tests/slippinj/cli/scripts/test_valet.py","file_name":"test_valet.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"63"} +{"seq_id":"9666215371","text":"import tornado.websocket\nfrom datetime import datetime\nfrom app.main.view import BaseHandler\n\n\n\nclass WebSocketBaseHandler(tornado.websocket.WebSocketHandler):\n\n def check_origin(self, origin):\n \"\"\"\n 跨域请求处理\n \"\"\"\n return True\n\n\nclass ChatHandler(WebSocketBaseHandler):\n client_list = []\n def open(self):\n now_str_time = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n print(\"[%s]建立连接: %s\" % (now_str_time, self))\n self.client_list.append(self)\n for c in self.client_list:\n c.write_message(\"[%s]系统消息: %s 进入群聊\" % (now_str_time, self))\n\n def on_close(self):\n now_str_time = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n print(\"[%s]断开连接: %s\" % (now_str_time, self))\n self.client_list.remove(self)\n for c in self.client_list:\n c.write_message(\"[%s]系统消息: %s 离开群聊\" % (now_str_time, self))\n\n def on_message(self, message):\n now_str_time = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n print(\"[%s]收到客户端: %s 消息: %s\" % (now_str_time, self, message))\n for c in self.client_list:\n c.write_message(\"[%s] %s说: %s\" % (now_str_time, self, message))\n","repo_name":"doom2020/hotme","sub_path":"app/chat/view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"30901462651","text":"#Referenced from @Strefan\nclass Solution:\n #简洁版 next iter的使用\n def numMatchingSubseq(self, S, words):\n waiting = collections.defaultdict(list)\n for it in map(iter, words):\n waiting[next(it)].append(it)\n for c in S:\n for it in waiting.pop(c, ()):\n waiting[next(it, None)].append(it)\n return len(waiting[None])\n\n #容易理解版\n def numMatchingSubseq(self, S, words):\n ans = 0\n waiting = collections.defaultdict(list) #当然也可以是[[] for _ in range(26)]但是不用python自带的dict感觉亏一些\n for word in words:\n it = iter(word)\n waiting[next(it)].append(it)\n print(waiting)\n for c in S:\n old_bucket = waiting[c]\n waiting[c] = []\n while old_bucket:\n it = old_bucket.pop()\n nxt = next(it, None)\n if nxt:\n waiting[nxt].append(it)\n else:\n ans += 1\n return ans","repo_name":"YeahHuang/Leetcode","sub_path":"792_numberofMatchingSub.py","file_name":"792_numberofMatchingSub.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"} +{"seq_id":"19108201525","text":"import bisect\nfrom typing import List, Tuple, Optional, Dict\nfrom collections import defaultdict, deque, Counter\n\n\nclass Solution:\n def unhappyFriends(self, n: int, preferences: List[List[int]], pairs: List[List[int]]) -> int:\n p2p = [-1] * n\n for x, y in pairs:\n p2p[x], p2p[y] = y, x\n p2pref = [set()] * n # type: List[set[int]]\n for p, pref in enumerate(preferences):\n p2pref[p] = set(pref[:pref.index(p2p[p])])\n print(p2pref)\n cheater = set()\n for p in range(n):\n for other in p2pref[p]:\n if p in 
p2pref[other]:\n cheater.add(other)\n cheater.add(p)\n return len(cheater)\n\nif __name__ == '__main__':\n sol = Solution()\n print(sol.unhappyFriends(6,\n[[1,4,3,2,5],[0,5,4,3,2],[3,0,1,5,4],[2,1,4,0,5],[2,1,0,3,5],[3,4,2,0,1]],\n[[3,1],[2,0],[5,4]]))","repo_name":"rayguo233/leetcode-attempts","sub_path":"python/1583. [M] Count Unhappy Friends.py","file_name":"1583. [M] Count Unhappy Friends.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"7389313208","text":"import numpy as np \nfrom fractions import Fraction\nfrom copy import copy\nimport math\n\n\n\ndef calc_l(array,array_size):\n L = array.copy()\n for i in range(0,array_size[0]):\n for j in range(0,array_size[1]):\n L[i][j]=0\n for i in range(0,array_size[0]):\n sum=0\n for k in range(0,i):\n print()\n if(k != i):\n sum_two=0\n for j in range(0,k):\n sum_two+=L[i][j]*L[k][j]\n num_one= int(array[i][k]-sum_two)\n num_two=int(L[k][k])\n \n L[i][k] = Fraction(num_one/num_two)\n \n else:\n sum+=array[k][i]\n L[k][k]=array[k][k]-sum\n for j in range(0,i):\n sum+=(L[i][j])**2\n numb=array[i][i]-sum\n sqrt_numb=numb**Fraction(1,2)\n L[i][i]=int(math.sqrt(array[i][i]-sum))\n return L\n\ndef calc_y(l_array,b_array,array_size):\n y_array=[]\n for i in range(0,array_size[0]):\n sum_y_l=0\n for j in range(0,i):\n sum_y_l=sum_y_l+l_array[i][j]*y_array[j]\n num_one=int(b_array[i]-sum_y_l)\n y=Fraction((num_one)/l_array[i][i])\n y_array.append(y)\n \n return y_array\n\n\n\ndef cacl_x(i,arr_size,l_arr,y_arr,x_array):\n l_array=l_arr\n y_array=y_arr\n array_size=arr_size\n \n if ( i==0):\n a=2\n else:\n sum_L_x=0\n for j in range(array_size[1]-1,i,-1):\n sum_L_x= sum_L_x+l_array[i-1][j-1] * x_array[j-1]\n num_one = y_array[i-1] - sum_L_x\n num_two = int(l_array[i-1][i-1])\n x=Fraction(num_one,num_two) \n \n x_array[i-1]=x\n\n cacl_x(i-1,array_size,l_arr,y_array,x_array)\n \n \n\n\n\n\ndef main():\n \n array = np.array([[1,2,3,1],[2,8,10,3],[3,10,22,7]]) \n # array = np.array([[4,2,-2,4,-12],[2,10,-7,4,9],[-2,-7,6,-5,-9],[-4,4,-5,18,39]])\n # array = np.array([[1,-2,3,1,-1],[-2,5,-8,1,-1],[3,-8,17,-7,3],[1,1,-7,18,-4]])\n \n b_array=np.array([1,3,7])\n # b_array=np.array([-12,9,-9,39])\n # b_array=np.array([-1,-1,3,-4])\n \n \n array = array + Fraction()\n print(array)\n array_size = array.shape\n x_array=[1 for i in range(0,array_size[1]-1)]\n l_array = calc_l(array,array_size)\n # print(l_array)\n # l_array = l_array + Fraction()\n \n l_trasnpose= np.transpose(l_array)\n \n y_array=calc_y(l_array,b_array,array_size)\n \n cacl_x(array_size[0],array_size,l_trasnpose, y_array,x_array)\n print(x_array)\nmain()\n\n\n\n","repo_name":"mrpodkalicki/gaussian-elimination-method-and-choleski","sub_path":"choleski.py","file_name":"choleski.py","file_ext":"py","file_size_in_byte":2527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"72355698762","text":"import os\nfrom flask_socketio import SocketIO, send, emit, join_room, leave_room\nfrom flask import Flask, session, render_template, request, redirect, jsonify\nfrom flask_session import Session\nimport requests\nfrom functools import wraps\nfrom collections import deque\n\napp = Flask(__name__)\napp.config[\"SECRET_KEY\"] = \"my secret key\"\nsocketio = SocketIO(app)\n\nlogged_users = []\nchannels = []\nchannelsMessages = dict()\n\ndef login_required(f):\n @wraps(f)\n def decorated(*args, **kwargs):\n if session.get('username') is None:\n return 
redirect('/signin')\n else:\n return f(*args, **kwargs)\n return decorated\n\n@app.route(\"/\")\n@login_required\ndef index():\n return render_template('index.html', channels=channels)\n\n@app.route(\"/signin\", methods=['GET','POST'])\ndef signin():\n session.clear()\n username = request.form.get(\"username\")\n\n if request.method == 'POST':\n if len(username) < 2:\n return render_template('error.html', message=\"Username can't be empty!\")\n if username in logged_users:\n return render_template('error.html', message='User is already logged in!')\n\n session['username'] = username\n logged_users.append(username)\n # Remember the user session on a cookie if the browser is closed.\n session.permanent = True\n\n return redirect(\"/\")\n else:\n return render_template(\"signin.html\")\n\n@app.route(\"/logout\", methods=['GET'])\ndef logout():\n try:\n logged_users.remove(session.get('username'))\n session.clear()\n except: pass\n return redirect('/signin')\n\n\n@app.route('/create', methods=['GET','POST'])\ndef create():\n channel = request.form.get('channel')\n if request.method == 'POST':\n\n if channel == '':\n return render_template('error.html', message=\"Channel name can not be empty!\")\n if channel in channels:\n return render_template('chatroom.html', channel_name=channel, channels=channels, messages=channelsMessages[channel])\n else:\n channels.append(channel)\n channelsMessages[channel] = deque()\n return redirect(\"/channel/\"+str(channel))\n #return render_template('chatroom.html', channel_name=channel, channels=channels)\n\n\n\n@app.route(\"/channel/\", methods=['GET','POST'])\n@login_required\ndef view(channel):\n\n session['current_channel'] = channel\n return render_template('chatroom.html',channel_name=channel, channels=channels, messages=channelsMessages[channel])\n\n\n\n@socketio.on(\"joined\", namespace='/')\ndef joined():\n \"\"\" Send message to announce that user has entered the channel \"\"\"\n\n # Save current channel to join room.\n room = session.get('current_channel')\n\n join_room(room)\n print('da5lt', room,session.get('username'))\n emit('status', {\n 'userJoined': session.get('username'),\n 'channel': room,\n 'msg': session.get('username') + ' has entered the channel'},\n room=room)\n\n@socketio.on(\"left\", namespace='/')\ndef left():\n \"\"\" Send message to announce that user has left the channel \"\"\"\n\n room = session.get('current_channel')\n\n leave_room(room)\n emit('status', {\n 'msg': session.get('username') + ' has left the channel'},\n room=room)\n\n\n@socketio.on('send message')\ndef send_msg(msg, timestamp):\n \"\"\" Receive message with timestamp and broadcast on the channel \"\"\"\n\n # Broadcast only to users on the same channel.\n room = session.get('current_channel')\n\n # Save 100 messages and pass them when a user joins a specific channel.\n\n if len(channelsMessages[room]) > 100:\n # Pop the oldest message\n channelsMessages[room].popleft()\n print(msg)\n channelsMessages[room].append([timestamp, session.get('username'), msg])\n\n emit('announce message', {\n 'user': session.get('username'),\n 'timestamp': timestamp,\n 'msg': msg},\n room=room)\n","repo_name":"MahmoudHanyy/Flack","sub_path":"application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":3939,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"2883342790","text":"import numpy\nimport logging\n\n\nclass NaiveBayes(object):\n\n def __init__(self, _lambda: float = 1.):\n self.__logger = 
logging.getLogger(\"NaiveBayes\")\n self.__logger.setLevel(logging.DEBUG)\n\n self.__lambda = _lambda\n\n self.__sample_size: int = 0\n self.__unique_label: numpy.ndarray = numpy.array([])\n self.__label_counter: dict = {}\n self.__input_data: numpy.ndarray = numpy.array([])\n self.__input_label: numpy.ndarray = numpy.array([])\n\n def __calc_x_y(self, feature_dim_idx: int, feature_value: any, label: any, input_data: numpy.ndarray,\n input_label: numpy.ndarray):\n \"\"\"\n calculate P(X|Y) with Laplacian smoothing\n :param feature_dim_idx:\n :param feature_value:\n :param label:\n :param input_data:\n :param input_label:\n :return:\n \"\"\"\n\n cnt = 0\n\n for i in range(input_data.shape[0]):\n if input_data[i][feature_dim_idx] == feature_value:\n if input_label[i] == label:\n cnt += 1\n\n unique_feature_value = numpy.unique(input_data[:, feature_dim_idx])\n\n return (cnt + self.__lambda) / \\\n (self.__label_counter[label] + unique_feature_value.shape[0] * self.__lambda)\n\n def __calc_y(self, label: any):\n \"\"\"\n calculate P(Y) with Laplacian smoothing\n :param label:\n :return:\n \"\"\"\n upper = (self.__label_counter.get(label, 0) + self.__lambda)\n lower = (self.__sample_size + self.__unique_label.shape[0] * self.__lambda)\n result = upper / lower\n # self.__logger.debug(\"{} / {} = {}\".format(upper, lower, result))\n return result\n\n def fit(self, input_data: numpy.ndarray, input_label: numpy.ndarray):\n self.__label_counter = {}\n self.__sample_size = input_label.shape[0]\n self.__unique_label = numpy.unique(input_label)\n self.__input_data = input_data\n self.__input_label = input_label\n\n for label in input_label:\n self.__label_counter[label] = self.__label_counter.get(label, 0) + 1\n\n def __predict_one(self, data: numpy.ndarray):\n\n max_value = -0.1\n result = self.__unique_label[0]\n\n for label in self.__unique_label:\n\n buf = self.__calc_y(label)\n\n self.__logger.debug(\"P(Y = {}) = {}\".format(label, buf))\n\n for i in range(data.shape[0]):\n tmp = self.__calc_x_y(i, data[i], label, self.__input_data, self.__input_label)\n self.__logger.debug(\"P(X_{} = {} | Y = {}) = {}\".format(i, data[i], label, tmp))\n\n buf *= tmp\n\n self.__logger.debug(\"P(X = {} | Y = {}) = {}\".format(data, label, buf))\n\n if buf > max_value:\n result = label\n max_value = buf\n\n return result\n\n def predict(self, test_data: numpy.ndarray) -> numpy.ndarray:\n result_array = []\n\n for each in test_data:\n result_array.append(self.__predict_one(each))\n\n return numpy.array(result_array)\n","repo_name":"LucienShui/HelloMachineLearning","sub_path":"li_hang/naive_bayes/naive_bayes.py","file_name":"naive_bayes.py","file_ext":"py","file_size_in_byte":3095,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"} +{"seq_id":"40198901738","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision\nimport torchvision.transforms as transforms\nimport numpy as np\nfrom tqdm import tqdm\nfrom glob import glob\nfrom PIL import Image\nfrom torch.utils.tensorboard import SummaryWriter\n\nclass CustomResnet(nn.Module):\n def __init__(self):\n super(CustomResnet, self).__init__()\n self.device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n res = torchvision.models.resnet152(pretrained=True)\n self.layer = res._modules.get('avgpool')\n self.output_size = 2048\n\n res.eval()\n self.res = res.to(self.device)\n\n\n def get_feature_vector(self, images):\n embedding = torch.zeros(self.output_size).to(self.device)\n def 
copy_data(m, input, output):\n # print(\"output\", output.size())\n embedding.copy_(output.data.squeeze())\n\n h = self.layer.register_forward_hook(copy_data)\n\n inputs = images.to(self.device)\n\n self.res(inputs)\n h.remove()\n return embedding\n\n def forward(self, x):\n return self.res(x)\n\ndef get_features(files, embedder):\n tensor_img_size = 224\n resize_transform = torchvision.transforms.Compose([\n torchvision.transforms.Resize((tensor_img_size, tensor_img_size)),\n transforms.ToTensor(),\n ])\n\n device = embedder.device\n\n features = torch.zeros(0).to(device)\n label_imgs = torch.zeros(0).to(device)\n images = torch.zeros(0).to(device)\n labels = []\n img_size = 56 # thumnbail image size\n\n for i in tqdm(range(len(files))):\n img = resize_transform(Image.open(files[i])).to(device)\n img = img.unsqueeze(0)\n # images = torch.cat((images, img))\n label_img = F.interpolate(img, size=img_size) # use 1st frame for visualize\n label_imgs = torch.cat((label_imgs, label_img))\n feature = embedder.get_feature_vector(img)\n features = torch.cat((features, feature.unsqueeze(0)))\n \n return features, label_imgs\n\n\nif __name__ == \"__main__\":\n writer = SummaryWriter(\"logs\")\n files = sorted(glob(\"/home/shimine/face/manazashi/cropped/*\"))\n embedder = CustomResnet()\n features, label_imgs = get_features(files, embedder)\n writer.add_embedding(features, label_img=label_imgs)\n # print(features.size(), label_imgs.size())\n writer.close()\n","repo_name":"sin392/manazashi","sub_path":"research/embedding.py","file_name":"embedding.py","file_ext":"py","file_size_in_byte":2354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"18210777094","text":"#! /usr/bin/python\n\nimport hashlib\n\nkey = \"yzbqklnj\"\n\nfound = [False, False, False, False, False, False]\nary = [\"0\", \"00\", \"000\", \"0000\", \"00000\", \"000000\" ]\n\nnum_found = 0\n\ncount = -1\nwhile num_found < len(found):\n count += 1\n h = hashlib.md5(\"%s%s\" % (key, count)).hexdigest()\n for i in range(0, len(found)):\n if found[i]:\n continue\n if h.startswith(ary[i]):\n print(\"First hash starting with %s is number %s\" % (ary[i], count))\n num_found += 1\n found[i] = True\n\nif False:\n if not found5 and h.startswith(\"00000\"):\n print(\"First number with five zeros is %s\" % count)\n found5 = True\n if not found6 and h.startswith(\"000000\"):\n print(\"First number with six zeros is %s\" % count)\n found6 = True\n if found5 and found6:\n print(\"break\")\n","repo_name":"mjmusante/AdventOfCode","sub_path":"2015/day4.py","file_name":"day4.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"63"} +{"seq_id":"407254123","text":"import time\r\nimport cv2\r\nimport numpy as np\r\nfrom picamera.array import PiRGBArray\r\nfrom picamera import PiCamera\r\nimport RPi.GPIO as GPIO\r\n\r\nbuzzer = 22\r\nGPIO.setmode(GPIO.BCM)\r\nGPIO.setup(buzzer, GPIO.OUT)\r\ncamera = PiCamera()\r\ncamera.resolution = (320, 240) # a smaller resolution means faster processing\r\ncamera.framerate = 24\r\nrawCapture = PiRGBArray(camera, size=(320, 240))\r\nkernel = np.ones((2, 2), np.uint8)\r\ntime.sleep(0.1)\r\nfor still in camera.capture_continuous(rawCapture, format=\"bgr\", use_video_port=True):\r\n GPIO.output(buzzer, False)\r\n\r\n image = still.array\r\n # create a detection area\r\n widthAlert = np.size(image, 1) # get width of image\r\n heightAlert = np.size(image, 0) # get height of image\r\n yAlert 
 lower = [1, 0, 20]\r\n upper = [60, 40, 200]\r\n lower = np.array(lower, dtype=\"uint8\")\r\n upper = np.array(upper, dtype=\"uint8\")\r\n # use the color range to create a mask for the image and apply it to the image\r\n mask = cv2.inRange(image, lower, upper)\r\n output = cv2.bitwise_and(image, image, mask=mask)\r\n\r\n dilation = cv2.dilate(mask, kernel, iterations=3)\r\n closing = cv2.morphologyEx(dilation, cv2.MORPH_GRADIENT, kernel)\r\n closing = cv2.morphologyEx(dilation, cv2.MORPH_CLOSE, kernel)\r\n edge = cv2.Canny(closing, 175, 175)\r\n\r\n contours, hierarchy = cv2.findContours(closing, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\r\n\r\n threshold_area = 400\r\n centres = []\r\n\r\n if len(contours) != 0:\r\n\r\n for cnt in contours:\r\n # find the area of each contour\r\n area = cv2.contourArea(cnt)\r\n # find the center of each contour\r\n moments = cv2.moments(cnt)\r\n # weed out the contours that are less than our threshold\r\n if area > threshold_area:\r\n\r\n (x, y, w, h) = cv2.boundingRect(cnt)\r\n\r\n centerX = (x + x + w) // 2\r\n centerY = (y + y + h) // 2\r\n\r\n cv2.circle(image, (centerX, centerY), 7, (255, 255, 255), -1)\r\n\r\n if ((y + h) > yAlert):\r\n cv2.putText(image, \"ALERT!\", (centerX - 20, centerY - 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5,\r\n (255, 255, 255), 2)\r\n GPIO.output(buzzer, True)\r\n\r\n cv2.imshow(\"Display\", image)\r\n\r\n rawCapture.truncate(0)\r\n\r\n key = cv2.waitKey(1) & 0xFF\r\n if key == ord(\"q\"):\r\n GPIO.output(buzzer, False)\r\n break","repo_name":"Rubayet19/Object_Detection_and_Path_Planning","sub_path":"threshold.py","file_name":"threshold.py","file_ext":"py","file_size_in_byte":2596,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
+{"seq_id":"69968892040","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\nimport numpy.random as rnd\r\n\r\n#region functions for task 1 a-b --------------------\r\ndef geom_pmf(k,p):\r\n return p*(1-p)**k\r\n\r\ndef VaR_geom(alpha, p = 0.5):\r\n return np.ceil(np.log(1-alpha)/np.log(1-p) - 1)\r\n
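\r\n# Derivation note (added): for L ~ Geom(p) on {0, 1, 2, ...},\r\n# P(L <= k) = 1 - (1-p)**(k+1), so P(L <= k) >= alpha exactly when\r\n# k >= log(1-alpha)/log(1-p) - 1; np.ceil of that bound gives VaR_alpha.\r\n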
\r\n#endregion ------------------\r\n\r\n#1a)\r\n\r\nvar_095 = VaR_geom(0.95)\r\nprint(var_095)\r\n\r\n#1b)\r\nalphas = np.arange(0.9,1, 0.01)\r\nVaR_alphas = VaR_geom(alphas)\r\n\r\nplt.step(alphas, VaR_alphas)\r\nplt.xlabel(r\"Level of significance $\\alpha$\")\r\nplt.ylabel(r\"$VaR_\\alpha$\")\r\nplt.grid()\r\nplt.show()\r\n\r\nx_axis = np.array(range(0, 8))\r\nfig, ax = plt.subplots()\r\n\r\nplt.step(x_axis, geom_pmf(x_axis, 0.5), label = r\"$f_L(x)$\", where=\"post\")\r\n\r\n#Draw a vline for every unique VaR_alpha\r\ncolors = [\"red\", \"blue\", \"green\", \"purple\", \"pink\"]\r\nfor i in range(len(set(VaR_alphas))):\r\n val = list(VaR_alphas).index(list(set(VaR_alphas))[i])\r\n interval = np.where(VaR_alphas == VaR_alphas[val])\r\n plt.vlines(VaR_alphas[val], 0 , geom_pmf(VaR_alphas[val], 0.5), colors=colors[i], linestyles=\"dashed\", label = r\"$\\alpha\\in$\" + \"[\" + str(alphas[interval][0])[0:4] + \",\" + str(alphas[interval][-1])[0:4] + \"]\")\r\nplt.xlabel(r\"$x$\")\r\nplt.ylabel(r\"$f_L(x)$\")\r\nplt.grid()\r\nplt.legend()\r\nplt.show()\r\n\r\n#2a) \r\n\r\n#region functions -------------\r\ndef fac(n):\r\n if n == 0:\r\n return 1\r\n return n*fac(n-1)\r\n\r\ndef poisson_pdf(x, lamb):\r\n return np.exp(-lamb)*lamb**x/fac(x)\r\n\r\ndef poisson_cdf(x, lamb):\r\n if x < 0:\r\n return 0\r\n cumsum = 0\r\n for i in range(int(x) + 1):\r\n cumsum += poisson_pdf(i, lamb)\r\n return cumsum\r\n\r\ndef VaR_poisson(alpha, lamb):\r\n part = None\r\n if not isinstance(alpha, list) and not isinstance(alpha, np.ndarray):\r\n alpha = [alpha]\r\n if len(alpha) != 1:\r\n part = VaR_poisson(alpha[1:], lamb)\r\n\r\n x = 0\r\n while(poisson_cdf(x, lamb) < alpha[0]):\r\n x += 1\r\n\r\n if part is None:\r\n return [x]\r\n\r\n return [x] + part\r\n\r\ndef plot_VaR_poisson(alpha, lamb):\r\n VaR_alphas = VaR_poisson(alpha, lamb)\r\n plt.step(alpha, VaR_alphas, where = \"mid\")\r\n plt.xlabel(r\"$\\alpha$\")\r\n plt.ylabel(r\"$VaR_\\alpha$\")\r\n plt.grid()\r\n plt.show()\r\n\r\n#endregion ---------------------\r\n\r\nalphas = np.arange(0.9, 1, 0.01)\r\n\r\nplot_VaR_poisson(alphas, 1)\r\nplot_VaR_poisson(alphas, 2)\r\nplot_VaR_poisson(alphas, 3)\r\n\r\n\r\n\r\n\r\n","repo_name":"JakobA-Scott/FIN-417","sub_path":"FIN-417 Quantitative Risk Management/HW3/HW3E2 (1).py","file_name":"HW3E2 (1).py","file_ext":"py","file_size_in_byte":2376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
+{"seq_id":"4296165761","text":"from collections import defaultdict\nfrom typing import List, Dict\n\nclass Solution:\n def sortItems(self, n: int, m: int, group: List[int], beforeItems: List[List[int]]) -> List[int]:\n # Assign new groups for items without a group.\n # Start new group ids from m as [0 to m-1] already used.\n for i in range(n):\n if group[i] == -1:\n group[i] = m\n m += 1 # increase the group count\n \n
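 # Illustration (added): e.g. group=[-1, 0, 0] with m=1 becomes\n # group=[1, 0, 0] and m=2 - each ungrouped item gets its own fresh group id.\n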
 # Create an adjacency list (dependency graph) for items\n item_graph = defaultdict(list)\n # In-degree count for all items. Used for topological sort.\n item_indegree = [0] * n\n # Create an adjacency list (dependency graph) for groups\n group_graph = defaultdict(list)\n # In-degree count for groups. Used for topological sort.\n group_indegree = [0] * m\n \n # Construct the graphs based on beforeItems list\n for i in range(n):\n for pre_item in beforeItems[i]:\n item_graph[pre_item].append(i)\n item_indegree[i] += 1\n \n # If the two items 'i' and 'pre_item' are from different groups, \n # add an edge between these groups.\n if group[i] != group[pre_item]:\n group_graph[group[pre_item]].append(group[i])\n group_indegree[group[i]] += 1\n \n # Topological sort on items and groups\n item_order = self.topological_sort(item_graph, item_indegree)\n group_order = self.topological_sort(group_graph, group_indegree)\n \n # If we can't get a valid order, return []\n if not item_order or not group_order:\n return []\n \n # Arrange items within each group in the sorted order\n group_to_items = defaultdict(list)\n for item in item_order:\n group_to_items[group[item]].append(item)\n \n # Combine items from all groups in the order of sorted groups.\n sorted_items = []\n for group_id in group_order:\n sorted_items.extend(group_to_items[group_id])\n \n return sorted_items\n \n def topological_sort(self, graph: Dict[int, List[int]], indegree: List[int]) -> List[int]:\n # Initialize a result list and a stack to keep nodes with 0 in-degree\n sorted_order = []\n zero_indegree = [i for i, deg in enumerate(indegree) if deg == 0]\n \n # While we have nodes with 0 in-degree, process them\n while zero_indegree:\n node = zero_indegree.pop() # get a node with 0 in-degree\n sorted_order.append(node) # add to result list\n # Decrease in-degree for all neighbors\n for neighbor in graph[node]:\n indegree[neighbor] -= 1\n # If neighbor becomes 0 in-degree, add to stack\n if indegree[neighbor] == 0:\n zero_indegree.append(neighbor)\n \n # Return sorted order if we have processed all nodes; otherwise, return []\n # (compare against len(indegree), which has one entry per node; the\n # defaultdict graph may lack keys for nodes without outgoing edges)\n return sorted_order if len(sorted_order) == len(indegree) else []\n","repo_name":"aurimas13/Solutions-To-Problems","sub_path":"LeetCode/Python Solutions/Sort Items by Groups Respecting Dependencies/sort.py","file_name":"sort.py","file_ext":"py","file_size_in_byte":3121,"program_lang":"python","lang":"en","doc_type":"code","stars":81,"dataset":"github-code","pt":"63"}
+{"seq_id":"11265813108","text":"from gql import gql\nfrom typing import Sequence\n\nclass listMethods:\n\n _ListIdentityTypesQuery = \"\"\"\n    query ListIdentityTypes{\n    IdentityTypes{\n        config\n        id\n        identity_type\n    }\n}\n    \"\"\"\n\n def ListIdentityTypes(self):\n query = gql(self._ListIdentityTypesQuery)\n variables = {\n }\n operation_name = \"ListIdentityTypes\"\n return self.execute(query, variable_values=variables, operation_name=operation_name)\n","repo_name":"kivera-io/python-client","sub_path":"kivera/identitytypes/list.py","file_name":"list.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
+{"seq_id":"39928691083","text":"#!/usr/bin/env python\nr\"\"\"\n-------------------------------------------------------------------------------\n\nThis script retrieves publication data from NASA ADS and writes out each\npublication as \\item to a latex file. Open access information can be attached as\nwell. The default output file is `publication_list.txt`.\n\n
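Example invocation (illustrative; the options are defined below):\n\n    ./get_publications.py -a Birnstiel -i Tilman -oa -r pdf\n\n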
Notes:\n------\n\nSome UTF8 characters still might not work (like greek letters in paper titles)\nin which case those need to be declared in the header below, for example: \n\n    \\DeclareUnicodeCharacter{3BC}{$\\mu$}\n    \nThe `-p` option allows including articles in press which are read from the text\nfile `in_press.txt` (or some file specified by `-in`). In this case the content\nis split up into `\\item`s and if open access information should be attached, it\nis taken from `oa_info.py`. This file should just contain something like\n\n    OA_INPRESS = ['[OA]','[OA]','']\n\nif there are for example three papers in press and the first two should be listed\nas open access.\n\n\n-------------------------------------------------------------------------------\n\"\"\"\nimport urllib2, os, sys, re, json, codecs, argparse, subprocess\n#\n# set default values\n#\nOPENACCESS = False\nCITATIONS = False\nOA_INPRESS = []\nAUTHOR = 'Birnstiel'\nAUTHOR_F = 'Tilman'\nFILE = 'publication_list.txt'\nINFILE = 'in_press.txt'\nDEVKEY = os.environ['ADS_DEV_KEY']\nIN_PRESS = False\nRUN = 'txt'\nLATEX = 'pdflatex -interaction=nonstopmode'.split()\nDATABASE = ''\n#\n# handle command line arguments\n#\nif __name__ == '__main__':\n    RTHF = argparse.RawTextHelpFormatter\n    PARSER = argparse.ArgumentParser(description=__doc__,formatter_class=RTHF)\n\n    GROUP = PARSER.add_mutually_exclusive_group()\n    GROUP.add_argument('-oa', '--openaccess',\\\n        help='include open access information', action='store_true')\n    GROUP.add_argument('-c', '--citations',\\\n        help='include citation information', action='store_true')\n    PARSER.add_argument('-r', '--run',\\\n        help='what output to produce:\\n'+\\\n        r'`txt` = just `\\item`s'+'\\n'+\\\n        '`tex` = full latex document\\n'+\\\n        '`pdf` = compiled pdf file\\n'+\\\n        'default=' + RUN,\\\n        choices=['txt', 'tex', 'pdf'],\\\n        type=str, default=RUN)\n    PARSER.add_argument('-p', '--in-press',\\\n        help='include articles in press', action='store_true')\n    PARSER.add_argument('-db', '--database',\\\n        help='select database, e.g. 
`astronomy`',\\\n type=str, default=DATABASE)\n PARSER.add_argument('-a', '--author',\\\n help='Last name of Author, default='+AUTHOR,\\\n type=str, default=AUTHOR)\n PARSER.add_argument('-i', '--initial',\\\n help='Authors first name, default='+AUTHOR_F,\\\n type=str, default=AUTHOR_F)\n PARSER.add_argument('-d', '--devkey',\\\n help='NASA ADS Dev Key, default=' + DEVKEY,\\\n type=str, default=DEVKEY)\n PARSER.add_argument('-out', '--output',\\\n help='output file, default=' + FILE,\\\n type=str, default=FILE)\n PARSER.add_argument('-in', '--input',\\\n help='in-press input file, default=' + INFILE,\\\n type=str, default=INFILE)\n PARSER.add_argument('-l', '--latex',\\\n help='latex command, default=' + ' '.join(LATEX),\\\n type=str, default=' '.join(LATEX))\n PARSER.add_argument('-ys', '--year-start',\\\n help='include only publications after (including) this year',\\\n type=float, default=-1e6)\n PARSER.add_argument('-ye', '--year-end',\\\n help='include only publications before (including) this year',\\\n type=float, default=1e6)\n PARSER.add_argument('-lid', '--library-id',\\\n help='Use personal library with this id instead of a general query (needs also --library-name)',\\\n type=str, default=None)\n PARSER.add_argument('-ln', '--library-name',\\\n help='Use personal library with this name instead of a general query (needs also --library-id)',\\\n type=str, default=None)\n ARGS = PARSER.parse_args()\n\n OPENACCESS = ARGS.openaccess\n CITATIONS = ARGS.citations\n\n RUN = ARGS.run\n\n IN_PRESS = ARGS.in_press\n DATABASE = ARGS.database.lower()\n AUTHOR = ARGS.author\n AUTHOR_F = ARGS.initial\n DEVKEY = ARGS.devkey\n FILE = ARGS.output\n INFILE = ARGS.input\n LATEX = ARGS.latex.split()\n#\n# print promotion\n#\nprint('-----------------------')\nprint('Publication List Script')\nprint('-----------------------\\n')\nprint('by Til Birnstiel')\nprint('https://github.com/birnstiel/get_publications\\n')\n#\n# process options\n#\nif DEVKEY == '':\n print('\\nERROR:\\n' +\n 'You need to specify a valid NASA ADS\\n' +\n 'developer key, either by setting the\\n' +\n 'environment variable `ADS_DEV_KEY` or\\n' +\n 'by using the argument `-d`.\\n')\n sys.exit(1)\n#\n# display some information\n#\nprint('Script will create publication list (file `{}`) for author: {} {}\\n'.\\\n format(os.path.splitext(FILE)[0] + '.' 
+ RUN, AUTHOR_F, AUTHOR))\nif OPENACCESS:\n if IN_PRESS:\n from oa_info import OA_INPRESS\n print('- Including open access information')\n\nif CITATIONS:\n print('- Including citations')\n\nif RUN in ['tex', 'pdf']:\n FILE = os.path.splitext(FILE)[0] + '.tex'\nif RUN == 'txt':\n FILE = os.path.splitext(FILE)[0] + '.txt'\n#\n# set header and footer of the latex document\n#\nHEAD = r\"\"\"\n\\documentclass[11pt,letterpaper]{amsart}\n\\usepackage[margin=3cm]{geometry}\n\\usepackage{enumitem}\n\\usepackage[utf8]{inputenc}\n\\DeclareUnicodeCharacter{3B1}{$\\alpha$}\n\\DeclareUnicodeCharacter{3B4}{$\\delta$}\n\\DeclareUnicodeCharacter{3BC}{$\\mu$}\n\\usepackage{etaremune}\n\\usepackage{xspace}\n\\input{abbrev.tex}\n%%%%%%%%%%%%%%%%%%%%%%\n\\usepackage{fancyhdr}\n\\usepackage{lastpage}\n\\renewcommand{\\headrulewidth}{0pt}\n\\fancyhf{}\n\\fancyfoot[C]{%\n \\vspace{0.5cm}\\small\\emph{Page \\thepage\\ of \\pageref{LastPage}}\n}\n\\pagestyle{fancy}\n\\thispagestyle{fancy}\n%%%%%%%%%%%%%%%%%%%%%%\n\\begin{document}\n\n\\begin{center}\n\\uppercase{{\\large\\textbf{List of Publications}}}\\\\\n\\vspace{0.3cm}\n\\textsc{-- FIRSTNAME LASTNAME --}\\\\\n\\end{center}\n\\begin{etaremune}[topsep=0pt,itemsep=0.5ex,partopsep=1ex,parsep=1ex]\n\"\"\"\nHEAD = HEAD.replace('FIRSTNAME', AUTHOR_F + '.' * (len(AUTHOR_F) == 1))\nHEAD = HEAD.replace('LASTNAME', AUTHOR)\nFOOT = r'\\end{etaremune}'+'\\n'+r'\\end{document}'+'\\n'\n\ndef replace_journal_name(journal):\n \"\"\"\n Replace some of the journal names with default abbreviations\n as found in many journals, see for example here:\n\n http://doc.adsabs.harvard.edu/abs_doc/aas_macros.sty\n http://www.aanda.org/doc_journal/instructions/aa_instructions.pdf\n \"\"\"\n if journal == u'Annual Review of Astronomy and Astrophysics':\n return r'\\araa'\n elif journal == u'Astronomische Nachrichten':\n return r'AN'\n elif journal == u'Astronomy and Astrophysics':\n return r'\\aap'\n elif journal == u'Geochimica et Cosmochimica Acta Supplement':\n return r'GCA'\n elif journal == u'Icarus':\n return r'Icarus'\n elif journal == u'Monthly Notices of the Royal Astronomical Society':\n return r'\\mnras'\n elif journal == u'Nature':\n return r'\\nat'\n elif journal == u'Ph.D. Thesis':\n return r'PhD Thesis'\n elif journal == u'Physical Review D':\n return r'\\prd'\n elif journal == u'Physical Review Letters':\n return r'\\prl'\n elif journal == u'Protostars and Planets V':\n return r'PPV'\n elif journal == u'Protostars and Planets VI':\n return r'PPVI'\n elif journal == u'Science':\n return r'\\sci'\n elif journal == u'The Astronomical Journal':\n return r'\\aj'\n elif journal == u'The Astrophysical Journal':\n return r'\\apj'\n elif journal == u'The Astrophysical Journal Supplement Series':\n return r'\\apjs'\n else:\n print('Unknown Journal {}, no replacement done!'.format(journal))\n return journal\n \n#\n# read in_press file\n#\nsys.stdout.write('- reading from {} ... '.format(INFILE))\nsys.stdout.flush()\nif IN_PRESS:\n PUBS_INPRESS = re.findall('\\\\\\\\item.*', open(INFILE).read())\nprint('Done ')\n#\n# get the publication data via the ADS API\n#\nsys.stdout.write('- getting publication data from NASA ADS ... 
')\nsys.stdout.flush()\nURL = r'https://api.adsabs.harvard.edu/v1/search/query?q=author:%22'+AUTHOR+',+'+AUTHOR_F[0]+'%22&rows=200&fl=author,title,pub,pubdate,year,volume,page,bibcode,citation_count,property,doi,abstract&fq=property:refereed&sort=pubdate+desc';\nif ARGS.library_id is None and ARGS.library_name is None:\n    pass\nelif ARGS.library_id is None or ARGS.library_name is None:\n    raise ValueError('both or none of --library-id and --library-name need to be specified')\nelse:\n    raise NotImplementedError('private libraries are not yet implemented for the new ADS API')\n    #URL = r'http://adsabs.harvard.edu/cgi-bin/export_privlib?libid='+ARGS.library_id+'&libname='+ARGS.library_name\n    #BIBCODES = [b['bibcode'] for b in json.load(urllib2.urlopen(URL))['entries']]\n    #URL = r'http://adslabs.org/adsabs/api/search/?q='+\\\n    #    'bibcode:' + '&bibcode:'.join(BIBCODES) +\\\n    #    '&rows=2000&dev_key=' + DEVKEY\n\nrequest = urllib2.Request(URL, headers={\"Authorization\" : \"Bearer \"+DEVKEY})\ncontents = urllib2.urlopen(request).read()\nPUBS = json.loads(contents)['response']['docs']\nprint('Done')\n#\n# apply the database and year filters\n#\nN_PUBS = len(PUBS)\nif DATABASE != '':\n    PUBS = [p for p in PUBS if any([db.lower() == DATABASE.lower() \\\n        for db in p['database']])]\nif len(PUBS) < N_PUBS: print('- DATABASE FILTER: FILTERED OUT {:d} PUBLICATIONS'.format(N_PUBS - len(PUBS)))\n\nN_PUBS = len(PUBS)\nPUBS = [p for p in PUBS if float(p['year'])>=ARGS.year_start]\nif len(PUBS) < N_PUBS: print('- START YEAR FILTER: FILTERED OUT {:d} PUBLICATIONS'.format(N_PUBS - len(PUBS)))\n\nN_PUBS = len(PUBS)\nPUBS = [p for p in PUBS if float(p['year'])<=ARGS.year_end]\nif len(PUBS) < N_PUBS: print('- END YEAR FILTER: FILTERED OUT {:d} PUBLICATIONS'.format(N_PUBS - len(PUBS)))\n\n#\n# open file to write out results\n#\nsys.stdout.write('- writing files ... 
')\nsys.stdout.flush()\nFID = codecs.open(FILE, 'w', 'utf-8')\nif RUN in ['tex', 'pdf']:\n    FID.write(HEAD)\n#\n# write in_press articles\n#\nif IN_PRESS:\n    for i, pub in enumerate(PUBS_INPRESS):\n        if OPENACCESS:\n            if OA_INPRESS[i] != '':\n                pub = pub + ' ' + OA_INPRESS[i]\n        FID.write((pub + '\\n').decode(\"utf-8\"))\n#\n# convert each publication into a latex item\n#\nfor pub in PUBS:\n    string = r'\\item '\n    #\n    # get last names\n    #\n    authors = [a.split(',')[0] for a in pub['author']]\n    #\n    # boldface author if initial matches\n    #\n    if AUTHOR in authors:\n        idx = authors.index(AUTHOR)\n        if pub['author'][idx].split(',')[1][1] == AUTHOR_F[0]:\n            authors[idx] = r'\\textbf{' + authors[idx] + '}'\n    #\n    # format with commas between, and finish with \", and lastauthor\"\n    #\n    if len(authors) == 1:\n        authors = authors[0]\n    elif len(authors) == 2:\n        authors = ' and '.join(authors)\n    else:\n        authors = ', '.join(authors[0:-1]) + ', and ' + authors[-1]\n    string += authors + ': '\n    #\n    # fix special characters in the title\n    #\n    title = pub['title'][0]\n    #specials = ['{', '}', '_', '$', '^']\n    #for c in specials:\n    #    title = title.replace(c, '\\\\' + c)\n    title=re.sub('<sub>(.*?)</sub>',r'$_{\\1}$',title,flags=re.IGNORECASE)\n    title=re.sub('<sup>(.*?)</sup>',r'$^{\\1}$',title,flags=re.IGNORECASE)\n    if '$' not in title:\n        title=re.sub('(_[^ ]*)',r'$\\1$',title)\n    title=re.sub('\\{\\^(.*?)\\}',r'$^{\\1}$',title)\n    print(title)\n    title = r'\\textit{' + title + '}'\n    string += title + ', '\n    if 'pub' in pub.keys():\n        journal = replace_journal_name(pub['pub'])\n    else:\n        journal = ''\n    year = pub['pubdate'][0:4]\n    string += journal + ' (' + year + ')'\n    if 'volume' in pub.keys():\n        volume = pub['volume']\n        page = pub['page'][0]\n        string += ', vol. ' + volume + ', ' + page\n    string += '.'\n    #\n    # append the open access property\n    #\n    if OPENACCESS:\n        if 'PUB_OPENACCESS' in pub['property']:\n            #\n            # paper is open access\n            #\n            string += ' [OA]'\n        elif 'OPENACCESS' in pub['property']:\n            # or any(['arXiv' in i for i in pub['identifier']]):\n            #\n            # paper is on arxiv\n            #\n            string += ' [OA]*'\n    #\n    # citation count\n    #\n    if CITATIONS:\n        if 'citation_count' in pub.keys():\n            c = pub['citation_count']\n        else:\n            c = 0\n        string += ' [{:d} citation{}]'.format(c, 's' * (c != 1))\n    #\n    # write it out\n    #\n    FID.write(string + '\\n')\n#\n# append open access legend\n#\nif OPENACCESS:\n    FID.write('\\\\\\\\[1em] \\n[OA] = gold open access \\\\\\\\[0em] \\n'+\\\n        '[OA]* = green open access')\nif CITATIONS:\n    FID.write('\\\\\\\\[1em] \\n Total number of citations: {:d}'.\\\n        format(sum([pub['citation_count'] for pub in PUBS if 'citation_count' in pub.keys()])))\n\nif RUN in ['tex', 'pdf']:\n    FID.write(FOOT)\nFID.close()\nprint('Done')\n\nif RUN == 'pdf':\n    sys.stdout.write('- compiling latex file `{:}` ... '.format(FILE))\n    sys.stdout.flush()\n    #\n    # run latex twice\n    #\n    for i in range(2):\n        try:\n            p = subprocess.Popen(LATEX + [FILE], stderr=subprocess.PIPE,\\\n                stdout=subprocess.PIPE)\n            ret_out, ret_err = p.communicate()\n            ret_val = p.poll()\n        except OSError as err:\n            ret_out = \"OSError({0}): {1}\".format(err.errno, err.strerror)\n    print('Done')\n    #\n    # clean up\n    #\n    for ext in ['aux', 'log', 'tex']:\n        os.unlink(os.path.splitext(FILE)[0] + '.' 
+ ext)\n\nprint('\\nScript finished!\\n')\n","repo_name":"birnstiel/get_publications","sub_path":"get_publications.py","file_name":"get_publications.py","file_ext":"py","file_size_in_byte":14041,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"63"}
+{"seq_id":"74545345479","text":"'''\n    OVERVIEW:\n    This script will, for a logged in user with the subscription set appropriately, modify a Network Security Group\n    to add in an IP address to an inbound rule OR create a new rule if necessary and able. Steps\n\n    - Check the NIC for Inbound rules, both allow and deny\n    - Filter the rules to only those supporting the requested port number\n    - Filter out allowable rules with the deny list as any allow rule with a higher priority than the deny\n      rule will be ignored anyway.\n    - Filter out remaining allow rules to only those that support IP addresses\n    - If the list is empty, try and create an inbound rule with priority less than any deny rules.\n    - If the list is not empty, check to see if the IP address is already part of the rule.\n        - YES : Ignore and end script\n        - NO : Add the IP to the existing rule and update it. \n\n    REQUIREMENTS:\n    - Azure Subscription\n    - Virtual Machine deployment with Network Interface\n    - Access to the resource group the virtual machine lives in.\n    - Pip install\n        azure-common\n        azure-mgmt\n        azure-cli\n        azure-cli-core\n\n    USE:\n    Provide an IP address in port_ip and a resource group name. Optional Network Interface name only if there is more \n    than one in the resource group. \n'''\n\nfrom securityutils import *\nfrom resourceutils import * \n\n'''\n    Port number external access is requested on \n'''\nport_access = 22\n'''\n    IP to add to the access rule\n'''\nport_ip = '71.184.130.226'\n'''\n    Resource group name where the Network interface resides\n'''\nresource_group_name = 'dangdeletetest'\n'''\n    [optional] Network Interface name. If the RG has only one, it's not necessary \n    to provide a name here, it will be searched for instead. \n'''\nselected_nic = None\n'''\n    Azure resource type if looking for network interfaces on the resource group.\n'''\nnetwork_interface_provider = 'Microsoft.Network/networkInterfaces'\n\n\n\n\n# Gather the clients you need to get work done\nrsrcMgmtClient = getResourceManagementClient()\nnetworkClient = getNetworkClient()\n\n# If NIC not provided, acquire all NIC from the resource group. \nif not selected_nic:\n    # Get the list of network interfaces from the resource group. If more than one, make them choose.
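\n    # Note (added): findResources and the client factories come from the\n    # securityutils/resourceutils helpers imported above; when several NICs\n    # are found, the block below falls back to the first one.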
\n    nic_list = findResources(rsrcMgmtClient, resource_group_name, network_interface_provider)\n    selected_nic = None\n\n    if len(nic_list) > 1:\n        # Minimal completion of the original placeholder: list the candidates\n        # and default to the first interface.\n        print(\"Multiple interfaces found:\", [n.name for n in nic_list], \"- using the first\")\n        selected_nic = nic_list[0]\n    elif nic_list:\n        selected_nic = nic_list[0]\n\n\n# If we have a nic then we are good\nif selected_nic:\n    print(\"Using interface : \", selected_nic.name)\n\n    nic_rules = getEffectiveRulesByName(networkClient, resource_group_name, selected_nic.name)\n    # Appears that the last one is processed first\n    active_nsg = nic_rules[-1]\n    active_resource_group_name = active_nsg[\"resource_group\"]\n    active_nsg_name = active_nsg[\"network_security_group\"]\n    \n    \n    print(\"Checking NSG - \", active_nsg_name)\n    active_rules = loadInboundSecurityRules(networkClient, active_resource_group_name, active_nsg_name)\n\n    print(\"Nic has \", len(active_nsg[\"rules\"]), \"rules, and \", len(active_rules), \" were returned\")\n    allow, deny = splitRules(active_rules, port_access)\n    print(\"There are \", len(allow.keys()), \" allow rules and \",len(deny.keys()), \" deny rules....\")\n\n    deny_priority = None\n    original_allow_priorities = sorted(allow.keys())\n\n    # If we have allow/deny clean it up and see if we still have anything with access. \n    if len(deny.keys()) > 0:\n        # Remove any allow rules that rank below the strongest deny.\n        deny_keys = sorted(deny.keys())\n        # We only care about the highest priority deny, the lowest number\n        deny_priority = deny_keys[0]\n\n        # If we have allow rules, get rid of the ones blocked by the deny...\n        if len(allow.keys()) > 0:\n            allow_keys = sorted(allow.keys())\n            delete_allow = []\n            for key in allow_keys:\n                if key > deny_priority:\n                    delete_allow.append(key)\n\n            for delkey in delete_allow:\n                del allow[delkey]\n    \n    \n    # If we have allow rules left, get only the one(s) with IP access defined\n    security_rule_to_update = None\n    force_update = True\n    print(\"There are \", len(allow.keys()), \" allow rules left after filtering on deny rules.\")\n    if len(allow.keys()) > 0:\n        # Filter out any allow that are NOT IP based\n        print(\"Filter allow rules only on IP based rules..... \")\n        delete_allow = []\n        for key in allow.keys():\n            if not isRuleIpBased(allow[key]):\n                delete_allow.append(key)\n\n        for delkey in delete_allow:\n            del allow[delkey]\n    \n    # Now see if the IP is on that rule? \n    print(\"There are \", len(allow.keys()), \" allow rules left after filtering on IP based rules.\")\n    active_rule_key = None\n    for key in allow.keys():\n        if isIpPresent(allow[key], port_ip):\n            active_rule_key = key\n            break\n\n    # If we have an active rule key, we are all set, otherwise we are going to need to add the IP OR \n    # create a rule.
\n    if active_rule_key:\n        print(\"IP address is already part of the inbound rule \", allow[active_rule_key].name)\n        force_update = False\n    elif len(allow.keys()) > 0:\n        sorted_keys = sorted(allow.keys())\n        security_rule_to_update = allow[sorted_keys[0]]\n        print(\"IP Address not found in any rules....use existing rule - \", security_rule_to_update.name)\n    else:\n        print(\"Have to create a new rule....\")\n\n    \n    if security_rule_to_update:\n        print(\"Update rule \", security_rule_to_update.name)\n        updateSecurityRule(networkClient, security_rule_to_update, active_resource_group_name, active_nsg_name, port_ip)\n    elif force_update:\n        print(\"Creating new rule\")\n\n        rule_name = str(port_access) + \"_access_rule\"\n        rule_port = port_access\n        priority = 100\n        valid_priority = False\n\n        if not deny_priority:\n            deny_priority = 1000\n\n        while priority < deny_priority:\n            if priority in original_allow_priorities:\n                priority += 1\n            else:\n                valid_priority = True\n                break\n\n        if valid_priority:\n            createSecurityRule(networkClient, active_resource_group_name, active_nsg_name, rule_name, priority, rule_port, port_ip )\n        else:\n            print(\"Could not find a slot to put new rule in.\")\n\n\n\nelse:\n    print(\"No NIC selected....\")\n\n\nprint(\"Done\")\n \n","repo_name":"grecoe/EnableExternalIp","sub_path":"forceip.py","file_name":"forceip.py","file_ext":"py","file_size_in_byte":6704,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"}
+{"seq_id":"33009599516","text":"import os\nimport sys\nfrom button import Button\nimport pygame\nfrom pygame import mixer\nimport Pgm\ndef endgame():\n    def get_font(size):  # Returns Press-Start-2P in the desired size\n        return pygame.font.Font(\"assets/font.ttf\", size)\n\n    width = 750\n    height = 750\n    BLACK = (36, 36, 36)\n    WHITE = (230, 241, 243)\n    window = pygame.display.set_mode((width, height))\n    poplft = width // 8\n    poptop = height // 3\n    popwdt = width * (6 / 8)\n    pophgt = height // 3\n    winpop = pygame.Rect(poplft, poptop, popwdt, pophgt)\n    pygame.draw.rect(window, BLACK, winpop)\n    pygame.draw.rect(window, WHITE, winpop, 1)\n    MENU_MOUSE_POS = pygame.mouse.get_pos()\n\n    MENU_TEXT = get_font(45).render(\"YOU WIN\", True, \"#b68f40\")\n    MENU_RECT = MENU_TEXT.get_rect(center=(370, 300))\n\n    REPLAY_BUTTON = Button(image=None, pos=(220, 400),\n                    text_input=\"REPLAY\", font=get_font(25), base_color=\"White\", hovering_color=\"#8cdb6a\")\n\n    QUIT_BUTTON = Button(image=None, pos=(550, 400),\n                    text_input=\"QUIT\", font=get_font(25), base_color=\"White\", hovering_color=\"#8cdb6a\")\n\n    window.blit(MENU_TEXT, MENU_RECT)\n\n    for button in [REPLAY_BUTTON, QUIT_BUTTON]:\n        button.changeColor(MENU_MOUSE_POS)\n        button.update(window)\n\n    for event in pygame.event.get():\n        if event.type == pygame.QUIT:\n            pygame.quit()\n            sys.exit()\n        if event.type == pygame.MOUSEBUTTONDOWN:\n            if REPLAY_BUTTON.checkForInput(MENU_MOUSE_POS):\n                Pgm.rungame()\n            if QUIT_BUTTON.checkForInput(MENU_MOUSE_POS):\n                pygame.quit()\n                sys.exit()\n\n    pygame.display.update()","repo_name":"rachelxx03/carogame","sub_path":"carogame/endgame.py","file_name":"endgame.py","file_ext":"py","file_size_in_byte":1730,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
+{"seq_id":"40024584272","text":"from scipy import *\nfrom scipy.special import sph_jn, sph_yn\n\n# The following is an entirely computationally inefficient draft, intended for basic orientation.\n
data)\"\"\"\n return sph_jn(n, z)[0][l]\n\ndef yl(l,z):\n \"\"\"Wrapper for sph_yn (discards the unnecessary data)\"\"\"\n return sph_yn(l, z)[0][l]\n\ndef h1l(l,z):\n \"\"\"First spherical Hankel function\"\"\"\n return jl(l,z) + 1j*yl(l,z)\n\ndef h2l(l,z):\n \"\"\"Second spherical Hankel function\"\"\"\n return j1(l,z) - 1j*yl(l,z)\n\ndef bf_coeff(l, km, k0, etam, eta0, r):\n \"\"\"Ratios between (b1lm,f1lm) and a1lm. See the single_spherical_wave_scatter.nb file\"\"\"\n sph_j_kmr = sph_jn(l, km*r)\n sph_j_k0r = sph_jn(l, k0*r)\n sph_y_k0r = sph_yn(l, k0*r)\n\n jm = sph_j_kmr[0][l]\n h01 = sph_j_k0r[0][l] + 1j * sph_y_k0r[0][l]\n h02 = sph_j_k0r[0][l] - 1j * sph_y_k0r[0][l]\n\n Jm = jm + km * r * sph_j_kmr[1][l]\n H01 = h01 + k0 * r * (sph_j_k0r[1][l] + 1j * sph_y_k0r[1][l])\n H02 = h02 + k0 * r * (sph_j_k0r[1][l] - 1j * sph_y_k0r[1][l])\n\n denom1 = h01*Jm*k0*eta0 - H01*jm*km*etam\n b1_a1 = - (h02*Jm*k0*eta0 - H02*jm*km*etam) / denom1\n f1_a1 = - k0 * sqrt(eta0*etam) * (H01*h02 - h01*H02) / denom1\n\n denom2 = (H01*jm*km*eta0 - h01*Jm*k0*etam)\n b2_a2 = - (H02*jm*km*eta0 - h02*Jm*k0*etam) / denom2\n f2_a2 = - k0 * sqrt(eta0*etam) * (-H01*h02 + h01*H02) / denom2\n \n return (b1_a1, f1_a1, b2_a2, f2_a2)\n\n\n","repo_name":"texnokrates/electroballz","sub_path":"electroballz/single_coeff.py","file_name":"single_coeff.py","file_ext":"py","file_size_in_byte":1461,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"} +{"seq_id":"16919628133","text":"# tracks the sentiment toward a given hashtag over time\n# 20 mins got twitter stream printed out\n# 40 mins got basic sentiment done\n# 60 mins got stock price done and checked into git with externalised env vars <3\n\nfrom birdy.twitter import StreamClient\nfrom textblob import TextBlob\nfrom datetime import datetime\nimport os\nimport sys\nimport csv\n\n\nCONSUMER_KEY = os.getenv('CONSUMER_KEY')\nCONSUMER_SECRET = os.getenv('CONSUMER_SECRET')\nACCESS_TOKEN = os.getenv('ACCESS_TOKEN')\nACCESS_TOKEN_SECRET = os.getenv('ACCESS_TOKEN_SECRET')\n\n\n#TwitterClient File Stuff\nclient = StreamClient(CONSUMER_KEY,\n CONSUMER_SECRET,\n ACCESS_TOKEN,\n ACCESS_TOKEN_SECRET)\n\n\n\n#NLP File stuff\n\n\nstock_price = [\n {\n \"time\": datetime.now(), \n \"price\": 0, \n \"tweet\": \"Initialising...\", \n \"polarity\": 0\n }\n]\n\ndef update_stock_price(tweet, polarity, score):\n stock_price.append({\"time\": datetime.now(), \"price\": score, \"tweet\": tweet, \"polarity\": polarity})\n\ndef launch(term, limit):\n print(\"Parsing tweets for %s\" %term)\n i = 1\n response = client.stream.statuses.filter.post(track=\"#%s\" % term)\n with open('tweets.csv', 'w', newline='') as csvfile:\n row_writer = csv.writer(csvfile, delimiter=',')\n for data in response.stream():\n if data.lang == 'en':\n tweet = TextBlob(data.text)\n score = stock_price[-1][\"price\"] + tweet.sentiment.polarity\n update_stock_price(data.text, tweet.sentiment.polarity, score)\n row_writer.writerow([tweet.sentiment.polarity, score, datetime.now(), data.text ])\n i += 1\n if i >= limit:\n quit()\n\n\n\nif __name__ == \"__main__\":\n term = sys.argv[1]\n limit = sys.argv[2]\n launch(term, int(limit))","repo_name":"peetdenny/twitter-sentiment-analyiser","sub_path":"twitter_parser.py","file_name":"twitter_parser.py","file_ext":"py","file_size_in_byte":1833,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"21049652538","text":"import wikipedia\r\n\r\nclass Section:\r\n\r\n def __init__(self, 
text, indLvl = 1):\r\n self.indentationLevel = indLvl\r\n self.content = ''\r\n self.subsections = []\r\n\r\n lines = [x for x in text.split('\\n') if x != '']\r\n\r\n title = lines[0]\r\n while title.startswith('='):\r\n title = title[1:]\r\n title = title[1:]\r\n while title.endswith('='):\r\n title = title[:-1]\r\n title = title[:-1]\r\n if title.endswith(\"Edit\"):\r\n title = title[:-4]\r\n self.title = title\r\n lines = lines[1:]\r\n for i in range(len(lines)):\r\n hLevel = self.isHeader(lines[i])\r\n if hLevel == -1:\r\n self.content += lines[i] + '\\n'\r\n else:\r\n lines = lines[i:]\r\n break\r\n else:\r\n return\r\n\r\n subsectionLines = []\r\n for line in lines:\r\n iLvl = self.isHeader(line)\r\n if iLvl == self.indentationLevel + 1:\r\n if len(subsectionLines) == 0:\r\n subsectionLines.append(line)\r\n continue\r\n self.subsections.append(Section('\\n'.join(subsectionLines), iLvl))\r\n subsectionLines = [line]\r\n else:\r\n subsectionLines.append(line)\r\n if len(subsectionLines) != 0:\r\n self.subsections.append(Section('\\n'.join(subsectionLines), self.indentationLevel + 1))\r\n\r\n def isHeader(self, s):\r\n if s.startswith('='):\r\n return s.find(\" \")\r\n else:\r\n return -1\r\n\r\n def __str__(self):\r\n ind = self.indentationLevel - 1\r\n indC = \"\\t\"\r\n i = ind * indC\r\n title = i + \"Title: \" + self.title + \"\\n\"\r\n content = i + \"Content: \" + self.content + \"\\n\" if len(self.content) != 0 else \"\"\r\n s = title + content\r\n\r\n acc = \"\"\r\n for section in self.subsections:\r\n acc += repr(section)\r\n s += acc\r\n\r\n return s.encode(\"ascii\") + '\\n'\r\n\r\n\r\n def __repr__(self):\r\n try:\r\n return self.__str__()\r\n except TypeError:\r\n return \"empty\"\r\n except UnicodeEncodeError:\r\n return \"unicode error\"\r\n\r\n\r\ndef createSection(topic):\r\n try:\r\n page = wikipedia.page(topic)\r\n c = page.content\r\n c = \"= \" + page.title + \" =\\n\" + c\r\n except wikipedia.exceptions.PageError:\r\n return None\r\n \r\n return Section(c)\r\n","repo_name":"yliu1021/def_hacks","sub_path":"WikiParser.py","file_name":"WikiParser.py","file_ext":"py","file_size_in_byte":2488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"8547971608","text":"import json\n\nfile = 'hospitalreviews.json'\nreviewDict = {}\n\nwith open(file) as json_file:\n data = json.load(json_file)\n\n\n\nhospitalRatings = {}\n\n\n\n#for each clinic\nfor i in data:\n name = i['name']\n try:\n overallRating = i['rating']\n ratings = []\n data = {\n \"document\":{\n \"type\":\"PLAIN_TEXT\",\n \"content\":\"\"\n },\n \"encodingType\": \"UTF8\"\n }\n\n\n #get the text from all reviews\n for review in i['reviews']:\n data[\"document\"][\"content\"] = data[\"document\"][\"content\"] + review['text']\n ratings.append(review['rating'])\n # print review['text']\n\n # print data\n hospitalRatings[name] = {'Overall rating: ': overallRating,\n 'Ratings: ': ratings\n }\n\n with open('reviews/'+name +'.json', 'w') as outfile:\n json.dump(data, outfile)\n\n except:\n hospitalRatings[name] = {'Overall rating: ': 'null',\n 'Ratings: ': []\n }\n\n print('completed ' + name)\n\nwith open('HospitalRatings.txt', 'w') as outfile:\n json.dump(hospitalRatings, outfile)\n\n#get the review 'text'\n#concat reviews\n\n\n # for p in data['people']:\n # print('Name: ' + p['name'])\n # print('Website: ' + p['website'])\n # print('From: ' + p['from'])\n # print('')\n#\n# jsonFile = open('hospitalReviewTrial.json', 'r')\n# values = json.load(jsonFile)\n# 
jsonFile.close()\n#\n#\n# print values[\"name\"]","repo_name":"joshuajimsim/UrbanPlanner.AI","sub_path":"hospital_reviews/convertJSON.py","file_name":"convertJSON.py","file_ext":"py","file_size_in_byte":1599,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"63"}
+{"seq_id":"31925976047","text":"# Rock scissor and paper Game with Computer.\r\nimport random\r\n\r\n\r\ndef play():\r\n    computers = random.choice([\"r\",\"p\",\"s\"])\r\n    users = input(\"Enter (r) for rock (s) for scissor and (p) for paper :\").lower()\r\n    if users == computers:\r\n        return \"Tie\"\r\n\r\n    # Using different function in this function\r\n    elif win(users,computers):\r\n        return \"You win\"\r\n    \r\n    # Returning from the play function.\r\n    return \"You lose\"\r\n    \r\n    \r\ndef win(user,computer):\r\n    \r\n    # True exactly when the user's choice beats the computer's choice.\r\n    if user == \"r\" and computer == \"s\":\r\n        return True\r\n    elif(user == \"p\" and computer ==\"r\"):\r\n        return True\r\n    elif(user ==\"s\" and computer ==\"p\"):\r\n        return True\r\n    return False\r\n    \r\n\r\nprint(play())","repo_name":"Saumya-ranjan/PythonProjects","sub_path":"coding projects/rock_paper_scissors.py","file_name":"rock_paper_scissors.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
+{"seq_id":"71777125000","text":"import numpy as np\n\nimport scipy.sparse as sp\n\nfrom sklearn.base import BaseEstimator\nfrom sklearn.utils import check_consistent_length, check_array\n\nfrom .base import trmf, trmf_forecast_factors\nfrom .base import trmf_forecast_targets\n\n\nclass TRMFRegressor(BaseEstimator):\n    r\"\"\"Time-Series Regularized Matrix Factorization with regression.\n\n    Finds two matrices (Z, F) with F being nonnegative and Z behaving\n    like an autoregressive process, whose product approximates the target\n    matrix Y, see [1]_.\n\n    This implementation supports optional exogenous regressors, that can\n    be used for predicting the target matrix (if fit_regression is True).\n    This factorization can be used, for example, for dimensionality reduction\n    in multivariate time-series, when it is important to be able to forecast\n    future dynamics. The objective function for the decomposition with\n    (optional) intercept \\mu, exogenous regressors X and their coefficients\n    B is ::\n\n    .. 
math::\n\n        \\frac{1}{2 T n} \\| Y - Z F - X B + \\mu\\|_{fro}^2\n            + \\frac{C_{\\phi}}{2 d p} \\| \\phi \\|_{fro}^2\n            + \\frac{C_B}{2 m n} \\| B \\|_{fro}^2\n            + \\frac{C_Z}{2} (\n                \\frac{1 - \\eta_Z}{T d} \\|Z\\|_{fro}^2\n                + \\frac{\\eta_Z}{(T - p) d} AR_p(Z))\n            + \\frac{C_F}{2} * (\n                \\frac{1 - \\eta_F}{d n} \\|F\\|_{fro}^2\n                + \\frac{\\eta_F}{d n} R_G(F))\n\n    Where::\n        :math:`\\|A\\|_{fro}^2 = \\sum_{i,j} A_{ij}^2` (Frobenius norm)\n\n        :math:`AR_p(Z) = \\sum_{j,t} (Z_{tj} - \\hat{Z}_{t,j\\mid t-1})^2`\n        (Autoregressive regularizer of order p)\n\n        :math:`\\hat{Z}_{t,j\\mid t-1} = \\sum_{k} \\phi_{jk} * Z_{t-k,j}`\n        (One-step ahead autoregressive forecast)\n\n        :math:`R_G(F) = \\sum_{j\\in G} \\| F_{.j} - \\bar{F}_{.j} \\|^2`\n        (L2 graph adjacency regularizer)\n\n        :math:`\\bar{F}_{.j} = \\frac{1}{|G_j|} \\sum_{k \\in G_j} W_{jk} F_{.k}`\n        (the weighted average over the endpoints of the outgoing edges\n        from j)\n\n    The objective function is minimized by cycling over minimization steps\n    with respect to \\mu, B, factorization (Z, F) and \\phi.\n\n    Parameters\n    ----------\n    n_components : int\n        The number of latent autoregressive factors Z in the decomposition.\n\n    n_order : int\n        The assumed order of the latent autoregressive factors Z in the\n        decomposition.\n\n    C_Z : double, optional (default=0.1)\n        Penalty parameter C_Z of the latent factor Z regularizer.\n\n    C_F : double, optional (default=0.1)\n        Penalty parameter C_F of the factor loadings F regularizer.\n\n    C_phi : double, optional (default=0.01)\n        Penalty parameter C_phi of the Ridge regularizer for the\n        coefficients of the autoregressive dynamics of the latent\n        factors.\n\n    eta_Z : double, optional (default=0.5)\n        The regularization mixing parameter, with 0 <= eta_Z <= 1.\n        For eta_Z = 0 the penalty on the factors Z is an elementwise\n        L2 penalty.\n        For eta_Z = 1 it is the L2 loss of an autoregressive forecasting\n        model of order `n_order` on the latent factors Z.\n        For 0 < eta_Z < 1, the penalty is a combination of both.\n\n    eta_F : double, optional (default=0.0)\n        The regularization mixing parameter, with 0 <= eta_F <= 1.\n        For eta_F = 0 the penalty on the factor loadings F is an\n        elementwise L2 penalty.\n        For eta_F = 1 it is the L2 graph adjacency regularizer.\n        For 0 < eta_F < 1, the penalty is a combination of both.\n\n    adj : sparse_matrix, optional (default=None)\n        The precomputed adjacency matrix of a binary relation between\n        the time series (columns of the matrix to be decomposed). Must\n        be sparse and have at least as many rows and columns as there\n        are time-series.\n\n    C_B : double, optional (default=0.0)\n        Penalty parameter C_B of the error term.\n\n    fit_regression : boolean, optional (default=False)\n        Whether to perform matrix decomposition on the residuals of\n        the linear regression of a multivariate time-series on the\n        provided exogenous data. If set to false, the fit method\n        expects only the time-series matrix in the `X` argument. If set\n        to true, then the fit method expects the exogenous regressors\n        and the multivariate time-series are provided in `X` and `y`,\n        respectively.\n\n    fit_intercept : boolean, optional (default=True)\n        Whether to calculate the intercept for this model. If set\n        to false, no intercept will be used in calculations (i.e.\n        data is expected to be already centered).\n\n    nonnegative_factors : bool, (default=True)\n        Whether to impose the non-negativity constraint on the estimated\n        loadings of the latent autoregressive factors. 
If set to true,\n        the loadings are fitted with the fast proximal gradient method\n        (\"fgm\"), which also makes the fit considerably faster.\n\n    tol : float, optional (default=1e-5)\n        Tolerance for stopping criteria.\n\n    n_max_iterations : int, (default=1000)\n        The maximum number of iterations to be run.\n\n    n_max_mf_iter : int, (default=5)\n        The maximum number of the inner matrix decomposition iterations\n        to be run.\n\n    z_step_kind : str, (default=\"tron\")\n        The optimization method to use for the latent factors. Available are\n        \"tron\" custom implementation of the Trust Region Conjugate Gradient\n        implementation, \"ncg\" scipy's Newton-Conjugate Gradient implementation,\n        and \"lbfgs\" for scipy's L-BFGS-B implementation.\n\n    f_step_kind : str, (default=\"tron\")\n        The optimization method to use for the factor loadings. Available are\n        \"tron\" custom implementation of the Trust Region Conjugate Gradient\n        implementation, \"ncg\" scipy's Newton-Conjugate Gradient method,\n        \"lbfgs\" for scipy's L-BFGS-B implementation, and \"fgm\" for the Fast\n        Proximal Gradient method to ensure nonnegativity of the loadings.\n\n    random_state : int, RandomState instance or None, optional (default=None)\n        The seed of the pseudo random number generator used by the estimator.\n        If int, random_state is the seed used by the random number generator;\n        If RandomState instance, random_state is the random number generator;\n        If None, the random number generator is the RandomState instance used\n        by `np.random`.\n\n    Attributes\n    ----------\n    factors_ : array, shape = (n_samples, n_components)\n        The estimates of the time-series of the latent factors.\n\n    loadings_ : array, shape = (n_components, n_targets)\n        The estimated factor loadings.\n\n    ar_coef_ : array, shape = (n_components, n_order)\n        The estimated coefficients of the latent autoregressive dynamics.\n        The coefficients are stored in the reverse order of recency: the\n        values in each row are ordered from lag `n_order` (the least recent)\n        up to lag `1` (the most recent).\n\n    coef_ : array, optional, shape = (n_features, n_targets)\n        Optional coefficients of the regression on the exogenous data.\n\n    intercept_ : array, optional, shape = (1, n_targets)\n        Optional estimated constant term in decomposition.\n\n    References\n    ----------\n    .. [1] Yu, H. F., Rao, N., & Dhillon, I. S., (2016). \"Temporal\n        regularized matrix factorization for high-dimensional time\n        series prediction.\" In Advances in neural information processing\n        systems (pp. 847-855).\n
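\n    Examples\n    --------\n    A minimal sketch (added editorially; synthetic data, default\n    fit_regression=False, so `X` carries the series themselves):\n\n        >>> import numpy as np\n        >>> Y = np.random.rand(200, 8)  # 200 time steps, 8 series\n        >>> est = TRMFRegressor(n_components=4, n_order=2).fit(Y)\n        >>> Y_hat = est.predict(n_ahead=5)  # 5-step-ahead forecasts\n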
    \"\"\"\n    def __init__(self,\n                 n_components,\n                 n_order,\n                 C_Z=1e-1,\n                 C_F=1e-1,\n                 C_phi=1e-2,\n                 eta_Z=0.5,\n                 eta_F=0.,\n                 adj=None,\n                 C_B=0.0,\n                 fit_regression=False,\n                 fit_intercept=True,\n                 nonnegative_factors=True,\n                 tol=1e-5,\n                 n_max_iterations=1000,\n                 n_max_mf_iter=5,\n                 z_step_kind=\"tron\",\n                 f_step_kind=\"tron\",\n                 random_state=None):\n        super(TRMFRegressor, self).__init__()\n\n        self.n_components = n_components\n        self.n_order = n_order\n        self.C_Z = C_Z\n        self.C_F = C_F\n        self.C_phi = C_phi\n        self.eta_Z = eta_Z\n        self.eta_F = eta_F\n        self.adj = adj\n        self.C_B = C_B\n        self.fit_regression = fit_regression\n        self.fit_intercept = fit_intercept\n        self.tol = tol\n        self.n_max_iterations = n_max_iterations\n        self.n_max_mf_iter = n_max_mf_iter\n        self.nonnegative_factors = nonnegative_factors\n        self.random_state = random_state\n        self.z_step_kind = z_step_kind\n        self.f_step_kind = f_step_kind\n\n    def fit(self, X, y=None, sample_weight=None):\n        \"\"\"Fit the TRMF regression model to the given training data.\n\n        Parameters\n        ----------\n        X : array-like, shape (n_samples, ...)\n            Training multivariate time series data: either exogenous\n            regressors or the multivariate time-series themselves, depending\n            on the `fit_regression` setting.\n            For fit_regression=True, the expected shape of X is\n            (n_samples, n_features).\n            For fit_regression=False, the expected shape of X is\n            (n_samples, n_targets).\n\n        y : None or array-like, shape (n_samples, n_targets)\n            If fit_regression=True, then y is the target multivariate\n            time-series, where n_samples is the number of observations\n            in the time-series and n_targets is the number of observed\n            series.\n            For fit_regression=False, y must be `None`.\n\n        Returns\n        -------\n        self : object\n            Returns the instance itself.\n        \"\"\"\n\n        if not self.fit_regression:\n            if y is not None:\n                raise TypeError(\"\"\"Exogenous regressors provided in `X`, \"\"\"\n                                \"\"\"yet `fit_regression` is false.\"\"\")\n            X, y = None, X\n\n        else:\n            if y is None:\n                raise TypeError(\"\"\"Endogenous data is not provided \"\"\"\n                                \"\"\"in `y`, yet `fit_regression` is True.\"\"\")\n            # end if\n\n        # `y` is a matrix anyway (under any mode). 
no ensure_min_features=0\n        # since regression mode still requires at least one feature column.\n        y = check_array(y, dtype=\"numeric\", accept_sparse=True,\n                        ensure_min_samples=self.n_order + 1,\n                        ensure_2d=True)\n\n        if X is not None:\n            X = check_array(X, dtype=\"numeric\", accept_sparse=False,\n                            ensure_min_samples=self.n_order + 1, ensure_2d=True)\n        else:\n            X = np.empty((y.shape[0], 0))\n        # end if\n\n        check_consistent_length(X, y)\n\n        f_step_kind = \"fgm\" if self.nonnegative_factors else self.f_step_kind\n        estimates = trmf(y, self.n_components, self.n_order, self.C_Z,\n                         self.C_F, self.C_phi, self.eta_Z, self.eta_F,\n                         adj=self.adj, fit_intercept=self.fit_intercept,\n                         regressors=X, C_B=self.C_B, tol=self.tol,\n                         n_max_iterations=self.n_max_iterations,\n                         n_max_mf_iter=self.n_max_mf_iter,\n                         f_step_kind=f_step_kind,\n                         z_step_kind=self.z_step_kind,\n                         random_state=self.random_state)\n\n        # Record the estimates in this instance's properties\n        factors, loadings, ar_coef, intercept, beta = estimates\n\n        self.factors_, self.loadings_ = factors, loadings\n        self.ar_coef_ = ar_coef\n\n        self.coef_, self.intercept_ = beta, intercept\n\n        # self.fitted_ = np.dot(X, beta) + np.dot(factors, loadings) \\\n        #                + intercept\n\n        return self\n\n    def forecast_factors(self, n_ahead):\n        r\"\"\"Compute the dynamic forecast of the latent factor time-series.\n\n        Parameters\n        ----------\n        n_ahead : int\n            The depth of the latent factors' forecast into the future.\n\n        Details\n        -------\n        This computes a dynamic forecast of AR(p) process with :math:`p`\n        equal to `n_order` and coefficients :math:`\\phi` fixed at `ar_coef_`::\n\n        .. math ::\n\n            \\hat{Z}_{t+h, j \\mid t}\n                = \\sum_{k=1}^p \\phi_{jk} * Z_{t + h - k,j \\mid t}\n\n        where::\n            :math:`\\hat{Z}_{s,j \\mid t}` is :math:`Z_{s,j}` whenever\n            :math:`s \\leq t`.\n\n        \"\"\"\n\n        return trmf_forecast_factors(n_ahead, self.ar_coef_,\n                                     prehist=self.factors_)\n\n    def predict(self, X=None, n_ahead=10):\n        r\"\"\"Predict the targets based on the autoregressive decomposition.\n\n        Parameters\n        ----------\n        X : None or array-like, shape (n_ahead, n_features)\n            The future dynamics of the exogenous regressors or `None`,\n            depending on the `fit_regression` setting.\n            For fit_regression=True, the expected shape of X is\n            (n_ahead, n_features).\n            For fit_regression=False, X is expected to be `None`.\n\n        n_ahead : int, optional (default=10)\n            The depth of the latent factors' forecast into the future.\n\n        Details\n        -------\n        This computes the prediction of the values of the time-series\n        based on the `n_ahead`-step dynamic forecasts of the latent\n        factors and the regression coefficients.\n        \"\"\"\n\n        if self.fit_regression:\n            X = check_array(X, dtype=\"numeric\", accept_sparse=False)\n        else:\n            X = np.empty((n_ahead, 0))\n        # end if\n\n        return trmf_forecast_targets(\n            n_ahead, self.loadings_, self.ar_coef_, self.intercept_,\n            self.coef_, self.factors_, regressors=X, mode=\"exog\")\n","repo_name":"ivannz/trmf","sub_path":"trmf/classes.py","file_name":"classes.py","file_ext":"py","file_size_in_byte":14131,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"63"}
+{"seq_id":"8255192087","text":"from ctypes import *\nimport os\nimport sys\nimport time\n\nterminate=False\n\nclass Configuration:\n    \"\"\" Holds configuration options for the Borg MOEA Python wrapper. \"\"\"\n\n    @staticmethod\n    def check():\n        \"\"\" Checks if the Borg MOEA is initialized and ready to run; otherwise an error is raised. 
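Call Configuration.initialize() first to load both the standard C and Borg MOEA libraries. 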
\"\"\"\n try:\n Configuration.libc\n except:\n raise OSError(\"The standard C library is not defined, please see Configuration.setStandardCLibrary()\")\n\n try:\n Configuration.libborg\n except:\n raise OSError(\"The Borg MOEA C library is not defined, please see Configuration.setBorgLibrary()\")\n\n @staticmethod\n def initialize(borg_path = None):\n \"\"\" Initializes the standard C and Borg MOEA libraries. \"\"\"\n Configuration.setStandardCLibrary()\n Configuration.setBorgLibrary(path = borg_path)\n Configuration.seed()\n Configuration.startedMPI = False\n\n @staticmethod\n def setStandardCLibrary(path=None):\n \"\"\" Override the standard C library (libc) used by the Python-to-C interface.\n\n If the path is not specified, this method will attempt to auto-detect the\n correct location of the standard C library. If this auto-detection fails,\n this method will return without error. This allows the module to load\n successfully and requires the user to manually invoke this method before\n using the Borg MOEA.\n \"\"\"\n\n if path:\n Configuration.libc = CDLL(path)\n elif os.name == \"posix\":\n try:\n Configuration.libc = CDLL(\"libc.so.6\")\n except OSError:\n return\n elif os.name == \"nt\" and cdll.msvcrt:\n Configuration.libc = cdll.msvcrt\n else:\n return\n\n try:\n Configuration.stdout = Configuration.libc.fdopen(sys.stdout.fileno(), \"w\")\n except AttributeError:\n Configuration.stdout = Configuration.libc._fdopen(sys.stdout.fileno(), \"w\")\n\n @staticmethod\n def setBorgLibrary(path=None):\n \"\"\" Override the location of the Borg MOEA shared object.\n\n If the path is not specified, this method attempts to auto-detect the location\n of the Borg MOEA C library. If auto-detection fails, this method returns\n without error. This allows the module to load successfully and requires the\n user to manually invoke this method before using the Borg MOEA\n \"\"\"\n\n if path:\n try:\n Configuration.libborg = CDLL(path)\n Configuration.libborg.BORG_Copyright\n Configuration.stdcall = False\n except AttributeError:\n # Not using __cdecl, try __stdcall instead\n if os.name == \"nt\":\n Configuration.libborg = WinDLL(path)\n Configuration.stdcall = True\n elif os.name == \"posix\":\n try:\n Configuration.libborg = CDLL(\"./libborg.so\")\n Configuration.stdcall = False\n except OSError:\n return\n elif os.name == \"nt\":\n try:\n Configuration.libborg = CDLL(\"./borg.dll\")\n Configuration.libborg.BORG_Copyright\n Configuration.stdcall = False\n except OSError:\n return\n except AttributeError:\n # Not using __cdecl, try __stdcall instead\n try:\n Configuration.libborg = WinDLL(\"./borg.dll\")\n Configuration.stdcall = True\n except OSError:\n return\n \n # Set result type of functions with non-standard types\n Configuration.libborg.BORG_Solution_get_variable.restype = c_double\n Configuration.libborg.BORG_Solution_get_objective.restype = c_double\n Configuration.libborg.BORG_Solution_get_constraint.restype = c_double\n Configuration.libborg.BORG_Operator_get_probability.restype = c_double\n\n @staticmethod\n def seed(value=None):\n \"\"\" Sets the pseudo-random number generator seed. \"\"\"\n Configuration.check()\n\n if value:\n Configuration.libborg.BORG_Random_seed(c_ulong(value))\n else:\n Configuration.libborg.BORG_Random_seed(c_ulong(os.getpid()*long(time.time())))\n\n @staticmethod\n def enableDebugging():\n \"\"\" Enables debugging output from the Borg MOEA. 
\"\"\"\n Configuration.check()\n Configuration.libborg.BORG_Debug_on()\n\n @staticmethod\n def disableDebugging():\n \"\"\" Disables debugging output from the Borg MOEA. \"\"\"\n Configuration.check()\n Configuration.libborg.BORG_Debug_off()\n\n @staticmethod\n def displayCopyright():\n \"\"\" Displays the copyright message for the Borg MOEA. \"\"\"\n Configuration.check()\n Configuration.libborg.BORG_Copyright(Configuration.stdout)\n\n @staticmethod\n def startMPI():\n \"\"\" Initializes MPI to enable master-slave and multi-master Borg MOEA runs. \"\"\"\n if Configuration.startedMPI:\n raise RuntimeError(\"MPI is already started\")\n\n if os.name != \"posix\":\n raise RuntimeError(\"MPI is only supported on Linux\")\n\n try:\n Configuration.libborg.BORG_Algorithm_ms_startup\n except AttributeError:\n # The serial Borg MOEA C library is loaded; switch to parallel\n try:\n Configuration.setBorgLibrary(\"./libborgmm.so\")\n except OSError:\n try:\n Configuration.setBorgLibrary(\"./libborgms.so\")\n except OSError:\n raise OSError(\"Unable to locate the parallel Borg MOEA C library\")\n\n # The following line is needed to load the MPI library correctly\n CDLL(\"libmpi.so\", RTLD_GLOBAL)\n\n # Pass the command-line arguments to MPI_Init\n argc = c_int(len(sys.argv))\n CHARPP = c_char_p * len(sys.argv)\n argv = CHARPP()\n\n for i in range(len(sys.argv)):\n argv[i] = sys.argv[i]\n\n Configuration.libborg.BORG_Algorithm_ms_startup(\n cast(addressof(argc), POINTER(c_int)),\n cast(addressof(argv), POINTER(CHARPP)))\n\n Configuration.startedMPI = True\n\n @staticmethod\n def stopMPI():\n \"\"\" Shuts down MPI; the master-slave and multi-master Borg MOEA can no longer be used. \"\"\"\n if not Configuration.startedMPI:\n raise RuntimeError(\"MPI is not started\")\n\n Configuration.libborg.BORG_Algorithm_ms_shutdown()\n Configuration.startedMPI = False\n\nclass RestartMode:\n \"\"\" Controls the mutation rate during restarts.\n\n DEFAULT - The mutation rate is fixed at 1/numberOfVariables\n RANDOM - The mutation rate is fixed at 100%\n RAMPED - The mutation rates are uniformly sampled between 1/numberOfVariables to 100%\n ADAPTIVE - The mutation rate adapts based on success of previous restarts\n INVERTED - Similar to ADAPTIVE, except the rate is inverted\n \"\"\"\n\n DEFAULT = 0\n RANDOM = 1\n RAMPED = 2\n ADAPTIVE = 3\n INVERTED = 4\n\nclass ProbabilityMode:\n \"\"\" Controls how operator probabilities are adapted.\n\n DEFAULT - Operator probabilities based on archive membership\n RECENCY - Operator probabilities based on recency (tracks recent additions to archive)\n BOTH - Operator probabilities based on archive membership and recency\n ADAPTIVE - Favors archive membership, but uses recency if insufficient archive size\n \"\"\"\n\n DEFAULT = 0\n RECENCY = 1\n BOTH = 2\n ADAPTIVE = 3\n\nclass InitializationMode:\n \"\"\" Controls how initial populations in the multi-master Borg MOEA are initialized.\n\n UNIFORM - Each master starts with a uniformly distributed population\n LATIN - Each master starts with a Latin hypercube sampled population\n GLOBAL_LATIN - A global Latin hypercube sampled population is generated, partitioned,\n and distributed to the master nodes\n \"\"\"\n\n UNIFORM = 0\n LATIN = 1\n GLOBAL_LATIN = 2\n\nclass Direction:\n \"\"\" The optimization direction of an objective (minimized or maximized).\n\n MINIMIZE - The objective is minimized towards negative infinity\n MAXIMIZE - The objective is maximized towards positive infinity\n \"\"\"\n\n MINIMIZE = 0\n MAXIMIZE = 1\n\nclass 
Borg:\n \"\"\" Solves an optimization problem using the Borg MOEA. \"\"\"\n\n def __init__(self, numberOfVariables, numberOfObjectives, numberOfConstraints, function, epsilons=None,\n bounds=None, directions=None, add_pysedsim_inputs=None, borg_path=None):\n \"\"\" Creates a new instance of the Borg MOEA.\n\n numberOfVariables - The number of decision variables in the optimization problem\n numberOfObjectives - The number of objectives in the optimization problem\n numberOfConstraints - The number of constraints in the optimization problem\n function - The function defining the optimization problem\n epsilons - The epsilon values for each objective\n bounds - The lower and upper bounds for each decision variable\n directions - The optimization direction (MINIMIZE or MAXIMIZE) for each objective\n add_pysedsim_inputs - Additional PySedSim inputs forwarded to the evaluation function\n borg_path - Optional path to the Borg MOEA shared library\n \"\"\"\n\n Configuration.setBorgLibrary(path=borg_path)\n # Ensure the underlying library is available\n Configuration.check()\n\n # Validate input arguments\n if numberOfVariables < 1:\n raise ValueError(\"Requires at least one decision variable\")\n\n if numberOfObjectives < 1:\n raise ValueError(\"Requires at least one objective\")\n\n if numberOfConstraints < 0:\n raise ValueError(\"Number of constraints cannot be negative\")\n\n # Construct Borg object\n self.numberOfVariables = numberOfVariables\n self.numberOfObjectives = numberOfObjectives\n self.numberOfConstraints = numberOfConstraints\n self.directions = directions\n if add_pysedsim_inputs is None:\n self.function = _functionWrapper(function, numberOfVariables, numberOfObjectives, numberOfConstraints, directions)\n else:\n # More PySedSim inputs are required\n self.function = _functionWrapper(function, numberOfVariables, numberOfObjectives, numberOfConstraints,\n directions, addl_inputs=add_pysedsim_inputs)\n\n if Configuration.stdcall:\n self.CMPFUNC = WINFUNCTYPE(c_int, POINTER(c_double), POINTER(c_double), POINTER(c_double))\n else:\n self.CMPFUNC = CFUNCTYPE(c_int, POINTER(c_double), POINTER(c_double), POINTER(c_double))\n\n self.callback = self.CMPFUNC(self.function)\n self.reference = c_void_p(Configuration.libborg.BORG_Problem_create(numberOfVariables, numberOfObjectives, numberOfConstraints, self.callback))\n\n if bounds:\n self.setBounds(*bounds)\n else:\n self.setBounds(*[[0, 1]]*numberOfVariables)\n\n if epsilons:\n self.setEpsilons(*epsilons)\n else:\n self.epsilonsAssigned = False\n\n def __del__(self):\n \"\"\" Deletes the underlying C objects. \"\"\"\n try:\n Configuration.libborg.BORG_Problem_destroy(self.reference)\n except AttributeError:\n pass\n\n def setBounds(self, *args):\n \"\"\" Sets the decision variable lower and upper bounds.\n\n The arguments to this function must be two-element lists defining the\n lower and upper bounds. The number of lists must equal the\n number of decision variables. For example:\n setBounds([0, 1], [-10, 10], [-1, 1])\n If each decision variable has the same bounds, this can be\n written compactly:\n setBounds(*[[0, 1]]*3)\n \"\"\"\n\n if len(args) != self.numberOfVariables:\n raise ValueError(\"Incorrect number of bounds specified\")\n\n for i in range(self.numberOfVariables):\n self._setBounds(i, args[i][0], args[i][1])\n\n def setEpsilons(self, *args):\n \"\"\" Sets the epsilons for the objective values.\n\n The epsilons control the granularity / resolution of the Pareto\n optimal set. Small epsilons typically result in larger Pareto\n optimal sets, but can reduce runtime performance. Specify one\n argument for each objective. 
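Epsilons must be assigned\n before solve() or solveMPI() is called; constructing the Borg object\n without epsilons only defers that step. 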
For example:\n setEpsilons(0.01, 0.5)\n If all epsilons are the same, this can be written more compactly:\n setEpsilons(*[0.01]*2)\n \"\"\"\n\n if len(args) != self.numberOfObjectives:\n raise ValueError(\"Incorrect number of epsilons specified\")\n\n for i in range(self.numberOfObjectives):\n self._setEpsilon(i, args[i])\n\n self.epsilonsAssigned = True\n\n def _setEpsilon(self, index, value):\n \"\"\" Sets the epsilon value at the given index. \"\"\"\n Configuration.libborg.BORG_Problem_set_epsilon(self.reference, index, c_double(value))\n\n def _setBounds(self, index, lowerBound, upperBound):\n \"\"\" Sets the lower and upper decision variable bounds at the given index. \"\"\"\n Configuration.libborg.BORG_Problem_set_bounds(self.reference, index, c_double(lowerBound), c_double(upperBound))\n\n def solveMPI(self, islands=1, maxTime=None, maxEvaluations=None, initialization=None, runtime=None,\n allEvaluations=None, frequency=None):\n \"\"\" Runs the master-slave or multi-master Borg MOEA using MPI.\n\n islands - The number of islands\n maxTime - The maximum wallclock time to run, in hours\n maxEvaluations - The maximum NFE per island (total NFE is islands*maxEvaluations)\n initialization - Controls how the initial populations are generated\n runtime - Filename pattern for saving runtime dynamics (the filename should include\n one %d which gets replaced by the island index)\n allEvaluations - Filename pattern for saving all evaluations (the filename should include\n one %d which gets replaced by the island index). Since this can quickly\n generate large files, use this option with caution.\n frequency - Frequency of runtime output as integer (e.g., 500). Default is 100.\n\n Note: All nodes must invoke solveMPI. However, only one node will return the discovered\n Pareto optimal solutions. The rest will return None.\n \"\"\"\n\n if not self.epsilonsAssigned:\n raise RuntimeError(\"Epsilons must be assigned\")\n\n if not Configuration.startedMPI:\n raise RuntimeError(\"MPI is not started; call Configuration.startMPI() first\")\n\n if not maxTime and not maxEvaluations:\n raise ValueError(\"Must specify maxEvaluations or maxTime (or both)\")\n\n if islands > 1:\n try:\n Configuration.libborg.BORG_Algorithm_ms_islands(c_int(islands))\n except AttributeError:\n raise RuntimeError(\"The loaded Borg MOEA C library does not support multi-master\")\n\n if maxTime:\n Configuration.libborg.BORG_Algorithm_ms_max_time(c_double(maxTime))\n\n if maxEvaluations:\n Configuration.libborg.BORG_Algorithm_ms_max_evaluations(c_int(maxEvaluations))\n\n if initialization and islands > 1:\n Configuration.libborg.BORG_Algorithm_ms_initialization(c_int(initialization))\n\n if runtime:\n Configuration.libborg.BORG_Algorithm_output_runtime(c_char_p(runtime))\n\n if frequency:\n # frequency is an integer count, so it is passed as c_int\n Configuration.libborg.BORG_Algorithm_output_frequency(c_int(frequency))\n\n if allEvaluations:\n Configuration.libborg.BORG_Algorithm_output_evaluations(c_char_p(allEvaluations))\n\n result = Configuration.libborg.BORG_Algorithm_ms_run(self.reference)\n\n return Result(result, self) if result else None\n\n def solve(self, settings=None):\n \"\"\" Runs the Borg MOEA to solve the defined optimization problem, returning the\n discovered Pareto optimal set.\n\n settings - Dictionary of parameters for the Borg MOEA. The key should match one\n of the parameters defined by the C Borg API. Default parameter values\n are used for any undefined parameters.
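 For example (the values\n here are illustrative, not recommendations):\n solve({\"maxEvaluations\": 50000, \"initialPopulationSize\": 200})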
\n \"\"\"\n\n if settings is None:\n settings = {}\n\n if not self.epsilonsAssigned:\n raise RuntimeError(\"Epsilons must be set\")\n\n maxEvaluations = settings.get(\"maxEvaluations\", 10000)\n start = time.clock()\n\n pm = Configuration.libborg.BORG_Operator_create(\"PM\", 1, 1, 2, Configuration.libborg.BORG_Operator_PM)\n Configuration.libborg.BORG_Operator_set_parameter(pm, 0, c_double(settings.get(\"pm.rate\", 1.0 / self.numberOfVariables)))\n Configuration.libborg.BORG_Operator_set_parameter(pm, 1, c_double(settings.get(\"pm.distributionIndex\", 20.0)))\n \n sbx = Configuration.libborg.BORG_Operator_create(\"SBX\", 2, 2, 2, Configuration.libborg.BORG_Operator_SBX)\n Configuration.libborg.BORG_Operator_set_parameter(sbx, 0, c_double(settings.get(\"sbx.rate\", 1.0)))\n Configuration.libborg.BORG_Operator_set_parameter(sbx, 1, c_double(settings.get(\"sbx.distributionIndex\", 15.0)))\n Configuration.libborg.BORG_Operator_set_mutation(sbx, pm)\n\n de = Configuration.libborg.BORG_Operator_create(\"DE\", 4, 1, 2, Configuration.libborg.BORG_Operator_DE)\n Configuration.libborg.BORG_Operator_set_parameter(de, 0, c_double(settings.get(\"de.crossoverRate\", 0.1)))\n Configuration.libborg.BORG_Operator_set_parameter(de, 1, c_double(settings.get(\"de.stepSize\", 0.5)))\n Configuration.libborg.BORG_Operator_set_mutation(de, pm)\n\n um = Configuration.libborg.BORG_Operator_create(\"UM\", 1, 1, 1, Configuration.libborg.BORG_Operator_UM)\n Configuration.libborg.BORG_Operator_set_parameter(um, 0, c_double(settings.get(\"um.rate\", 1.0 / self.numberOfVariables)))\n\n spx = Configuration.libborg.BORG_Operator_create(\"SPX\", c_int(settings.get(\"spx.parents\", 10)), c_int(settings.get(\"spx.offspring\", 2)), 1, Configuration.libborg.BORG_Operator_SPX)\n Configuration.libborg.BORG_Operator_set_parameter(spx, 0, c_double(settings.get(\"spx.epsilon\", 3.0)))\n\n pcx = Configuration.libborg.BORG_Operator_create(\"PCX\", c_int(settings.get(\"pcx.parents\", 10)), c_int(settings.get(\"pcx.offspring\", 2)), 2, Configuration.libborg.BORG_Operator_PCX)\n Configuration.libborg.BORG_Operator_set_parameter(pcx, 0, c_double(settings.get(\"pcx.eta\", 0.1)))\n Configuration.libborg.BORG_Operator_set_parameter(pcx, 1, c_double(settings.get(\"pcx.zeta\", 0.1)))\n\n undx = Configuration.libborg.BORG_Operator_create(\"UNDX\", c_int(settings.get(\"undx.parents\", 10)), c_int(settings.get(\"undx.offspring\", 2)), 2, Configuration.libborg.BORG_Operator_UNDX)\n Configuration.libborg.BORG_Operator_set_parameter(undx, 0, c_double(settings.get(\"undx.zeta\", 0.5)))\n Configuration.libborg.BORG_Operator_set_parameter(undx, 1, c_double(settings.get(\"undx.eta\", 0.35)))\n\n algorithm = Configuration.libborg.BORG_Algorithm_create(self.reference, 6)\n Configuration.libborg.BORG_Algorithm_set_operator(algorithm, 0, sbx)\n Configuration.libborg.BORG_Algorithm_set_operator(algorithm, 1, de)\n Configuration.libborg.BORG_Algorithm_set_operator(algorithm, 2, pcx)\n Configuration.libborg.BORG_Algorithm_set_operator(algorithm, 3, spx)\n Configuration.libborg.BORG_Algorithm_set_operator(algorithm, 4, undx)\n Configuration.libborg.BORG_Algorithm_set_operator(algorithm, 5, um)\n\n Configuration.libborg.BORG_Algorithm_set_initial_population_size(algorithm, c_int(settings.get(\"initialPopulationSize\", 100)))\n Configuration.libborg.BORG_Algorithm_set_minimum_population_size(algorithm, c_int(settings.get(\"minimumPopulationSize\", 100)))\n Configuration.libborg.BORG_Algorithm_set_maximum_population_size(algorithm, 
c_int(settings.get(\"maximumPopulationSize\", 10000)))\n Configuration.libborg.BORG_Algorithm_set_population_ratio(algorithm, c_double(1.0 / settings.get(\"injectionRate\", 0.25)))\n Configuration.libborg.BORG_Algorithm_set_selection_ratio(algorithm, c_double(settings.get(\"selectionRatio\", 0.02)))\n Configuration.libborg.BORG_Algorithm_set_restart_mode(algorithm, c_int(settings.get(\"restartMode\", RestartMode.DEFAULT)))\n Configuration.libborg.BORG_Algorithm_set_max_mutation_index(algorithm, c_int(settings.get(\"maxMutationIndex\", 10)))\n Configuration.libborg.BORG_Algorithm_set_probability_mode(algorithm, c_int(settings.get(\"probabilityMode\", ProbabilityMode.DEFAULT)))\n\n runtimeformat = settings.get('runtimeformat', 'optimizedv')\n fp = None\n if \"frequency\" in settings:\n statistics = []\n lastSnapshot = 0\n frequency = settings.get(\"frequency\") \n if \"runtimefile\" in settings:\n fp = open(settings['runtimefile'], 'w')\n if runtimeformat == 'optimizedv':\n fp.write(\"//\")\n dynamics_header = [\n \"NFE\", \"ElapsedTime\", \n \"SBX\", \"DE\", \"PCX\", \"SPX\", \"UNDX\", \"UM\",\n \"Improvements\", \"Restarts\", \n \"PopulationSize\", \"ArchiveSize\"]\n if settings.get(\"restartMode\", None) == RestartMode.ADAPTIVE:\n dynamics_header.append(\"MutationIndex\")\n fp.write(\",\".join(dynamics_header))\n fp.write(\"\\n\")\n header = [\"NFE\"] \\\n + [\"dv{0}\".format(i) for i in range(self.numberOfVariables)] \\\n + [\"obj{0}\".format(i) for i in range(self.numberOfObjectives)] \\\n + [\"con{0}\".format(i) for i in range(self.numberOfConstraints)]\n fp.write(\",\".join(header))\n fp.write(\"\\n\")\n fp.flush()\n else:\n fp = None\n else:\n statistics = None\n\n data_header_written=False\n while Configuration.libborg.BORG_Algorithm_get_nfe(algorithm) < maxEvaluations:\n Configuration.libborg.BORG_Algorithm_step(algorithm)\n if terminate is True:\n break\n currentEvaluations = Configuration.libborg.BORG_Algorithm_get_nfe(algorithm)\n\n if statistics is not None and currentEvaluations-lastSnapshot >= frequency:\n entry = {}\n entry[\"NFE\"] = currentEvaluations\n entry[\"ElapsedTime\"] = time.clock() - start\n entry[\"SBX\"] = Configuration.libborg.BORG_Operator_get_probability(sbx)\n entry[\"DE\"] = Configuration.libborg.BORG_Operator_get_probability(de)\n entry[\"PCX\"] = Configuration.libborg.BORG_Operator_get_probability(pcx)\n entry[\"SPX\"] = Configuration.libborg.BORG_Operator_get_probability(spx)\n entry[\"UNDX\"] = Configuration.libborg.BORG_Operator_get_probability(undx)\n entry[\"UM\"] = Configuration.libborg.BORG_Operator_get_probability(um)\n entry[\"Improvements\"] = Configuration.libborg.BORG_Algorithm_get_number_improvements(algorithm)\n entry[\"Restarts\"] = Configuration.libborg.BORG_Algorithm_get_number_restarts(algorithm)\n entry[\"PopulationSize\"] = Configuration.libborg.BORG_Algorithm_get_population_size(algorithm)\n entry[\"ArchiveSize\"] = Configuration.libborg.BORG_Algorithm_get_archive_size(algorithm)\n\n if settings.get(\"restartMode\", RestartMode.DEFAULT) == RestartMode.ADAPTIVE:\n entry[\"MutationIndex\"] = Configuration.libborg.BORG_Algorithm_get_mutation_index(algorithm)\n if fp is None:\n statistics.append(entry)\n else:\n archive = Result(Configuration.libborg.BORG_Algorithm_get_result(algorithm), self, statistics)\n if runtimeformat == 'optimizedv':\n row = [\"{0}\".format(entry[dynamic]) for dynamic in dynamics_header]\n fp.write(\"//\")\n fp.write(\",\".join(row))\n fp.write(\"\\n\")\n delimiter = ','\n elif runtimeformat == 'borg':\n metrics = 
[ (\"NFE\", 'd'),\n (\"ElapsedTime\", '.17g'),\n (\"SBX\", '.17g'),\n (\"DE\", '.17g'),\n (\"PCX\", '.17g'),\n (\"SPX\", '.17g'),\n (\"UNDX\", '.17g'),\n (\"UM\", '.17g'),\n (\"Improvements\", 'd'),\n (\"Restarts\", 'd'),\n (\"PopulationSize\", 'd'),\n (\"ArchiveSize\", 'd')\n ]\n for metric,fmt in metrics:\n fp.write(\"//{0}={1}\\n\".format(metric,\"\".join([\"{0:\",fmt,\"}\"])).format(entry[metric]))\n if 'MutationIndex' in entry:\n fp.write(\"//MutationIndex={0:d}\\n\".format(entry['MutationIndex']))\n if \"data_header\" in settings and data_header_written is False:\n data_header_written=True\n data_header = [\"_\".join(x.split(\" \")) for x in settings['data_header']]\n data_header.insert(0, \"NFE\")\n fp.write(\" \".join(data_header))\n fp.write(\"\\n\")\n delimiter = \" \"\n\n for solution in archive:\n report = [entry[\"NFE\"]]\n report.extend(solution.getVariables())\n report.extend(solution.getObjectives())\n report.extend(solution.getConstraints())\n fp.write(delimiter.join(\"{0}\".format(v) for v in report))\n fp.write(\"\\n\")\n fp.flush()\n\n lastSnapshot = currentEvaluations\n\n result = Configuration.libborg.BORG_Algorithm_get_result(algorithm)\n if \"runtimefile\" in settings:\n fp.close()\n\n Configuration.libborg.BORG_Operator_destroy(sbx)\n Configuration.libborg.BORG_Operator_destroy(de)\n Configuration.libborg.BORG_Operator_destroy(pm)\n Configuration.libborg.BORG_Operator_destroy(um)\n Configuration.libborg.BORG_Operator_destroy(spx)\n Configuration.libborg.BORG_Operator_destroy(pcx)\n Configuration.libborg.BORG_Operator_destroy(undx)\n Configuration.libborg.BORG_Algorithm_destroy(algorithm)\n\n return Result(result, self, statistics)\n\nclass Solution:\n \"\"\" A solution to the optimization problem. \"\"\"\n\n def __init__(self, reference, problem):\n \"\"\" Creates a solution given a reference to the underlying C object. \"\"\"\n self.reference = reference\n self.problem = problem\n\n # There is no __del__ since the underlying C solutions are deleted when the associated\n # result object is deleted \n\n def getVariables(self):\n \"\"\" Returns the decision variable values for this solution. \"\"\"\n return [self._getVariable(i) for i in range(self.problem.numberOfVariables)]\n\n def getObjectives(self):\n \"\"\" Returns the objective values for this solution. \"\"\"\n return [self._getObjective(i) for i in range(self.problem.numberOfObjectives)]\n\n def getConstraints(self):\n \"\"\" Returns the constraint values for this solution. \"\"\"\n return [self._getConstraint(i) for i in range(self.problem.numberOfConstraints)]\n\n def _getVariable(self, index):\n \"\"\" Returns the decision variable at the given index. \"\"\"\n return Configuration.libborg.BORG_Solution_get_variable(self.reference, index)\n\n def _getObjective(self, index):\n \"\"\" Returns the objective value at the given index. \"\"\"\n value = Configuration.libborg.BORG_Solution_get_objective(self.reference, index)\n \n if self.problem.directions and self.problem.directions[index]:\n return -value\n else:\n return value\n\n def _getConstraint(self, index):\n \"\"\" Returns the constraint value at the given index. \"\"\"\n return Configuration.libborg.BORG_Solution_get_constraint(self.reference, index)\n\n def display(self, out=sys.stdout, separator=\" \"):\n \"\"\" Prints the decision variables, objectives, and constraints to standard output. 
\"\"\"\n print >> out, separator.join(map(str, self.getVariables() + self.getObjectives() + self.getConstraints()))\n\n def violatesConstraints(self):\n \"\"\" Returns True if this solution violates one or more constraints; False otherwise. \"\"\"\n return Configuration.libborg.BORG_Solution_violates_constraints(self.reference) != 0\n\nclass Result:\n \"\"\" A Pareto optimal set (the output of the Borg MOEA). \"\"\"\n\n def __init__(self, reference, problem, statistics=None):\n \"\"\" Creates a new Pareto optimal set given a reference to the underlying C object. \"\"\"\n self.reference = reference\n self.problem = problem\n self.statistics = statistics\n\n def __del__(self):\n \"\"\" Deletes the underlying C objects. \"\"\"\n Configuration.libborg.BORG_Archive_destroy(self.reference)\n\n def __iter__(self):\n \"\"\" Returns an iterator over the Pareto optimal solutions. \"\"\"\n return ResultIterator(self)\n\n def display(self, out=sys.stdout, separator=\" \"):\n \"\"\" Print the Pareto optimal solutions to standard output. \"\"\"\n for solution in self:\n solution.display(out, separator)\n\n def size(self):\n \"\"\" Returns the size of the Pareto optimal set. \"\"\"\n return Configuration.libborg.BORG_Archive_get_size(self.reference)\n\n def get(self, index):\n \"\"\" Returns the Pareto optimal solution at the given index. \"\"\"\n return Solution(Configuration.libborg.BORG_Archive_get(self.reference, index), self.problem)\n\nclass ResultIterator:\n \"\"\" Iterates over the solutions in a Pareto optimal set. \"\"\"\n\n def __init__(self, result):\n \"\"\" Creates an iterator over the given Pareto optimal set. \"\"\"\n self.result = result\n self.index = -1\n\n def next(self):\n \"\"\" Returns the next Pareto optimal solution in the set. \"\"\"\n self.index = self.index + 1\n\n if self.index >= self.result.size():\n raise StopIteration\n else:\n return self.result.get(self.index)\n\ndef _functionWrapper(function, numberOfVariables, numberOfObjectives, numberOfConstraints, directions=None,\n addl_inputs=None):\n \"\"\" Wraps a Python evaluation function and converts it to the function signature\n required by the C API.\n\n function - The Python evaluation function of the form (o, c) = f(v)\n numberOfVariables - The number of decision variables\n numberOfObjectives - The number of objectives\n numberOfConstraints - The number of constraints\n directions - The array of optimization directions\n \"\"\"\n\n def innerFunction(v,o,c):\n \"\"\" The function that gets passed to the C API.\n\n v - The array of decision variables (input)\n o - The array of objectives (output)\n c - The array of constraint values (output)\n \"\"\"\n global terminate\n try:\n if addl_inputs is None:\n result = function(*[v[i] for i in range(numberOfVariables)])\n else:\n result = function([v[i] for i in range(numberOfVariables)], addl_inputs)\n\n objectives = None\n constraints = None\n\n if isinstance(result, tuple):\n if len(result) > 0:\n objectives = result[0]\n if len(result) > 1:\n constraints = result[1]\n elif isinstance(result, list):\n objectives = result\n else:\n objectives = [result]\n\n if objectives:\n if len(objectives) != numberOfObjectives:\n raise ValueError(\"Incorrect number of objectives returned by function\")\n for i in range(len(objectives)):\n if directions and directions[i]:\n o[i] = -objectives[i]\n else:\n o[i] = objectives[i]\n elif numberOfObjectives > 0:\n raise ValueError(\"No objectives returned by function\")\n\n if constraints:\n if len(constraints) != numberOfConstraints:\n raise 
ValueError(\"Incorrect number of constraints returned by function\")\n for i in range(len(constraints)):\n c[i] = constraints[i]\n elif numberOfConstraints > 0:\n raise ValueError(\"No constraints returned by function\")\n\n return 0\n except KeyboardInterrupt:\n terminate=True\n return 1\n return innerFunction\n\nclass Constraint:\n \"\"\" Helper functions for defining constraints.\n\n These functions ensure several conditions hold. First, if the\n constraint is satisfied, the value is 0. If the constraint is\n violated, then the value is non-zero and will scale linearly\n with the degree of violation.\n \"\"\"\n\n precision = 0.1\n\n @staticmethod\n def greaterThan(x, y, epsilon=0.0):\n \"\"\" Defines the constraint x > y. \"\"\"\n return 0.0 if x > y-epsilon else y-x+Constraint.precision\n\n @staticmethod\n def lessThan(x, y, epsilon=0.0):\n \"\"\" Defines the constraint x < y. \"\"\"\n return 0.0 if x < y+epsilon else x-y+Constraint.precision\n\n @staticmethod\n def greaterThanOrEqual(x, y, epsilon=0.0):\n \"\"\" Defines the constraint x >= y. \"\"\"\n return 0.0 if x >= y-epsilon else y-x+Constraint.precision\n\n @staticmethod\n def lessThanOrEqual(x, y, epsilon=0.0):\n \"\"\" Defines the constraint x <= y. \"\"\"\n return 0.0 if x <= y+epsilon else x-y+Constraint.precision\n\n @staticmethod\n def equal(x, y, epsilon=0.0):\n \"\"\" Defines the constraint x == y. \"\"\"\n return 0.0 if abs(y-x) < epsilon else abs(y-x)+Constraint.precision\n\n @staticmethod\n def zero(x, epsilon=0.0):\n \"\"\" Defines the constraint x == 0. \"\"\"\n return Constraint.equal(x, 0.0, epsilon)\n\n @staticmethod\n def nonNegative(x, epsilon=0.0):\n \"\"\" Defines the constraint x >= 0. \"\"\"\n return Constraint.greaterThanOrEqual(x, 0.0, epsilon)\n\n @staticmethod\n def positive(x, epsilon=0.0):\n \"\"\" Defines the constraint x > 0. \"\"\"\n return Constraint.greaterThan(x, 0.0, epsilon)\n\n @staticmethod\n def negative(x, epsilon=0.0):\n \"\"\" Defines the constraint x < 0. \"\"\"\n return Constraint.lessThan(x, 0.0, epsilon)\n\n @staticmethod\n def all(*args):\n \"\"\" Requires all conditions to be satisfied. \"\"\"\n return sum(args)\n\n @staticmethod\n def any(*args):\n \"\"\" Requres at least one condition to be satisfied. \"\"\"\n return 0.0 if 0.0 in args else sum(args)\n\n#Configuration.initialize()\n","repo_name":"FeralFlows/pysedsim","sub_path":"pysedsim/optimization/borg.py","file_name":"borg.py","file_ext":"py","file_size_in_byte":35383,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"63"} +{"seq_id":"27938180327","text":"# import os\n# os.chdir(\"/Users/Tom/Desktop/Python/nuketests/Tests/Apps/Nuke/TestUnits/CaraVR/Tests\")\nimport basicCaraTestClass as BCTC\n\n'''\n Written by Thomas Rice Research QA - Any Questions Please Ask - thomas.rice@thefoundry.co.uk 27/07/2016\n\n To Do:\n - Find a way to get into the group and list the nodes\n'''\n\nnode = BCTC.createNode('C_CameraSolver1_0')\nBCTC.loadImages(node)\nBCTC.CS_matchCameras(node)\nBCTC.CS_solveCameras(node)\nBCTC.setExportOption(node, 3)\nBCTC.exportSelection(node)\n\n\nnodeConnectionSet = set()\n#As we create groups of nodes with this operation we will need to go into it to check what's in there. 
\ngoIntoGroup = nuke.toNode(\"RigWorkflow2D\")\nwith goIntoGroup:\n # Get all of the inputs so we can check their connections\n inputGroup = nuke.allNodes('Input')\n for node in inputGroup:\n nodeConnectionSet.update(BCTC.checkNodeConnections(node.name(), \"Output1\"))\n\n\n\n# The format and connections that should be present\noriginalNodes = set([('AlphaMaskGenerator', 'C_SphericalTransform7', 'JoinViews1', 'C_Blender1', 'Output1'), ('AlphaMaskGenerator5', 'C_SphericalTransform5', 'JoinViews1', 'C_Blender1', 'Output1'), ('AlphaMaskGenerator4', 'C_SphericalTransform4', 'JoinViews1', 'C_Blender1', 'Output1'), ('AlphaMaskGenerator3', 'C_SphericalTransform3', 'JoinViews1', 'C_Blender1', 'Output1'), ('AlphaMaskGenerator2', 'C_SphericalTransform2', 'JoinViews1', 'C_Blender1', 'Output1'), ('AlphaMaskGenerator1', 'C_SphericalTransform1', 'JoinViews1', 'C_Blender1', 'Output1')])\n\n\n\n\n\n\n\nresult = nodeConnectionSet.difference(originalNodes)\n\nif result:\n nuke.tprint('Test failed: there was a difference in the node connections: {0}'.format(result))\n sys.exit(1)\nelse:\n sys.exit(0)\n\n","repo_name":"Thomas-Rice/Test_Harness","sub_path":"TestUnits/CaraVR/Exports/Export_CameraSolver_Manual2D_Z0.py","file_name":"Export_CameraSolver_Manual2D_Z0.py","file_ext":"py","file_size_in_byte":1755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"74602063240","text":"import json\nimport os\n\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtWidgets import *\n\nfrom win32api import GetSystemMetrics\nimport win32gui\n\n\nclass ConfirmWindow(QWidget):\n def __init__(self, parent, text, position, size, onConfirm):\n QWidget.__init__(self, None, Qt.WindowStaysOnTopHint)\n self.parent = parent\n self.onConfirm = onConfirm\n\n # Setting dimensions\n self.screenW = GetSystemMetrics(0)\n self.screenH = GetSystemMetrics(1)\n\n padding = 15\n\n if size is None:\n self.windowW = 200\n self.windowH = 100\n else:\n self.windowW = size[0]\n self.windowH = size[1]\n\n self.windowPaddingLeft = 30\n self.windowsPaddingTop = 70\n\n if position is None:\n x = int(self.screenW/2 - self.windowW/2)\n y = int(self.screenH/2 - self.windowH/2)\n self.position = QPoint(x, y)\n else:\n self.position = position\n\n self.text = text\n\n self.mainLayout = QVBoxLayout()\n self.mainLayout.setContentsMargins(padding, padding, padding, padding)\n\n self.renderWindow()\n\n self.setLayout(self.mainLayout)\n \n\n def renderWindow(self):\n frame = QFrame()\n\n self.buttonMinW = 100\n self.buttonHeight = 25\n\n print(self.position)\n\n # Configurations\n self.setWindowFlags(Qt.Window | Qt.CustomizeWindowHint | Qt.WindowStaysOnTopHint) \n self.setGeometry(self.position.x(), self.position.y(), self.windowW, self.windowH)\n self.setStyleSheet(\"font-size: 16px;\")\n\n confirmLabel = QLabel(self.text)\n labelWidth = self.windowW - 30\n confirmLabel.setFixedWidth(labelWidth)\n confirmLabel.setWordWrap(True) \n\n yesButton = QPushButton(\"Yes\", self)\n yesButton.resize(yesButton.sizeHint())\n yesButton.setStyleSheet(\"background-color: rgb(32, 207, 76);\")\n yesButton.setFixedWidth(self.buttonMinW)\n yesButton.pressed.connect(self.confirm)\n\n noButton = QPushButton(\"No\", self)\n noButton.resize(noButton.sizeHint())\n noButton.setStyleSheet(\"background-color: rgb(32, 123, 207 );\")\n noButton.setFixedWidth(self.buttonMinW)\n noButton.pressed.connect(self.closeWindow)
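\n\n # QFormLayout.addRow(widget) spans the full row, while addRow(a, b)\n # places two widgets side by side on one row, as used just below.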
\n layout = QFormLayout(frame)\n layout.addRow(confirmLabel)\n layout.addRow(yesButton, noButton)\n\n self.mainLayout.addWidget(frame)\n\n\n def confirm(self):\n self.onConfirm()\n self.close()\n\n\n def closeWindow(self):\n self.close()","repo_name":"damianazur/Usability-Testing-Dissertation","sub_path":"LocalApp/src/ConfirmWindow.py","file_name":"ConfirmWindow.py","file_ext":"py","file_size_in_byte":2607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"20045733618","text":"from django.conf import settings\nfrom django.urls import path\nfrom django.conf.urls.static import static\n\nfrom .views import *\n\nurlpatterns = [\n path('', FakeCSVMain.as_view(), name='main'),\n\n path('new_schema/', new_schema, name='new_schema'),\n path('<int:pk>/', edit_schema, name='edit_schema'),\n path('delete_schema/<int:pk>/', delete_schema, name='delete_schema'),\n\n path('<int:pk>/add_column/', add_column, name='add_column'),\n path('<int:pk>/edit_column/<int:column_pk>/', edit_column, name='edit_column'),\n path('<int:pk>/delete_column/<int:column_pk>/', delete_column, name='delete_column'),\n\n path('<int:pk>/data_sets/', data_sets_view, name='data_sets'),\n path('<int:pk>/data_sets/generate_data/', generate_data, name='generate_data'),\n\n path('logout/', logout_user, name='logout'),\n path('login/', LoginUser.as_view(), name='login'),\n] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","repo_name":"Artemoskalenko/FakeCSV","sub_path":"fake_csv/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"6229465913","text":"\"\"\"\nFile Name: genome_domain_dataset.py\nProject: bioseq-learning\n\nFile Description:\n\nThis file contains functions and classes for the conserved domain dataset\nfor genomes. Each eligible genome will be transformed into a sentence\nof words of conserved domains, along with some special words.\nTODO: what would be the target of each \"sentence\"?\n\nThe workflow of this dataset class initialization is shown below:\n1. get all the contig conserved domain csv files\n2. perform train/test split of csv files\n - randomly\n - stratified on organism (with hold-out organism)\n3. get the vocabulary of domains from training set\n4. 
construct training, validation, and test sets\n (1) tokenize the genome contigs with special characters\n (2) summarize the number of sequences and prepare the indexing\n\n\"\"\"\nimport os\nimport pickle\nimport logging\nimport resource\nfrom glob import glob\nfrom enum import Enum\nfrom bisect import bisect\nfrom collections import Counter\nfrom dataclasses import dataclass\nfrom multiprocessing import Pool, cpu_count\nfrom typing import Dict, List, Optional, Set, Tuple\n\nimport torch\nimport pandas as pd\nfrom tqdm import tqdm\nfrom torch.utils.data import Dataset\n\nfrom src import (\n RAW_DATA_DIR_PATH,\n INTERIM_DATA_DIR_PATH,\n PROCESSED_DATA_DIR_PATH,\n)\n\n\nBACTERIA_GENOME_SUMMARY_CSV_FILE_PATH = os.path.join(\n RAW_DATA_DIR_PATH, 'genomes', 'bacteria.csv')\nREF_N_REP_BACTERIA_GENOME_SUMMARY_CSV_FILE_PATH = os.path.join(\n RAW_DATA_DIR_PATH, 'genomes', 'reference_or_representative_bacteria.csv')\nREF_OR_REP_GNOME_PARENT_DIR_PATH = os.path.join(\n INTERIM_DATA_DIR_PATH,\n 'genomes/reference_or_representative_bacteria',\n)\nREF_OR_REP_BACTERIA_CONTIGS_WITH_CDS_FILE_PATH = os.path.join(\n PROCESSED_DATA_DIR_PATH,\n 'genomes/reference_or_representative_bacteria_contigs_'\n 'with_conserved_domains.{annotation}.pickle'\n)\nREF_OR_REP_BACTERIA_CONTIG_CDS_SEQS_FILE_PATH = os.path.join(\n PROCESSED_DATA_DIR_PATH,\n 'genomes/reference_or_representative_bacteria_contig_'\n 'sequences.{annotation}.pickle'\n)\n\n_LOGGER = logging.getLogger(__name__)\nresource.setrlimit(\n resource.RLIMIT_NOFILE,\n (4096, resource.getrlimit(resource.RLIMIT_NOFILE)[1]),\n)\n\n# max allowed overlap for conserved domains\nMAX_CD_OVERLAP: float = 0.5\nHALF_MAX_CD_OVERLAP: float = MAX_CD_OVERLAP / 2\n\n# special markers for domain sequences\nCONTIG_BEGIN_MARKER: str = '<contig>'\nCONTIG_END_MARKER: str = '</contig>'\nGENE_BEGIN_MARKER: str = '<gene>'\nUNKNOWN_MARKER: str = '<unk>'\nPADDING_MARKER: str = '<pad>'\n\nSPECIAL_MARKERS: Set[str] = {\n GENE_BEGIN_MARKER,\n CONTIG_BEGIN_MARKER,\n CONTIG_END_MARKER,\n UNKNOWN_MARKER,\n PADDING_MARKER,\n}\nSPECIAL_MARKER_TOKENIZER: Dict[str, int] = {\n GENE_BEGIN_MARKER: 1,\n CONTIG_BEGIN_MARKER: 2,\n CONTIG_END_MARKER: 3,\n UNKNOWN_MARKER: 4,\n PADDING_MARKER: 0,\n}\n\n\nclass Annotation(Enum):\n \"\"\"Enum class for genome annotation sources in PATRIC database (\n reference: https://docs.patricbrc.org/user_guides/organisms_taxon/\n genome_annotations.html)\n \"\"\"\n PATRIC = 'PATRIC'\n RefSeq = 'RefSeq'\n\n\nclass Organism(Enum):\n \"\"\"Enum class for bacterial organisms in PATRIC database (reference:\n https://en.wikipedia.org/wiki/PATRIC).\n \"\"\"\n BACILLUS = 'bacillus'\n BARTONELLA = 'bartonella'\n BORRELIA = 'borrelia'\n BRUCELLA = 'brucella'\n BURKHOLDERIA = 'burkholderia'\n CAMPYLOBACTER = 'campylobacter'\n CHLAMYDOPHILA = 'chlamydophila'\n CLOSTRIDIUM = 'clostridium'\n COXIELLA = 'coxiella'\n EHRLICHIA = 'ehrlichia'\n ESCHERICHIA = 'escherichia'\n FRANCISELLA = 'francisella'\n HELICOBACTER = 'helicobacter'\n LISTERIA = 'listeria'\n MYCOBACTERIUM = 'mycobacterium'\n RICKETTSIA = 'rickettsia'\n SALMONELLA = 'salmonella'\n SHIGELLA = 'shigella'\n STAPHYLOCOCCUS = 'staphylococcus'\n VIBRIO = 'vibrio'\n YERSINIA = 'yersinia'\n OTHERS = 'others'\n\n\n@dataclass\nclass ContigWithConservedDomains:\n \"\"\"Data class for a genome contig, annotated with features (PATRIC or\n RefSeq) and the corresponding conserved domains.\n \"\"\"\n genome_id: str\n genome_name: Optional[str]\n organism: Optional[Organism]\n ncbi_taxon_id: str\n annotation: Annotation\n contig_accession: str\n contig_feature_df: 
pd.DataFrame\n contig_conserved_domain_df: pd.DataFrame\n contig_feature_csv_file_path: str\n contig_conserved_domain_csv_file_path: str\n\n\ndef __get_organism_from_genome_name(genome_name: str) -> Organism:\n \"\"\"Get the organism from the name of the genome by naive string parsing,\n that is, if its name contains any of the organism strings, the genome\n is of that particular organism.\n\n :param genome_name:\n :type genome_name:\n :return:\n :rtype:\n \"\"\"\n __lower_genome_name = genome_name.lower()\n for __organism in Organism:\n if __organism.value in __lower_genome_name:\n return __organism\n return Organism.OTHERS\n\n\ndef _convert_contigs_to_contigs_with_conserved_domains(\n annotation: Annotation,\n genome_parent_dir_path: str,\n genome_summary_csv_file_path: Optional[str] = None,\n) -> List[ContigWithConservedDomains]:\n \"\"\"Get all the contigs inside a parent directory path into a list of\n ContigWithConservedDomains, which is essentially a data class\n with all the information on features and conserved domain annotations.\n\n :param annotation:\n :type annotation:\n :param genome_parent_dir_path:\n :type genome_parent_dir_path:\n :param genome_summary_csv_file_path: optional genome summary CSV file\n path, which could be downloaded from PATRIC server. If given, this\n function will only process the genomes included in the CSV file by\n checking the \"genome_id\" column.\n :type genome_summary_csv_file_path: str\n :return:\n :rtype:\n \"\"\"\n\n # load the genome summary dataframe\n try:\n _genome_summary_df = pd.read_csv(\n genome_summary_csv_file_path,\n index_col=None,\n usecols=[\n 'Genome ID',\n 'Genome Name',\n 'Organism Name',\n 'NCBI Taxon ID',\n ],\n dtype={\n 'Genome ID': str,\n 'Genome Name': str,\n 'Organism Name': str,\n 'NCBI Taxon ID': int,\n }\n )\n _genome_summary_df.columns = [\n 'genome_id',\n 'genome_name',\n 'organism_name',\n 'ncbi_taxon_id',\n ]\n _genome_summary_df = _genome_summary_df.set_index('genome_id')\n _genome_ids: Set[str] = set(_genome_summary_df.index.values)\n except (ValueError, FileNotFoundError):\n _warning_msg = \\\n f'Failed to load the summary dataframe for all the ' \\\n f'genomes in directory {genome_parent_dir_path}.'\n _LOGGER.warning(_warning_msg)\n _genome_summary_df = None\n\n # get all the paths to the *.{annotation}.csv files in parent dir\n genome_parent_dir_path = os.path.abspath(genome_parent_dir_path)\n _contig_conserved_domain_csv_file_path_pattern = os.path.join(\n genome_parent_dir_path, '**', f'*.{annotation.value}.csv')\n _contig_conserved_domain_csv_file_paths: List[str] = glob(\n _contig_conserved_domain_csv_file_path_pattern, recursive=True)\n\n # construct conserved domain data class for every contig\n _contig_conserved_domains = []\n for __contig_conserved_domain_csv_file_path in \\\n tqdm(_contig_conserved_domain_csv_file_paths):\n\n __split_path = \\\n __contig_conserved_domain_csv_file_path.split(os.sep)\n __genome_id = __split_path[-3]\n __contig_feature_csv_file_path = \\\n __contig_conserved_domain_csv_file_path.replace(\n '/conserved_domains/', '/features/').replace('.csv', '.tsv')\n\n # skip the contig if its feature table does not exist (should not happen)\n if not os.path.exists(__contig_feature_csv_file_path):\n _warning_msg = \\\n f'The feature table file ({__contig_feature_csv_file_path}) ' \\\n f'for the current contig is missing. 
Skipping ...'\n _LOGGER.warning(_warning_msg)\n continue\n\n # skip the contig if the genome ID is not in the summary\n __genome_name, __organism = None, None\n if (_genome_summary_df is not None) and \\\n (__genome_id not in _genome_ids):\n _warning_msg = \\\n f'Genome {__genome_id} is not listed in the genome table ' \\\n f'located in {genome_summary_csv_file_path}. Skipping ...'\n _LOGGER.warning(_warning_msg)\n continue\n\n __contig_accession = __split_path[-1].split('.', 1)[0]\n __contig_feature_df = pd.read_csv(\n __contig_feature_csv_file_path,\n sep='\\t',\n header=0,\n index_col=None,\n dtype={'genome_id': str},\n )\n\n __contig_conserved_domain_df = pd.read_csv(\n __contig_conserved_domain_csv_file_path,\n header=0,\n index_col=None,\n dtype={\n 'genome_id': str,\n 'pssm_id': str,\n 'superfamily_pssm_id': str,\n }\n )\n\n # get the genome organism from genome name in the feature dataframe\n if __genome_name is None:\n __genome_names = __contig_feature_df.genome_name.unique().tolist()\n if len(__genome_names) > 1:\n __genome_name = max(__genome_names, key=len)\n _warning_msg = \\\n f'More than one genome name ({__genome_names}) in ' \\\n f'a single contig feature dataframe for contig ' \\\n f'{__contig_accession} in genome with ID {__genome_id}. ' \\\n f'Using the longest genome name {__genome_name} ...'\n _LOGGER.warning(_warning_msg)\n else:\n __genome_name = __genome_names[0]\n __organism = __get_organism_from_genome_name(__genome_name)\n\n # clean up the feature dataframe\n __contig_feature_df = __contig_feature_df[\n __contig_feature_df.accession == __contig_accession]\n if len(__contig_feature_df) == 0:\n _warning_msg = \\\n f'There are no features for accession {__contig_accession} ' \\\n f'in the feature table of genome with ID {__genome_id}.'\n _LOGGER.warning(_warning_msg)\n continue\n __contig_feature_df.drop('genome_id', axis=1, inplace=True)\n __contig_feature_df.drop('genome_name', axis=1, inplace=True)\n __contig_feature_df.drop('accession', axis=1, inplace=True)\n __contig_feature_df.drop('annotation', axis=1, inplace=True)\n\n # clean up the conserved domain dataframe\n __contig_conserved_domain_df.drop('genome_id', axis=1, inplace=True)\n __contig_conserved_domain_df.drop('genome_name', axis=1, inplace=True)\n\n __contig_with_conserved_domain = ContigWithConservedDomains(\n __genome_id,\n __genome_name,\n __organism,\n (_genome_summary_df.loc[__genome_id, 'ncbi_taxon_id']\n if _genome_summary_df is not None else None),\n annotation,\n __contig_accession,\n __contig_feature_df,\n __contig_conserved_domain_df,\n __contig_feature_csv_file_path,\n __contig_conserved_domain_csv_file_path\n )\n _contig_conserved_domains.append(__contig_with_conserved_domain)\n return _contig_conserved_domains\n\n\ndef _get_contigs_with_conserved_domains(\n annotation: Annotation,\n genome_parent_dir_path: str,\n genome_summary_csv_file_path: Optional[str] = None,\n) -> List[ContigWithConservedDomains]:\n\n contigs_with_cds_file_path: str = \\\n REF_OR_REP_BACTERIA_CONTIGS_WITH_CDS_FILE_PATH.format(annotation=annotation.value)\n if os.path.exists(contigs_with_cds_file_path):\n with open(contigs_with_cds_file_path, 'rb') as _fh:\n return pickle.load(_fh)\n else:\n _contigs = _convert_contigs_to_contigs_with_conserved_domains(\n annotation=annotation,\n genome_parent_dir_path=genome_parent_dir_path,\n genome_summary_csv_file_path=genome_summary_csv_file_path,\n # genome_parent_dir_path=REF_OR_REP_GNOME_PARENT_DIR_PATH,\n # genome_summary_csv_file_path=REF_N_REP_BACTERIA_GENOME_SUMMARY_CSV_FILE_PATH,\n )\n with open(contigs_with_cds_file_path, 
'wb') as _fh:\n pickle.dump(_contigs, _fh)\n return _contigs\n\n\ndef __convert_single_contig_to_domain_sequence(\n contig_with_cds: ContigWithConservedDomains,\n) -> Tuple[str, List[str]]:\n\n _id: str = \\\n f'{contig_with_cds.genome_id}/' \\\n f'{contig_with_cds.contig_accession}'\n _annotation: Annotation = contig_with_cds.annotation\n\n _feature_df: pd.DataFrame = \\\n contig_with_cds.contig_feature_df\n _feature_df: pd.DataFrame = _feature_df[\n _feature_df['feature_type'] == 'CDS'\n ]\n if _annotation == Annotation.PATRIC:\n _feature_df: pd.DataFrame = _feature_df[\n ['patric_id', 'product', 'plfam_id', 'pgfam_id']]\n else:\n _feature_df: pd.DataFrame = _feature_df[\n ['refseq_locus_tag', 'product', 'plfam_id', 'pgfam_id']]\n _feature_df: pd.DataFrame = _feature_df.reset_index(drop=True)\n _feature_df.columns = ['seq_id', 'product', 'plfam_id', 'pgfam_id']\n\n _conserved_domain_df: pd.DataFrame = \\\n contig_with_cds.contig_conserved_domain_df\n\n _hit_types = {'Specific', 'Non-specific', 'Superfamily'}\n _conserved_domain_df: pd.DataFrame = _conserved_domain_df[\n _conserved_domain_df['hit_type'].isin(_hit_types)\n ]\n _conserved_domain_df: pd.DataFrame = \\\n _conserved_domain_df[[\n 'seq_id', 'accession', 'hit_type', 'pssm_id',\n 'start', 'end', 'e_value', 'bitscore',\n ]]\n _conserved_domain_df: pd.DataFrame = \\\n _conserved_domain_df.reset_index(drop=True)\n\n def __get_seq_id(__seq_id: str):\n if __seq_id.count('|') == 1:\n return __seq_id\n elif __seq_id.count('|') >= 2 and \\\n _annotation == Annotation.PATRIC:\n return __seq_id.rstrip('|').rsplit('|', 1)[0]\n elif __seq_id.count('|') >= 2 and \\\n _annotation == Annotation.RefSeq:\n return __seq_id.rstrip('|').rsplit('|', 2)[1]\n else:\n _warning_msg = \\\n f'cannot parse the PATRIC ID from FASTA ' \\\n f'sequence record with name {__seq_id}.'\n print(_warning_msg)\n return ''\n\n _conserved_domain_df['seq_id'] = \\\n _conserved_domain_df['seq_id'].apply(__get_seq_id)\n \n _feature_df = _feature_df.set_index('seq_id')\n _cds_seq_ids = _feature_df.index.values\n _ret_seq: List[str] = [CONTIG_BEGIN_MARKER]\n for __cds_seq_id in _cds_seq_ids:\n __cds_conserved_domain_df = _conserved_domain_df[\n _conserved_domain_df['seq_id'] == __cds_seq_id\n ].copy()\n __cds_conserved_domain_df.sort_values(\n by=['e_value', 'bitscore'],\n ascending=[True, False],\n inplace=True,\n )\n __cds_proc_conserved_domain_df = \\\n pd.DataFrame([], columns=__cds_conserved_domain_df.columns)\n\n while len(__cds_conserved_domain_df) > 0:\n __curr_conserved_domain = \\\n __cds_conserved_domain_df.iloc[0]\n __curr_start = __curr_conserved_domain.start\n __curr_end = __curr_conserved_domain.end\n __cds_proc_conserved_domain_df = \\\n __cds_proc_conserved_domain_df.append(\n __curr_conserved_domain,\n ignore_index=True,\n )\n # drop the hits with more than MAX_CD_OVERLAP\n __overlap = int((__curr_end - __curr_start) * HALF_MAX_CD_OVERLAP)\n __curr_start_w_overlap = __curr_start + __overlap\n __curr_end_w_overlap = __curr_end - __overlap\n __cds_conserved_domain_df.drop(\n __cds_conserved_domain_df[(\n (__cds_conserved_domain_df.start < __curr_end_w_overlap) &\n (__cds_conserved_domain_df.end > __curr_start_w_overlap)\n )].index,\n inplace=True,\n )\n __cds_conserved_domain_df.reset_index(drop=True, inplace=True)\n\n __cds_proc_conserved_domain_df.sort_values(\n by=['start', 'bitscore'],\n inplace=True,\n )\n __cds_product = _feature_df.loc[__cds_seq_id, 'product']\n __cds_plfam_id = _feature_df.loc[__cds_seq_id, 'plfam_id']\n __cds_pgfam_id = 
_feature_df.loc[__cds_seq_id, 'pgfam_id']\n _ret_seq.append(f'{GENE_BEGIN_MARKER}/{__cds_product}/{__cds_plfam_id}/{__cds_pgfam_id}')\n _ret_seq.extend(__cds_proc_conserved_domain_df['accession'].to_list())\n\n _ret_seq.append(CONTIG_END_MARKER)\n return _id, _ret_seq\n\n\ndef _convert_contigs_to_domain_sequences(\n contigs_with_cds: List[ContigWithConservedDomains],\n) -> Dict[str, List[str]]:\n __arg_list_for_single_contig: List[Tuple[ContigWithConservedDomains]] = []\n print('Preparing the arguments for contig conversion ...')\n for __contig_with_cds in tqdm(contigs_with_cds):\n __arg_list_for_single_contig.append((__contig_with_cds, ))\n print('Converting contigs into sequences of conserved domains ...')\n with Pool(cpu_count()) as _pool:\n _contig_cds_seq: List[Tuple[str, List[str]]] = \\\n _pool.starmap(\n __convert_single_contig_to_domain_sequence,\n tqdm(__arg_list_for_single_contig),\n )\n return {__c[0]: __c[1] for __c in _contig_cds_seq}\n\n\nclass GenomeDomainDataset(Dataset):\n \"\"\"Dataset class for (conserved) domains on genomes.\n \"\"\"\n __slots__ = [\n # arguments\n 'annot', # Annotation\n 'sized_seq_len', # int\n 'max_num_paddings', # int\n 'num_domain_vocab', # int\n 'num_gene_target_vocab', # int\n 'include_gene_target', # bool\n # public attr\n 'domain_seqs', # Dict[str, List[str]]\n 'domain_vocab', # Set[str]\n 'domain_tokenizer', # Dict[str, int]\n 'gene_target_vocab', # Optional[Set[str]]\n 'gene_target_tokenizer', # Optional[Dict[str, int]]\n 'tokenized_domain_seqs', # Dict[str, torch.LongTensor]\n # private attr\n '_len', # int\n '_seq_ids', # Sequence[str]\n '_accumulated_num_sized_domain_seqs', # Sequence[int]\n ]\n\n def __init__(\n self,\n annot: Annotation,\n sized_seq_len: int,\n max_num_paddings: int,\n num_domain_vocab: int,\n num_gene_target_vocab: int,\n ):\n self.annot: Annotation = annot\n self.sized_seq_len: int = sized_seq_len\n self.max_num_paddings: int = max_num_paddings\n self.num_domain_vocab: int = num_domain_vocab\n self.num_gene_target_vocab: int = num_gene_target_vocab\n self.include_gene_target: bool = (num_gene_target_vocab >= 0)\n self._check_arg_sanity()\n\n self.domain_seqs: Dict[str, List[str]] = self._get_domain_seqs()\n self.domain_vocab: Set[str] = \\\n self._get_domain_vocab()\n self.domain_tokenizer: Dict[str, int] = \\\n self._get_domain_tokenizer()\n self.gene_target_vocab: Optional[Set[str]] = \\\n self._get_gene_target_vocab()\n self.gene_target_tokenizer: Optional[Dict[str, int]] = \\\n self._get_gene_target_tokenizer()\n self.tokenized_domain_seqs: Dict[str, torch.LongTensor] = \\\n self._get_tokenized_domain_seqs()\n self._prepare_indexing()\n\n def _check_arg_sanity(self):\n if self.max_num_paddings > self.sized_seq_len:\n _warning_msg = \\\n f'The maximum number of paddings is greater than the ' \\\n f'window size of the sequences, which could yield ' \\\n f'sequences fully made of paddings.'\n _LOGGER.warning(_warning_msg)\n\n def _get_domain_seqs(self) -> Dict[str, List[str]]:\n contig_cds_seq_file_path = \\\n REF_OR_REP_BACTERIA_CONTIG_CDS_SEQS_FILE_PATH.format(\n annotation=self.annot.value\n )\n if os.path.exists(contig_cds_seq_file_path):\n with open(contig_cds_seq_file_path, 'rb') as __fh:\n contig_cds_seqs: Dict[str, List[str]] = pickle.load(__fh)\n else:\n contigs_with_cds: List[ContigWithConservedDomains] = \\\n _get_contigs_with_conserved_domains(\n self.annot,\n REF_OR_REP_GNOME_PARENT_DIR_PATH,\n REF_N_REP_BACTERIA_GENOME_SUMMARY_CSV_FILE_PATH,\n )
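\n # NOTE: the converted contig sentences are cached on disk below;\n # deleting the pickle file forces a full rebuild on the next run.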
\n contig_cds_seqs = _convert_contigs_to_domain_sequences(\n contigs_with_cds=contigs_with_cds,\n )\n with open(contig_cds_seq_file_path, 'wb') as __fh:\n pickle.dump(\n contig_cds_seqs,\n __fh, protocol=pickle.HIGHEST_PROTOCOL,\n )\n return contig_cds_seqs\n\n def _get_domain_vocab(self) -> Set[str]:\n if hasattr(self, 'domain_vocab'):\n return self.domain_vocab\n assert self.num_domain_vocab > len(SPECIAL_MARKERS)\n _domain_seqs: Dict[str, List[str]] = self._get_domain_seqs()\n _vocab_counter = Counter()\n for __seq in _domain_seqs.values():\n _vocab_counter.update(__seq)\n _vocab: Set[str] = SPECIAL_MARKERS.copy()\n for __v, _ in _vocab_counter.most_common():\n if __v.startswith(f'{GENE_BEGIN_MARKER}/'):\n continue\n _vocab.add(__v)\n if len(_vocab) == self.num_domain_vocab:\n break\n return _vocab\n\n def _get_domain_tokenizer(self) -> Dict[str, int]:\n if hasattr(self, 'domain_tokenizer'):\n return self.domain_tokenizer\n # copy so that the module-level SPECIAL_MARKER_TOKENIZER is not mutated\n _token: Dict[str, int] = SPECIAL_MARKER_TOKENIZER.copy()\n for __v in self._get_domain_vocab():\n if __v not in _token:\n _token[__v] = len(_token)\n return _token\n\n def _get_gene_target_vocab(self) -> Optional[Set[str]]:\n if hasattr(self, 'gene_target_vocab'):\n return self.gene_target_vocab\n if not self.include_gene_target:\n return None\n _domain_seqs: Dict[str, List[str]] = self._get_domain_seqs()\n _vocab_counter = Counter()\n for __seq in _domain_seqs.values():\n _vocab_counter.update([\n # __d has the format of '<gene>/product/plfam_id/pgfam_id'\n # TODO: should we treat hypothetical proteins as if\n # they are unknown?\n __d.split('/')[-1] for __d in __seq\n if __d.startswith(f'{GENE_BEGIN_MARKER}/')\n ])\n print(f'total number of gene targets: {len(_vocab_counter)}')\n _vocab: Set[str] = set([UNKNOWN_MARKER] + [\n __v for __v, _ in\n _vocab_counter.most_common(self.num_gene_target_vocab - 1)\n ])\n return _vocab\n\n def _get_gene_target_tokenizer(self) -> Optional[Dict[str, int]]:\n if hasattr(self, 'gene_target_tokenizer'):\n return self.gene_target_tokenizer\n if not self.include_gene_target:\n return None\n _token: Dict[str, int] = {UNKNOWN_MARKER: 0}\n for __v in self._get_gene_target_vocab():\n if __v not in _token:\n _token[__v] = len(_token)\n return _token\n\n def _get_tokenized_domain_seqs(self) -> Dict[str, torch.LongTensor]:\n \"\"\"get the tokenized domain sequences with padding\n \"\"\"\n if hasattr(self, 'tokenized_domain_seqs'):\n return self.tokenized_domain_seqs\n print(\n f'Tokenizing {len(self.domain_seqs)} contig sequences '\n f'of domains with {len(self.domain_vocab)} tokens ...'\n )\n _unk_token: int = SPECIAL_MARKER_TOKENIZER[UNKNOWN_MARKER]\n _pad_token: int = SPECIAL_MARKER_TOKENIZER[PADDING_MARKER]\n _paddings: List[int] = [_pad_token] * self.max_num_paddings\n _tk_seqs: Dict[str, torch.LongTensor] = {}\n for __seq_id, __seq in tqdm(self._get_domain_seqs().items()):\n # split('/') to strip the gene targets from gene marker\n # e.g. 
'<gene>/product/plfam_id/pgfam_id' -> '<gene>'\n __tk_seq: torch.LongTensor = torch.LongTensor([\n self.domain_tokenizer.get(__cd.split('/')[0], _unk_token)\n for __cd in __seq\n ] + _paddings)\n _tk_seqs[__seq_id] = __tk_seq\n return _tk_seqs\n\n def _prepare_indexing(self):\n # sized sequences = padded sequences of given window size\n _seq_ids: List[str] = []\n _total_num_sized_domain_seqs: int = 0\n _accumulated_num_sized_domain_seqs: List[int] = []\n\n for __seq_id, __seq in self._get_tokenized_domain_seqs().items():\n _seq_ids.append(__seq_id)\n _num_seqs = len(__seq) - self.sized_seq_len + 1\n _total_num_sized_domain_seqs += _num_seqs\n _accumulated_num_sized_domain_seqs.append(\n _total_num_sized_domain_seqs)\n\n self._len = _total_num_sized_domain_seqs\n self._seq_ids = _seq_ids\n self._accumulated_num_sized_domain_seqs = \\\n _accumulated_num_sized_domain_seqs\n\n def __len__(self) -> int:\n if not hasattr(self, '_len'):\n self._prepare_indexing()\n return self._len\n\n def __get_sized_tk_gene_target_seq(\n self,\n seq_id: str,\n sized_seq_start_pos: int,\n sized_seq_end_pos: int,\n ) -> Optional[torch.LongTensor]:\n if not self.include_gene_target:\n return None\n _domain_seq: List[str] = self.domain_seqs[seq_id]\n _sized_domain_seq: List[str] = \\\n _domain_seq[sized_seq_start_pos: sized_seq_end_pos]\n _tk_gene_target_seq: List[int] = []\n _unk_token: int = self.gene_target_tokenizer[UNKNOWN_MARKER]\n for __d in _sized_domain_seq:\n if __d.startswith(f'{GENE_BEGIN_MARKER}/'):\n __gene_target: str = __d.split('/')[1]\n __tk_gene_target: int = \\\n self.gene_target_tokenizer.get(__gene_target, _unk_token)\n _tk_gene_target_seq.append(__tk_gene_target)\n return torch.LongTensor(_tk_gene_target_seq)\n\n def __getitem__(\n self,\n index: int,\n ) -> Tuple[torch.LongTensor, Optional[torch.LongTensor]]:\n # index = index of the data set\n # seq index = index of all contig sequences of domains\n _seq_index: int = \\\n bisect(self._accumulated_num_sized_domain_seqs, index)\n _seq_id: str = self._seq_ids[_seq_index]\n\n _sized_seq_start_pos: int = index if _seq_index == 0 else \\\n (index - self._accumulated_num_sized_domain_seqs[_seq_index - 1])\n _sized_seq_end_pos: int = _sized_seq_start_pos + self.sized_seq_len\n\n _tk_domain_seq: torch.LongTensor = \\\n self.tokenized_domain_seqs[_seq_id]\n _sized_tk_domain_seq: torch.LongTensor = \\\n _tk_domain_seq[_sized_seq_start_pos: _sized_seq_end_pos]\n\n _sized_tk_gene_target_id_seq: Optional[torch.LongTensor] = \\\n self.__get_sized_tk_gene_target_seq(\n seq_id=_seq_id,\n sized_seq_start_pos=_sized_seq_start_pos,\n sized_seq_end_pos=_sized_seq_end_pos,\n )\n return _sized_tk_domain_seq, _sized_tk_gene_target_id_seq\n\n\ndset = GenomeDomainDataset(\n annot=Annotation.PATRIC,\n sized_seq_len=1000,\n max_num_paddings=500,\n num_domain_vocab=2000,\n num_gene_target_vocab=2000,\n)\n\n# class GenomeDomainIterDataset(IterableDataset, GenomeDataset)\n# _tokenized_gene_begin_marker: int = \\\n# SPECIAL_MARKER_TOKENIZER[GENE_BEGIN_MARKER]\n# _num_genes, _num_domains = 0, 0\n# for _, __tokenized_domain_seq in \\\n# tqdm(dset.tokenized_domain_seqs.items()):\n# __num_genes = sum(__tokenized_domain_seq == _tokenized_gene_begin_marker)\n# __num_domains = len(__tokenized_domain_seq) - 2 - __num_genes - dset.max_num_paddings\n# _num_genes += __num_genes\n# _num_domains += 
__num_domains\n","repo_name":"xduan7/bioseq-learning","sub_path":"src/datasets/genome_domain_dataset.py","file_name":"genome_domain_dataset.py","file_ext":"py","file_size_in_byte":28705,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"63"} +{"seq_id":"26090374087","text":"import discord\nfrom discord.ext import commands\nfrom discord.ext.pages import Paginator, Page\nfrom discord.commands import slash_command, Option\nfrom colorama import Fore\n\nclass Help(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n print(Fore.GREEN + '| help.py loaded')\n\n\n @slash_command(description=\"Show a list of all commands\")\n async def help(self, ctx):\n\n embed1 = discord.Embed(\n title=\"**__GENERAL HELP__**\",\n color=discord.Color.blue()\n )\n embed1.add_field(name=\"/ping\", value=\"Shows you the bot's ping\", inline=False)\n embed1.add_field(name=\"/about\", value=\"Shows you info about the bot\", inline=False)\n embed1.set_footer(text=\"Page 1\")\n\n embed2 = discord.Embed(\n title=\"**__LEVEL SYSTEM HELP__**\", \n color=discord.Color.green())\n embed2.add_field(name=\"/level\", value=\"Shows your level\", inline=False)\n embed2.add_field(name=\"/lvl_leaderboard\", value=\"Shows the level leaderboard\", inline=False)\n embed2.add_field(name=\"/change_xp\", value=\"Change a user's XP (admins only)\", inline=False)\n embed2.set_footer(text=\"Page 2\")\n\n embed3 = discord.Embed(\n title=\"**__ECONOMY SYSTEM HELP__**\", \n color=discord.Color.gold())\n embed3.add_field(name=\"/daily\", value=\"Claim your daily reward\", inline=False)\n embed3.add_field(name=\"/event\", value=\"Run an event\", inline=False)\n embed3.add_field(name=\"/flammen\", value=\"Shows your flames\", inline=False)\n embed3.add_field(name=\"/eco_leaderboard\", value=\"Shows the economy leaderboard\", inline=False)\n embed3.add_field(name=\"/change_flame\", value=\"Change a user's flames (admins only)\")\n embed3.set_footer(text=\"Page 3\")\n\n embed4 = discord.Embed(\n title=\"**__Admin Help__**\", \n color=0xDF0101)\n embed4.add_field(name=\"/kick\", value=\"Kick a user\", inline=False)\n embed4.add_field(name=\"/ban\", value=\"Ban a user\", inline=False)\n embed4.add_field(name=\"/timeout\", value=\"Time out a user\", inline=False)\n embed4.add_field(name=\"/setup\", value=\"Set up your server\")\n embed4.set_footer(text=\"Page 4\")\n \n pages = [\n Page(embeds=[embed1]),\n Page(embeds=[embed2]),\n Page(embeds=[embed3]),\n Page(embeds=[embed4])\n ]\n paginator = Paginator(pages=pages, author_check=True, disable_on_timeout=True)\n \n await paginator.respond(ctx.interaction, ephemeral=True) \n\n \ndef setup(bot):\n bot.add_cog(Help(bot))","repo_name":"niklasksr/fire-bot.py","sub_path":"cogs/help.py","file_name":"help.py","file_ext":"py","file_size_in_byte":2774,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"71230727319","text":"\"\"\"\nDevelop a program that generates N random one-digit\npositive numbers. N is entered from the keyboard. Show the\nnumbers as they are created. 
Al finalizar\nmostrar su suma.\"\"\"\n\nimport random\n\nnum = int(input(\"Ingrese cantidad de números a generar: \"))\nsuma = 0\nfor i in range(num):\n    aleatorio = random.randint(1, 9)  # un dígito positivo, como pide la consigna\n    suma = suma + aleatorio\n    print(aleatorio)\n\nprint(f'La suma de todos los aleatorios es: {suma}')","repo_name":"arielden/Basic_training","sub_path":"FreePractice/Argentina_Programa/clase_17_random.py","file_name":"clase_17_random.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"22568596589","text":"import pytest\n\nfrom solution import is_alt\n\n\ntests = [\n    (\"amazon\", True),\n    (\"apple\", False),\n    (\"banana\", True),\n    (\"orange\", False),\n    (\"helipad\", True),\n    (\"yay\", True),\n]\n\n\n@pytest.mark.parametrize(\n    \"s, expected\", tests\n)\ndef test_is_alt(s, expected):\n    assert is_alt(s) == expected\n","repo_name":"estraviz/codewars","sub_path":"6_kyu/Are we alternate?/python/test_solution.py","file_name":"test_solution.py","file_ext":"py","file_size_in_byte":306,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"85"} +{"seq_id":"2172462447","text":"import math\n\n# a)\n\nN = []\n\nfor i in range(10):\n\n    n = float(input(\"Unesi točku: \"))\n\n    N.append(n)\n\nar_sred = sum(N)/len(N)\n\nsuma = 0  # renamed from 'sum' so the builtin sum() is not shadowed\n\nfor i in N:\n\n    suma += (i - ar_sred)**2\n\nstand_dev = math.sqrt(suma / (len(N)-1))\n\nprint(N)\nprint(ar_sred)\nprint(stand_dev)\n\n# b) korištenje gotovih modula , npr. statistics\n\nimport statistics\n\nN = []\n\nfor i in range(10):\n\n    n = float(input(\"Unesi točku: \"))\n\n    N.append(n)\n\nprint(N)\nprint(statistics.mean(N))\nprint(statistics.stdev(N))","repo_name":"leajambr/PAF","sub_path":"Vjezbe_3/arithm.py","file_name":"arithm.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"2868924800","text":"from ratelimit.db.base import ProjectKey\nfrom ratelimit.db.limit import LimitDatabase\nfrom ratelimit.db.project import ProjectDatabase\nfrom ratelimit.db.key import KeyDatabase\n\nclass ProjectKeyDatabase(LimitDatabase, ProjectDatabase, KeyDatabase):\n    \"\"\"\n    Для работы с таблицей ProjectKey в базе данных.\n\n    ProjectKeySubquery:\n        вывод sql запроса:\n        {'id': 1, 'project_id': 1, 'key_id': 1}\n\n    ProjectKey_Project_Key:\n        вывод sql запроса:\n        {'project_id': 1, 'key_id': 1, 'project_sentry_id': '1', 'slug': 'testing', 'id': 1, 'key_sentry_id': '00000000000000000000000000000000', 'limit_id': 12}\n\n    get_project_key:\n        получает список всех проектов и их ключей\n\n    creating_array_project_and_key:\n        создает список из проектов и их ключей\n        добавляет к проектам данные о лимитах\n        добавляет к проектам список ключей\n        добавляет к ключам данные о лимитах\n    \"\"\"\n\n    def __init__(self, **kwargs):\n        self.projects_keys: list = []\n        self.project_list: list = []\n        super().__init__(**kwargs)\n\n    ProjectKeyAlias = ProjectKey.alias()\n    ProjectKeySubquery = (ProjectKeyAlias\n        .select(ProjectKeyAlias)\n        .alias('tProjectKey')\n    )\n\n    ProjectKey_Project_Key = (ProjectKeyAlias\n        .select(\n            ProjectKeyAlias.project_id, \n            ProjectKeyAlias.key_id, \n            ProjectDatabase.ProjectAlias.project_sentry_id, \n            ProjectDatabase.ProjectAlias.slug, \n            KeyDatabase.KeyAlias\n        )\n        .join(ProjectDatabase.ProjectAlias)\n        .switch()\n        .join(KeyDatabase.KeyAlias)\n    )\n\n    def get_project_key(self):\n        for data_dict in (self.ProjectKey_Project_Key).dicts():\n            self.projects_keys.append(data_dict)\n\n        return self.projects_keys\n\n    def creating_array_project_and_key(self):\n        for project_data in self.projects:\n            details = project_data.copy()\n            key_details = []\n\n            for limit_data in self.limits:\n                if project_data['limit_id'] == limit_data['id']:\n                    details['limit'] = limit_data\n\n            for project_key_data in self.projects_keys:\n                if project_key_data['project_id'] == project_data['id']:\n\n                    for key_data in self.keys:\n                        if project_key_data['key_id'] == key_data['id']:\n\n                            for limit_data in self.limits:\n                                if key_data['limit_id'] == limit_data['id']:\n                                    key_data['limit'] = limit_data\n\n                            key_details.append(key_data)\n\n            details['keys'] = key_details\n            self.project_list.append(details)\n\n        return self.project_list\n","repo_name":"antohhh93/sentry-rate-limit","sub_path":"ratelimit/db/project_key.py","file_name":"project_key.py","file_ext":"py","file_size_in_byte":3007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"36548330047","text":"# in progress \nclass Node:\n    def __init__(self, data=None, next=None, prev=None):\n        self.data = data\n        self.next = next\n        self.prev = prev\n\nclass DoubleLinkedList:\n    def __init__(self):\n        self.head = None\n\n    def prepend(self, data):\n        new_node = Node(data, self.head, None) #next node is the old head\n        if self.head:\n            self.head.prev = new_node #the old head must point back at the new head\n        self.head = new_node\n\n    def append(self, data):\n        if self.head is None:\n            self.head = Node(data, None, None)\n            return\n        \n        current = self.head\n        while current.next: #while current is not null\n            current = current.next\n        \n        current.next = Node(data, None, current) #since it is at the end of the list, next node is None \n\n    def print_forward(self):\n        if self.head is None:\n            print(\"linked list is empty\")\n            return\n        \n        current = self.head\n        output = \"\"\n        while current:\n            output += str(current.data) + \" --> \"\n            current = current.next\n        \n        output += \"null\"\n        print(output)\n\n    def print_backward(self):\n        if self.head is None:\n            print(\"linked list is empty\")\n            return\n        current = self.head\n        while current.next:\n            current = current.next\n        output = \"\" #was missing, which raised a NameError below\n        while current:\n            output += str(current.data) + \" --> \"\n            current = current.prev\n\n        output += \"null\"\n        print(output)\n\n    def insert_values(self, data_list):\n        self.head = None\n        for data in data_list:\n            self.append(data)\n\n    def get_length(self):\n        count = 0\n        current = self.head\n        while current:\n            count += 1\n            current = current.next\n        \n        return count \n    \n    def remove_at(self, index):\n        if index<0 or index>=self.get_length():\n            raise Exception(\"invalid index\")\n        \n        if index==0:\n            self.head = self.head.next\n            if self.head:\n                self.head.prev = None #drop the back-link to the removed node\n            return\n\n        count = 0\n        current = self.head\n        while current:\n            if count == index - 1:\n                current.next = current.next.next\n                if current.next:\n                    current.next.prev = current #keep the back-link consistent\n                break \n            current = current.next\n            count += 1\n    \n    def insert_at(self, index, data):\n        if index<0 or index>=self.get_length():\n            raise Exception(\"invalid index\")\n        \n        if index==0:\n            self.prepend(data)\n            return \n        \n        count = 0\n        current = self.head\n        while current:\n            if count == index - 1:\n                node = Node(data, current.next, current) #prev must be current, not current.prev\n                if node.next:\n                    node.next.prev = node\n                current.next = node\n                return\n            current = current.next\n            count += 1\n","repo_name":"melloweentea/data-structures-and-algorithms","sub_path":"dbl_linked_list.py","file_name":"dbl_linked_list.py","file_ext":"py","file_size_in_byte":2717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"39584159217","text":"import requests\nimport os\nfrom tqdm import tqdm\nimport hashlib\nimport re\n\n# Define the base URL for your
Hugging Face repository\nbase_download_url = \"https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main\"\n\nfile_urls = [\n f\"{base_download_url}/text_encoder/config.json\", \n f\"{base_download_url}/unet/config.json\", \n f\"{base_download_url}/vae/config.json\", \n f\"{base_download_url}/tokenizer/merges.txt\",\n f\"{base_download_url}/tokenizer/special_tokens_map.json\",\n f\"{base_download_url}/tokenizer/tokenizer_config.json\",\n f\"{base_download_url}/tokenizer/vocab.json\",\n f\"{base_download_url}/scheduler/scheduler_config.json\",\n f\"{base_download_url}/model_index.json\",\n f\"{base_download_url}/safety_checker/pytorch_model.bin\",\n f\"{base_download_url}/text_encoder/pytorch_model.bin\",\n f\"{base_download_url}/text_encoder/model.fp16.safetensors\",\n f\"{base_download_url}/vae/diffusion_pytorch_model.bin\",\n f\"{base_download_url}/vae/diffusion_pytorch_model.fp16.safetensors\",\n f\"{base_download_url}/unet/diffusion_pytorch_model.bin\", \n f\"{base_download_url}/unet/diffusion_pytorch_model.fp16.safetensors\", \n]\n\nlocal_file_paths = [\n \"text_encoder/config.json\",\n \"unet/config.json\",\n \"vae/config.json\",\n \"tokenizer/merges.txt\",\n \"tokenizer/special_tokens_map.json\",\n \"tokenizer/tokenizer_config.json\",\n \"tokenizer/vocab.json\",\n \"scheduler/scheduler_config.json\",\n \"model_index.json\",\n \"safety_checker/pytorch_model.bin\",\n \"text_encoder/pytorch_model.bin\",\n \"text_encoder/model.fp16.safetensors\",\n \"vae/diffusion_pytorch_model.bin\",\n \"vae/diffusion_pytorch_model.fp16.safetensors\",\n \"unet/diffusion_pytorch_model.bin\",\n \"unet/diffusion_pytorch_model.fp16.safetensors\"\n]\n\nscript_directory = os.path.dirname(os.path.abspath(__file__))\nsd_root = \"models/StableDiffusion/\"\n\ndef download_file(url, local_path):\n response = requests.get(url, stream=True)\n total_size = int(response.headers.get('content-length', 0))\n with open(local_path, 'wb') as file:\n with tqdm(total=total_size, unit='B', unit_scale=True, desc=local_path, leave=True) as pbar:\n for data in response.iter_content(chunk_size=1024):\n file.write(data)\n pbar.update(len(data))\n\nfor url, local_path in zip(file_urls, local_file_paths):\n # Determine full local path\n local_path_full = os.path.join(script_directory, sd_root, local_path)\n os.makedirs(os.path.dirname(local_path_full), exist_ok=True)\n\n should_download = False\n # For bin, safetensors, and ckpt files, perform SHA-256 check\n if local_path_full.endswith(('.bin', '.safetensors', '.ckpt')):\n try:\n with open(local_path_full, 'rb') as file:\n local_sha256 = hashlib.sha256(file.read()).hexdigest()\n except FileNotFoundError:\n print(f\"Local file {local_path_full} not found. Redownloading.\")\n should_download = True\n else:\n sha256_url = url.replace(\"/resolve/\", \"/raw/\")\n response = requests.get(sha256_url)\n\n remote_sha256_response = response.text\n match = re.search(r'sha256:(\\w+)', remote_sha256_response)\n\n if match:\n remote_sha256 = match.group(1)\n if local_sha256 != remote_sha256:\n print(f\"local_sha256 {local_sha256} doesn't match remote {remote_sha256}. Redownloading: {local_path_full}\")\n should_download = True\n else:\n print(f\"Already have {local_path_full}. 
Skipping...\")\n else:\n print(f\"Failed to extract SHA-256 hash from response.\")\n should_download = True\n else: # For txt and json files, check file size\n response = requests.head(url)\n remote_size = int(response.headers.get('content-length', 0))\n try:\n local_size = os.path.getsize(local_path_full)\n except FileNotFoundError:\n local_size = 0\n\n if local_size != remote_size:\n print(f\"Local file size {local_size} doesn't match remote size {remote_size}. Redownloading: {local_path_full}\")\n should_download = True\n else:\n print(f\"Already have{local_path_full}.\")\n\n if should_download:\n try:\n download_file(url, local_path_full)\n print(f\"Downloaded: {local_path_full}\")\n except Exception as e:\n print(f\"Failed to download {url}: {e}\")\n","repo_name":"Slowly-Grokking/AD-Evo-Tuner","sub_path":"EZ_Facehugger.py","file_name":"EZ_Facehugger.py","file_ext":"py","file_size_in_byte":4514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"85"} +{"seq_id":"13417652205","text":"import os\nimport pathlib\nimport re\nimport typing\n\nimport dotenv\nimport github as pygithub\nimport yaml\n\n\ndotenv.load_dotenv(dotenv.find_dotenv())\n\n\nif __name__ == '__main__':\n config: dict = yaml.load(pathlib.Path(\"config.yml\").open(mode=\"r\"), yaml.FullLoader)\n\n gh = pygithub.Github(login_or_token=os.getenv(\"GITHUB_TOKEN\"))\n user = gh.get_user(config.get(\"user\"))\n\n # print(\"Public open PRs:\")\n # for pr in gh.search_issues(query=f'author:{user.login} is:public type:pr state:open'):\n # print(pr.repository.name, pr)\n #\n # print(\"Public merged PRs:\")\n # for pr in gh.search_issues(query=f'author:{user.login} is:public type:pr is:merged'):\n # print(pr.repository.name, pr)\n\n prs_output = [\n \"\",\n \"## ⛙ My Pull Requests\",\n \"| Repository | Pull Request | Status | Changes |\",\n \"| --- | --- | --- | --- |\",\n ]\n for pr_link in config.get(\"pull-requests\"):\n pr_link: typing.Dict\n repo = gh.get_repo(pr_link.get(\"repo\"))\n pr = repo.get_pull(pr_link.get(\"id\"))\n prs_output.append(\n f\"| `{repo.full_name.split('/')[0]}`/`{repo.name}` | [{pr.title}]({pr.html_url}) \"\n f\"| {'✔' if pr.merged else ''} | `+{pr.additions}/-{pr.deletions}` |\"\n )\n prs_output.append(\"\")\n\n readme_file = pathlib.Path(\"README.md\")\n readme_content = readme_file.read_text(encoding=\"utf-8\")\n\n readme_content = re.sub(f\"{prs_output[0]}.*{prs_output[-1]}\", \"\\n\".join(prs_output), readme_content, flags=re.DOTALL)\n\n readme_file.write_text(readme_content, encoding=\"utf-8\")\n","repo_name":"soar/soar","sub_path":"tool.py","file_name":"tool.py","file_ext":"py","file_size_in_byte":1614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"4961709907","text":"import sys\r\ninput = lambda: sys.stdin.readline().rstrip()\r\n\r\nif __name__ == '__main__':\r\n N = int(input())\r\n memo = [0] * (N + 1)\r\n\r\n for x in range(1, N + 1):\r\n if x < 7:\r\n memo[x] = x\r\n continue\r\n\r\n memo[x] = max(memo[x - 3] * 2, memo[x - 4] * 3, memo[x - 5] * 4)\r\n\r\n # 성질 1. Ctrl+A, Ctrl+C, Ctrl+V는 한몸이다. 서로 떨어져 있을 때보다 같이 붙어있을 때 최대이기 때문이다.\r\n # 성질 2. Ctrl+V의 경우 연속으로 최대 3번까지 쓸 수 있다.\r\n # -> 4번 이상 사용할 경우 Ctrl+A,C,V를 두 번 사용하는 것이 이득이다. 왜냐하면, 값은 동일하게 4배로 증가하지만\r\n # 버퍼가 두 배 차이나기 때문이다.\r\n print(memo[N])\r\n","repo_name":"cutehammond772/problem-solving","sub_path":"백준/Gold/11058. 
크리보드/크리보드.py","file_name":"크리보드.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"14296634081","text":"from __future__ import absolute_import, unicode_literals\n\nfrom django.conf import settings\nfrom django.utils.timezone import now\nfrom mailmanclient import Client, MailmanConnectionError\n\nfrom hyperkitty.lib.cache import cache\n\nimport logging\nlogger = logging.getLogger(__name__)\n\n\nMailmanClient = Client\ndef get_mailman_client():\n # easier to patch during unit tests\n client = MailmanClient('%s/3.0' %\n settings.MAILMAN_REST_SERVER,\n settings.MAILMAN_API_USER,\n settings.MAILMAN_API_PASS)\n return client\n\n\ndef subscribe(list_address, user):\n client = get_mailman_client()\n rest_list = client.get_list(list_address)\n subscription_policy = rest_list.settings.get(\n \"subscription_policy\", \"moderate\")\n if subscription_policy in (\"moderate\", \"confirm_then_moderate\"):\n return # We don't want to bypass moderation, don't subscribe\n try:\n member = rest_list.get_member(user.email)\n except ValueError:\n # not subscribed yet, subscribe the user without email delivery\n member = rest_list.subscribe(user.email,\n \"%s %s\" % (user.first_name, user.last_name))\n member.preferences[\"delivery_status\"] = \"by_user\"\n member.preferences.save()\n cache.delete(\"User:%s:subscriptions\" % user.id)\n\n\nclass FakeMMList:\n def __init__(self, name):\n self.fqdn_listname = name\n self.display_name = name.partition(\"@\")[0]\n self.settings = {\n \"description\": \"\",\n \"subject_prefix\": \"[%s] \" % self.display_name,\n \"created_at\": now().isoformat(),\n \"archive_policy\": \"public\",\n }\n\n\ndef sync_with_mailman():\n from hyperkitty.models import MailingList, Sender\n for mlist in MailingList.objects.all():\n mlist.update_from_mailman()\n # Now sync Sender.mailman_id with Mailman's User.user_id\n # There can be thousands of senders, break into smaller chuncks to avoid\n # hogging up the memory\n buffer_size = 1000\n query = Sender.objects.filter(mailman_id__isnull=True)\n prev_count = query.count()\n lower_bound = 0\n upper_bound = buffer_size\n while True:\n try:\n for sender in query[lower_bound:upper_bound]:\n sender.set_mailman_id()\n except MailmanConnectionError:\n break # Can't refresh at this time\n count = query.count()\n if count == 0:\n break # all done\n if count == prev_count:\n # no improvement...\n if count < upper_bound:\n break # ...and all users checked\n else:\n # there may be some more left\n lower_bound = upper_bound\n upper_bound += buffer_size\n prev_count = count\n logger.info(\"%d emails left to refresh\", count)\n","repo_name":"hyperkitty/hyperkitty","sub_path":"hyperkitty/lib/mailman.py","file_name":"mailman.py","file_ext":"py","file_size_in_byte":2834,"program_lang":"python","lang":"en","doc_type":"code","stars":62,"dataset":"github-code","pt":"85"} +{"seq_id":"950933452","text":"from flask import Flask,render_template,Response\nimport came\nimport cv2\n\n\napp = Flask(__name__)\ndef cam(): \n global camera \n camera = cv2.VideoCapture(0)\n fourcc = cv2.VideoWriter_fourcc('X','V','I','D')\n cap_rec = cv2.VideoWriter('output.avi',fourcc,40.0,(640,480))\n while True:\n ret,frame=camera.read()\n if not ret:\n break\n cap_rec.write(frame)\n ret, buffer = cv2.imencode('.jpg', frame)\n frame = buffer.tobytes()\n yield (b'--frame\\r\\n'b'Content-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n')\n \n@app.route(\"/\")\ndef front():\n 
release_camera()\n return render_template('index.html')\n\ncamera=cv2.VideoCapture()\ndef use_camera():\n global camera\n camera=cv2.VideoCapture(0)\n \ndef release_camera():\n if camera.isOpened():\n camera.release()\n\n@app.route(\"/camera\")\ndef camer():\n release_camera()\n return render_template(\"data.html\",dic = came.proces())\n \n@app.route('/video_feed')\ndef video_feed():\n return Response(cam(), mimetype='multipart/x-mixed-replace; boundary=frame')\n\nif __name__=='__main__':\n app.run(debug=True)\n","repo_name":"khokharhaseeb/Tasks","sub_path":"task3/flas.py","file_name":"flas.py","file_ext":"py","file_size_in_byte":1129,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"6189038548","text":"from django.shortcuts import render, redirect\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom .forms import (\n UserRegisterForm,\n UserUpdateForm,\n ProfileUpadateForm,\n)\nfrom django.views.generic import (\n DetailView,\n UpdateView,\n)\n\nfrom django.contrib.auth.models import User\nfrom .models import Profile\nfrom main.models import Ivent\n\n\n# Авторизационный код:\ndef register(request):\n if request.user.is_authenticated:\n return redirect('ivent-list')\n elif request.method == 'POST':\n form = UserRegisterForm(request.POST)\n if form.is_valid():\n form.save()\n username = form.cleaned_data.get('username')\n messages.success(request, f'Account created for {username}!')\n return redirect('login')\n else:\n form = UserRegisterForm()\n context = {'form': form}\n return render(request, 'users/register.html', context)\n\n@login_required\ndef update (request):\n print(1)\n current_profile=Profile.objects.get(user=request.user)\n if request.method == 'POST':\n form = ProfileUpadateForm(request.POST, \n request.FILES, \n instance=current_profile\n )\n if form.is_valid():\n form.save()\n messages.success(request, f'Профиль обновлен!')\n return redirect(current_profile.get_absolute_url()) \n else:\n form = ProfileUpadateForm(instance=current_profile)\n \n context = {'form' : form}\n\n return render(request, 'users/profile_update.html', context)\n\n\nclass ProfileDetailView(DetailView):\n \"\"\"This detail view maintains user's profile and events he started\"\"\"\n model = Profile\n # 'slug' is used for non-id resource identethication.\n # The field slug refers to must be unique\n slug_field = 'user'\n template_name = 'users/profile.html'\n context_object_name = 'profile'\n def get_context_data(self, *args, **kwargs):\n \"\"\"This function handles additional data to template. 
Key of the 'context' dictionary is\n name of the relation in the template, in this piece it's 'user_ivents'\"\"\"\n\n context = super().get_context_data()\n context['user_ivents'] = Ivent.objects.filter(created_by=Profile.objects.get(user=self.request.user)).order_by('-date_posted')\n return context\n\n","repo_name":"rianriant/ru_hackathon","sub_path":"ruhackathon/users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2572,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"3640182941","text":"#!/usr/bin/env python\n\nimport time\n\ndef safe_mode(func):\n def wrapper(*args, **kwargs):\n resp = None\n retries = 1 # Number of attempts\n while retries > 0:\n try:\n resp = func(*args, **kwargs)\n retries = 0\n except Exception as e:\n print(e)\n retries -= 1\n time.sleep(3)\n return resp\n return wrapper\n\n","repo_name":"jmfl1129/ScheduleSmart","sub_path":"scripts/eventfinders/facebook/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"34082019691","text":"#!/usr/bin/python3\n\nfrom Volunteer import Volunteer, job, priority\nimport csv\nimport random\n# import time\n\n\nclass Slotter():\n def __init__(self) -> 'None':\n volunteers: list[Volunteer] = []\n slottedVolunteers: dict[job, list[Volunteer]] = {}\n requiredVolunteers: dict[job, int] = {}\n\n requiredVolunteers[job.TIMER] = 22\n requiredVolunteers[job.JUDGE] = 22\n requiredVolunteers[job.SIGN_IN] = 8\n requiredVolunteers[job.JIGS_AND_MEASURES] = 8\n requiredVolunteers[job.SECURITY] = 6\n requiredVolunteers[job.NONE] = 0\n requiredVolunteers[job.SPECIAL] = 0\n requiredVolunteers[job.WITHDRAWN] = 0\n\n # flesh out the dicts.\n for j in job:\n slottedVolunteers[j] = []\n\n # Load the csv file into memory\n with open('volunteers.csv') as csv_file:\n csv_reader = csv.DictReader(csv_file, delimiter=',')\n for row in csv_reader:\n preferred_job = job.NONE\n if row['Preferred Job'] != \"\":\n preferred_job = job[row['Preferred Job']]\n\n volunteers.append(\n Volunteer(\n first_name=row['name (First)'],\n last_name=row['name (Last)'],\n will_train=bool(row['Wednesday']),\n will_setup=bool(row['Friday']),\n saturday_games=bool(row['Saturday']),\n saturday_takedown=bool(row['Saturday Takedown']),\n prior_security=bool(row['Security']),\n prior_timer=bool(row['Timer']),\n prior_judge=bool(row['Judge']),\n prior_signin=bool(row['Sign-in']),\n prior_measure=bool(row['Jigs and Measure']),\n preferred_job=preferred_job,\n # start_time=time.strptime(row['Start Time'], '%H:%M'),\n # end_time=time.strptime(row['End Time'], '%H:%M'),\n )\n )\n\n # start by slotting in the \"PREFERRED\" positions. i.e. people who've\n # explicitly asked to do this. 
This also includes people with a\n # WITHDRAWN status.\n for j in job:\n slot(\n sourceVolunteers=volunteers,\n targetVolunteers=slottedVolunteers[j],\n priority=[priority.PREFERRED],\n job=j,\n maxAssignment=0,\n )\n\n # Slot people who can only be timers as timers.\n for v in volunteers:\n if v.canDoArray == [job.TIMER] and v.slotted == job.NONE:\n j = job.TIMER\n slottedVolunteers[j].append(v)\n v.slotted = j\n # print(f\"restricted slotted {v.first_name} {v.last_name} \" +\n # f\"as {j.name}\")\n\n # Slot people who an only do 1 other job into those positions.\n for j in [job.JIGS_AND_MEASURES, job.SIGN_IN]:\n for v in volunteers:\n if (\n j in v.canDoArray\n and len(v.canDoArray) == 2\n and v.slotted == job.NONE\n ):\n slottedVolunteers[j].append(v)\n v.slotted = j\n # print(f\"restricted slotted {v.first_name} \" +\n # f\"{v.last_name} as {j.name}\")\n\n # Shuffle the volunteer order such that people are randomly slotted\n random.shuffle(volunteers)\n\n # Slot people into the jobs in the most need of volunteers.\n while True:\n neededVolunteers: float = -99999999\n i = 0.0\n needy_job: job = job.NONE\n # find the job with the biggest gap\n for j in [job.TIMER, job.JUDGE, job.JIGS_AND_MEASURES,\n job.SIGN_IN, job.SECURITY]:\n\n # is will be the biggest for the job that has the thinnest\n # selction of volunteers. Note that if exceeds 1, slotting\n # will not succeed, as not enough volunteers are available to\n # fill that job.\n\n if availableVolunteerCount(volunteers, j) > 0:\n i = (requiredVolunteers[j] - len(slottedVolunteers[j])) / \\\n availableVolunteerCount(volunteers, j)\n\n if i > neededVolunteers:\n neededVolunteers = i\n needy_job = j\n\n # done when all jobs have the minimum allotment.\n if i == 0.0:\n break\n\n for v in volunteers:\n # print(f\"Can {v.first_name} {v.last_name} \" +\n # f\"do {needy_job.name}: {v.canDo(needy_job)} \" +\n # f\"current slotting: {v.slotted}\")\n\n if v.canDo(needy_job) and v.slotted == job.NONE:\n # print(f\"needy slotted {v.first_name} {v.last_name} \" +\n # f\"as {needy_job.name}\")\n\n slottedVolunteers[needy_job].append(v)\n v.slotted = needy_job\n break\n\n # slot the cross training positions. 
Note that we'll limit the\n # number of volunteers slotted at this point.\n for j in job:\n slot(\n sourceVolunteers=volunteers,\n targetVolunteers=slottedVolunteers[j],\n priority=[priority.CROSS_TRAINING],\n job=j,\n maxAssignment=requiredVolunteers[j],\n )\n\n # slot in the experienced or trained volunteers\n for j in job:\n slot(\n sourceVolunteers=volunteers,\n targetVolunteers=slottedVolunteers[j],\n priority=[priority.EXPERIENCE_OR_TRAINING, priority.PRIOR_JUDGE],\n job=j,\n maxAssignment=requiredVolunteers[j],\n )\n\n # Slot all the people who can judge now.\n slot(\n sourceVolunteers=volunteers,\n targetVolunteers=slottedVolunteers[job.JUDGE],\n priority=[priority.EXPERIENCE_OR_TRAINING],\n job=job.JUDGE,\n maxAssignment=0,\n )\n\n # Slot timers such that there are the same number of timers and judges\n slot(\n sourceVolunteers=volunteers,\n targetVolunteers=slottedVolunteers[job.TIMER],\n priority=[priority.EXPERIENCE_OR_TRAINING],\n job=job.TIMER,\n maxAssignment=len(slottedVolunteers[job.JUDGE]),\n )\n\n # See who we've failed to slot\n for v in volunteers:\n if v.slotted == job.NONE:\n slottedVolunteers[job.NONE].append(v)\n\n # Print out the assignments\n for j in job:\n print(f\"Volunteers for {j.name} - {len(slottedVolunteers[j])}\")\n for v in slottedVolunteers[j]:\n print(f\" {v.first_name} {v.last_name} - \"\n f\"{v.slotted.name} - {v.jobPriority(v.slotted).name}\")\n\n\n# Slot volunteers into positions.\ndef slot(\n sourceVolunteers: list[Volunteer],\n targetVolunteers: list[Volunteer],\n priority: list[priority],\n job: job,\n maxAssignment: int,\n) -> None:\n for v in sourceVolunteers:\n if (maxAssignment > 0 and maxAssignment == len(targetVolunteers)):\n break\n if v.jobPriority(job) in priority and v.slotted == job.NONE:\n targetVolunteers.append(v)\n v.slotted = job\n\n\ndef availableVolunteerCount(\n volunteers: list[Volunteer],\n j: job,\n) -> int:\n count = 0\n for v in volunteers:\n if v.canDo(j) and v.slotted == job.NONE:\n count = count + 1\n return count\n\n\nSlotter()\n","repo_name":"BruceJL/mrg","sub_path":"python/slotter.py","file_name":"slotter.py","file_ext":"py","file_size_in_byte":7654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"8710512831","text":"t=int(input())\r\nresult=[]\r\nfor i in range(t):\r\n paper,position=map(int,input().split())\r\n \r\n if paper==1:\r\n result.append(1)\r\n null=input()\r\n continue\r\n else:\r\n arr=input().split()\r\n arr=[int(j) for j in arr]\r\n count=0\r\n out=arr[0]\r\n m=max(arr[1:])\r\n\r\n while(True):\r\n if out>=m and position==0:\r\n count+=1\r\n result.append(count)\r\n break\r\n elif out>=m and position!=0:\r\n position-=1\r\n count+=1\r\n arr=arr[1:]\r\n out=arr[0]\r\n m=max(arr)\r\n elif out val_loss:\n best_val_loss = val_loss\n seleted_acc = test_acc\n if args.dataset in ['VISDA-C','VLCS','terra_incognita'] or args.imbalance:\n selected_per_class_acc = test_per_class_acc\n torch.save(model.state_dict(),os.path.join(save_path,'val_best_ckpt.pth'))\n \n # Best acc\n if test_acc > best_acc:\n best_acc = test_acc\n if args.dataset in ['VISDA-C','VLCS','terra_incognita'] or args.imbalance:\n best_per_class_acc = test_per_class_acc\n torch.save(model.state_dict(),os.path.join(save_path,'test_best_ckpt.pth'))\n # Model save\n torch.save(model.state_dict(),os.path.join(save_path,'last_ckpt.pth'))\n \n if args.dataset in ['VISDA-C','VLCS','terra_incognita'] or args.imbalance:\n return best_acc, seleted_acc, result['test_acc'], best_per_class_acc, 
selected_per_class_acc, test_per_class_acc\n else:\n return best_acc, seleted_acc, result['test_acc']\n\ndef get_ckpt(args):\n domain_dict = {\n 'office_home' : ['A','C','P','R'],\n 'PACS' : ['A','C','P','S'],\n 'VLCS' : ['C','L','S','V'],\n 'office31' : ['A','D','W'],\n 'terra_incognita' : ['L100','L38','L43','L46'],\n 'VISDA-C' : ['T','V'],\n 'office_home_RSUT' : ['C','P','R'],\n 'VISDA-C_RSUT' : ['T','V'],\n }\n\n if args.pretrain == 'SHOT':\n root_dir = args.ckpt_dir\n \n ckpts = {}\n ckpts['netF'] = os.path.join(root_dir,f'{args.dataset}/{domain_dict[args.dataset][args.source]}/source_F.pt')\n ckpts['netB'] = os.path.join(root_dir,f'{args.dataset}/{domain_dict[args.dataset][args.source]}/source_B.pt')\n ckpts['netC'] = os.path.join(root_dir,f'{args.dataset}/{domain_dict[args.dataset][args.source]}/source_C.pt')\n \n elif args.pretrain == 'SHOT_LP' or args.pretrain == 'IMGNET_LP':\n root_dir = args.ckpt_dir\n ckpts = os.path.join(root_dir,f'source{args.source}/target{args.target}/last_ckpt.pth')\n print('Load LP model')\n \n elif args.pretrain == 'IMGNET':\n ckpts = None\n print('Source model is pretrained with ImageNet')\n \n else:\n raise NotImplementedError \n \n return ckpts\n\ndef main():\n # Save config\n args.work_dir = os.path.join(f'./logs/{args.dataset}',f'{args.work_dir}/source{args.source}')\n os.makedirs(args.work_dir,exist_ok=True)\n with open(os.path.join(args.work_dir,'config.json'),'w') as f:\n json.dump(args.__dict__,f,indent=2)\n \n # Set seed\n utils.set_seed(args.seed)\n\n # Accuracy\n last_target_acc = {}\n selected_target_acc = {}\n best_target_acc = {}\n # Per-class Accuracy\n last_target_p_acc = {}\n selected_target_p_acc = {}\n best_target_p_acc = {}\n \n if args.dataset == 'office31' or args.dataset == 'office_home_RSUT':\n num_d = 3\n elif args.dataset == 'VISDA-C' or args.dataset == 'VISDA-C_RSUT':\n num_d = 2\n else:\n num_d = 4\n \n # Adapt to all possible target domains from given source pretrained model\n for target in range(num_d):\n if target == args.source:\n continue\n args.target = target\n args.ckpts = get_ckpt(args)\n print(f\"{args.dataset} source {args.source} -> target {target}\")\n if args.dataset in ['VISDA-C','VLCS','terra_incognita'] or args.imbalance:\n per_class_acc = {}\n best_acc,selected_acc,last_acc,best_per_class_acc,selected_per_class_acc,last_per_class_acc = train_on_target(args)\n else:\n best_acc,selected_acc,last_acc = train_on_target(args)\n \n last_target_acc[f'source{args.source}@target{target}'] = last_acc\n selected_target_acc[f'source{args.source}@target{target}'] = selected_acc\n best_target_acc[f'source{args.source}@target{target}'] = best_acc\n \n if args.dataset in ['VISDA-C','VLCS','terra_incognita'] or args.imbalance:\n last_target_p_acc[f'source{args.source}@target{target}'] = last_per_class_acc\n selected_target_p_acc[f'source{args.source}@target{target}'] = selected_per_class_acc\n best_target_p_acc[f'source{args.source}@target{target}'] = best_per_class_acc\n\n # Aggregate Results\n best_target_acc = OrderedDict(sorted(best_target_acc.items()))\n selected_target_acc = OrderedDict(sorted(selected_target_acc.items()))\n last_target_acc = OrderedDict(sorted(last_target_acc.items()))\n with open(os.path.join(args.work_dir,'best_target.json'),'w') as f:\n json.dump(best_target_acc,f)\n with open(os.path.join(args.work_dir,'selected_target.json'),'w') as f:\n json.dump(selected_target_acc,f)\n with open(os.path.join(args.work_dir,'last_target.json'),'w') as f:\n json.dump(last_target_acc,f)\n print(f\"Best target acc 
: \\n{best_target_acc}\")\n print(f\"Selected target acc : \\n{selected_target_acc}\")\n print(f\"Last target acc : \\n{last_target_acc}\")\n \n if args.dataset in ['VISDA-C','VLCS','terra_incognita'] or args.imbalance:\n best_target_p_acc = OrderedDict(sorted(best_target_p_acc.items()))\n selected_target_p_acc = OrderedDict(sorted(selected_target_p_acc.items()))\n last_target_p_acc = OrderedDict(sorted(last_target_p_acc.items()))\n with open(os.path.join(args.work_dir,'best_target.json'),'w') as f:\n json.dump(best_target_p_acc,f)\n with open(os.path.join(args.work_dir,'selected_target.json'),'w') as f:\n json.dump(selected_target_p_acc,f)\n with open(os.path.join(args.work_dir,'last_target.json'),'w') as f:\n json.dump(last_target_p_acc,f)\n print(f\"Best target Per-class acc : \\n{best_target_p_acc}\")\n print(f\"Selected target Per-class acc : \\n{selected_target_p_acc}\")\n print(f\"Last target Per-class acc : \\n{last_target_p_acc}\")\n \n\nif __name__ == '__main__':\n main()\n","repo_name":"daintlab/fewshot-SFDA","sub_path":"target_finetune.py","file_name":"target_finetune.py","file_ext":"py","file_size_in_byte":16128,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"85"} +{"seq_id":"29789877380","text":"#!/usr/bin/env python\nimport rospy\nfrom sensor_msgs.msg import Range\nfrom geometry_msgs.msg import PointStamped\nimport math\nimport numpy as np\nimport threading\n\nclass CalculateVelRef:\n \n\n\n def __init__(self):\n\n self.VARIANCE_LIMIT = 200\n self.TIME_LIMIT = 5000\n\n self.CLOSEST_ALLOWED_CENTER_DISTANCE = 50\n self.CLOSEST_ALLOWED_SIDE_DISTANCE = 30\n\n self.MAX_SIDE_CONTROL_DISTANCE = 150\n\n self.UPPER_LIMIT_VELREF_X = 1\n self.ABS_LIMIT_VELREF_Y = 0.5\n self.VELREF_Y_SCALING = 50\n\n self.MAX_CENTER_MEASUREMENT = 500\n\n rospy.init_node('velrefcalc', anonymous=True)\n\n \n self.velref_pub = rospy.Publisher(\"control/velref\", PointStamped, queue_size=10) #No special reason for the queue_size of 10.\n \n #Subscribing to the three filtered ultrasonic sensordata\n self.ultra_left_sub = rospy.Subscriber(\"ultrasound/left/filtered\", Range, self.store_new_certain_measurement, callback_args=\"left\")\n self.ultra_center_sub = rospy.Subscriber(\"ultrasound/center/filtered\", Range, self.store_new_certain_measurement, callback_args=\"center\")\n self.ultra_right_sub = rospy.Subscriber(\"ultrasound/right/filtered\", Range, self.store_new_certain_measurement, callback_args=\"right\")\n \n #Initializations\n self.last_certain_measurement = {\"left\": Range(), \"center\": Range(), \"right\": Range()}\n self.velref = PointStamped()\n self.velref.header.stamp = rospy.Time.now()\n self.velref.header.frame_id = \"/ultrasound/center\"\n self.velref_measurement_counter = 0 \n\n \n\n \n\n\n def store_new_certain_measurement(self, new_range_message, sensor):\n if new_range_message.field_of_view < self.VARIANCE_LIMIT:\n self.last_certain_measurement[sensor] = new_range_message\n self.velref_measurement_counter += 1\n \n #Only calculate velref every three measurements\n if (self.velref_measurement_counter % 3) == 0:\n self.calculate_and_publish_velref()\n\n def calculate_new_velref(self):\n new_velref = self.velref\n \n # If too close to the car, just stop.\n if (self.last_certain_measurement[\"center\"] < self.CLOSEST_ALLOWED_CENTER_DISTANCE or\n self.last_certain_measurement[\"right\"] < self.CLOSEST_ALLOWED_SIDE_DISTANCE or\n self.last_certain_measurement[\"left\"] < self.CLOSEST_ALLOWED_SIDE_DISTANCE):\n new_velref.point.x = 0\n 
new_velref.point.y = 0\n \n else:\n # Setting the forward speed scaled based on how far we can see. The closer the distance, the slower the car moves.\n new_velref.point.x = (self.last_certain_measurement[\"center\"].range/self.MAX_CENTER_MEASUREMENT)*self.UPPER_LIMIT_VELREF_X\n \n # If there is a lot of space on each side, there is no point in controlling for it.\n if ((self.last_certain_measurement[\"right\"].range < self.MAX_SIDE_CONTROL_DISTANCE) or\n (self.last_certain_measurement[\"left\"].range < self.MAX_SIDE_CONTROL_DISTANCE)):\n # Takes the difference of the side mesurements and scales it before ouputting.\n new_velref.point.y = (self.last_certain_measurement[\"left\"].range-self.last_certain_measurement[\"right\"].range)/self.VELREF_Y_SCALING\n \n else:\n new_velref.point.y = 0\n \n # Make sure that the velref is within bounds\n velref = self.set_within_bounds(new_velref)\n self.velref.header.stamp = rospy.Time.now()\n return velref\n\n def calculate_and_publish_velref(self):\n new_velref = self.calculate_new_velref()\n self.velref = new_velref\n self.velref_pub.publish(new_velref)\n return new_velref\n\n def set_within_bounds(self,velref):\n if velref.point.x > self.UPPER_LIMIT_VELREF_X:\n velref.point.x = self.UPPER_LIMIT_VELREF_X\n elif velref.point.x < 0:\n velref.point.x = 0\n \n if abs(velref.point.y) > self.ABS_LIMIT_VELREF_Y:\n velref.point.y = float(np.sign(velref.point.y))*self.ABS_LIMIT_VELREF_Y\n return velref\n\n\n\n \n\ncalculator = CalculateVelRef()\ntry:\n rospy.spin()\nexcept KeyboardInterrupt:\n print(\"Shutting down\")\n","repo_name":"FuelFighter/ultrasonic_rospy","sub_path":"calculate_velref.py","file_name":"calculate_velref.py","file_ext":"py","file_size_in_byte":4336,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"38379343108","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom flask import current_app\nfrom flask import url_for\nimport hashlib\nfrom itsdangerous import BadSignature\nfrom itsdangerous import SignatureExpired\nfrom itsdangerous import URLSafeTimedSerializer\nfrom structlog import get_logger\n\nfrom .models import User\nfrom .signals import confirmation_instructions_sent\nfrom .utilities import generate_token\n\nfrom oldhawaii_metadata.mail import send_mail\n\nlogger = get_logger()\n\n\ndef confirm_confirmation_token(token):\n \"\"\"Returns the expired status, invalid status, user of a confirmation\n token, and the token itself. 
For example::\n\n expired, invalid, user, token = confirm_confirmation_token('...')\n\n :param token: Confirmation token as str\n \"\"\"\n max_age_key = 'USERS_REGISTER_CONFIRMATION_TOKEN_MAX_AGE_IN_SECONDS'\n max_age = current_app.config[max_age_key]\n\n salt = current_app.config['USERS_REGISTER_CONFIRMATION_TOKEN_SALT']\n serializer = URLSafeTimedSerializer(current_app.config['SECRET_KEY'])\n\n user, data = None, None\n expired, invalid = False, False\n\n try:\n data = serializer.loads(\n token,\n max_age=max_age,\n salt=salt)\n except SignatureExpired:\n d, data = serializer.loads_unsafe(token, salt=salt)\n expired = True\n except (BadSignature, TypeError, ValueError):\n invalid = True\n\n if data:\n user = User.get(id=data[0])\n\n expired = expired and (user is not None)\n\n logger.debug(\"confirmation token confirmed?\",\n expired=expired, invalid=invalid, user=user, data=data)\n\n return expired, invalid, user, data\n\n\ndef generate_confirmation_token(user):\n \"\"\"Generates a confirmation token for the user\n\n :param user: User to generate confirmation token for\n \"\"\"\n salt = current_app.config['USERS_REGISTER_CONFIRMATION_TOKEN_SALT']\n user_data = [\n str(user.id),\n hashlib.md5(user.email.encode('utf-8')).hexdigest()\n ]\n return generate_token(user_data, salt=salt)\n\n\ndef generate_confirmation_token_and_link(user):\n \"\"\"Generates a confirmation token and a link for the user\n\n :param user: User to generate confirmation token and link for\n \"\"\"\n token = generate_confirmation_token(user)\n url = url_for('users.confirm_email', token=token, _external=True)\n return token, url\n\n\ndef send_confirmation_instructions(user):\n \"\"\"Sends confirmation instructions email to the specified user\n\n :param user: User to send instructions to\n \"\"\"\n\n confirmation_token, confirmation_link = \\\n generate_confirmation_token_and_link(user)\n\n signal_context = {'user': user, 'confirmation_token': confirmation_token}\n mail_context = {'user': user, 'confirmation_link': confirmation_link}\n\n send_mail(\n 'Please confirm your email address',\n current_app.config['MAIL_DEFAULT_SENDER'],\n user.email,\n plain_template_path='users/emails/confirmation_instructions.txt.html',\n html_template_path='users/emails/confirmation_instructions.html',\n **mail_context)\n\n confirmation_instructions_sent.send(\n current_app._get_current_object(),\n **signal_context)\n\n\ndef system_requires_confirmation():\n \"\"\"Returns `True` if the system requires confirmation.\"\"\"\n return current_app.config['USERS_CONFIRMATION_REQUIRED']\n\n\ndef user_requires_confirmation(user):\n \"\"\"Returns `True` if the user requires confirmation.\n\n :param user: User that needs confirmation\n \"\"\"\n return system_requires_confirmation() and \\\n user.confirmed_at is None\n\n\n# vim: filetype=python\n","repo_name":"oldhawaii/oldhawaii-metadata","sub_path":"www/oldhawaii_metadata/apps/users/confirmable.py","file_name":"confirmable.py","file_ext":"py","file_size_in_byte":3612,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"69862753237","text":"\n#!/usr/bin/env python3\n# The above shebang (#!) 
operator tells Unix-like environments\n# to run this file as a python3 script\nfrom flask import Flask, request, jsonify\nfrom flask_sqlalchemy import SQLAlchemy\nimport json\nfrom flask_cors import CORS\n\n# For HTTP Calls\nimport requests\n\n\n# import sys\n# import os\n# import random\n# import datetime\n\n# Communication patterns:\n# Use a message-broker with 'direct' exchange to enable interaction\n# Use a reply-to queue and correlation_id to get a corresponding reply\n# import pika\n# If see errors like \"ModuleNotFoundError: No module named 'pika'\", need to\n# make sure the 'pip' version used to install 'pika' matches the python version used.\n# import uuid\n# import csv\n\napp = Flask(__name__)\n# Database name in this case is match\napp.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+mysqlconnector://root@localhost:3306/sglovelah_match'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n\n\ndb = SQLAlchemy(app)\nCORS(app)\n\nchatURL = 'http://localhost:5009'\n\n\nclass Match(db.Model):\n __tablename__ = 'match'\n\n matchid = db.Column(db.Integer, primary_key=True)\n id1 = db.Column(db.Integer)\n id2 = db.Column(db.Integer)\n ready_status_1 = db.Column(db.Boolean, default=False)\n ready_status_2 = db.Column(db.Boolean, default=False)\n\n def __init__(self, id1, id2, ready_status_1=0, ready_status_2=0, matchid=None):\n self.matchid = matchid\n self.id1 = id1\n self.id2 = id2\n self.ready_status_1 = ready_status_1\n self.ready_status_2 = ready_status_2\n\n def json(self):\n return {\"matchid\": self.matchid, \"id1\": self.id1, \"id2\": self.id2, \"ready_status_1\": self.ready_status_1, \"ready_status_2\": self.ready_status_2}\n\n@app.route(\"/match/\",methods=['POST'])\ndef add_match(id1 = None,id2 = None):\n \"\"\"\n Creates a match in the Match DB.\n\n Passes in json data in the format\n {\n \"id1\"=\"\",\n \"id2\"=\"\"\n }\n\n If there is an error creating in account DB, return error message\n \n Else, after adding to account DB, gets the matchid of the newly created match,\n and creates a corresponding row in chat DB, using HTTP Call.\n\n If there is an error creating in chat DB, return error message.\n Else, returns success message.\n\n \"\"\"\n\n # Gets variables if done through http call\n\n # Implemented to account for function call from match_receiver\n\n if id1 == None and id2 == None:\n data = request.get_json()\n id1 = data[\"id1\"]\n id2 = data[\"id2\"]\n\n # Checks if there are currently any existing matches in the database\n match_check = Match.query.filter_by(id1=id1, id2=id2).first()\n match_check_2 = Match.query.filter_by(id1=id2, id2=id1).first()\n\n \n # Returns error message if the match already exists\n if match_check != None or match_check_2 != None:\n return jsonify({\"message\": f\"The match with id1:{id1} and id2:{id2} already exists.\"}), 500\n \n # Adds to match db if match does not exist\n match = Match(id1,id2)\n \n try: \n db.session.add(match)\n db.session.commit()\n\n # Returns a MySQL row corresponding to the newly created match\n matchid = Match.query.filter_by(id1=id1,id2=id2).first()\n\n except Exception as e:\n return jsonify({\"message\": f\"An error {e} occured creating the match.\"}), 500\n\n # Obtains the newly created matchid for creating row in chat\n matchid = matchid.matchid\n\n # Creates a new rom in chat, through HTTP Call.\n create_chat_url = chatURL + \"/createchat\"\n\n # Prepares POST message for sending\n message = {\"matchid\":int(matchid)}\n\n # Sends request with message as JSON\n r = 
requests.post(create_chat_url,json=message)\n\n # Saves response\n result = r.json()\n\n # Returns error message or success message accordingly\n try:\n error = result[\"message\"]\n return jsonify({\"message\":error})\n\n except KeyError:\n return jsonify({\"message\":\"Successful creation into DB\"}), 200\n # 201 is create\n\n\n\n@app.route(\"/match//\",methods=['GET'])\ndef get_match_id(id1,id2):\n \"\"\"\n Retrieves a matchid from the Match DB.\n\n Takes in a url in the format\n\n /match/id1/id2\n\n If match exists:\n \n Returns the match id corresponding to id1 and id2\n\n Else, return error message match does not exist\n \"\"\"\n # id1 = request.args.get(\"id1\")\n # id2 = request.args.get(\"id2\")\n \n matchid1 = Match.query.filter_by(id1 = id1, id2 = id2).first()\n matchid2 = Match.query.filter_by(id1 = id2, id2 = id1).first()\n\n if matchid1 != None:\n return jsonify(matchid1.matchid)\n elif matchid2 != None:\n return jsonify(matchid2.matchid)\n\n return jsonify({\"message\":f\"A match with userid pair {id1} and {id2} does not exist.\"}) , 404\n\n@app.route(\"/ready//\")\ndef get_partner_ready_status(matchid,vid):\n \"\"\"\n Retrieves the ready status of the partner of an id in the Match DB.\n\n Takes in a url in the format\n\n /match/1/2\n \n If match exists:\n \n Returns the ready status of the other partner\n\n Else, return error message match does not exist\n \"\"\"\n # id1 = request.args.get(\"id1\")\n # id2 = request.args.get(\"id2\")\n \n matchid1 = Match.query.filter_by(matchid = matchid, id1 = vid).first()\n matchid2 = Match.query.filter_by(matchid = matchid, id2 = vid).first()\n\n if matchid1 != None:\n return json.dumps(matchid1.ready_status_2)\n elif matchid2 != None:\n return json.dumps(matchid2.ready_status_1)\n\n return jsonify({\"message\":f\"A match with matchID does not exist.\"}) , 404\n\n@app.route(\"/allmatches/\",methods=['GET'])\ndef get_all_matches(id):\n \"\"\"\n Retrieves all the matches of an id in the Match DB. 
For use in calling chat\n\n    Takes in a url in the format\n\n    /match/1\n    \n    If the user has matches:\n    \n    Returns the list of userids matched with the given user\n\n    Else, return error message user is not found\n    \"\"\"\n    # id1 = request.args.get(\"id1\")\n    # id2 = request.args.get(\"id2\")\n    \n    # First checks for matches with user as id1\n    matchids = Match.query.filter_by(id1 = id)\n\n    # Subsequently checks for matches with user as id2\n    matchids2 = Match.query.filter_by(id2 = id)\n\n    # filter_by() returns a Query object, never None, so test for an empty result instead\n    if matchids.first() == None and matchids2.first() == None:\n\n        # Returns error message if user is in neither id1 nor id2\n        return jsonify({\"message\":f\"User {id} is not found.\"})\n    \n    # Returns a list of users that are matched with user as id1 and id2\n    return jsonify({\"matchids\": [matchid.id2 for matchid in matchids] + [matchid2.id1 for matchid2 in matchids2]})\n\n@app.route(\"/ready/<matchid>/<userid>\",methods=['PUT'])\ndef update_partner_ready_status(matchid,userid):\n    \"\"\"\n    Updates the ready status of an id in the Match DB.\n    The ready status of the given user will be updated to true.\n\n    Takes in a url in the format\n\n    /ready/1/2\n    \n    If match exists:\n    \n    Updates the ready status and returns success of update.\n\n    Else, return error message match does not exist\n    \"\"\"\n    matchid1 = Match.query.filter_by(matchid = matchid, id1 = userid).first()\n    matchid2 = Match.query.filter_by(matchid = matchid, id2 = userid).first()\n    if matchid1 != None:\n        matchid1.ready_status_1 = 1\n    elif matchid2 != None:\n        matchid2.ready_status_2 = 1\n    else:\n        return jsonify({\"message\":f\"A match with matchid {matchid} and userid {userid} does not exist.\"}) , 404\n\n    try:\n        db.session.commit()\n        return jsonify({\"message\": f\"The ready status of {userid} has been updated!\"})\n    except Exception as e:\n        return jsonify({\"message\": f\"An error {e} occurred updating the database.\"}), 500\n\n@app.route(\"/checkreadystatus/<matchid>\")\ndef checkreadystatus(matchid):\n    matchids = Match.query.filter_by(matchid = matchid).first()\n    if matchids != None:\n        return jsonify({\"id1\":matchids.id1,\"status_1\":matchids.ready_status_1,\"id2\":matchids.id2,\"status_2\":matchids.ready_status_2})\n    # if matchids.ready_status_1 == 1 and matchids.ready_status_2 == 1:\n    #     return jsonify({\"message\": \"success\"})\n    # else:\n    #     return jsonify({\"message\":\"fail\"})\n    return jsonify({\"message\":f\"A match with matchid {matchid} does not exist.\"}) , 404\n\n\n# Execute this program if it is run as a main script (not by 'import')\nif __name__ == \"__main__\":\n    app.run(port=7007, debug=True)\n    # print(\"This is \" + os.path.basename(__file__) + \": creating an order...\")\n    # order = create_order(\"sample_order.txt\")\n    # send_order(order)\n    # receiveOrder()\n# print(get_all())\n# 
print(find_by_order_id(3))\n","repo_name":"Alwyn-Ong/ESD","sub_path":"backend/match.py","file_name":"match.py","file_ext":"py","file_size_in_byte":9339,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"85"} +{"seq_id":"23778555898","text":"\"\"\" Command-line program for restoring files from backup. \"\"\"\n\nimport argparse\nfrom hed.errors.exceptions import HedFileError\nfrom hed.tools.remodeling.backup_manager import BackupManager\n\n\ndef get_parser():\n \"\"\" Create a parser for the run_remodel_restore command-line arguments.\n\n Returns:\n argparse.ArgumentParser: A parser for parsing the command line arguments.\n\n \"\"\"\n parser = argparse.ArgumentParser(description=\"Restores the backup files for the original data.\")\n parser.add_argument(\"data_dir\", help=\"Full path of dataset root directory.\")\n parser.add_argument(\"-bd\", \"--backup_dir\", default=\"\", dest=\"backup_dir\",\n help=\"Directory for the backup that is being created\")\n parser.add_argument(\"-bn\", \"--backup_name\", default=BackupManager.DEFAULT_BACKUP_NAME, dest=\"backup_name\",\n help=\"Name of the default backup for remodeling\")\n parser.add_argument(\"-t\", \"--task-names\", dest=\"task_names\", nargs=\"*\", default=[], help=\"The names of the task.\")\n parser.add_argument(\"-v\", \"--verbose\", action='store_true',\n help=\"If present, output informative messages as computation progresses.\")\n return parser\n\n\ndef main(arg_list=None):\n \"\"\" The command-line program for restoring a remodel backup.\n\n Parameters:\n arg_list (list or None): Called with value None when called from the command line.\n Otherwise, called with the command-line parameters as an argument list.\n\n :raises HedFileError:\n - if the specified backup does not exist.\n\n \"\"\"\n parser = get_parser()\n args = parser.parse_args(arg_list)\n if args.backup_dir:\n backups_root = args.backup_dir\n else:\n backups_root = None\n backup_man = BackupManager(args.data_dir, backups_root=backups_root)\n if not backup_man.get_backup(args.backup_name):\n raise HedFileError(\"BackupDoesNotExist\", f\"{args.backup_name}\", \"\")\n backup_man.restore_backup(args.backup_name, task_names=args.task_names, verbose=args.verbose)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"hed-standard/hed-python","sub_path":"hed/tools/remodeling/cli/run_remodel_restore.py","file_name":"run_remodel_restore.py","file_ext":"py","file_size_in_byte":2114,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"85"} +{"seq_id":"73890082197","text":"import re, requests, json, urllib, datetime, scrapy, time\nimport pandas as pd\nfrom globocrawl.items import GlobocrawlItem\nfrom globocrawl.middlewares import GlobocrawlSpiderMiddleware\n\n\nclass UrlcrawlSpider(scrapy.Spider):\n name = 'urlcrawl'\n # allowed_domains = ['google.com']\n # start_urls = ['http://google.com/']\n\n def __init__(self,file):\n first_url = 'https://www.google.com/search?q=economia+site:https://g1.globo.com/economia/noticia/2011/0{}&lr=&newwindow=1&hl=ko&tbm=nws&ei=htJ0X8v2HNfrwQPypbaIDA&start={}0&sa=N&ved=0ahUKEwjL0fGKxZHsAhXXdXAKHfKSDcE48AEQ8tMDCIkB&biw=1012&bih=838&dpr=1.13'\n self.urls = []\n for i in range(1,13):\n if i < 10:\n a = str(i).rjust(2,'0')\n else:\n a = i\n \n for j in range(30):\n \n self.urls.append(first_url.format(a,j))\n\n def start_requests(self) :\n for url in self.urls :\n yield scrapy.Request(url=url)\n\n\n def parse(self, response):\n #print(response.text)\n result_dic = 
json.loads(response.text)\n item = InstacrawlItem()\n # self.crawling_count += 1\n # if self.crawling_count % 200 == 0:\n # time.sleep(700)\n\n #print(\"*\"*50)\n #print(result_dic['graphql']['user']['username'])\n #print(result_dic['graphql']['user']['biography'])\n item['articleurl'] = result_dic['data']['user']['reel']['id']\n yield item","repo_name":"quotation3/Project","sub_path":"브라질뉴스로_환율예측/brazilnewscrawl/brazilnewscrawl/spiders/urlcrawl.py","file_name":"urlcrawl.py","file_ext":"py","file_size_in_byte":1467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"39443687341","text":"from django.shortcuts import render\nfrom django.http.response import StreamingHttpResponse\nfrom datetime import datetime\nimport cv2\nimport mediapipe as mp\n\n\n# Create your views here.\n\nfrom django.http import HttpResponse\ndef sayhello(request):\n return HttpResponse(\"Hello django!\")\ndef hello2(reuest,username):\n return HttpResponse(\"Hello\" + username)\ndef hello3(request,username):\n now=datetime.now()\n return render(request,\"hello3.html\",locals())\ndef run(request,username):\n now=datetime.now()\n return render(request,\"index_test.html\",locals())\ndef track_human():\n mp_drawing = mp.solutions.drawing_utils\n # mp_holistic = mp.solutions.holistic\n mPose = mp.solutions.pose\n pose = mPose.Pose()\n\n # cap = cv2.VideoCapture(0)\n cap = cv2.VideoCapture(\"hack_app/sample1_encoded.mp4\")\n #cap.set(cv2.CAP_PROP_POS_FRAMES,500)\n # success, image = cap.read()\n cv2.namedWindow(\"Main\")\n\n professorOffset = 100\n\n while True:\n # with mp_holistic.Holistic(\n # min_detection_confidence=0.5,\n # min_tracking_confidence=0.5) as holistic:\n\n widthOfProfessor = 0\n heightOfProfessor = 0\n topLeftPoint = [0, 0]\n furthestLeft = 1\n furthestRight = 0\n furthestTop = 1\n furthestBottom = 0\n windowX = cv2.getWindowImageRect(\"Main\")[0]\n windowY = cv2.getWindowImageRect(\"Main\")[1]\n windowWidth = cv2.getWindowImageRect(\"Main\")[2]\n windowHeight = cv2.getWindowImageRect(\"Main\")[3]\n\n success, image = cap.read()\n if not success:\n print(\"Ignoring empty camera frame.\")\n # If loading a video, use 'break' instead of 'continue'.\n continue\n\n imagePoseDetection = image.copy()\n imageBlackboardDetection = image.copy()\n image.flags.writeable = False\n imagePoseDetection.flags.writeable = False\n imageBlackboardDetection.flags.writeable = False\n imgRGB = cv2.cvtColor(imagePoseDetection, cv2.COLOR_BGR2RGB)\n results = pose.process(imgRGB)\n\n if results.pose_landmarks:\n mp_drawing.draw_landmarks(imagePoseDetection, results.pose_landmarks, mPose.POSE_CONNECTIONS)\n\n for id, lm in enumerate(results.pose_landmarks.landmark):\n # get furthest left point\n if (lm.x < furthestLeft):\n furthestLeft = lm.x\n # get furthest right point\n if (lm.x > furthestRight):\n furthestRight = lm.x\n # get furthest top point\n if (lm.y < furthestTop):\n furthestTop = lm.y\n # get furthest bottom point\n if (lm.y > furthestBottom):\n furthestBottom = lm.y\n # calculate width and height\n widthOfProfessor = round(((windowWidth * furthestRight) - (windowWidth * furthestLeft)) + (professorOffset * 2))\n heightOfProfessor = round(((windowHeight * furthestBottom) - (windowHeight * furthestTop)) - professorOffset)\n topLeftPoint[0] = max(round((furthestLeft * (windowWidth - widthOfProfessor / 2)) - professorOffset), 0)\n topLeftPoint[1] = max(round((furthestTop * (windowHeight - heightOfProfessor / 2))), 0)\n\n # (x, y, w, h) = cv2.boundingRect(c)\n # 
cv2.rectangle(frame, (x,y), (x+w, y+h), (0, 255, 0), 20)\n # roi = frame[y:y+h, x:x+w]\n\n # cv2.rectangle(image,(topLeftPoint[0],topLeftPoint[1]),(topLeftPoint[0] + widthOfProfessor,topLeftPoint[1] + heightOfProfessor),(255,255,255),20)\n # cv2.circle(image, (round(windowWidth/2),round(windowHeight/2)), radius=20, color=(255, 0, 255), thickness=-1)\n\n # professorCropped = image[topLeftPoint[1]:topLeftPoint[1] + 1000, topLeftPoint[0]:topLeftPoint[0] + 500]\n professorCropped = image[topLeftPoint[1]:topLeftPoint[1] + heightOfProfessor,\n topLeftPoint[0]:topLeftPoint[0] + widthOfProfessor]\n blackboardCropped = imageBlackboardDetection[85:windowHeight - 80, 95:windowWidth - 100]\n\n # Find blackboard\n # imageBlackboardDetection = cv2.cvtColor(imageBlackboardDetection, cv2.COLOR_BGR2GRAY) #Turn it grayscale\n # ret, thresh = cv2.threshold(imageBlackboardDetection, 150, 255, cv2.THRESH_BINARY) #Apply binary threshold\n###############################\n # cv2.imshow(\"Blackboard\", blackboardCropped)\n # cv2.imshow(\"Professor\", professorCropped)\n # cv2.imshow('Main', imagePoseDetection)\n (flag, encodedImage) = cv2.imencode(\".jpg\", professorCropped)\n cv2.waitKey(10)\n if not flag:\n continue\n\n yield (b'--frame\\r\\n' b'Content-Type: image/jpeg\\r\\n\\r\\n' +\n bytearray(encodedImage) + b'\\r\\n')\n\n# def generate():\n# \t# grab global references to the output frame and lock variables\n# \tglobal outputFrame, lock\n# \t# loop over frames from the output stream\n# \twhile True:\n# \t\t# wait until the lock is acquired\n# \t\twith lock:\n# \t\t\t# check if the output frame is available, otherwise skip\n# \t\t\t# the iteration of the loop\n# \t\t\tif outputFrame is None:\n# \t\t\t\tcontinue\n# \t\t\t# encode the frame in JPEG format\n# \t\t\t(flag, encodedImage) = cv2.imencode(\".jpg\", outputFrame)\n# \t\t\t# ensure the frame was successfully encoded\n# \t\t\tif not flag:\n# \t\t\t\tcontinue\n# \t\t# yield the output frame in the byte format\n# \t\tyield(b'--frame\\r\\n' b'Content-Type: image/jpeg\\r\\n\\r\\n' +\n# \t\t\tbytearray(encodedImage) + b'\\r\\n')\n\n# def generate():\n# while True:\n# track_human_cv = track_human()\n# yield(b'--frame\\r\\n' b'Content-Type: image/jpeg\\r\\n\\r\\n' + track_human_cv + b'\\r\\n')\n\nfrom django.http.response import StreamingHttpResponse\ndef video_feed(request):\n return StreamingHttpResponse(track_human(), content_type='multipart/x-mixed-replace; boundary=frame')\n#, content_type='multipart/x-mixed-replace; boundary=framex'\n#style=\"width: 100%; height: 100%;\"\n","repo_name":"kullatnunu/best_app_hackdfw","sub_path":"hack/hack_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5866,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"31629721611","text":"\"\"\"Base Set Covering class\"\"\"\n\nfrom collections import Counter\nimport numpy as np\nfrom typing import Dict, List, Set\n\n\n\nfrom src.utils import BaseLogger\n\n\nclass SetCovering(BaseLogger):\n \n \n def __init__(self, subsets: List, costs: List) -> None:\n \"\"\" Set convering initialization\n \"\"\"\n super().__init__()\n self.subsets = subsets\n self.costs = costs\n self.universe = self.__identify_unique_items()\n self.total_set_elements = len(self.universe)\n self.total_subsets = len(subsets)\n self.item_scores = self.__calculate_item_scores()\n self.set_scores = self.__calculate_set_scores()\n self.set_probabilities = self.__calculate_set_probabilities()\n self.max_len_set = max([len(subset) for 
subset in self.subsets])\n \n def __identify_unique_items(self) -> Set:\n \"\"\"Find all unique elements of data structure\n \n \"\"\"\n return { item for instance in self.subsets for item in instance }\n \n def __calculate_item_scores(self):\n item_counts = dict(\n Counter([item for sublist in self.subsets for item in sublist])\n )\n \n item_values = {}\n for key, value in item_counts.items():\n item_values[key] = 1 / (value / self.total_subsets)\n \n SCORE_MAX = max(item_values.values())\n SCORE_MIN = min(item_values.values())\n \n item_scores = {}\n for key, value in item_values.items():\n item_scores[key] = SetCovering.max_min_normalizer(value, max_val = SCORE_MAX, min_val = SCORE_MIN)\n \n return item_scores\n\n def __calculate_set_probabilities(self):\n \"\"\"Return the average score for each subset\"\"\"\n scores = [np.mean([self.item_scores[i] for i in subset]) for subset in self.subsets]\n \n return self.__calculate_probabilities(scores)\n \n \n def __calculate_probabilities(self, vals):\n total_sum = sum(vals)\n return [ val / total_sum for val in vals ]\n \n def __calculate_set_scores(self):\n \n total_subsets = []\n for subset in self.subsets:\n subset_scores = [self.item_scores[i] for i in subset]\n subset_overall_score = np.average(subset_scores, weights= [item / sum(subset_scores) for item in subset_scores])\n total_subsets.append(subset_overall_score)\n return total_subsets\n \n \n def cover(self, probs = None):\n \"\"\" Create a set covering solution\n \"\"\"\n \n if not probs:\n prob_dist = self.set_probabilities\n \n all_available_subsets = [*range(self.total_subsets)]\n \n \n covered = set()\n selected_subsets = []\n cost = 0\n while covered != self.universe:\n\n subset_idx = self.__select_set(set_list = all_available_subsets)\n subset = set( self.subsets[subset_idx] )\n \n selected_subsets.append(subset_idx)\n covered |= subset\n \n cost += self.costs[subset_idx]\n all_available_subsets.remove(subset_idx)\n \n \n self.logger.info(f\">>> Total covering cost: {cost}\")\n self.logger.info(f\">>> Total subsets selected: {len(selected_subsets)}\")\n \n return selected_subsets, cost\n\n \n def __select_set(self, set_list: List, probs = None):\n \"\"\"Select a set index from the sets that have not already been appended to the solution\"\"\"\n subset_idx = np.random.randint(0, len(set_list)) # use numpy (already imported); stdlib random is not imported\n return set_list[subset_idx]\n \n @staticmethod\n def max_min_normalizer(num, max_val, min_val):\n return (num - min_val) / (max_val - min_val)\n\n ","repo_name":"abdala9512/ant-colony-optimization-implementation","sub_path":"src/algorithms/base_set_cover.py","file_name":"base_set_cover.py","file_ext":"py","file_size_in_byte":3780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"1902641997","text":"import math\n\nfrom django.conf import settings\nfrom django.core.cache import cache\nfrom django.utils import timezone\n\nfrom websitecoverpage.models import WebsiteCoverPage\n\n\ndef websitecoverpage(request):\n config = getattr(settings, 'WEBSITE_COVERPAGE', {})\n\n # bail if cookie already set, i.e. 
if the user has already\n # seen the coverpage we don't want to show it again\n cookie_name = config.get('cookie_name', 'coverpage')\n if cookie_name in request.COOKIES:\n # coverpage cookie exists: the user has already\n # seen the coverpage so no need to display it again\n return {}\n\n # bail if not a non-Ajax GET request\n if request.method != 'GET' or request.is_ajax():\n return {}\n\n # get ignore_urls\n ignore_urls = config.get('ignore_urls', []) + [\n '/favicon.ico',\n '/robots.txt'\n ]\n for ig in ignore_urls:\n if request.path.startswith(ig):\n return {}\n\n # ignore common bots\n ua = request.META.get('HTTP_USER_AGENT', '').lower()\n bots = [\n '360spider',\n 'adsbot-google',\n 'ahrefs',\n 'apachebench', # not a bot, but it can go here\n 'archive.org',\n 'baiduspider',\n 'bingbot',\n 'bingpreview',\n 'dotbot',\n 'duckduckgo',\n 'duckduckbot',\n 'exabot',\n 'facebook',\n 'feedfetcher-google',\n 'googlebot',\n 'googleimageproxy',\n 'ia_archiver',\n 'mediapartners-google',\n 'mj12bot',\n 'msnbot',\n 'panscient.com',\n 'pinterest',\n 'slackbot',\n 'slurp',\n 'sogou',\n 'surveybot',\n 'twitterbot',\n 'voilabot',\n 'yahoo-mmcrawler',\n 'yahoomailproxy',\n 'yandexbot'\n ]\n for bot in bots:\n if bot in ua:\n return {}\n\n # attempt to find from cache\n cache_key = config.get('cache_key', 'website-coverpage')\n coverpage = cache.get(cache_key, None)\n if coverpage is None:\n # find next available page from the database\n now = timezone.now()\n page = WebsiteCoverPage.objects \\\n .filter(end_datetime__gt=now) \\\n .order_by('start_datetime', 'end_datetime') \\\n .first()\n\n if page is None:\n # there are no valid pages in the database\n # set a long, empty cache\n coverpage = {}\n cache_timeout = 60 * 60 * 24 * 28\n else:\n if page.start_datetime < now:\n # a coverpage is active\n # cache the results until its end_datetime\n coverpage = {\n 'websitecoverpage': {\n 'cookie_name': cookie_name,\n 'html': page.html,\n 'style': page.style\n }\n }\n cache_timeout = (page.end_datetime - now).total_seconds()\n else:\n # the next page is in the future\n # set an empty cache until that time\n coverpage = {}\n cache_timeout = (page.start_datetime - now).total_seconds()\n\n # save to cache\n cache.set(cache_key, coverpage, math.ceil(cache_timeout))\n\n # return\n return coverpage","repo_name":"joncombe/django-website-coverpage","sub_path":"websitecoverpage/context_processor.py","file_name":"context_processor.py","file_ext":"py","file_size_in_byte":3270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"23455607857","text":"\nf = open('day7_input.txt')\ntext = f.readlines()\nf.close()\n\nvalid = 0\nfor line in text:\n line = line.strip()\n abba = False\n noabba = True\n hyper = False\n for i in range(3, len(line)):\n if line[i] == '[': hyper = True\n if line[i] == ']': hyper = False\n if line[i] == line[i-3] and line[i-1] == line[i-2] and line[i] != line[i-1]:\n if hyper:\n noabba = False\n else:\n abba = True\n if abba and noabba:\n valid += 1\n \nprint(valid)\n","repo_name":"scheidguy/Advent_of_Code","sub_path":"2016/day7-1.py","file_name":"day7-1.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"71934428438","text":"from flask import render_template\nfrom .ibob_screener import get_charts\nimport os\nimport datetime\nimport path_config\n\nfrom . 
import app\n\n@app.route('/')\ndef index():\n return render_template(\"index.html\")\n\n\n@app.route('/strategy/inside-bar-pattern')\ndef ib_pattern():\n return render_template(\"ib_strategy.html\")\n@app.route('/tools/ibob_stock_screener')\ndef ibob_screener():\n img_files = []\n img_dir = \"\"\n end = datetime.date.today() - datetime.timedelta(days=1)\n\n # Set start/end date for our dataframes (21 days)\n current_EST_Time = datetime.datetime.now(datetime.timezone(datetime.timedelta(hours=-5), 'EST'))\n start=datetime.date.today() - datetime.timedelta(days=14)\n\n # check if it's a weekend\n weekno = datetime.datetime.today().weekday()\n today = datetime.date.today()\n if os.path.exists(f\"{path_config.imageDir}/{today}\") == True:\n end = today\n img_dir = f\"{path_config.imageDir}/images/{end}\"\n elif weekno > 5:\n # it's the weekend, get the past friday\n now = datetime.date.today()\n last_friday = now + datetime.timedelta(days=(4-now.weekday()))\n end = last_friday\n img_dir = f\"{path_config.imageDir}/{end}\"\n # If before 5PM EST, get previous day stats\n elif weekno == 0 and current_EST_Time.hour < 17:\n now = datetime.date.today()\n last_friday = now - datetime.timedelta(days=now.weekday()) + datetime.timedelta(days=4, weeks=-1)\n end = last_friday\n img_dir = f\"{path_config.imageDir}/{end}\"\n elif weekno < 5 and current_EST_Time.hour < 17:\n end=datetime.date.today() - datetime.timedelta(days=1)\n img_dir = f\"{path_config.imageDir}/{end}\"\n elif weekno < 5:\n end=datetime.date.today()\n img_dir = f\"{path_config.imageDir}/{end}\"\n else:\n img_dir = f\"{path_config.imageDir}/{end}\"\n for img in os.listdir(img_dir):\n f = os.path.join(img_dir, img)\n if os.path.isfile(f):\n img_files.append(f\"/{end}/{img}\")\n return render_template('ibob_screener.html', images=img_files)","repo_name":"adamwhiles/apetrader","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2069,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"18194430633","text":"import os\nimport re\n# resolves name to an object hash in the repo\nfrom util.ref_handling.ref_resolver import ref_resolver\nfrom util.repo_handling.repo_dir import repo_dir\n\n\ndef object_resolve(repo, name):\n candidates = list()\n hashReg = re.compile(r\"^[0-9A-Fa-f]{4,40}$\")\n smallHashReg = re.compile(r\"^[0-9A-Fa-f]{4,40}$\")\n\n if not name.strip():\n return None\n\n if name == \"HEAD\":\n return [ref_resolver(repo, \"HEAD\")]\n\n if hashReg.match(name):\n if len(name) == 40: # Complete hash length\n return [name.lower()]\n elif len(name) >= 4: # Small hash can't be smaller than 4\n name = name.lower()\n prefix = name[0:2]\n path = repo_dir(repo, \"objects\", prefix, mkdir=False)\n if path:\n rem = name[2:]\n for file in os.listdir(path):\n if file.startswith(rem):\n candidates.append(prefix + file)\n\n return candidates","repo_name":"anderslatif/alg","sub_path":"util/object_handling/object_resolve.py","file_name":"object_resolve.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"18490306246","text":"import pygame\nimport pygame_gui\n\n\nclass MainMenu:\n def __init__(self):\n self.manager = pygame_gui.UIManager((1280, 720))\n\n title = pygame_gui.elements.UILabel(relative_rect=pygame.Rect((1280/2-100, 720/2-40), (200, 30)),\n text=\"HELIXTEUS III\",\n manager=self.manager)\n\n self.play_button = 
pygame_gui.elements.UIButton(relative_rect=pygame.Rect((1280/2-60, 720/2), (120, 30)),\n text=\"Play\",\n manager=self.manager)\n reset_button = pygame_gui.elements.UIButton(relative_rect=pygame.Rect((1280/2-60, 720/2 + 35), (120, 30)),\n text=\"Reset\",\n manager=self.manager)\n\n def process_event(self, event):\n self.manager.process_events(event)\n\n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n\n if event.type == pygame.USEREVENT:\n if event.user_type == 'ui_button_pressed':\n if event.ui_element == self.play_button:\n self.play_game()\n\n def update(self, delta):\n self.manager.update(delta)\n\n def draw(self, screen):\n screen.fill((0, 0, 20))\n self.manager.draw_ui(screen)\n\n def play_game(self):\n from pygame_tests.game import show_planet_view\n show_planet_view()\n","repo_name":"Apple0726/hx3","sub_path":"pygame_tests/main_menu.py","file_name":"main_menu.py","file_ext":"py","file_size_in_byte":1513,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"23219100288","text":"\n# Hacky quick script to extract captions from base game vo sound events.\n# Written to make vo_captions.md less painful.\n# replace � with …\n\nimport os\n\ncurrent_dir = os.path.dirname(__file__)\nprint(\"Current path:\", current_dir)\n\nfile = open(os.path.join(current_dir, \"__test_vo_soundevents.txt\"), \"r\")\nlines = file.readlines()\nfile.close()\n\nfile = open(os.path.join(current_dir, \"__test_vo_captions.txt\"), \"w\")\nlooking_for_text = False\nsoundevent_name = \"\"\nsoundevents = {}\n\nfor line in lines:\n line = line.strip()\n # Searching for line_text after encountering sound event\n if looking_for_text:\n if line.startswith(\"line_text = \"):\n text = line[12:].replace(\"[\", \"\\\\[\").replace(\"]\", \"\\\\]\")\n #file.write(f\"{soundevent_name} {text} \\n\")\n soundevents[soundevent_name] = text\n looking_for_text = False\n # Searching for start of sound event\n elif line.startswith(\"vo.\"):\n soundevent_name = line[3:-2]\n looking_for_text = True\n\n# Sorting by name\nfor soundevent in sorted(soundevents.items()):\n file.write(f\"{soundevent[0]} {soundevent[1]} \\n\")\n \nprint(\"Done extracting captions.\")\n","repo_name":"FrostSource/hla_extravaganza","sub_path":"tools/extract_captions.py","file_name":"extract_captions.py","file_ext":"py","file_size_in_byte":1167,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"85"} +{"seq_id":"29460518991","text":"\"\"\"Implementation of Rule L034.\"\"\"\n\nfrom sqlfluff.core.rules.base import BaseRule, LintFix, LintResult\nfrom sqlfluff.core.rules.doc_decorators import document_fix_compatible\n\n\n@document_fix_compatible\nclass Rule_L034(BaseRule):\n \"\"\"Use wildcards then simple targets before calculations and aggregates in select statements.\n\n | **Anti-pattern**\n\n .. code-block:: sql\n\n select\n a,\n *,\n row_number() over (partition by id order by date) as y,\n b\n from x\n\n\n | **Best practice**\n | Order \"select\" targets in ascending complexity\n\n .. code-block:: sql\n\n select\n *,\n a,\n b,\n row_number() over (partition by id order by date) as y\n from x\n\n \"\"\"\n\n def _validate(self, i, segment):\n # Check if we've seen a more complex select target element already\n if self.seen_band_elements[i + 1 : :] != [[]] * len(\n self.seen_band_elements[i + 1 : :]\n ):\n # Found a violation (i.e. 
a simpler element that *follows* a more\n # complex element.\n self.violation_exists = True\n self.current_element_band = i\n self.seen_band_elements[i].append(segment)\n\n def _eval(self, segment, parent_stack, **kwargs):\n self.violation_buff = []\n self.violation_exists = False\n # Bands of select targets in order to be enforced\n select_element_order_preference = (\n (\"wildcard_expression\",),\n (\n \"object_reference\",\n \"literal\",\n \"cast_expression\",\n (\"function\", \"cast\"),\n (\"expression\", \"cast_expression\"),\n ),\n )\n\n # Track which bands have been seen, with additional empty list for the non-matching elements\n # If we find a matching target element, we append the element to the corresponding index\n self.seen_band_elements = [[] for i in select_element_order_preference] + [[]]\n\n if segment.is_type(\"select_clause\"):\n # Ignore select clauses which belong to:\n # - set expression, which is most commonly a union\n # - insert_statement\n # - create table statement\n #\n # In each of these contexts, the order of columns in a select should\n # be preserved.\n if len(parent_stack) >= 2 and parent_stack[-2].is_type(\n \"insert_statement\", \"set_expression\"\n ):\n return None\n if len(parent_stack) >= 3 and parent_stack[-3].is_type(\n \"create_table_statement\"\n ):\n return None\n\n select_clause_segment = segment\n select_target_elements = segment.get_children(\"select_clause_element\")\n if not select_target_elements:\n return None\n\n # Iterate through all the select targets to find any order violations\n for segment in select_target_elements:\n # The band index of the current segment in select_element_order_preference\n self.current_element_band = None\n\n # Compare the segment to the bands in select_element_order_preference\n for i, band in enumerate(select_element_order_preference):\n for e in band:\n # Identify simple select target\n if segment.get_child(e):\n self._validate(i, segment)\n\n # Identify function\n elif type(e) == tuple and e[0] == \"function\":\n try:\n if (\n segment.get_child(\"function\")\n .get_child(\"function_name\")\n .raw\n == e[1]\n ):\n self._validate(i, segment)\n except AttributeError:\n # If the segment doesn't match\n pass\n\n # Identify simple expression\n elif type(e) == tuple and e[0] == \"expression\":\n try:\n if (\n segment.get_child(\"expression\").get_child(e[1])\n and segment.get_child(\"expression\").segments[0].type\n in (\n \"column_reference\",\n \"object_reference\",\n \"literal\",\n )\n # len == 2 to ensure the expression is 'simple'\n and len(segment.get_child(\"expression\").segments)\n == 2\n ):\n self._validate(i, segment)\n except AttributeError:\n # If the segment doesn't match\n pass\n\n # If the target doesn't exist in select_element_order_preference then it is 'complex' and must go last\n if self.current_element_band is None:\n self.seen_band_elements[-1].append(segment)\n\n if self.violation_exists:\n # Create a list of all the edit fixes\n # We have to do this at the end of iterating through all the select_target_elements to get the order correct\n # This means we can't add a lint fix to each individual LintResult as we go\n ordered_select_target_elements = [\n segment for band in self.seen_band_elements for segment in band\n ]\n fixes = [\n LintFix(\n \"edit\",\n initial_select_target_element,\n replace_select_target_element,\n )\n for initial_select_target_element, replace_select_target_element in zip(\n select_target_elements, ordered_select_target_elements\n )\n ]\n # Anchoring on the select 
statement segment ensures that\n # select statements which include macro targets are ignored\n # when ignore_templated_areas is set\n lint_result = LintResult(anchor=select_clause_segment, fixes=fixes)\n self.violation_buff = [lint_result]\n\n return self.violation_buff or None\n","repo_name":"rohithmiryala/sqlfluff","sub_path":"src/sqlfluff/rules/L034.py","file_name":"L034.py","file_ext":"py","file_size_in_byte":6769,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"21828289667","text":"from __future__ import annotations\n\nfrom xia2.Experts.LatticeExpert import ApplyLattice\n\n\ndef _parse_idxref_lp_distance_etc(lp_file_lines):\n \"\"\"Parse the LP file for refined distance, beam centre and so on...\"\"\"\n\n beam = None\n distance = None\n\n for line in lp_file_lines:\n if \"DETECTOR COORDINATES\" in line and \"DIRECT BEAM\" in line:\n beam = tuple(map(float, line.split()[-2:]))\n if \"CRYSTAL TO DETECTOR\" in line:\n distance = float(line.split()[-1])\n if distance < 0:\n distance *= -1\n\n return beam, distance\n\n\ndef _parse_idxref_index_origin(lp_file_lines):\n \"\"\"Parse the LP file for the possible index origin etc.\"\"\"\n\n origins = {}\n\n i = 0\n while i < len(lp_file_lines):\n line = lp_file_lines[i]\n i += 1\n if \"INDEX_\" in line and \"QUALITY\" in line and \"DELTA\" in line:\n while \"SELECTED\" not in line:\n line = lp_file_lines[i]\n i += 1\n try:\n hkl = tuple(map(int, line.split()[:3]))\n quality, delta, xd, yd = tuple(map(float, line.split()[3:7]))\n origins[hkl] = quality, delta, xd, yd\n except Exception:\n pass\n\n return origins\n\n raise RuntimeError(\"should never reach this point\")\n\n\ndef _parse_idxref_lp(lp_file_lines):\n \"\"\"Parse the list of lines from idxref.lp.\"\"\"\n\n lattice_character_info = {}\n\n i = 0\n\n mosaic = 0.0\n\n while i < len(lp_file_lines):\n line = lp_file_lines[i]\n i += 1\n\n # get the mosaic information\n\n if \"CRYSTAL MOSAICITY\" in line:\n mosaic = float(line.split()[-1])\n\n # get the lattice character information - coding around the\n # non-standard possibility of mI, by simply ignoring it!\n # bug # 2355\n\n if \"CHARACTER LATTICE OF FIT a b c\" in line:\n # example line (note potential lack of white space between b and c cell parameters):\n # 9 hR 999.0 3966.3 5324.610528.6 85.6 64.6 132.0\n j = i + 1\n while lp_file_lines[j].strip() != \"\":\n l = lp_file_lines[j].replace(\"*\", \" \")\n character = int(l[:12].strip())\n lattice = l[12:23].strip()\n fit = float(l[23:32].strip())\n cell = tuple(\n float(c)\n for c in (\n l[32:39],\n l[39:46],\n l[46:53],\n l[53:59],\n l[59:65],\n l[65:71],\n )\n )\n\n # FIXME need to do something properly about this...\n # bug # 2355\n\n if lattice == \"mI\":\n j += 1\n continue\n\n # reindex_card = tuple(map(int, record[9:]))\n reindex_card = () # XXX need example where this is present in the IDXREF.LP\n constrained_cell = ApplyLattice(lattice, cell)[0]\n\n lattice_character_info[character] = {\n \"lattice\": lattice,\n \"fit\": fit,\n \"cell\": constrained_cell,\n \"mosaic\": mosaic,\n \"reidx\": reindex_card,\n }\n\n j += 1\n\n return lattice_character_info\n\n\ndef _parse_idxref_lp_subtree(lp_file_lines):\n\n subtrees = {}\n\n i = 0\n\n while i < len(lp_file_lines):\n line = lp_file_lines[i]\n i += 1\n\n if line.split() == [\"SUBTREE\", \"POPULATION\"]:\n j = i + 1\n line = lp_file_lines[j]\n while line.strip():\n subtree, population = tuple(map(int, line.split()))\n subtrees[subtree] = population\n j += 1\n line = 
lp_file_lines[j]\n\n return subtrees\n\n\ndef _parse_idxref_lp_quality(lp_file_lines):\n fraction = None\n rmsd = None\n rmsphi = None\n\n for record in lp_file_lines:\n if \"OUT OF\" in record and \"SPOTS INDEXED\" in record:\n fraction = float(record.split()[0]) / float(record.split()[3])\n if \"STANDARD DEVIATION OF SPOT POSITION\" in record:\n rmsd = float(record.split()[-1])\n if \"STANDARD DEVIATION OF SPINDLE POSITION\" in record:\n rmsphi = float(record.split()[-1])\n\n return fraction, rmsd, rmsphi\n","repo_name":"xia2/xia2","sub_path":"src/xia2/Wrappers/XDS/XDSIdxrefHelpers.py","file_name":"XDSIdxrefHelpers.py","file_ext":"py","file_size_in_byte":4484,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"85"} +{"seq_id":"41984268765","text":"import pyray as pr\nfrom raylib import KEY_UP, KEY_DOWN, KEY_LEFT, KEY_RIGHT, KEY_DELETE, FLAG_WINDOW_RESIZABLE\n\nfrom .agent import Screen\n\nWIDTH = 640\nHEIGHT = 480\n\npr.set_config_flags(FLAG_WINDOW_RESIZABLE)\npr.init_window(WIDTH, HEIGHT, \"The Ball Pit.\")\npr.set_target_fps(60)\nscreen = Screen(pr, 1)\ncolours = [\n (255, 255, 255),\n (255, 0, 0),\n (0, 255, 0),\n (0, 0, 255)\n]\n\nwhile not pr.window_should_close():\n pr.begin_drawing()\n pr.clear_background(pr.BLACK)\n\n if pr.is_window_resized():\n screen.update_size((pr.get_screen_width(), pr.get_screen_height()))\n\n screen.step(1)\n\n pr.draw_text(\n \"Delete to remove balls | L/R arrow keys for colour\", 10, screen.size[1]-20, 20, pr.WHITE\n )\n pr.draw_text(\n \"Click to add balls | Right-Click to add 10 balls\", 10, screen.size[1]-40, 20, pr.WHITE\n )\n pr.draw_text(\"Arrow key up to increase speed\", 10, 20, 20, pr.WHITE)\n pr.draw_text(\"Arrow key down to decrease speed\", 10, 40, 20, pr.WHITE)\n if screen.speed > 4:\n pr.draw_text(f\"Speed: {round(screen.speed, 1)}\", 10, 60, 40, pr.RED)\n elif screen.speed < 0:\n pr.draw_text(f\"Speed: {round(screen.speed, 1)}\", 10, 60, 40, pr.BROWN)\n elif 0 <= screen.speed < 1:\n pr.draw_text(f\"Speed: {round(screen.speed, 1)}\", 10, 60, 40, pr.BLUE)\n else:\n pr.draw_text(f\"Speed: {round(screen.speed, 1)}\", 10, 60, 40, pr.GREEN)\n pr.end_drawing()\n\n if pr.is_mouse_button_pressed(0):\n position = pr.get_mouse_position()\n screen.spawn((position.x, position.y))\n if pr.is_mouse_button_pressed(1):\n for _ in range(10):\n position = pr.get_mouse_position()\n screen.spawn((position.x, position.y))\n\n if pr.is_key_down(KEY_UP) and screen.speed < 4.9:\n screen.speed += 0.1\n if pr.is_key_down(KEY_DOWN) and screen.speed > -4.9:\n screen.speed -= 0.1\n \n if pr.is_key_pressed(KEY_LEFT):\n i = colours.index(screen.colour)\n screen.colour = colours[i-1]\n if pr.is_key_pressed(KEY_RIGHT):\n i = colours.index(screen.colour)\n if i == len(colours)-1:\n screen.colour = colours[0]\n else:\n screen.colour = colours[i+1]\n \n if pr.is_key_pressed(KEY_DELETE):\n screen.agents = []\n\n\npr.close_window()","repo_name":"TheLegendBeacon/ball-sim-python","sub_path":"simulation/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":2300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"38464393010","text":"\nimport click\n\nfrom pyvboxmanage import __title__ as NAME\nfrom pyvboxmanage import __version__ as VERSION\nfrom pyvboxmanage.utils import logger\nfrom pyvboxmanage.PyVBoxManage import PyVBoxManage\nfrom pyvboxmanage.exceptions.PyVBoxManageException import 
PyVBoxManageException\n\n\n@click.command(no_args_is_help=True)\n@click.argument('configuration_files', required=True, nargs=-1)\n@click.option('-s', '--setting', help='Set variable as per; var_name=\"Some value\"', multiple=True)\n@click.option('-v', '--verbose', is_flag=True, help='Verbose logging messages (debug level).')\n@click.option('-q', '--quiet', is_flag=True, help='Quiet mode, with priority over --verbose')\n@click.option('-d', '--dry-run', is_flag=True, help='Dry run mode, output the commands that would execute only.')\n@click.version_option(VERSION)\ndef pyvboxmanage(configuration_files, setting, verbose, quiet, dry_run):\n \"\"\"\n PyVBoxManage is a wrapper tool around VBoxManage that facilitates the orchestration of VBoxManage commands from a\n simple YAML configuration file that matches the input opts/args for VBoxManage. This makes it possible to\n implement common sequences of VBoxManage commands such as spinning up a new dev/test instance with different\n hardware just by using a single command line with a configuration file.\n\n Variables, output redirection, exit-triggers and returncode-exceptions are available to make flexible setups.\n\n Documentation available https://pyvboxmanage.readthedocs.io\n \"\"\"\n\n ctx = click.get_current_context()\n ctx.ensure_object(dict)\n\n if quiet:\n logger.init(name=NAME, level='critical')\n elif verbose:\n logger.init(name=NAME, level='debug')\n else:\n logger.init(name=NAME, level='info')\n\n logger.debug('{} v{}'.format(NAME, VERSION))\n\n variable_settings = {}\n for s in setting:\n if '=' not in s:\n raise PyVBoxManageException('--setting values must be in the format var_name=\"Some value\"', s)\n variable_settings[s.partition('=')[0].strip()] = s.partition('=')[-1].strip()\n\n PyVBoxManage(configuration_files=configuration_files, variable_settings=variable_settings, dry_run=dry_run).main()\n","repo_name":"ndejong/pyvboxmanage","sub_path":"src/pyvboxmanage/cli/click.py","file_name":"click.py","file_ext":"py","file_size_in_byte":2186,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"85"} +{"seq_id":"12349127785","text":"import sip\n\nfrom PyQt5 import QtWidgets, QtCore\nfrom PyQt5.QtWidgets import QTreeWidget\n\nfrom server.server import Server\n\n\nclass ProjectTree(QTreeWidget):\n \"\"\"A QTreeWidget that has any open project and all its lists as \n QTreeWidgetItem.\n \"\"\"\n def __init__(self):\n super(ProjectTree, self).__init__()\n self.setHeaderHidden(True)\n self.setColumnCount(1)\n self.projectIds = {}\n self.listsOpen = {}\n self.trees = {}\n\n self.itemDoubleClicked.connect(self.onItemDoubleClicked)\n\n def addTree(self, id: int):\n \"\"\"Takes the projectId searchs for the project name on the server, then\n the function creates a QtWidgets.QTreeWidgetItem using the project name\n and defines this item as an class top level item.\n \"\"\"\n server = Server()\n name = server.getProjectName(id)\n self.projectIds[name] = id\n self.trees[name] = QtWidgets.QTreeWidgetItem([name])\n self.addTopLevelItem(self.trees[name])\n\n def addBranches(self, id: int):\n \"\"\"Searches for all the lists in the server that have id as their \n projectId. 
Creates a QTreeWidgetItem for each list and adds the item into\n the top level QTreeWidgetItem.\n \"\"\"\n server = Server()\n name = server.getProjectName(id)\n branches = server.getListsNameFromProject(id)\n self.listsOpen[name] = branches\n for branch in branches:\n item = QtWidgets.QTreeWidgetItem(branch)\n self.trees[name].addChild(item)\n self.trees[name].setExpanded(True)\n\n def removeTree(self, id: int):\n \"\"\"Searches for a key in the projectIds dictionary that has the project\n id as a value, when the key is found, this function uses the key to \n delete entries in projectIds, trees and also to delete the C object \n stored in trees[key].\n \"\"\"\n for key in self.projectIds:\n if self.projectIds[key] == id:\n del self.projectIds[key]\n sip.delete(self.trees[key])\n del self.trees[key]\n break\n\n def removeBranches(self, id: int):\n \"\"\"Searches for a key in the projectIds dictionary that has the project\n id as a value and uses the key to delete the entry in listsOpen list. \n \"\"\"\n for key in self.projectIds:\n if self.projectIds[key] == id:\n del self.listsOpen[key]\n break\n\n def onItemDoubleClicked(self, it: QtWidgets.QTreeWidgetItem, col: int):\n \"\"\"When an item in ProjectTree is double clicked this function is \n activated. The function checks if QTreeWidgetItem has a parent, if the \n item has a parent the function will search the list id in the database\n and it will create a TagTable in the Tab widget.\n \"\"\"\n if it.parent() is not None:\n server = Server()\n listName = it.text(col)\n projectId = self.projectIds[str(it.parent().text(col))]\n listId = server.getListIdFromProject(listName, projectId)\n self.openList(listId)\n \n def setOpenListFunction(self, func):\n \"\"\"Receives the openList function from interface.window.MainWindow and\n sets the function as a class variable.\n \"\"\"\n self.openList = func\n\n def getOpenProjectNames(self) -> list:\n \"\"\"Return a list that has the names of the projects currently open in\n the ProjectTree widget.\n \"\"\"\n return self.projectIds.keys()\n\n def updateTree(self, projectName: str):\n \"\"\"Checks if the project with projectName is in the ProjectTree and\n also if the project is not updated, if both conditions are true, the\n function searches the database for the missing list names and adds them \n to the ProjectTree.\n \"\"\"\n isProjectOpen = projectName in self.projectIds.keys()\n isTreeUpdated = self.isTreeUpdated(projectName)\n if isProjectOpen and not isTreeUpdated:\n server = Server()\n dbList = server.getListsNameFromProject(self.projectIds[projectName])\n openSet = set(self.listsOpen[projectName])\n toUpdate = [item for item in dbList if item not in openSet]\n for value in toUpdate:\n item = QtWidgets.QTreeWidgetItem(value)\n self.trees[projectName].addChild(item)\n \n def isTreeUpdated(self, projectName: str) -> bool:\n \"\"\"If any project is currently open in the ProjectTree, the function\n checks if those projects are updated. 
If the project is updated the \n function returns true.\n \"\"\"\n if len(self.listsOpen) > 0:\n server = Server()\n dbList = server.getListsNameFromProject(self.projectIds[projectName])\n \n if len(self.listsOpen[projectName]) != len(dbList):\n return False\n else:\n return sorted(self.listsOpen[projectName]) == sorted(dbList)\n else:\n return True\n\n\n\n","repo_name":"bmartins95/TagApp","sub_path":"interface/tree.py","file_name":"tree.py","file_ext":"py","file_size_in_byte":5061,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"32006386268","text":"import sys\n\nimport json\nimport os, errno\nimport pandas as pd\nimport numpy as np\n\nimport torch\n\nimport repair_syserr_models.gen_utils as gen_utils\n\nimport repair_syserr_models.parser_arguments as parser_arguments\nfrom repair_syserr_models.train_eval_models import training_phase, evaluation_phase, repair_phase\n\nfrom repair_syserr_models.model_utils import frange_cycle_linear\n\nfrom repair_syserr_models.trainer_utils import StandardTrainer\n\n\ndef compute_metrics(\n model,\n data_loader_X,\n X,\n dataset_obj,\n args,\n epoch,\n losses_save,\n X_clean,\n target_errors,\n trusted_mask,\n mode,\n kl_beta=1.0,\n reg_scheduler_val=1.0,\n):\n\n # get epoch metrics on outlier detection for train dataset\n\n # outlier analysis\n loss_ret, metric_ret, loss_trusted_ret, metric_trusted_ret = evaluation_phase(\n model,\n data_loader_X,\n X,\n dataset_obj,\n args,\n mode,\n trusted_mask,\n kl_beta,\n reg_scheduler_val,\n target_errors,\n )\n\n # repair analysis\n repair_ret, repair_trusted_ret = repair_phase(\n model,\n X,\n X_clean,\n dataset_obj,\n args,\n target_errors,\n mode,\n trusted_mask,\n )\n\n print(\"\\n\\n\\n\\n\")\n out_string = (\n \"====> \"\n + mode\n + \" set: Epoch: {} Avg. Loss: {:.3f}\\t\".format(epoch, loss_ret[\"total_loss\"])\n )\n out_string += \"\".join(\n [\n \"{}: {:.3f}\\t\".format(\"Avg. 
\" + _key.upper(), _value)\n for _key, _value in loss_ret.items()\n if _key != \"total_loss\"\n ]\n )\n print(out_string)\n\n # calc cell metrics\n auc_cell_nll, auc_vec_nll, avpr_cell_nll, avpr_vec_nll = gen_utils.cell_metrics(\n target_errors, metric_ret[\"nll_score\"]\n )\n\n # calc row metrics\n auc_row_nll, avpr_row_nll = gen_utils.row_metrics(\n target_errors, metric_ret[\"nll_score\"]\n )\n\n # trusted set analysis\n if trusted_mask is not None:\n # calc cell metrics\n (\n auc_cell_nll_ts,\n auc_vec_nll_ts,\n avpr_cell_nll_ts,\n avpr_vec_nll_ts,\n ) = gen_utils.cell_metrics(\n target_errors[trusted_mask, :], metric_trusted_ret[\"nll_score\"]\n )\n\n # calc row metrics\n auc_row_nll_ts, avpr_row_nll_ts = gen_utils.row_metrics(\n target_errors[trusted_mask], metric_trusted_ret[\"nll_score\"]\n )\n\n else:\n auc_cell_nll_ts, auc_vec_nll_ts, avpr_cell_nll_ts, avpr_vec_nll_ts = (\n -10.0,\n -10.0,\n -10.0,\n -10.0,\n )\n auc_row_nll_ts, avpr_row_nll_ts = -10.0, -10.0\n\n if args.semi_supervise: # \"semi_y_VAE\" in args.model_type\n # TODO: or any other model with classifier like score.\n auc_row_class_y, avpr_row_class_y = gen_utils.row_metrics_classifier(\n target_errors, metric_ret[\"class_y_score\"]\n )\n if trusted_mask is not None:\n auc_row_class_y_ts, avpr_row_class_y_ts = gen_utils.row_metrics_classifier(\n target_errors, metric_ret[\"class_y_score\"]\n )\n else:\n auc_row_class_y_ts, avpr_row_class_y_ts = -10.0, -10.0\n else:\n auc_row_class_y = -10.0\n avpr_row_class_y = -10.0\n auc_row_class_y_ts = -10.0\n avpr_row_class_y_ts = -10.0\n\n if args.verbose_metrics_epoch:\n print(\" (Cell) Avg. \" + mode + \" AUC: {} \".format(auc_cell_nll))\n print(\" (Cell) Avg. \" + mode + \" AVPR: {} \".format(avpr_cell_nll))\n print(\"\\n\\n\")\n if args.verbose_metrics_feature_epoch:\n # TODO: might want to restrict if image?\n print(\" AUC per feature: \\n {}\".format(auc_vec_nll))\n print(\" AVPR per feature: \\n {}\".format(avpr_vec_nll))\n print(\"\\n\\n\")\n print(\" (Row) \" + mode + \" AUC: {} \".format(auc_row_nll))\n print(\" (Row) \" + mode + \" AVPR: {} \".format(avpr_row_nll))\n print(\"\\n\\n\")\n\n if args.semi_supervise:\n print(\n \" (Row) \" + mode + \" CLASSF_Y AUC: {} \".format(auc_row_class_y)\n )\n print(\n \" (Row) \"\n + mode\n + \" CLASSF_Y AVPR: {} \".format(avpr_row_class_y)\n )\n print(\"\\n\\n\")\n print(\n \" (Cell) SMSE \"\n + mode\n + \" Lower Bound (on dirty pos): {:.3f}\".format(\n repair_ret[\"mse_lower_bd_dirtycells\"]\n )\n )\n print(\n \" (Cell) SMSE \"\n + mode\n + \" Upper Bound (on dirty pos): {:.3f}\".format(\n repair_ret[\"mse_upper_bd_dirtycells\"]\n )\n )\n print(\n \" (Cell) SMSE \"\n + mode\n + \" Repair (on dirty pos): {:.3f}\".format(\n repair_ret[\"mse_repair_dirtycells\"]\n )\n )\n print(\n \" (Cell) SMSE \"\n + mode\n + \" Repair (on clean pos): {:.3f}\".format(\n repair_ret[\"mse_repair_cleancells\"]\n )\n )\n print(\n \" (Cell) SMSE \"\n + mode\n + \" Repair (on clean pos for dirty points): {:.3f}\".format(\n repair_ret[\"mse_repair_cleancells_outliers\"]\n )\n )\n print(\"\\n\\n\")\n\n if trusted_mask is not None:\n print(\"\\n\\n\")\n out_string = \"====> trusted set: Epoch: {} Avg. Loss: {:.3f}\\t\".format(\n epoch, loss_trusted_ret[\"total_loss\"]\n )\n out_string += \"\".join(\n [\n \"{}: {:.3f}\\t\".format(\"Avg. \" + _key.upper(), _value)\n for _key, _value in loss_trusted_ret.items()\n if _key != \"total_loss\"\n ]\n )\n print(out_string)\n\n if args.verbose_metrics_epoch:\n print(\n \" (Cell) Avg. 
\"\n + mode\n + \" (trusted set) AUC: {} \".format(auc_cell_nll_ts)\n )\n print(\n \" (Cell) Avg. \"\n + mode\n + \" (trusted set) AVPR: {} \".format(avpr_cell_nll_ts)\n )\n print(\"\\n\\n\")\n if args.verbose_metrics_feature_epoch:\n # TODO: might want to restrict if image dataset?\n print(\" AUC per feature: \\n {}\".format(auc_vec_nll_ts))\n print(\" AVPR per feature: \\n {}\".format(avpr_vec_nll_ts))\n print(\"\\n\\n\")\n print(\n \" (Row) \"\n + mode\n + \" (trusted set) AUC: {} \".format(auc_row_nll_ts)\n )\n print(\n \" (Row) \"\n + mode\n + \" (trusted set) AVPR: {} \".format(avpr_row_nll_ts)\n )\n print(\"\\n\\n\")\n if args.semi_supervise:\n print(\n \" (Row) \"\n + mode\n + \" (trusted set) CLASSF_Y AUC: {} \".format(auc_row_class_y_ts)\n )\n print(\n \" (Row) \"\n + mode\n + \" (trusted set) CLASSF_Y AVPR: {} \".format(avpr_row_class_y_ts)\n )\n print(\"\\n\\n\")\n print(\n \" (Cell) SMSE \"\n + mode\n + \" (trusted set) Lower Bound (on dirty pos): {:.3f}\".format(\n repair_trusted_ret[\"mse_lower_bd_dirtycells\"]\n )\n )\n print(\n \" (Cell) SMSE \"\n + mode\n + \" (trusted set) Upper Bound (on dirty pos): {:.3f}\".format(\n repair_trusted_ret[\"mse_upper_bd_dirtycells\"]\n )\n )\n print(\n \" (Cell) SMSE \"\n + mode\n + \" (trusted set) Repair (on dirty pos): {:.3f}\".format(\n repair_trusted_ret[\"mse_repair_dirtycells\"]\n )\n )\n print(\n \" (Cell) SMSE \"\n + mode\n + \" (trusted set) Repair (on clean pos): {:.3f}\".format(\n repair_trusted_ret[\"mse_repair_cleancells\"]\n )\n )\n print(\n \" (Cell) SMSE \"\n + mode\n + \" (trusted set) Repair (on clean pos for dirty points): {:.3f}\".format(\n repair_trusted_ret[\"mse_repair_cleancells_outliers\"]\n )\n )\n print(\"\\n\\n\")\n\n if args.save_on:\n losses_save[mode][epoch] = list(loss_ret.values())\n losses_save[mode][epoch] += [\n auc_cell_nll,\n avpr_cell_nll,\n auc_row_nll,\n avpr_row_nll,\n auc_row_class_y,\n avpr_row_class_y,\n repair_ret[\"mse_lower_bd_dirtycells\"],\n repair_ret[\"mse_upper_bd_dirtycells\"],\n repair_ret[\"mse_repair_dirtycells\"],\n repair_ret[\"mse_repair_cleancells\"],\n repair_ret[\"mse_repair_cleancells_outliers\"],\n ]\n if (mode == \"train\") and (trusted_mask is not None):\n losses_save[\"trusted\"][epoch] = list(loss_trusted_ret.values())\n losses_save[\"trusted\"][epoch] += [\n auc_cell_nll_ts,\n avpr_cell_nll_ts,\n auc_row_nll_ts,\n avpr_row_nll_ts,\n auc_row_class_y_ts,\n avpr_row_class_y_ts,\n repair_trusted_ret[\"mse_lower_bd_dirtycells\"],\n repair_trusted_ret[\"mse_upper_bd_dirtycells\"],\n repair_trusted_ret[\"mse_repair_dirtycells\"],\n repair_trusted_ret[\"mse_repair_cleancells\"],\n repair_trusted_ret[\"mse_repair_cleancells_outliers\"],\n ]\n\n elif (mode == \"train\") and (trusted_mask is None):\n losses_save[\"trusted\"][epoch] = [-10.0] * len(loss_ret.values())\n losses_save[\"trusted\"][epoch] += [-10.0] * 11\n\n\ndef save_to_csv(\n model,\n data_loader_X,\n X_data,\n X_data_clean,\n target_errors,\n trusted_mask,\n attributes,\n losses_save,\n dataset_obj,\n folder_output,\n args,\n mode=\"train\",\n kl_beta=1.0,\n reg_scheduler_val=1.0,\n):\n\n \"\"\" This method performs all operations needed to save the data to csv \"\"\"\n\n # Create saving folderes\n try:\n os.makedirs(folder_output, mode=0o777)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n\n ### Evaluate model\n _, metric_ret, _, metric_trusted_ret = evaluation_phase(\n model,\n data_loader_X,\n X_data,\n dataset_obj,\n args,\n mode,\n trusted_mask,\n kl_beta,\n reg_scheduler_val,\n target_errors,\n 
)\n\n repair_ret, repair_trusted_ret = repair_phase(\n model,\n X_data,\n X_data_clean,\n dataset_obj,\n args,\n target_errors,\n mode,\n trusted_mask,\n )\n\n ## calc cell metrics\n auc_cell_nll, auc_vec_nll, avpr_cell_nll, avpr_vec_nll = gen_utils.cell_metrics(\n target_errors, metric_ret[\"nll_score\"]\n )\n\n # store AVPR for features (cell only)\n df_avpr_feat_cell = pd.DataFrame([], index=[\"AVPR_nll\"], columns=attributes)\n df_avpr_feat_cell.loc[\"AVPR_nll\"] = avpr_vec_nll\n df_avpr_feat_cell.to_csv(folder_output + \"/\" + mode + \"_avpr_features.csv\")\n\n # store AUC for features (cell only)\n df_auc_feat_cell = pd.DataFrame([], index=[\"AUC_nll\"], columns=attributes)\n df_auc_feat_cell.loc[\"AUC_nll\"] = auc_vec_nll\n df_auc_feat_cell.to_csv(folder_output + \"/\" + mode + \"_auc_features.csv\")\n\n # trusted set compute analysis\n if (trusted_mask is not None) and (mode == \"train\"):\n ## calc cell metrics\n (\n auc_cell_nll_ts,\n auc_vec_nll_ts,\n avpr_cell_nll_ts,\n avpr_vec_nll_ts,\n ) = gen_utils.cell_metrics(\n target_errors[trusted_mask, :], metric_trusted_ret[\"nll_score\"]\n )\n\n # store AVPR for features (cell only)\n df_avpr_feat_cell_ts = pd.DataFrame([], index=[\"AVPR_nll\"], columns=attributes)\n df_avpr_feat_cell_ts.loc[\"AVPR_nll\"] = avpr_vec_nll_ts\n df_avpr_feat_cell_ts.to_csv(folder_output + \"/trusted_avpr_features.csv\")\n\n # store AUC for features (cell only)\n df_auc_feat_cell_ts = pd.DataFrame([], index=[\"AUC_nll\"], columns=attributes)\n df_auc_feat_cell_ts.loc[\"AUC_nll\"] = auc_vec_nll_ts\n df_auc_feat_cell_ts.to_csv(folder_output + \"/trusted_auc_features.csv\")\n\n ### Store data from Epochs\n columns = [\"Avg. \" + col_name.upper() for col_name in model.loss_ret_names]\n columns += [\n \"AUC Cell nll score\",\n \"AVPR Cell nll score\",\n \"AUC Row nll score\",\n \"AVPR Row nll score\",\n \"AUC Row class_y score\",\n \"AVPR Row class_y score\",\n \"Error lower-bound on dirty pos\",\n \"Error upper-bound on dirty pos\",\n \"Error repair on dirty pos\",\n \"Error repair on clean pos\",\n \"Error repair on clean pos - dirty points\",\n ]\n\n df_out = pd.DataFrame.from_dict(losses_save[mode], orient=\"index\", columns=columns)\n df_out.index.name = \"Epochs\"\n df_out.to_csv(folder_output + \"/\" + mode + \"_epochs_data.csv\")\n\n if (trusted_mask is not None) and (mode == \"train\"):\n df_out_ts = pd.DataFrame.from_dict(\n losses_save[\"trusted\"], orient=\"index\", columns=columns\n )\n df_out_ts.index.name = \"Epochs\"\n df_out_ts.to_csv(folder_output + \"/trusted_epochs_data.csv\")\n\n ### Store errors per feature\n\n df_errors_repair = pd.DataFrame(\n [],\n index=[\n \"error_lowerbound_dirtycells\",\n \"error_repair_dirtycells\",\n \"error_upperbound_dirtycells\",\n \"error_repair_cleancells\",\n \"error_repair_cleancells_dirtypoints\",\n ],\n columns=attributes,\n )\n\n df_errors_repair.loc[\"error_lowerbound_dirtycells\"] = repair_ret[\n \"errors_per_feature\"\n ][0].cpu()\n df_errors_repair.loc[\"error_repair_dirtycells\"] = repair_ret[\"errors_per_feature\"][\n 1\n ].cpu()\n df_errors_repair.loc[\"error_upperbound_dirtycells\"] = repair_ret[\n \"errors_per_feature\"\n ][2].cpu()\n df_errors_repair.loc[\"error_repair_cleancells\"] = repair_ret[\"errors_per_feature\"][\n 3\n ].cpu()\n df_errors_repair.loc[\"error_repair_cleancells_dirtypoints\"] = repair_ret[\n \"errors_per_feature\"\n ][4].cpu()\n\n df_errors_repair.to_csv(folder_output + \"/\" + mode + \"_error_repair_features.csv\")\n\n if (trusted_mask is not None) and (mode == 
\"train\"):\n\n df_errors_repair_ts = pd.DataFrame(\n [],\n index=[\n \"error_lowerbound_dirtycells\",\n \"error_repair_dirtycells\",\n \"error_upperbound_dirtycells\",\n \"error_repair_cleancells\",\n \"error_repair_cleancells_dirtypoints\",\n ],\n columns=attributes,\n )\n\n df_errors_repair_ts.loc[\"error_lowerbound_dirtycells\"] = repair_trusted_ret[\n \"errors_per_feature\"\n ][0].cpu()\n df_errors_repair_ts.loc[\"error_repair_dirtycells\"] = repair_trusted_ret[\n \"errors_per_feature\"\n ][1].cpu()\n df_errors_repair_ts.loc[\"error_upperbound_dirtycells\"] = repair_trusted_ret[\n \"errors_per_feature\"\n ][2].cpu()\n df_errors_repair_ts.loc[\"error_repair_cleancells\"] = repair_trusted_ret[\n \"errors_per_feature\"\n ][3].cpu()\n df_errors_repair_ts.loc[\n \"error_repair_cleancells_dirtypoints\"\n ] = repair_trusted_ret[\"errors_per_feature\"][4].cpu()\n\n df_errors_repair_ts.to_csv(folder_output + \"/trusted_error_repair_features.csv\")\n\n\n# Running Options:\n#\n#\n#\n\n\ndef main(args):\n\n # NOTE: use flag: --semi-supervise for now, then make model dependent?\n\n # Load datasets\n\n # train\n (\n train_loader,\n X_train,\n target_errors_train,\n dataset_obj,\n attributes,\n trusted_mask,\n ) = gen_utils.load_data(\n args.data_folder,\n args.batch_size,\n is_train=True,\n get_data_idxs=True,\n semi_sup_data=True,\n use_binary_img=args.use_binary_img,\n trust_set_name=args.trust_set_name,\n )\n\n train_loader_no_shuff = torch.utils.data.DataLoader(\n dataset_obj, batch_size=args.batch_size, shuffle=False\n )\n\n # validation\n (\n valid_loader,\n X_valid,\n target_errors_valid,\n dataset_valid_obj,\n _,\n ) = gen_utils.load_data(\n args.data_folder,\n args.batch_size,\n is_train=False,\n use_binary_img=args.use_binary_img,\n )\n\n valid_loader_no_shuff = torch.utils.data.DataLoader(\n dataset_valid_obj, batch_size=args.batch_size, shuffle=False\n )\n\n # test\n test_loader, X_test, target_errors_test, dataset_test_obj, _ = gen_utils.load_data(\n args.data_folder,\n args.batch_size,\n is_train=False,\n use_binary_img=args.use_binary_img,\n )\n\n test_loader_no_shuff = torch.utils.data.DataLoader(\n dataset_test_obj, batch_size=args.batch_size, shuffle=False\n )\n\n # -> clean versions for evaluation\n (\n train_clean_loader,\n X_train_clean,\n _,\n dataset_obj_train_clean,\n _,\n ) = gen_utils.load_data(\n args.data_folder,\n args.batch_size,\n is_train=True,\n is_clean=True,\n stdize_dirty=True,\n use_binary_img=args.use_binary_img,\n )\n\n train_clean_loader_no_shuff = torch.utils.data.DataLoader(\n dataset_obj_train_clean, batch_size=args.batch_size, shuffle=False\n )\n\n _, X_valid_clean, _, dataset_obj_valid_clean, _ = gen_utils.load_data(\n args.data_folder,\n args.batch_size,\n is_train=False,\n is_clean=True,\n stdize_dirty=True,\n use_binary_img=args.use_binary_img,\n )\n\n valid_clean_loader_no_shuff = torch.utils.data.DataLoader(\n dataset_obj_valid_clean, batch_size=args.batch_size, shuffle=False\n )\n\n _, X_test_clean, _, dataset_obj_test_clean, _ = gen_utils.load_data(\n args.data_folder,\n args.batch_size,\n is_train=False,\n is_clean=True,\n stdize_dirty=True,\n use_binary_img=args.use_binary_img,\n )\n\n test_clean_loader_no_shuff = torch.utils.data.DataLoader(\n dataset_obj_test_clean, batch_size=args.batch_size, shuffle=False\n )\n\n # if runnin on gpu, then load data there\n # TODO: Account for large datasets (big image datasets might overload GPU MEM)\n if args.cuda_on:\n X_train = X_train.cuda()\n X_valid = X_valid.cuda()\n X_test = X_test.cuda()\n\n 
target_errors_train = target_errors_train.cuda()\n target_errors_valid = target_errors_valid.cuda()\n target_errors_test = target_errors_test.cuda()\n\n X_train_clean = X_train_clean.cuda()\n X_valid_clean = X_valid_clean.cuda()\n X_test_clean = X_test_clean.cuda()\n\n trusted_mask = trusted_mask.cuda()\n\n # if supervised loss uses\n if args.use_sup_weights and args.semi_supervise:\n _num_outliers_ts = (trusted_mask * target_errors_train.any(dim=1)).sum()\n _num_inliers_ts = (\n trusted_mask * torch.logical_not(target_errors_train.any(dim=1))\n ).sum()\n args.qy_sup_weights = [\n 1.0,\n max(1.0, (_num_inliers_ts / _num_outliers_ts).item()),\n ]\n\n else:\n args.qy_sup_weights = None\n\n # Import model from the correct file\n runin_model = __import__(args.model_type)\n model = runin_model.VAE(dataset_obj, args)\n if args.load_model:\n model.load_state_dict(torch.load(args.load_model_path))\n\n print(args)\n\n if args.cuda_on:\n model.cuda()\n\n train_optim = StandardTrainer(\n model, args, lr_opt=args.lr, weight_decay_opt=args.l2_reg\n )\n\n # structs for saving data\n losses_save = {\n \"train\": {},\n \"validation\": {},\n \"test\": {},\n \"trusted\": {},\n \"train_per_feature\": {},\n \"validation_per_feature\": {},\n \"test_per_feature\": {},\n \"trusted_per_feature\": {},\n }\n\n # KL annealing scheduling\n kl_anneal = args.kl_anneal\n kl_beta_n_cycles = args.kl_anneal_cycles\n kl_beta_ratio = args.kl_anneal_ratio # 0.75; 0.25\n\n delay_n_epochs = args.kl_anneal_delay_epochs\n\n if kl_anneal and args.number_epochs > delay_n_epochs:\n if delay_n_epochs > 0:\n delay_beta_vec = np.zeros(delay_n_epochs) # 0.0 # 0.001 # 1e-6\n _delay_n_epochs = delay_n_epochs\n else:\n delay_beta_vec = []\n _delay_n_epochs = 0\n\n kl_beta_vec = frange_cycle_linear(\n args.kl_anneal_start,\n args.kl_anneal_stop,\n args.number_epochs - _delay_n_epochs,\n n_cycle=kl_beta_n_cycles,\n ratio=kl_beta_ratio,\n )\n\n kl_beta_vec = np.concatenate((delay_beta_vec, kl_beta_vec))\n\n else:\n kl_beta_vec = np.ones(args.number_epochs) * args.kl_beta_const\n # 1.0, 0.0; 0.001;\n\n print(kl_beta_vec)\n\n # Regularizer scheduling\n if args.dist_corr_reg and args.number_epochs > args.reg_delay_n_epochs:\n\n if args.reg_delay_n_epochs > 0:\n delay_reg_vec = np.zeros(args.reg_delay_n_epochs)\n _delay_n_epochs = args.reg_delay_n_epochs\n else:\n delay_reg_vec = []\n _delay_n_epochs = 0\n\n reg_schedule_vec = frange_cycle_linear(\n 1e-6,\n 1.0,\n args.number_epochs - _delay_n_epochs,\n n_cycle=1,\n ratio=args.reg_schedule_ratio,\n )\n\n reg_schedule_vec = np.concatenate((delay_reg_vec, reg_schedule_vec))\n\n else:\n reg_schedule_vec = np.ones(args.number_epochs)\n\n print(reg_schedule_vec)\n\n # option: train on clean data instead (e.g. 
for testing \"compression hypothesis\")\n if args.train_on_clean_data:\n _train_loader_used = train_clean_loader\n _train_loader_no_shuff = train_clean_loader_no_shuff\n _valid_loader_no_shuff = valid_clean_loader_no_shuff\n _test_loader_no_shuff = test_clean_loader_no_shuff\n _X_train = X_train_clean\n _X_valid = X_valid_clean\n _X_test = X_test_clean\n\n else:\n # standard\n _train_loader_used = train_loader\n _train_loader_no_shuff = train_loader_no_shuff\n _valid_loader_no_shuff = valid_loader_no_shuff\n _test_loader_no_shuff = test_loader_no_shuff\n _X_train = X_train\n _X_valid = X_valid\n _X_test = X_test\n\n # Run epochs\n for epoch in range(1, args.number_epochs + 1):\n\n kl_beta_val = kl_beta_vec[epoch - 1]\n reg_schedule_val = reg_schedule_vec[epoch - 1]\n\n print(kl_beta_val)\n\n print(reg_schedule_val)\n\n ## Train Phase\n training_phase(\n model,\n train_optim,\n _train_loader_used, # train_loader (done)\n args,\n epoch,\n trusted_mask,\n kl_beta_val,\n reg_schedule_val,\n )\n\n # Compute losses and metrics per epoch\n compute_metrics(\n model,\n _train_loader_no_shuff,\n _X_train,\n dataset_obj,\n args,\n epoch,\n losses_save,\n X_train_clean,\n target_errors_train,\n trusted_mask,\n mode=\"train\",\n kl_beta=kl_beta_val,\n reg_scheduler_val=reg_schedule_val,\n )\n\n ## Validation Phase\n compute_metrics(\n model,\n _valid_loader_no_shuff,\n _X_valid,\n dataset_valid_obj,\n args,\n epoch,\n losses_save,\n X_valid_clean,\n target_errors_valid,\n None,\n mode=\"validation\",\n kl_beta=kl_beta_val,\n reg_scheduler_val=reg_schedule_val,\n )\n\n ## Test Phase\n compute_metrics(\n model,\n _test_loader_no_shuff,\n _X_test,\n dataset_test_obj,\n args,\n epoch,\n losses_save,\n X_test_clean,\n target_errors_test,\n None,\n mode=\"test\",\n kl_beta=kl_beta_val,\n reg_scheduler_val=reg_schedule_val,\n )\n\n # save to folder AVPR / AUC per feature\n if args.save_on:\n\n # create folder for saving experiment data (if necessary)\n folder_output = args.output_folder + args.model_type # \"/\" +\n\n ### Train Data\n save_to_csv(\n model,\n _train_loader_no_shuff,\n _X_train,\n X_train_clean,\n target_errors_train,\n trusted_mask,\n attributes,\n losses_save,\n dataset_obj,\n folder_output,\n args,\n mode=\"train\",\n kl_beta=kl_beta_vec[-1],\n reg_scheduler_val=reg_schedule_vec[-1],\n )\n\n ### Validation Data\n save_to_csv(\n model,\n _valid_loader_no_shuff,\n _X_valid,\n X_valid_clean,\n target_errors_valid,\n None,\n attributes,\n losses_save,\n dataset_valid_obj,\n folder_output,\n args,\n mode=\"validation\",\n kl_beta=kl_beta_vec[-1],\n reg_scheduler_val=reg_schedule_vec[-1],\n )\n\n ### Test Data\n save_to_csv(\n model,\n _test_loader_no_shuff,\n _X_test,\n X_test_clean,\n target_errors_test,\n None,\n attributes,\n losses_save,\n dataset_test_obj,\n folder_output,\n args,\n mode=\"test\",\n kl_beta=kl_beta_vec[-1],\n reg_scheduler_val=reg_schedule_vec[-1],\n )\n\n # save model parameters\n model.cpu()\n torch.save(model.state_dict(), folder_output + \"/model_params.pth\")\n\n # save to .json file the args that were used for running the model\n with open(folder_output + \"/args_run.json\", \"w\") as outfile:\n json.dump(vars(args), outfile, indent=4, sort_keys=True)\n\n return locals() # to be used in printing / notebooks / debug\n\n\nif __name__ == \"__main__\":\n\n args = parser_arguments.getArgs(sys.argv[1:])\n\n dict_main_vars = 
main(args)\n","repo_name":"sfme/clsvae-error-repair","sub_path":"src/repair_syserr_models/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":26754,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"34270010509","text":"from symbol import parameters\nimport pika\nimport time\nimport socket\nimport os\nfrom functools import partial\n\nclass EmailService (object):\n\n def __init__(self):\n \n self.connection = None\n self.channel = None\n self.exchange = \"topic_logs\"\n\n try:\n amqp_url = os.environ['AMQP_URL']\n parameters = pika.URLParameters(amqp_url)\n self.connection = pika.BlockingConnection(parameters) \n self.on_open ()\n \n except KeyboardInterrupt:\n if (self.connection != None):\n self.connection.close()\n \n except Exception as e:\n print(repr(e))\n \n\n def on_open (self):\n \"\"\"create a new channel\n\n Returns:\n BOOL: return TRUE if the channel creation has succeded\n \"\"\"\n try:\n print (\"CONNECTION OPEN\")\n \n self.channel = self.connection.channel()\n return self.on_channel_open()\n except Exception as e:\n print(repr(e))\n if self.connection != None:\n self.connection.close()\n return False\n\n def on_channel_open (self):\n \"\"\" declare the exchange (type=topic) after channel creation. The exchange is called \"topic_logs\". \n\n Returns:\n BOOL: outcome of the definition, True if succeded\n \"\"\"\n try:\n print (\"CHANNEL OPEN\")\n self.channel.exchange_declare(exchange='topic_logs', exchange_type='topic')\n return self.on_exchange ()\n except Exception as e:\n print(repr(e))\n if self.connection != None:\n self.connection.close()\n return False\n\n \n def on_exchange(self):\n \"\"\" Define request and response queues\n\n Returns:\n BOOL: outcome of the definition, True if succeded\n \"\"\"\n try :\n print('Have exchange')\n #CODA PER LE RICHIESTE PROVENIENTI DALL'ACCOUNT\n result = self.channel.queue_declare(queue='emailQueue')\n self.requestQueue = result.method.queue\n\n return self.on_queue()\n except Exception as e:\n print(repr(e))\n if self.connection != None:\n self.connection.close()\n return False\n\n def on_queue(self):\n try:\n print('Have queue')\n\n # This call tells the server to send us 1 message in advance.\n # This helps overall throughput, but it does require us to deal\n # with the messages we have promptly.\n self.channel.basic_qos(prefetch_count=1)\n return self.on_qos()\n except Exception as e:\n print(repr(e))\n if self.connection != None:\n self.connection.close()\n return False\n\n\n def on_qos(self):\n try:\n print('Set QoS')\n self.channel.queue_bind(queue=self.requestQueue, exchange=self.exchange)\n return self.on_bind()\n except Exception as e:\n print(repr(e))\n if self.connection != None:\n self.connection.close()\n return False\n \n\n\n def on_bind(self):\n \"\"\" define the CALLBACK FUNCTION\n\n Returns:\n BOOL: outcome of the definition\n \"\"\"\n try:\n print('Bound')\n self.channel.basic_consume(\n queue=self.requestQueue,\n on_message_callback=self.onRequest,\n auto_ack=True)\n\n print(\"Starting EMAIL SERVICE. 
[x] Awaiting email requests\")\n self.channel.start_consuming()\n return True\n except Exception as e:\n print(repr(e))\n if self.connection != None:\n self.connection.close()\n return False\n \n\n def onRequest(self,ch,method,props,body):\n \"\"\"CALLBACK FUNCTION that simulate the email sending\n\n Args:\n ch (pika.channel.Channel): channel\n method (pika.spec.Basic.Deliver):\n properties (pika.spec.BasicProperties): properties associated to message consumed\n body (bytes): message consumed\n \"\"\"\n try:\n \n #TODO SEND EMAIL\n tokens = body.decode(\"utf-8\").split('#')\n print(\"\\nREQUEST: {}\".format(tokens))\n toQueue = tokens[-1]\n print (tokens[-1])\n\n message = \"EMAIL has been sent to {}\".format(str(body))\n if toQueue == \"Accounting\":\n self.channel.basic_publish(exchange='topic_logs', routing_key=\"Accounting.response\", body=message)\n elif toQueue == \"Reservation\":\n self.channel.basic_publish(exchange='topic_logs', routing_key=\"Reservation.response\", body=message)\n elif toQueue == \"Payment\":\n self.channel.basic_publish(exchange='topic_logs', routing_key=\"Payment.response\", body=message) \n\n except Exception as e:\n print(repr(e))\n if self.connection != None:\n self.connection.close()\n \n \nprint(\"Starting EMAIL SERVICE. [x] BEFORE INIT\")\n\nemailService = EmailService()\n\n","repo_name":"ludovico99/SEAt","sub_path":"SEAT_Project/emailService/src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5235,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"24619078682","text":"import urllib.parse\nimport requests\n\nclass HostifyAPI:\n def __init__(self, session, gqlAddress=\"https://gql.hostify.cz/gql\"):\n self.address = gqlAddress\n self.cookies = {\"session\": session}\n if self.getLoggedIn():\n self.getShortAccount()\n print(\"[INFO] Logged in as\", self.account[\"username\"])\n else:\n raise Exception(\"[ERROR] Could not log in, please try again!\")\n def __makeRequest(self, data, method=\"POST\", url=None):\n if url == None:\n url = self.address\n if method == \"POST\":\n r = requests.post(url, cookies=self.cookies, json=data)\n if method == \"GET\":\n r = requests.post(url, cookies=self.cookies)\n try:\n return r.json()\n except BaseException as err:\n print(\"[ERROR] Error while parsing JSON from request:\", err)\n return r.text\n ## API endpoints starts here\n def getLoggedIn(self):\n data = {\"operationName\":\"loggedIn\",\"variables\":{},\"query\":\"query loggedIn {\\n loggedIn\\n}\\n\"}\n r = self.__makeRequest(data)\n return r[\"data\"][\"loggedIn\"]\n\n def getShortAccount(self):\n data = {\"operationName\":\"getShortAccount\",\"variables\":{},\"query\":\"query getShortAccount {\\n account {\\n ...ShortUser\\n __typename\\n }\\n}\\n\\nfragment ShortUser on User {\\n id\\n username\\n email\\n credit\\n activated\\n shortid\\n hash\\n role\\n agreements {\\n vop\\n gdpr\\n __typename\\n }\\n __typename\\n}\\n\"}\n r = self.__makeRequest(data)\n self.account = r[\"data\"][\"account\"]\n\n def getAlerts(self):\n data = {\"operationName\":\"getAlerts\",\"variables\":{},\"query\":\"query getAlerts {\\n alerts {\\n id\\n type\\n message\\n start\\n __typename\\n }\\n}\\n\"}\n r = self.__makeRequest(data)\n return r[\"data\"][\"alerts\"]\n\n def getUserServices(self):\n data = {\"operationName\":\"getUserServices\",\"variables\":{},\"query\":\"query getUserServices {\\n account {\\n services {\\n id\\n name\\n type\\n shared\\n __typename\\n }\\n __typename\\n }\\n}\\n\"}\n r = 
self.__makeRequest(data)\n return r[\"data\"][\"account\"][\"services\"]\n\n def getUserNoSharedServices(self):\n services = self.getUserServices()\n cache = []\n for service in services:\n if not service[\"shared\"]:\n cache.append(service)\n return cache\n\n def getAllServers(self):\n data = {\"operationName\":\"getAllServers\",\"variables\":{},\"query\":\"query getAllServers {\\n minecraftServers {\\n ...FullMinecraftServer\\n __typename\\n }\\n}\\n\\nfragment FullMinecraftServer on MinecraftServer {\\n id\\n name\\n ip\\n port\\n dns\\n eula\\n jar\\n expires\\n shared\\n status\\n ownedBy\\n favicon\\n dedicated {\\n hostname\\n __typename\\n }\\n permissions {\\n read\\n write\\n __typename\\n }\\n storage {\\n used\\n reserved\\n __typename\\n }\\n package {\\n ram\\n __typename\\n }\\n players {\\n online\\n max\\n list\\n __typename\\n }\\n version {\\n type\\n version\\n image\\n __typename\\n }\\n resources {\\n type\\n tps\\n __typename\\n }\\n __typename\\n}\\n\"}\n r = self.__makeRequest(data)\n return r[\"data\"][\"minecraftServers\"]\n\n def getShortMinecraftServer(self, id):\n data = {\"operationName\":\"getShortMinecraftServer\",\"variables\":{\"id\":id},\"query\":\"query getShortMinecraftServer($id: ID!) {\\n minecraftServer(id: $id) {\\n ...GlobalMinecraftServer\\n __typename\\n }\\n}\\n\\nfragment GlobalMinecraftServer on MinecraftServer {\\n id\\n name\\n locked\\n teamspeakActive\\n permissions {\\n read\\n write\\n __typename\\n }\\n dedicated {\\n hostname\\n __typename\\n }\\n __typename\\n}\\n\"}\n r = self.__makeRequest(data)\n return r[\"data\"][\"minecraftServer\"]\n\n def getMinecraftServerDashboard(self, id):\n data = {\"operationName\":\"getMinecraftServerDashboard\",\"variables\":{\"id\":id},\"query\":\"query getMinecraftServerDashboard($id: ID!) {\\n minecraftServer(id: $id) {\\n ...MinecraftServerDashboard\\n __typename\\n }\\n}\\n\\nfragment MinecraftServerDashboard on MinecraftServer {\\n id\\n name\\n ip\\n port\\n dns\\n expires\\n status\\n jar\\n eula\\n shared\\n ownedBy\\n alerts {\\n type\\n message\\n __typename\\n }\\n resources {\\n cpu\\n ram\\n tps\\n type\\n __typename\\n }\\n package {\\n ram\\n __typename\\n }\\n storage {\\n used\\n reserved\\n __typename\\n }\\n players {\\n online\\n max\\n list\\n __typename\\n }\\n commands {\\n id\\n name\\n command\\n custom\\n __typename\\n }\\n __typename\\n}\\n\"}\n r = self.__makeRequest(data)\n return r[\"data\"][\"minecraftServer\"]\n\n def getMinecraftServerLogs(self, id):\n data = {\"operationName\":\"getMinecraftServerLogs\",\"variables\":{\"id\":id},\"query\":\"query getMinecraftServerLogs($id: ID!) {\\n minecraftServer(id: $id) {\\n logs\\n __typename\\n }\\n}\\n\"}\n r = self.__makeRequest(data)\n return r[\"data\"][\"minecraftServer\"][\"logs\"]\n\n def minecraftServerControlRestart(self, id):\n data = {\"operationName\":\"minecraftServerControlRestart\",\"variables\":{\"id\":id},\"query\":\"mutation minecraftServerControlRestart($id: ID!) {\\n minecraftServerControlRestart(id: $id)\\n}\\n\"}\n r = self.__makeRequest(data)\n try:\n return r[\"data\"][\"minecraftServerControlRestart\"]\n except:\n return False\n\n def minecraftServerControlKill(self, id):\n data = {\"operationName\":\"minecraftServerControlKill\",\"variables\":{\"id\":id},\"query\":\"mutation minecraftServerControlKill($id: ID!) 
{\\n minecraftServerControlKill(id: $id)\\n}\\n\"}\n r = self.__makeRequest(data)\n try:\n return r[\"data\"][\"minecraftServerControlKill\"]\n except:\n return False\n\n def minecraftServerControlStop(self, id):\n data = {\"operationName\":\"minecraftServerControlStop\",\"variables\":{\"id\":id},\"query\":\"mutation minecraftServerControlStop($id: ID!) {\\n minecraftServerControlStop(id: $id)\\n}\\n\"}\n r = self.__makeRequest(data)\n try:\n return r[\"data\"][\"minecraftServerControlStop\"]\n except:\n return False\n\n def minecraftServerControlStart(self, id):\n data = {\"operationName\":\"minecraftServerControlStart\",\"variables\":{\"id\":id},\"query\":\"mutation minecraftServerControlStart($id: ID!) {\\n minecraftServerControlStart(id: $id)\\n}\\n\"}\n r = self.__makeRequest(data)\n try:\n return r[\"data\"][\"minecraftServerControlStart\"]\n except:\n return False\n def getMinecraftServerBackups(self, id):\n path = urllib.parse.quote(\"/backups/minecraft/\"+str(id))\n url = \"https://gql.hostify.cz/files/list?path=\"+path\n r = requests.get(url, cookies=self.cookies).json()\n return r[\"files\"]\n def getMinecraftServerBackupDownloadURL(self, id, backup):\n path = \"/backups/minecraft/\"+str(id)+\"/\"+str(backup)\n path = urllib.parse.quote(path)\n url = \"https://gql.hostify.cz/files/data?path=\"+path\n return url\n def minecraftServerReinstall(self, id, version, clean=False):\n data = {\"operationName\":\"minecraftServerReinstall\",\"variables\":{\"id\":id,\"version\":version,\"clean\":clean},\"query\":\"mutation minecraftServerReinstall($id: ID!, $version: ID!, $clean: Boolean!) {\\n minecraftServerReinstall(id: $id, version: $version, clean: $clean) {\\n id\\n __typename\\n }\\n}\\n\"}\n r = self.__makeRequest(data)\n return r","repo_name":"HonzaLed/Hostify-API","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":7566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"74139122519","text":"from odoo import models, fields, api,_ , exceptions\nfrom datetime import datetime\nfrom odoo.exceptions import ValidationError\n\nclass CumulativeVendorEvaluation(models.Model):\n _name = 'cumulative.vendor.evaluation'\n _description = 'Vendor Evaluation'\n\n vendor_id = fields.Many2one(comodel_name='res.partner', string='Vendor')\n name = fields.Char(related=\"vendor_id.name\")\n cumulative_eval = fields.Float(string='Cumulative',compute=\"_value_compute\")\n last_eval = fields.Float('Last Evaluation')\n owner = fields.Selection([\n ('account', 'Finance'),\n ('purchase' , 'Purchase'),\n ('stock' , 'Stock'),\n ], string='Department')\n line_ids = fields.One2many(comodel_name='vendor.evaluation', inverse_name='evaluation_id', string='')\n\n\n @api.depends('line_ids.value')\n def _value_compute(self):\n sum = 0\n last_eval = 0 \n for rec in self:\n for line in rec.line_ids:\n sum += line.value\n last_eval = line.value\n if len(rec.line_ids) > 0:\n rec.cumulative_eval = sum /len(rec.line_ids)\n else:\n rec.cumulative_eval = 0\n rec.write({\n 'last_eval' : last_eval\n })\n sum = 0\n\n\nclass VendoeEvaluation(models.Model):\n _name = 'vendor.evaluation'\n _description = 'description'\n\n\n evaluation_id = fields.Many2one(comodel_name='cumulative.vendor.evaluation', string='Cumulative Evaluation')\n vendor_id = fields.Many2one(comodel_name='res.partner', string='Vendor')\n inv_vendor_id = fields.Integer(related='vendor_id.id')\n owner = fields.Selection([\n ('account', 'Finance'),\n ('purchase' , 'Purchase'),\n 
('stock' , 'Stock'),\n ], string='Department' )\n inv_owner = fields.Selection([\n ('account', 'Finance'),\n ('purchase' , 'Purchase'),\n ('stock' , 'Stock'),\n ], related='owner')\n value = fields.Float(string='Value',compute=\"_value_compute\")\n date = fields.Date(string='Date' ,default=fields.Date.today)\n line_ids = fields.One2many(comodel_name='evaluation.details', inverse_name='evaluation_id', string='Details')\n res_id = fields.Integer(string='')\n vendor_type_id = fields.Many2one(comodel_name='vendor.type', string='Vendor Type')\n \n @api.onchange('vendor_type_id')\n def _onchange_vendor_type(self):\n if self.vendor_type_id:\n self.line_ids = False\n criteria = []\n criteria_ids = self.env['evaluation.criteria'].search([('owner' , '=' , self.owner),('vendor_type' , '=' , self.vendor_type_id.id)]).ids\n if len(criteria_ids) == 0:\n raise exceptions.ValidationError(_(\"Sorry, there are no evaluation criteria for this department and vendor type\"))\n for criteria_id in criteria_ids:\n criteria.append((0,0,{'criteria_id' : criteria_id}))\n self.line_ids = criteria\n @api.model\n def create(self, vals):\n vendor_id = vals['inv_vendor_id']\n owner = vals['inv_owner']\n vendor_type_id = vals['vendor_type_id']\n cumulative_evaluation = self.env['cumulative.vendor.evaluation'].search([('owner' , '=' , owner) , ('vendor_id' , '=' , vendor_id)])\n if not cumulative_evaluation:\n cumulative_evaluation = self.env['cumulative.vendor.evaluation'].create({\n 'owner' : owner,\n 'vendor_id' : vendor_id,\n 'line_ids' : False\n\n })\n vals['evaluation_id'] = cumulative_evaluation.id\n return super(VendoeEvaluation, self).create(vals)\n\n \n\n @api.depends('line_ids.value')\n def _value_compute(self):\n sum = 0\n for rec in self:\n for line in rec.line_ids:\n sum += line.value\n if len(rec.line_ids) > 0:\n rec.value = sum /len(rec.line_ids)\n else:\n rec.value = 0\n sum = 0\n\nclass EvaluationDetails(models.Model):\n _name = 'evaluation.details'\n _description = 'Evaluation Details'\n\n evaluation_id = fields.Many2one(comodel_name='vendor.evaluation', string='Evaluation')\n criteria_id = fields.Many2one(comodel_name='evaluation.criteria', string='Criteria')\n value = fields.Float(string='Value',default=0.0)\n vendor_id = fields.Integer(related=\"evaluation_id.vendor_id.id\",store=True)\n\n @api.constrains('value')\n def _constrains_value(self):\n for rec in self:\n if rec.value < 0 or rec.value > 10:\n raise ValidationError(_(\"Sorry, evaluation must be between 0 and 10\"))\n","repo_name":"McMillanWoods/odex25-Authority","sub_path":"odex25_purchase/vendor_evaluation/models/vendor_evaluation.py","file_name":"vendor_evaluation.py","file_ext":"py","file_size_in_byte":4691,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"}
{"seq_id":"38318008424","text":"import json\nimport time\nimport uuid\n\nfrom Crypto.Hash import SHA256\nfrom Crypto.PublicKey import RSA\nfrom Crypto.Signature import PKCS1_v1_5\n\nfrom classes.utxo import Utxo\n\nfrom exceptions.transaction import InvalidTransactionException\nfrom utils.debug import log\n\nclass Transaction:\n def __init__(\n self, \n sender_address, \n receiver_address, \n amount, \n transaction_inputs,\n sender_private_key: str = None, \n transaction_outputs=None, \n signature=None, \n transaction_id=None,\n 
trans_uuid=None,\n timestamp=None\n ):\n self.sender_address = sender_address\n self.receiver_address = receiver_address\n self.amount = amount\n self.trans_uuid = trans_uuid or transaction_id or uuid.uuid4().bytes\n self.transaction_id = transaction_id or SHA256.new(self.trans_uuid).hexdigest() # hex digest string\n self.transaction_inputs = transaction_inputs # list of Utxo objects\n self.transaction_outputs = transaction_outputs or []\n self.signature = signature or self.sign_transaction(sender_private_key)\n self.timestamp = timestamp or time.time()\n \n def sign_transaction(self, sender_private_key):\n \"\"\"\n Sign transaction with private key\n \"\"\"\n\n util = SHA256.new(self.trans_uuid)\n key = RSA.importKey(sender_private_key)\n signer = PKCS1_v1_5.new(key)\n signature = signer.sign(util)\n\n return signature\n \n def verify_signature(self):\n '''Verification of a received transaction\n\t\t'''\n key = RSA.importKey(self.sender_address)\n util = SHA256.new(self.trans_uuid)\n if PKCS1_v1_5.new(key).verify(util, self.signature):\n log.success('Transaction verified: ' + self.transaction_id)\n return True\n else:\n raise InvalidTransactionException(transaction=self, message='Error in transaction verification')\n\n def calculate_outputs(self):\n total = 0\n for utxo in self.transaction_inputs:\n # inputs were already validated, so just sum their amounts\n total += utxo.amount\n \n # Receiver utxo\n receiver_utxo = Utxo(\n previous_trans_id=self.transaction_id,\n amount=self.amount,\n recipient=self.receiver_address\n )\n \n sender_utxo = Utxo(\n previous_trans_id=self.transaction_id,\n amount=total - self.amount, # remainder (change) returned to the sender\n recipient=self.sender_address.decode()\n )\n transaction_outputs = [receiver_utxo, sender_utxo]\n self.transaction_outputs = transaction_outputs\n \n return transaction_outputs\n\n# Everything is serialized except sender's private key\n def to_dict(self):\n transaction_inputs = [Utxo.to_dict(ti) for ti in self.transaction_inputs]\n transaction_outputs = [Utxo.to_dict(to) for to in self.transaction_outputs]\n return dict(\n sender_address = self.sender_address.decode(),\n receiver_address = self.receiver_address,\n amount = self.amount,\n transaction_id = self.transaction_id,\n trans_uuid = self.trans_uuid.decode(encoding=\"ISO-8859-1\"),\n transaction_inputs = transaction_inputs,\n transaction_outputs = transaction_outputs,\n signature = self.signature.decode(encoding=\"ISO-8859-1\"),\n timestamp = self.timestamp\n )\n\n @staticmethod\n def from_dict(dictionary: dict):\n transaction_inputs = [Utxo.from_dict(ti) for ti in dictionary['transaction_inputs']]\n transaction_outputs = [Utxo.from_dict(to) for to in dictionary['transaction_outputs']]\n return Transaction(\n sender_address=dictionary['sender_address'].encode(),\n receiver_address=dictionary['receiver_address'],\n amount=dictionary['amount'],\n transaction_id=dictionary['transaction_id'],\n trans_uuid = dictionary['trans_uuid'].encode(encoding=\"ISO-8859-1\"),\n transaction_inputs=transaction_inputs,\n transaction_outputs=transaction_outputs,\n signature = dictionary['signature'].encode(encoding=\"ISO-8859-1\"),\n timestamp=dictionary['timestamp']\n )\n\n def __str__(self):\n return json.dumps(self.to_dict(), indent=4)\n\n def __repr__(self):\n return f'{self.amount} NBC from {self.sender_address} to {self.receiver_address} (inputs: {self.transaction_inputs}) at {self.timestamp}'
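\n\n# Illustrative usage sketch (assumes a PyCryptodome-style RSA key pair; the recipient\n# string and the empty inputs list are placeholders, not real wallet data):\n#\n#   from Crypto.PublicKey import RSA\n#   key = RSA.generate(2048)\n#   tx = Transaction(\n#       sender_address=key.publickey().exportKey(),  # bytes, as verify_signature() expects\n#       receiver_address='<recipient public key>',\n#       amount=10,\n#       transaction_inputs=[],  # real callers pass a list of Utxo objects\n#       sender_private_key=key.exportKey(),\n#   )\n#   assert tx.verify_signature()\n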
","repo_name":"adonistseriotis/noobcash","sub_path":"src/backend/classes/transaction.py","file_name":"transaction.py","file_ext":"py","file_size_in_byte":4640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"9137434661","text":"#!/usr/bin/env python3\nimport sys\nimport os\nimport requests\nimport json\nimport urllib3\nimport yaml\nfrom collections import OrderedDict\nimport argparse\nfrom copy import deepcopy\n\nurllib3.disable_warnings()\n\n\n\nclass SonicAPI:\n REQUEST_GET = 1\n REQUEST_PUT = 2\n REQUEST_POST = 3\n REQUEST_DELETE = 4\n AUTH_TYPE_BASIC_HTTP = 1\n AUTH_TYPE_HTTP_DIGEST = 2\n AUTH_TYPE_TFA_BEARER = 3\n\n\n def __send_post_request(self, url, body):\n if body == None:\n if self.auth_type == self.AUTH_TYPE_BASIC_HTTP:\n r = requests.post(url, auth=self.auth_basic_http_param, \\\n headers=self.headers, verify=False)\n else:\n r = requests.post(url, headers=self.headers, verify=False)\n else:\n body_str = json.dumps(body)\n if self.auth_type == self.AUTH_TYPE_BASIC_HTTP:\n r = requests.post(url, auth=self.auth_basic_http_param, \\\n headers=self.headers, data=body_str, verify=False)\n else:\n r = requests.post(url, headers=self.headers, data=body_str, \\\n verify=False)\n return r\n\n def __send_get_request(self, url, body):\n if body == None:\n if self.auth_type == self.AUTH_TYPE_BASIC_HTTP:\n r = requests.get(url, auth=self.auth_basic_http_param, \\\n headers=self.headers, verify=False)\n else:\n r = requests.get(url, headers=self.headers, verify=False)\n else:\n body_str = json.dumps(body)\n if self.auth_type == self.AUTH_TYPE_BASIC_HTTP:\n r = requests.get(url, auth=self.auth_basic_http_param, \\\n headers=self.headers, data=body_str, verify=False)\n else:\n r = requests.get(url, headers=self.headers, data=body_str, \\\n verify=False)\n return r\n\n def __send_put_request(self, url, body):\n if body == None:\n if self.auth_type == self.AUTH_TYPE_BASIC_HTTP:\n r = requests.put(url, auth=self.auth_basic_http_param, \\\n headers=self.headers, verify=False)\n else:\n r = requests.put(url, headers=self.headers, verify=False)\n else:\n body_str = json.dumps(body)\n if self.auth_type == self.AUTH_TYPE_BASIC_HTTP:\n r = requests.put(url, auth=self.auth_basic_http_param, \\\n headers=self.headers, data=body_str, verify=False)\n else:\n r = requests.put(url, headers=self.headers, data=body_str, \\\n verify=False)\n return r\n\n def send_api_request(self, request_type, api_endpoint, body):\n url = self.baseurl + api_endpoint\n if request_type == self.REQUEST_GET:\n print(\"GET \" + url)\n return self.__send_get_request(url, body)\n elif request_type == self.REQUEST_PUT:\n print(\"PUT \" + url)\n return self.__send_put_request(url, body)\n elif request_type == self.REQUEST_POST:\n print(\"POST \" + url)\n return self.__send_post_request(url, body)\n elif request_type == self.REQUEST_DELETE:\n print(\"DELETE \" + url)\n return requests.delete(url, verify=False)\n return\n\n def login(self):\n if self.auth_type == self.AUTH_TYPE_BASIC_HTTP:\n r = self.send_api_request(self.REQUEST_POST, \"/auth\", None)\n if r.status_code != 200:\n print(\"Login failed. 
HTTP Response Code: \" + str(r.status_code))\n return False\n else:\n print(\"Login successful.\")\n return True\n elif self.auth_type == self.AUTH_TYPE_HTTP_DIGEST:\n print(\"HTTP Digest is not yet supported\")\n elif self.auth_type == self.AUTH_TYPE_TFA_BEARER:\n tfa = input(\"Please enter in TFA Code:\")\n try:\n tfa_schema = deepcopy(self.sonicos_schema[\"tfa\"][\"properties\"])\n except ValueError:\n print(\"No Schemas loaded. Please load YAML file.\")\n tfa_schema[\"user\"] = self.username\n tfa_schema[\"password\"] = self.password\n tfa_schema[\"tfa\"] = tfa\n tfa_schema[\"override\"] = True\n r = self.send_api_request(self.REQUEST_POST, \"/tfa\", tfa_schema)\n \n if r.status_code != 200:\n print(\"Login failed. HTTP Response Code: \" + str(r.status_code))\n return False\n else:\n response = r.json()\n print(response[\"status\"][\"info\"])\n print(\"-----\")\n response_info = response[\"status\"][\"info\"][0]\n print(response_info)\n try:\n self.bearer_token = deepcopy(response_info['bearer_token'])\n except ValueError:\n print(\"Login failed. Bearer token not acquired.\")\n\n print(\"TFA Login successful.\")\n\n self.headers['Authorization'] = \"Bearer \" + self.bearer_token \n return True\n return r.status_code\n\n def logout(self):\n r = self.send_api_request(self.REQUEST_DELETE, \"/auth\", None)\n if r.status_code != 200:\n print(\"Logout failed. HTTP Response Code: \" + str(r.status_code))\n return False\n else:\n print(\"Logout successful.\")\n response = r.json()\n return True\n\n def load_yaml(self, yaml_file):\n #Load YAML file into JSON\n print(\"Loading \"+yaml_file+\" YAML file...\")\n with open(yaml_file, 'r') as stream:\n try:\n sonicos_json = yaml.safe_load(stream)\n except yaml.YAMLError as exc:\n print(exc)\n try:\n self.sonicos_schema = sonicos_json['components']['schemas']\n except ValueError:\n print(\"No Schemas in SonicOS API\")\n return\n\n def set_fw_url(self, hostname):\n self.baseurl = 'https://{0}/api/sonicos'.format(hostname)\n return\n\n def set_auth_parameters(self, username, password, auth_type):\n self.auth_type = auth_type\n self.username = username\n self.password = password\n if self.auth_type == self.AUTH_TYPE_BASIC_HTTP:\n self.auth_basic_http_param = (self.username, self.password)\n return\n\n def __init__(self, hostname):\n #Set firewall settings\n self.baseurl = 'https://{0}/api/sonicos'.format(hostname)\n self.headers = OrderedDict([\n ('Accept', 'application/json'),\n ('Content-Type', 'application/json'),\n ('Accept-Encoding', 'application/json'),\n ('Charset', 'UTF-8')])\n \n #Set authentication\n self.auth_type = self.AUTH_TYPE_BASIC_HTTP\n self.username = \"\"\n self.password = \"\"\n return\n\ndef main():\n print(\"SonicOS API Python Class\\n\")\n \n\nif __name__ == \"__main__\":\n\tmain()\n\n","repo_name":"tuysnwl/sonicos7api","sub_path":"sonicapi.py","file_name":"sonicapi.py","file_ext":"py","file_size_in_byte":7064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"17399355309","text":"from unittest.case import TestCase\n\n\ndef _binary_search(sorted_sequence, start, end, element):\n '''\n Good Case: last element is on of the same. 
so the whole lookup costs O(log n).\n Worst case: no pair adds up to the target and every recursion step runs another binary search, giving O(n log n).\n '''\n if start >= end:\n return start\n else:\n mid = (start + end) // 2\n mid_element = sorted_sequence[mid]\n if mid_element == element:\n return mid\n elif mid_element < element:\n return _binary_search(sorted_sequence, mid + 1, end, element)\n\n else:\n return _binary_search(sorted_sequence, start, mid - 1, element) # recurse on the index, not the element value\n\n\ndef _element_sum_rec(sorted_sequence, start, end, sum):\n if start >= end:\n raise ElementsNotFound()\n bigger_value = sorted_sequence[end]\n other_element = sum - bigger_value\n other_element_index = _binary_search(sorted_sequence, start, end, other_element)\n minor_element = sorted_sequence[other_element_index]\n if minor_element == other_element:\n return minor_element, bigger_value\n return _element_sum_rec(sorted_sequence, other_element_index, end - 1, sum)\n\n\ndef elements_sum(sorted_sequence, sum):\n start = 0\n end = len(sorted_sequence) - 1\n return _element_sum_rec(sorted_sequence, start, end, sum)\n\n\nclass ElementsNotFound(Exception):\n pass\n\n\nclass ElementsSumK(TestCase):\n def test_empty_list(self):\n self.assertRaises(ElementsNotFound, elements_sum, [], 0)\n\n def test_one_element(self):\n self.assertRaises(ElementsNotFound, elements_sum, [1], 0)\n\n def test_two_elements_with_sum(self):\n self.assertEqual((1, 2), elements_sum([1, 2], 3))\n\n def test_sum_on_middle(self):\n self.assertEqual((13, 14), elements_sum(list(range(9)) + [13, 14] + list(range(29, 39)), 27))\n\n def test_sum_not_found(self):\n self.assertRaises(ElementsNotFound, elements_sum, list(range(10)), 20)\n","repo_name":"renzon/code_interview_training","sub_path":"old/two_elements_sum.py","file_name":"two_elements_sum.py","file_ext":"py","file_size_in_byte":1933,"program_lang":"python","lang":"en","doc_type":"code","stars":74,"dataset":"github-code","pt":"85"}
{"seq_id":"26487606389","text":"import cv2\n\nfrom sinner.validators.AttributeLoader import Rules\nfrom sinner.processors.frame.BaseFrameProcessor import BaseFrameProcessor\nfrom sinner.typing import Frame\nfrom sinner.utilities import is_int, is_float\n\n\nclass FrameResizer(BaseFrameProcessor):\n emoji: str = '🔍'\n\n scale: float\n height: int\n width: int\n height_max: int\n width_max: int\n height_min: int\n width_min: int\n\n def rules(self) -> Rules:\n return [\n {\n 'parameter': {'scale'},\n 'attribute': 'scale',\n 'default': 1,\n 'valid': lambda attribute, value: is_float(value),\n 'help': 'Select frame resize scale'\n },\n {\n 'parameter': {'height'},\n 'attribute': 'height',\n 'default': None,\n 'valid': lambda attribute, value: is_int(value),\n 'help': 'Select resize height'\n },\n {\n 'parameter': {'width'},\n 'attribute': 'width',\n 'default': None,\n 'valid': lambda attribute, value: is_int(value),\n 'help': 'Select resize width'\n },\n {\n 'parameter': {'height-max'},\n 'attribute': 'height_max',\n 'default': None,\n 'valid': lambda attribute, value: is_int(value),\n 'help': 'Select maximal allowed height'\n },\n {\n 'parameter': {'width-max'},\n 'attribute': 'width_max',\n 'default': None,\n 'valid': lambda attribute, value: is_int(value),\n 'help': 'Select maximal allowed width'\n },\n {\n 'parameter': {'height-min'},\n 'attribute': 'height_min',\n 'default': None,\n 'valid': lambda attribute, value: is_int(value),\n 'help': 'Select minimal allowed height'\n },\n {\n 'parameter': {'width-min'},\n 'attribute': 'width_min',\n 'default': None,\n 'valid': lambda attribute, value: is_int(value),\n 
'help': 'Select minimal allowed width'\n },\n {\n 'module_help': 'This module changes images resolution'\n }\n ]\n\n def calculate_scale(self, frame: Frame) -> float:\n current_height, current_width = frame.shape[:2]\n if self.height_max is not None and current_height > self.height_max and (self.height is None or self.height > self.height_max):\n self.height = self.height_max\n if self.width_max is not None and current_width > self.width_max and (self.width is None or self.width > self.width_max):\n self.width = self.width_max\n if self.height_min is not None and current_height < self.height_min and (self.height is None or self.height < self.height_min):\n self.height = self.height_min\n if self.width_min is not None and current_width < self.width_min and (self.width is None or self.width < self.width_min):\n self.width = self.width_min\n\n if self.height is not None:\n return self.height / current_height\n elif self.width is not None:\n return self.width / current_width\n else:\n return self.scale\n\n def process_frame(self, frame: Frame) -> Frame:\n current_height, current_width = frame.shape[:2]\n scale = self.calculate_scale(frame)\n return cv2.resize(frame, (int(current_width * scale), int(current_height * scale)))\n","repo_name":"pozitronik/sinner","sub_path":"sinner/processors/frame/FrameResizer.py","file_name":"FrameResizer.py","file_ext":"py","file_size_in_byte":3669,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"85"} +{"seq_id":"682473968","text":"import os\nimport re\nimport pandas as pd\nimport numpy as np\n\n\n# plot videos\ndef MovieAvgRating(DF, fig=(15, 10)):\n return DF.plot.line(\n x='primaryTitle',\n y=['averageRating', 'numVotes'],\n secondary_y='numVotes',\n # stacked=True ,\n rot=90,\n figsize=fig,\n xticks=range(0, DF['primaryTitle'].count()),\n\n )\n\n\ndef MovieRuntime(DF, fig=(15, 10)):\n return DF.plot.line(\n x='primaryTitle',\n y=['minutes', 'averageRating'],\n secondary_y='averageRating',\n rot=90,\n figsize=fig,\n xticks=range(0, DF['primaryTitle'].count())\n )\n\n\ndef MovieHexBin(DF, fig=(15, 10), title=\"Movies hexbin\"):\n DF.plot.hexbin(\n C='minutes',\n y='averageRating',\n x='numVotes',\n # reduce_C_function=np.sum,\n gridsize=10,\n cmap=\"viridis\",\n title=title,\n figsize=fig\n )\n\n\n# TV Shows\ndef group_tvshows(df):\n ep = df.groupby('TVShow').E.count()\n mins = df.groupby('TVShow').minutes.sum()\n rate = df.groupby('TVShow').averageRating.sum()\n votes = df.groupby('TVShow').numVotes.sum()\n enum = pd.DataFrame(ep)\n enum['minutes'] = mins\n enum['averageRating'] = rate / enum['E']\n enum['numVotes'] = votes\n enum = enum.sort_values(by=\"averageRating\").reset_index()\n\n return enum\n# TV Shows\ndef group_tvshows_season(df):\n ep = df.groupby('S').E.count()\n mins = df.groupby('S').minutes.sum()\n rate = df.groupby('S').averageRating.sum()\n votes = df.groupby('S').numVotes.sum()\n enum = pd.DataFrame(ep)\n enum['minutes'] = mins\n enum['averageRating'] = rate / enum['E']\n enum['numVotes'] = votes\n enum = enum.sort_values(by=\"averageRating\").reset_index()\n\n return enum\n\n\ndef TVShowAvgRating(DF, fig=(15, 10), x='Episode'):\n return DF.plot.line(\n x=x,\n y=['averageRating', 'numVotes'],\n secondary_y='numVotes',\n # stacked=True ,\n rot=90,\n figsize=fig,\n xticks=range(0, DF['E'].count()),\n fontsize=10.0,\n xlabel=x,\n # ylabel=[ 'averageRating', 'numVotes' ]\n\n )\n\n\ndef TVShowRuntime(DF, fig=(15, 10), x='Episode'):\n return DF.plot.line(\n x=x,\n y=['minutes', 'averageRating'],\n 
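# runtime stays on the primary y-axis; the rating series is drawn on its own secondary axis\n 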
secondary_y='averageRating',\n rot=90,\n figsize=fig,\n xticks=range(0, DF['E'].count()),\n fontsize=10.0,\n xlabel=x,\n # ylabel='minutes'\n )\n\n\ndef TVShowHexBin(DF, fig=(15, 10), title=\"TVShow hexbin\", c='minutes', y='averageRating', x='numVotes'):\n DF.plot.hexbin(\n C=c,\n y=y,\n x=x,\n reduce_C_function=np.sum,\n gridsize=10,\n cmap=\"viridis\",\n title=title,\n figsize=fig,\n xlabel=x,\n ylabel=y,\n\n )\n\n\nclass IMDB:\n def __init__(self, tbasic, ratings):\n basics = pd.read_csv(tbasic, sep='\\t', low_memory=False)\n ratings = pd.read_csv(ratings, sep='\\t')\n self.tbasics = basics.join(ratings.set_index('tconst'), on=\"tconst\").replace('\\\\N', np.nan)\n self.genres = []\n self.Videos = None\n self.TVShow = None\n\n def find_video(self, col, match):\n return self.Videos[col].str.contains(match).sum()\n\n def find_tv(self, col, match):\n return self.TVShow[col].str.contains(match).sum()\n\n def titleTypeCount(self):\n return self.tbasics.groupby(['titleType']).titleType.count()\n\n def genres_imdb(self):\n if self.TVShow is not None:\n return self.TVShow.groupby(['genres']).titleType.count()\n elif self.Videos is not None:\n return self.Videos.groupby(['genres']).titleType.count()\n else:\n return self.tbasics.groupby(['genres']).titleType.count()\n\n # returns a count of each genre found\n def set_genres(self):\n cg = {}\n # reset\n self.genres = []\n if self.TVShow is not None:\n data = self.TVShow.genres.dropna()\n elif self.Videos is not None:\n data = self.Videos.genres.dropna()\n else:\n data = self.tbasics.genres.dropna()\n for x in data:\n for g in self.process_genres(x):\n if g not in self.genres:\n self.genres.append(g)\n cg[g] = 1\n else:\n cg[g] = cg[g] + 1\n return cg\n\n def process_genres(self, g):\n return str(g).split(',')\n\n # Filter out the TV shows and episodes.\n\n def filter_tv(self):\n self.Videos = self.tbasics.loc[\n (self.tbasics['titleType'] != \"tvSeries\") & (self.tbasics['titleType'] != \"tvEpisode\")\n ].rename(columns={\n \"startYear\": \"year\",\n \"runtimeMinutes\": \"minutes\"\n }).drop([\"endYear\"], axis=1)\n\n def set_tvshows(self, episodes):\n # set \\\\N to zero hear since some TV databases have shows with season 0 and episode 0 for some specials\n e = pd.read_csv(episodes, sep='\\t').rename(columns={\n \"seasonNumber\": \"S\",\n \"episodeNumber\": \"E\"\n }).replace('\\\\N', 0)\n\n self.TVShow = e.join(self.tbasics.drop([\n 'isAdult',\n 'runtimeMinutes',\n 'genres',\n 'titleType',\n 'averageRating',\n 'numVotes'\n ], axis=1).rename(columns={\n \"tconst\": \"parentTconst\",\n \"primaryTitle\": \"TVShow\"\n }).set_index('parentTconst'), on=\"parentTconst\").join(self.tbasics.drop([\n \"endYear\"\n ], axis=1).rename(columns={\n \"primaryTitle\": \"episodeTitle\",\n \"originalTitle\": \"originalEpisode\",\n \"startYear\": \"year\",\n \"runtimeMinutes\": \"minutes\"\n }).set_index('tconst'), on=\"tconst\").drop([\n 'parentTconst'\n ], axis=1)\n self.filter_tv()\n\n def search_video_df(self, search, ttype='movie', votes=100000):\n df = self.Videos.loc[\n self.Videos[\"primaryTitle\"].str.contains(search) &\n (self.Videos['titleType'] == ttype) &\n (self.Videos['numVotes'] > votes)\n ].dropna().drop(columns=['titleType', 'originalTitle', 'isAdult']).sort_values(by='year')\n df['minutes'] = df['minutes'].astype('int64', errors='ignore')\n df['year'] = df['year'].astype('int64', errors='ignore')\n return df\n\n def search_tvshow_df(self, search, votes=100):\n df = self.TVShow.loc[\n (self.TVShow.numVotes > votes) &\n 
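# chained boolean masks: '&' combines the filters elementwise, so a row must satisfy both conditions\n 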
self.TVShow[\"TVShow\"].str.contains(search, regex=True)\n ].drop(columns=[\n 'titleType', 'originalTitle', 'isAdult',\n \"year\", \"originalEpisode\", \"genres\", \"endYear\"\n ]).sort_values(by='startYear').dropna()\n df['startYear'] = df['startYear'].astype('int64', errors='ignore')\n df['minutes'] = df['minutes'].astype('int64', errors='ignore')\n df['episodeTitle'] = df['S'].astype('str') + 'x' + df['E'].astype('str') + ' ' + df['episodeTitle']\n df['S'] = df['S'].astype('int64', errors='ignore')\n df['E'] = df['E'].astype('int64', errors='ignore')\n return df\n","repo_name":"salamcast/IMDBnotebooks-DataAnalysis","sub_path":"MediaProc.py","file_name":"MediaProc.py","file_ext":"py","file_size_in_byte":7117,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"41660040282","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom deepgtav.messages import Start, Stop, Scenario, Commands, frame2numpy\nfrom deepgtav.client import Client\n\nimport argparse\nimport time\nimport cv2\n\nclass Model:\n\tdef run(self,frame):\n\t\treturn [1.0, 0.0, 0.0] # throttle, brake, steering\n\n# Controls the DeepGTAV vehicle\nif __name__ == '__main__':\n\tparser = argparse.ArgumentParser(description=None)\n\tparser.add_argument('-l', '--host', default='localhost', help='The IP where DeepGTAV is running')\n\tparser.add_argument('-p', '--port', default=8000, help='The port where DeepGTAV is running')\n\targs = parser.parse_args()\n\n\t# Creates a new connection to DeepGTAV using the specified ip and port. \n\t# If desired, a dataset path and compression level can be set to store in memory all the data received in a gziped pickle file.\n\t# We don't want to save a dataset in this case\n\tclient = Client(ip=args.host, port=args.port)\n\t\n\t# We set the scenario to be in manual driving, and everything else random (time, weather and location). \n\t# See deepgtav/messages.py to see what options are supported\n\tscenario = Scenario(drivingMode=-1) #manual driving\n\t\n\t# Send the Start request to DeepGTAV. Dataset is set as default, we only receive frames at 10Hz (320, 160)\n\tclient.sendMessage(Start(scenario=scenario))\n\t\n\t# Dummy agent\n\tmodel = Model()\n\n\t# Start listening for messages coming from DeepGTAV. 
We do it for 80 hours\n\tstoptime = time.time() + 80*3600\n\twhile time.time() < stoptime:\n\t\ttry:\n\t\t\t# We receive a message as a Python dictionary\n\t\t\tmessage = client.recvMessage()\t\n\t\t\tprint(message)\n\t\t\t\t\n\t\t\t# The frame is a numpy array that can we pass through a CNN for example\t\t\n\t\t\timage = frame2numpy(message['frame'], (320,160))\n\t\t\tcommands = model.run(image)\n\t\t\t# We send the commands predicted by the agent back to DeepGTAV to control the vehicle\n\t\t\tclient.sendMessage(Commands(commands[0], commands[1], commands[2]))\n\t\texcept KeyboardInterrupt:\n\t\t\tbreak\n\t\t\t\n\t# We tell DeepGTAV to stop\n\tclient.sendMessage(Stop())\n\tclient.close()\n","repo_name":"tchaton/Self-Driving-Car-on-GTAV","sub_path":"drive.py","file_name":"drive.py","file_ext":"py","file_size_in_byte":2021,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"85"} +{"seq_id":"26567703531","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.widgets import Slider\n\ndef rx(t): \n return np.cos(t)/(1+np.sin(t)**2)\ndef ry(t):\n return np.sin(t)*np.cos(t)\\\n /(1+np.sin(t)**2)\ndef kappa(t):\n return 3*np.abs(np.cos(t))\\\n /(1+np.sin(t)**2)\n\nt = 0\n\nfig,(ax1, ax2) = plt.subplots(1,2, \n figsize=(10,6))\nplt.subplots_adjust(bottom=0.2)\n\nax1.set_aspect('equal')\nax1.set_ylim(-0.5,0.5)\nax1.set_xlim(-1.2,1.2)\nax1.title.set_text('Parametric curve')\nax1.grid('on')\n\nr, = ax1.plot(rx(t), ry(t), \n 'b', markersize=3) \nPnt1, = ax1.plot(rx(t), ry(t), \n 'ro', markersize=6) \n\nax2.set_aspect('equal')\nax2.set_ylim(-1,4)\nax2.set_xlim(0,4*np.pi)\nax2.title.set_text('Curvature')\nax2.grid('on')\n\nkap, = ax2.plot(t, kappa(t), \n 'r', markersize=3) \nPnt2, = ax2.plot(t, kappa(t), \n 'bo', markersize=6) \n\naxt = plt.axes([0.25, 0.32, 0.5, 0.02])\nt_slide = Slider(axt, 't', \n 0, 4*np.pi, valstep=0.001, \n valinit=t)\n\ndef update(val):\n t = t_slide.val\n T = np.linspace(0,t,200)\n r.set_data(rx(T),ry(T)) \n Pnt1.set_data(rx(t),ry(t)) \n kap.set_data(T, kappa(T)) \n Pnt2.set_data(t,kappa(t)) \n fig.canvas.draw_idle()\n\nt_slide.on_changed(update)\n\nplt.show()\n","repo_name":"siriwarwick/book","sub_path":"Chapter3/.py files/curvature.py","file_name":"curvature.py","file_ext":"py","file_size_in_byte":1315,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"71638639915","text":"import tkinter as tk\r\nimport playsound\r\nfrom PIL.ImageTk import PhotoImage\r\nimport pyautogui as pg\r\nimport time\r\nimport os\r\n\r\n\r\nos.system(\"title WinHelpTool SHELL\")\r\n\r\ndef cmd():\r\n os.system(\"cmd\")\r\n print(\"started COMMAND PROMPT\")\r\n\r\ndef chrome():\r\n os.startfile(\"chrome.exe\")\r\n print(\"started CHROME\")\r\n\r\ndef xampp():\r\n os.chdir(\"C:/xampp\")\r\n os.startfile(\"xampp-control.exe\")\r\n print(\"started XAMPP CONTROL PANEL\")\r\n\r\ndef python():\r\n os.startfile(\"python.exe\")\r\n print(\"started PYTHON\")\r\n\r\ndef settings():\r\n os.system(\"start ms-settings:\")\r\n print(\"started ms-settings:\")\r\n\r\ndef calc():\r\n os.system(\"calc\")\r\n print(\"started CALCULATOR\")\r\n\r\ndef brave():\r\n os.startfile(\"brave.exe\")\r\n print(\"started BRAVE BROWSER\")\r\n\r\n\r\nwindow = tk.Tk()\r\nwindow.title(\"WinHelpTool\")\r\nwindow.iconbitmap(\"windows.ico\")\r\nwindow.geometry(\"600x400\")\r\nwindow.tk_setPalette(\"#FFFFFF\")\r\nimg2 = tk.PhotoImage(file=\"CHROME.png\")\r\nimg3 = tk.PhotoImage(file=\"xamppphoto.png\")\r\nimg4 = 
tk.PhotoImage(file=\"python.png\")\r\nimg5 = tk.PhotoImage(file=\"windows.png\")\r\nimg6 = tk.PhotoImage(file=\"calc.png\")\r\nimg7 = tk.PhotoImage(file=\"Brave_lion.png\")\r\nbutton2 = tk.Button(text = \"CHROME\", image = img2, compound=\"top\", command = chrome)\r\nbutton2.pack(side = \"left\")\r\nbutton3 = tk.Button(text = \"XAMPP\", image = img3, compound = \"top\", command = xampp)\r\nbutton3.pack(side = \"left\")\r\nbutton4 = tk.Button(text = \"PYTHON\", image = img4, compound = \"top\", command = python)\r\nbutton4.pack(side = \"left\")\r\nbutton5 = tk.Button(text = \"SETTINGS\", image = img5, compound = \"top\", command = settings)\r\nbutton5.pack(side = \"left\")\r\nbutton6 = tk.Button(text = \"CALCULATOR\", image = img6, compound = \"top\", command = calc)\r\nbutton6.pack(side = \"left\")\r\nbutton7 = tk.Button(text = \"BRAVE\", image = img7, compound= \"top\", command = brave)\r\nbutton7.pack(side = \"left\")\r\n\r\nwindow.mainloop()","repo_name":"gitcloneAlperTuna/TkAssistant","sub_path":"WinHelpTool/WinHelpTool.py","file_name":"WinHelpTool.py","file_ext":"py","file_size_in_byte":1882,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"73393748715","text":"from gpiozero import Servo\nfrom time import sleep\n\nservo = Servo(17)\n\nservo.mid()\nsleep(3)\nangle = -1\ngo = True\n\nwhile True:\n # servo.min()\n if go:\n angle += 0.1\n\n if angle >= 0.7:\n go = not go\n continue\n \n else:\n angle -= 0.1\n \n if angle <= -1:\n go = not go\n continue\n \n \n servo.value = angle\n \n sleep(1)","repo_name":"Jollokim/digifab.projects","sub_path":"final_project/turret_code/test_servo.py","file_name":"test_servo.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"3267550234","text":"from flaskez._basic.html import t as templates\nimport typing\nimport os\n\n\nclass Page:\n \"\"\"\n Class for representing pages in the instant_setup() function.\n \"\"\"\n\n def __init__(self, template_or_code: typing.Union[str, typing.TextIO] = \"base\", route: str = \"/\") -> None:\n if isinstance(template_or_code, typing.TextIO):\n self.source = template_or_code.read()\n elif isinstance(template_or_code, str):\n self.source = template_or_code\n else:\n raise TypeError('Invalid type for parameter \"template_or_code\"')\n\n self._route = route if route.startswith(\"/\") else \"/\" + route\n\n @property\n def source(self) -> str:\n return self._source\n\n @source.setter\n def source(self, value: str) -> None:\n if value in templates:\n self._source = templates[value]\n else:\n if os.path.isfile(value):\n with open(value) as f:\n self._source = f.read()\n else:\n self._source = value\n\n @property\n def route(self) -> str:\n return self._route\n\n @route.setter\n def route(self, value: str) -> None:\n self._route = value if value.startswith(\"/\") else \"/\" + value\n","repo_name":"IHasBone/flaskez","sub_path":"flaskez/Public/Models.py","file_name":"Models.py","file_ext":"py","file_size_in_byte":1237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"10090377534","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n#@created: 10.10.2013\n#@author: Aleksey Komissarov\n#@contact: ad3002@gmail.com\n\nimport sys, os\nfrom trseeker.tools.jellyfish import sc_count_and_dump_kmers_for_file\nfrom PyBioSnippets.hiseq.fastq_tools import fastq_to_fasta\nimport argparse\n\nif __name__ == 
'__main__':\n\n\tparser = argparse.ArgumentParser(description='Count')\n\tparser.add_argument('-p','--prefix', help='Fastq file prefix', required=True)\n\tparser.add_argument('-k','--ksize', help='K', required=False, default=23)\n\tparser.add_argument('-m','--mintf', help='mintf', required=False, default=0)\n\tparser.add_argument('-d','--dumpmintf', help='dumpmintf', required=False, default=100)\n\targs = vars(parser.parse_args())\n\n\tprefix = args[\"prefix\"]\n\tk = args[\"ksize\"]\n\tmintf = args[\"mintf\"]\n\tdumpmintf = args[\"dumpmintf\"]\n\n\tfastq_file = prefix + \".fastq\"\n\tfasta_file = prefix + \".fa\"\n\tjf_db = prefix + \".%s.jf\" % k\n\tjf_dat = prefix + \".%s.dat\" % k\n\n\tif os.path.isfile(fastq_file) and not os.path.isfile(fasta_file):\n\t\tfastq_to_fasta(fastq_file, fasta_file)\n\n\tsc_count_and_dump_kmers_for_file(fasta_file, \".\", jf_db, jf_dat, k, mintf, dumpmintf)","repo_name":"ad3002/PyBioSnippets","sub_path":"hiseq/count_kmers.py","file_name":"count_kmers.py","file_ext":"py","file_size_in_byte":1148,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"}
{"seq_id":"17916359488","text":"# 1283. Find the Smallest Divisor Given a Threshold\n# Medium\n\n# Given an array of integers nums and an integer threshold, we will choose a positive integer divisor and divide all the array by it and sum the result of the division. Find the smallest divisor such that the result mentioned above is less than or equal to threshold.\n\n# Each result of division is rounded to the nearest integer greater than or equal to that element. (For example: 7/3 = 3 and 10/2 = 5).\n\n# It is guaranteed that there will be an answer.\n\n \n\n# Example 1:\n\n# Input: nums = [1,2,5,9], threshold = 6\n# Output: 5\n# Explanation: We can get a sum to 17 (1+2+5+9) if the divisor is 1. \n# If the divisor is 4 we can get a sum to 7 (1+1+2+3) and if the divisor is 5 the sum will be 5 (1+1+1+2). 
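\n# The summed result only decreases as the divisor grows, so the answer is monotonic in the divisor and can be binary searched.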
\n# Example 2:\n\n# Input: nums = [2,3,5,7,11], threshold = 11\n# Output: 3\n# Example 3:\n\n# Input: nums = [19], threshold = 5\n# Output: 4\n \n\n# Constraints:\n\n# 1 <= nums.length <= 5 * 10^4\n# 1 <= nums[i] <= 10^6\n# nums.length <= threshold <= 10^6\n\n# This solution works !\n'''\nuse math.ceil to verify the sum\nbinary search\ndon't include 0 in the binary search to avoid zero division - initialize left = 1\nif the helper returns True, any divisor larger than mid is eliminated, so move right = mid\nif it's invalid, move left = mid+1\n'''\n\nimport math\nfrom typing import List\n\nclass Solution:\n def smallestDivisor(self, nums: List[int], threshold: int) -> int:\n left = 1\n right = max(nums)\n while left < right:\n mid = (left + right) // 2\n if self.helper(mid, nums, threshold):\n right = mid\n else:\n left = mid+1\n return left\n \n def helper(self, divisor, nums, threshold):\n total = 0\n for num in nums:\n total += math.ceil(num/divisor)\n if total <= threshold:\n return True\n else:\n return False","repo_name":"akimi-yano/algorithm-practice","sub_path":"lc/1283.FindSmallestDivisorGiven.py","file_name":"1283.FindSmallestDivisorGiven.py","file_ext":"py","file_size_in_byte":1918,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"}
{"seq_id":"25688904750","text":"import datetime\nimport sys \nimport math \n\n#1 Write a Python program to display the current date and time.\ndef date_time():\n now = datetime.datetime.now()\n print (\"Current date - time: \")\n print (now.strftime(\"%Y-%m-%d %H:%M:%S\"))\n\n#2 Write a Python program which accepts the user's first and last name and print them in reverse order with a space between them.\ndef accept_user_name():\n first_name = input(\"Your first name: \")\n last_name = input(\"Your last name: \")\n print(last_name + \" \" + first_name)\n\n#3 Write a Python program to accept a filename from the user and print the extension of that.\ndef accept_inputed_filename():\n file_name = input(\"Input the file name with extension: \")\n splitting_array = file_name.split(\".\",1)\n print(\"File extension: \", splitting_array[1])\n\n#4 Write a Python program to display the first and last element from the following list.\ndef display_elements_from_list():\n color_list = [\"Red\",\"Green\",\"White\",\"Black\"]\n print(color_list[0], color_list[-1])\n\n#5 Write a Python program to calculate number of days between two dates.\ndef calc_diff_date():\n from datetime import date\n today = date.today()\n print(today)\n date_1 = date(2019,3,20)\n date_2 = date(2019,3,18)\n diff = date_1 - date_2\n print(diff.days)\n\n \n#6 Function to calculate the sum of three given numbers, if the values are equal then return thrice of their sum.\ndef calc_three_numbers(d1,d2,d3):\n if d1==d2==d3:\n total = (d1 + d2 + d3) * 3\n else: \n total = d1 + d2 + d3\n print(\"Sum: \", total)\n\n#7 Write a Python program to find whether a given number (accept from the user) is even or odd, print out an appropriate message to the user. 
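\n# For example: 7 % 2 == 1, so 7 is odd; 10 % 2 == 0, so 10 is even.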
\ndef out_user_infos(message, dig, info):\n print(message, dig, info)\n\ndef decide_even_or_odd_number(number):\n if (number % 2) != 0:\n print(\"odd\")\n else:\n print(\"even\")\n\n\ndef input_numbers():\n print(\"End with '-1'\")\n x = 0 \n while x != -1: \n x = int(input(\"Input a digit or '-1' to end: \"))\n if x != -1:\n decide_even_or_odd_number(x)\n \n\n#8 to check whether a specified value is contained in a group of values.\ndef is_value_contained_in_group():\n a = [1,5,8,3] \n x = int(input(\"Input your search digit: \"))\n if x in a:\n print(\"List contains:\", x)\n else:\n print(\"this element was not found in the list\")\n\n#9. to print out a set containing all the colors from color_list_1 which are not present in color_list_2. \n# Test Data : \n# color_list_1 = set([\"White\", \"Black\", \"Red\"]) \n# color_list_2 = set([\"Red\", \"Green\"])\n# Expected Output : \n# {'Black', 'White'}\ndef search_for_key(): \n color_list_1 = set([\"White\", \"Black\", \"Red\"])\n color_list_2 = set([\"Red\", \"Green\"])\n color_list_3 = set([])\n\n for i in color_list_1:\n color_list_3.add(i) \n for j in color_list_2: \n if i == j: \n color_list_3.remove(i)\n print(\"{\") \n for k in color_list_3:\n print(k)\n print(\"}\")\n \n#10. to compute the distance between the points (x1, y1) and (x2, y2).\ndef compare_distance_between_points(x1, y1, x2, y2):\n d = math.sqrt((x2-x1)**2 + (y2-y1)**2)\n print(\"Distance between two Points is: \", d)\n\n\n# this is the main procedure for the defined exercises\ndef main():\n# date_time()\n# accept_user_name()\n# accept_inputed_filename()\n# display_elements_from_list()\n# calc_diff_date()\n# calc_three_numbers(1,1,3)\n# calc_three_numbers(2,2,2)\n# input_numbers()\n# is_value_contained_in_group()\n# search_for_key()\n compare_distance_between_points(2,2,3,3)\n\n#\n\nif __name__==\"__main__\":\n main()\n\n","repo_name":"device999/Machine_Learning","sub_path":"HelloWorld/Package_I/bshecht/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":3742,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"}
{"seq_id":"38813503514","text":"a = 'Hello World'\n# b = 'Hello World!'\nb = a + 'abc'\na = 'Hello Worldabc'\nprint(a is b, a == b)\n\n\n# a = [1, 2, 3]\n# b = a\n# for i in range(len(b)):\n# b[i] += 1\n# print(a)\n# print(b)\n\n\n# b = a[:]\n# a.append(4)\n# b += [4]\n# print(a)\n# print(b)\n\n\n# a = list()\n# for i in range(1000):\n# a.append(1)\n# if i % 2 == 0:\n# a.append(2)\n# print(a)\n\n\n# fruits = ['apple', 'orange', 'lemon', 'pineapple', 'pear']\n# a = {\n# index: fruit\n# for index, fruit in enumerate(fruits)\n# if fruit.startswith('p')\n# }\n\n# print(a)","repo_name":"Amaterasq/python_tasks","sub_path":"live_coding/base_python.py","file_name":"base_python.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"}
{"seq_id":"6840919487","text":"\"\"\"Combine all available (score, annotation) pairs into tsv files.\"\"\"\n\nimport os\nimport pandas as pd\nfrom pathlib import Path\n\nfrom . 
import cli\nfrom .common import (\n    ANNOTATIONSCOREDUPLES,\n    DATASPLITS,\n    DATASETSUMMARYFILE,\n)\nfrom .joint_parser import (\n    parseAnnotationAndScore,\n    parseAnnotationAndAnnotation,\n)\n\n\ndef generateDataset(synthesize=False, texturize=False, tsvDir=\"dataset\"):\n    statsdict = {\n        \"file\": [],\n        \"annotation\": [],\n        \"score\": [],\n        \"collection\": [],\n        \"split\": [],\n        \"misalignmentMean\": [],\n        \"qualityMean\": [],\n        \"incongruentBassMean\": [],\n    }\n    datasetDir = f\"{tsvDir}-synth\" if synthesize else tsvDir\n    Path(datasetDir).mkdir(exist_ok=True)\n    for split, files in DATASPLITS.items():\n        Path(os.path.join(datasetDir, split)).mkdir(exist_ok=True)\n        for nickname in files:\n            print(nickname)\n            annotation, score = ANNOTATIONSCOREDUPLES[nickname]\n            if not synthesize:\n                df = parseAnnotationAndScore(annotation, score)\n            else:\n                df = parseAnnotationAndAnnotation(\n                    annotation, texturize=texturize\n                )\n            outpath = os.path.join(datasetDir, split, nickname + \".tsv\")\n            df.to_csv(outpath, sep=\"\\t\")\n            collection = nickname.split(\"-\")[0]\n            statsdict[\"file\"].append(nickname)\n            statsdict[\"annotation\"].append(annotation)\n            statsdict[\"score\"].append(score)\n            statsdict[\"collection\"].append(collection)\n            statsdict[\"split\"].append(split)\n            misalignment = round(df.measureMisalignment.mean(), 2)\n            statsdict[\"misalignmentMean\"].append(misalignment)\n            qualitySquaredSum = round(df.qualitySquaredSum.mean(), 2)\n            statsdict[\"qualityMean\"].append(qualitySquaredSum)\n            incongruentBass = round(df.incongruentBass.mean(), 2)\n            statsdict[\"incongruentBassMean\"].append(incongruentBass)\n    df = pd.DataFrame(statsdict)\n    df.to_csv(os.path.join(datasetDir, DATASETSUMMARYFILE), sep=\"\\t\")\n    return df\n\n\nif __name__ == \"__main__\":\n    parser = cli.tsv()\n    args = parser.parse_args()\n    kwargs = vars(args)\n    generateDataset(**kwargs)\n","repo_name":"napulen/AugmentedNet","sub_path":"AugmentedNet/dataset_tsv_generator.py","file_name":"dataset_tsv_generator.py","file_ext":"py","file_size_in_byte":2285,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"73"}
+{"seq_id":"22721173087","text":"BATCH_SIZE = 16\nNUM_EPOCHS = 100\nLEARNING_RATE = 0.001\nWEIGHT_DECAY = 1e-3\nCLIP_NORM = 5\n\nletters = ['2', '3', '4', '5', '6', '7', '8', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'k', 'm', 'n', 'p', 'r', 'w', 'x', 'y']\nvocabulary = [\"-\"] + letters\nidx2char = {k:v for k,v in enumerate(vocabulary, start=0)}\nchar2idx = {v:k for k,v in idx2char.items()}","repo_name":"YeJinJeon/CaptchaOCR","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"}
+{"seq_id":"20222351303","text":"N=int(input())\nlst=[]\n\n\nfor loop in range(N):\n    lst.append(list(map(int,input().split())))\n\nlst.append([0,1])\nmaximum=0\n\ndp=[[0]*N for loop2 in range(0,N)]\n\n\nprint(dp)\nprint(lst)\n\n\n\n","repo_name":"cmg7111/Algorithm","sub_path":"not_14501.py","file_name":"not_14501.py","file_ext":"py","file_size_in_byte":184,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"}
+{"seq_id":"44029459174","text":"import os\nimport pickle\nimport pandas as pd\nfrom stable_baselines.common.policies import MlpPolicy\nfrom stable_baselines.common.vec_env import DummyVecEnv\nfrom stable_baselines import PPO2, DQN\nfrom rlenv.StockTradingEnv0 import StockTradingEnv\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.font_manager as fm\n\nfont = fm.FontProperties(fname='font/wqy-microhei.ttc')\n# plt.rc('font', family='Source Han Sans CN')\nplt.rcParams['axes.unicode_minus'] = False\n\nINITIAL_ACCOUNT_BALANCE = 10000\n\n\ndef stock_trade(stock_file_train):\n    df_train = pd.read_csv(stock_file_train)\n    df_train = df_train.sort_values('date')\n\n    # The algorithms require a vectorized environment to run\n    env_train = DummyVecEnv([lambda: StockTradingEnv(df_train)])\n\n    model = PPO2(MlpPolicy, env_train, verbose=0, tensorboard_log='./log')\n    # model = DQN(\"MlpPolicy\", env_train, verbose=0, tensorboard_log='./log')\n    model.learn(total_timesteps=int(1e4))\n\n    # -----------------Test Model --------------------------------------\n    day_profits = []\n    buy_hold_profit = []\n\n    df_test = pd.read_csv(stock_file_train.replace('train', 'test'))\n\n    env_test = DummyVecEnv([lambda: StockTradingEnv(df_test)])\n    obs = env_test.reset()\n    no_of_shares = 0\n    for i in range(len(df_test) - 1):\n        action, _states = model.predict(obs)\n        obs, rewards, done, info = env_test.step(action)\n        profit = env_test.render()\n        day_profits.append(profit)\n        if i == 0:\n            buy_hold_profit.append(0)\n            no_of_shares = INITIAL_ACCOUNT_BALANCE // df_test.iloc[0]['close']\n            print('Buy ' + str(no_of_shares) + ' shares and hold')\n        else:\n            buy_hold_profit.append(no_of_shares * (df_test.iloc[i]['close'] - df_test.iloc[i - 1]['close']))\n        if done:\n            break\n    return day_profits, buy_hold_profit\n\n\ndef find_file(path, name):\n    # print(path, name)\n    for root, dirs, files in os.walk(path):\n        for fname in files:\n            if name in fname:\n                return os.path.join(root, fname)\n\n\ndef test_a_stock_trade(stock_code):\n    stock_file_train = find_file('./stockdata/train', str(stock_code))\n\n    daily_profits, buy_hold_profit = stock_trade(stock_file_train)\n    fig, ax = plt.subplots()\n    ax.plot(daily_profits, '-o', label=stock_code, marker='o', ms=10, alpha=0.7, mfc='orange')\n    ax.plot(buy_hold_profit, '-o', label=stock_code, marker='o', ms=10, alpha=0.7, mfc='blue')\n    ax.grid()\n    plt.xlabel('step')\n    plt.ylabel('profit')\n    ax.legend(prop=font)\n    # plt.show()\n    plt.savefig(f'./img/{stock_code}.png')\n\n\ndef multi_stock_trade():\n    start_code = 600000\n    max_num = 3000\n\n    group_result = []\n\n    for code in range(start_code, start_code + max_num):\n        stock_file = find_file('./stockdata/train', str(code))\n        if stock_file:\n            try:\n                profits = stock_trade(stock_file)\n                group_result.append(profits)\n            except Exception as err:\n                print(err)\n\n    with open(f'code-{start_code}-{start_code + max_num}.pkl', 'wb') as f:\n        pickle.dump(group_result, f)\n\n\nif __name__ == '__main__':\n    # multi_stock_trade()\n    test_a_stock_trade('sh.600028')\n    # ret = find_file('./stockdata/train', '600036')\n    # print(ret)\n","repo_name":"guofeng201507/ISA-IPA-2020-05-05-IS1PT-GRP-High5-StockTrading","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3309,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"}
+{"seq_id":"10118297128","text":"import pygame\nfrom settings import *\nfrom auxiliar import Auxiliar\n\n\n\n\nclass Botiquin:\n    \n    def __init__(self,x,y,w,h,type=0):\n        self.image = Auxiliar.getSurfaceFromSpriteSheet(\"images/assets/bag_elemental_pouch/bag_elemental_pouch__x1_1_png_1354831495.png\",1,1)[type]\n        self.image = pygame.transform.scale(self.image,(w,h))\n        self.rect = self.image.get_rect()\n        self.rect.x = x\n        self.rect.y = y\n        self.rect_ground_collition = self.rect\n        \n\n    def draw(self,screen):\n        if(DEBUG):\n            pygame.draw.rect(screen,GREEN,self.rect)\n\n        screen.blit(self.image,self.rect)\n\n    ","repo_name":"FacundoM22/Juego-UTN","sub_path":"botiquin.py","file_name":"botiquin.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"}
+{"seq_id":"22041202957","text":"\"\"\"Takes the 366-smalltalk-prompts.txt and creates a .json file with the\ncontents prettified\"\"\"\n\nimport os\nimport json\nimport datetime\n\nlanguages = ['en', 'de', 'fr']\n\nquestions = {}\nfor lang in languages:\n    questions[lang] = open(f\"questions-{lang}.txt\", \"r\").read().split(\"\\n\")[:367] # cut off last newline\n\n\ndef doy_to_date(doy):\n    \"\"\"Converts a day-of-year integer (from 1 to 366) to a date-string in mm-dd format (e.g. 04-20).\n    It assumes a leap year, and doy starts at 0, not 1!\"\"\"\n    jan1 = datetime.date(2020, 1, 1)\n    new_date = jan1 + datetime.timedelta(days=doy)\n    return new_date.strftime('%m-%d')\n\n\nd = {\n    doy_to_date(i): {\n        lang: questions[lang][i] for lang in languages\n    }\n    for i in range(366)\n}\n\n# In the next block, I set `i` so that an output JSON file does not get\n# overwritten. Instead, we append underscores until a non-existing filename\n# is encountered.\n# This is so that manually added translations don't accidentally get lost\n# if you forgot to commit again :)\noutfile = 'questions{}.json'\ni = 0\nwhile os.path.exists(outfile.format('_'*i)):\n    i += 1\n\nwith open(outfile.format('_'*i), 'w', encoding='utf8') as fp:\n    json.dump(d, fp, indent=4, ensure_ascii=False)\n","repo_name":"AlexEngelhardt/speech-prompts","sub_path":"questions/create-json.py","file_name":"create-json.py","file_ext":"py","file_size_in_byte":1220,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"}
+{"seq_id":"5461783228","text":"# -*- coding: utf-8 -*-\n\nfrom PyQt4.QtCore import *\nfrom PyQt4.QtGui import *\nfrom PyQt4.QtSql import *\n\nimport tools, hashlib, uuid, random, mailer, workman, keydialog\nimport sys, datetime\n\n\n\n\nclass Guard(QDialog):\n    def __init__(self, parent=None):\n        super(Guard, self).__init__(parent)\n\n        self.trial_days = 15\n        self.k_prod = u\"K6F1MB9VEJ8HFVGNR0P77TGCQ\"\n        self.activated_prod = False\n        \n        self.init()\n\n\n    def closeEvent(self, event):\n        self.reject()\n\n    def keyPressEvent(self, event):\n        if event.key() == Qt.Key_Escape:\n            pass\n\n    def shutdown(self):\n        self.reject()\n        sys.exit(0)\n\n\n    def checkActivationKey(self):\n        raw_key = self.active_dialog.key_code.text()\n        key = self.active_dialog.key_code.text().replace(\"-\", \"\")\n        if key == self.k_prod:\n            q_settings = QSettings(\"Matlle\", \"InterNotes\")\n\n            q_settings.beginGroup(\"as\")\n            \n            q_settings.setValue(u\"s\", 1)\n            q_settings.setValue(u\"k\", raw_key)\n\n            q_settings.endGroup()\n\n            self.activated_prod = True\n\n            QMessageBox.information(self.active_dialog, \"Licence activé - InterNotes\", \n                u\"Clé valide.\\n\" \n                u\"Le logiciel est maintenant activé!\")\n\n            self.active_dialog.close()\n            self.close()\n        else:\n            QMessageBox.critical(self.active_dialog, u\"Erreur d'activation - InterNotes\",\n                u\"Cette clé n'est pas valide. Veuillez réessayer.\")\n\n\n    def activeOkBtnActivation(self):\n        if self.key_code.text().length() < 29:\n            self.btn_ok.setEnabled(False)\n        else:\n            self.btn_ok.setEnabled(True)\n\n\n    def enterKeyDialog(self, alone=False):\n        self.active_dialog = keydialog.KeyDialog(self)\n        self.active_dialog.setWindowModality(Qt.WindowModal)\n\n        \n        #events\n        if not alone:\n            self.connect(self.active_dialog.btn_cancel, SIGNAL(\"clicked()\"), self.active_dialog.reject)\n        else:\n            self.connect(self.active_dialog.btn_cancel, SIGNAL(\"clicked()\"), self.shutdown)\n        self.connect(self.active_dialog.btn_ok, SIGNAL(\"clicked()\"), self.checkActivationKey)\n\n        #self.connect(self.active_dialog.key_code, SIGNAL(\"cursorPositionChanged(int, int)\"), \n        #    self.activeOkBtnActivation)\n\n\n        if not alone:\n            self.active_dialog.show()\n        else:\n            return self.active_dialog.exec_()\n\n\n\n    \n    def showRemainingTimeDialog(self, days):\n        self.label_infos_time = QLabel(\n            u\"Il vous reste \" + str(days) + \" \"\n            u\"jours avant la fin de la periode d'essai.<br>\"\n            u\"Pensez à activer le logiciel avec une clé valide.<br><br>\"\n            u\"Ou contactez (Matlle) le fabricant du produit<br>\"\n            u\"pour obtenir une clé d'activation:<br><br>\"\n            u\"Email: matllesoftware@gmail.com<br>\"\n            u\"Tel: (+225) 07 08 68 98 / \"\n            u\"       41 87 07 68 / \"\n            u\"       01 58 03 30\"\n        )\n\n        self.label_infos_time.setStyleSheet(\"font-size: 14px;\")\n\n\n        btn_key = QPushButton(u\"Entrez la clé d'activation\")\n        btn_key.setIcon(QIcon(\":/images/button_apply.png\"))\n\n        btn_cancel = QPushButton(u\"Annuler\")\n        btn_cancel.setIcon(QIcon(\":/images/editdelete.png\"))\n\n\n        layout_form = QFormLayout()\n        layout_form.addRow(u\"\", self.label_infos_time)\n\n\n        layout_btn = QHBoxLayout()\n        layout_btn.addWidget(btn_key)\n        layout_btn.addWidget(btn_cancel)\n        layout_btn.setAlignment(Qt.AlignRight)\n\n\n\n        layout_main = QVBoxLayout()\n        layout_main.addLayout(layout_form)\n        layout_main.addLayout(layout_btn)\n\n        self.setLayout(layout_main)\n        self.resize(500, 100)\n        self.setWindowTitle(u\"Periode d'essai - InterNotes\")\n\n        \n        #events\n        self.connect(btn_cancel, SIGNAL(\"clicked()\"), self.reject)\n        self.connect(btn_key, SIGNAL(\"clicked()\"), self.enterKeyDialog)\n\n\n        return self.exec_()\n\n\n    def isAppActived(self):\n        q_settings = QSettings(\"Matlle\", \"InterNotes\")\n\n        q_settings.beginGroup(\"as\")\n        \n        state = q_settings.value(u\"s\", 0).toInt()[0]\n\n        q_settings.endGroup()\n\n        if state == 0:\n            return False\n        elif state == 1:\n            self.activated_prod = True\n            return True\n\n\n    def remainingTime(self):\n        q_settings = QSettings(\"Matlle\", \"InterNotes\")\n        q_settings.beginGroup(\"rt\")\n\n        first_time = q_settings.value(u\"ft\", u\"\").toDateTime()\n        expired = q_settings.value(u\"ended\", 0).toInt()[0]\n\n        if expired:\n            return \"expired\"\n\n        if not first_time:\n            q_settings.setValue(\"ft\", QDateTime.currentDateTime())\n            return \"first\"\n        else:\n            first_time = first_time.toPyDateTime()\n            if first_time > datetime.datetime.now():\n                \n                # comment or uncomment this... can be dangerous to uncomment it\n                q_settings.setValue(\"ended\", 1)\n\n                return \"expired\"\n\n            if first_time < datetime.datetime.now() - datetime.timedelta(\n                days=self.trial_days):\n\n                q_settings.setValue(\"ended\", 1)\n\n                return \"expired\"\n            else:\n                elapsed = datetime.datetime.now() - first_time\n                elapsed_days = elapsed.days\n                remaining_days = self.trial_days - elapsed_days\n                return remaining_days \n        \n        q_settings.endGroup()\n    \n\n\n\n\n\n    def init(self):\n        if self.isAppActived() == True:\n            return\n        re = self.remainingTime()\n        if re == \"expired\":\n            QMessageBox.critical(self, u\"Licence d'évaluation expirée - InterNotes\", \n                u\"Votre période d'essai InterNotes a expiré.\\n\"\n                u\"Veuillez activé le logiciel avec une clé valide.\\n\\n\"\n                u\"Ou contactez le frabricant du logiciel (Matlle) pour obtenir une clé d'activation:\\n\\n\"\n                u\"Email: matllesoftware@gmail.com\\n\"\n                u\"Tel: (+225) 07 08 68 98 / 41 87 07 68 / 01 58 03 30\"\n            )\n            self.enterKeyDialog(alone=True)\n        elif re == \"first\":\n            return\n        else:\n            self.showRemainingTimeDialog(re)\n\n\n","repo_name":"matlle/py_internotes_linux_beta2_x86","sub_path":"guard.py","file_name":"guard.py","file_ext":"py","file_size_in_byte":6569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"}
+{"seq_id":"28162357060","text":"from CyberSource import *\nfrom pathlib import Path\nimport os\nimport json\nfrom importlib.machinery import SourceFileLoader\n\nconfig_file = os.path.join(os.getcwd(), \"data\", \"Configuration.py\")\nconfiguration = SourceFileLoader(\"module.name\", config_file).load_module()\n\n# To delete None values in Input Request Json body\ndef del_none(d):\n    for key, value in list(d.items()):\n        if value is None:\n            del d[key]\n        elif isinstance(value, dict):\n            del_none(value)\n    return d\n\ndef download_file_with_file_identifier():\n    organizationId = \"testrest\"\n    fileId = \"Q2hhcmdlYmFja0FuZFJldHJpZXZhbFJlcG9ydC1hYWVkMWEwMS03OGNhLTU1YzgtZTA1My1hMjU4OGUwYWNhZWEuY3N2LTIwMjAtMDctMzA=\"\n\n    try:\n        config_obj = configuration.Configuration()\n        client_config = config_obj.get_configuration()\n        api_instance = SecureFileShareApi(client_config)\n        api_instance.api_client.download_file_path = os.path.join(os.getcwd(), \"resources\", \"download_report.csv\")\n        status, headers = api_instance.get_file(fileId, organization_id=organizationId)\n\n        print(\"Download Status : \", status)\n        print(\"Response Headers : \", headers)\n\n        print(\"Response downloaded at the location : \" + api_instance.api_client.download_file_path)\n        write_log_audit(status)\n    except Exception as e:\n        write_log_audit(e.status if hasattr(e, 'status') else 999)\n        print(\"\\nException when calling SecureFileShareApi->get_file: %s\\n\" % e)\n\ndef write_log_audit(status):\n    print(f\"[Sample Code Testing] [{Path(__file__).stem}] {status}\")\n\nif __name__ == \"__main__\":\n    download_file_with_file_identifier()\n","repo_name":"CyberSource/cybersource-rest-samples-python","sub_path":"samples/SecureFileShare/download-file-with-file-identifier.py","file_name":"download-file-with-file-identifier.py","file_ext":"py","file_size_in_byte":1645,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"73"}
+{"seq_id":"33361352415","text":"import StringIO\n\nfrom googlecloudsdk.calliope import cli_tree_markdown as markdown\nfrom googlecloudsdk.command_lib.shell import gcloud_parser\nfrom googlecloudsdk.command_lib.shell.gcloud_tree import gcloud_tree\nfrom googlecloudsdk.core.document_renderers import render_document\nfrom googlecloudsdk.core.document_renderers import token_renderer\nfrom prompt_toolkit.layout import controls\nfrom prompt_toolkit.token import Token\n\n\n# The height of the help window in the layout.\nHELP_WINDOW_HEIGHT = 10\n\n\nclass HelpWindowControl(controls.UIControl):\n  \"\"\"Implementation of the help window.\"\"\"\n\n  def __init__(self, default_char=None):\n    self._default_char = default_char\n\n  def create_content(self, cli, width, height):\n    data = GenerateHelpContent(cli, width)\n\n    return controls.UIContent(\n        lambda i: data[i],\n        line_count=len(data),\n        show_cursor=False,\n        default_char=self._default_char)\n\n\ndef GetCurrentInvocation(invocations, pos):\n  \"\"\"Determine the current invocation given a cursor position.\n\n  Args:\n    invocations: a list of GcloudInvocations\n    pos: an int giving the current cursor position\n\n  Returns:\n    The corresponding GcloudInvocation at position pos, or None.\n  \"\"\"\n  for invocation in invocations:\n    tokens = invocation.tokens\n    if tokens:\n      start = tokens[0].start\n      end = tokens[-1].end\n      if ((start <= pos <= end) or\n          (invocations.index(invocation) == len(invocations)-1)):\n        return tokens\n  return None\n\n\ndef GetCurrentToken(tokens, pos):\n  \"\"\"Determine the current token given a cursor position.\n\n  Args:\n    tokens: a list of gcloud_parser.ArgTokens\n    pos: an int giving the current cursor position\n\n  Returns:\n    The gcloud_parser.ArgToken at that position or None.\n  \"\"\"\n  i = 0\n  while i < len(tokens):\n    if pos >= tokens[i].start and pos < tokens[i].end:\n      return tokens[i]\n    if pos < tokens[i].start:\n      return tokens[i-1] if i > 0 else None\n    i += 1\n  return tokens[len(tokens)-1] if tokens else None\n\n\ndef GenerateHelpContent(cli, width):\n  \"\"\"Generates and renders the corresponding help content in the gcloud shell.\n\n  Args:\n    cli: the CLI in which to render the help contents.\n    width: the width of the help prompt.\n\n  Returns:\n    A list with one list per line, each containing (token, string) tuples for\n    words in the help text. These tuples represent (Markdown format,\n    actual text) pairs.\n  \"\"\"\n  if width > 80:\n    width = 80\n  doc = cli.current_buffer.document\n\n  tokens = GetCurrentInvocation(gcloud_parser.ParseLine(doc.text),\n                                doc.cursor_position)\n  if not tokens:\n    return []\n\n  tok = GetCurrentToken(tokens, doc.cursor_position)\n  if not tok:\n    return []\n\n  if tok.token_type == gcloud_parser.ArgTokenType.COMMAND:\n    return GenerateHelpForCommand(tok, width)\n  elif tok.token_type == gcloud_parser.ArgTokenType.GROUP:\n    return GenerateHelpForCommand(tok, width)\n  elif tok.token_type == gcloud_parser.ArgTokenType.FLAG:\n    return GenerateHelpForFlag(tok, width)\n  elif tok.token_type == gcloud_parser.ArgTokenType.FLAG_ARG:\n    return GenerateHelpForFlag(tok, width)\n  elif tok.token_type == gcloud_parser.ArgTokenType.POSITIONAL:\n    return GenerateHelpForPositional(tok, width)\n\n  return []\n\n\ndef RenderMarkdown(fin, width, height=HELP_WINDOW_HEIGHT, compact=True):\n  \"\"\"Renders the markdown for the help prompt in the gcloud shell.\n\n  Args:\n    fin: the input stream containing the markdown.\n    width: the width for which to create the renderer.\n    height: optional value representing the height for which to create the\n      renderer. Defaults to HELP_WINDOW_HEIGHT.\n    compact: optional value representing whether the renderer representation\n      should be compact. Defaults to True.\n\n  Returns:\n    A MarkdownRenderer Finish() value.\n  \"\"\"\n  return render_document.MarkdownRenderer(\n      token_renderer.TokenRenderer(width=width, height=height, compact=compact),\n      fin=fin).Run()\n\n\ndef GetDescriptionForCommand(token):\n  \"\"\"Gets the description for the command specified in token.\n\n  Args:\n    token: the ArgTokenType.COMMAND token for which to get the description.\n\n  Returns:\n    A StringIO with the description of the token.\n  \"\"\"\n  gen = markdown.CliTreeMarkdownGenerator(token.tree, gcloud_tree)\n  gen.PrintSectionIfExists('DESCRIPTION', disable_header=True)\n  doc = gen.Edit()\n  return StringIO.StringIO(doc)\n\n\ndef GetSynopsisForCommand(token):\n  \"\"\"Gets the synopsis for the command specified in token.\n\n  Args:\n    token: the ArgTokenType.COMMAND token for which to get the synopsis.\n\n  Returns:\n    A StringIO with the synopsis of the token.\n  \"\"\"\n  gen = markdown.CliTreeMarkdownGenerator(token.tree, gcloud_tree)\n  gen.PrintSynopsisSection()\n  doc = gen.Edit()\n  return StringIO.StringIO(doc)\n\n\ndef GetFullReferencePromptTokens():\n  \"\"\"A line of Prompt Toolkit tokens about opening full reference pages.\"\"\"\n  return [[(Token.Purple, 'ctrl-w'),\n           (Token, ' to open full reference page within browser')]]\n\n\ndef GenerateHelpForCommand(token, width):\n  \"\"\"Generates the help to show in the CLI for the command token passed.\n\n  Args:\n    token: the command token to show help for.\n    width: the width of the CLI.\n\n  Returns:\n    A list with one list per line, each containing (token, string) tuples for\n    words in the help text. These tuples represent (Markdown format,\n    actual text) pairs.\n  \"\"\"\n  blank_line = [[]]\n  return (\n      RenderMarkdown(GetDescriptionForCommand(token), width=width, height=2) +\n      blank_line +\n      RenderMarkdown(\n          GetSynopsisForCommand(token), width=width, height=5, compact=False) +\n      blank_line +\n      GetFullReferencePromptTokens())\n\n\ndef GetDefinitionForFlag(token):\n  \"\"\"Gets the definition for the flag specified in token.\n\n  Args:\n    token: the ArgTokenType.FLAG/FLAG_ARG token for which to get the definition.\n\n  Returns:\n    A StringIO with the definition of the token.\n  \"\"\"\n  gen = markdown.CliTreeMarkdownGenerator(gcloud_tree, gcloud_tree)\n  gen.PrintFlagDefinition(token.tree)\n  mark = gen.Edit()\n  return StringIO.StringIO(mark)\n\n\ndef GenerateHelpForFlag(token, width):\n  \"\"\"Generates the help to show in the CLI for the flag token passed.\n\n  Args:\n    token: the command token to show help for.\n    width: the width of the CLI.\n\n  Returns:\n    A list with one list per line, each containing (token, string) tuples for\n    words in the help text. These tuples represent (Markdown format,\n    actual text) pairs.\n  \"\"\"\n  return RenderMarkdown(GetDefinitionForFlag(token), width=width)\n\n\ndef GetDefinitionForPositional(token):\n  \"\"\"Gets the definition for the positional specified in token.\n\n  Args:\n    token: the ArgTokenType.POSITIONAL token for which to get the definition.\n\n  Returns:\n    A StringIO with the definition of the token.\n  \"\"\"\n  gen = markdown.CliTreeMarkdownGenerator(gcloud_tree, gcloud_tree)\n  gen.PrintPositionalDefinition(markdown.Positional(token.tree))\n  mark = gen.Edit()\n  return StringIO.StringIO(mark)\n\n\ndef GenerateHelpForPositional(token, width):\n  \"\"\"Generates the help to show in the CLI for the positional token passed.\n\n  Args:\n    token: the command token to show help for.\n    width: the width of the CLI.\n\n  Returns:\n    A list with one list per line, each containing (token, string) tuples for\n    words in the help text. These tuples represent (Markdown format,\n    actual text) pairs.\n  \"\"\"\n  return RenderMarkdown(GetDefinitionForPositional(token), width=width)\n","repo_name":"springml/case_routing","sub_path":"google-cloud-sdk/lib/googlecloudsdk/command_lib/shell/help_window.py","file_name":"help_window.py","file_ext":"py","file_size_in_byte":7473,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"73"}
+{"seq_id":"31380498134","text":"# -*- coding: utf-8 -*-\nimport socket\nimport subprocess\nimport struct\nimport json\nfrom Config import *\nfrom driver_leijun import Driver\n\nTEST = False\n\n\nclass TorcsEnv(object):\n    def __init__(self, torcs_root, track='road/alpine-1', lap=0, with_gt=False):\n        if len(torcs_root) == 0 or torcs_root[0] != '/':\n            if torcs_root.startswith('./'):\n                torcs_root = torcs_root[2:]\n            torcs_root = os.getcwd() + '/' + torcs_root\n        if not torcs_root.endswith('/'):\n            torcs_root += '/'\n        self.torcs_root = torcs_root\n        self.track = track\n        self.lap = lap\n        self.with_gt = with_gt\n        self.server_addr = ('localhost', 0)\n        self.sock = None\n        self.child = None\n        self.data_sock = None\n        self.current_state = None\n        self.current_gt = None\n        self.start_server()\n\n    def start_server(self):\n        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n        self.sock.bind(self.server_addr)\n        self.sock.listen(1)\n\n    def recv_msg(self):\n        lenbuff = self.data_sock.recv(4, socket.MSG_WAITALL)\n        msg_len, = struct.unpack('=I', lenbuff)\n        return self.data_sock.recv(msg_len, socket.MSG_WAITALL)\n\n    def send_msg(self, msg):\n        lenbuff = struct.pack('=I', len(msg))\n        self.data_sock.sendall(lenbuff)\n        self.data_sock.sendall(msg)\n\n    def restart_torcs(self):\n        self.close()\n        if TEST:\n            cwd = self.torcs_root\n        else:\n            cwd = '%sshare/games/torcs' % self.torcs_root\n        env = os.environ.copy()\n        env['LD_LIBRARY_PATH'] = self.torcs_root + 'lib/torcs/lib:' + env.get('LD_LIBRARY_PATH', '')\n        env['XIAOMI_BOT_SERVER'] = '%s:%d' % self.sock.getsockname()\n        if self.with_gt:\n            env['XIAOMI_BOT_GT'] = 'true'\n        if TEST:\n            program = '%storcs_test.py' % self.torcs_root\n        else:\n            program = '%slib/torcs/torcs-bin' % self.torcs_root\n        pauto = '-a'\n        ptrack = '-b%s' % self.track\n        pbot = '-cxiaomi/1'\n        pscreen = '-d'\n        plap = '-e%d' % self.lap\n        self.child = subprocess.Popen([program, pauto, ptrack, pbot, pscreen, plap], cwd=cwd, env=env)\n\n    def close(self):\n        if self.data_sock:\n            try:\n                self.data_sock.shutdown(socket.SHUT_RDWR)\n            except:\n                pass\n            self.data_sock.close()\n            self.data_sock = None\n        if self.child:\n            ret = self.child.wait()\n            self.child = None\n            if ret != 0:\n                raise Exception('child exit with code %d' % ret)\n\n    def recv_state(self):\n        msg = self.recv_msg()\n\n        if self.with_gt:\n            state, gt = msg.split('\\t', 1)\n            self.current_state = json.loads(state)\n            self.current_gt = json.loads(gt)\n        else:\n            self.current_state = json.loads(msg)\n            self.current_gt = None\n        return self.current_state\n\n    def reset(self):\n        '''\n        start a new game, return initial state, like:\n        { \"time\":0.00, \"end\":false, \"distance\":-25.01, \"damage\":0, \"lap\":0, \"gear\":0, \"speed\":0.01, \"rpm\":94.2, \"screen\":\"/9j/4AAQSkZJRgA....\" }\n        time: seconds from start\n        end: true/false, is game ended\n        distance: total distance(m) the car runs\n        damage: car damage due to colisions to road/barrier/other car\n        lap: current lap\n        geer: current geer\n        speed: current speed (m/s)\n        rpm: current engine rpm\n        screen: jpeg image of current driver view\n        '''\n        self.restart_torcs()\n        print('listen on %s:%d, waiting for torcs game to connect...' % self.sock.getsockname())\n        self.data_sock, _ = self.sock.accept()\n        print('connected!')\n        return self.recv_state()\n\n    def step(self, control):\n        self.send_msg(control.to_msg())\n        state = self.recv_state()\n        if state['end']:\n            self.close()\n        return state\n\n\ndef control(_env, _driver):\n    state = _env.reset()\n    while not state['end']:\n        predict_ctl = _driver.drive(state)\n        state = _env.step(predict_ctl)\n        ground_truth = env.current_gt\n        # print ground_truth\n\nif __name__ == '__main__':\n    env = TorcsEnv(torcs_root='/work/MachineLearning/aicontest/install/', track='road/wheel-2', lap=1, with_gt=True)\n    #driver = Driver('./tensorflow_model/model.ckpt') if DataConfig.model_type == 'tensorflow' else Driver(DataConfig.keras_model_path)\n    driver = Driver('./tensorflow_model/model.ckpt')\n    control(env, driver)\n","repo_name":"ritterliu/torcs_train","sub_path":"train/CNNControler.py","file_name":"CNNControler.py","file_ext":"py","file_size_in_byte":4628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"}
+{"seq_id":"23059284566","text":"import argparse\nimport json\nimport SQSnobFit\nimport zmq\n\nimport config as cfg\nfrom AbortException import AbortException\nfrom zmq_obj_function import zmq_obj_function\n\ndef initial_handshake(socket):\n    \"\"\"Performs the initial handshake that get initialization data before starting the optimization\"\"\"\n\n    # Request initial parameters\n    print(\"Sending initial_parameters request\")\n    socket.send(b\"initial_parameters\")\n\n    # Receive initial parameters\n    reply = socket.recv()\n    initial_parameters = json.loads(reply)\n    print(\"Initial parameters received: {}\".format(initial_parameters))\n\n    # Request bounds\n    print(\"Sending bounds request\")\n    socket.send(b\"bounds\")\n    \n    # Receive bounds\n    reply = socket.recv()\n    bounds = json.loads(reply)\n    print(\"Bounds received: {}\".format(bounds))\n\n    # Request budget\n    print(\"Sending budget request\")\n    socket.send(b\"budget\")\n    \n    # Receive budget\n    reply = socket.recv()\n    budget = json.loads(reply)\n    print(\"Budget received: {}\".format(budget))\n\n    # Confirm receipt\n    print(\"Sending options request\")\n    socket.send(b\"options\")\n\n    # Receive options\n    reply = socket.recv()\n    options = json.loads(reply)\n    print(\"options received: {}\".format(options))\n\n    return initial_parameters, bounds, budget, options\n    \n\ndef init_socket(binding):\n    print(\"Binding socket at {} ...\".format(binding))\n\n    context = zmq.Context()\n    socket = context.socket(zmq.REQ)\n    socket.connect(binding)\n    \n    print(\"Binding complete!\")\n\n    return socket\n\ndef parse_args():\n    \"\"\"Parse command line arguments\"\"\"\n\n    parser = argparse.ArgumentParser()\n    \n    parser.add_argument(\"config_file\", help=\"Location of the configuration file to use.\")\n    parser.add_argument(\"--default-config\", action=\"store_true\", help=\"Generate config file with default values at the location given by config_file.\")\n\n    args = parser.parse_args()\n\n    return args\n\ndef main():\n    \"\"\"Starting point for using SQSnobFit calls with zmq\"\"\"\n\n    args = parse_args()\n\n    # Optionally generate a default config file\n    if (args.default_config):\n        cfg.generate_default_config(args.config_file)\n\n    config = cfg.load(args.config_file)\n\n    address = config[\"ip_address\"] + \":\" + config[\"port\"]\n    socket = init_socket(address)\n\n    initial_parameters = config[\"param_init\"]\n    bounds = config[\"bounds\"]\n    budget = config[\"budget\"]\n    options = config[\"options\"]\n\n    if (not config[\"init_from_config\"]):\n        initial_parameters, bounds, budget, options = initial_handshake(socket)\n\n    options = SQSnobFit.optset(options)\n    \n    obj_func = zmq_obj_function(socket, config[\"direction\"])\n\n    results = None\n    history = None\n    \n    try:\n        results, history = SQSnobFit.minimize(obj_func, initial_parameters, bounds, budget, options)\n    except AbortException as e:\n        socket.send(b\"aborted\")\n        print(str(e))\n        return\n\n    # Send the best results\n    if (config[\"direction\"] == \"max\"):\n        results.optval = -results.optval\n    socket.send(json.dumps({ \"value\": results.optval, \"parameters\": results.optpar.tolist() , \"history\": history.tolist(), \"steps\": len(history) }).encode('utf-8'))\n    \n    print(\"Steps taken: {}\".format(len(history)))\n    print(\"Minimum reached: {0} at {1}\".format(results.optval, results.optpar))\n    \n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"RxnRover/snobfit_remote_optimization","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"}
+{"seq_id":"41222912302","text":"# https://programmers.co.kr/learn/courses/30/lessons/12907\n# 풀이) DP\n\ndef solution(n, money):\n    dp = [1] + [0] * n\n    \n    for coin in money:\n        for price in range(coin, n+1):\n            dp[price] += dp[price-coin]\n\n    return dp[n] % 1000000007\n    \n\nn, money = 5, [1,2,5]\t\nprint(solution(n, money))","repo_name":"hyez/Algorithms","sub_path":"프로그래머스/거스름돈.py","file_name":"거스름돈.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"}
+{"seq_id":"9959926065","text":"import pandas \nimport turtle\n\ndata = pandas.read_csv(\"50_states.csv\")\nstates = data.state.to_list()\n\nscreen = turtle.Screen()\nscreen.title(\"U.S States Game\")\n\nimage = \"blank_states_img.gif\"\nscreen.addshape(image)\nturtle.shape(image)\n\nright_answers = []\n\nscore = 0\nwhile len(right_answers) < 50:\n    user_answer = screen.textinput(f'your score {score}/50', 'what is your guess ?').title()\n\n    if user_answer == 'Exit':\n        break \n    if user_answer in states:\n        t = turtle.Turtle()\n        t.hideturtle()\n        t.penup()\n        state_data = data[data.state == user_answer]\n        t.goto(int(state_data.x), int(state_data.y))\n        t.write(user_answer)\n        right_answers.append(user_answer)\n        score += 1\n    else:\n        loop_on = True\n\nscreen.exitonclick()\n\nstates_to_learn = [i for i in states if i not in right_answers]\n# for i in states:\n#     if i not in right_answers:\n#         states_to_learn.append(i)\ndf = pandas.DataFrame(states_to_learn)\ndf.to_csv('learn.csv')","repo_name":"Draxsis/python-projects","sub_path":"U.S states game/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"}
+{"seq_id":"36315432247","text":"import cantor as c\n\ndef encode(l):\n    k=0\n    l.reverse()\n    for i in l:\n        k = c.cantor(i, k)\n    return k\n\ndef decode(n):\n    k=list()\n    while(n!=0):\n        s, d = c.reverse(n)\n        k.append(s)\n        n=d\n    return k\n\ndef length(n):\n    if n==0:\n        return 0\n    return 1+length(c.dx(n))\n\ndef projection(t, n):\n    if(t == 0 or t > length(n)):\n        return -1\n    for _ in range(t):\n        u, n = c.reverse(n)\n    return u\n\ndef increase(t, n):\n    if(t == 0 or t > length(n)):\n        return -1\n    k=list()\n    for _ in range(t-1):\n        s, n = c.reverse(n)\n        k.append(s)\n    s, n = c.reverse(n)\n    k.append(s+1)\n    k.reverse()\n    for i in k:\n        n = c.cantor(i,n)\n    return n\n\ndef decrease(t, n):\n    if(t == 0 or t > length(n)):\n        return -1\n    k=list()\n    for _ in range(t-1):\n        s, n = c.reverse(n)\n        k.append(s)\n    s, n = c.reverse(n)\n    k.append(s-1)\n    k.reverse()\n    for i in k:\n        n = c.cantor(i,n)\n    return n\n","repo_name":"samuelebompani/cantor","sub_path":"integerList.py","file_name":"integerList.py","file_ext":"py","file_size_in_byte":989,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"}
+{"seq_id":"17470478620","text":"import csv\nimport sys\nfrom Translator import Translator\n\nclass NumericTranslator(Translator):\n\n    def __init__(self):\n        self._allowed_values = [int, float, complex] # add numeric code as needed\n\n    def translate(self, dataset=[], key='', format=int):\n        new_dataset = []\n\n        super().validate_parameters(name='translate', parameter='format', argument=format, valid_values=self._allowed_values)\n\n        try:\n            for index, data in enumerate(dataset):\n                new_dataset.append(data)\n                try:\n                    formatted_numeric = format(data[key])\n\n                    data[key] = formatted_numeric\n                except:\n                    print(f\"ERROR [{Translator._stage}] Unhandled data format: '[{index}][numeric]{data[key]}'\")\n\n                    continue\n\n        except KeyError as err:\n            print(f\"ERROR [{Translator._stage}] Key value not found in collection:\", err)\n            sys.exit(1)\n\n        return new_dataset\n\ndef main():\n    dataset = []\n\n    with open('Test/budget_data_1.csv', 'r') as csvfile:\n        reader = csv.DictReader(csvfile)\n        for row in reader: dataset.append(row)\n\n    numeric = NumericTranslator()\n    dataset = numeric.translate(dataset, 'Revenue', float)\n\n    for data in dataset:\n        print(data)\n\nif __name__ == '__main__': main()\n","repo_name":"deleojean/python-challenge","sub_path":"Plugins/NumericTranslator.py","file_name":"NumericTranslator.py","file_ext":"py","file_size_in_byte":1333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"}
+{"seq_id":"24004795358","text":"m = 0\nk = ''\nfor s in open('24 (25).txt'):\n    k += s.strip()\n    if s.count('Q') >= m:\n        m = s.count('Q')\n        c = s.strip()\n\nn = len(c)\nfor i in sorted(set(c)):\n    if c.count(i) < n:\n        n = c.count(i)\n        na = i\nb = {x: k.count(x) for x in sorted(set(k))}\nprint(na)\nprint(b)\n","repo_name":"Talkasi/USE-prepearing","sub_path":"unsorted_hell/tasks_22-27/24 3.py","file_name":"24 3.py","file_ext":"py","file_size_in_byte":296,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"}
+{"seq_id":"15958534606","text":"from typing import Dict\n\n\nclass Player:\n    DEFAULT_GUILD = \"Unaffiliated\"\n\n    def __init__(self, name: str, hp: int, mp: int):\n        self.name = name\n        self.hp = hp\n        self.mp = mp\n        self.skills: Dict = {}\n        self.guild = Player.DEFAULT_GUILD\n\n    def add_skill(self, skill_name: str, mana_cost: int) -> str:\n        if skill_name not in self.skills:\n            self.skills[skill_name] = mana_cost\n\n            return f\"Skill {skill_name} added to the collection of the player {self.name}\"\n\n        return \"Skill already added\"\n\n    def player_info(self) -> str:\n        all_skills = []\n        for k, v in self.skills.items():\n            all_skills.append(f\"==={k} - {v}\")\n\n        return f\"Name: {self.name}\\n\" + \\\n               f\"Guild: {self.guild}\\n\" + \\\n               f\"HP: {self.hp}\\n\" + \\\n               f\"MP: {self.mp}\\n\" + \\\n               f\"{chr(10).join(all_skills)}\\n\"\n","repo_name":"pavlovsvpavel/SoftUni","sub_path":"python_oop_jun_2023/classes_and_objects/exercises/guild_system/project/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"}
+{"seq_id":"35892160906","text":"from PIL import Image\nfrom numpy import *\nfrom pylab import *\nimport Homography\nimport Warp\n\ndef ConvertPoints(j):\n    Index=Matches[j].nonzero()[0];\n    fp=Homography.MakeHomoG(L[j+1][Index,:2].T);\n    Index2=[int(Matches[j][i]) for i in Index];\n    tp=Homography.MakeHomoG(L[j][Index2,:2].T);\n    fp=vstack([fp[1],fp[0],fp[2]]);\n    tp=vstack([tp[1],tp[0],tp[2]]);\n    return fp,tp;\n\nimport sift\n\nFeatName=['../MergeImg/'+str(i)+'.sift' for i in range(5)];\nImgName=['../MergeImg/'+str(i)+'.jpg' for i in range(5)];\nL={}; D={};\nfor i in range(5):\n    sift.ProcessImage(ImgName[i],FeatName[i]);\n    L[i],D[i]=sift.ReadFeaturesFromFile(FeatName[i]);\n\nMatches={};\nfor i in range(4):\n    Matches[i]=sift.Match(D[i+1],D[i]);\n    '''\n    figure(); gray();\n    sift.PlotMatches(array(Image.open(ImgName[i+1]).convert('L')),array(Image.open(ImgName[i]).convert('L')),L[i+1],L[i],Matches[i]);\n    show();\n    '''\n\nModel=Homography.RansacModel();\n\nfp,tp=ConvertPoints(1);\nH12=Homography.HFromRansac(fp,tp,Model)[0];\nprint(H12);\nfp,tp=ConvertPoints(0);\nH01=Homography.HFromRansac(fp,tp,Model)[0];\nprint(H01);\ntp,fp=ConvertPoints(2);\nH32=Homography.HFromRansac(fp,tp,Model)[0];\nprint(H32);\ntp,fp=ConvertPoints(3);\nH43=Homography.HFromRansac(fp,tp,Model)[0];\nprint(H43);\n\nDelta=2000;\n\nImg1=array(Image.open(ImgName[1]),'uint8');\nImg2=array(Image.open(ImgName[2]),'uint8');\nImg12=Warp.Panorama(H12,Img1,Img2,Delta,Delta);\n\nImg1=array(Image.open(ImgName[0]),'f');\nImg02=Warp.Panorama(dot(H12,H01),Img1,Img12,Delta,Delta);\n\nImg1=array(Image.open(ImgName[3]),'f');\nImg32=Warp.Panorama(H32,Img1,Img02,Delta,Delta);\n\nImg1=array(Image.open(ImgName[4]),'f');\nImg42=Warp.Panorama(dot(H32,H43),Img1,Img32,Delta,2*Delta);\n\nfigure(); imshow(Img42.astype('uint8'));\nshow();\n\nimsave(\"../MergeImg/ImgRansac.jpg\",Img42.astype('uint8'));\n","repo_name":"paul2705/pythonCV","sub_path":"ImgProc/ImgRansac.py","file_name":"ImgRansac.py","file_ext":"py","file_size_in_byte":1809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"}
+{"seq_id":"71931003116","text":"from enum import Enum\r\n\r\nclass E_SystemTypes():\r\n    class E_FileUpload(Enum):\r\n        #Region Products\r\n        PackagePhoto = 1\r\n        PackageFile = 2\r\n        SongPhoto = 3\r\n        SongFile = 4\r\n    class E_Coupon(Enum):\r\n        #Region CouponTypes\r\n        PackageCP = 5\r\n        SongCp = 6\r\n        GameCp = 7\r\n        BookCp = 8\r\n    class E_MimeType(Enum):\r\n        #Region MimeTypes\r\n        Audio = 9\r\n        Document = 10\r\n        Video = 11\r\n        Photo = 12","repo_name":"EquinoxNoxNoxNoxNox/MyTelegramRobot","sub_path":"plugins/Modules/E_SystemTypes.py","file_name":"E_SystemTypes.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"}
+{"seq_id":"3604453972","text":"import logging\nimport os\n\nfrom aminator.plugins.provisioner.base import BaseProvisionerPlugin\nfrom aminator.util.linux import monitor_command, result_to_dict\nfrom aminator.util.metrics import cmdsucceeds, cmdfails, lapse\n\n__all__ = ('YumProvisionerPlugin',)\nlog = logging.getLogger(__name__)\n\n\nclass YumProvisionerPlugin(BaseProvisionerPlugin):\n    \"\"\"\n    YumProvisionerPlugin takes the majority of its behavior from BaseProvisionerPlugin\n    See BaseProvisionerPlugin for details\n    \"\"\"\n    _name = 'yum'\n\n    def _refresh_repo_metadata(self):\n        config = self._config.plugins[self.full_name]\n        return yum_clean_metadata(config.get('clean_repos', []))\n\n    @cmdsucceeds(\"aminator.provisioner.yum.provision_package.count\")\n    @cmdfails(\"aminator.provisioner.yum.provision_package.error\")\n    @lapse(\"aminator.provisioner.yum.provision_package.duration\")\n    def _provision_package(self):\n        result = self._refresh_repo_metadata()\n        if not result.success:\n            log.critical('Repo metadata refresh failed: {0.std_err}'.format(result.result))\n            return result\n        context = self._config.context\n        if context.package.get('local_install', False):\n            return yum_localinstall(context.package.arg)\n        else:\n            return yum_install(context.package.arg)\n\n    def _store_package_metadata(self):\n        context = self._config.context\n        config = self._config.plugins[self.full_name]\n        metadata = rpm_package_metadata(context.package.arg, config.get('pkg_query_format', ''), context.package.get('local_install', False))\n        for x in config.pkg_attributes:\n            metadata.setdefault(x, None)\n        context.package.attributes = metadata\n\n\ndef yum_install(package):\n    return monitor_command(['yum', '--nogpgcheck', '-y', 'install', package])\n\n\ndef yum_localinstall(path):\n    if not os.path.isfile(path):\n        log.critical('Package {0} not found'.format(path))\n        return None\n    return monitor_command(['yum', '--nogpgcheck', '-y', 'localinstall', path])\n\n\ndef yum_clean_metadata(repos=None):\n    clean = ['yum', 'clean', 'metadata']\n    if repos:\n        clean.extend(['--disablerepo', '*', '--enablerepo', ','.join(repos)])\n    return monitor_command(clean)\n\n\ndef rpm_query(package, queryformat, local=False):\n    cmd = 'rpm -q --qf'.split()\n    cmd.append(queryformat)\n    if local:\n        cmd.append('-p')\n    cmd.append(package)\n    return monitor_command(cmd)\n\n\ndef rpm_package_metadata(package, queryformat, local=False):\n    return result_to_dict(rpm_query(package, queryformat, local))\n","repo_name":"Netflix/aminator","sub_path":"aminator/plugins/provisioner/yum.py","file_name":"yum.py","file_ext":"py","file_size_in_byte":2587,"program_lang":"python","lang":"en","doc_type":"code","stars":939,"dataset":"github-code","pt":"73"}
+{"seq_id":"1920618964","text":"from spack.package import *\n\n\nclass RaxmlNg(CMakePackage):\n    \"\"\"RAxML-NG is a phylogenetic tree inference tool which uses\n    maximum-likelihood (ML) optimality criterion.\n\n    Its search heuristic is based on iteratively performing a series\n    of Subtree Pruning and Regrafting (SPR) moves,\n    which allows to quickly navigate to the best-known ML tree.\n    RAxML-NG is a successor of RAxML (Stamatakis 2014) and leverages\n    the highly optimized likelihood computation implemented in libpll\n    (Flouri et al. 2014).\"\"\"\n\n    homepage = \"https://github.com/amkozlov/raxml-ng/wiki\"\n    url = \"https://github.com/amkozlov/raxml-ng/archive/1.0.1.tar.gz\"\n    git = \"https://github.com/amkozlov/raxml-ng.git\"\n\n    version(\"1.1.0\", submodules=True)\n    version(\"1.0.2\", submodules=True)\n    version(\"1.0.1\", submodules=True)\n\n    variant(\"mpi\", default=True, description=\"Use MPI\")\n\n    depends_on(\"bison\")\n    depends_on(\"flex\")\n    depends_on(\"gmp\")\n    depends_on(\"mpi\", when=\"+mpi\")\n\n    def cmake_args(self):\n        return [self.define_from_variant(\"USE_MPI\", \"mpi\")]\n","repo_name":"spack/spack","sub_path":"var/spack/repos/builtin/packages/raxml-ng/package.py","file_name":"package.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","stars":3712,"dataset":"github-code","pt":"73"}
+{"seq_id":"17493141950","text":"\"\"\"\nAutor: Roberto Jaime Rico Sandoval.\nFille: Función lambda.\nDate: 18/ 08/ 2022\nFolio: 964NB09\n\"\"\"\n\nimport random\n\nlistaNumeros = []\n\nfunqui = lambda n:n % 2 == 0\n\nfor i in range(1, 11):\n    \n    num = random.randrange(10, 250)\n    listaNumeros.append(num)\n    \nlistaPar = list(filter(funqui, listaNumeros))\n\nprint(f\"\\nNúmeros pares: {listaPar}\")\n\n","repo_name":"RobertShilo98/PILARES-taller-de-python","sub_path":"ejercicio20/funciones_lambda.py","file_name":"funciones_lambda.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"}
+{"seq_id":"73935012077","text":"import asyncio\nimport io\nimport os\nimport re\n\nimport aiohttp\nimport discord\n\n\nTOKEN = os.getenv('TOKEN')\n\nCUSTOM_EMOJIS = re.compile('<:\\\\w+:\\\\d+>')\n\nclient = discord.Client(intents=discord.Intents.none())\ntree = discord.app_commands.CommandTree(client)\n\n# this is a bit stupid\ncache = {}\n\n\nasync def send_emojis(interaction, urls):\n    \"\"\"\n    Given a list of URLs to discord emojis, send a file of each.\n    \"\"\"\n\n    async def callback(url):\n        if url not in cache:\n            response = await client.session.get(url)\n            bytes = await response.read()\n\n            cache[url] = bytes\n\n        return cache[url]\n\n    resources = await asyncio.gather(*map(callback, urls))\n\n    files = [discord.File(filename='file.png', fp=io.BytesIO(resource)) for resource in resources]\n\n    return await interaction.response.send_message(files=files, ephemeral=True)\n\n\n@tree.context_menu(name='yoink emotes')\nasync def yoink_emotes(interaction, message: discord.Message):\n    emojis = CUSTOM_EMOJIS.findall(message.content)\n\n    if not emojis:\n        return await interaction.response.send_message(\"There are no custom emoji in that message.\")\n\n    urls = [discord.PartialEmoji.from_str(emoji).url for emoji in emojis]\n\n    return await send_emojis(interaction, urls)\n\n\n@tree.context_menu(name='yoink reactions')\nasync def yoink_reactions(interaction, message: discord.Message):\n    urls = [\n        reaction.emoji.url\n        for reaction in message.reactions\n        if isinstance(reaction.emoji, (discord.Emoji, discord.PartialEmoji))\n    ]\n\n    if not urls:\n        return await interaction.response.send_message(\"There are no custom reactions on this message.\")\n\n    return await send_emojis(interaction, urls)\n\n\nasync def main():\n    async with client, aiohttp.ClientSession() as session:\n        client.session = session\n        discord.utils.setup_logging()\n        await client.login(TOKEN)\n        await client.connect(reconnect=True)\n\n\nasyncio.run(main())\n","repo_name":"avayert/emote-yoinker","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"}
+{"seq_id":"34136709949","text":"# SPDX-License-Identifier: GPL-2.0-or-later\n\nimport os\n\n\ndef blend_list(path):\n    for dirpath, dirnames, filenames in os.walk(path):\n        # skip '.git'\n        dirnames[:] = [d for d in dirnames if not d.startswith(\".\")]\n        for filename in filenames:\n            if filename.lower().endswith(\".blend\"):\n                filepath = os.path.join(dirpath, filename)\n                yield filepath\n\n\ndef generate(dirpath, random_order, **kwargs):\n    files = list(blend_list(dirpath))\n    if random_order:\n        import random\n        random.shuffle(files)\n    else:\n        files.sort()\n\n    config = []\n    for f in files:\n        defaults = kwargs.copy()\n        defaults[\"file\"] = f\n        config.append(defaults)\n\n    return config, dirpath\n\n\ndef as_string(dirpath, random_order, exit, **kwargs):\n    \"\"\" Config loader is in demo_mode.py\n    \"\"\"\n    cfg, dirpath = generate(dirpath, random_order, **kwargs)\n\n    # hint for reader, can be used if files are not found.\n    cfg_str = [\n        \"# generated file\\n\",\n        \"\\n\",\n        \"# edit the search path so other systems may find the files below\\n\",\n        \"# based on name only if the absolute paths cannot be found\\n\",\n        \"# Use '//' for current blend file path.\\n\",\n        \"\\n\",\n        \"search_path = %r\\n\" % dirpath,\n        \"\\n\",\n        \"exit = %r\\n\" % exit,\n        \"\\n\",\n    ]\n\n    # All these work but use nicest formatting!\n    if 0: # works but not nice to edit.\n        cfg_str += [\"config = %r\" % cfg]\n    elif 0:\n        import pprint\n        cfg_str += [\"config = %s\" % pprint.pformat(cfg, indent=0, width=120)]\n    elif 0:\n        cfg_str += [(\"config = %r\" % cfg).replace(\"{\", \"\\n    {\")]\n    else:\n        import pprint\n\n        def dict_as_kw(d):\n            return \"dict(%s)\" % \", \".join((\"%s=%s\" % (k, pprint.pformat(v))) for k, v in sorted(d.items()))\n        ident = \"    \"\n        cfg_str += [\"config = [\\n\"]\n        for cfg_item in cfg:\n            cfg_str += [\"%s%s,\\n\" % (ident, dict_as_kw(cfg_item))]\n        cfg_str += [\"%s]\\n\\n\" % ident]\n\n    return \"\".join(cfg_str), dirpath\n","repo_name":"sobotka/blender-addons","sub_path":"system_demo_mode/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2081,"program_lang":"python","lang":"en","doc_type":"code","stars":127,"dataset":"github-code","pt":"73"}
depends_on(\"hdf5+fortran\", when=\"+fortran\")\n depends_on(\"hdf5\", when=\"~fortran\")\n depends_on(\"mpi\", when=\"^hdf5+mpi\")\n depends_on(\"zfp bsws=8\")\n\n patch(\n \"https://github.com/LLNL/H5Z-ZFP/commit/983a1870cefff5fdb643898a14eda855c2c231e4.patch?full_index=1\",\n sha256=\"07a53b8b0d4c1df62a3f9f21b30ad0eb90f26b38eb6cacc0de6482fd8f5daea2\",\n when=\"@1.0.1\",\n )\n patch(\"config.make.patch\", when=\"@0.7.0:0.8.0\")\n patch(\"config.make.0.7.0.patch\", when=\"@0.7.0\")\n patch(\"Makefile.0.7.0.patch\", when=\"@0.7.0\")\n patch(\"fj.patch\", when=\"@0.7.0: %fj\")\n\n @property\n def make_defs(self):\n cc = spack_cc\n fc = spack_fc\n if \"^hdf5+mpi\" in self.spec:\n cc = self.spec[\"mpi\"].mpicc\n fc = self.spec[\"mpi\"].mpifc\n make_defs = [\n \"PREFIX=%s\" % prefix,\n \"CC=%s\" % cc,\n \"HDF5_HOME=%s\" % self.spec[\"hdf5\"].prefix,\n \"ZFP_HOME=%s\" % self.spec[\"zfp\"].prefix,\n ]\n\n if \"+fortran\" in self.spec and fc:\n make_defs += [\"FC=%s\" % fc]\n else:\n make_defs += [\"FC=\"]\n\n return make_defs\n\n @property\n def build_targets(self):\n targets = [\"all\"]\n return self.make_defs + targets\n\n @property\n def install_targets(self):\n make_args = [\"install\"]\n return make_args + self.make_defs\n\n @run_after(\"build\")\n @on_package_attributes(run_tests=True)\n def check_build(self):\n make(\"check\", *self.make_defs, parallel=False)\n","repo_name":"spack/spack","sub_path":"var/spack/repos/builtin/packages/h5z-zfp/package.py","file_name":"package.py","file_ext":"py","file_size_in_byte":2544,"program_lang":"python","lang":"en","doc_type":"code","stars":3712,"dataset":"github-code","pt":"73"} +{"seq_id":"72732250475","text":"def gcd_naive(a, b):\n current_gcd = 1\n for d in range(2, min(a, b) + 1):\n if a % d == 0 and b % d == 0:\n if d > current_gcd:\n current_gcd = d\n return current_gcd\n\ndef gcd(a, b):\n if min(a, b) == 0:\n return max(a, b)\n else:\n return gcd(min(a, b), max(a, b) % min(a, b))\n","repo_name":"alisazosimova/algorithms","sub_path":"gcd.py","file_name":"gcd.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"37432538299","text":"n, k = map(int, input().split())\n\nif n-k < k:\n k = n-k\n\nup = 1\ndown = 1\nfor i in range(k):\n up *= n\n n -= 1\n down *= k\n k -= 1\n\nprint(up//down)","repo_name":"kingchobo/TIL","sub_path":"알고리즘문제풀이/BOJ/1. 
브론즈/BOJ_11050_이항계수1.py","file_name":"BOJ_11050_이항계수1.py","file_ext":"py","file_size_in_byte":158,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"40013611974","text":"n = int(input())\ntable=[list(map(int,input().split())) for _ in range(n)]\nv = [False for _ in range(n)]\n\nminv = 99999\n\ndef backtracking(num, ind):\n global minv\n if num==n//2:\n x,y=0,0\n for i in range(n):\n for j in range(n):\n if v[i] and v[j]:\n x += table[i][j]\n elif not v[i] and not v[j]:\n y += table[i][j]\n minv = min(minv, abs(x-y))\n else:\n for i in range(ind, n):\n if not v[i]:\n v[i]=True\n backtracking(num+1, i+1)\n v[i]=False\n\nbacktracking(0,0)\nprint(minv)","repo_name":"shbin05/Baekjoon","sub_path":"samsung sw/14889.py","file_name":"14889.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"29222187547","text":"from photostore import filetools, db\nfrom photostore.modules.editorjs import renderBlock\nfrom photostore.permissions import admin_perm\nfrom photostore.store.whoosh_schemas import PhotoIndexSchema\nfrom photostore.store.forms import PhotoDetailsForm, SearchPhotosForm\nfrom photostore.store.models import Photo, PhotoCoverage, PhotoPaginaBusqueda\nfrom photostore.store.utiles import StorageController\nfrom photostore.store.permissions import DownloadCoveragePermission\nfrom photostore.store.permissions import EditPhotoPermission\nfrom photostore.store.permissions import DownloadPhotoPermission\nfrom photostore.store.permissions import EDIT_PHOTO, DOWNLOAD_PHOTO\nfrom whoosh.filedb.filestore import FileStorage\nfrom whoosh.qparser import QueryParser\nfrom whoosh import sorting\nfrom flask_login import login_required, current_user\nfrom flask_breadcrumbs import register_breadcrumb, default_breadcrumb_root\nfrom flask_menu import register_menu, current_menu\nfrom flask import Blueprint, current_app, render_template, abort\nfrom flask import request, json, send_file, request, url_for\nfrom flask import stream_with_context\nfrom flask import Response\nfrom pathlib import Path\nfrom werkzeug.utils import redirect, secure_filename\nimport os\nimport tempfile\nimport datetime\n\n\nbp = Blueprint(\n 'photos', __name__, template_folder='templates')\ndefault_breadcrumb_root(bp, '.index')\n\n\ndef can_edit_cobertura(cob: PhotoCoverage):\n return (cob.author_id == current_user.id) or admin_perm.can()\n\n\n@bp.context_processor\ndef bp_context():\n def can_download(id):\n \"\"\"Can download original image\"\"\"\n return DownloadPhotoPermission(id=id).can()\n\n def can_download_coverage(id):\n return DownloadCoveragePermission(id).can()\n\n return {\n 'can_download_photo': can_download,\n 'can_download_coverage': can_download_coverage\n }\n\n\n@bp.before_app_first_request\ndef setupMenus():\n navbar = current_menu.submenu(\"navbar.photostore\")\n navbar._external_url = \"#!\"\n navbar._endpoint = None\n navbar._text = \"NAVBAR\"\n\n # mis actions\n actions = current_menu.submenu(\"actions.photostore\")\n actions._text = \"Fotos\"\n actions._endpoint = None\n actions._external_url = \"#!\"\n\n\n@bp.route('/photo/preview/')\ndef photo_thumbnail(id):\n p = Photo.query.get_or_404(id)\n if Path(p.thumbnail).is_file() is False:\n # this is slow but ensure the thumbnail is there in the next load\n StorageController.getInstance().generateThumbnail(p)\n abort(404)\n return 
send_file(p.thumbnail)\n\n\n@bp.route('/photo/download/archive/')\n@login_required\ndef photo_download(id):\n \"\"\"Descargar el zip con la foto y la información de la foto\"\"\"\n web_ready = True if request.args.get('web') else False\n if DownloadPhotoPermission(id=id).can() is False and web_ready is False:\n abort(403)\n p = Photo.query.get_or_404(id)\n file_name = StorageController.getInstance().makePhotoZip(\n p, web_ready=web_ready)\n file_handle = open(file_name, 'rb')\n\n def stream_and_remove():\n yield from file_handle\n file_handle.close()\n os.remove(file_name)\n\n return Response(\n stream_with_context(stream_and_remove()),\n headers={\n 'Content-Type': 'application/zip',\n 'Content-Disposition': 'attachment; filename=\"{}\"'.format(\n Path(file_name).name)\n }\n )\n\n\ndef view_photo_download_page_dlc(*args, **kwargs):\n id = request.view_args['id']\n return [\n {\n 'text': 'Exportar',\n 'url': url_for('.photo_download_page', id=id)\n }\n ]\n\n\n@bp.route('/photo/download/')\n@register_breadcrumb(\n bp, '.index.photo_download_page', '',\n dynamic_list_constructor=view_photo_download_page_dlc)\n@login_required\ndef photo_download_page(id):\n \"\"\"Present page with export options\"\"\"\n photo = Photo.query.get_or_404(id)\n return render_template(\n 'store/photo_download_page.html',\n foto=photo)\n\n\ndef view_photo_details_dlc(*args, **kwargs):\n id = request.view_args['id']\n return [\n {\n 'text': 'Detalles',\n 'url': url_for('.photo_details', id=id)\n }\n ]\n\n\n@bp.route('/photo/details/')\n@register_breadcrumb(\n bp, '.index.photo_details.id', 'Detalles',\n dynamic_list_constructor=view_photo_details_dlc)\n@login_required\ndef photo_details(id):\n p = Photo.query.get_or_404(id)\n can_edit = EditPhotoPermission(p.md5)\n return render_template(\n 'store/photo_details.html', foto=p, can_edit=can_edit)\n\n\n@bp.route('/photo/edit/', methods=['GET', 'POST'])\n@register_breadcrumb(bp, '.index.photo_edit.id', 'Editar datos de la foto')\ndef photo_edit(id):\n p = Photo.query.get_or_404(id)\n can_edit = EditPhotoPermission(p.md5)\n if can_edit is False:\n abort(403)\n\n form = PhotoDetailsForm()\n if form.validate_on_submit():\n p.headline = form.headline.data\n p.credit_line = form.credit_line.data\n p.excerpt = form.excerpt.data\n tags = list(filter(None, json.loads(form.tags.data)))\n p.keywords = [t.lower() for t in tags]\n db.session.add(p)\n db.session.commit()\n # reindexar la foto para que consten los cambios\n StorageController.getInstance().indexPhoto(p)\n return redirect(url_for('.photo_details', id=p.md5))\n\n return render_template(\n 'store/photo_edit.html', foto=p, form=form)\n\n\n@bp.route('/')\n@register_breadcrumb(bp, '.index', 'Fotos')\n@register_menu(bp, 'navbar.photostore.index', 'Fotos')\n@register_menu(bp, 'actions.photostore.index', 'Galerias')\n@login_required\ndef index():\n page = request.args.get('page', 1, type=int)\n form = SearchPhotosForm()\n coberturas = PhotoCoverage.query.order_by(\n PhotoCoverage.archive_on.desc()).paginate(page, per_page=4)\n\n return render_template(\n 'store/index.html', coberturas=coberturas,\n form=form, can_edit=can_edit_cobertura)\n\n\ndef verCobertura_dlc(*args, **kwargs):\n id = request.view_args['id']\n cob = PhotoCoverage.query.get_or_404(id)\n return [\n {\n 'text': cob.headline,\n 'url': url_for('.verCobertura', id=cob.id)\n }\n ]\n\n\n@bp.route('/ver/cobertura/')\n@register_breadcrumb(\n bp, '.index.verCobertura', '',\n dynamic_list_constructor=verCobertura_dlc)\ndef verCobertura(id):\n cobertura = 
PhotoCoverage.query.get_or_404(id)\n form = SearchPhotosForm()\n return render_template(\n \"store/ver_cobertura.html\",\n cobertura=cobertura,\n form=form,\n can_edit=can_edit_cobertura)\n\n\ndef view_editarCobertura_dlc(*args, **kwargs):\n id = request.view_args['id']\n cob = PhotoCoverage.query.get_or_404(id)\n return [\n {\n 'text': 'Editar Cobertura',\n 'url': url_for('.editarCobertura', id=cob.id)\n }\n ]\n\n\n@bp.route('/editar/cobertura/')\n@register_breadcrumb(\n bp, '.index.editarCobertura', '',\n dynamic_list_constructor=view_editarCobertura_dlc)\n@login_required\ndef editarCobertura(id):\n cobertura = PhotoCoverage.query.get_or_404(id)\n if can_edit_cobertura(cobertura) is False:\n abort(403)\n\n return render_template(\n 'store/editar_cobertura.html', cobertura=cobertura)\n\n\n@bp.route('/myphotos')\n@register_breadcrumb(bp, '.index.mis_fotos', 'Mis fotos')\n@register_menu(bp, 'actions.photostore.mis_fotos', 'Mis fotos')\n@login_required\ndef mis_fotos():\n \"\"\"Las fotos de este usuario\"\"\"\n page = request.args.get('page', 1, type=int)\n form = SearchPhotosForm()\n photos = Photo.query.filter_by(\n upload_by=current_user.id\n ).order_by(\n Photo.archive_on.desc()).paginate(page, per_page=12)\n return render_template(\n 'store/mis_fotos.html', fotos=photos, search_form=form)\n\n\n@bp.route('/search', methods=['GET', 'POST'])\n@register_breadcrumb(bp, '.index.buscar_indice', 'Buscar')\n@register_menu(\n bp, 'actions.photostore.buscar_indice', 'Buscar Fotos')\n@login_required\ndef buscar_indice():\n form = SearchPhotosForm()\n userquery = request.args.get('userquery', \"\")\n try:\n page = int(request.args.get('page', '1'))\n page = page if page > 0 else 1\n except ValueError:\n page = 1\n\n if form.validate_on_submit():\n userquery = form.userquery.data\n\n # hacer la busqueda aqui\n base = Path(current_app.config.get('INDEX_BASE_DIR'))\n store = FileStorage(str(base / 'photos'))\n ix = store.open_index()\n qp = QueryParser(\"excerpt\", PhotoIndexSchema())\n keywords_facet = sorting.FieldFacet(\"keywords\", maptype=sorting.Count)\n taken_facet = sorting.DateRangeFacet(\n \"taken_on\",\n datetime.datetime(2002, 1, 1),\n datetime.datetime.now(),\n datetime.timedelta(days=30),\n maptype=sorting.Count\n )\n\n with ix.searcher() as s:\n results = PhotoPaginaBusqueda(s.search_page(\n qp.parse(userquery, debug=False), page, pagelen=9,\n groupedby={\n \"keywords\": keywords_facet,\n \"taken_on\": taken_facet,\n },\n sortedby=\"taken_on\", reverse=True)\n )\n\n keywords_grp = dict()\n # extraer las agrupaciones por keywords de los resultados\n for k, v in results.groups(name=\"keywords\").items():\n keywords_grp[k] = {\n \"documents\": v,\n \"text\": k,\n \"link\": url_for(\n '.buscar_indice',\n userquery='keywords:\"{}\" {}'.format(\n k, userquery))\n }\n # --\n\n # extraer los rangos de fechas\n taken_grp = dict()\n for k, v in results.groups(name=\"taken_on\").items():\n if k is not None:\n start, end = k # es una tupla\n taken_grp[k] = { # range name\n \"documents\": v, # cantidad de documentos\n \"start\": start,\n \"end\": end,\n \"link\": url_for(\n '.buscar_indice',\n userquery='taken_on:[{} TO {}] {}'.format(\n start.strftime(\"%Y%m%d\"),\n end.strftime(\"%Y%m%d\"),\n userquery\n ))\n }\n # --\n\n return render_template(\n 'store/search.html',\n form=form, results=results, keywords_grp=keywords_grp,\n taken_grp=taken_grp, userquery=userquery)\n\n\n@bp.route('/upload-form')\n@register_breadcrumb(bp, '.index.upload_coverture', 'Subir cobertura')\n@register_menu(\n bp, 
'actions.photostore.upload_coverture', 'Subir cobertura')\n@login_required\ndef upload_coverture():\n    return render_template('store/upload.html')\n\n\ndef view_download_coverture_dlc(*args, **kwargs):\n    id = request.view_args['id']\n    cob = PhotoCoverage.query.get_or_404(id)\n    return [\n        {\n            'text': 'Exportar',\n            'url': url_for('.download_coverture', id=cob.id)\n        }\n    ]\n\n\n@bp.route('/exportar/cobertura/<id>')\n@register_breadcrumb(\n    bp, '.index.download_coverture', '',\n    dynamic_list_constructor=view_download_coverture_dlc)\n@login_required\ndef download_coverture(id):\n    cobertura = PhotoCoverage.query.get_or_404(id)\n    return render_template(\n        'store/download_coverture.html', cobertura=cobertura)\n\n\n@bp.route('/exportar/cobertura/archive/<id>')\n@login_required\ndef download_coberture_archive(id):\n    \"\"\"Download the coverture .zip archive\"\"\"\n    web_ready = True if request.args.get('web') else False\n\n    if DownloadCoveragePermission(id=id).can() is False and web_ready is False:\n        abort(403)\n\n    cov = PhotoCoverage.query.get_or_404(id)\n    file_name = StorageController.getInstance().exportCoverage(\n        cov, web_ready=web_ready)\n    file_handle = open(file_name, 'rb')\n\n    def stream_and_remove():\n        yield from file_handle\n        file_handle.close()\n        os.remove(file_name)\n\n    return Response(\n        stream_with_context(stream_and_remove()),\n        headers={\n            'Content-Type': 'application/zip',\n            'Content-Disposition': 'attachment; filename=\"{}\"'.format(\n                Path(file_name).name)\n        }\n    )\n\n\n@bp.route('/upload', methods=['POST'])\n@login_required\ndef handle_upload():\n    \"\"\"Handle uploads of photos\"\"\"\n    if 'image' not in request.files:\n        current_app.logger.debug(\"no file sent\")\n        abort(400)\n\n    file = request.files.get('image')\n    if file.filename == '':\n        return {'message': 'Invalid image'}, 400\n\n    if filetools.allowed_file(file.filename):\n        filename = secure_filename(file.filename)\n        fullname = os.path.join(tempfile.mkdtemp(), filename)\n        file.save(fullname)\n        # process the image here\n        # --\n        keywords = json.loads(request.form.get('keywords'))\n        user_data = {\n            'headline': request.form.get('headline'),\n            'creditline': request.form.get('creditline'),\n            'keywords': list(filter(None, keywords)),\n            'excerpt': request.form.get('excerpt'),\n            'uploader': current_user.id,\n            'taken_by': request.form.get('takenby')\n        }\n        im = StorageController.getInstance().processPhoto(\n            fullname, user_data)\n        if im:\n            # grant edit permission to the uploading user\n            # so they can modify the photo data\n            # REVIEW: this may overwrite existing permissions\n            current_user.getUserRole().addPermission(\n                EDIT_PHOTO, im.md5, 'foto')\n            current_user.getUserRole().addPermission(\n                DOWNLOAD_PHOTO, im.md5, 'foto')\n            # return the processed image information, mainly\n            # the md5 / id of the image\n            db.session.add(im)\n            db.session.commit()\n            return {'md5': im.md5}, 200\n        else:\n            return {\"message\": \"Invalid image\"}, 400\n\n    return {\"message\": \"Something went wrong\"}, 400\n\n\n@bp.context_processor\ndef render_excerpt_to_html():\n    def render_excerpt(in_data):\n        data = json.loads(in_data)\n        return render_template(\n            'store/editorjs/photo_excerpt.html',\n            data=data,\n            block_renderer=renderBlock)\n\n    return dict(render_excerpt=render_excerpt)\n","repo_name":"ybenitezf/photostore","sub_path":"photostore/store/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":14324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"}
+{"seq_id":"41466150097","text":"from 
collections import deque\nfrom typing import List\n\n\nclass Solution:\n    def findCircleNum(self, isConnected: List[List[int]]) -> int:\n        ans = 0\n        total = len(isConnected)\n        visited = [False for _ in range(total)]\n        for i in range(total):\n            if not visited[i]:\n                ans += 1\n                visited[i] = True\n                q = deque([i])\n                while q:\n                    curr_city = q.popleft()\n                    for adj_city in range(total):\n                        if isConnected[curr_city][adj_city] and not visited[adj_city]:\n                            visited[adj_city] = True\n                            q.append(adj_city)\n            else:\n                continue\n        return ans\n    \n","repo_name":"FallingStar624/GSDS3_Algo","sub_path":"FallingStar/LeetCode_W8/547_Number_of_Provinces.py","file_name":"547_Number_of_Provinces.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"73"}
+{"seq_id":"15684254254","text":"from copy import deepcopy\nimport numpy as np\n\ndef x_rot(t):\n    rot = [[1.0, 0.0, 0.0],\n           [0.0, np.cos(t), -np.sin(t)],\n           [0.0, np.sin(t), np.cos(t)]]\n    return np.array(rot)\ndef y_rot(t):\n    rot = [[np.cos(t), 0.0, np.sin(t)],\n           [ 0.0, 1.0, 0.0],\n           [-np.sin(t), 0.0, np.cos(t)]]\n    return np.array(rot)\ndef z_rot(t):\n    rot = [[np.cos(t), -np.sin(t), 0.0],\n           [np.sin(t), np.cos(t), 0.0],\n           [ 0.0, 0.0, 1.0]]\n    return np.array(rot)\ndef rpy_rot(rpy):\n    return np.matmul(z_rot(rpy[2]), np.matmul(y_rot(rpy[1]), x_rot(rpy[0])))\ndef diag_mat(diag):\n    mat = np.eye(len(diag))\n    for i in range(len(diag)):\n        mat[i,i] = diag[i]\n    return mat\n\ndef x_rot_dot(t):\n    rot = [[0.0, 0.0, 0.0],\n           [0.0, -np.sin(t), -np.cos(t)],\n           [0.0, np.cos(t), -np.sin(t)]]\n    return np.array(rot)\ndef y_rot_dot(t):\n    rot = [[-np.sin(t), 0.0, np.cos(t)],\n           [ 0.0, 0.0, 0.0],\n           [-np.cos(t), 0.0, -np.sin(t)]]\n    return np.array(rot)\ndef z_rot_dot(t):\n    rot = [[-np.sin(t), -np.cos(t), 0.0],\n           [np.cos(t), -np.sin(t), 0.0],\n           [ 0.0, 0.0, 0.0]]\n    return np.array(rot)\n\n\nclass Agent:\n    def __init__(self, env=None):\n        self.env = env\n        self.x_dim = 3*4\n        self.u_dim = 4*3\n        self.damping_ratio = 1e-8\n        self.learning_rate = 0.5\n        self.max_iteration = 10 #10\n        \n        self.Qf_mat = 100.0*np.eye(self.x_dim)\n        self.Qf_mat = np.matmul(self.Qf_mat.T, self.Qf_mat)\n        self.Q_mat = 1.0*np.eye(self.x_dim) #diag_mat([1.0]*6 + [0.0]*6)\n        self.Q_mat = np.matmul(self.Q_mat.T, self.Q_mat) \n        self.R_mat = 1.0*np.eye(self.u_dim)\n        self.R_mat = np.matmul(self.R_mat.T, self.R_mat) \n\n        #for constraint\n        self.const_dim = 17\n        self.init_lambda_vector = np.zeros((self.const_dim, 1))\n        self.init_mu_vector = np.ones((self.const_dim, 1))\n        self.mu_scaling_factor = 2.0\n        self.max_lambda = 10.0\n\n\n    def get_action(self, init_x, init_u_list, delta_time_list, foot_pos_list, contact_phi_list, target_x_list, target_u_list):\n        #example :\n        #delta_time_list = [0.01, 0.01, 0.01]\n        #target_x_list = [1, 1, 1, 1]\n        #target_u_list = [0, 0, 0]\n\n        #get x list\n        J_value = 0\n        x_list = [init_x]\n        #u_list = deepcopy(target_u_list)\n        u_list = deepcopy(init_u_list)\n        lambda_list = [self.init_lambda_vector for i in range(len(delta_time_list) + 1)]\n        mu_vector = deepcopy(self.init_mu_vector)\n        for t_idx, delta_t in enumerate(delta_time_list):\n            x = x_list[t_idx]\n            u = u_list[t_idx]\n            temp_foot_pos_list = foot_pos_list[t_idx]\n            next_x = self.transition(x, u, temp_foot_pos_list, delta_t)\n            x_list.append(next_x)\n            J_value += 0.5*np.matmul((x_list[t_idx] - target_x_list[t_idx]).T, np.matmul(self.Q_mat, x_list[t_idx] - target_x_list[t_idx])) \\\n                        + 0.5*np.matmul((u_list[t_idx] - target_u_list[t_idx]).T, np.matmul(self.R_mat, (u_list[t_idx] - target_u_list[t_idx])))\n        J_value += 
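Note: the record above solves LeetCode 547 (Number of Provinces) with BFS over the adjacency matrix. A disjoint-set union reaches the same count without an explicit queue; the sketch below is a hypothetical alternative, not part of the FallingStar624/GSDS3_Algo record:

from typing import List

def find_circle_num_dsu(isConnected: List[List[int]]) -> int:
    # parent[i] points toward the representative of city i's province
    n = len(isConnected)
    parent = list(range(n))

    def find(x: int) -> int:
        while parent[x] != x:
            parent[x] = parent[parent[x]]  # path halving keeps trees shallow
            x = parent[x]
        return x

    count = n
    for i in range(n):
        for j in range(i + 1, n):
            if isConnected[i][j]:
                ri, rj = find(i), find(j)
                if ri != rj:
                    parent[ri] = rj  # merge two provinces
                    count -= 1
    return count

Each successful union reduces the province count by exactly one, so no second pass over parent is needed.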
0.5*np.matmul((x_list[len(delta_time_list)] - target_x_list[len(delta_time_list)]).T, np.matmul(self.Qf_mat, x_list[len(delta_time_list)] - target_x_list[len(delta_time_list)]))\n\n #K, d\n K_list = np.zeros((len(delta_time_list), self.u_dim, self.x_dim))\n d_list = np.zeros((len(delta_time_list), self.u_dim, 1))\n\n cnt = 0\n while cnt < self.max_iteration:\n pre_J_value = J_value\n cnt += 1\n\n #update lambda list\n for t_idx in range(len(delta_time_list) + 1):\n temp_foot_pos_list, temp_contact_phi_list = foot_pos_list[t_idx], contact_phi_list[t_idx]\n if t_idx == len(delta_time_list):\n const_vector = self.get_const(x_list[t_idx], np.zeros((self.u_dim, 1)), temp_foot_pos_list, temp_contact_phi_list)\n else:\n const_vector = self.get_const(x_list[t_idx], u_list[t_idx], temp_foot_pos_list, temp_contact_phi_list)\n lambda_list[t_idx] = np.clip(lambda_list[t_idx] + mu_vector*const_vector, 0.0, self.max_lambda)\n mu_vector = self.mu_scaling_factor*mu_vector \n\n #backward pass\n temp_foot_pos_list, temp_contact_phi_list = foot_pos_list[len(delta_time_list)], contact_phi_list[len(delta_time_list)]\n const_vector = self.get_const(x_list[len(delta_time_list)], np.zeros((self.u_dim, 1)), temp_foot_pos_list, temp_contact_phi_list)\n C_x_mat, C_u_mat = self.get_C_mat(x_list[len(delta_time_list)], np.zeros((self.u_dim, 1)), temp_foot_pos_list, temp_contact_phi_list)\n I_mat = self.get_I_mat(const_vector, lambda_list[len(delta_time_list)], mu_vector)\n P_mat = self.Qf_mat + np.matmul(C_x_mat.T, np.matmul(I_mat, C_x_mat))\n p_vector = np.matmul(self.Qf_mat, x_list[len(delta_time_list)] - target_x_list[len(delta_time_list)]) \\\n + np.matmul(C_x_mat.T, lambda_list[len(delta_time_list)] + np.matmul(I_mat, const_vector))\n for t_idx in range(len(delta_time_list) - 1, -1, -1):\n delta_t = delta_time_list[t_idx]\n temp_foot_pos_list, temp_contact_phi_list = foot_pos_list[t_idx], contact_phi_list[t_idx]\n A_mat, B_mat = self.get_A_B_mat(x_list[t_idx], u_list[t_idx], temp_foot_pos_list, delta_t)\n const_vector = self.get_const(x_list[t_idx], u_list[t_idx], temp_foot_pos_list, temp_contact_phi_list)\n C_x_mat, C_u_mat = self.get_C_mat(x_list[t_idx], u_list[t_idx], temp_foot_pos_list, temp_contact_phi_list)\n I_mat = self.get_I_mat(const_vector, lambda_list[t_idx], mu_vector)\n\n Qxx = self.Q_mat + np.matmul(A_mat.T, np.matmul(P_mat, A_mat)) + np.matmul(C_x_mat.T, np.matmul(I_mat, C_x_mat))\n Quu = self.R_mat + np.matmul(B_mat.T, np.matmul(P_mat, B_mat)) + np.matmul(C_u_mat.T, np.matmul(I_mat, C_u_mat))\n Qux = np.matmul(B_mat.T, np.matmul(P_mat, A_mat)) + np.matmul(C_u_mat.T, np.matmul(I_mat, C_x_mat))\n Qxu = np.matmul(A_mat.T, np.matmul(P_mat, B_mat)) + np.matmul(C_x_mat.T, np.matmul(I_mat, C_u_mat))\n Qx = np.matmul(self.Q_mat, x_list[t_idx] - target_x_list[t_idx]) + np.matmul(A_mat.T, p_vector) + np.matmul(C_x_mat.T, lambda_list[t_idx] + np.matmul(I_mat, const_vector))\n Qu = np.matmul(self.R_mat, u_list[t_idx] - target_u_list[t_idx]) + np.matmul(B_mat.T, p_vector) + np.matmul(C_u_mat.T, lambda_list[t_idx] + np.matmul(I_mat, const_vector))\n\n temp_mat = -np.linalg.inv(Quu + self.damping_ratio*np.eye(self.u_dim))\n K_mat = np.matmul(temp_mat, Qux)\n d_vector = np.matmul(temp_mat, Qu)\n\n P_mat = Qxx + np.matmul(K_mat.T, np.matmul(Quu, K_mat)) + np.matmul(K_mat.T, Qux) + np.matmul(Qxu, K_mat)\n p_vector = Qx + np.matmul(K_mat.T, np.matmul(Quu, d_vector)) + np.matmul(K_mat.T, Qu) + np.matmul(Qxu, d_vector)\n K_list[t_idx] = K_mat\n d_list[t_idx] = d_vector\n\n '''\n #forward pass\n new_x_list = [init_x]\n 
new_u_list = []\n J_value = 0\n for t_idx, delta_t in enumerate(delta_time_list)):\n delta_x = new_x_list[t_idx] - x_list[t_idx]\n delta_u = np.matmul(K_list[t_idx], delta_x) + self.learning_rate*d_list[t_idx]\n new_u_list.append(u_list[t_idx] + delta_u)\n temp_foot_pos_list = foot_pos_list[t_idx]\n next_x = self.transition(new_x_list[t_idx], new_u_list[t_idx], temp_foot_pos_list, delta_t)\n new_x_list.append(next_x)\n J_value += 0.5*np.matmul((new_x_list[t_idx] - target_x_list[t_idx]).T, np.matmul(self.Q_mat, new_x_list[t_idx] - target_x_list[t_idx])) \\\n + 0.5*np.matmul((new_u_list[t_idx] - target_u_list[t_idx]).T, np.matmul(self.R_mat, (new_u_list[t_idx] - target_u_list[t_idx])))\n J_value += 0.5*np.matmul((new_x_list[len(delta_time_list)] - target_x_list[len(delta_time_list)]).T, np.matmul(self.Qf_mat, new_x_list[len(delta_time_list)] - target_x_list[len(delta_time_list)]))\n\n print(pre_J_value - J_value, J_value)\n #print(new_x_list[-1][:3,0])\n if pre_J_value - J_value < 0.0:\n break\n '''\n #forward pass\n learning_rate = self.learning_rate\n while True:\n new_x_list = [init_x]\n new_u_list = []\n J_value = 0\n for t_idx, delta_t in enumerate(delta_time_list):\n delta_x = new_x_list[t_idx] - x_list[t_idx]\n delta_u = np.matmul(K_list[t_idx], delta_x) + learning_rate*d_list[t_idx]\n new_u_list.append(u_list[t_idx] + delta_u)\n temp_foot_pos_list = foot_pos_list[t_idx]\n next_x = self.transition(new_x_list[t_idx], new_u_list[t_idx], temp_foot_pos_list, delta_t)\n new_x_list.append(next_x)\n J_value += 0.5*np.matmul((new_x_list[t_idx] - target_x_list[t_idx]).T, np.matmul(self.Q_mat, new_x_list[t_idx] - target_x_list[t_idx])) \\\n + 0.5*np.matmul((new_u_list[t_idx] - target_u_list[t_idx]).T, np.matmul(self.R_mat, (new_u_list[t_idx] - target_u_list[t_idx])))\n J_value += 0.5*np.matmul((new_x_list[len(delta_time_list)] - target_x_list[len(delta_time_list)]).T, np.matmul(self.Qf_mat, new_x_list[len(delta_time_list)] - target_x_list[len(delta_time_list)]))\n if pre_J_value - J_value >= 0.0:\n break\n learning_rate *= 0.5\n print(pre_J_value - J_value, J_value)\n #print(new_x_list[-1][:3,0])\n x_list = new_x_list\n u_list = new_u_list\n\n delta_x = new_x_list[0] - x_list[0]\n delta_u = np.matmul(K_list[0], delta_x) + d_list[0]\n return u_list[0] + delta_u, u_list\n\n def transition(self, x, u, foot_pos_list, delta_t):\n assert len(x) == self.x_dim and len(u) == self.u_dim\n\n com_pos = x[:3, 0]\n rpy = x[3:6, 0]\n com_vel = x[6:9, 0]\n base_ang_vel = x[9:, 0]\n\n force_list = u.reshape((4,3))\n f_net = np.sum(force_list, axis=0)\n base_torque_net = np.sum([np.cross(foot_pos_list[i] - com_pos, force_list[i]) for i in range(4)], axis=0)\n base_torque_net = np.matmul(z_rot(rpy[2]).T, base_torque_net)\n \n next_com_pos = com_pos + delta_t*com_vel + 0.5*(delta_t**2)*(f_net/self.env.model.mass + self.env.model.gravity)\n next_rpy = rpy + delta_t*base_ang_vel + 0.5*(delta_t**2)*np.matmul(np.linalg.inv(self.env.model.inertia), base_torque_net)\n next_com_vel = com_vel + delta_t*(f_net/self.env.model.mass + self.env.model.gravity)\n next_base_ang_vel = base_ang_vel + delta_t*np.matmul(np.linalg.inv(self.env.model.inertia), base_torque_net)\n\n new_x = np.concatenate([next_com_pos.ravel(), next_rpy.ravel(), next_com_vel.ravel(), next_base_ang_vel.ravel()])\n new_x = new_x.reshape((-1, 1))\n return new_x\n\n def get_const(self, x, u, foot_pos_list, contact_phi_list):\n com_pos = x[:3, 0]\n rpy = x[3:6, 0]\n com_vel = x[6:9, 0]\n base_ang_vel = x[9:, 0]\n force_list = u.reshape((4,3))\n\n z_const = 
0.4 * 0.9\n ground_normal = np.array([0, 0, 1])\n friction_coeff = 0.9\n\n const = np.zeros((self.const_dim, 1))\n const_idx = 0\n get_dist = lambda x : np.sqrt(np.dot(x, x))\n\n # kinematic leg limits\n base_rot = rpy_rot(rpy)\n for leg_idx in range(4):\n heap_pos = np.matmul(base_rot, self.env.model.abduct_org_list[leg_idx]) + com_pos\n leg_dist = get_dist((foot_pos_list[leg_idx] - heap_pos).ravel())\n const[const_idx, 0] = (leg_dist - z_const)*contact_phi_list[leg_idx]\n const_idx += 1\n\n # swing leg's force limits\n temp_const = 0\n for leg_idx in range(4):\n temp_const += (1 - contact_phi_list[leg_idx])*get_dist(force_list[leg_idx])\n const[const_idx, 0] = temp_const\n const_idx += 1\n\n # positive ground force normal\n for leg_idx in range(4):\n const[const_idx, 0] = -np.dot(force_list[leg_idx], ground_normal)\n const_idx += 1\n\n # friction pyramid\n for leg_idx in range(4):\n const[const_idx, 0] = np.abs(force_list[leg_idx, 0]) - friction_coeff*force_list[leg_idx, 2]\n const_idx += 1\n const[const_idx, 0] = np.abs(force_list[leg_idx, 1]) - friction_coeff*force_list[leg_idx, 2]\n const_idx += 1\n\n return const\n\n def get_A_B_mat(self, x, u, foot_pos_list, delta_t):\n new_x = self.transition(x, u, foot_pos_list, delta_t)\n EPS = 1e-3\n\n A = []\n for x_idx in range(self.x_dim):\n delta_x = np.zeros_like(x)\n delta_x[x_idx][0] = EPS\n A.append((self.transition(x + delta_x, u, foot_pos_list, delta_t) - new_x)/EPS)\n A = np.concatenate(A, axis=-1)\n\n B = []\n for u_idx in range(self.u_dim):\n delta_u = np.zeros_like(u)\n delta_u[u_idx][0] = EPS\n B.append((self.transition(x, u + delta_u, foot_pos_list, delta_t) - new_x)/EPS)\n B = np.concatenate(B, axis=-1)\n return A, B\n\n def get_C_mat(self, x, u, foot_pos_list, contact_phi_list):\n const = self.get_const(x, u, foot_pos_list, contact_phi_list)\n EPS = 1e-3\n\n C_x_mat = []\n for x_idx in range(self.x_dim):\n delta_x = np.zeros_like(x)\n delta_x[x_idx][0] = EPS\n C_x_mat.append((self.get_const(x + delta_x, u, foot_pos_list, contact_phi_list) - const)/EPS)\n C_x_mat = np.concatenate(C_x_mat, axis=-1)\n\n if len(u) == 0:\n C_u_mat = []\n else:\n C_u_mat = []\n for u_idx in range(self.u_dim):\n delta_u = np.zeros_like(u)\n delta_u[u_idx][0] = EPS\n C_u_mat.append((self.get_const(x, u + delta_u, foot_pos_list, contact_phi_list) - const)/EPS)\n C_u_mat = np.concatenate(C_u_mat, axis=-1)\n return C_x_mat, C_u_mat\n\n def get_I_mat(self, const_vector, lambda_vector, mu_vector):\n I_mat = np.eye(self.const_dim)\n for const_idx in range(self.const_dim):\n if const_vector[const_idx] >= 0:\n I_mat[const_idx, const_idx] = mu_vector[const_idx]\n return I_mat\n","repo_name":"dobro12/ALiLQR","sub_path":"cheetah/agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":14705,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"83"} +{"seq_id":"74182590032","text":"from flask import Blueprint, jsonify, request, make_response\nfrom app.models import db, Character\nfrom app.forms import CharacterForm\nfrom werkzeug.datastructures import MultiDict\n\n\ncharacter_routes = Blueprint('characters', __name__)\n\n@character_routes.route('/', methods=[\"POST\"])\ndef new():\n data = MultiDict(mapping=request.json)\n form = CharacterForm(data)\n print(form.validate())\n if form.validate():\n new_persona = Character(name=data['name'],\n strength=data['strength'],\n dexterity=data['dexterity'],\n constitution=data['constitution'],\n intelligence=data['intelligence'],\n wisdom=data['wisdom'],\n 
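Note: get_A_B_mat and get_C_mat in the dobro12/ALiLQR record above both linearize by one-sided finite differences with EPS = 1e-3, perturbing one coordinate of a column vector at a time. A minimal generic sketch of that pattern (numeric_jacobian is an assumed helper name, not from the file):

import numpy as np

def numeric_jacobian(f, x, eps=1e-3):
    # One-sided finite-difference Jacobian of f at x, where x is an (n, 1) column vector.
    fx = f(x)
    jac = np.zeros((fx.shape[0], x.shape[0]))
    for i in range(x.shape[0]):
        dx = np.zeros_like(x)
        dx[i, 0] = eps
        jac[:, i] = ((f(x + dx) - fx) / eps).ravel()
    return jac

One-sided differences need n extra evaluations of f, half the cost of central differences, at the price of O(eps) rather than O(eps**2) truncation error.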
charisma=data['charisma'],\n                                armor_class=data['armor_class'],\n                                max_hitpoints=data['max_hitpoints'],\n                                features=data['features'],\n                                actions=data['actions'],\n                                images=data['images'],\n                                ownerId=data['ownerId'],\n                                )\n        db.session.add(new_persona)\n        db.session.commit()\n        return new_persona.to_dict()\n    else:\n        res = make_response(\n            {\"errors\": [form.errors[error][0] for error in form.errors]}, 401)\n        print(form.errors)\n        return res\n\n\n@character_routes.route('/<characterId>', methods=[\"PUT\"])\ndef edit(characterId):\n    data = request.json\n    old_persona = Character.query.get(characterId)\n    if 'name' in data:\n        old_persona.name = data['name']\n    if 'strength' in data:\n        old_persona.strength = data['strength']\n    if 'dexterity' in data:\n        old_persona.dexterity = data['dexterity']\n    if 'constitution' in data:\n        old_persona.constitution = data['constitution']\n    if 'intelligence' in data:\n        old_persona.intelligence = data['intelligence']\n    if 'wisdom' in data:\n        old_persona.wisdom = data['wisdom']\n    if 'charisma' in data:\n        old_persona.charisma = data['charisma']\n    if 'armor_class' in data:\n        old_persona.armor_class = data['armor_class']\n    if 'max_hitpoints' in data:\n        old_persona.max_hitpoints = data['max_hitpoints']\n    if 'features' in data:\n        old_persona.features = data['features']\n    if 'actions' in data:\n        old_persona.actions = data['actions']\n    if 'images' in data:\n        old_persona.images = data['images']\n    if 'ownerId' in data:\n        old_persona.ownerId = data['ownerId']\n    db.session.commit()\n    return old_persona.to_dict()\n\n\n@character_routes.route('/<characterId>', methods=[\"DELETE\"])\ndef remove(characterId):\n    old_persona = Character.query.get(characterId)\n    db.session.delete(old_persona)\n    db.session.commit()\n    return old_persona.to_dict()\n","repo_name":"djwilki/BookOfAncientSecrets","sub_path":"flask_react_starter/backend/app/api/characters.py","file_name":"characters.py","file_ext":"py","file_size_in_byte":2933,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
+{"seq_id":"18471734469","text":"from datetime import datetime,timedelta\r\n\r\ndef getCurrent():\r\n    curDate=datetime.now()  # get the current date and time\r\n    return curDate\r\ndef getAfterDate(now,day):\r\n    retDate=now+timedelta(days=day)  # add a timedelta to now to get the later date\r\n    return retDate\r\nnowDate,afterDate=None,None\r\n\r\nnowDate=getCurrent()\r\nprint(\"Current date and time ==>\",nowDate)\r\nafterDate=getAfterDate(nowDate,100)\r\nprint(\"Date and time 100 days later ==>\",afterDate)\r\n","repo_name":"zoown12/Python","sub_path":"python/date.py","file_name":"date.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
+{"seq_id":"70194622673","text":"import Creature\r\nimport pygame\r\nimport Empty\r\nimport random\r\n\r\nclass Wolf(Creature.Creature):\r\n    \"\"\"\r\n    This class declares all the variables for a wolf\r\n    \"\"\"\r\n    def __init__(self):\r\n        super().__init__(\"wolf\")\r\n        self.energy = 7\r\n        self.max_energy = 20\r\n        self.image = pygame.image.load(\"newWolf.png\")\r\n        self.birthRate = .5\r\n        self.already_moved = False\r\n        self.birth_requirement = 30\r\n        self.birth_amt = 0\r\n        self.max_birth = 1\r\n        self.type = \"predator\"\r\n        self.kill_rate = 75\r\n        self.given_birth = False\r\n        self.birth_chance = 4\r\n        self.birth_high = 8\r\n        self.birth_low = 2\r\n        self.too_many = 40\r\n        self.too_little = 18\r\n\r\n    def moved(self):\r\n        \"\"\"\r\n        Changes the energy of the creature after it moved\r\n        :return:\r\n        \"\"\"\r\n        self.energy = self.energy - 1\r\n        self.already_moved = 
True\r\n\r\n def has_moved(self):\r\n \"\"\"\r\n Determines if the creature has moved\r\n :return:\r\n \"\"\"\r\n return self.already_moved\r\n\r\n def action(self, grid, x, y, board_length, sheep_amt, sheep_killed):\r\n \"\"\"\r\n Action performed by a wolf, kill, move, or starve\r\n :param grid: stored data\r\n :param x: x location\r\n :param y: y location\r\n :param board_length: size of the grid\r\n :param sheep_amt: sheep amount\r\n :param sheep_killed: sheep killed by wolves\r\n :return:\r\n \"\"\"\r\n kill_x, kill_y, position = self.kill(grid, x, y, board_length)\r\n # predator has a 100% chance to kill a prey at a perpendicular location\r\n if position == \"no prey\":\r\n move_x, move_y = self.surrounding_creatures(grid, x, y, board_length)\r\n return grid, move_x, move_y, sheep_amt, sheep_killed\r\n\r\n elif position == \"perpendicular\":\r\n grid[kill_x][kill_y][1] = Empty.Empty()\r\n grid[kill_x][kill_y][1].just_died()\r\n if self.energy + 5 >= self.max_energy:\r\n self.energy = self.max_energy\r\n else:\r\n self.energy += 5\r\n sheep_amt -= 1\r\n sheep_killed += 1\r\n\r\n else:\r\n kill_chance = random.randint(0, 100)\r\n # checks if the wolf can kill the predator to its diagonal\r\n # if the random int in less than or equal to 75\r\n if kill_chance <= self.kill_rate:\r\n grid[kill_x][kill_y][1] = Empty.Empty()\r\n grid[kill_x][kill_y][1].just_died()\r\n self.energy += 5\r\n sheep_amt -= 1\r\n sheep_killed += 1\r\n\r\n move_x, move_y = self.surrounding_creatures(grid, x, y, board_length)\r\n\r\n return grid, move_x, move_y, sheep_amt, sheep_killed\r\n\r\n def surrounding_creatures(self, grid, x, y, board_size):\r\n \"\"\"\r\n creates a list of all the spots surrounding the creature\r\n :param x: the row of the creature\r\n :param y: the col of the creature\r\n :return: list of spots surrounding the creature\r\n \"\"\"\r\n move_x = 0\r\n move_y = 0\r\n choice_list = []\r\n highest_rank = 0\r\n\r\n choice_list.append(self.give_rank(grid, grid[(x - 1) % board_size][y],\r\n (x - 1) % board_size, y, \"left\", board_size))\r\n choice_list.append(self.give_rank(grid, grid[(x - 1) % board_size][(y - 1) % board_size],\r\n (x - 1) % board_size, (y - 1) % board_size, \"upper left\", board_size))\r\n choice_list.append(self.give_rank(grid, grid[x][(y - 1) % board_size], x,\r\n (y - 1) % board_size, \"top\", board_size))\r\n choice_list.append(self.give_rank(grid, grid[(x + 1) % board_size][(y - 1) % board_size],\r\n (x + 1) % board_size, (y - 1) % board_size, \"upper right\", board_size))\r\n choice_list.append(self.give_rank(grid, grid[(x + 1) % board_size][y],\r\n (x + 1) % board_size, y, \"right\", board_size))\r\n choice_list.append(self.give_rank(grid, grid[(x + 1) % board_size][(y + 1) % board_size],\r\n (x + 1) % board_size, (y + 1) % board_size,\"lower right\", board_size))\r\n choice_list.append(self.give_rank(grid, grid[x][(y + 1) % board_size], x,\r\n (y + 1) % board_size, \"bottom\", board_size))\r\n choice_list.append(self.give_rank(grid, grid[(x - 1) % board_size][(y + 1) % board_size],\r\n (x - 1) % board_size, (y + 1) % board_size, \"lower left\", board_size))\r\n\r\n # I need to see if I can\r\n for i in range(len(choice_list) - 1):\r\n\r\n neighbor1 = choice_list[(i - 1) % len(choice_list)][0]\r\n neighbor2 = choice_list[(i + 1) % len(choice_list)][0]\r\n rank = choice_list[i][0] + neighbor1 + neighbor2\r\n\r\n # checks if the rank is the higher than the current highest\r\n if rank > highest_rank:\r\n highest_rank = rank\r\n move_x = choice_list[i][1]\r\n move_y = 
choice_list[i][2]\r\n\r\n if highest_rank == 0:\r\n rand_choice = random.randint(0, len(choice_list) - 1)\r\n move_x = choice_list[rand_choice][1]\r\n move_y = choice_list[rand_choice][2]\r\n\r\n return move_x, move_y\r\n\r\n def give_rank(self, grid, block, x, y, position, size):\r\n \"\"\"\r\n Gives each position a rank so the creature can determine where to move\r\n :param grid: stored data\r\n :param block: block with data\r\n :param x: x location\r\n :param y: y location\r\n :param position: position being checked\r\n :param size: size of the board\r\n :return:\r\n \"\"\"\r\n rank = 0\r\n # creates a dictionary of all possible functions relative to their\r\n # positions on the board.\r\n position_choice = {\"left\": self.left_right(grid, (x - 1) % size, y, size, rank),\r\n \"right\": self.left_right(grid, (x + 1) % size, y, size, rank),\r\n \"top\": self.top_bottom(grid, x, (y - 1) % size, size, rank),\r\n \"bottom\": self.top_bottom(grid, x, (y + 1) % size, size, rank),\r\n \"upper_left\": self.up_low_left(grid, x, (y - 1) % size, size, rank, \"upper left\"),\r\n \"lower_left\": self.up_low_left(grid, x, (y + 1) % size, size, rank, \"lower left\"),\r\n \"upper_right\": self.up_low_right(grid, x, (y - 1) % size, size, rank, \"upper right\"),\r\n \"lower_right\": self.up_low_right(grid, x, (y + 1) % size, size, rank, \"lower right\")}\r\n\r\n # checks if the block holds a prey\r\n if block[1].get_type() == \"prey\":\r\n rank = rank + 7\r\n\r\n # there is a problem with this idea because if there are no sheep found then the wolf will keep going\r\n # back to the exact same spot they came from\r\n\r\n elif not block[2].fully_grown() and block[2].get_terrain_type() == \"clover\":\r\n rank = rank + 2\r\n elif not block[2].fully_grown() and block[2].get_terrain_type() == \"grass\":\r\n rank = rank + 1\r\n\r\n\r\n elif position == \"left\":\r\n rank = position_choice['left']\r\n elif position == \"right\":\r\n rank = position_choice['right']\r\n elif position == \"top\":\r\n rank = position_choice['top']\r\n elif position == \"bottom\":\r\n rank = position_choice['bottom']\r\n elif position == \"upper left\":\r\n rank = position_choice['upper_left']\r\n elif position == \"upper right\":\r\n rank = position_choice['upper_right']\r\n elif position == \"lower left\":\r\n rank = position_choice['lower_left']\r\n elif position == \"lower right\":\r\n rank = position_choice['lower_right']\r\n\r\n return [rank, x, y]\r\n\r\n def left_right(self, grid, x, y, size, rank):\r\n \"\"\"\r\n Checks the left or the right\r\n :param grid: stored data\r\n :param x: x location\r\n :param y: y location\r\n :param size: size of the grid\r\n :param rank: rank of the position\r\n :return:\r\n \"\"\"\r\n # checks left or right\r\n if grid[x][y][1].get_type() == \"prey\":\r\n rank = rank + 3\r\n # checks bottom side\r\n elif grid[x][(y + 1) % size][1].get_type() == \"prey\":\r\n rank = rank + 3\r\n # checks top side\r\n elif grid[x][(y - 1) % size][1].get_type() == \"prey\":\r\n rank = rank + 3\r\n return rank\r\n\r\n def top_bottom(self, grid, x, y, size, rank):\r\n \"\"\"\r\n Checks the top or bottom position\r\n :param grid: stored data\r\n :param x: x location\r\n :param y: y location\r\n :param size: size of the grid\r\n :param rank: rank of spot\r\n :return:\r\n \"\"\"\r\n # checks top or bottom\r\n if grid[x][y][1].get_type() == \"prey\":\r\n rank = rank + 3\r\n # checks left side\r\n elif grid[(x - 1) % size][y][1].get_type() == \"prey\":\r\n rank = rank + 3\r\n # checks right side\r\n elif grid[(x + 1) % 
size][y][1].get_type() == \"prey\":\r\n rank = rank + 3\r\n return rank\r\n\r\n def up_low_left(self, grid, x, y, size, rank, position):\r\n \"\"\"\r\n Checks the upper and lower left positions\r\n :param grid: stored data\r\n :param x: x location\r\n :param y: y location\r\n :param size: size of the grid\r\n :param rank: rank of the position\r\n :param position: position being checked\r\n :return:\r\n \"\"\"\r\n # checks upper left\r\n if grid[x][(y + 1) % size][1].get_type() == \"prey\":\r\n rank = rank + 3\r\n # checks left\r\n elif grid[x][y][1].get_type() == \"prey\":\r\n rank = rank + 3\r\n # checks lower left\r\n elif grid[x][(y - 1) % size][1].get_type() == \"prey\":\r\n rank = rank + 3\r\n\r\n elif position == \"upper left\":\r\n # checks top\r\n if grid[(x + 1) % size][(y - 1) % size][1].get_type() == \"prey\":\r\n rank = rank + 3\r\n # checks upper right, on the left\r\n elif grid[(x + 2) % size][(y - 1) % size][1].get_type() == \"prey\":\r\n rank = rank + 3\r\n\r\n elif position == \"lower left\":\r\n # checks bottom\r\n if grid[(x + 1) % size][(y + 1) % size][1].get_type() == \"prey\":\r\n rank = rank + 3\r\n # checks lower right, on the left\r\n elif grid[(x + 2) % size][(y + 1) % size][1].get_type() == \"prey\":\r\n rank = rank + 3\r\n\r\n return rank\r\n\r\n def up_low_right(self, grid, x, y, size, rank, position):\r\n \"\"\"\r\n This will check the upper and lower left spots\r\n :param grid: stored data\r\n :param x: x location\r\n :param y: y location\r\n :param size: size of the grid\r\n :param rank: rank the position has\r\n :param position: spot being checked\r\n :return:\r\n \"\"\"\r\n # checks upper right\r\n if grid[x][(y + 1) % size][1].get_type() == \"prey\":\r\n rank = rank + 3\r\n # checks right\r\n elif grid[x][y][1].get_type() == \"prey\":\r\n rank = rank + 3\r\n # checks lower right\r\n elif grid[x][(y - 1) % size][1].get_type() == \"prey\":\r\n rank = rank + 3\r\n\r\n elif position == \"upper right\":\r\n # checks top\r\n if grid[(x - 1) % size][(y - 1) % size][1].get_type() == \"prey\":\r\n rank = rank + 3\r\n # checks upper left, on the right\r\n elif grid[(x - 2) % size][(y - 1) % size][1].get_type() == \"prey\":\r\n rank = rank + 3\r\n\r\n elif position == \"lower right\":\r\n # checks bottom\r\n if grid[(x - 1) % size][(y + 1) % size][1].get_type() == \"prey\":\r\n rank = rank + 3\r\n # checks lower left, on the right\r\n elif grid[(x - 2) % size][(y + 1) % size][1].get_type() == \"prey\":\r\n rank = rank + 3\r\n\r\n return rank\r\n\r\n def kill(self, grid, x, y, board_size):\r\n \"\"\"\r\n This will try and kill a sheep if in the kill zone\r\n :param grid: stored data\r\n :param x: x location\r\n :param y: y location\r\n :param board_size: size of the board\r\n :return:\r\n \"\"\"\r\n\r\n kill_perpendicular = []\r\n kill_diagonal = []\r\n\r\n kill_perpendicular.append([grid[(x - 1) % board_size][y],\r\n (x - 1) % board_size, y])\r\n kill_diagonal.append([grid[(x - 1) % board_size][(y - 1) % board_size],\r\n (x - 1) % board_size, (y - 1) % board_size])\r\n kill_perpendicular.append([grid[x][(y - 1) % board_size], x,\r\n (y - 1) % board_size])\r\n kill_diagonal.append([grid[(x + 1) % board_size][(y - 1) % board_size],\r\n (x + 1) % board_size, (y - 1) % board_size])\r\n kill_perpendicular.append([grid[(x + 1) % board_size][y],\r\n (x + 1) % board_size, y])\r\n kill_diagonal.append([grid[(x + 1) % board_size][(y + 1) % board_size],\r\n (x + 1) % board_size, (y + 1) % board_size])\r\n kill_perpendicular.append([grid[x][(y + 1) % board_size], x,\r\n (y + 1) 
% board_size])\r\n kill_diagonal.append([grid[(x - 1) % board_size][(y + 1) % board_size],\r\n (x - 1) % board_size, (y + 1) % board_size])\r\n\r\n # adds all spots that hold a prey to the select list\r\n # need to make a while loop for this with an index and count\r\n i = 0\r\n count = 0\r\n perp_amt = len(kill_perpendicular)\r\n while i < perp_amt:\r\n # increment i no matter what\r\n i = i + 1\r\n # if not prey\r\n if not kill_perpendicular[count][0][1].get_type() == \"prey\":\r\n kill_perpendicular.pop(count)\r\n else:\r\n # increment the count because nothing was pop'd off\r\n count = count + 1\r\n\r\n # resets the count and index variable\r\n i = 0\r\n count = 0\r\n # checks to see if kill perpendicular is empty\r\n if not kill_perpendicular:\r\n diag_amt = len(kill_diagonal)\r\n while i < diag_amt:\r\n # increment i no matter what\r\n i = i + 1\r\n # if not prey\r\n if not kill_diagonal[count][0][1].get_type() == \"prey\":\r\n # appends the block with prey to diagonal_select\r\n kill_diagonal.pop(count)\r\n else:\r\n # increment the count because nothing was pop'd off\r\n count = count + 1\r\n\r\n if not kill_diagonal:\r\n return x, y, \"no prey\"\r\n else:\r\n # randomly selects a block with a prey in it to kill\r\n selection = random.randint(0, len(kill_diagonal) - 1)\r\n return kill_diagonal[selection][1], kill_diagonal[selection][2], \"diagonal\"\r\n else:\r\n # len starts at 1 while lists start at 0\r\n # randomly selects a block with a prey in it to kill\r\n selection = random.randint(0, len(kill_perpendicular) - 1)\r\n return kill_perpendicular[selection][1], kill_perpendicular[selection][2], \"perpendicular\"\r\n\r\n\r\n\r\n\r\n","repo_name":"Cpcagle/SheepAndWolfAttemptTwo","sub_path":"Wolf.py","file_name":"Wolf.py","file_ext":"py","file_size_in_byte":15651,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"721209948","text":"'''\nTo run:\nCUDA_VISIBLE_DEVICES=\"\" python3.5 main.py &\nCUDA_VISIBLE_DEVICES=1 python3.5 main.py &\nCUDA_VISIBLE_DEVICES=2 python3.5 main.py &\nCUDA_VISIBLE_DEVICES=3 python3.5 main.py &\n'''\nfrom __future__ import print_function\nimport tensorflow as tf\nimport os\nimport utils\nimport numpy as np\nimport matplotlib\nimport copy\nimport distutils.util\nimport pickle\nimport glob\nimport brat_to_conll\nimport conll_to_brat\nimport codecs\nimport utils_nlp\nmatplotlib.use('Agg')\nimport dataset as ds\nimport time\nimport random\nimport evaluate\nimport configparser\nimport train\nfrom pprint import pprint\nfrom entity_lstm import EntityLSTM\nfrom tensorflow.contrib.tensorboard.plugins import projector\nimport argparse\nfrom argparse import RawTextHelpFormatter\nimport sys\n\n# http://stackoverflow.com/questions/42217532/tensorflow-version-1-0-0-rc2-on-windows-opkernel-op-bestsplits-device-typ\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\nprint('NeuroNER version: {0}'.format('1.0-dev'))\nprint('TensorFlow version: {0}'.format(tf.__version__))\n\nimport warnings\nwarnings.filterwarnings('ignore')\n\n\ndef load_parameters(parameters_filepath, arguments={}, verbose=True):\n '''\n Load parameters from the ini file if specified, take into account any command line argument, and ensure that each parameter is cast to the correct type.\n Command line arguments take precedence over parameters specified in the parameter file.\n '''\n parameters = {'pretrained_model_folder':'../trained_models/conll_2003_en',\n 'dataset_text_folder':'../data/conll2003/en',\n 'character_embedding_dimension':25,\n 
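Note: Wolf.kill and surrounding_creatures in the Wolf.py record above wrap every index with % board_size, which makes the grid toroidal: stepping off one edge re-enters from the opposite edge. A minimal sketch of that wrap-around neighborhood (a hypothetical helper, same convention assumed):

def perpendicular_neighbors(x, y, size):
    # (x - 1) % size maps -1 to size - 1, so edge cells see cells on the far side.
    return [((x - 1) % size, y), ((x + 1) % size, y),
            (x, (y - 1) % size), (x, (y + 1) % size)]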
'character_lstm_hidden_state_dimension':25,\n 'check_for_digits_replaced_with_zeros':True,\n 'check_for_lowercase':True,\n 'debug':False,\n 'dropout_rate':0.5,\n 'experiment_name':'experiment',\n 'freeze_token_embeddings':False,\n 'gradient_clipping_value':5.0,\n 'learning_rate':0.005,\n 'load_only_pretrained_token_embeddings':False,\n 'main_evaluation_mode':'conll',\n 'maximum_number_of_epochs':100,\n 'number_of_cpu_threads':8,\n 'number_of_gpus':0,\n 'optimizer':'sgd',\n 'output_folder':'../output',\n 'patience':10,\n 'plot_format':'pdf',\n 'reload_character_embeddings':True,\n 'reload_character_lstm':True,\n 'reload_crf':True,\n 'reload_feedforward':True,\n 'reload_token_embeddings':True,\n 'reload_token_lstm':True,\n 'remap_unknown_tokens_to_unk':True,\n 'spacylanguage':'en',\n 'tagging_format':'bioes',\n 'token_embedding_dimension':100,\n 'token_lstm_hidden_state_dimension':100,\n 'token_pretrained_embedding_filepath':'../data/word_vectors/glove.6B.100d.txt',\n 'tokenizer':'spacy',\n 'train_model':True,\n 'use_character_lstm':True,\n 'use_crf':True,\n 'use_pretrained_model':False,\n 'verbose':False}\n # If a parameter file is specified, load it\n if len(parameters_filepath) > 0:\n conf_parameters = configparser.ConfigParser()\n conf_parameters.read(parameters_filepath, encoding=\"UTF-8\")\n nested_parameters = utils.convert_configparser_to_dictionary(conf_parameters)\n for k,v in nested_parameters.items():\n parameters.update(v)\n # Ensure that any arguments the specified in the command line overwrite parameters specified in the parameter file\n for k,v in arguments.items():\n if arguments[k] != arguments['argument_default_value']:\n parameters[k] = v\n for k,v in parameters.items():\n v = str(v)\n # If the value is a list delimited with a comma, choose one element at random.\n if ',' in v:\n v = random.choice(v.split(','))\n parameters[k] = v\n # Ensure that each parameter is cast to the correct type\n if k in ['character_embedding_dimension','character_lstm_hidden_state_dimension','token_embedding_dimension',\n 'token_lstm_hidden_state_dimension','patience','maximum_number_of_epochs','maximum_training_time','number_of_cpu_threads','number_of_gpus']:\n parameters[k] = int(v)\n elif k in ['dropout_rate', 'learning_rate', 'gradient_clipping_value']:\n parameters[k] = float(v)\n elif k in ['remap_unknown_tokens_to_unk', 'use_character_lstm', 'use_crf', 'train_model', 'use_pretrained_model', 'debug', 'verbose',\n 'reload_character_embeddings', 'reload_character_lstm', 'reload_token_embeddings', 'reload_token_lstm', 'reload_feedforward', 'reload_crf',\n 'check_for_lowercase', 'check_for_digits_replaced_with_zeros', 'freeze_token_embeddings', 'load_only_pretrained_token_embeddings']:\n parameters[k] = distutils.util.strtobool(v)\n # If loading pretrained model, set the model hyperparameters according to the pretraining parameters \n if parameters['use_pretrained_model']:\n pretraining_parameters = load_parameters(parameters_filepath=os.path.join(parameters['pretrained_model_folder'], 'parameters.ini'), verbose=False)[0]\n for name in ['use_character_lstm', 'character_embedding_dimension', 'character_lstm_hidden_state_dimension', 'token_embedding_dimension', 'token_lstm_hidden_state_dimension', 'use_crf']:\n if parameters[name] != pretraining_parameters[name]:\n print('WARNING: parameter {0} was overwritten from {1} to {2} to be consistent with the pretrained model'.format(name, parameters[name], pretraining_parameters[name]))\n parameters[name] = pretraining_parameters[name]\n if verbose: 
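Note: load_parameters in the NeuroNER record casts every parameter by membership in hard-coded key groups: ints first, then floats, then booleans through distutils.util.strtobool. A condensed sketch of that dispatch (the key sets here are abbreviated, not the full lists from the file):

import distutils.util

INT_KEYS = {'patience', 'number_of_gpus', 'maximum_number_of_epochs'}
FLOAT_KEYS = {'dropout_rate', 'learning_rate', 'gradient_clipping_value'}
BOOL_KEYS = {'train_model', 'use_crf', 'verbose'}

def cast_parameter(key, value):
    if key in INT_KEYS:
        return int(value)
    if key in FLOAT_KEYS:
        return float(value)
    if key in BOOL_KEYS:
        # strtobool accepts 'yes'/'no', 'true'/'false', '1'/'0'
        return bool(distutils.util.strtobool(value))
    return value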
pprint(parameters)\n # TODO: update conf_parameters to reflect the overriding\n return parameters, conf_parameters\n\ndef get_valid_dataset_filepaths(parameters):\n dataset_filepaths = {}\n dataset_brat_folders = {}\n for dataset_type in ['train', 'valid', 'test', 'deploy']:\n dataset_filepaths[dataset_type] = os.path.join(parameters['dataset_text_folder'], '{0}.txt'.format(dataset_type))\n dataset_brat_folders[dataset_type] = os.path.join(parameters['dataset_text_folder'], dataset_type)\n dataset_compatible_with_brat_filepath = os.path.join(parameters['dataset_text_folder'], '{0}_compatible_with_brat.txt'.format(dataset_type))\n\n # Conll file exists\n if os.path.isfile(dataset_filepaths[dataset_type]) and os.path.getsize(dataset_filepaths[dataset_type]) > 0:\n # Brat text files exist\n if os.path.exists(dataset_brat_folders[dataset_type]) and len(glob.glob(os.path.join(dataset_brat_folders[dataset_type], '*.txt'))) > 0:\n\n # Check compatibility between conll and brat files\n brat_to_conll.check_brat_annotation_and_text_compatibility(dataset_brat_folders[dataset_type])\n if os.path.exists(dataset_compatible_with_brat_filepath):\n dataset_filepaths[dataset_type] = dataset_compatible_with_brat_filepath\n conll_to_brat.check_compatibility_between_conll_and_brat_text(dataset_filepaths[dataset_type], dataset_brat_folders[dataset_type])\n\n # Brat text files do not exist\n else:\n\n # Populate brat text and annotation files based on conll file\n conll_to_brat.conll_to_brat(dataset_filepaths[dataset_type], dataset_compatible_with_brat_filepath, dataset_brat_folders[dataset_type], dataset_brat_folders[dataset_type])\n dataset_filepaths[dataset_type] = dataset_compatible_with_brat_filepath\n\n # Conll file does not exist\n else:\n # Brat text files exist\n if os.path.exists(dataset_brat_folders[dataset_type]) and len(glob.glob(os.path.join(dataset_brat_folders[dataset_type], '*.txt'))) > 0:\n dataset_filepath_for_tokenizer = os.path.join(parameters['dataset_text_folder'], '{0}_{1}.txt'.format(dataset_type, parameters['tokenizer']))\n if os.path.exists(dataset_filepath_for_tokenizer):\n conll_to_brat.check_compatibility_between_conll_and_brat_text(dataset_filepath_for_tokenizer, dataset_brat_folders[dataset_type])\n else:\n # Populate conll file based on brat files\n brat_to_conll.brat_to_conll(dataset_brat_folders[dataset_type], dataset_filepath_for_tokenizer, parameters['tokenizer'], parameters['spacylanguage'])\n dataset_filepaths[dataset_type] = dataset_filepath_for_tokenizer\n\n # Brat text files do not exist\n else:\n del dataset_filepaths[dataset_type]\n del dataset_brat_folders[dataset_type]\n continue\n\n if parameters['tagging_format'] == 'bioes':\n # Generate conll file with BIOES format\n bioes_filepath = os.path.join(parameters['dataset_text_folder'], '{0}_bioes.txt'.format(utils.get_basename_without_extension(dataset_filepaths[dataset_type])))\n utils_nlp.convert_conll_from_bio_to_bioes(dataset_filepaths[dataset_type], bioes_filepath)\n dataset_filepaths[dataset_type] = bioes_filepath\n\n return dataset_filepaths, dataset_brat_folders\n\ndef check_parameter_compatiblity(parameters, dataset_filepaths):\n # Check mode of operation\n if parameters['train_model']:\n if 'train' not in dataset_filepaths or 'valid' not in dataset_filepaths:\n raise IOError(\"If train_model is set to True, both train and valid set must exist in the specified dataset folder: {0}\".format(parameters['dataset_text_folder']))\n elif parameters['use_pretrained_model']:\n if 'train' in dataset_filepaths and 
'valid' in dataset_filepaths:\n print(\"WARNING: train and valid set exist in the specified dataset folder, but train_model is set to FALSE: {0}\".format(parameters['dataset_text_folder']))\n if 'test' not in dataset_filepaths and 'deploy' not in dataset_filepaths:\n raise IOError(\"For prediction mode, either test set and deploy set must exist in the specified dataset folder: {0}\".format(parameters['dataset_text_folder']))\n else: #if not parameters['train_model'] and not parameters['use_pretrained_model']:\n raise ValueError('At least one of train_model and use_pretrained_model must be set to True.')\n\n if parameters['use_pretrained_model']:\n if all([not parameters[s] for s in ['reload_character_embeddings', 'reload_character_lstm', 'reload_token_embeddings', 'reload_token_lstm', 'reload_feedforward', 'reload_crf']]):\n raise ValueError('If use_pretrained_model is set to True, at least one of reload_character_embeddings, reload_character_lstm, reload_token_embeddings, reload_token_lstm, reload_feedforward, reload_crf must be set to True.')\n\n if parameters['gradient_clipping_value'] < 0:\n parameters['gradient_clipping_value'] = abs(parameters['gradient_clipping_value'])\n\ndef parse_arguments(arguments=None):\n ''' Parse the NeuroNER arguments\n\n arguments:\n arguments the arguments, optionally given as argument\n '''\n parser = argparse.ArgumentParser(description='''NeuroNER CLI''', formatter_class=RawTextHelpFormatter)\n parser.add_argument('--parameters_filepath', required=False, default=os.path.join('.','parameters.ini'), help='The parameters file')\n\n argument_default_value = 'argument_default_dummy_value_please_ignore_d41d8cd98f00b204e9800998ecf8427e'\n parser.add_argument('--character_embedding_dimension', required=False, default=argument_default_value, help='')\n parser.add_argument('--character_lstm_hidden_state_dimension', required=False, default=argument_default_value, help='')\n parser.add_argument('--check_for_digits_replaced_with_zeros', required=False, default=argument_default_value, help='')\n parser.add_argument('--check_for_lowercase', required=False, default=argument_default_value, help='')\n parser.add_argument('--dataset_text_folder', required=False, default=argument_default_value, help='')\n parser.add_argument('--debug', required=False, default=argument_default_value, help='')\n parser.add_argument('--dropout_rate', required=False, default=argument_default_value, help='')\n parser.add_argument('--experiment_name', required=False, default=argument_default_value, help='')\n parser.add_argument('--freeze_token_embeddings', required=False, default=argument_default_value, help='')\n parser.add_argument('--gradient_clipping_value', required=False, default=argument_default_value, help='')\n parser.add_argument('--learning_rate', required=False, default=argument_default_value, help='')\n parser.add_argument('--load_only_pretrained_token_embeddings', required=False, default=argument_default_value, help='')\n parser.add_argument('--main_evaluation_mode', required=False, default=argument_default_value, help='')\n parser.add_argument('--maximum_number_of_epochs', required=False, default=argument_default_value, help='')\n parser.add_argument('--number_of_cpu_threads', required=False, default=argument_default_value, help='')\n parser.add_argument('--number_of_gpus', required=False, default=argument_default_value, help='')\n parser.add_argument('--optimizer', required=False, default=argument_default_value, help='')\n parser.add_argument('--output_folder', required=False, 
default=argument_default_value, help='')\n parser.add_argument('--patience', required=False, default=argument_default_value, help='')\n parser.add_argument('--plot_format', required=False, default=argument_default_value, help='')\n parser.add_argument('--pretrained_model_folder', required=False, default=argument_default_value, help='')\n parser.add_argument('--reload_character_embeddings', required=False, default=argument_default_value, help='')\n parser.add_argument('--reload_character_lstm', required=False, default=argument_default_value, help='')\n parser.add_argument('--reload_crf', required=False, default=argument_default_value, help='')\n parser.add_argument('--reload_feedforward', required=False, default=argument_default_value, help='')\n parser.add_argument('--reload_token_embeddings', required=False, default=argument_default_value, help='')\n parser.add_argument('--reload_token_lstm', required=False, default=argument_default_value, help='')\n parser.add_argument('--remap_unknown_tokens_to_unk', required=False, default=argument_default_value, help='')\n parser.add_argument('--spacylanguage', required=False, default=argument_default_value, help='')\n parser.add_argument('--tagging_format', required=False, default=argument_default_value, help='')\n parser.add_argument('--token_embedding_dimension', required=False, default=argument_default_value, help='')\n parser.add_argument('--token_lstm_hidden_state_dimension', required=False, default=argument_default_value, help='')\n parser.add_argument('--token_pretrained_embedding_filepath', required=False, default=argument_default_value, help='')\n parser.add_argument('--tokenizer', required=False, default=argument_default_value, help='')\n parser.add_argument('--train_model', required=False, default=argument_default_value, help='')\n parser.add_argument('--use_character_lstm', required=False, default=argument_default_value, help='')\n parser.add_argument('--use_crf', required=False, default=argument_default_value, help='')\n parser.add_argument('--use_pretrained_model', required=False, default=argument_default_value, help='')\n parser.add_argument('--verbose', required=False, default=argument_default_value, help='')\n\n try:\n arguments = parser.parse_args(args=arguments)\n except:\n parser.print_help()\n sys.exit(0)\n\n arguments = vars(arguments) # http://stackoverflow.com/questions/16878315/what-is-the-right-way-to-treat-python-argparse-namespace-as-a-dictionary\n arguments['argument_default_value'] = argument_default_value\n return arguments\n\ndef main(argv=sys.argv):\n ''' NeuroNER main method\n\n Args:\n parameters_filepath the path to the parameters file\n output_folder the path to the output folder\n '''\n arguments = parse_arguments(argv[1:])\n parameters, conf_parameters = load_parameters(arguments['parameters_filepath'], arguments=arguments)\n dataset_filepaths, dataset_brat_folders = get_valid_dataset_filepaths(parameters)\n check_parameter_compatiblity(parameters, dataset_filepaths)\n\n # Load dataset\n dataset = ds.Dataset(verbose=parameters['verbose'], debug=parameters['debug'])\n dataset.load_dataset(dataset_filepaths, parameters)\n\n # Create graph and session\n with tf.Graph().as_default():\n session_conf = tf.ConfigProto(\n intra_op_parallelism_threads=parameters['number_of_cpu_threads'],\n inter_op_parallelism_threads=parameters['number_of_cpu_threads'],\n device_count={'CPU': 1, 'GPU': parameters['number_of_gpus']},\n allow_soft_placement=True, # automatically choose an existing and supported device to run the 
operations in case the specified one doesn't exist\n log_device_placement=False\n )\n\n sess = tf.Session(config=session_conf)\n\n with sess.as_default():\n # Initialize and save execution details\n start_time = time.time()\n experiment_timestamp = utils.get_current_time_in_miliseconds()\n results = {}\n results['epoch'] = {}\n results['execution_details'] = {}\n results['execution_details']['train_start'] = start_time\n results['execution_details']['time_stamp'] = experiment_timestamp\n results['execution_details']['early_stop'] = False\n results['execution_details']['keyboard_interrupt'] = False\n results['execution_details']['num_epochs'] = 0\n results['model_options'] = copy.copy(parameters)\n\n dataset_name = utils.get_basename_without_extension(parameters['dataset_text_folder'])\n model_name = '{0}_{1}'.format(dataset_name, results['execution_details']['time_stamp'])\n\n utils.create_folder_if_not_exists(parameters['output_folder'])\n stats_graph_folder=os.path.join(parameters['output_folder'], model_name) # Folder where to save graphs\n utils.create_folder_if_not_exists(stats_graph_folder)\n model_folder = os.path.join(stats_graph_folder, 'model')\n utils.create_folder_if_not_exists(model_folder)\n with open(os.path.join(model_folder, 'parameters.ini'), 'w') as parameters_file:\n conf_parameters.write(parameters_file)\n tensorboard_log_folder = os.path.join(stats_graph_folder, 'tensorboard_logs')\n utils.create_folder_if_not_exists(tensorboard_log_folder)\n tensorboard_log_folders = {}\n for dataset_type in dataset_filepaths.keys():\n tensorboard_log_folders[dataset_type] = os.path.join(stats_graph_folder, 'tensorboard_logs', dataset_type)\n utils.create_folder_if_not_exists(tensorboard_log_folders[dataset_type])\n pickle.dump(dataset, open(os.path.join(model_folder, 'dataset.pickle'), 'wb'))\n\n # Instantiate the model\n # graph initialization should be before FileWriter, otherwise the graph will not appear in TensorBoard\n model = EntityLSTM(dataset, parameters)\n\n # Instantiate the writers for TensorBoard\n writers = {}\n for dataset_type in dataset_filepaths.keys():\n writers[dataset_type] = tf.summary.FileWriter(tensorboard_log_folders[dataset_type], graph=sess.graph)\n embedding_writer = tf.summary.FileWriter(model_folder) # embedding_writer has to write in model_folder, otherwise TensorBoard won't be able to view embeddings\n\n embeddings_projector_config = projector.ProjectorConfig()\n tensorboard_token_embeddings = embeddings_projector_config.embeddings.add()\n tensorboard_token_embeddings.tensor_name = model.token_embedding_weights.name\n token_list_file_path = os.path.join(model_folder, 'tensorboard_metadata_tokens.tsv')\n tensorboard_token_embeddings.metadata_path = os.path.relpath(token_list_file_path, '..')\n\n tensorboard_character_embeddings = embeddings_projector_config.embeddings.add()\n tensorboard_character_embeddings.tensor_name = model.character_embedding_weights.name\n character_list_file_path = os.path.join(model_folder, 'tensorboard_metadata_characters.tsv')\n tensorboard_character_embeddings.metadata_path = os.path.relpath(character_list_file_path, '..')\n\n projector.visualize_embeddings(embedding_writer, embeddings_projector_config)\n\n # Write metadata for TensorBoard embeddings\n token_list_file = codecs.open(token_list_file_path,'w', 'UTF-8')\n for token_index in range(dataset.vocabulary_size):\n token_list_file.write('{0}\\n'.format(dataset.index_to_token[token_index]))\n token_list_file.close()\n\n character_list_file = 
codecs.open(character_list_file_path,'w', 'UTF-8')\n for character_index in range(dataset.alphabet_size):\n if character_index == dataset.PADDING_CHARACTER_INDEX:\n character_list_file.write('PADDING\\n')\n else:\n character_list_file.write('{0}\\n'.format(dataset.index_to_character[character_index]))\n character_list_file.close()\n\n\n # Initialize the model\n sess.run(tf.global_variables_initializer())\n if not parameters['use_pretrained_model']:\n model.load_pretrained_token_embeddings(sess, dataset, parameters)\n\n # Start training + evaluation loop. Each iteration corresponds to 1 epoch.\n bad_counter = 0 # number of epochs with no improvement on the validation test in terms of F1-score\n previous_best_valid_f1_score = 0\n transition_params_trained = np.random.rand(len(dataset.unique_labels)+2,len(dataset.unique_labels)+2)\n model_saver = tf.train.Saver(max_to_keep=parameters['maximum_number_of_epochs']) # defaults to saving all variables\n epoch_number = -1\n try:\n while True:\n step = 0\n epoch_number += 1\n print('\\nStarting epoch {0}'.format(epoch_number))\n\n epoch_start_time = time.time()\n\n if parameters['use_pretrained_model'] and epoch_number == 0:\n # Restore pretrained model parameters\n transition_params_trained = train.restore_model_parameters_from_pretrained_model(parameters, dataset, sess, model, model_saver)\n elif epoch_number != 0:\n # Train model: loop over all sequences of training set with shuffling\n sequence_numbers=list(range(len(dataset.token_indices['train'])))\n random.shuffle(sequence_numbers)\n for sequence_number in sequence_numbers:\n transition_params_trained = train.train_step(sess, dataset, sequence_number, model, transition_params_trained, parameters)\n step += 1\n if step % 10 == 0:\n print('Training {0:.2f}% done'.format(step/len(sequence_numbers)*100), end='\\r', flush=True)\n\n epoch_elapsed_training_time = time.time() - epoch_start_time\n print('Training completed in {0:.2f} seconds'.format(epoch_elapsed_training_time), flush=True)\n\n y_pred, y_true, output_filepaths = train.predict_labels(sess, model, transition_params_trained, parameters, dataset, epoch_number, stats_graph_folder, dataset_filepaths)\n\n # Evaluate model: save and plot results\n evaluate.evaluate_model(results, dataset, y_pred, y_true, stats_graph_folder, epoch_number, epoch_start_time, output_filepaths, parameters)\n\n if parameters['use_pretrained_model'] and not parameters['train_model']:\n conll_to_brat.output_brat(output_filepaths, dataset_brat_folders, stats_graph_folder)\n break\n\n # Save model\n model_saver.save(sess, os.path.join(model_folder, 'model_{0:05d}.ckpt'.format(epoch_number)))\n\n # Save TensorBoard logs\n summary = sess.run(model.summary_op, feed_dict=None)\n writers['train'].add_summary(summary, epoch_number)\n writers['train'].flush()\n utils.copytree(writers['train'].get_logdir(), model_folder)\n\n\n # Early stop\n valid_f1_score = results['epoch'][epoch_number][0]['valid']['f1_score']['micro']\n if valid_f1_score > previous_best_valid_f1_score:\n bad_counter = 0\n previous_best_valid_f1_score = valid_f1_score\n conll_to_brat.output_brat(output_filepaths, dataset_brat_folders, stats_graph_folder, overwrite=True)\n else:\n bad_counter += 1\n print(\"The last {0} epochs have not shown improvements on the validation set.\".format(bad_counter))\n\n if bad_counter >= parameters['patience']:\n print('Early Stop!')\n results['execution_details']['early_stop'] = True\n break\n\n if epoch_number >= parameters['maximum_number_of_epochs']: break\n\n\n except 
KeyboardInterrupt:\n results['execution_details']['keyboard_interrupt'] = True\n print('Training interrupted')\n\n print('Finishing the experiment')\n end_time = time.time()\n results['execution_details']['train_duration'] = end_time - start_time\n results['execution_details']['train_end'] = end_time\n evaluate.save_results(results, stats_graph_folder)\n for dataset_type in dataset_filepaths.keys():\n writers[dataset_type].close()\n\n sess.close() # release the session's resources\n\n\nif __name__ == \"__main__\":\n main()\n\n\n","repo_name":"littleheap/TensorFlow-Coursera","sub_path":"11.自然语言处理/NeuroNER-master/src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":26719,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"83"} +{"seq_id":"28766343109","text":"'''\r\nCreated on Dec 29, 2018\r\n\r\n@author: Winterberger\r\n'''\r\npremium_cost = 125.\r\nground_flat = 20.\r\ndrone_flat = 0.\r\n\r\nground_U2 = 1.5\r\nground_U6 = 3.\r\nground_U10 = 4.\r\nground_Heavy = 4.75\r\n\r\ndrone_U2 = 4.5\r\ndrone_U6 = 9.\r\ndrone_U10 = 12.\r\ndrone_Heavy = 14.25\r\n\r\n\r\ndef main():\r\n #prompt for weight\r\n weight = float(input(\"Enter your package's weight in lbs: \"))\r\n \r\n #initialize to premium\r\n cost = premium_cost\r\n \r\n drone_rate = drone_U2\r\n ground_rate = ground_U2\r\n \r\n #Check weight starting low\r\n if weight > 2.:\r\n drone_rate = drone_U6\r\n ground_rate = ground_U6\r\n if weight > 6.:\r\n drone_rate = drone_U10\r\n ground_rate = ground_U10\r\n if weight > 10:\r\n drone_rate = drone_Heavy\r\n ground_rate = ground_Heavy\r\n \r\n drone_cost = round(drone_flat + drone_rate * weight,2)\r\n ground_cost = round(ground_flat + ground_rate * weight,2)\r\n \r\n print(\"Ground: %0.2f \\n Drone: %0.2f \\n Premium: %0.2f\" % (ground_cost, drone_cost, premium_cost))\r\n cost = min(premium_cost, drone_cost, ground_cost)\r\n if cost == drone_cost:\r\n method = 'drone'\r\n elif cost == ground_cost:\r\n method = 'ground'\r\n else:\r\n method = 'premium'\r\n \r\n method += \" shipping.\"\r\n print(\"Best price is %0.2f by shipping via %s\" % (cost, method))\r\n #return cost\r\n return cost\r\n\r\nmain()","repo_name":"ee1tbg/elanFirstPyProj","sub_path":"src/SalsShipping.py","file_name":"SalsShipping.py","file_ext":"py","file_size_in_byte":1391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"21532852247","text":"\"\"\"\r\nSystem support for role based authorization.\r\n\r\nSupports declaring grants,\r\nand enforcing them using SQLAlchemy \r\n * do_orm_execute\r\n * with_loader_criteria(each_grant.entity, each_grant.filter)\r\n\r\nYou typically do not alter this file.\r\n\"\"\"\r\n\r\nfrom typing import Dict, Tuple\r\nfrom flask_sqlalchemy import SQLAlchemy\r\nfrom sqlalchemy.orm import session\r\nfrom sqlalchemy import event, MetaData, and_, or_\r\nimport safrs\r\nfrom sqlalchemy import event, MetaData\r\nfrom sqlalchemy.orm import with_loader_criteria, DeclarativeMeta\r\nimport logging, sys\r\n\r\n\r\nfrom flask_jwt_extended import current_user\r\n\r\nfrom config import Config\r\nauthentication_provider = Config.SECURITY_PROVIDER\r\n\r\nsecurity_logger = logging.getLogger(__name__)\r\n\r\nsecurity_logger.debug(f'\\nAuthorization loaded via api_logic_server_run.py -- import \\n')\r\n\r\n\r\ndb = safrs.DB # Use the safrs.DB, not db!\r\nsession = db.session # sqlalchemy.orm.scoping.scoped_session\r\n\r\n\r\nclass Security:\r\n\r\n @classmethod\r\n def set_user_sa(cls):\r\n from 
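Note: SalsShipping in the record above computes three costs and then re-derives the winning method by comparing the minimum against each cost again. Keying a dict by method name removes that comparison chain; a hypothetical rework, not the repository's code:

def cheapest_method(ground_cost, drone_cost, premium_cost=125.0):
    costs = {'ground': ground_cost, 'drone': drone_cost, 'premium': premium_cost}
    method = min(costs, key=costs.get)  # key whose cost is smallest
    return method, costs[method]

On ties, min keeps the first key in insertion order, so the tie-breaking order here is an assumption and differs from the original's drone-first checks.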
flask import g\r\n        g.isSA = True\r\n\r\n    @classmethod\r\n    def current_user(cls):\r\n        \"\"\" \r\n        User code calls this as required to get user/roles (eg, multi-tenant client_id)\r\n\r\n        see https://flask-login.readthedocs.io/en/latest/\r\n        \"\"\"\r\n        return current_user\r\n\r\n    @staticmethod\r\n    def current_user_has_role(role_name: str) -> bool: \r\n        '''\r\n        Helper, e.g. rules can determine if update allowed\r\n\r\n        If user has role xyz, then for update authorization s/he can... \r\n        '''\r\n        result = False\r\n        for each_role in Security.current_user().UserRoleList:\r\n            if role_name == each_role.name:\r\n                result = True\r\n                break\r\n        return result\r\n    \r\n\r\nclass Grant:\r\n    \"\"\"\r\n    Invoke these to declare Role Permissions.\r\n\r\n    Use code completion to discover models.\r\n    \"\"\"\r\n\r\n    grants_by_table : Dict[str, list[object]] = {}\r\n    '''\r\n    Dict keyed by Table name (obtained from class name), value is a (role, filter)\r\n    '''\r\n\r\n    def __init__(self, on_entity: DeclarativeMeta, \r\n        to_role: str = \"\",\r\n        filter: object = None):\r\n        '''\r\n        Create grant for on_entity / to_role\r\n\r\n        Example\r\n        =======\r\n        Grant(  on_entity = models.Category,    # use code completion\r\n            to_role = Roles.tenant,\r\n            filter = models.Category.Id == Security.current_user().client_id)  # User table attributes\r\n        \r\n        Args\r\n        ----\r\n        on_entity: a class from models.py\r\n        to_role: valid role name from Authentication Provider\r\n        filter: where clause to be added\r\n        \r\n        per calls from declare_security.py\r\n        '''\r\n        self.class_name : str = on_entity._s_class_name # type: ignore\r\n        self.role_name : str = to_role\r\n        self.filter = filter\r\n        self.entity :DeclarativeMeta = on_entity\r\n        self.table_name : str = on_entity.__tablename__ # type: ignore\r\n        if (self.table_name not in self.grants_by_table):\r\n            Grant.grants_by_table[self.table_name] = []\r\n        Grant.grants_by_table[self.table_name].append( self )\r\n\r\n    @staticmethod\r\n    def exec_grants(orm_execute_state):\r\n        '''\r\n        SQLAlchemy select event for current user's roles, append that role's grant filter to the SQL before execute \r\n\r\n        if you have a select() construct, you can add new AND things just calling .where() again.\r\n        \r\n        e.g. 
existing_statement.where(or_(f1, f2)) .\r\n\r\n        u2 is a manager and a tenant\r\n        '''\r\n        user = Security.current_user()\r\n        mapper = orm_execute_state.bind_arguments['mapper']\r\n        table_name = mapper.persist_selectable.fullname # mapper.mapped_table.fullname deprecated\r\n        try:\r\n            from flask import g\r\n            if g.isSA or user.id == 'sa':\r\n                security_logger.debug(\"sa (eg, set_user_sa()) - no grants apply\")\r\n                return\r\n        except Exception:\r\n            security_logger.debug(\"no user - ok (eg, system initialization)\")\r\n        if table_name in Grant.grants_by_table:\r\n            grant_list = list()\r\n            grant_entity = None\r\n            for each_grant in Grant.grants_by_table[table_name]:\r\n                grant_entity = each_grant.entity\r\n                for each_user_role in user.UserRoleList:\r\n                    if each_grant.role_name == each_user_role.role_name:\r\n                        security_logger.debug(f'Amend Grant for class / role: {table_name} / {each_grant.role_name} - {each_grant.filter}')\r\n                        grant_list.append(each_grant.filter())\r\n            grant_filter = or_(*grant_list)\r\n            orm_execute_state.statement = orm_execute_state.statement.options(\r\n                with_loader_criteria(grant_entity, grant_filter ))\r\n            security_logger.debug(f\"Grants applied for {table_name}\")\r\n        else:\r\n            security_logger.debug(f\"No Grants for {table_name}\")\r\n\r\n@event.listens_for(session, 'do_orm_execute')\r\ndef receive_do_orm_execute(orm_execute_state):\r\n    \"listen for the 'do_orm_execute' event from SQLAlchemy\"\r\n    if (\r\n        Config.SECURITY_ENABLED\r\n        and orm_execute_state.is_select\r\n        and not orm_execute_state.is_column_load\r\n        and not orm_execute_state.is_relationship_load\r\n    ): \r\n        security_logger.debug(f'receive_do_orm_execute alive')\r\n        mapper = orm_execute_state.bind_arguments['mapper']\r\n        table_name = mapper.persist_selectable.fullname # mapper.mapped_table.fullname deprecated\r\n        if table_name == \"User\":\r\n            security_logger.debug(f'No grants - avoid recursion on User table')\r\n        elif session._proxied._flushing: # type: ignore\r\n            security_logger.debug(f'No grants during logic processing')\r\n        else:\r\n            Grant.exec_grants(orm_execute_state) # SQL read check grants\r\n","repo_name":"valhuber/ApiLogicServer","sub_path":"api_logic_server_cli/project_prototype/security/system/authorization.py","file_name":"authorization.py","file_ext":"py","file_size_in_byte":6004,"program_lang":"python","lang":"en","doc_type":"code","stars":125,"dataset":"github-code","pt":"83"} +{"seq_id":"26132109414","text":"import math\nimport random\nimport time\n\nimport cv2\nimport numpy as np\nimport pygame\nfrom enum import Enum\nimport queue\nimport lib.imagemodder as imd\n\n\nfrom lib.line_path import LinePath\n\n\nclass Sprite(pygame.sprite.Sprite):\n    # FREE_Y and FREE_X mean the sprite is free to flip around that axis\n    class RotationRule(Enum):\n        FREE = 0\n        FREE_Y = 1\n        FREE_X = 2\n        LOCKED = 3\n\n    key_color = (255, 0, 255)\n\n    # Constructor. 
Pass in the image word or image path, the scale,\n    # the rotation offset, the starting x and y position, and the rotation rule\n    def __init__(self, image_word = \"Cat\", image_path=\"data/sprites/ScratchCat.png\", scale=1, rotation_offset=90, x=700 / 2 - 120,\n                 y=400 / 2 - 100,\n                 r_rule=RotationRule.FREE):\n        # Call the parent class (Sprite) constructor\n        pygame.sprite.Sprite.__init__(self)\n        self.__scale = scale\n        self.__rotation_offset = rotation_offset\n        self.__rotation = 0\n        self.__rotation_rule = r_rule\n        # Pen starts up\n        self.__pen_state = False\n        self.__pen_size = 1\n        # used to store path of sprite so lines can be drawn\n        self.__line_path = LinePath()\n        # Load the image and prep it for colorkey, and color effects\n        # Color key set when sprite is rendered. Idk why it works only there but not here\n        self.image_path = image_path\n        if image_word == \"Dog\" or image_word == \"dog\":\n            self.image_path = \"data/sprites/Dog.png\"\n        elif image_word == \"Cat\":\n            self.image_path = \"data/sprites/ScratchCat.png\"\n        elif image_word == \"Person\" or image_word == \"person\":\n            self.image_path = \"data/sprites/Person.png\"\n        elif image_word == \"secret\" or image_word == \"Secret\":\n            self.image_path = \"data/sprites/charizard.png\"\n        temp_image = cv2.imread(self.image_path, cv2.IMREAD_UNCHANGED)\n        new_temp_image = np.zeros((temp_image.shape[1], temp_image.shape[0], 3), dtype=np.uint8)\n        for i in range(temp_image.shape[0]):\n            for j in range(temp_image.shape[1]):\n                if temp_image[i][j][3] == 0:\n                    new_temp_image[j][i] = self.key_color\n                else:\n                    new_temp_image[j][i] = (temp_image[i][j][2], temp_image[i][j][1], temp_image[i][j][0])\n        self.base_image = pygame.surfarray.make_surface(new_temp_image)\n        #self.base_image = pygame.transform.rotate(self.base_image, -1 * self.__rotation_offset)\n        self.image = pygame.transform.rotate(self.base_image, 0)\n\n        # Rotates the image to fix the weirdness of pyatch images\n        self.imd = imd.ImageArrMod(new_temp_image)\n        if scale != 1:\n            self.set_scale(self.__scale)\n        self.rect = self.image.get_rect()\n\n        self.__exec_set_x(x)\n        self.__exec_set_y(y)\n        self.__exec_set_rotation(self.__rotation_offset)\n\n        self.__font = pygame.font.SysFont(\"Arial\", 28)\n        self.say_bubble = None\n        self.__has_say = False\n        self.__say_background_color = (255, 255, 255)\n        self.__say_text_color = (0, 0, 0)\n        self.__say_border_color = (200, 200, 200)\n\n        self.__alpha = 255  # was self.alpha; the name-mangled form matches __set_alpha and change_effect below\n\n        self.__screen_width = 700\n        self.__screen_height = 400\n\n    def move(self, dist):\n        self.__exec_move(dist)\n        if self.__pen_state:\n            self.__update_cur_line()\n\n    def set_x(self, x):\n        if self.__pen_state:\n            self.__new_line_seg()\n        self.__exec_set_x(x)\n        if self.__pen_state:\n            self.__update_cur_line()\n\n    def set_y(self, y):\n        if self.__pen_state:\n            self.__new_line_seg()\n        self.__exec_set_y(y)\n        if self.__pen_state:\n            self.__update_cur_line()\n\n    def change_x(self, x):\n        if self.__pen_state:\n            self.__new_line_seg()\n        self.__exec_change_x(x)\n        if self.__pen_state:\n            self.__update_cur_line()\n\n    def change_y(self, y):\n        if self.__pen_state:\n            self.__new_line_seg()\n        self.__exec_change_y(y)\n        if self.__pen_state:\n            self.__update_cur_line()\n\n    def rotate(self, angle):\n        self.__exec_rotate(angle)\n        if self.__pen_state:\n            self.__new_line_seg()\n\n    def turn_left(self, angle):\n        self.rotate(-angle)\n\n    def turn_right(self, angle):\n        self.rotate(angle)\n\n    def set_rotation(self, angle):\n        self.__exec_set_rotation(angle)\n        if self.__pen_state:\n            self.__new_line_seg()\n\n    def point_towards(self, pos):\n        self.__exec_point_towards(pos[0], pos[1])\n        if self.__pen_state:\n            self.__new_line_seg()\n\n    
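# A tiny usage sketch (assumed API from this class, values hypothetical):\n    #   s = Sprite(image_word='Cat'); s.pen_down(); s.go_to(100, 100); s.move(50)\n    # With the pen down, each movement method above and below extends the sprite's LinePath.\n    def 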
go_to(self, x, y):\n if self.__pen_state:\n self.__new_line_seg()\n self.__exec_set_x(x)\n self.__exec_set_y(y)\n if self.__pen_state:\n self.__update_cur_line()\n\n def go_to_rand(self):\n if self.__pen_state:\n self.__new_line_seg()\n self.__exec_go_to_rand()\n if self.__pen_state:\n self.__update_cur_line()\n\n def __update_cur_line(self):\n self.__line_path.update([self.get_center_x(), self.get_center_y()])\n\n def __new_line_seg(self):\n self.__line_path.add([[self.get_center_x(), self.get_center_y()], [self.get_center_x(), self.get_center_y()]])\n\n def get_center_x(self):\n return self.rect.x + self.rect.width / 2\n\n def get_center_y(self):\n return self.rect.y + self.rect.height / 2\n\n ## Movement Functions ##\n\n def __exec_point_towards(self, x, y):\n rel_x = x - self.get_center_x()\n rel_y = y - self.get_center_y()\n\n angle = math.degrees(math.atan2(-rel_y, rel_x))\n\n self.__exec_set_rotation(angle + self.__rotation_offset)\n\n def set_scale(self, scale):\n self.__scale = scale\n size = self.base_image.get_size()\n self.image = pygame.transform.scale(self.base_image, (int(size[0] * self.__scale), int(size[1] * self.__scale)))\n\n def __exec_set_rotation(self, rotation):\n self.__rotation = rotation\n if self.__rotation_rule == self.RotationRule.FREE:\n self.image = pygame.transform.rotate(self.base_image, self.__rotation + -1 * self.__rotation_offset)\n elif self.__rotation_rule == self.RotationRule.FREE_Y:\n if abs(self.__rotation % 360) - 90 > 90:\n self.image = pygame.transform.flip(self.base_image, True, False)\n else:\n self.image = self.base_image\n elif self.__rotation_rule == self.RotationRule.FREE_X:\n if abs(self.__rotation % 360) > 90:\n self.image = pygame.transform.flip(self.base_image, False, True)\n else:\n self.image = self.base_image\n\n def set_rotation_rule(self, rule):\n if isinstance(rule, self.RotationRule):\n self.__rotation_rule = rule\n # Refresh sprite rotation so it matches the rule\n self.__exec_set_rotation(self.__rotation)\n\n def __exec_rotate(self, angle):\n self.__exec_set_rotation(self.__rotation + angle)\n\n def __exec_set_x(self, x):\n self.rect.x = x\n\n def __exec_change_x(self, x):\n dist = math.ceil(self.rect.x + x)\n self.__exec_set_x(dist)\n\n def __exec_set_y(self, y):\n self.rect.y = y\n\n def __exec_change_y(self, y):\n dist = math.floor(self.rect.y + y)\n self.__exec_set_y(dist)\n\n def __exec_move(self, dist):\n dist_x = dist * math.sin(math.radians(self.__rotation))\n dist_y = dist * math.cos(math.radians(self.__rotation))\n self.__exec_change_x(dist_x)\n self.__exec_change_y(dist_y)\n\n def __exec_go_to(self, x, y):\n self.__exec_set_x(x)\n self.__exec_set_y(y)\n\n def __exec_go_to_rand(self):\n self.__exec_go_to(random.randint(0, self.__screen_width - self.rect.width), random.randint(0, self.__screen_height - self.rect.height))\n\n ## Pen Functions ##\n\n def pen_state(self):\n return self.__pen_state\n\n def pen_down(self):\n self.__pen_state = True\n self.__new_line_seg()\n\n def pen_up(self):\n self.__pen_state = False\n self.__line_path.clear()\n\n def set_pen_size(self, size):\n self.__pen_size = size\n\n def pen_size(self):\n return self.__pen_size\n\n def get_line_path(self):\n return self.__line_path\n\n ## Looks Functions ##\n\n # Sets the font of the sprite\n def set_font(self, font):\n self.__font = font\n\n def has_say(self):\n return self.__has_say\n\n def say(self, message, seconds=-1, left=False):\n text = self.__font.render(message, True, self.__say_text_color)\n text_rect = text.get_rect()\n text_surface = 
text_rect.inflate(10, 10)\n self.say_bubble = pygame.Surface(text_surface.size)\n self.say_bubble.fill(self.key_color)\n self.say_bubble.set_colorkey(self.key_color)\n pygame.draw.rect(self.say_bubble, self.__say_background_color, (0, 0, text_surface.w, text_surface.h), border_radius=5)\n pygame.draw.rect(self.say_bubble, self.__say_border_color, (0, 0, text_surface.w, text_surface.h), 3, 5)\n self.say_bubble.blit(text, (text_surface.w / 2 - text_rect.w / 2, text_surface.h / 2 - text_rect.h / 2))\n self.__has_say = True\n\n def think(self, message, second=-1, left=False):\n self.say(message, second, left)\n self.__set_alpha(20)\n\n def set_effect(self, effect, value):\n if effect == \"ghost\":\n self.__set_alpha(value)\n if effect == \"color\":\n self.imd.restore_image_arr()\n self.__change_hue(value)\n\n def change_effect(self, effect, value):\n if effect == \"ghost\":\n self.__alpha += value\n self.__alpha %= 200\n self.__set_alpha(self.__alpha)\n if effect == \"color\":\n self.__change_hue(value)\n\n # set alpha of sprite\n def __set_alpha(self, alpha):\n self.__alpha = alpha\n self.image.set_alpha(self.__alpha)\n\n def __change_hue(self, hue):\n #start = time.time()\n self.imd.hue_shift_image_arr(hue)\n self.image = pygame.surfarray.make_surface(self.imd.get_image_arr())\n self.base_image = self.image\n #self.__exec_set_rotation(self.__rotation)\n #end = time.time()\n #print(\"Hue shift took: \" + str(end - start))\n\n","repo_name":"ElliotRoe/Pyatch","sub_path":"lib/sprite.py","file_name":"sprite.py","file_ext":"py","file_size_in_byte":10303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"24841670608","text":"from imutils import face_utils\nimport numpy as np\nimport argparse\nimport imutils\nimport dlib\nimport cv2\nimport copy\nimport colorsys\nimport math\nfrom colorDistance import CIELABdistance\nimport os\nfrom colorDistance import CIEDE2000\nfrom convexHull import convexHull, convexRectangle \nfrom processBar import progressbar\nimport shutil\n\nimport collections \n\nclass Shape:\n def __init__(self, code, r, g, b):\n self.code = code\n self.r = r\n self.g = g\n self.b = b\n\n def __str__(self):\n return f\"Code: {self.code}, Color: RGB({self.r}, {self.g}, {self.b})\"\n\n def getCode(self):\n return self.code\n\n def getColor(self):\n return [int(self.r), int(self.g), int(self.b)]\n\ndetector = dlib.get_frontal_face_detector()\npredictor = dlib.shape_predictor(\"shape_predictor_68_face_landmarks.dat\")\n\ndef getFacial(image):\n \n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n rects = detector(gray, 1)\n for (i, rect) in enumerate(rects):\n shape = predictor(gray, rect)\n shape = face_utils.shape_to_np(shape)\n (x, y, w, h) = face_utils.rect_to_bb(rect)\n return shape[60:68]\n \n\ndef getListPixelMayBeTeeth(image):\n vertical = image[int(image.shape[0] / 2), :] \n horizontal = image[:, int(image.shape[1] / 2)]\n listPixel = []\n for color in horizontal:\n listPixel.append(color)\n for color in vertical:\n listPixel.append(color)\n return listPixel\n\ndef readTeethShade():\n file = open(\n r\"C:\\Users\\haime\\OneDrive\\Máy tính\\Python\\facial\\teethcolor.txt\",\n \"r\",\n encoding=\"utf8\",\n )\n outString = []\n for line in file:\n line = line.rstrip(\"\\r\\n\")\n line = line.split(\",\")\n outString.append(Shape(line[0], line[1], line[2], line[3]))\n return outString\ndef distance(p1, p2):\n upperCos = p1[0]*p2[0] + p1[1] * p2[1] + p1[2]*p2[2]\n lowerCos = (p1[0]**2+p1[1]**2+p1[2]**2)**(1/2) * 
(p2[0]**2+p2[1]**2+p2[2]**2)**(1/2) \n    ratio = max(-1.0, min(1.0, upperCos/lowerCos)) # clamp: floating-point drift can push the cosine just past 1.0\n    acos = math.acos(ratio)*180/math.pi # angle between the two RGB vectors, in degrees\n    return acos\ndef findTeethColor(pixelMayBeTeeths, teethShades):\n    minShade = teethShades[0]\n    minMayBeTeeth = pixelMayBeTeeths[0]\n    minDist = distance(pixelMayBeTeeths[0], teethShades[0].getColor())\n    for teethColor in pixelMayBeTeeths:\n        for shade in teethShades:\n            dist = distance(teethColor, shade.getColor())\n            if dist - minDist < 0:\n                minShade = shade\n                minDist = dist\n                minMayBeTeeth = teethColor\n    print(minShade)\n    return minShade\ndef calculateThreshold(image, color):\n    distances = []\n    for i in range(0, image.shape[0]):\n        for j in range(0, image.shape[1]):\n            pixel = image[i][j]\n            distances.append(distance(pixel, color))\n    distances.sort()\n    return distances[int(len(distances)*0.5)]\n\ndef isTeethColor(pixel, teethColor, threshold):\n    \"\"\"\n    return True if the pixel is close enough to the teeth color\n    \"\"\"\n    # if distance(pixel, teethColor) < 50 and (pixel[0] - teethColor[0] < 30) and (pixel[1] - teethColor[1] < 30) and (pixel[2] - teethColor[2] < 30):\n    #     return True\n    if CIEDE2000(pixel, teethColor) < 20:\n        return True\n    \n    return False\n\ndef shiftShapeAfterCrop(shape, point):\n    result = []\n    for p in shape:\n        result.append([p[0] - point[0], p[1] - point[1]])\n    return np.array([result], np.int32)\n\ndef reInpainting(image, ground_truth, teethColor):\n    \"\"\"\n    if a pixel is pink (marked as teeth) but its color is not within the teeth range => fill it with teethColor\n    \"\"\"\n    isTeeth, isNotTeeth = 0, 0\n    threshold = calculateThreshold(image, teethColor)\n    # print(f\"Threshold: {threshold}\")\n    for i in range(0, image.shape[0]):\n        for j in range(0, image.shape[1]):\n            pixel = image[i][j]\n            pink = [255, 0, 255]\n            if collections.Counter(pixel) == collections.Counter(pink):\n                if isTeethColor(ground_truth[i][j], teethColor, threshold):\n                    isTeeth = isTeeth + 1\n                else: \n                    # 229,224,212 _________ 200,160,75\n                    ground_truth[i][j] = [teethColor[2], teethColor[1], teethColor[0]]\n                    isNotTeeth = isNotTeeth + 1 \n    # print(f\"isTeeth: {isTeeth}, isNotTeeth: {isNotTeeth}\")\n    return ground_truth\n\ndef createFacial(image):\n    try:\n        shape = getFacial(image) # points of mouth\n        if shape is None:\n            return None\n        else:\n            [topLeft, botRight] = convexRectangle(shape) # 2 point for crop mouth\n            needed_image = copy.copy(image)\n            if topLeft[1] - botRight[1] > topLeft[0] - botRight[0]:\n                deltaXY = abs(abs(topLeft[1] - botRight[1]) - abs(topLeft[0] - botRight[0]))\n                newTopLeft = [topLeft[0], topLeft[1] - int(deltaXY/2)]\n                newBotRight = [botRight[0], botRight[1] + int(deltaXY/2)]\n                upper_needed_image = needed_image[newTopLeft[1] : topLeft[1] + 1, newTopLeft[0] : botRight[0] + 1]\n                bottom_needed_image = needed_image[botRight[1] : newBotRight[1] + 1, newTopLeft[0] : botRight[0] + 1]\n                needed_image = needed_image[newTopLeft[1] : newBotRight[1] + 1, newTopLeft[0] : newBotRight[0] + 1]\n            image = image[topLeft[1] : botRight[1] + 1, topLeft[0] : botRight[0] + 1] # mouth\n            shape = shiftShapeAfterCrop(shape, topLeft) # new point of mouth after crop\n            ground_truth = copy.copy(image)\n            clone_image = copy.copy(image)\n            pixelMayBeTeeths = getListPixelMayBeTeeth(image) # color on +\n            teethShades = readTeethShade() # list of teeth shade\n            teethColor = findTeethColor(pixelMayBeTeeths,teethShades).getColor() # color of teeth\n            image = convexHull(image, shape)\n            ground_truth = reInpainting(image, ground_truth, teethColor)\n            # clone_image = cv2.resize(clone_image, (256,256), interpolation = cv2.INTER_CUBIC )\n            # ground_truth = cv2.resize(ground_truth, 
(256,256), interpolation = cv2.INTER_CUBIC )\n image = cv2.resize(image, (256,256), interpolation = cv2.INTER_CUBIC)\n\n res = np.concatenate((upper_needed_image, ground_truth, bottom_needed_image), axis=0) \n res = cv2.resize(res, (256,256), interpolation = cv2.INTER_CUBIC )\n needed_image = cv2.resize(needed_image, (256,256), interpolation = cv2.INTER_CUBIC )\n out = np.concatenate((needed_image, res), axis=1)\n # print(f\"Teeth color {teethColor}\")\n return out\n except:\n return\ndef make_directory_if_not_exists(path):\n while not os.path.isdir(path):\n try:\n os.makedirs(path)\n break \n except WindowsError:\n print(\"got WindowsError\")\n pass \ndef main():\n path = \"C:/Users/haime/Downloads/test\"\n shutil.rmtree(path + \"/result\", ignore_errors=True)\n os.mkdir(path + \"/result\")\n files = [file for file in os.listdir(path) if os.path.isfile(os.path.join(path, file))]\n for i in progressbar(range(len(files)), \"Computing: \", 10):\n file = files[i]\n filename = file.split(\".\")\n images = cv2.imread(path + '/' + file)\n out = createFacial(images)\n if out is not None:\n cv2.imwrite(f\"{path}/result/{filename[0]}.png\", out)\n\nif __name__ == \"__main__\":\n main()","repo_name":"vutuanhai237/Braces2TeethUtilities","sub_path":"createPix2Pix (facial)/testing.py","file_name":"testing.py","file_ext":"py","file_size_in_byte":7418,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"83"} +{"seq_id":"873090044","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom model.flat import Flat\nfrom traceback import print_exc\n\nclass Scraper:\n def __init__(self):\n self.flats = []\n\n baseUrl = \"https://realitymix.cz/vypis-nabidek/?form%5Badresa_kraj_id%5D[]=19&form%5Bcena_mena%5D=&form%5Bcena_normalizovana__from%5D=&form%5Bcena_normalizovana__to%5D=6000000&form%5Bdispozice%5D[]=10&form%5Bdispozice%5D[]=11&form%5Bdispozice%5D[]=4&form%5Bdispozice%5D[]=5&form%5Bdruh_objektu%5D[]=2&form%5Bexclusive%5D=&form%5Bfk_rk%5D=&form%5Binzerat_typ%5D=1&form%5Bnemovitost_typ%5D=4&form%5Bplocha__from%5D=50&form%5Bplocha__to%5D=&form%5Bpodlazi_cislo__from%5D=1&form%5Bpodlazi_cislo__to%5D=&form%5Bprojekt_id%5D=&form%5Bsearch_in_city%5D=&form%5Bsearch_in_text%5D=&form%5Bstari_inzeratu%5D=&form%5Bstav_objektu%5D=&form%5Btop_nabidky%5D=&form%5Bvlastnictvi%5D[]=1\"\n self.urls = [baseUrl]\n for i in range(1, 3):\n additionalUrl = baseUrl + \"&stranka=\" + str(i)\n self.urls.append(additionalUrl)\n\n def start_workflow(self):\n self.parse_pages(self.urls)\n return self.flats\n\n def parse_pages(self,urls):\n for url in urls:\n #print(url)\n response = requests.get(url,verify=False)\n soup = BeautifulSoup(response.content,'html.parser',fromEncoding='utf-8')\n\n all_posts = soup.find(\"ul\", {\"class\": \"advert-list-items__items\"})\n posts = all_posts.find_all(\"li\")\n\n #print(mydivs)\n self.parse_posts(posts)\n\n def parse_post(self,link):\n\n print(link)\n\n state = 'N/A'\n penb = 'N/A'\n floor = 1000\n\n # parsing here\n response = requests.get(link, verify=False)\n soup = BeautifulSoup(response.content, 'html.parser', fromEncoding='utf-8')\n\n detail_info = soup.find(\"div\",class_=\"detail-information\")\n desc = soup.find(\"div\", class_=\"advert-description__text-inner-inner\").text.strip()\n\n\n\n\n info_items = detail_info.find_all(\"li\",class_=\"detail-information__data-item\")\n\n for item in info_items:\n text= item.text\n\n #print(text)\n\n if \"Číslo podlaží v domě\" in text:\n replaced = text.replace(\"Číslo podlaží v domě:\",\"\").strip()\n 
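# e.g. a detail row reading Číslo podlaží v domě: 3 leaves replaced == '3' here (hypothetical listing value)\n                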
floor = int(replaced)\n\n if \"Energetická náročnost budovy\" in text:\n replaced = text.replace(\"Energetická náročnost budovy:\", \"\").strip()\n if replaced == 'N/A':\n penb = replaced\n else:\n penb = replaced.split('-')[0].strip()\n if \"Stav objektu\" in text:\n replaced = text.replace(\"Stav objektu:\", \"\").strip()\n state = replaced\n\n if \"přízem\" in desc:\n floor = 0\n\n return floor,penb, state, desc\n\n def parse_posts(self,posts):\n for post in posts:\n link = \"\"\n try:\n heading = post.find(\"h2\").text.strip()\n heading = heading.replace(\"Prodej bytu,\",\"\").replace(\" \",\"\")\n rooms = heading.split(',')[0]\n room_base_coeff = int(rooms.split('+')[0])\n room_addons_coeff = 0.0 if \"kk\" in rooms else 0.5\n room_coeff = room_base_coeff + room_addons_coeff\n meters = heading.split(',')[1]\n meters = int(meters.replace(\"m²\",\"\").strip())\n price = post.find(\"span\",class_=\"advert-list-items__content-price-price\").text.strip()\n price = price.replace(\"Kč\",\"\")\n price = price.encode(\"ascii\", errors=\"ignore\").decode()\n price = int(price.replace(\" \",\"\").strip())\n \n price_per_meter = price / meters\n location = post.find(\"p\",class_=\"advert-list-items__content-address\").text.strip()\n floor = \"N/A\"\n penb = \"N/A\"\n state = \"N/A\"\n link = post.find(\"a\",class_=\"advert-list-items__images\")[\"href\"]\n\n id = link.split('.html')[0].split('-')[-1]\n #print(room_coeff,meters,location,price, link)\n floor, penb, state, desc = self.parse_post(link)\n\n if floor < 1:\n continue\n\n flat = Flat(\n id=id,\n price=price,\n title=location,\n link=link,\n size=room_coeff,\n meters=meters,\n price_per_meter=price_per_meter,\n floor=floor,\n penb=penb,\n state=state,\n description=desc\n )\n self.flats.append(flat)\n except AttributeError as ae:\n pass # this is an advert\n except Exception as e:\n if \"Cena\" in str(e):\n pass\n elif \"Rezerv\" in str(e):\n pass\n else:\n print(\"Uncaught Exception occurred in post-----------------------------\")\n print(e.__class__.__name__, e)\n print_exc()\n print(post)\n print(link)\n\n\n\n\nif __name__ == \"__main__\":\n scraper = Scraper()\n scraper.start_workflow()\n\n for flat in scraper.flats:\n print(flat.get_cmp_dict())\n","repo_name":"josefkerner/realityScraper","sub_path":"centrumReality.py","file_name":"centrumReality.py","file_ext":"py","file_size_in_byte":5415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"47802017664","text":"class Solution:\n def checkStraightLine(self, coordinates: List[List[int]]) -> bool:\n if len(coordinates)==2:\n return True\n FN_0=coordinates[0][0]\n FN_1=coordinates[0][1]\n SN_0=coordinates[1][0]\n SN_1=coordinates[1][1]\n if FN_1==SN_1:\n for point in coordinates[2:]:\n if point[1]!=FN_1:\n return False\n return True\n rate=(SN_0-FN_0)/(SN_1-FN_1)\n for point in coordinates[2:]:\n if point[1]-FN_1==0:\n return False\n cur_rate=(point[0]-FN_0)/(point[1]-FN_1)\n if cur_rate!=rate:\n return False\n return True\n","repo_name":"yangzongwu/leetcode","sub_path":"archives/20190519python/1232. Check If It Is a Straight Line.py","file_name":"1232. 
Check If It Is a Straight Line.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"83"} +{"seq_id":"7693566288","text":"from django import template\nfrom django.core.paginator import Paginator, InvalidPage, EmptyPage\nfrom ferramentas.models import Aviso\nfrom sispag.models import *\n\nregister = template.Library()\n\ndef get_avisos(request):\n    \n    if request.user.is_superuser:\n        aviso_list = Aviso.objects.all()\n    else:\n        pessoa = Pessoa.objects.get(user = request.user)\n        aviso_list = pessoa.admin_aviso_set.all()\n    # render template\n    paginador = Paginator(aviso_list, 5)\n    if request.GET.get('pa'):\n        pagina = int(request.GET.get('pa'))\n    else:\n        pagina = 1\n    try:\n        avisos = paginador.page(pagina)\n    except (EmptyPage, InvalidPage):\n        avisos = paginador.page(paginador.num_pages)\n    return {\n        'aviso_list': avisos,\n    }\n    \n\nregister.inclusion_tag('admin/includes_ferramentas/avisos.html')(get_avisos)\n\n\n","repo_name":"bmelo/caixa","sub_path":"src/ferramentas/templatetags/aviso.py","file_name":"aviso.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"10754697607","text":"import subprocess, os\nimport numpy as np\nimport math\nimport numpy.matlib\nimport multiprocessing as mp\n\ncnresult_list = []\ndef cnlog_result(result):\n    cnresult_list.append(result)\n\ndef run_fcont(config):\n    #set up\n    miriad_path = config.get(\"pipeline\", \"miriad_path\")\n    cver = config.get(\"pipeline\", \"cver\")\n    fcont_seed = int(config.getfloat(\"pipeline\", \"fcont_seed\"))\n    frames = config.get(\"pipeline\", \"frames\")\n    bins = config.get(\"pipeline\", \"bins\")\n    np.random.seed(fcont_seed)\n    indir = config.get(\"pipeline\", \"indir\")\n    outdir = config.get(\"pipeline\", \"outdir\")\n    dfreq = 0.050\n    sdfreq = '%.6e' % dfreq\n    ds = 2.8\n    db = 7.\n    dbr = db/3600.*np.pi/180.\n    sdbr = '%.6e' % dbr\n    # convert from Jy/pix to Jy/bm, db arcs FWHM and ds arcs pixels\n    px2bm = np.pi/(4.*math.log(2))*(db/ds)**2  # e.g. db=7., ds=2.8 gives pi/(4 ln 2)*(2.5)**2 ~= 7.08 pixels per beam\n    spx2bm = '%.6f' % px2bm\n\n    if os.path.exists(outdir+'cube_frncont_'+cver): \n        print ('**** message from pipeline: '+outdir+'cube_frncont_'+cver+' already exists')\n        print ('**** message from pipeline: not running func_fcont')\n        return\n    if not os.path.exists(indir+'sky_cont_'+cver): \n        print('converting fits files to miriad format')\n        convert(indir, cver,miriad_path) \n\n\n    text = miriad_path+'maths exp=\"(<'+indir+'sky_cont_'+cver+'>)*'+spx2bm+'\" out=\"'+outdir+'cube_fcont\"'\n    subprocess.run(text, shell=True)\n    text = miriad_path+'puthd in='+outdir+'cube_fcont/bunit value=\"JY/BEAM \"'\n    subprocess.run(text, shell=True)\n\n    # resample continuum cube more finely\n    text = miriad_path+'regrid in=\"'+outdir+'cube_fcont\" axes=3 desc=\"0.95,1,0.01,21\" tol=0.001 out=\"'+outdir+'cube_frcont_'+cver+'\"'\n    subprocess.run(text, shell=True)\n    # make second imperfect version of the resampled continuum cube\n    sigma = 0.001 \n    pool = mp.Pool()\n    for mfreq in range(950,1160,10):\n        pool.apply_async(make_ncont, args=(mfreq,sigma,cver, outdir, frames, bins,miriad_path), callback = cnlog_result)\n    pool.close()\n    pool.join()\n    print (cnresult_list)\n\n    text = miriad_path+'imcat in=\"*_rnc\" out=\"'+outdir+'cube_frncont_'+cver+'\"'\n    subprocess.run(text, shell=True)\n    text = '/bin/rm -rf *_rnc '+outdir+'cube_fcont' \n    subprocess.run(text, shell=True)\n\ndef convert(indir, cver,miriad_path):\n    
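# each assignment below builds one MIRIAD command line; op=xyin imports a FITS image into MIRIAD format\n    text = 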
miriad_path+'fits op=\"xyin\" in=\"'+indir+'sky_continuum_f1_'+cver+'.fits\" out=\"'+indir+'sky_cont_f1_'+cver+'\"'\n    subprocess.run(text, shell=True)\n    text = miriad_path+'fits op=\"xyin\" in=\"'+indir+'sky_continuum_f2_'+cver+'.fits\" out=\"'+indir+'sky_cont_f2_'+cver+'\"'\n    subprocess.run(text, shell=True)\n    text = miriad_path+'imcat in='+indir+'sky_cont_f1_'+cver+','+indir+'sky_cont_f2_'+cver+' out='+indir+'sky_cont_'+cver\n    subprocess.run(text, shell=True)\n    text = miriad_path+'fits in='+indir+'sky_continuum_f1_'+cver+'_z.fits op=\"xyin\" out='+indir+'sky_cont_'+cver+'_z'\n    subprocess.run(text, shell=True)\n\n\n\n\n\ndef make_ncont(mfreq,sigma,cver, outdir, frames, bins, miriad_path):\n    ichan = 1 + (mfreq - 950)//10  # integer channel index (plain / would put 1.0, 2.0, ... into the region string)\n    schan = str(ichan)\n    smfreq = '%d' % mfreq\n    sm4freq = smfreq.zfill(4)\n    sifreq = '%.6f' % (mfreq/1000.)\n    text = miriad_path+'imsub in=\"'+outdir+'cube_frcont_'+cver+'\" out=\"'+sm4freq+'_rc\" region=\"images('+schan+')\"'\n    subprocess.run(text, shell=True)\n#    text = miriad_path+'imframe in=\"'+sm4freq+'_rc\" out=\"'+sm4freq+'_rcf\" frame=\"-2944,2943,-2944,2943\"'\n#    text = miriad_path+'imframe in=\"'+sm4freq+'_rc\" out=\"'+sm4freq+'_rcf\" frame=\"-656,655,-656,655\"'\n    text = miriad_path+'imframe in=\"'+sm4freq+'_rc\" out=\"'+sm4freq+'_rcf\" frame=\"'+frames+'\"'\n    subprocess.run(text, shell=True)\n    text = miriad_path+'imbin in=\"'+sm4freq+'_rcf\" out=\"'+sm4freq+'_rcb\" bin=\"'+bins+'\"'\n    subprocess.run(text, shell=True)\n    text = miriad_path+'imgen in=\"'+sm4freq+'_rcb\" out=\"'+sm4freq+'_rcbn\" object=\"noise\" spar=1. seed=\"'+smfreq+'\"'\n    subprocess.run(text, shell=True)\n    text = miriad_path+'regrid in=\"'+sm4freq+'_rcbn\" out=\"'+sm4freq+'_rcbnx\" tin=\"'+sm4freq+'_rc\"'\n    subprocess.run(text, shell=True)\n    ssigma = '%.6e' % sigma\n    text = miriad_path+'maths exp=\"(<'+sm4freq+'_rc>*(1.+'+ssigma+'*<'+sm4freq+'_rcbnx>))\" out=\"'+sm4freq+'_rnc\"'\n    subprocess.run(text, shell=True)\n    # clean up intermediate products\n    text = '/bin/rm -rf '+sm4freq+'_rc '+sm4freq+'_rcf '+sm4freq+'_rcb '+sm4freq+'_rcbn '+sm4freq+'_rcbnx' \n    subprocess.run(text, shell=True)\n    text = 'Finished NCont Frequency: '+sm4freq\n    return text\n\n\nif __name__ == '__main__':\n    run_fcont(config)  # note: config is expected from the caller; running this file directly would raise NameError\n\n\n","repo_name":"PhilippaHartley/SKAO-SDC2","sub_path":"observe/func_fcont.py","file_name":"func_fcont.py","file_ext":"py","file_size_in_byte":4576,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"37712559342","text":"from L1135_ConnectingCitiesWithMinimumCost import f_gold\n\n##########\n# ++++++ to be replaced by tester ++++++\nmylog = print\nmyexactlog = print\n\"+++++++++++++++++\"\n\ndef test():\n    \"--- test function ---\"\n    param = [\n        # example 1\n        [3, [[1, 2, 5], [1, 3, 6], [2, 3, 1]]]\n        # output: 6\n        # EXPLANATION: Choosing any 2 edges will connect all cities so we choose the minimum 2.\n        ,\n        # example 2\n        [4, [[1, 2, 3], [3, 4, 4]]]\n        # output: -1\n        # EXPLANATION: There is no way to connect all cities even if all edges are used.\n        ,\n    ]\n    for i, parameters_set in enumerate(param):\n        idx = i\n        mylog(0, idx)\n        result = f_gold(* parameters_set)\n        myexactlog(1, 
result)\n\n##########\n\ntest()\n","repo_name":"HALOCORE/DuoGlot","sub_path":"data/duoglot/tests/staleetcode/pysep/L1135_ConnectingCitiesWithMinimumCost__test.py","file_name":"L1135_ConnectingCitiesWithMinimumCost__test.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"83"} +{"seq_id":"28685411774","text":"from odoo import api, fields, models, _\nfrom odoo.exceptions import UserError\n\n\nclass AccountInvoice(models.Model):\n _inherit = \"account.invoice\"\n\n rate_invoice = fields.Float(\n string='Rate',\n compute='_compute_rate_invoice',\n digits=(12, 4))\n\n @api.multi\n def _compute_rate_invoice(self):\n for invoice in self:\n company_currency = invoice.company_id.currency_id\n rate = 1\n date = invoice._get_currency_rate_date()\n date = date or fields.Date.context_today(invoice)\n\n if invoice.currency_id != company_currency:\n currency = invoice.currency_id.with_context(date=date)\n rate = currency.compute(rate, company_currency)\n\n invoice.rate_invoice = rate\n\n @api.multi\n def write(self, vals):\n rec = super(AccountInvoice, self).write(vals)\n\n for invoice in self:\n invoice.invoice_line_ids.recompute_tax_id()\n '''\n if vals.get('invoice_line_ids'):\n for invoice in self:\n invoice.invoice_line_ids.recompute_tax_id()\n\n for line in vals.get('invoice_line_ids'):\n if line[0] == 2:\n for invoice in self:\n invoice.invoice_line_ids.recompute_tax_id()\n\n continue\n\n if not type(line[2]) is dict:\n continue\n\n if ('price_unit' in line[2].keys()\n or 'quantity' in line[2].keys()\n or 'discount' in line[2].keys()):\n for invoice in self:\n invoice.invoice_line_ids.recompute_tax_id()\n\n break\n '''\n return rec\n\n @api.multi\n def get_tax_groups(self):\n tax_groups = {}\n\n for line in self.invoice_line_ids:\n price_unit = line.price_unit * (1 - (line.discount or 0.0) / 100.0)\n taxes = line.invoice_line_tax_ids.compute_all(\n price_unit,\n line.currency_id,\n line.quantity,\n product=line.product_id,\n partner=self.partner_id)['taxes']\n\n for tax in taxes:\n tax_id = self.env['account.tax'].browse(tax['id'])\n key = str(tax_id.tax_group_id.id)\n\n if key not in tax_groups:\n values = {\n 'tax_group_id': tax_id.tax_group_id.id,\n 'base': tax['base'],\n 'amount': tax['amount']}\n tax_groups[key] = values\n else:\n tax_groups[key]['base'] += tax['base']\n tax_groups[key]['amount'] += tax['amount']\n\n return tax_groups\n\n def check_base_to_overcome(self):\n msg = _('There is no date range corresponding to the date of your invoice')\n\n if self.date_invoice:\n param = [\n ('date_start', '<=', self.date_invoice),\n ('date_end', '>=', self.date_invoice)]\n else:\n param = [\n ('date_start', '<=', fields.Date.today()),\n ('date_end', '>=', fields.Date.today())]\n\n daterange = self.env['date.range'].search(param)\n\n if not daterange:\n raise UserError(msg)\n else:\n fiscalunit = daterange.fiscalunit\n\n tax_groups = self.get_tax_groups()\n remove_tax_groups_ids = []\n\n if tax_groups:\n for group_key in tax_groups.keys():\n tax_group_id = self.env['account.tax.group'].search(\n [('id', '=', tax_groups[group_key]['tax_group_id'])])\n base_to_overcome = tax_group_id.fiscalunit_factor * fiscalunit\n base = tax_groups[group_key]['base'] * self.rate_invoice\n\n if base_to_overcome > base:\n remove_tax_groups_ids.append(tax_group_id)\n\n return remove_tax_groups_ids\n\n @api.onchange('invoice_line_ids')\n def _onchange_invoice_line_ids(self):\n self.invoice_line_ids.recompute_tax_id()\n\n return 
super(AccountInvoice, self)._onchange_invoice_line_ids()\n","repo_name":"caldasrdev/l10n-colombia","sub_path":"account_invoice_tax_fiscalunit/models/account_invoice.py","file_name":"account_invoice.py","file_ext":"py","file_size_in_byte":4195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"83"} +{"seq_id":"73059623632","text":"from typing import List\nfrom fastapi import APIRouter, Depends, status, HTTPException\nimport sys \nsys.path.append(\"..\")\nimport schema, database, oauth2\nfrom models import FlightModel, ItineraryModel\nfrom sqlalchemy.orm import Session\n\n\nrouter = APIRouter(\n prefix=\"/flight_booking\",\n tags=['Flight Bookings']\n)\n\nget_db = database.get_db\n\n@router.get('/', response_model=List[schema.FlightBookingModel])\ndef all(db: Session = Depends(get_db)):\n return FlightModel.FlightBooking.get_all_flight_bookings(db)\n\n@router.post('/new_booking', status_code=status.HTTP_201_CREATED)\ndef create(request: schema.FlightBookingModel, db: Session = Depends(get_db)):\n return FlightModel.FlightBooking.create_flight_booking(request, db)\n\n@router.get('/create/{flight_id}', status_code=status.HTTP_201_CREATED)\ndef create_booking(flight_id, db: Session = Depends(get_db), current_user: schema.UserModel = Depends(oauth2.get_current_user)):\n user_id = current_user.id\n itinerary_id = ItineraryModel.Itinerary.get_user_itinerary_id(user_id, db)\n if itinerary_id == None:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f\"User does not have an itinerary to book a flight for\")\n booking = FlightModel.FlightBooking.create_flight_booking(schema.FlightBookingModel(flight_id=flight_id, itinerary_id=itinerary_id), db)\n if booking == None:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f\"Flight Booking with the flight id {flight_id} and itinerary id {itinerary_id} was not found\")\n return booking\n\n@router.put('/update_booking/{flight_id}/{itinerary_id}', status_code=status.HTTP_202_ACCEPTED)\ndef update(flight_id, itinerary_id, request: schema.FlightBookingModel, db: Session = Depends(get_db)):\n booking = FlightModel.FlightBooking.update_flight_booking(flight_id, itinerary_id, request, db)\n if booking == None:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f\"Flight Booking with the flight id {flight_id} and itinerary id {itinerary_id} was not found\")\n return booking\n\n@router.delete('/delete/{flight_id}/{itinerary_id}', status_code=status.HTTP_204_NO_CONTENT)\ndef destroy(flight_id, itinerary_id, db: Session = Depends(get_db)):\n booking = FlightModel.FlightBooking.delete_flight_booking(flight_id, itinerary_id, db)\n if booking == None:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f\"Flight Booking with the flight id {flight_id} and itinerary id {itinerary_id} was not found\")\n return booking\n\n@router.get('/find_booking/{flight_id}/{itinerary_id}', status_code=status.HTTP_200_OK, response_model=schema.FlightBookingModel)\ndef show(flight_id, itinerary_id, db: Session = Depends(get_db)):\n booking = FlightModel.FlightBooking.get_flight_booking(flight_id, itinerary_id, db)\n if booking == None:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f\"Flight Booking with the flight id {flight_id} and itinerary id {itinerary_id} was not found\")\n return booking\n\n@router.get('/price/{flight_id}/{itinerary_id}', status_code=status.HTTP_200_OK, response_model=float)\ndef get_price(flight_id, itinerary_id, db: Session = 
Depends(get_db)):\n booking = FlightModel.FlightBooking.get_flight_booking(flight_id, itinerary_id, db)\n if booking == None:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f\"Flight Booking with the flight id {flight_id} and itinerary id {itinerary_id} was not found\")\n return booking.price\n\n@router.get('/sum_price/{itinerary_id}', status_code=status.HTTP_200_OK, response_model=float)\ndef get_total_price(itinerary_id: int, db: Session = Depends(get_db)):\n query = db.query(FlightModel.FlightBooking).join(FlightModel.Flight).filter(FlightModel.FlightBooking.itinerary_id == itinerary_id).with_entities(FlightModel.FlightBooking.totalPrice).all()\n total_price = sum([booking.totalPrice for booking in query])\n return total_price\n","repo_name":"rakeebh7233/NOMAD2","sub_path":"backend/routers/flight_booking.py","file_name":"flight_booking.py","file_ext":"py","file_size_in_byte":3967,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"41214761804","text":"\"\"\"\ncoding: utf8\n@time: 2021/3/28 17:02\n@author: cjr\n@file: 字符串中不同整数的数目.py\n\n给你一个字符串 word ,该字符串由数字和小写英文字母组成。\n\n请你用空格替换每个不是数字的字符。例如,\"a123bc34d8ef34\" 将会变成 \" 123  34 8  34\" 。注意,剩下的这些整数为(相邻彼此至少有一个空格隔开):\"123\"、\"34\"、\"8\" 和 \"34\" 。\n\n返回对 word 完成替换后形成的 不同 整数的数目。\n\n只有当两个整数的 不含前导零 的十进制表示不同, 才认为这两个整数也不同。\n\n \n\n示例 1:\n\n输入:word = \"a123bc34d8ef34\"\n输出:3\n解释:不同的整数有 \"123\"、\"34\" 和 \"8\" 。注意,\"34\" 只计数一次。\n示例 2:\n\n输入:word = \"leet1234code234\"\n输出:2\n示例 3:\n\n输入:word = \"a1b01c001\"\n输出:1\n解释:\"1\"、\"01\" 和 \"001\" 视为同一个整数的十进制表示,因为在比较十进制值时会忽略前导零的存在。\n \n\n提示:\n\n1 <= word.length <= 1000\nword 由数字和小写英文字母组成\n\"\"\"\n\n\nclass Solution:\n \"\"\"\n 用栈存储数字,遇到非数字就转成int之后,一起存到set中。\n 然后计算set的长度就行了。\n \"\"\"\n def numDifferentIntegers(self, w: str) -> int:\n stack = []\n res = set()\n for i in w:\n if i.isdigit():\n stack.append(i)\n else:\n if stack:\n res.add(int(''.join(stack)))\n stack = []\n if stack:\n res.add(int(''.join(stack)))\n return len(list(res))\n\n\n\n\n\n\n\n","repo_name":"cjrzs/MyLeetCode","sub_path":"LeetCode周赛/2021-03-28第234场周赛/字符串中不同整数的数目.py","file_name":"字符串中不同整数的数目.py","file_ext":"py","file_size_in_byte":1604,"program_lang":"python","lang":"zh","doc_type":"code","stars":5,"dataset":"github-code","pt":"83"} +{"seq_id":"29266650751","text":"from functools import reduce\nfrom operator import __or__\ndef CountBits(n):\n n = (n & 0x5555555555555555) + ((n & 0xAAAAAAAAAAAAAAAA) >> 1)\n n = (n & 0x3333333333333333) + ((n & 0xCCCCCCCCCCCCCCCC) >> 2)\n n = (n & 0x0F0F0F0F0F0F0F0F) + ((n & 0xF0F0F0F0F0F0F0F0) >> 4)\n n = (n & 0x00FF00FF00FF00FF) + ((n & 0xFF00FF00FF00FF00) >> 8)\n n = (n & 0x0000FFFF0000FFFF) + ((n & 0xFFFF0000FFFF0000) >> 16)\n n = (n & 0x00000000FFFFFFFF) + ((n & 0xFFFFFFFF00000000) >> 32) # This last & isn't strictly necessary.\n return n\n\nhex2segnames=['abcdef','bc','abged','abcdg','bcgf','afgcd','acdefg','abc',\n 'abcdefg','abcfg','abcefg','cdefg','defa','bcdeg','adefg','aefg']\ndef segmentsFromByte(value,segmentorder='abcdefgp'):\n return [c for i,c in enumerate(segmentorder) if value & (1< numeroDois:\n print(f'\\nO primeiro valor \"{numeroUm}\" é maior que o segundo valor \"{numeroDois}\"!!')\n elif numeroUm < numeroDois:\n print(f'\\nO segundo valor \"{numeroDois}\" é maior que o primeiro valor \"{numeroUm}\"!!')\n elif opcao == 4:\n numeroUm = int(input('\\nInforme novamente o primeiro valor: '))\n numeroDois = int(input('Informe novamente o segundo valor: '))\n elif opcao == 5:\n print('Saindo do programa...')\n 
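# any menu choice outside the listed options falls through to the invalid-option warning below\n    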
else:\n        print('Opção Inválida!! Tente Novamente!!')\n","repo_name":"HebertFB/Curso-de-Python-3-Mundo-2-Curso-em-Video","sub_path":"Curso de Python 3 - Mundo 2 - Curso em Video/ex059.py","file_name":"ex059.py","file_ext":"py","file_size_in_byte":1366,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"71289764113","text":"#####################\n # Day 14\n # Project: higher or lower game\n\nimport art\nfrom game_data import data\nimport random\nfrom replit import clear\n\ndef check_answer(guess, count1, count2):\n    if count1 > count2:\n        return guess == \"A\"\n    else:\n        return guess == \"B\"\n\ndef higher_lower():\n\n    print(art.logo)\n    score = 0\n    game_over = False\n    sample = random.sample(data, 2)\n    \n    while not game_over:\n\n        # switching positions and resampling the second account\n        # to make sure account2 is account1 in the next round\n        acc1 = sample[1]\n        acc2 = random.choice(data)\n        \n        # making sure the accounts are not the same\n        while acc1 == acc2:\n            acc2 = random.choice(data)\n        sample[1] = acc2  # carry this round's B over, so next round's A really is this B\n        \n        name1 = acc1['name']\n        count1 = int(acc1[\"follower_count\"])\n        desc1 = acc1[\"description\"]\n        country1 = acc1[\"country\"]\n        \n        name2 = acc2['name']\n        count2 = int(acc2[\"follower_count\"])\n        desc2 = acc2[\"description\"]\n        country2 = acc2[\"country\"]\n        \n        print(f\"Compare A: {name1}, a {desc1}, from {country1}.\")\n        print(art.vs)\n        print(f\"Against B: {name2}, a {desc2}, from {country2}.\")\n        \n        guess = input(\"Who has more followers? Type 'A' or 'B'. \")\n        correct_answer = check_answer(guess, count1, count2)\n        \n        clear()\n        print(art.logo)\n        if correct_answer:\n            score += 1\n            print(f\"You're right! Your current score is {score}.\")\n        else:\n            game_over = True\n            print(f\"Sorry, that's wrong. Your final score is {score}.\")\n\nhigher_lower() \n    ","repo_name":"ffrodslaw/100-days-of-code","sub_path":"day14.py","file_name":"day14.py","file_ext":"py","file_size_in_byte":1544,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"28681653844","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nAll the files show the result by means of a single decision variable,\r\nbut I put the code (as comment) for the 2 decision variable solution at the \r\nend of each script\r\n\"\"\"\r\nimport numpy as np\r\nimport math\r\n\r\n# Parameters\r\nA = 1.1\r\nalpha = .75\r\nw_0 = 1.5\r\nw_1 = .19\r\nw_2 = .03\r\nw = .05\r\neta = 1\r\nq = .05\r\nbeta = .95\r\n\r\n# Grids\r\ne = np.linspace(1, 500, 20) \r\nfor i in range(len(e)): \r\n    e[i] = round(e[i])\r\n    \r\ne_dense= np.linspace(1, 500, 200) \r\nfor i in range(len(e_dense)): \r\n    e_dense[i] = round(e_dense[i])\r\n    \r\nh = np.linspace(1, 70, 70) \r\nfor i in range(len(h)):\r\n    h[i] = round(h[i])\r\n\r\n\r\n#--------------------------------#\r\n#-STEP 1: Interpolation function-#\r\n#--------------------------------#\r\ndef interpolation(e_next, e, V):\r\n    if e_next <= e[0]:\r\n        return V[0]\r\n    elif e_next >= e[-1]:\r\n        return V[-1]\r\n    else:\r\n        n = math.floor((e_next-e[0])/(e[1]-e[0])) \r\n        ebefore = e[n]\r\n        eafter = e[n+1]\r\n        vbefore = V[n]\r\n        vafter = V[n+1]\r\n        return vbefore + (e_next-ebefore)/(eafter-ebefore)*(vafter-vbefore) \r\n\r\n#-----------------------------------------------------#\r\n#-STEP 2: Function to find the optimal \"h*\" given \"e\"-#\r\n#-----------------------------------------------------#\r\ndef optimal_h(e_next):\r\n    LHS = np.zeros(len(h))\r\n    RHS = np.zeros(len(h))\r\n    for i in range(len(h)):\r\n        LHS[i] = A*h[i]**(alpha-1)*alpha*e_next**alpha  # marginal revenue of an hour: d/dh of A*(e*h)**alpha\r\n        
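# RHS is the marginal cost of one more hour, d(cost)/dh = w*e*(1 + w_1 + 2*w_2*(h-40)), expanded term by term:\r\n        RHS[i] = 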
w*e_next + w*e_next*w_1 + 2*w*e_next*w_2*h[i] - 80*w*e_next*w_2 \r\n difference = abs(RHS-LHS)\r\n h_opt = int(h[np.argmin(difference)])\r\n return h_opt\r\n \r\n#----------------------------------------#\r\n#-STEP 3: compute the objective function-#\r\n#----------------------------------------#\r\ndef objValue(e_index, e_nextindex, e, h, e_dense, V):\r\n e_today = e[e_index]\r\n e_nextday = e_dense[e_nextindex]\r\n h_today = optimal_h(e_today)\r\n \r\n # different elements of the function \r\n revenue = A*(e_today*h_today)**alpha\r\n costs = w*e_today*(w_0 + h_today + w_1*(h_today-40) + w_2*(h_today-40)**2)\r\n adj_costs = (eta/2)*(e_nextday-(1-q)*e_today)**2\r\n \r\n obj_function = revenue - costs - adj_costs + beta*interpolation(e_nextday, e, V)\r\n return obj_function\r\n\r\n#-----------------------------#\r\n#-STEP 4: compute the maximum-#\r\n#-----------------------------#\r\ndef computeMax(e_index, e, h, e_dense, V):\r\n solution = float('-inf')\r\n optindex = -1\r\n N = len(e)\r\n ub = N\r\n lb = 0\r\n while ub != lb:\r\n test = math.floor((ub+lb)/2)\r\n value = objValue(e_index, test, e, h, e_dense, V)\r\n valuenext = float('-inf')\r\n if test+1 < N:\r\n valuenext = objValue(e_index, test+1, e, h, e_dense, V)\r\n if value < valuenext:\r\n lb = test+1 \r\n if valuenext > solution:\r\n solution = valuenext\r\n optindex = test+1\r\n else:\r\n ub = test\r\n if value > solution:\r\n solution = value\r\n optindex = test\r\n return solution, optindex \r\n\r\n#----------------------------------------#\r\n#-STEP 5: compute the new value function-#\r\n#----------------------------------------#\r\ndef newValueFunction(e, h, e_dense, V):\r\n N = len(e)\r\n TV = np.zeros(N)\r\n gamma = np.zeros(N)\r\n for i in range(N):\r\n TV[i], gamma[i] = computeMax(i, e, h, e_dense, V)\r\n return TV, gamma\r\n\r\n#----------------------------------#\r\n#-STEP 6: value function iteration-#\r\n#----------------------------------#\r\ndef valueFunctionIteration(e, h, e_dense):\r\n N = len(e)\r\n V = np.random.normal(0,1,N) \r\n TV = np.zeros(N)\r\n gamma = np.zeros(N) \r\n dist = 10\r\n epsilon = .1\r\n loops = 0\r\n while dist > epsilon:\r\n TV, gamma = newValueFunction(e, h, e_dense, V)\r\n dist = max(abs(V-TV))\r\n V = TV.copy()\r\n loops += 1 \r\n print(f'loop number {loops} with distance {dist}')\r\n gamma = gamma.astype(int)\r\n return V, gamma \r\n\r\nV_ast2, Gamma_ast2 = valueFunctionIteration(e, h, e_dense)\r\n\r\n#--------------------------------------------------------------------------\r\n\r\n##### SOLUTION WITH 2 DECISION VARIABLES\r\n\r\n\"\"\"\r\ne = np.linspace(1, 500, 20) \r\nfor i in range(len(e)): \r\n e[i] = round(e[i])\r\n\r\nedense = np.linspace(1, 500, 100) \r\nfor i in range(len(e)):\r\n e[i] = round(e[i])\r\n\r\nh = np.linspace(1, 70, 70) \r\nfor i in range(len(h)): \r\n h[i] = round(h[i])\r\n \r\n#---------------------------------------#\r\n#-Step 1: linear interpolation function-#\r\n#---------------------------------------#\r\n# The idea is to make a function to linearly interpolate only with respect \r\n# to the variable \"e\", so keeping a fixed hindex. 
\r\n# That's because our V function is bivariate but for the purpose of\r\n# the assignment we don't need a bilinear interpolation\r\ndef lin_interpolation(enext, hnext, e, h, V):\r\n n_h = math.floor((hnext-h[0])/(h[1]-h[0])) # index of hnext\r\n \r\n if enext <= e[0]:\r\n return V[0, n_h]\r\n elif enext >= e[-1]:\r\n return V[-1, n_h]\r\n else:\r\n n_e = math.floor((enext-e[0])/(e[1]-e[0]))\r\n ebefore = e[n_e]\r\n eafter = e[n_e+1]\r\n vbefore = V[n_e, n_h]\r\n vafter = V[n_e+1, n_h] \r\n return vbefore + (enext-ebefore)/(eafter-ebefore)*(vafter-vbefore) \r\n\r\n#----------------------------------------#\r\n#-Step 2: compute the objective function-#\r\n#----------------------------------------#\r\ndef objValue(e_index, h_index, e_nextindex, h_nextindex, e, h, edense, V):\r\n e_today = e[e_index]\r\n e_nextday = edense[e_nextindex]\r\n h_today = h[h_index]\r\n h_nextday = h[h_nextindex]\r\n \r\n revenue = A*(e_today*h_today)**alpha\r\n costs = w*e_today*(w_0 + h_today + w_1*(h_today-40) + w_2*(h_today-40)**2)\r\n adj_costs = (eta/2)*(e_nextday-(1-q)*e_today)**2\r\n \r\n obj_function = revenue - costs - adj_costs + beta*lin_interpolation(e_nextday, h_nextday, e, h, V)\r\n return obj_function\r\n\r\n#-----------------------------#\r\n#-Step 3: compute the maximum-#\r\n#-----------------------------#\r\ndef computeMax(e_index, h_index, e, h, edense, V):\r\n solution = float('-inf')\r\n optindex = [-1, -1]\r\n N_h = len(h)\r\n N_e = len(edense)\r\n \r\n for i in range(N_h):\r\n ub = N_e\r\n lb = 0\r\n while ub != lb:\r\n test = math.floor((ub+lb)/2)\r\n value = objValue(e_index, h_index, test, i, e, h, edense, V)\r\n valuenext = float('-inf')\r\n if test+1 < N_e:\r\n valuenext = objValue(e_index, h_index, test+1, i, e, h, edense, V)\r\n if value < valuenext:\r\n lb = test+1 \r\n if valuenext > solution:\r\n solution = valuenext\r\n optindex = [test+1, i]\r\n else:\r\n ub = test\r\n if value > solution:\r\n solution = value\r\n optindex = [test, i]\r\n\r\n return solution, optindex \r\n\r\n#----------------------------------------#\r\n#-Step 4: compute the new value function-#\r\n#----------------------------------------#\r\ndef newValueFunction(e, h, edense, V):\r\n N_e = len(e)\r\n N_h = len(h)\r\n TV = np.zeros(shape=(N_e, N_h))\r\n gamma = []\r\n for i in range(N_e):\r\n for j in range(N_h):\r\n result = computeMax(i, j, e, h, edense, V)\r\n TV[i,j] = result[0]\r\n gamma.append(result[1])\r\n return TV, gamma \r\n\r\n#----------------------------------#\r\n#-Step 5: value function iteration-#\r\n#----------------------------------#\r\ndef valueFunctionIteration(e, h, edense):\r\n N_e = len(e)\r\n N_h = len(h)\r\n V = np.zeros(shape=(len(e), len(h))) \r\n for r in range(len(e)):\r\n V[r, ] = np.random.normal(0,1, len(h))\r\n TV = np.zeros(shape=(N_e, N_h))\r\n gamma = [] \r\n dist = 10\r\n epsilon = 1\r\n loops = 0\r\n while dist > epsilon:\r\n TV, gamma = newValueFunction(e, h, edense, V)\r\n dist = (abs(V-TV)).max()\r\n V = TV.copy()\r\n loops += 1 \r\n print(f'loop number {loops} with distance {dist}')\r\n for i in range(len(gamma)):\r\n gamma[i] = [int(x) for x in gamma[i]]\r\n return V, gamma\r\n \r\nV_ast2, Gamma_ast2 = valueFunctionIteration(e, h, edense)\r\n\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"calculus-ask/Bellman-Equation-Economics","sub_path":"Code/interpolation.py","file_name":"interpolation.py","file_ext":"py","file_size_in_byte":8231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"83"} +{"seq_id":"70889557391","text":"from uuid 
import uuid4\n\nfrom django.http import HttpResponse, JsonResponse\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.db.models import Q\n\nfrom rest_framework.views import APIView\nfrom rest_framework.parsers import JSONParser, MultiPartParser, FormParser, FileUploadParser\nfrom rest_framework.response import Response\nfrom rest_framework import authentication, permissions, status\n\n\nfrom creatder.decorators import is_authorized, is_authorized_photo\nfrom creatder.models import (\n Creature, Review, User, Token, PasswordResetToken, CreateAccountToken, File\n )\nfrom creatder.serializers import (GetCreatureSerializer,\n CreateCreatureSerializer, GetUserSerializer, CreateUserSerializer,\n UpdateUserSerializer, RateCreatureSerializer,GetUserRatingsSerializer,\n UpdateCreatureSerializer, RegisterTokenSerializer, RegisterRequestSerializer,\n TokenSerializer, LoginSerializer, PasswordResetRequestSerializer,\n PasswordResetTokenSerializer, PasswordUserSerializer,\n FileSerializer, SearchCreatureSerializer)\n\nfrom creatder.services import (\n send_password_reset_mail, check_token_validity, send_user_register_mail,\n MinimumLengthValidator, NumericPasswordValidator, delete_pig_photo)\n\n\n# @is_authorized\n@csrf_exempt\ndef creature_list(request):\n \"\"\"\n List all creatures.\n \"\"\"\n if request.method == 'GET':\n creatures_all = Creature.objects.all()\n serializer = GetCreatureSerializer(creatures_all, many=True)\n return JsonResponse(serializer.data, safe=False)\n\n\n\n@csrf_exempt\ndef creature_list_paginated(request, page=1):\n \"\"\"\n List all creatures.\n Page size is 9 (hence magic 9s in the code).\n \"\"\"\n # Ignore bad value for page and substitute 1\n if page < 1 or type(page) != int:\n page = 1\n\n if request.method == 'GET':\n creatures_all = Creature.objects.all()\n creature_count = creatures_all.count()\n\n if (creature_count % 9) == 0:\n max_page = (creature_count // 9)\n else:\n max_page = (creature_count // 9) + 1\n\n upper_limit = min([page * 9, creature_count])\n creatures = creatures_all[(page-1)*9:upper_limit]\n\n if page > max_page:\n return HttpResponse(status=404)\n \n serializer = GetCreatureSerializer(creatures, many=True)\n resp = {\n \"objects\": serializer.data,\n \"page\": page,\n \"max_page\": max_page\n }\n\n return JsonResponse(resp, safe=False)\n\n\n@csrf_exempt\ndef user_list(request):\n \"\"\"\n List all users or add a user.\n \"\"\"\n if request.method == 'GET':\n users = User.objects.all()\n serializer = GetUserSerializer(users, many=True)\n return JsonResponse(serializer.data, safe=False)\n\n elif request.method == 'POST':\n data = JSONParser().parse(request)\n serializer = CreateUserSerializer(data=data)\n if not serializer.is_valid():\n return JsonResponse(serializer.errors, status=400)\n\n new_user = User(\n name=data['name'],\n login=data['login'],\n email=data['email'],\n about_myself=data['about_myself']\n )\n new_user.set_password(data['password']) # also saves the instance\n\n serializer_return = GetUserSerializer(new_user)\n return JsonResponse(serializer_return.data, safe=False, status=201)\n\n# @is_authorized\n@csrf_exempt\ndef user_details(request, id):\n \"\"\"\n Retrieve, update or delete a user.\n \"\"\"\n try:\n user = User.objects.get(id=id)\n except User.DoesNotExist:\n return HttpResponse(status=404)\n\n if request.method == 'GET':\n serializer = GetUserSerializer(user)\n return JsonResponse(serializer.data)\n\n elif request.method == 'PUT':\n data = JSONParser().parse(request)\n serializer = 
UpdateUserSerializer(user, data=data)\n        if not serializer.is_valid():\n            return JsonResponse(serializer.errors, status=400)\n        user.name = data['name']\n        user.email = data['email']\n        user.about_myself = data['about_myself']\n        user.save()\n\n        serializer_return = GetUserSerializer(user)\n        return JsonResponse(serializer_return.data, safe=False)\n\n    elif request.method == 'DELETE':\n        user.delete()\n        return HttpResponse(status=204)\n\n\n# @is_authorized\n@csrf_exempt\ndef creature_details(request, id):\n    \"\"\"\n    Retrieve, update or delete a creature.\n    \"\"\"\n    try:\n        creature = Creature.objects.get(id=id)\n    except Creature.DoesNotExist:\n        return HttpResponse(status=404)\n\n    if request.method == 'GET':\n        serializer = GetCreatureSerializer(creature)\n        return JsonResponse(serializer.data)\n\n    elif request.method == 'PUT':\n        data = JSONParser().parse(request)\n        serializer = UpdateCreatureSerializer(creature, data=data)\n        if not serializer.is_valid():\n            return JsonResponse(serializer.errors, status=400)\n        creature.name = data['name']\n        creature.age = data['age']\n        creature.sex = data['sex']\n        creature.breed = data['breed']\n        creature.color_pattern = data['color_pattern']\n        creature.crossed_rainbow_bridge = data['crossed_rainbow_bridge']\n        creature.save()\n\n        serializer_return = GetCreatureSerializer(creature)\n        return JsonResponse(serializer_return.data, safe=False)\n\n    elif request.method == 'DELETE':\n        creature.delete()\n        return HttpResponse(status=204)\n\n\n@is_authorized\n@csrf_exempt\ndef create_creature(request, user_id):\n    \"\"\"\n    Create a new creature owned by a registered user.\n    \"\"\"\n    try:\n        user = User.objects.get(id=user_id)\n    except User.DoesNotExist:\n        return HttpResponse(status=404)\n\n    if request.method == 'POST':\n        data = JSONParser().parse(request)\n        serializer = CreateCreatureSerializer(data=data)\n        if not serializer.is_valid():\n            return JsonResponse(serializer.errors, status=400)\n\n        new_creature = Creature(\n            name=data['name'],\n            age=data['age'],\n            sex=data['sex'],\n            breed=data['breed'],\n            color_pattern=data['color_pattern'],\n            crossed_rainbow_bridge=data['crossed_rainbow_bridge'],\n            owner=user\n        )\n        new_creature.save()\n\n        serializer_return = GetCreatureSerializer(new_creature)\n        return JsonResponse(serializer_return.data, safe=False, status=201)\n\n@is_authorized\n@csrf_exempt\ndef get_user_creatures(request, id):\n    \"\"\"\n    Retrieve a user's creatures.\n    \"\"\"\n    try:\n        owner = User.objects.get(id=id)\n    except User.DoesNotExist:\n        return HttpResponse(status=404)\n\n    creatures = Creature.objects.filter(owner=owner)\n\n    serializer = GetCreatureSerializer(creatures, many=True)\n    return JsonResponse(serializer.data, safe=False)\n\n\n@csrf_exempt\ndef user_ratings(request, id):\n    \"\"\"\n    Retrieve user ratings.\n    \"\"\"\n    try:\n        user = User.objects.get(id=id)\n    except User.DoesNotExist:\n        return HttpResponse(status=404)\n\n    get_user_ratings = Review.objects.filter(\n        user__id=user.id)\n\n    if request.method == 'GET':\n        serializer = GetUserRatingsSerializer(get_user_ratings, many=True)\n        return JsonResponse(serializer.data, safe=False)\n\n\n# A user can only rate a creature once; an existing vote is updated in place below.\n@is_authorized\n@csrf_exempt\ndef rate_creature(request, id):\n    \"\"\"\n    Rate a creature; optionally add a comment.\n    \"\"\"\n    try:\n        creature = Creature.objects.get(id=id)\n    except Creature.DoesNotExist:\n        return HttpResponse(status=404)\n\n    if request.method == 'POST':\n        data = JSONParser().parse(request)\n        serializer = 
RateCreatureSerializer(data=data)\n if not serializer.is_valid():\n return JsonResponse(serializer.errors, status=400)\n\n try:\n user = User.objects.get(id=data['user_id'])\n except User.DoesNotExist:\n return HttpResponse(status=404)\n\n review = Review.objects.filter(\n user__id=user.id, creature__id=creature.id).first()\n if review:\n review.comment = data['comment']\n review.rating = data['rating']\n else:\n review = Review(\n creature=creature,\n user=user,\n comment=data['comment'],\n rating=data['rating']\n )\n review.save()\n\n serializer_return = GetCreatureSerializer(creature)\n return JsonResponse(serializer_return.data, safe=False, status=201)\n\n\n\n\n\n@csrf_exempt\ndef login(request):\n \"\"\"\n Login a user.\n \"\"\"\n if request.method == 'POST':\n data = JSONParser().parse(request)\n serializer = LoginSerializer(data=data)\n if not serializer.is_valid():\n return JsonResponse(serializer.errors, status=400)\n\n user = User.objects.get(login=data['login'])\n token = Token(user_id=user.id)\n token.save()\n serializer_return = TokenSerializer(token)\n return JsonResponse(serializer_return.data, safe=False, status=201)\n\n\n@is_authorized\n@csrf_exempt\ndef logout(request):\n \"\"\"\n Logout a user.\n \"\"\"\n if request.method == 'GET':\n result = request.META['HTTP_AUTHORIZATION']\n user_id = result.split(':')[0]\n uuid = result.split(':')[1]\n token = Token.objects.get(uuid=uuid)\n token.is_expired = True\n token.save()\n return HttpResponse(status=204)\n\n\n@csrf_exempt\ndef register_request_view(request):\n \"\"\"\n Register request from a user.\n \"\"\"\n if request.method == 'POST':\n data = JSONParser().parse(request)\n serializer = RegisterRequestSerializer(data=data)\n if not serializer.is_valid():\n return JsonResponse(serializer.errors, status=400)\n\n token = CreateAccountToken(email=data['email'])\n token.save()\n send_user_register_mail(data['email'], str(token.uuid))\n serializer_return = RegisterTokenSerializer(token)\n return JsonResponse(serializer_return.data, safe=False, status=201)\n\n\n@csrf_exempt\ndef register_view(request, token_uuid):\n \"\"\"\n Register a user.\n \"\"\"\n if request.method == 'POST':\n check_result = check_token_validity(CreateAccountToken, token_uuid)\n if check_result:\n return JsonResponse(check_result, status=403)\n\n else:\n print('good token')\n data = JSONParser().parse(request)\n serializer = CreateUserSerializer(data=data)\n if not serializer.is_valid():\n return JsonResponse(serializer.errors, status=400)\n\n new_user = User(\n name=data['name'],\n login=data['login'],\n email=data['email']\n )\n new_user.set_password(data['password']) # also saves the instance\n serializer_return = GetUserSerializer(new_user)\n return JsonResponse(serializer_return.data, safe=False, status=201)\n\n\n@csrf_exempt\ndef password_reset_request(request):\n \"\"\"\n Password reset request from a user.\n \"\"\"\n if request.method == 'POST':\n data = JSONParser().parse(request)\n serializer = PasswordResetRequestSerializer(data=data)\n if not serializer.is_valid():\n return JsonResponse(serializer.errors, status=400)\n\n user = User.objects.get(email=data['email'])\n token = PasswordResetToken(user=user)\n token.save()\n send_password_reset_mail(data['email'], str(token.uuid))\n serializer_return = PasswordResetTokenSerializer(token)\n return JsonResponse(serializer_return.data, safe=False, status=201)\n\n\n@csrf_exempt\ndef reset_password_view(request, token_uuid):\n \"\"\"\n Reset users password.\n \"\"\"\n if request.method == 'POST':\n 
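# check_token_validity appears to return an error payload for an expired/unknown token and a falsy value when the token is valid\n        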
check_result = check_token_validity(PasswordResetToken, token_uuid)\n        if check_result:\n            return JsonResponse(check_result, status=400)\n\n        token = PasswordResetToken.objects.get(uuid=token_uuid)\n        user = token.user\n        data = JSONParser().parse(request)\n        serializer = PasswordUserSerializer(user, data=data)\n        if not serializer.is_valid():\n            return JsonResponse(serializer.errors, status=400)\n        user.set_password(data['password'])  # also saves the instance\n        serializer_return = GetUserSerializer(user)\n        return JsonResponse(serializer_return.data, safe=False, status=201)\n\n\n@csrf_exempt\ndef search_creature(request):\n    if request.method == 'POST':\n        data = JSONParser().parse(request)\n        serializer = SearchCreatureSerializer(data=data)\n        if not serializer.is_valid():\n            return JsonResponse(serializer.errors, status=400)\n\n        creatures = Creature.objects.filter(\n            Q(name__icontains=data['search_field']) |\n            Q(breed__icontains=data['search_field'])\n        )\n        serializer_return = GetCreatureSerializer(creatures, many=True)\n        return JsonResponse(serializer_return.data, safe=False, status=201)\n\n\n\n\nclass FileUploadView(APIView):\n    \"\"\"\n    Retrieve all photos or upload a photo.\n    \"\"\"\n    parser_classes = (MultiPartParser, FormParser)\n\n    @is_authorized_photo\n    def get(self, request, creature_id, *args, **kwargs):\n        try:\n            creature = Creature.objects.get(id=creature_id)\n        except Creature.DoesNotExist:\n            return HttpResponse(status=404)\n\n        files = File.objects.filter(creature__id=creature.id)\n        file_serializer = FileSerializer(files, many=True)\n        return Response(file_serializer.data)\n\n    def post(self, request, creature_id, *args, **kwargs):\n\n        try:\n            creature = Creature.objects.get(id=creature_id)\n        except Creature.DoesNotExist:\n            return HttpResponse(status=404)\n        # Weird stuff; has to be the 'creature' field for its integer value\n        request.data.update({'creature': creature.id})\n\n        # Add a good-enough unique-ish suffix to file name, before the extension\n        if len(request.data['file'].name) < 4:\n            return HttpResponse(status=400)\n        file_name_split = [\n            request.data['file'].name[:-4], request.data['file'].name[-4:]]\n        file_name_split[0] += '_' + str(uuid4())[:8]\n        request.data['file'].name = ''.join(file_name_split)\n\n        file_serializer = FileSerializer(data=request.data)\n        if file_serializer.is_valid():\n            file_serializer.save()\n            return Response(file_serializer.data, status=status.HTTP_201_CREATED)\n        else:\n            return Response(file_serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\nclass FileUploadDetails(APIView):\n    \"\"\"\n    Retrieve, update or delete a photo instance.\n    \"\"\"\n    # @is_authorized\n    def get_object(self, id, creature_id):\n        try:\n            creature = Creature.objects.get(id=creature_id)\n        except Creature.DoesNotExist:\n            return HttpResponse(status=404)\n\n        try:\n            return File.objects.get(\n                id=id, creature__id=creature.id)\n        except File.DoesNotExist:\n            return HttpResponse(status=404)\n\n    # @is_authorized\n    def get(self, request, id, creature_id):\n        file = self.get_object(id, creature_id)\n        serializer = FileSerializer(file)\n        return Response(serializer.data)\n\n    # @is_authorized\n    # def put(self, request, id, creature_id):\n    #     file = self.get_object(id, creature_id)\n    #     serializer = FileSerializer(file, data=request.data)\n    #     if serializer.is_valid():\n    #         serializer.save()\n    #         return Response(serializer.data)\n    #     return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n    @is_authorized_photo\n    def delete(self, request, id, creature_id, format=None):\n        try:\n            file = File.objects.get(id=id, creature__id=creature_id)\n        except 
File.DoesNotExist:\n return HttpResponse(status=404)\n\n file.delete()\n delete_pig_photo(file)\n return Response(status=status.HTTP_204_NO_CONTENT)\n","repo_name":"m-szczepanska/Show_your_cavy","sub_path":"creatder/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":16122,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"25967260923","text":"f = open(\"p067_triangle.txt\", \"r\")\ntree = []\nfor x in f:\n y = x[:-1].split(\" \")\n z = []\n for item in y:\n while item[0] == \"0\":\n item = item[1:]\n z.append(int(item))\n tree.append(z)\nfor i in tree:\n print(i)\nindex_tree = list(range(len(tree) - 1))[::-1]\n\nfor i in index_tree:\n for j in range(i + 1):\n if tree[i + 1][j] > tree[i + 1][j + 1]:\n tree[i][j] += tree[i + 1][j]\n else:\n tree[i][j] += tree[i + 1][j + 1]\nprint(tree[0][0])\n","repo_name":"neelop/Public","sub_path":"projecteuler.net/067_treePathSum2_dyna.py","file_name":"067_treePathSum2_dyna.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"14883088935","text":"from flask import Flask, render_template, request, jsonify\nfrom sentiment_analysis import predict\n\napp = Flask(__name__)\n\n@app.route('/')\ndef home():\n return render_template('index.html')\n\n@app.route('/getemotion', methods=['POST'])\ndef review():\n review = request.json.get('data')\n if not review:\n return jsonify({\n 'status' : 'error', \n 'message' : 'Empty response'\n })\n\n emotion, url = predict(review)\n return jsonify({\n 'emotion':emotion ,\n 'url':url\n })\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)","repo_name":"CometConnect/svhdkjdh","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"33547683526","text":"from virtool.http.rights import MODIFY, READ, REMOVE\nfrom virtool.jobs.utils import JobRights, JobRightsDomain\n\n\nasync def test_job_rights_domain():\n domain = JobRightsDomain(\"dom\")\n\n domain.can_read(\"foo\", \"bar\", \"baz\")\n domain.can_read(\"baz\")\n domain.can_modify(\"foo\", \"bar\")\n domain.can_remove(\"foo\")\n\n assert domain.has_right(\"foo\", READ)\n assert domain.has_right(\"bar\", READ)\n assert domain.has_right(\"baz\", READ)\n assert domain.has_right(\"foo\", MODIFY)\n assert domain.has_right(\"bar\", MODIFY)\n assert domain.has_right(\"foo\", REMOVE)\n\n assert not domain.has_right(\"baz\", MODIFY)\n assert not domain.has_right(\"bar\", REMOVE)\n assert not domain.has_right(\"baz\", REMOVE)\n\n assert not domain.has_right(\"not\", READ)\n assert not domain.has_right(\"not\", MODIFY)\n assert not domain.has_right(\"not\", REMOVE)\n\n assert domain.as_dict() == {\n \"read\": [\"bar\", \"baz\", \"foo\"],\n \"modify\": [\"bar\", \"foo\"],\n \"remove\": [\"foo\"],\n }\n\n\nasync def test_job_rights():\n \"\"\"\n Ensure that all calls to methods on JobRights succeed given the input rights dictionary.\n\n \"\"\"\n rights = JobRights(\n {\n \"analyses\": {\"read\": [\"foo\", \"bar\", \"baz\"], \"modify\": [\"baz\"]},\n \"samples\": {\"read\": [\"foo\"]},\n \"subtractions\": {\"read\": [\"bar\"], \"modify\": [\"bar\"], \"remove\": [\"bar\"]},\n \"uploads\": {\n \"read\": [\"foo\", \"baz\"],\n \"modify\": [\"foo\", \"baz\"],\n \"remove\": [\"foo\", \"baz\"],\n },\n \"references\": {\"read\": [\"foo\"], \"modify\": [\"foo\"]},\n }\n )\n\n assert 
rights.as_dict() == {\n \"analyses\": {\"modify\": [\"baz\"], \"read\": [\"bar\", \"baz\", \"foo\"]},\n \"references\": {\"modify\": [\"foo\"], \"read\": [\"foo\"]},\n \"samples\": {\"read\": [\"foo\"]},\n \"subtractions\": {\"modify\": [\"bar\"], \"read\": [\"bar\"], \"remove\": [\"bar\"]},\n \"uploads\": {\n \"modify\": [\"baz\", \"foo\"],\n \"read\": [\"baz\", \"foo\"],\n \"remove\": [\"baz\", \"foo\"],\n },\n }\n\n all_combos = {\n \"analyses\": [(\"foo\", READ), (\"bar\", READ), (\"baz\", READ), (\"baz\", \"modify\")],\n \"indexes\": [],\n \"samples\": [(\"foo\", READ)],\n \"subtractions\": [(\"bar\", READ), (\"bar\", MODIFY), (\"bar\", REMOVE)],\n \"uploads\": [\n (\"foo\", READ),\n (\"baz\", READ),\n (\"foo\", MODIFY),\n (\"baz\", MODIFY),\n (\"foo\", REMOVE),\n (\"baz\", REMOVE),\n ],\n \"references\": [(\"foo\", READ), (\"foo\", MODIFY)],\n }\n\n for name, combos in all_combos.items():\n for id_ in (\"foo\", \"baz\", \"baz\", \"not\"):\n for right in (READ, MODIFY, REMOVE):\n assert getattr(rights, name).has_right(id_, right) is (\n (id_, right) in combos\n )\n","repo_name":"ryanfang5/virtool","sub_path":"tests/jobs/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":2831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"83"} +{"seq_id":"70213012431","text":"from Jogador.personagens import *\nfrom Inimigos.inimigo import *\n\n\n\nprint(\"-\"* 20, \"Jogador\", \"-\"* 20)\nguerreiroteste = Guerreiro()\nprint(guerreiroteste.vida)\n\nguerreiroteste.andar()\nguerreiroteste.atacar()\nguerreiroteste.abrir_inventario()\nguerreiroteste.consumir()\n\nprint()\nprint(\"-\"* 20, \"Inimigo\", \"-\"* 20)\nminotauro = Minotauro()\n\nminotauro.Seguir(guerreiroteste.pos)\nminotauro.Atacar(guerreiroteste)\n\n\nprint(guerreiroteste.vida)\n\n","repo_name":"MajimbaV/Atividade-Avaliativa-Terceiro-Bimestre-JD","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"70078048593","text":"from flask import Flask, request\nfrom datetime import datetime, date\nimport time\nimport pandas_market_calendars as mcal\n\napp = Flask(__name__)\n\nDATE_FMT = \"%Y-%m-%d\"\nDEFAULT_START_DATE = date(1817, 3, 7).strftime(DATE_FMT)\n\ndef list_trading_days():\n\tstart_date_str = request.args.get('start_date') or DEFAULT_START_DATE\n\tend_date_str = request.args.get('end_date') or datetime.fromtimestamp(time.time()).strftime(DATE_FMT)\n\n\tstart_date_obj = time.strptime(start_date_str, DATE_FMT)\n\tend_date_obj = time.strptime(end_date_str, DATE_FMT)\n\n\tif start_date_obj > end_date_obj:\n\t\treturn {\"trading_days\": [], \"error\": \"start_date > end_date\"}\n\n\tdates = mcal.get_calendar(\"NYSE\").valid_days(start_date=start_date_str,\n\t\t\t\t\t\t\t\t\t\t\tend_date=end_date_str)\n\tresponse = []\n\tfor x in dates:\n\t\tresponse.append(x.to_pydatetime().strftime(DATE_FMT))\n\n\treturn {\"trading_days\": response}\n\n@app.route('/trading_days', methods=['GET'])\ndef trading_days():\n\tif request.method == 'GET':\n\t\treturn list_trading_days()\n","repo_name":"edwardkarak/market_trading_days","sub_path":"trading_days_app.py","file_name":"trading_days_app.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"22440444962","text":"import asyncio\nfrom datetime import datetime\n\nfrom authentication.models import User\nfrom 
django.core.paginator import Paginator\nfrom django.db import connection\nfrom django.utils import timezone\nfrom drf_spectacular.utils import extend_schema\nfrom drf_yasg import openapi\nfrom drf_yasg.utils import swagger_auto_schema\nfrom post.utils import fetchFollowingDetails, getBasicDetails\nfrom push_notifications.models import GCMDevice\nfrom rest_framework.decorators import api_view, permission_classes\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\n\nfrom .models import *\nfrom .notificationResponse import *\n\n@extend_schema(methods=['post'],request = followRequestNotificationsRequest,\n responses={200: followRequestNotificationsResponse,\n 400 : ErrorResponseNotification})\n@api_view(['POST'])\n@permission_classes([IsAuthenticated,])\ndef followRequestNotifications(request):\n print(\"Inside followRequestNotifications\")\n try:\n user = User.objects.get(id = request.user.id)\n #notifDB = notifications.objects.filter(userId_id = user.id).values()\n\n dataToSend = []\n finalData = []\n #totalRecords=len(notifDB)\n page_size = 1 \n page = 1\n\n data = request.data\n \n temp = {}\n temp['userId'] = user.id\n temp['status'] = 0\n\n dataToSend = fetchFollowingDetails(temp)\n \n '''for i in range(len(notifDB)): \n temp = {}\n temp['notification'] = notifDB[i]['notification']\n temp['dateTime'] = notifDB[i]['date']\n temp['read'] = notifDB[i]['read']\n dataToSend.append(temp)'''\n\n '''try : \n page_size = request.GET['page_size']\n p = Paginator(dataToSend, page_size)\n page = request.GET.get('page', 1)\n finalData=p.page(page).object_list\n\n except Exception as e:\n print(\"Some error occured\" ,e)\n finalData = p.page(1).object_list'''\n \n return Response({\n 'status': 200,\n 'message': \"Success\",\n 'data' : dataToSend\n })\n\n except Exception as e:\n print(e)\n message=\"Failed to fetch followRequestNotifications \"\n return Response({\n 'status': 400,\n 'message': message,\n 'data':None\n })\n\n@extend_schema(methods=['post'],\n request = fetchActivityNotificationRequest,\n responses={200: fetchActivityNotificationResponse,\n 400 : ErrorResponseNotification})\n@api_view(['POST'])\n@permission_classes([IsAuthenticated,])\ndef fetchActivity(request):\n print(\"Inside fetchActivity\")\n try:\n user = User.objects.get(id = request.user.id)\n data = request.data\n \n finalData = []\n page_size = 1 \n page = 1\n\n notifDB = notifications.objects.filter(userId_id = user.id).values()\n #totalRecords=len(notifDB)\n for i in range(len(notifDB)): \n temp = {}\n temp['id'] = notifDB[i]['id']\n temp['notification'] = notifDB[i]['notification']\n temp['dateTime'] = notifDB[i]['date']\n temp['read'] = int(notifDB[i]['read'])\n temp['lastVisitedTime'] = notifDB[i]['lastVisitedTime']\n userData = User.objects.get(id=int(notifDB[i]['fromId_id']))\n \n temp['userId'] = str(userData.id)\n temp['username'] = userData.username\n temp['fullname'] = userData.first_name + \" \" +userData.last_name\n \n profileData = profile.objects.get(user_id = userData.id)\n temp['profileImage'] = profileData.profileImage.url\n\n # Mark every notification as read \n try :\n notifDataToUpdate = notifications.objects.get(id = notifDB[i]['id'],\n read = 0)\n notifDataToUpdate.read = 1\n notifDataToUpdate.lastVisitedTime = timezone.localtime(timezone.now())\n notifDataToUpdate.save()\n except Exception as e : \n pass\n \n finalData.append(temp)\n \n return Response({\n 'status': 200,\n 'message': \"Success\",\n 'data' : finalData\n })\n\n except Exception as e:\n 
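# Log the exception, then return a generic 400 response below\n        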
print(e)\n        message=\"Failed to fetch activity \"\n        return Response({\n            'status': 400,\n            'message': message,\n            'data':None\n        })\n\n\n\n    ","repo_name":"sachinas/Zipcho-Python-Backend","sub_path":"src/notificationService/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"22051107655","text":"#Exercise_4\n\n\"\"\"Returns True if word is made only of the given letters, else returns False.\"\"\"\ndef uses_only(word, string_of_letters):\n\tfor letter in word:\n\t\tif letter not in string_of_letters:\n\t\t\treturn False\n\treturn True\n\nuses_only('ace','a')\n\n","repo_name":"sandeepganti7/sandeepg7","sub_path":"task4.py","file_name":"task4.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"73197909072","text":"#!/usr/bin/python3\n\nfrom mitemp.mitemp_bt.mitemp_bt_poller import MiTempBtPoller\nfrom mitemp.mitemp_bt.mitemp_bt_poller import MI_TEMPERATURE, MI_HUMIDITY, MI_BATTERY\nfrom btlewrap.bluepy import BluepyBackend\nfrom bluepy.btle import BTLEException\nimport paho.mqtt.publish as publish\nimport traceback\nimport configparser\nimport os\nimport json\nimport datetime\n\nworkdir = os.path.dirname(os.path.realpath(__file__))\nconfig = configparser.ConfigParser()\nconfig.read(\"{0}/devices.ini\".format(workdir))\n\ndevices = config.sections()\n\n# Averages\naverages = configparser.ConfigParser()\naverages.read(\"{0}/averages.ini\".format(workdir))\n\nmessages = []\n\nfor device in devices:\n\n    mac = config[device].get(\"device_mac\")\n    poller = MiTempBtPoller(mac, BluepyBackend, ble_timeout=config[device].getint(\"timeout\", 10))\n\n    try:\n\n        temperature = poller.parameter_value(MI_TEMPERATURE)\n        humidity = poller.parameter_value(MI_HUMIDITY)\n        battery = poller.parameter_value(MI_BATTERY)\n\n        data = json.dumps({\n            \"temperature\": temperature,\n            \"humidity\": humidity,\n            \"battery\": battery\n        })\n\n        # Check averages\n        avg = []\n        average_count = config[device].getint(\"average\")\n        if average_count:\n            if mac in averages.sections():\n                avg = json.loads(averages[mac][\"avg\"])\n\n            # Add average\n            avg.insert(0, data)\n\n            # Strip data\n            avg = avg[0:average_count]\n\n            # Calc averages\n            temperature = 0\n            humidity = 0\n            battery = 0\n\n            for a in avg:\n                al = json.loads(a)\n                temperature += al[\"temperature\"]\n                humidity += al[\"humidity\"]\n                battery += al[\"battery\"]\n\n            temperature = round(temperature / len(avg), 1)\n            humidity = round(humidity / len(avg), 1)\n            battery = round(battery / len(avg), 1)\n\n            # Convert averages\n            averages[mac] = {}\n            averages[mac][\"avg\"] = json.dumps(avg)\n\n            # Rewrite data\n            data = json.dumps({\n                \"temperature\": temperature,\n                \"humidity\": humidity,\n                \"battery\": int(battery),\n                \"average\": len(avg)\n            })\n\n        print(datetime.datetime.now(), device, \" : \", data)\n        messages.append({'topic': config[device].get(\"topic\"), 'payload': data, 'retain': config[device].getboolean(\"retain\", False)})\n        availability = 'online'\n    except BTLEException as e:\n        availability = 'offline'\n        print(datetime.datetime.now(), \"Error connecting to device {0}: {1}\".format(device, str(e)))\n    except Exception as e:\n        availability = 'offline'\n        print(datetime.datetime.now(), \"Error polling device {0}. 
Device might be unreachable or offline.\".format(device))\n        # print(traceback.print_exc())\n    finally:\n        messages.append({'topic': config[device].get(\"availability_topic\"), 'payload': availability, 'retain': config[device].getboolean(\"retain\", False)})\n\n\n# Init MQTT\nmqtt_config = configparser.ConfigParser()\nmqtt_config.read(\"{0}/mqtt.ini\".format(workdir))\nmqtt_broker_cfg = mqtt_config[\"broker\"]\n\ntry:\n    auth = None\n    mqtt_username = mqtt_broker_cfg.get(\"username\")\n    mqtt_password = mqtt_broker_cfg.get(\"password\")\n\n    if mqtt_username:\n        auth = {\"username\": mqtt_username, \"password\": mqtt_password}\n\n    publish.multiple(messages, hostname=mqtt_broker_cfg.get(\"host\"), port=mqtt_broker_cfg.getint(\"port\"), client_id=mqtt_broker_cfg.get(\"client\"), auth=auth)\nexcept Exception as ex:\n    print(datetime.datetime.now(), \"Error publishing to MQTT: {0}\".format(str(ex)))\n\nwith open(\"{0}/averages.ini\".format(workdir), \"w\") as averages_file:\n    averages.write(averages_file)\n","repo_name":"algirdasc/xiaomi-ble-mqtt","sub_path":"data-read.py","file_name":"data-read.py","file_ext":"py","file_size_in_byte":3813,"program_lang":"python","lang":"en","doc_type":"code","stars":133,"dataset":"github-code","pt":"83"} +{"seq_id":"25511545249","text":"#!/usr/bin/env python\n# encoding: utf-8\n# Date: 16/4/23 11:48 AM\n# file: views_friend.py\n# Email: wangjian2254@icloud.com\n# Author: 王健\nfrom liyuim.im_tools import check_friend_relation, im_friend_commend\nfrom liyuim.models import Friend, FriendApply\nfrom util.jsonresult import get_result\nfrom util.loginrequired import client_login_required, check_request_parmes\n\n\n@check_request_parmes(page_index=(\"页码\", \"int\", 1), page_size=(\"页长度\", \"int\", 50))\n@client_login_required\ndef query_my_friend_list(request, page_index, page_size):\n    \"\"\"\n    Query my friend list, paginated\n    :param request:\n    :param page_index:\n    :param page_size:\n    :return:\n    Queries all of my friends, paginated\n    \"\"\"\n    query = Friend.objects.list_json().filter(owner=request.user).filter(is_active=True, is_black=False)\n\n    return get_result(True, None, query.get_page(page_index, page_size))\n\n\n@check_request_parmes(page_index=(\"页码\", \"int\", 1), page_size=(\"页长度\", \"int\", 50))\n@client_login_required\ndef query_friendapply_list(request, page_index, page_size):\n    \"\"\"\n    Query friend requests, paginated\n    :param request:\n    :param page_index:\n    :param page_size:\n    :return:\n    \"\"\"\n    query = FriendApply.objects.list_json().filter(friend=request.user).filter(is_active=True).order_by('-create_time')\n    return get_result(True, None, query.get_page(page_index, page_size))\n\n\n@check_request_parmes(user_id=(\"申请的好友用户id\", \"r,int\"), content=(\"申请内容\", \"r\"))\n@client_login_required\ndef apply_friend(request, user_id, content):\n    \"\"\"\n    Apply to add a friend\n    :param request:\n    :param user_id:\n    :return:\n    Applies to add a friend\n    by:王健 at:2016-04-24\n    Added the friend-change event\n    by:王健 at:2016-05-03\n    \"\"\"\n    friend_apply = FriendApply()\n    friend_apply.friend_id = user_id\n    friend_apply.content = content\n    friend_apply.owner = request.user\n    friend_apply.save()\n    im_friend_commend(\"apply_friend\", request.user.id, user_id, friend_apply.toJSON())\n    return get_result(True, None, friend_apply)\n\n\n@check_request_parmes(friendapply_id=(\"好友申请id\", \"r,int\"))\n@client_login_required\ndef pass_friendapply(request, friendapply_id):\n    \"\"\"\n    Approve a friend request\n    :param request:\n    :param friendapply_id:\n    :return:\n    Approves a friend request\n    by:王健 at:2016-04-24\n    Added the friend-change event\n    by:王健 at:2016-05-03\n    \"\"\"\n    try:\n        friendapply = FriendApply.objects.get(id=friendapply_id, owner=request.user, 
is_active=True)\n        friendapply.copy_old()\n        friendapply.status = 1\n        created, diff = friendapply.compare_old()\n        if diff:\n            friend, created = Friend.objects.get_or_create(friend=friendapply.friend, owner=request.user)\n            if not created:\n                friend.is_active = True\n                friend.save()\n            friend, created = Friend.objects.get_or_create(owner=friendapply.friend, friend=request.user)\n            if not created:\n                friend.is_active = True\n                friend.save()\n            im_friend_commend(\"pass_friendapply\", friendapply.owner_id, friendapply.friend_id, friendapply.toJSON())\n            return get_result(True, u'好友申请处理成功', friendapply)\n        else:\n            return get_result(False, u'已经处理过的申请,不能再次处理', friendapply)\n    except FriendApply.DoesNotExist:\n        return get_result(False, u'好友申请,不是发给您的,您无权处理')\n\n\n@check_request_parmes(friend_id=(\"好友id\", \"r,int\"))\n@client_login_required\ndef add_friend(request, friend_id):\n    \"\"\"\n    Add a friend directly\n    :param request:\n    :param friend_id:\n    :return:\n    Adds a friend directly\n    by:王健 at:2016-04-24\n    Added the friend-change event\n    by:王健 at:2016-05-03\n    \"\"\"\n    friend, created = Friend.objects.get_or_create(friend_id=friend_id, owner=request.user)\n    if not created:\n        friend.is_active = True\n        friend.save()\n    im_friend_commend(\"add_friend\", friend.owner_id, friend.friend_id, friend.toJSON())\n    return get_result(True, u'添加好友成功')\n\n\n@check_request_parmes(friendapply_id=(\"好友申请id\", \"r,int\"))\n@client_login_required\ndef reject_friendapply(request, friendapply_id):\n    \"\"\"\n    Reject (delete) a friend request\n    :param request:\n    :param friendapply_id:\n    :return:\n    Rejects (deletes) a friend request\n    by:王健 at:2016-04-24\n    Added the friend-change event\n    by:王健 at:2016-05-03\n    \"\"\"\n    try:\n        friendapply = FriendApply.objects.get(id=friendapply_id, owner=request.user, is_active=True)\n        friendapply.copy_old()\n        friendapply.is_active = False\n        friendapply.compare_old()\n        friendapply.save()\n        im_friend_commend(\"reject_friendapply\", friendapply.owner_id, friendapply.friend_id, friendapply.toJSON())\n        return get_result(True, u'好友申请删除成功', friendapply)\n    except FriendApply.DoesNotExist:\n        return get_result(False, u'好友申请,不是发给您的,您无权处理')\n\n\n@check_request_parmes(friend_id=(\"好友id\", \"r,int\"), nickname=(\"昵称\", \"\"))\n@client_login_required\n@check_friend_relation()\ndef modefy_friend_nickname(request, friend_id, nickname, friend):\n    \"\"\"\n    Modify a friend's nickname (remark name)\n    :param request:\n    :param friend_id:\n    :return:\n    Modifies a friend's nickname\n    by:王健 at:2016-04-24\n    Added the friend-change event\n    by:王健 at:2016-05-03\n    \"\"\"\n    friend.copy_old()\n    friend.nickname = nickname\n    friend.compare_old()\n    friend.save()\n    im_friend_commend(\"friend_change\", friend.owner_id, friend.friend_id, friend.toJSON())\n    return get_result(True, u'修改好友昵称成功', friend)\n\n\n@check_request_parmes(friend_id=(\"好友id\", \"r,int\"), is_black=(\"是否黑名单\", \"r,b\"))\n@client_login_required\n@check_friend_relation()\ndef mark_friend_black(request, friend_id, is_black, friend):\n    \"\"\"\n    Add or remove a friend from the blacklist\n    :param friend: the Friend object injected by the check_friend_relation decorator\n    :param is_black:\n    :param request:\n    :param friend_id:\n    :return:\n    Adds or removes a friend from the blacklist\n    by:王健 at:2016-04-24\n    Added the friend-change event\n    by:王健 at:2016-05-03\n    \"\"\"\n    friend.copy_old()\n    friend.is_black = is_black\n    friend.compare_old()\n    friend.save()\n    im_friend_commend(\"friend_change\", friend.owner_id, friend.friend_id, friend.toJSON())\n    return get_result(True, u'修改好友昵称成功', friend)\n\n\n@check_request_parmes(friend_id=(\"好友id\", \"r,int\"), is_muted=(\"是否免扰\", \"r,b\"))\n@client_login_required\n@check_friend_relation()\ndef mark_friend_muted(request, friend_id, is_muted, friend):\n    \"\"\"\n    Set do-not-disturb for a friend\n    :param friend: the Friend object injected by the check_friend_relation decorator\n    :param is_muted:\n    
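whether notifications from this friend are muted (do-not-disturb)\n    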
:param request:\n    :param friend_id:\n    :return:\n    Sets do-not-disturb for a friend\n    by:王健 at:2016-04-24\n    Added the friend-change event\n    by:王健 at:2016-05-03\n    \"\"\"\n    friend.copy_old()\n    friend.is_muted = is_muted\n    friend.compare_old()\n    friend.save()\n    im_friend_commend(\"friend_change\", friend.owner_id, friend.friend_id, friend.toJSON())\n    return get_result(True, u'修改好友昵称成功', friend)\n\n\n@check_request_parmes(page_index=(\"页码\", \"int\", 1), page_size=(\"页长度\", \"int\", 20))\n@client_login_required\ndef query_black_friend_list(request, page_index, page_size):\n    \"\"\"\n    Query the blacklist, paginated\n    :param page_size:\n    :param page_index:\n    :param request:\n    :return:\n    Queries the blacklist\n    by:王健 at:2016-04-24\n    \"\"\"\n    query = Friend.objects.list_json().filter(owner=request.user, is_active=True).filter(is_black=True)\n\n    return get_result(True, None, query.get_page(page_index, page_size))\n\n\n","repo_name":"BPC-LIYU/LiYuOA","sub_path":"liyuim/views_friend.py","file_name":"views_friend.py","file_ext":"py","file_size_in_byte":7722,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"7229277668","text":"# -*- coding: utf-8 -*-\nfrom numba import jit\n\nfrom common import methods_chooser as cs\nfrom common.commons import Cutoff\nimport common.results as res\nfrom common.presets import FoldersLocation as foldersLocation\nfrom helpers import logger, scores_cutoff as cutoff, time\nfrom evaluation import unsupervised\nimport pandas as pd\nfrom helpers.logger import mount_beauty_output\n\nn_features = 0\n\n\n#@jit\ndef run_and_evaluate_fs_methods(dataset, dataset_name, method, y, cols, variance_threshold, control):\n    logger.log(\"dataset after: \" + str(dataset.shape), False)\n\n    global n_features\n    n_features = dataset.shape[1]\n\n    best_nmi = control.initial_best_metric\n    max_nmi = control.initial_best_metric\n    max_acc = control.initial_best_metric\n    max_corrected_rand = control.initial_best_metric\n    best_corrected_rand = control.initial_best_metric\n    best_acc = control.initial_best_metric\n    best_f_measure = control.initial_best_metric\n    best_cutoff_point = control.initial_best_metric\n    best_inertia = control.initial_best_metric\n    number_of_clusters = 0\n\n    all_opt_to_save = pd.DataFrame()\n\n    result = []\n    best_ranking = []\n\n    all_method_config_ranks = run_fs_method(method, dataset, control.state_of_art)\n    count_configs = all_method_config_ranks.shape[1]\n\n    for config in range(0, count_configs):\n\n        partial_result = []\n        cutoff_options_list = pd.DataFrame()\n\n        logger.log(\"evaluate \" + method + \" setting \" + str(config + 1) + \" of \" + str(count_configs), True)\n\n        scores = all_method_config_ranks[:, config]\n        scores_df = pd.DataFrame({\"feature\": range(0, n_features), \"score\": scores})\n        scores_sorted = scores_df.sort_values(by=\"score\", ascending=False)\n\n        scores_values = pd.DataFrame(scores_sorted.iloc[:, 1])\n        scores_values.reset_index(drop=True, inplace=True)\n\n        logger.log(\"total sum of scores: \" + str(sum(scores_values.values)), False)\n\n        logger.log(\"total cutoff methods: \" + str(len(control.cutoff_methods)), False)\n\n        for cutoff_method in control.cutoff_methods:\n\n            cut_point = get_cut_point(cutoff_method, scores_values)\n\n            if cut_point == Cutoff.INFLEXION_BY_SILHOUETTE.name or (3 <= cut_point < n_features):\n\n                features_selected = scores_sorted.iloc[0:cut_point, 0]\n                dataset_reduced = dataset.iloc[:, features_selected]\n\n                logger.log(\n                    \"verify cut_off \" + str(cutoff_method) + \" from method: \" + method + \" config n:\" + str(config),\n                    False)\n\n                time.start_time()\n\n                
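# Cluster the reduced dataset and score it against y; the result object is assumed to expose\n                # avg_sil, nmi, acc, corrected_rand, f_measure, inertia and number_of_clusters, as used below\n                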
clustering_result = unsupervised.evaluation(x_selected=dataset_reduced.values, y=y, state_of_art=control.state_of_art)\n\n time.end_time(\"evaluate the model\", False)\n\n current_cutoff_result_df = pd.DataFrame({\n \"pos\": [cut_point],\n \"values\": [clustering_result.avg_sil],\n \"cut_method\": [cutoff_method],\n \"cut_point\": [cut_point],\n \"clustering_result\": [clustering_result]\n })\n\n cutoff_options_list = cutoff_options_list.append(current_cutoff_result_df)\n\n current_result = res.mount_partial_result(dataset_name,\n n_features,\n method,\n cutoff_method,\n cut_point,\n clustering_result)\n\n partial_result.append(current_result)\n\n if clustering_result.nmi > max_nmi:\n max_nmi = clustering_result.nmi\n\n if clustering_result.acc > max_acc:\n max_acc = clustering_result.acc\n\n if clustering_result.corrected_rand > max_corrected_rand:\n max_corrected_rand = clustering_result.corrected_rand\n\n # best_cutoff_point = get_point_by_inflexion(pd.DataFrame(cutoff_options_list.loc[:, \"values\"]))\n\n best_option = pd.DataFrame(cutoff_options_list.sort_values(by=\"values\", ascending=False).iloc[0, :])\n\n clustering_result = best_option.loc[\"clustering_result\", :][0]\n cut_point = best_option.loc[\"cut_point\", :][0]\n best_cutoff_method = best_option.loc[\"cut_method\", :][0]\n\n features_selected = scores_sorted.iloc[0:cut_point, 0]\n dataset_reduced = dataset.iloc[:, features_selected]\n\n partial_result_to_save = pd.DataFrame(partial_result, columns=res.get_column_names())\n all_opt_to_save = pd.concat([all_opt_to_save, partial_result_to_save])\n\n logger.log(\"\\n\\n<------------------------------------------------->\\n\" +\n \"The config (higher silhouette) has :\" +\n \"\\navg_sil: \" + str(clustering_result.avg_sil) +\n \"\\nnmi: \" + str(clustering_result.nmi) +\n \"\\n<--------------------------------------------------->\\n\\n\"\n , False)\n\n if clustering_result.avg_sil > control.best_silhouette:\n control.best_silhouette = clustering_result.avg_sil\n\n best_ranking = scores_sorted.loc[:, 'feature'].tolist()\n\n number_of_clusters = clustering_result.number_of_clusters\n\n best_inertia = clustering_result.inertia # current_result[5]\n best_nmi = clustering_result.nmi # current_result[12]\n best_corrected_rand = clustering_result.corrected_rand # current_result[14]\n control.best_cutoff_method = best_cutoff_method # current_result[3]\n best_cutoff_point = cut_point # current_result[4]\n best_f_measure = clustering_result.f_measure # current_result[15]\n best_acc = clustering_result.acc # current_result[13]\n control.best_config = config\n\n dataset_reduced.to_csv(foldersLocation.results.value + dataset_name + \"_after_FS_\" + method + \".csv\", sep=\" \")\n\n result.append([number_of_clusters, control.best_config, dataset_name, n_features, method, control.best_cutoff_method, best_cutoff_point, best_inertia,\n control.best_silhouette, best_nmi, max_nmi, best_acc, max_acc, best_corrected_rand, max_corrected_rand,\n best_f_measure])\n\n result = pd.DataFrame(result, columns=cols)\n\n result = result.sort_values(by=\"best_avg_sil\", ascending=False)\n\n result = pd.DataFrame(result.iloc[0, :]).T\n\n all_opt_to_save.to_csv(foldersLocation.results.value + \"result_all_opt_\" + method + \"_in_\" + dataset_name + \".csv\", sep=\" \")\n\n mount_beauty_output(dataset_name, control.best_silhouette, method, best_nmi, max_nmi, best_acc, max_acc)\n\n # rankings = best_ranking\n # config_cols = [\"config\", \"ranking\"]\n # rankings = pd.DataFrame(rankings, columns=config_cols)\n # 
best_ranking = rankings.loc[rankings['config'] == result.iloc[0, 0], 'ranking'].tolist()[0]\n\n return result, best_ranking\n\n\ndef run_fs_method(method, dataset, default_method_configs):\n if method == 'iDetect':\n numpy_data = dataset.values\n all_configs_by_method = cs.run_method(numpy_data.T, method, default_method_configs)\n else:\n numpy_data = dataset.values\n all_configs_by_method = cs.run_method(numpy_data, method, default_method_configs)\n\n return all_configs_by_method\n\n\ndef get_cut_point(cutoff_method, values):\n if cutoff_method == Cutoff.INFLEXION.name:\n cut_point = cutoff.get_cut_off_point_by_second_derivate(values, 4)\n elif cutoff_method == Cutoff.QUARTILE_1.name:\n cut_point = cutoff.get_cut_off_point_by_quartile(values, 1)\n elif cutoff_method == Cutoff.QUARTILE_2.name:\n cut_point = cutoff.get_cut_off_point_by_quartile(values, 2)\n elif cutoff_method == Cutoff.QUARTILE_3.name:\n cut_point = cutoff.get_cut_off_point_by_quartile(values, 3)\n elif cutoff_method == Cutoff.PERCENT_25.name:\n cut_point = cutoff.get_cut_off_point_by_percent(values, 0.25)\n elif cutoff_method == Cutoff.PERCENT_45.name:\n cut_point = cutoff.get_cut_off_point_by_percent(values, 0.45)\n elif cutoff_method == Cutoff.PERCENT_65.name:\n cut_point = cutoff.get_cut_off_point_by_percent(values, 0.65)\n elif cutoff_method == Cutoff.PERCENT_85.name:\n cut_point = cutoff.get_cut_off_point_by_percent(values, 0.85)\n elif cutoff_method == Cutoff.ALL_FEA.name:\n cut_point = n_features\n else:\n cut_point = cutoff_method\n\n return cut_point\n\n","repo_name":"marcosd3souza/FSMethodology","sub_path":"src/main/execution/selection.py","file_name":"selection.py","file_ext":"py","file_size_in_byte":8612,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"83"} +{"seq_id":"16714381754","text":"#!/usr/bin/env python\n\n\"\"\"\n❯ ./p1.py input.short\n5\n❯ ./p1.py input.long\n1548\n\"\"\"\n\nimport fileinput\nimport typing\n\n\nclass Instruction(typing.NamedTuple):\n function: typing.Callable\n value: int\n\n\ndef acc(value: int, accumulator: int, stack_ptr: int,\n seen: typing.Set[int], instructions: typing.List[Instruction]) -> int:\n return traverse(accumulator+value, stack_ptr+1, seen, instructions)\n\n\ndef nop(value: int, accumulator: int, stack_ptr: int,\n seen: typing.Set[int], instructions: typing.List[Instruction]) -> int:\n return traverse(accumulator, stack_ptr+1, seen, instructions)\n\n\ndef jmp(value: int, accumulator: int, stack_ptr: int,\n seen: typing.Set[int], instructions: typing.List[Instruction]) -> int:\n return traverse(accumulator, stack_ptr+value, seen, instructions)\n\n\nFUNCTDICT = {\n \"acc\": acc,\n \"nop\": nop,\n \"jmp\": jmp,\n}\n\n\ndef traverse(accumulator: int, stack_ptr: int, seen: typing.Set[int], instructions: typing.List[Instruction]) -> int:\n if stack_ptr in seen:\n return accumulator\n inst = instructions[stack_ptr]\n seen.add(stack_ptr)\n return inst.function(inst.value, accumulator, stack_ptr, seen, instructions)\n\n\ndef gather() -> typing.List[Instruction]:\n instructions: typing.List[Instruction] = []\n for line in fileinput.input():\n words = line.strip().split()\n instructions.append(Instruction(FUNCTDICT[words[0]], int(words[1])))\n return instructions\n\n\ndef main() -> None:\n instructions = gather()\n print(traverse(0, 0, set(), instructions))\n\n\nif __name__ == \"__main__\":\n 
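# Part 1: run until an instruction is about to execute a second time, then print the accumulator\n    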
main()\n","repo_name":"zapman449/AdventOfCode","sub_path":"2020/day08/p1.py","file_name":"p1.py","file_ext":"py","file_size_in_byte":1596,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"7369641870","text":"from collections import Counter\nfrom heapq import heappush, heappop\nclass Solution:\n def reorganizeString(self, S):\n \"\"\"\n :type S: str\n :rtype: str\n \"\"\"\n if not S:\n return S\n counter = Counter(S)\n max_val = max([c for _,c in counter.items()])\n counter = list(counter.items())\n counter = sorted(counter, key=lambda x: x[1], reverse=True)\n l = len(S)\n if max_val > (l/2) + (l%2):\n return \"\"\n ans = [\"\"]*max_val\n index = 0\n # print(counter)\n for char, count in counter:\n for j in range(0, count):\n ans[index] += char\n index = (index + 1)%max_val\n return \"\".join(ans)\n\n\n\n\ns = Solution()\nprint(s.reorganizeString(\"aab\"))\n","repo_name":"Rohithyeravothula/leetcode","sub_path":"rearange2.py","file_name":"rearange2.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"83"} +{"seq_id":"10705239264","text":"\"\"\"\nProjeto de Redes de Computadores I - PCS 3614\nRede de Blockchain\n\nMódulo: Miner.py\n\nFelipe Kenzo Shiraishi - 10262700\nTiago Santa Maria R. Marto - 9004289\n\"\"\"\n\nfrom cryptography.hazmat.backends import default_backend\nfrom cryptography.hazmat.primitives.asymmetric import rsa\nfrom cryptography.hazmat.primitives import serialization\nfrom cryptography.hazmat.primitives.asymmetric import padding\nfrom cryptography.hazmat.primitives import hashes\n\nfrom Crypto.PublicKey import RSA\nfrom Crypto.Cipher import PKCS1_OAEP\n\nimport datetime as d\nimport json\nimport socket as s\nimport base64\nimport hashlib as h\n\nDIFICULDADE = 10\nALVO = 2 ** (256 - DIFICULDADE)\nMAX_NONCE = 2 ** 32\n\nPORT_NUMBER = 8080\nIP_BOOK = 'localhost'\n\ndef load_private_key(deserialized_key):\n private_key = serialization.load_pem_private_key(\n deserialized_key,\n password=None,\n backend=default_backend())\n\n return private_key\n\ndef load_public_key(deserialized_public_key):\n public_key = serialization.load_pem_public_key(deserialized_public_key, default_backend())\n return public_key\n\ndef FETCH_request():\n BSP = {}\n BSP['method'] = \"FETCH\"\n BSP_JSON = json.dumps(BSP, ensure_ascii=False)\n\n soc = s.socket(s.AF_INET, s.SOCK_STREAM)\n\n soc.connect((IP_BOOK, PORT_NUMBER))\n soc.send(bytes(BSP_JSON, 'utf-8'))\n msg = soc.recv(4096)\n soc.close()\n return json.loads(msg)\n\ndef pegar_bloco():\n response = FETCH_request()\n if (response['response_code'] == 1):\n return False\n else:\n return response['block'], response['public_key']\n\ndef minerar_bloco(bloco):\n nonce = 0\n while (not(check_validity(nonce, bloco))):\n nonce += 1\n return nonce\n\n\ndef check_validity(nonce, block_to_solve):\n block_to_solve['mined_by'] = public_key\n block_to_solve['nonce'] = nonce\n hasher = h.sha256()\n\n if (nonce > MAX_NONCE):\n return False\n\n hasher.update(\n str(nonce).encode('utf-8') +\n str(block_to_solve['value']).encode('utf-8') +\n str(block_to_solve['last_hash']).encode('utf-8') +\n str(block_to_solve['datetime']).encode('utf-8') +\n str(block_to_solve['from']).encode('utf-8') +\n str(block_to_solve['to']).encode('utf-8') +\n str(block_to_solve['signature']).encode('utf-8') +\n str(block_to_solve['mined_by']).encode('utf-8') +\n str(block_to_solve['block_no']).encode('utf-8')\n )\n\n block_hash = 
hasher.hexdigest()\n if (int(block_hash, 16) <= ALVO):\n block_to_solve['hash'] = block_hash\n return True\n else:\n return False\n\ndef encrypt(msg, public_key):\n public_key = public_key.encode('utf-8')\n public_key = base64.b64decode(public_key)\n public_key_object = RSA.import_key(public_key)\n public_key_object = PKCS1_OAEP.new(public_key_object)\n crypt = public_key_object.encrypt(str(msg).encode('utf-8'))\n return crypt\n\ndef SOLVE_request(nonce, book_key):\n BSP = {}\n BSP['method'] = \"SOLVE\"\n BSP['from'] = public_key\n\n nonce_crypt = encrypt(nonce, book_key)\n nonce_crypt = base64.b64encode(nonce_crypt)\n nonce_crypt = nonce_crypt.decode('utf-8')\n\n BSP['nonce'] = nonce_crypt\n BSP_JSON = json.dumps(BSP, ensure_ascii=False)\n\n soc = s.socket(s.AF_INET, s.SOCK_STREAM)\n\n soc.connect((IP_BOOK, PORT_NUMBER))\n soc.send(bytes(BSP_JSON, 'utf-8'))\n msg = soc.recv(4096)\n soc.close()\n return json.loads(msg)\n\ndef enviar_resposta(bloco, book_key):\n return SOLVE_request(bloco, book_key)\n\nprint(\"Iniciando Mineração às {}\".format(d.date.today()))\nprint(\"Insira a sua chave pública\")\npublic_key = input()\nprint(\"Insira a sua chave privada\")\nprivate_key = input()\nwhile (True):\n bloco, book_key = pegar_bloco()\n if (bloco != ''):\n bloco_calculado = minerar_bloco(bloco)\n print(enviar_resposta(bloco_calculado, book_key))\n print(\"Bloco Minerado às {}\".format(d.date.today()))\n","repo_name":"90felipe09/Simple_Blockchain_Network","sub_path":"Miner.py","file_name":"Miner.py","file_ext":"py","file_size_in_byte":4035,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"9221878954","text":"import json\nimport requests\nimport datetime\n\nfrom MalardClient.MaskFilter import MaskFilter\n\ndef dateconverter(o):\n if isinstance(o, datetime.datetime):\n timestamp = datetime.datetime.timestamp(o)\n return timestamp\n\nclass DataSetQuery:\n def __init__(self, serverUrl, envName = \"DEVv2\" ):\n self.serverUrl = serverUrl\n self.envName = envName\n self.headers = {'Content-Type':'application/json'}\n def createEnvironment(self, name, cacheCdfPath, maskPublisherPath, pointCdfPath, mongoConnection, swathIntermediatePath, holdingBaseDir, dataBaseDir, deflateLevel = 1, serverVersion = 'v3' ):\n data = { 'name': name\n , 'cacheCdfPath': cacheCdfPath\n , 'maskPublisherPath': maskPublisherPath\n , 'pointCdfPath': pointCdfPath\n , 'mongoConnection' : mongoConnection\n , 'swathIntermediatePath' : swathIntermediatePath\n , 'holdingBaseDir' : holdingBaseDir\n , 'dataBaseDir' : dataBaseDir\n , 'deflateLevel' : deflateLevel\n , 'serverVersion' : serverVersion}\n\n jsonStr = json.dumps(data)\n setUrl = self.serverUrl + '/env/createenvironment/' + name\n response = requests.post(setUrl, data=jsonStr, headers=self.headers)\n return response.text\n def getEnvironment(self, name):\n getUrl = self.serverUrl + '/env/getenvironment/' + name\n response = requests.get(getUrl, headers=self.headers)\n return response.text\n def getParentDataSets(self):\n dsUrl = self.serverUrl + '/api/parentdatasets/' + self.envName\n response = requests.get(dsUrl, headers=self.headers)\n return response.text\n def getDataSets(self, parentName):\n dsUrl = self.serverUrl + '/api/datasets/' + self.envName + '/' + parentName\n response = requests.get(dsUrl, headers=self.headers)\n return response.text\n def getDataSetBoundingBox(self, parentDsName, dsName, region):\n dsUrl = self.serverUrl + '/api/boundingbox/' + self.envName + '/' + parentDsName + '/' + dsName + '/' + 
region\n response = requests.get(dsUrl, headers=self.headers)\n return response.text\n def getGridCells(self, parentDsName, dsName, region, minX, maxX, minY, maxY, minT, maxT, xCol='x', yCol='y' ):\n gcUrl = self.serverUrl + '/api/boundingbox/' + self.envName + '/' + parentDsName + '/' + dsName+ '/' + region\n bbox = { 'minX':minX, 'maxX':maxX, 'minY':minY, 'maxY':maxY, 'minT':minT,'maxT':maxT, 'xCol':xCol, 'yCol': yCol, 'extentFilter': MaskFilter().maskdict, 'maskFilters' : [] }\n jsonStr = json.dumps(bbox,default=dateconverter)\n response = requests.post(gcUrl, data=jsonStr, headers=self.headers)\n return response.text\n #similar to getGridCells except a result for each time slice is also returned.\n def getShards(self, parentDsName, dsName, region, minX, maxX, minY, maxY, minT, maxT, xCol='x', yCol='y' ):\n gcUrl = self.serverUrl + '/api/shards/' + self.envName + '/' + parentDsName + '/' + dsName + '/' + region\n bbox = { 'minX':minX, 'maxX':maxX, 'minY':minY, 'maxY':maxY, 'minT':minT,'maxT':maxT, 'xCol':xCol, 'yCol': yCol, 'extentFilter': MaskFilter().maskdict, 'maskFilters' : [] }\n jsonStr = json.dumps(bbox,default=dateconverter)\n response = requests.post(gcUrl, data=jsonStr, headers=self.headers)\n return response.text\n def getSwathDetailsFromId(self, parentDsName, dsName, region, swathId):\n url = self.serverUrl + '/api/swathdetailsfromid/' + self.envName + '/' + parentDsName + '/' + dsName + '/' + region + '/' + str(swathId)\n response = requests.get(url, headers=self.headers)\n return response.text\n def getSwathDetailsFromName(self, parentDsName, dsName, region, name):\n url = self.serverUrl + '/api/swathdetailsfromname/' + self.envName + '/' + parentDsName + '/' + dsName + '/' + region + '/' + name\n response = requests.get(url, headers=self.headers)\n return response.text\n def getSwathDetails(self, parentDsName, dsName, region):\n url = self.serverUrl + '/api/swathdetails/' + self.envName + '/' + parentDsName + '/' + dsName + '/' + region\n response = requests.get(url, headers=self.headers)\n return response.text\n def publishMask(self, sourcePath, fileName, parentDsName, dataSet, maskType, region, minX, minY, size ):\n url = self.serverUrl + '/mask/publishmask/' + self.envName + \"/\" + parentDsName + '/' + dataSet +'/' + maskType + '/' + region\n request = { 'sourceFilePath' : sourcePath, 'fileName':fileName, 'gridCell' : { 'minX':minX, 'minY':minY, 'size': size } }\n j = json.dumps(request)\n response = requests.post(url,data=j, headers=self.headers)\n return response.text\n def getMasks(self, parentDsName ):\n url = self.serverUrl + '/mask/gridmasks/' + self.envName + \"/\" + parentDsName\n response = requests.get(url, headers=self.headers)\n return response.text\n def getGridCellMasks(self, parentdataset, dataSet, maskType, region):\n url = self.serverUrl + '/mask/gridcells/' + self.envName + \"/\" + parentdataset + '/' + dataSet + '/' + maskType + '/' + region\n response = requests.get(url, headers=self.headers)\n return response.text\n def getGridCellMask(self, parentdataset, dataSet, maskType, region, minX, minY, size):\n url = self.serverUrl + '/mask/gridcellmask/' + self.envName + \"/\" + parentdataset + '/' + dataSet + '/' + maskType + '/' + region\n request = { 'minX':minX, 'minY':minY, 'size': size }\n j = json.dumps(request)\n response = requests.post(url, data=j, headers=self.headers)\n return response.text\n def publishGridCellStats(self, parentDsName, runName, minX, minY, size, statistics ):\n url = self.serverUrl + '/gridcellstats/publishgridcellstats/'+ 
self.envName + '/' + parentDsName + '/' + runName\n request = { 'gridCell' : { 'minX':minX, 'minY':minY, 'size': size }, 'statistics' : statistics }\n j = json.dumps(request)\n response = requests.post(url,data=j, headers=self.headers)\n return response.text\n def getAvailableRunStatistics(self, parentDsName ):\n url = self.serverUrl + '/gridcellstats/getavailablestatistics/' + self.envName + '/' + parentDsName\n response = requests.get(url, headers=self.headers)\n return response.text\n def getRunStatistics(self, parentDsName, runName ):\n url = self.serverUrl + '/gridcellstats/getrunstatistics/' + self.envName + '/' + parentDsName + '/' + runName\n response = requests.get(url, headers=self.headers)\n return response.text\n def getGridCellStatistics(self, parentdataset, runName, minX, minY, size):\n url = self.serverUrl + '/gridcellstats/getgridcellstatistics/' + self.envName + '/' + parentdataset + '/' + runName\n request = { 'minX':minX, 'minY':minY, 'size': size }\n j = json.dumps(request)\n response = requests.post(url, data=j, headers=self.headers)\n return response.text\n def getProjectionFromShortName(self, shortName ):\n url = self.serverUrl + '/projection/getprojectionfromshortname/' + self.envName + '/' + shortName\n response = requests.get(url, headers=self.headers)\n return response.text\n def getProjection(self, parentDataSetName, region ):\n url = self.serverUrl + '/projection/getprojection/' + self.envName + '/' + parentDataSetName + '/' + region\n response = requests.get(url, headers=self.headers)\n return response.text\n def publishProjection(self, shortName, proj4, conditions):\n url = self.serverUrl + '/projection/publishprojection/' + self.envName\n request = { 'shortName':shortName, 'proj4':proj4, 'conditions' : conditions }\n j = json.dumps(request)\n response = requests.post(url, data=j, headers=self.headers)\n return response.text\n def publishProjectionRegionMapping(self, parentDataSetName, region, shortName ):\n url = self.serverUrl + '/projection/publishregionmapping/' + self.envName\n request = { 'parentDataSetName': parentDataSetName, 'region':region, 'shortName':shortName }\n j = json.dumps(request)\n response = requests.post(url, data=j, headers=self.headers)\n return response.text\n def validateDataFiles(self, inputDir, startsWith, endsWith, columns ):\n url = self.serverUrl + '/validation/validate'\n request = { 'dir' : inputDir, 'startsWith' : startsWith, 'endsWith' : endsWith, 'expectedColumns' : columns }\n j = json.dumps(request)\n response = requests.post(url, data=j, headers=self.headers)\n return response.text\n","repo_name":"whigg/malard","sub_path":"python/MalardInterface/MalardClient/DataSetQuery.py","file_name":"DataSetQuery.py","file_ext":"py","file_size_in_byte":8739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"36732985085","text":"# -*- coding: utf-8 -*-\n\n# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\n'''\n\tTIPS: 由于是二叉搜索树, 在判定的时候, 只需要考虑一边即可.\n'''\n\nclass Solution(object):\n def lowestCommonAncestor(self, root, p, q):\n \"\"\"\n :type root: TreeNode\n :type p: TreeNode\n :type q: TreeNode\n :rtype: TreeNode\n \"\"\"\n return self.findPQ(root, p, q)\n \n def findPQ(self, root, p, q):\n \tif root is None or root == p or root == q:\n \t\treturn root\n\n \tif root.val > p.val and root.val > q.val:\n \t\treturn self.findPQ(root.left, p, q)\n\n \tif root.val < p.val and root.val 
< q.val:\n \t\treturn self.findPQ(root.right, p, q)\n\n \treturn root","repo_name":"zhiyu-he/algorithm-trip","sub_path":"growth/oj/leet_code/algorithms/235-lowest-common-ancestor-of-a-binary-search-tree.py","file_name":"235-lowest-common-ancestor-of-a-binary-search-tree.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"83"} +{"seq_id":"14583196979","text":"from trieste.objectives.multi_objectives import DiscBrakeDesign\nimport tensorflow as tf\nimport numpy as np\nfrom trieste.acquisition.multi_objective.utils import moo_nsga2_pymoo\nfrom trieste.acquisition.multi_objective.pareto import Pareto\npf = np.loadtxt('DiscBrakeDesign_PF_F.txt')\nideal_hv = Pareto(observations=pf).hypervolume_indicator(np.array([8.0, 4.0]))\nprint(ideal_hv)\nraise ValueError\n# class CRE23():\n# def __init__(self):\n# self.problem_name = 'CRE23'\n# self.n_objectives = 2\n# self.n_variables = 4\n# self.n_constraints = 4\n#\n# self.ubound = np.zeros(self.n_variables)\n# self.lbound = np.zeros(self.n_variables)\n# self.lbound[0] = 55\n# self.lbound[1] = 75\n# self.lbound[2] = 1000\n# self.lbound[3] = 11\n# self.ubound[0] = 80\n# self.ubound[1] = 110\n# self.ubound[2] = 3000\n# self.ubound[3] = 20\n#\n# def evaluate(self, x):\n# x = x * (self.ubound - self.lbound) + self.lbound\n# f = np.zeros(self.n_objectives)\n# g = np.zeros(self.n_constraints)\n#\n# x1 = x[0]\n# x2 = x[1]\n# x3 = x[2]\n# x4 = x[3]\n#\n# # First original objective function\n# f[0] = 4.9 * 1e-5 * (x2 * x2 - x1 * x1) * (x4 - 1.0)\n# # Second original objective function\n# f[1] = ((9.82 * 1e6) * (x2 * x2 - x1 * x1)) / (x3 * x4 * (x2 * x2 * x2 - x1 * x1 * x1))\n#\n# # Reformulated objective functions\n# g[0] = (x2 - x1) - 20.0\n# g[1] = 0.4 - (x3 / (3.14 * (x2 * x2 - x1 * x1)))\n# g[2] = 1.0 - (2.22 * 1e-3 * x3 * (x2 * x2 * x2 - x1 * x1 * x1)) / np.power((x2 * x2 - x1 * x1), 2)\n# g[3] = (2.66 * 1e-2 * x3 * x4 * (x2 * x2 * x2 - x1 * x1 * x1)) / (x2 * x2 - x1 * x1) - 900.0\n# # g = np.where(g < 0, -g, 0)\n#\n# return f, g\n\n# import numpy as np\n# print(CRE23().evaluate(np.array([0.0, 0.3, 0.2, 0.4])))\nprint(DiscBrakeDesign().objective()(tf.constant([[0.0, 0.3, 0.2, 0.4]])))\nprint(DiscBrakeDesign().constraint()(tf.constant([[0.0, 0.3, 0.2, 0.4]])))\nraise ValueError\nres = moo_nsga2_pymoo(DiscBrakeDesign().objective(), input_dim= 4, obj_num= 2,\n bounds= tf.convert_to_tensor(DiscBrakeDesign.bounds), popsize= 100,\n cons=DiscBrakeDesign().constraint(), cons_num=4, num_generation=1000)\nfrom matplotlib import pyplot as plt\nplt.scatter(res.fronts[:, 0], res.fronts[:, 1])\nplt.show()\n\nnp.savetxt('DiscBrakeDesign_PF_F.txt', res.fronts)\nnp.savetxt('DiscBrakeDesign_PF_X.txt', res.inputs)","repo_name":"TsingQAQ/pf2es","sub_path":"docs/exp/auxiliary_files/gen_ref_pf/DiscBrakeDesign.py","file_name":"DiscBrakeDesign.py","file_ext":"py","file_size_in_byte":2555,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"73631091792","text":"from flask import Flask, render_template\nimport fitbit\nimport datetime\nimport spotipy\nfrom spotipy.oauth2 import SpotifyClientCredentials\nimport spotipy.util as util\n\n# Fitbit stuff\nFITBIT_CLIENT_ID = '???'\nFITBIT_CLIENT_SECRET = '???'\n\n# Spotify stuff\nSPOTIFY_CLIENT_ID = '???'\nSPOTIFY_CLIENT_SECRET = '???'\nspotify_username = 't_mnguyen'\nredirect_uri = 'https://developer.spotify.com/dashboard/applications/2b7aa645d7b4400d95380a252017b3da'\nscope = 'user-library-read 
playlist-modify-public playlist-read-private'\n\nACCESS_TOKEN = '???'\nREFRESH_TOKEN = '???'\n\n# Fitbit client\nauth2_client = fitbit.Fitbit(FITBIT_CLIENT_ID, FITBIT_CLIENT_SECRET, oauth2=True, access_token=ACCESS_TOKEN, refresh_token=REFRESH_TOKEN)\n\n# Spotify client\nclient_credentials_manager = SpotifyClientCredentials(client_id = SPOTIFY_CLIENT_ID, client_secret = SPOTIFY_CLIENT_SECRET)\nsp = spotipy.Spotify(client_credentials_manager = client_credentials_manager)\ntoken = util.prompt_for_user_token(spotify_username, scope, SPOTIFY_CLIENT_ID, SPOTIFY_CLIENT_SECRET, redirect_uri)\nif token:\n sp = spotipy.Spotify(auth=token)\n\n# Start and end should be the start and end times of yesterday's workout, in strings.\ndef get_fit_statsHR(start, end):\n yesterday = str((datetime.datetime.now() - datetime.timedelta(days=1)).strftime(\"%Y%m%d\"))\n yesterday2 = str((datetime.datetime.now() - datetime.timedelta(days=1)).strftime(\"%Y-%m-%d\"))\n today = str(datetime.datetime.now().strftime(\"%Y%m%d\"))\n # Getting heart rate stuff from yesterday.\n fit_statsHR = auth2_client.intraday_time_series('activities/heart', base_date=yesterday2, detail_level='1min', start_time=start, end_time=end)\n return fit_statsHR\n\ndef get_yesterdays_bpm():\n yesterdays_bpm = []\n for bpm in fit_statsHR['activities-heart-intraday']['dataset']:\n yesterdays_bpm.append(bpm['value'])\n return yesterdays_bpm\n\n# playlist_id should be a string id.\ndef get_playlist(playlist_id):\n return sp.user_playlist(spotify_username, playlist_id)\n\ndef get_song_ids(playlist):\n return [song['track']['id'] for song in playlist['tracks']['items']]\n\ndef get_song_features(playlist):\n return [sp.audio_features(song['track']['id']) for song in playlist['tracks']['items']]\n\n# features returned from get_song_features.\ndef get_song_tempos(playlist, features):\n song_tempos = {}\n song_ids = get_song_ids(playlist)\n for i in range(len(song_ids)):\n song_tempos[song_ids[i]] = features[i][0]['tempo']\n return song_tempos\n\n# Should get a list of song ids that correspond with the pace of yesterday's workout.\ndef get_songs_pace_filtered(workout_bpm, song_tempos):\n song_ids = []\n song_added = False\n for bpm in workout_bpm:\n song_added = False\n for (song_id, tempo) in song_tempos.items():\n if (bpm - 5) <= tempo <= (bpm + 5):\n song_ids.append(song_id)\n # Don't reuse the song in the playlist.\n del song_tempos[song_id]\n song_added = True\n break\n # If none of the songs match that bpm, we'll just skip over it.\n return song_ids\n\nfit_statsHR = get_fit_statsHR(\"7:38\", \"8:05\")\n\n# If `entrypoint` is not defined in app.yaml, App Engine will look for an app\n# called `app` in `main.py`.\napp = Flask(__name__)\n\n@app.route('/')\ndef hello():\n \"\"\"Return a friendly HTTP greeting.\"\"\"\n # print(fit_statsHR)\n # return fit_statsHR\n # return str(fit_statsHR)\n # return str(get_yesterdays_bpm())\n liked_playlist = get_playlist('7dURqGU9FfEAV6FtpHhGMn?si=9Cx7dV8PQ0-NDmGNwR9N2Q')\n liked_song_features = get_song_features(liked_playlist)\n tempos = get_song_tempos(liked_playlist, liked_song_features)\n bpm = get_yesterdays_bpm()\n # tempos = get_song_tempos(liked_playlist, get_song_features(liked_playlist))\n # return bpm\n # return str(get_song_tempos(liked_playlist, liked_song_features))\n return str(get_songs_pace_filtered(bpm, tempos))\n\n\n# @app.route('/fitbit-auth')\n# def user_authorized():\n# \"\"\"Return a friendly HTTP greeting.\"\"\"\n# return render_template('authorized.html', name='authorized')\n\n\nif __name__ == 
'__main__':\n # This is used when running locally only. When deploying to Google App\n # Engine, a webserver process such as Gunicorn will serve the app. This\n # can be configured by adding an `entrypoint` to app.yaml.\n app.run(host='127.0.0.1', port=8080, debug=True)\n# [END gae_python37_app]\n","repo_name":"thumn/pacebeats","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"41407955075","text":"# -*- coding: utf-8 -*-\n# © 2018 Comunitea\n# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).\nfrom openerp import models, fields, api, exceptions, _\n\n\nclass AccountAnalyticAccountMonthReportWizard(models.TransientModel):\n\n _name = 'account.analytic.account.month.report.wizard'\n from_date = fields.Date(required=True)\n to_date = fields.Date(required=True)\n exploitation= fields.Many2one('res.company', domain=[('parent_id', '!=', False)])\n\n @api.multi\n def print_report(self):\n self.ensure_one()\n datas = {\n 'model': 'account.analytic.account.month.report.wizard',\n 'from_date': self.from_date,\n 'to_date': self.to_date,\n 'company_id': self.exploitation.id,\n 'ids': [self.id]\n }\n\n return {\n 'type': 'ir.actions.report.xml',\n 'report_name': 'analytic.account.month.report.xlsx',\n 'datas': datas,\n }\n","repo_name":"Comunitea/CMNT_00033_2015_COOP_IV","sub_path":"project-addons/custom_report/wizard/analytic_account_month_report_wizard.py","file_name":"analytic_account_month_report_wizard.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"13542156220","text":"#!/usr/bin/env python\n\"\"\"\nThis is part of the work-around for the lack\nof a 64 bit version of the AotfLibrary.dll file.\n\nHazen 12/13\n\"\"\"\n\nimport socket\nimport sys\n\nimport AOTF\n\nmy_aotf = AOTF.AOTF()\nmy_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nmy_socket.connect((\"127.0.0.1\", 9001))\n \nencoding = 'utf-8'\nrunning = True\nwhile running:\n next_cmd = my_socket.recv(1024).decode(encoding)\n if (next_cmd == \"shutdown\"):\n my_aotf.shutDown()\n my_socket.sendall(\"done\".encode(encoding))\n my_socket.close()\n running = False\n else:\n if my_aotf.live:\n response = my_aotf._sendCmd(next_cmd)\n my_socket.sendall(response.encode(encoding))\n else:\n my_socket.sendall(\"Invalid\".encode(encoding))\n\n#\n# The MIT License\n#\n# Copyright (c) 2013 Zhuang Lab, Harvard University\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n \n","repo_name":"acycliq/storm-control","sub_path":"storm_control/sc_hardware/crystalTechnologies/AOTF32Bit.py","file_name":"AOTF32Bit.py","file_ext":"py","file_size_in_byte":1891,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"83"} +{"seq_id":"25197474297","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nModule: tests\nFile: test_main.py\nCreator: Nick Geense\nDate: 23-11-2016\n\nUnit Tests for Main Rush Hour Program.\n\n\"\"\"\n\n\nimport os.path\nimport unittest\nfrom unittest.mock import patch, MagicMock\n\nfrom rushhour_solver.main import RushHour\nfrom rushhour_solver.game_components import Board\nfrom rushhour_solver.parser import RushHourParser\n\n\nclass TestRushHour(unittest.TestCase):\n\n def setUp(self):\n root_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n self.valid_puzzle = os.path.join(root_dir, 'tests', 'puzzle_valid.txt')\n self.invalid_puzzle = os.path.join(root_dir, 'tests', 'puzzle_invalid.txt')\n\n def test_rushhour_init(self):\n rushhour = RushHour(self.valid_puzzle)\n self.assertEqual(rushhour.filename, self.valid_puzzle)\n\n def test_rushhour_set_valid_filename(self):\n with self.assertRaises(ValueError):\n rushhour = RushHour('not/a/real/path')\n\n def test_rushhour_get_board_returns_board(self):\n with patch.object(RushHourParser, 'get_board', return_value=Board()) as mock_method:\n rushhour = RushHour(self.valid_puzzle)\n board = rushhour.get_board()\n self.assertIsInstance(board, Board)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"MazeFX/rushhour-solver","sub_path":"tests/test_main.py","file_name":"test_main.py","file_ext":"py","file_size_in_byte":1295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"13308268790","text":"\"\"\"\r\nPrograma maior que 5\r\nRequisito: Leia um número digitado pelo usuário e diga se ele é\r\nmaior do que 5.\r\nAutor: Eu\r\nData: hoje\r\nVersão: 0.0.1\r\n\"\"\"\r\n\r\n\r\n\r\n# Entrada\r\n\r\nnumero = float(input(\"\\nDigite um número real: \"))\r\n\r\n# Processamento\r\n\r\nif numero > 5:\r\n frase = f\"O número {numero} é maior que 5.\"\r\nelif numero == 5:\r\n frase = f\"O número {numero} é igual que 5.\"\r\nelse:\r\n frase = f\"O número {numero} é menor que 5.\"\r\n\r\n# Saida\r\n\r\nprint(frase)\r\n","repo_name":"ecompfin-ufrgs/intprogpython","sub_path":"unidade3/maior5/maior_que_5.py","file_name":"maior_que_5.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"pt","doc_type":"code","stars":8,"dataset":"github-code","pt":"83"} +{"seq_id":"27636166430","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jul 10 12:23:40 2016\n\n@author: diyadas\n\"\"\"\n \n## from https://docs.python.org/3/library/xml.etree.elementtree.html#tutorial\n\nimport xml.etree.ElementTree as ET\n\ntree = ET.parse('simplewiki-20160701-pages-articles-multistream.xml')\nroot = tree.getroot()\n\nN = len(root.getchildren())-1\nprint('There are '+ N +' pages.')\n\npages = [child[0].text for child in root]\n\n\ncolons = [title for title in pages if ':' in title]\nlen(colons)\n\nfor child in root:\n if child[1]=='0': #get articles\n for elem in child.iter(tag='{http://www.mediawiki.org/xml/export-0.10/}text'): #get article 
text\n text = elem.text\n \n\n#for child in root:\n #print(child.tag, child.attrib)\n #print(child.attrib)\n \n#for item in root.iter('neighbor'):\n# print(item) \n# print(item.attrib)\n \n","repo_name":"tristanlmiller/Topic-Ontology","sub_path":"Archived/parsexml.py","file_name":"parsexml.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"83"} +{"seq_id":"42434453535","text":"from util import *\n\n\n@apply\ndef apply(given):\n A, B = given.of(Element)\n\n return Equal({A} & B, {A})\n\n\n@prove\ndef prove(Eq):\n from axiom import sets\n\n e = Symbol(integer=True)\n s = Symbol(etype=dtype.integer)\n Eq << apply(Element(e, s))\n\n Eq << sets.el.imply.subset.apply(Eq[0], simplify=False)\n\n Eq << sets.subset.imply.eq.intersect.apply(Eq[-1])\n\n\nif __name__ == '__main__':\n run()\n\n# created on 2020-10-28\n","repo_name":"cosmosZhou/sympy","sub_path":"axiom/sets/el/imply/eq/intersect.py","file_name":"intersect.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"83"} +{"seq_id":"34507467544","text":"import turtle\nfrom turtle import Turtle, Screen\nimport pandas\n\nscreen = Screen()\nscreen.title(\"Iran Provinces Game\")\nimg = \"iran.gif\"\nscreen.addshape(img)\nturtle.shape(img)\n\n#for find location on the map\n# def get_mouse_click_coor(x, y):\n# print(x, y)\n#\n# turtle.onscreenclick(get_mouse_click_coor)\n#\n# turtle.mainloop()\n\ndata = pandas.read_csv(\"provinces.csv\")\nprovinces = []\n\ngame_is_on = True\nc=0\ndata_list = data[\"province\"].to_list()\nwhile game_is_on:\n answer = screen.textinput(title=\"Guess the Province\",prompt=\"What's another province?\").capitalize()\n\n for i in data_list:\n if i == answer:\n d = data[data.province == answer]\n x = int(d.x)\n y = int(d.y)\n provinces.append(Turtle())\n provinces[c].penup()\n provinces[c].hideturtle()\n provinces[c].goto(x,y)\n provinces[c].color(\"Black\")\n\n provinces[c].write(f\"{answer} \", move=False, align=\"center\", font=(\"Arial\",6, \"bold\"))\n c += 1\n\n\nscreen.exitonclick()\n","repo_name":"HediyeRaisy/Iran_Provinces_Game","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"42436457585","text":"from util import *\n\n\n@apply\ndef apply(le, contains_y):\n if le.is_Element:\n le, contains_y = contains_y, le\n y, a = le.of(LessEqual)\n _y, domain = contains_y.of(Element)\n assert y == _y\n b, c = domain.of(Interval)\n a = Min(c, a)\n return Element(y, Interval(b, a, left_open=domain.left_open, right_open=domain.right_open))\n\n\n@prove\ndef prove(Eq):\n from axiom import sets, algebra\n\n a, b, c, x, y = Symbol(real=True)\n Eq << apply(x <= a, Element(x, Interval(b, c)))\n\n Eq << sets.el_interval.given.et.apply(Eq[2])\n\n Eq << sets.el_interval.imply.et.apply(Eq[1])\n\n Eq << algebra.le.le.imply.le.min.rhs.apply(Eq[-1], Eq[0])\n\n\nif __name__ == '__main__':\n run()\n# created on 2020-11-27\n","repo_name":"cosmosZhou/sympy","sub_path":"axiom/sets/le/el/imply/el/intersect.py","file_name":"intersect.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"83"} +{"seq_id":"5474117152","text":"class Solution:\n def minSubArrayLen(self, target: int, nums: List[int]) -> int:\n _sum,j,ans=0,0,float('inf')\n for i in 
range(len(nums)):\n _sum+=nums[i]\n while(_sum>=target):\n ans=min(ans,i-j+1)\n _sum-=nums[j]\n j+=1\n if ans==float('inf'):\n return 0\n return ans\n \n# Time complexity - O(n)\n\n# Space complexity - O(1)\n \n","repo_name":"Sakesh-Pusuluri/Leetcode-solutions","sub_path":"minSubArrayLen.py","file_name":"minSubArrayLen.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"75072998672","text":"from django.views import View\nfrom django.shortcuts import render\nfrom django.db import IntegrityError\nfrom . import models\nfrom .forms import Myforms\n\n\nclass Login(View):\n def get(self, request):\n form = Myforms()\n return render(request, 'login-forms.html', {'form': form})\n\n def post(self, request):\n form = Myforms(request.POST) # 请求数据做参数\n if form.is_valid():\n wb = request.POST['wb']\n centent = {}\n username = form.cleaned_data['username']\n phone_valid = form.cleaned_data['phone'] # 推荐这么取验证后的数据\n # phone = request.POST['phone'] # 取原始数据\n city = form.cleaned_data['choice']\n try:\n wb_name = models.Wangba.objects.filter(id=wb).first() # 不加.first()是一个可迭代的QuerySet,这里取出第一个值\n except:\n return render(request, 'error.html', )\n centent['phone_valid'] = phone_valid\n centent['city'] = city\n centent['wb'] = wb_name\n centent['username'] = username\n try:\n models.UserInfo.objects.create(username=username, phone=phone_valid, city=city, wb=wb_name)\n except IntegrityError:\n return render(request, 'error2.html', )\n return render(request, 'index.html', centent)\n else:\n error = {}\n error['form'] = form\n error['error'] = form.errors\n\n return render(request, 'error.html', error)\n\n\n\nclass zhizhu_index(View):\n def get(self, request):\n return render(request, 'login.html')\n\n\nclass Shuju(View):\n def get(self, request):\n reg = request.GET.get('country')\n obj = models.Wangba.objects.filter(wb_for_id=reg)\n return render(request, 'city_dropdown_list_options.html', {'objs': obj})\n","repo_name":"jiqingfen110/hazhogkej","sub_path":"hazhongkeji/zhizhu_proxy/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1898,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"71620656912","text":"class Solution: # 96%, 53%\n\n def __init__(self, w: List[int]):\n for i in range(1, len(w)):\n w[i] += w[i - 1]\n self.w = w\n self.maxVal = w[-1]\n\n def pickIndex(self) -> int:\n tarVal = random.randint(1, self.maxVal)\n # Binary Search\n #return bisect.bisect_left(self.w, tarVal)\n l, h = 0, len(self.w) - 1\n while l < h:\n mid = (l + h) // 2\n if self.w[mid] >= tarVal:\n h = mid\n else:\n l = mid + 1\n return l\n \n \n\n\n# Your Solution object will be instantiated and called as such:\n# obj = Solution(w)\n# param_1 = obj.pickIndex()","repo_name":"DuskPiper/Code-Puzzle-Diary","sub_path":"LeetCode 0528 Random Pick with Weight.py","file_name":"LeetCode 0528 Random Pick with Weight.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"83"} +{"seq_id":"23141902655","text":"# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution(object):\n def deleteDuplicates(self, head):\n \"\"\"\n :type head: ListNode\n :rtype: ListNode\n \"\"\"\n if head == None or head.next == None:\n return head\n \n tracker = head\n while tracker != None:\n if tracker.next != None and 
tracker.val == tracker.next.val:\n tracker.next = tracker.next.next\n else:\n tracker = tracker.next\n \n return head\n ","repo_name":"itgoujie2/leetcode_python","sub_path":"remove_duplicates_from_sorted_list.py","file_name":"remove_duplicates_from_sorted_list.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"7368479790","text":"from collections import defaultdict\nclass Solution:\n def distinctSubseqII(self, s):\n \"\"\"\n :type S: str\n :rtype: int\n \"\"\"\n m=int(1e9+7)\n seen = defaultdict(list)\n ans = 0\n for idx, val in enumerate(s):\n \tif val not in seen:\n \t\tans = (2*ans + 1)%m\n \t\tseen[val].append(idx)\n \telse:\n \t\tseen[val].append(idx)\n \t\tprev = 0\n \t\tfor i in seen[val]:\n \t\t\td = i-prev\n \t\t\tans += ((d*(d+1))/2)%m\n \t\t\tprev = i\n \t\tans += 1\n return ans%m\n\n \n\ninp = \"\"\ninp = \"abca\"\ninp = \"abc\"\ninp = \"aaa\"\ninp = \"aba\"\ninp = \"baa\"\ninp = \"babb\"\nprint(Solution().distinctSubseqII(inp))\n ","repo_name":"Rohithyeravothula/leetcode","sub_path":"distinct_subsequences_count.py","file_name":"distinct_subsequences_count.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"83"} +{"seq_id":"32489442908","text":"import re\n\n\nr'''\nIf the input color string matches one of the pre-defined\ncolors, then the string is converted into a\nANSI colored string.\n\nFormat f'\\033[XXm{string}\\033[XXm'\n\n\\033 is an escape character (same as \\e)\ne.g. \\010 is the escape character for newline (\\n) or 'EOF'\nThis is why this doc-string starts with an 'r',\nwhich indicates a 'raw' string without formatting.\nOtherwise \\n would literally print a new line if\nthis string would be printed.\n\n\nWhen using \\033 the next character is [ which indicates\nthe start of a 'command'\n\nXXm is the command for a string format (such as color)\n34m is defined as blue\n32m is defined as green\n1m is defined as blinking (such as a prompt) etc.\n\nsee https://misc.flogisoft.com/bash/tip_colors_and_formatting\nfor a detailed list.\n\n\nIf the re.search can't find a match, the parsed\nstring won't be altered.\n\nLast, print() prints the string.\n\n'''\n\n\ndef color_print(color, string):\n if re.search(r'^red$', color):\n colored_string = f'\\033[31m{string}\\033[00m'\n elif re.search(r'^green$', color):\n colored_string = f'\\033[32m{string}\\033[00m'\n elif re.search(r'^yellow$', color):\n colored_string = f'\\033[33m{string}\\033[00m'\n elif re.search(r'^blue$', color):\n colored_string = f'\\033[34m{string}\\033[00m'\n elif re.search(r'^magenta$', color):\n colored_string = f'\\033[35m{string}\\033[00m'\n elif re.search(r'^cyan$', color):\n colored_string = f'\\033[36m{string}\\033[00m'\n elif re.search(r'^light_gray$', color):\n colored_string = f'\\033[37m{string}\\033[00m'\n elif re.search(r'^dark_gray$', color):\n colored_string = f'\\033[90m{string}\\033[00m'\n elif re.search(r'^light_red$', color):\n colored_string = f'\\033[91m{string}\\033[00m'\n elif re.search(r'^light_green$', color):\n colored_string = f'\\033[92m{string}\\033[00m'\n elif re.search(r'^light_yellow$', color):\n colored_string = f'\\033[93m{string}\\033[00m'\n elif re.search(r'^light_blue$', color):\n colored_string = f'\\033[94m{string}\\033[00m'\n elif re.search(r'^light_magenta$', color):\n colored_string = f'\\033[95m{string}\\033[00m'\n elif re.search(r'^light_cyan$', color):\n colored_string = 
f'\\033[96m{string}\\033[00m'\n elif re.search(r'^white$', color):\n colored_string = f'\\033[97m{string}\\033[00m'\n \n print(colored_string)\n\n\ndef print_color_options():\n color_print('red', 'red')\n color_print('green', 'green')\n color_print('yellow', 'yellow')\n color_print('blue', 'blue')\n color_print('magenta', 'magenta')\n color_print('cyan', 'cyan')\n color_print('light_gray', 'light_gray')\n color_print('dark_gray', 'dark_gray')\n color_print('light_red', 'light_red')\n color_print('light_green', 'light_green')\n color_print('light_yellow', 'light_yellow')\n color_print('light_blue', 'light_blue')\n color_print('light_magenta', 'light_magenta')\n color_print('light_cyan', 'light_cyan')\n color_print('white', 'white')\n","repo_name":"ChristopherKlix/logic_gates","sub_path":"color_print.py","file_name":"color_print.py","file_ext":"py","file_size_in_byte":3000,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"30239206885","text":"import random\nteste = 0\nteste_2 = 1\nteste_3 = 'Fora da zona'\nvalor = -233\nvalor_2 = -200\nresultado = 'ok'\n\n#for ajuste in range(-400,-200): \nif valor >= -400 and valor <= -200:\n teste = 10\nelse:\n teste = 3\nif valor_2 >= -400 and valor_2 <= -200:\n teste_2 = 10\nelse:\n teste_2 = 5\nif teste == teste_2:\n teste_3 = \"mesma zona\"\nprint(teste,teste_2,teste_3)\n\n\n\n\n\n","repo_name":"PauloRCTrindade/Jogos","sub_path":"Jogo carro/teste.py","file_name":"teste.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"32529946157","text":"\"\"\"Encoding of directory of raw ms images.\nWriting an xr.DataArray for each modality encoded with each hub module.\"\"\"\nimport logging\nimport os\nimport re\nimport sys\nimport traceback\nfrom collections import OrderedDict\n\nimport pandas as pd\nimport plac\nimport tensorflow as tf\n\nfrom mstc.processing import Compose, HubEncoder, Map, PNGReader\n\nassert sys.version_info >= (3, 6)\n\nHUB_MODULES = pd.Series(OrderedDict([\n # 1-10\n ('inception_v3_imagenet', 'https://tfhub.dev/google/imagenet/inception_v3/feature_vector/1'), # noqa\n # # ('mobilenet_v2', 'https://tfhub.dev/google/tf2-preview/mobilenet_v2/feature_vector/2') # noqa\n ('mobilenet_v2_100_224', 'https://tfhub.dev/google/imagenet/mobilenet_v2_100_224/feature_vector/2'), # noqa\n ('inception_resnet_v2', 'https://tfhub.dev/google/imagenet/inception_resnet_v2/feature_vector/1'), # noqa\n ('resnet_v2_50', 'https://tfhub.dev/google/imagenet/resnet_v2_50/feature_vector/1'), # noqa\n ('resnet_v2_152', 'https://tfhub.dev/google/imagenet/resnet_v2_152/feature_vector/1'), # noqa\n ('mobilenet_v2_140_224', 'https://tfhub.dev/google/imagenet/mobilenet_v2_140_224/feature_vector/2'), # noqa\n ('pnasnet_large', 'https://tfhub.dev/google/imagenet/pnasnet_large/feature_vector/2'), # noqa\n ('mobilenet_v2_035_128', 'https://tfhub.dev/google/imagenet/mobilenet_v2_035_128/feature_vector/2'), # noqa\n ('mobilenet_v1_100_224', 'https://tfhub.dev/google/imagenet/mobilenet_v1_100_224/feature_vector/1'), # noqa\n # 11-20\n ('mobilenet_v1_050_224', 'https://tfhub.dev/google/imagenet/mobilenet_v1_050_224/feature_vector/1'), # noqa\n ('mobilenet_v2_075_224', 'https://tfhub.dev/google/imagenet/mobilenet_v2_075_224/feature_vector/2'), # noqa\n # # ('inception_v3', 'https://tfhub.dev/google/tf2-preview/inception_v3/feature_vector/2') # noqa\n ('resnet_v2_101', 
'https://tfhub.dev/google/imagenet/resnet_v2_101/feature_vector/1'), # noqa\n # # ('quantops', 'https://tfhub.dev/google/imagenet/mobilenet_v1_100_224/quantops/feature_vector/1'), # noqa\n ('nasnet_large', 'https://tfhub.dev/google/imagenet/nasnet_large/feature_vector/1'), # noqa\n ('mobilenet_v2_100_96', 'https://tfhub.dev/google/imagenet/mobilenet_v2_100_96/feature_vector/2'), # noqa\n ('inception_v1', 'https://tfhub.dev/google/imagenet/inception_v1/feature_vector/1'), # noqa\n ('mobilenet_v2_035_224', 'https://tfhub.dev/google/imagenet/mobilenet_v2_035_224/feature_vector/2'), # noqa\n ('mobilenet_v2_050_224', 'https://tfhub.dev/google/imagenet/mobilenet_v2_050_224/feature_vector/2'), # noqa\n # 21-30\n ('mobilenet_v2_100_128', 'https://tfhub.dev/google/imagenet/mobilenet_v2_100_128/feature_vector/2'), # noqa\n ('nasnet_mobile', 'https://tfhub.dev/google/imagenet/nasnet_mobile/feature_vector/1'), # noqa\n ('inception_v3_inaturalist', 'https://tfhub.dev/google/inaturalist/inception_v3/feature_vector/1'), # noqa\n ('mobilenet_v1_025_128', 'https://tfhub.dev/google/imagenet/mobilenet_v1_025_128/feature_vector/1'), # noqa\n ('mobilenet_v2_050_128', 'https://tfhub.dev/google/imagenet/mobilenet_v2_050_128/feature_vector/2'), # noqa\n ('inception_v2', 'https://tfhub.dev/google/imagenet/inception_v2/feature_vector/1'), # noqa\n ('mobilenet_v1_025_224', 'https://tfhub.dev/google/imagenet/mobilenet_v1_025_224/feature_vector/1'), # noqa\n ('mobilenet_v2_075_96', 'https://tfhub.dev/google/imagenet/mobilenet_v2_075_96/feature_vector/2'), # noqa\n ('mobilenet_v1_100_128', 'https://tfhub.dev/google/imagenet/mobilenet_v1_100_128/feature_vector/1'), # noqa\n ('mobilenet_v1_050_128', 'https://tfhub.dev/google/imagenet/mobilenet_v1_050_128/feature_vector/1'), # noqa\n # other\n ('amoebanet_a_n18_f448', 'https://tfhub.dev/google/imagenet/amoebanet_a_n18_f448/feature_vector/1'), # noqa\n]))\n\n\ntf.logging.set_verbosity('CRITICAL')\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\nPATTERN = re.compile(\n r'(?P.+?)(\\.mzXML\\.gz\\.image\\.0\\.)'\n r'(?P(itms)|(ms2\\.precursor=\\d{3,}\\.\\d{2}))'\n r'\\.png'\n)\n\n\ndef run_all_encodings_on_all_modalities(input_directory, output_directory,\n batch_size=4):\n output_directory = os.path.abspath(os.path.expanduser(output_directory))\n assert os.path.exists(output_directory)\n data_dir = os.path.abspath(os.path.expanduser(input_directory))\n\n sample_set = set()\n modality_set = set()\n for filepath in os.listdir(data_dir):\n groupdict = PATTERN.match(filepath).groupdict()\n sample_set.add(groupdict['sample_name'])\n modality_set.add(groupdict['modality'])\n\n cohort_identifier = os.path.basename(data_dir)\n glob_patterns = [\n os.path.join(data_dir, f'*{modality}*.png')\n for modality in modality_set\n ]\n\n modalities_reader = Map(\n PNGReader(directory=data_dir), map_reader='read modalities'\n )\n\n for module, url in HUB_MODULES.items():\n try:\n logger.info(\n f'{module} encoding starts '\n f'({HUB_MODULES.index.get_loc(module)+1}/{len(HUB_MODULES)})'\n )\n # each encoding of all modalities consumes reader\n # so read again instead of keeping in memory with BroadcastMap\n modalities_encoder = Map(\n HubEncoder(url, batch_size=batch_size,\n encoder_module_name=module)\n )\n pipeline = Compose(\n [modalities_reader, modalities_encoder],\n\n pipeline='for encoder, map encoder over all read modalities',\n pipeline_output='single modality, single encoder'\n )\n\n def is_encoding_required(pattern):\n \"\"\"function to filter glob_patterns with 
logging side effect\"\"\"\n modality = pattern.split('*')[1]\n if not os.path.exists(os.path.join(\n output_directory,\n cohort_identifier + '-' + module + '-' + modality + '.nc'\n )):\n return True\n else:\n logger.info(\n f'skipped modality {modality}, encoding exitst.'\n )\n return False\n required_glob_patterns = filter(is_encoding_required, glob_patterns) # noqa\n\n for modality_array in pipeline(required_glob_patterns):\n modality = PATTERN.match(\n modality_array.sample.data[0]\n ).groupdict()['modality']\n name = cohort_identifier + '-' + module + '-' + modality\n modality_array.name = name\n filename = os.path.join(output_directory, name + '.nc')\n\n modality_array.to_netcdf(filename)\n logger.info(f'{name}.nc was written')\n except KeyboardInterrupt:\n raise KeyboardInterrupt\n except Exception:\n logger.warn(f'FAIL with module {module} (url: {url})')\n traceback.print_exc()\n\n logger.info('Processing done.')\n\n\nif __name__ == \"__main__\":\n plac.call(run_all_encodings_on_all_modalities)\n","repo_name":"PhosphorylatedRabbits/mass_spec_trans_coding","sub_path":"experiments/run_encoding.py","file_name":"run_encoding.py","file_ext":"py","file_size_in_byte":7155,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"83"} +{"seq_id":"2615803850","text":"# -*- coding: UTF-8 -*-\n# By definition, the Strategy Pattern defines a family of\n# an algorithm and encapsulate each of them in you own class,\n# that way it will enable that the strategy can be interchanged.\n\n\nclass FeeCalculator(object):\n\n def calculate(self, budget, callback):\n amount = callback.apply(budget)\n print(amount)\n\n\nif __name__ == '__main__':\n from models import Budget\n from fee import ServiceFee, ProductFee\n\n budget = Budget(500.0)\n calculator = FeeCalculator()\n print('Applying ServiceFee:')\n calculator.calculate(budget, ServiceFee())\n print('Applying ProductFee:')\n calculator.calculate(budget, ProductFee())\n","repo_name":"clauda/til","sub_path":"DesignPatterns/Python/Strategy/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"22875471674","text":"import PySimpleGUI as sg\r\nimport requests\r\n \r\nauthCode = \"youtAuthorizationCode\"\r\ndeafoultChannel = \"deafoultChannel\"\r\n \r\nlayout = [\r\n [sg.Text('Channel', size =(15, 1)), sg.InputText()],\r\n [sg.Text('Authorization', size =(15, 1)), sg.InputText()],\r\n [sg.Text('Message', size =(15, 1)), sg.InputText()],\r\n [sg.Button('Ok'), sg.Button('Cancel')]\r\n ]\r\n\r\nwin = sg.Window(\"TEST\", layout)\r\n\r\nwhile True:\r\n event, values = win.read()\r\n \r\n channel = values[0].split(\"/\")[6] if \"/\" in values[0] else (deafoultChannel if values[0] == \"\" else values[0])\r\n auth = authCode if values[1] == \"\" else values[1]\r\n message = values[2] \r\n\r\n def send(channel=channel, message=message, auth=auth):\r\n payload = {\r\n \"content\": message\r\n }\r\n\r\n header = {\r\n \"authorization\": auth\r\n }\r\n URL = f\"https://discord.com/api/v9/channels/{channel}/messages\"\r\n requests.post(URL, data=payload, headers=header)\r\n \r\n if event == \"Ok\":\r\n send(channel, message, auth)\r\n ","repo_name":"Gamer5240/My-projects","sub_path":"discordMessage.py","file_name":"discordMessage.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"12240134268","text":"# 연산자의 
우선순위로 가능한 경우의 수를 모두 구하고, 각 격우마다 계산을 해서 최대값을 찾는다\n\nfrom itertools import permutations\n\ndef calculate(exp, ops) :\n exp_list = []\n num = ''\n \n for i in range(len(exp)) :\n if exp[i].isdigit() :\n num += exp[i]\n else :\n exp_list.append(num)\n exp_list.append(exp[i])\n num = ''\n exp_list.append(num)\n \n for op in ops :\n stack = []\n while len(exp_list) != 0 :\n temp = exp_list.pop(0)\n if temp == op :\n result = eval(str(stack.pop()) + op + str(exp_list.pop(0)))\n stack.append(result)\n else :\n stack.append(temp)\n exp_list = stack\n \n return abs(int(stack[0]))\n \n \ndef solution(expression):\n ops = ['-', '+', '*']\n ops = list(permutations(ops, 3))\n \n answer = 0\n for op in ops :\n answer = max(answer, calculate(expression, op))\n return answer","repo_name":"ckdals3121/Programmers","sub_path":"solutions/LEVEL2/수식 최대화/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1034,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"29029242036","text":"#!/bin/env python3\n\nrom = open(\"smb.nes\", \"ab\")\nbyte = b\"\\xff\"\n\nbeforeSize = len(open(\"smb.nes\", \"rb\").read())\npadSize = (40976 - beforeSize)\n\nprint(\"Padding \" + str(padSize) + \" byte(s)...\", end=\" \")\npad = byte * padSize\n\nrom.write(pad)\n\nrom.close()\n\nprint(\"Done!\")\n","repo_name":"pgattic/smb1-disasm","sub_path":"tools/padding.py","file_name":"padding.py","file_ext":"py","file_size_in_byte":267,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"83"} +{"seq_id":"29788338590","text":"from stanza.dataclasses import dataclass, replace\nfrom stanza.runtime import activity\nfrom stanza.util.logging import logger, LoggerHook\n\nfrom stanza.train import batch_loss\nfrom stanza.train.ema import EmaHook\nfrom stanza.reporting import Video\nfrom stanza.util import mat_jacobian\nfrom stanza.nn.mlp import MLP\n\nfrom stanza import partial, Partial\nfrom stanza.util.random import PRNGSequence\nfrom stanza.policies.transforms import ChunkedPolicy\n\nfrom jax.random import PRNGKey\nfrom diffusion_policy.util import load_data, knn_data, eval\nfrom typing import Tuple\n\nimport stanza.envs as envs\nimport stanza.policies as policies\nimport stanza.util\n\nimport optax\nimport jax\nimport jax.numpy as jnp\n\n@dataclass\nclass Config:\n env: str\n rng_seed: int = 42\n\n learning_rate: float = 1e-4\n epochs: int = 100\n batch_size: int = 128\n warmup_steps: int = 500\n\n num_trajectories: int = None\n obs_horizon: int = 1\n action_horizon: int = 1\n action_padding: int = 0\n\n jac_lambda: float = 0.\n zorder_lambda: float = 0.\n zorder_knn: int = 3\n\n lambda_param: str = None\n lambda_val: float = None\n\n net: str = \"mlp\"\n features: Tuple[int] = (128, 64, 32)\n\ndef loss(config, net, normalizer, state, params, rng, sample):\n logger.trace(\"Tracing loss function\", only_tracing=True)\n obs_normalizer = normalizer.map(lambda x: x.observation)\n action_normalizer = normalizer.map(lambda x: x.action)\n\n norm_obs = obs_normalizer.normalize(sample.observation)\n norm_action = action_normalizer.normalize(sample.action)\n pred_action = net.apply(params, norm_obs)\n pred_action_flat = jax.flatten_util.ravel_pytree(pred_action)[0]\n action_flat = jax.flatten_util.ravel_pytree(norm_action)[0]\n action_loss = jnp.mean(jnp.square(pred_action_flat - action_flat))\n\n stats = {}\n loss = action_loss\n stats[\"action_loss\"] = action_loss\n if config.jac_lambda > 0:\n def policy(x):\n norm_obs = 
obs_normalizer.normalize(x)\n norm_action = net.apply(params, norm_obs)\n return action_normalizer.unnormalize(norm_action)\n jac = mat_jacobian(policy)(sample.observation)\n J = sample.info.J\n jac_loss = jnp.mean(jnp.square(jac - J))\n stats[\"jac_loss\"] = jac_loss\n loss = loss + config.jac_lambda * jac_loss\n if config.zorder_lambda > 0:\n def diff_loss(x):\n per_obs = obs_normalizer.normalize(x.observation)\n eps = stanza.util.l2_norm_squared(per_obs, norm_obs)\n\n per_action = action_normalizer.normalize(x.action)\n action_diff = jax.tree_map(lambda x, y: x - y, \n per_action, norm_action)\n pred_per_action = net.apply(params, per_obs)\n pred_diff = jax.tree_map(lambda x, y: x - y,\n pred_per_action, pred_action)\n loss = stanza.util.l2_norm_squared(action_diff, pred_diff)/(eps + 1e-3)\n return loss\n zorder_loss = jax.vmap(diff_loss)(sample.info.knn)\n zorder_loss = jnp.mean(zorder_loss)\n stats[\"zorder_loss\"] = zorder_loss\n loss = loss + config.zorder_lambda * zorder_loss\n stats[\"loss\"] = loss\n return state, loss, stats\n\n@activity(Config)\ndef train_policy(config, repo):\n if config.lambda_param is not None:\n if config.lambda_param == \"jac\":\n config = replace(config, jac_lambda=config.lambda_val)\n elif config.lambda_param == \"zorder\":\n config = replace(config, zorder_lambda=config.lambda_val)\n exp = repo.create()\n env = envs.create(config.env)\n rng = PRNGSequence(config.rng_seed)\n data_db = repo.find(data_for=config.env).latest\n if data_db is None:\n logger.error(\"Unable to find data for {}\", config.env)\n return\n logger.info(\n \"Using environment [blue]{}[/blue] with config: {}\",\n config.env, config\n )\n with jax.default_device(jax.devices(\"cpu\")[0]):\n data, val_data, val_trajs, normalizer = load_data(\n data_db, num_trajectories=config.num_trajectories,\n obs_horizon=config.obs_horizon,\n action_horizon=config.action_horizon,\n action_padding=config.action_padding)\n sample = normalizer.normalize(data.get(data.start))\n if config.zorder_lambda > 0:\n data = knn_data(data, config.zorder_knn)\n val_data = knn_data(val_data, config.zorder_knn)\n # move to GPU\n data, sample, val_data, val_trajs, normalizer = jax.device_put(\n (data, sample, val_data, val_trajs, normalizer), device=jax.devices(\"gpu\")[0])\n logger.info(\"Dataset size: {} chunks\", data.length)\n\n if config.net == \"mlp\":\n net = MLP(name=\"net\", features=config.features,\n output_sample=sample.action)\n else:\n raise ValueError(f\"Unknown network {config.net}\")\n\n init_params = jax.jit(net.init)(next(rng), sample.observation)\n params_flat, _ = jax.flatten_util.ravel_pytree(init_params)\n logger.info(\"params: {}\", params_flat.shape[0])\n\n # Make loss function, training schedule\n loss_fn = Partial(partial(loss, config, net), normalizer)\n batch_size = min(config.batch_size, data.length)\n steps_per_epoch = (data.length // batch_size)\n epochs = max(config.epochs, 20_000 // steps_per_epoch + 1)\n train_steps = steps_per_epoch * epochs\n logger.info(\"Training for {} steps ({} epochs)\", train_steps, epochs)\n warmup_steps = min(config.warmup_steps, train_steps/2)\n lr_schedule = optax.join_schedules(\n [optax.linear_schedule(1e-4/500, config.learning_rate, warmup_steps),\n optax.cosine_decay_schedule(1e-4, train_steps - warmup_steps)],\n [warmup_steps]\n )\n optimizer = optax.adamw(lr_schedule, weight_decay=1e-5)\n\n ema_hook = EmaHook(\n decay=0.75\n )\n trainer = stanza.train.express(\n optimizer=optimizer,\n batch_size=batch_size,\n max_epochs=epochs,\n # hook related 
things\n validate_dataset=val_data,\n validate_batch_size=config.batch_size,\n validate_rng=next(rng),\n bucket=exp,\n train_hooks=[ema_hook]\n )\n logger.info(\"Initialized, starting training...\")\n results = trainer.train(data,\n loss_fn=batch_loss(loss_fn), \n rng_key=next(rng),\n init_params=init_params)\n params = results.ema_params\n policy = make_bc_policy(Partial(net.apply, params), normalizer,\n obs_chunk_length=config.obs_horizon,\n action_chunk_length=config.action_horizon)\n test_policy, test_reward = eval(val_trajs, env, policy, next(rng))\n\n N_trajs = jax.tree_flatten(val_trajs)[0][0].shape[0]\n for i in range(N_trajs):\n logger.info(f\"Rendering trajectory {i}\")\n val_traj = jax.tree_map(lambda x: x[i], val_trajs)\n expert_video = jax.vmap(env.render)(val_traj.states)\n exp.log({\"{}_expert\".format(i): Video(expert_video, fps=10)})\n test_traj = jax.tree_map(lambda x: x[i], test_policy)\n policy_video = jax.vmap(env.render)(test_traj.states)\n exp.log({\"{}_learned\".format(i): Video(policy_video, fps=10)})\n\n logger.info(\"Reward: {}\", test_reward)\n\n exp.add(\"test_reward\", test_reward)\n\ndef make_bc_policy(net_fn, normalizer, obs_chunk_length, action_chunk_length):\n def policy(input):\n obs_norm = normalizer.map(lambda x: x.observation)\n action_norm = normalizer.map(lambda x: x.action)\n obs = obs_norm.normalize(input.observation)\n actions = net_fn(obs)\n actions = action_norm.unnormalize(actions)\n return policies.PolicyOutput(actions)\n return ChunkedPolicy(policy,\n input_chunk_size=obs_chunk_length,\n output_chunk_size=action_chunk_length)","repo_name":"pfrommerd/stanza","sub_path":"projects/diffusion_policy_max/bc.py","file_name":"bc.py","file_ext":"py","file_size_in_byte":7799,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"40419062563","text":"import torch\nimport matplotlib.pyplot as plt\n\nx_data = torch.Tensor([[1.0],[2.0],[3.0]])\ny_data = torch.Tensor([[2.0],[4.0],[6.0]])\n\nclass LinearModel(torch.nn.Module):\n def __init__(self):\n super(LinearModel,self).__init__()\n self.linear = torch.nn.Linear(1,1)\n\n def forward(self,x):\n y_pred = self.linear(x)\n return y_pred\nmodel = LinearModel()\n\ncriterion = torch.nn.MSELoss(size_average=False)\noptimizer = torch.optim.Adam(model.parameters(),lr=0.1)\n\nloss_list = []\nfor epoch in range(100):\n y_pred = model(x_data)\n loss = criterion(y_pred,y_data)\n loss_list.append(loss.item())\n print(epoch,loss)\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\nprint('w= ',model.linear.weight.item())\nprint('b= ',model.linear.bias.item())\n\nx_test = torch.Tensor([[4.0]])\ny_test = model(x_test)\nprint('y_pred = ',y_test.data)\n\nplt.plot(loss_list)\nplt.title('The cost of Adam')\nplt.xlabel(\"epoch\")\nplt.ylabel(\"cost\")\nplt.show(block=True)","repo_name":"YiyangHuang-work/Pytorch-tutorial-liu2","sub_path":"cha5/pytorch_first.py","file_name":"pytorch_first.py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"83"} +{"seq_id":"4393207903","text":"max_sheep = 0\n\n\ndef dfs(visited, info, graph, route, sheep, wolf):\n global max_sheep\n if max_sheep < sheep:\n max_sheep = sheep\n\n for r in route:\n if not visited[r]:\n visited[r] = True\n if info[r] == 1:\n if sheep > wolf + 1:\n dfs(visited, info, graph, route.union(graph[r]), sheep, wolf + 1)\n else:\n dfs(visited, info, graph, route.union(graph[r]), sheep + 1, wolf)\n visited[r] = 
False\n\n\ndef solution(info, edges):\n global max_sheep\n max_sheep = 0\n answer = 0\n graph = {}\n for i in range(len(info)):\n graph[i] = set()\n\n for edge in edges:\n x, y = edge\n graph[x].add(y)\n\n visited = [False for _ in range(len(info))]\n visited[0] = True\n route = graph[0]\n dfs(visited, info, graph, route, 1, 0)\n answer = max_sheep\n return answer\n\n\nprint(solution([0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1],\n [[0, 1], [1, 2], [1, 4], [0, 8], [8, 7], [9, 10], [9, 11], [4, 3], [6, 5], [4, 6], [8, 9]]))\nprint(solution([0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0],\n [[0, 1], [0, 2], [1, 3], [1, 4], [2, 5], [2, 6], [3, 7], [4, 8], [6, 9], [9, 10]]))\n","repo_name":"ssafy6-nathan/algorithm-study","sub_path":"study/2022/22.09.20/EJH/양과 늑대.py","file_name":"양과 늑대.py","file_ext":"py","file_size_in_byte":1211,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"83"} +{"seq_id":"22106072420","text":"'''\n3. Label adalah sebuah widget Tkinter\nyang berfungsi untuk menampilkan teks untuk mendeskripsikan suatu section atau entry.\n'''\n\nfrom tkinter import *\n\n#1. Membuat GUI\n\nroot = Tk()\n\n#2. Costumize GUI\n\n#A. label widget\n# menampilkan teks di dalam GUI\nlabel1 = Label(root, text='ini adalah label 1')\nlabel2 = Label(root, text='ini adalah label 2')\n\nlabel1.pack()\nlabel2.pack()\n\n#a. Untuk pack()\n# label.pack(side=BOTTOM) untuk menempatkan label nya di bagian bawah\n# label.pack(side=TOP) untuk menempatkan label nya di bagian atas\n# label.pack(side=LEFT) untuk menempatkan label nya di bagian kiri\n# label.pack(side=RIGHT) untuk menempatkan label nya di bagian kanan\n\n#b. Untuk grid()\n# label1.grid(row=0, column=1)\n# label2.grid(row=1, column=2)\n\n#c. untuk place()\n# label1.place(x=130, y=50, height=50, width=250)\n# label2.place(x=130, y=120, height=50, width=250)\n\n#3. Menampillkan GUI\nroot.mainloop()","repo_name":"dhann4/Tutorial-Python-tkinter","sub_path":"A. 
Module di dalam tkinter/#03 Label Widget/Label.py","file_name":"Label.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"id","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"35829849861","text":"\"\"\"\n백준 1759번 : 암호 만들기\n\"\"\"\nfrom itertools import combinations\nimport sys\ninput = sys.stdin.readline\n\ndef is_print(word):\n mo = ['a', 'e', 'i', 'o', 'u']\n check_mo = 0\n check_not_mo = 0\n flag = 0\n\n for w in word:\n if w in mo:\n check_mo += 1\n if w not in mo:\n check_not_mo += 1\n if check_mo >= 1 and check_not_mo >= 2:\n flag = 1\n break\n\n if flag:\n print(\"\".join(word))\n\n\nL, C = map(int, input().split())\nletter = sorted(list(input().split()))\n\nfor c in combinations(letter, L):\n is_print(c)","repo_name":"CodingLeeSeungHoon/Python_Algorithm_TeamNote","sub_path":"BOJ_Solved/BOJ-1759.py","file_name":"BOJ-1759.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"83"} +{"seq_id":"18638326167","text":"N = int(input())\nS, A = [], []\nfor _ in range(N):\n s, a = input().split()\n S.append(s)\n A.append(int(a))\n\nminA_idx = A.index(min(A))\n\nfor i in range(N):\n print(S[(minA_idx+i)%N])","repo_name":"EijiToriki/atcorder","sub_path":"ABC_Answer/A/300_309/304.py","file_name":"304.py","file_ext":"py","file_size_in_byte":190,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"21389281570","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Sep 23 09:28:37 2022\r\n\r\n@author: daniela.castillo\r\n\"\"\"\r\nimport pandas as pd \r\nimport geopy.distance\r\nfrom scipy.sparse import csr_matrix\r\nfrom scipy.sparse.csgraph import shortest_path\r\nimport numpy as np\r\nimport googlemaps\r\n\r\n\r\ncabeceras=pd.read_excel(\"GEOCENTROS.xlsx\", converters={'COD_DANE':str})\r\naerop=pd.read_excel(\"AEROPUERTOS_COL.xlsx\", converters={'CODDANE_MP':str})\r\n\r\ngmaps = googlemaps.Client(key='your key')\r\n\r\nmun_aerop=list(aerop[\"CODDANE_MP\"])\r\n\r\ncabeceras.sort_values(by=[\"COD_DANE\"], inplace=True)\r\n\r\n\r\ndef google_distances(df): \r\n time_list = []\r\n distance_list = []\r\n origin_id_list = []\r\n destination_id_list = []\r\n for i in range(0, df.shape[0]):\r\n global result\r\n LatOrigin = df.iloc[i,4]\r\n LongOrigin = df.iloc[i,3]\r\n origin = (LatOrigin, LongOrigin)\r\n origin_id = df.iloc[i,2]\r\n for j in range(0, df.shape[0]):\r\n if df.iloc[i,0]==df.iloc[j,0]: \r\n time_list.append(0)\r\n distance_list.append(0)\r\n origin_id_list.append(origin_id)\r\n destination_id_list.append(origin_id)\r\n else: \r\n LatDestination = df.iloc[i,4]\r\n LongDestination = df.iloc[j,3]\r\n destination_id = df.iloc[j,2]\r\n destination = (LatDestination, LongDestination)\r\n try:\r\n result = gmaps.distance_matrix(origin, destination, mode = 'driving')\r\n result_distance = result[\"rows\"][0][\"elements\"][0][\"distance\"][\"value\"]\r\n result_time = result[\"rows\"][0][\"elements\"][0][\"duration\"][\"value\"]\r\n except: \r\n result_distance = np.nan\r\n result_time = np.nan\r\n else: \r\n result = gmaps.distance_matrix(origin, destination, mode = 'driving', units = 'metric')\r\n result_distance = result[\"rows\"][0][\"elements\"][0][\"distance\"][\"value\"]\r\n result_time = result[\"rows\"][0][\"elements\"][0][\"duration\"][\"value\"]\r\n time_list.append(result_time)\r\n distance_list.append(result_distance)\r\n origin_id_list.append(origin_id)\r\n 
destination_id_list.append(destination_id)\r\n \r\n output = pd.DataFrame(distance_list, columns = ['Distance in meter'])\r\n output['duration in seconds'] = time_list\r\n output['origin_id'] = origin_id_list\r\n output['destination_id'] = destination_id_list\r\n return output\r\n\r\nmunicipios_a_visitar=[\"52838\", \"52835\"]\r\ncabeceras_cortadas=cabeceras[cabeceras[\"COD_DANE\"].isin(municipios_a_visitar)==True]\r\nout=google_distances(cabeceras_cortadas)\r\n\r\n\r\ndef llenar_edges(municipios_visitar_ed):\r\n we_edges=[]\r\n cabeceras_cortadas=cabeceras[cabeceras[\"COD_DANE\"].isin(municipios_visitar_ed)==True]\r\n for i in range(0, cabeceras_cortadas.shape[0]):\r\n for j in range(0, cabeceras_cortadas.shape[0]): \r\n we_edges.append([cabeceras_cortadas.iloc[i,1], cabeceras_cortadas.iloc[j,1], \r\n geopy.distance.geodesic((cabeceras_cortadas.iloc[i,3], \r\n cabeceras_cortadas.iloc[i,4]), (cabeceras_cortadas.iloc[j,3], cabeceras_cortadas.iloc[j,4])).km])\r\n return we_edges\r\n\r\n\r\ndef ruta_mas_corta(municipios_visitar):\r\n we_edge=llenar_edges(municipios_visitar)\r\n graph=[]\r\n for i in municipios_visitar:\r\n line=[]\r\n for j in municipios_visitar:\r\n for k in we_edge: \r\n if k[0]==i and k[1]==j: \r\n line.append(k[2])\r\n graph.append(line)\r\n\r\n return graph\r\n\r\ndef aeropuerto_mas_cercano(municipio_origen_1):\r\n coord_municipio_origen=(cabeceras.loc[cabeceras['COD_DANE'] == municipio_origen_1, 'POINT_Y'].iloc[0], \r\n cabeceras.loc[cabeceras['COD_DANE'] == municipio_origen_1, 'POINT_X'].iloc[0])\r\n dist_aerop_min=9999999\r\n nombre_aero_min=\"nan\"\r\n \r\n for j in range(0, aerop.shape[0]):\r\n coordenada_aeropuerto=(aerop.iloc[j,1], aerop.iloc[j,2])\r\n nombre_aeropuerto=aerop.iloc[j,0]\r\n distancia= geopy.distance.geodesic(coordenada_aeropuerto, coord_municipio_origen).km\r\n if distancia= P[item]:\n output.append(stack.pop())\n stack.append(item)\n\n while stack:\n output.append(stack.pop())\n\n return output\n\n\ndef calc_expr(expStr):\n tokens = expStr\n OP = (\"*\", \"/\", \"+\", \"-\",)\n FUNC = {\n \"*\": lambda x, y: y * x,\n \"/\": lambda x, y: y / x,\n \"+\": lambda x, y: y + x,\n \"-\": lambda x, y: y - x,\n }\n stack = []\n\n for item in tokens:\n if item not in OP:\n if '.' 
in item:\n stack.append(float(item))\n else:\n stack.append(int(item))\n else:\n a = stack.pop()\n b = stack.pop()\n stack.append(FUNC[item](a, b))\n\n return stack.pop()\n\n\nclass MyWidget(QWidget):\n def __init__(self):\n super().__init__()\n # 숫자가 보이는 라인에딧 위젯\n leLayout = QVBoxLayout()\n self.le = QLineEdit(self)\n leLayout.addWidget(self.le)\n\n # button 모음 그리드 레이아웃\n grid = QGridLayout()\n names = ['Cls', 'Bck', '(', ')',\n '7', '8', '9', '/',\n '4', '5', '6', '*',\n '1', '2', '3', '-',\n '0', '.', '=', '+']\n\n positions = [(i, j) for i in range(5) for j in range(4)]\n\n for position, name in zip(positions, names):\n if name == '':\n continue\n button = QPushButton(name)\n button.setObjectName(name)\n button.pressed.connect(self.button_pressed)\n grid.addWidget(button, *position)\n\n # h_layout 과 grid 를 하나로 만들어줄 v_layout\n vbox = QVBoxLayout()\n vbox.addLayout(leLayout)\n vbox.addLayout(grid)\n\n self.setGeometry(300, 150, 300, 250)\n self.setLayout(vbox)\n\n self.show()\n\n def button_pressed(self):\n sending_button = self.sender()\n if str(sending_button.objectName()) == \"=\":\n self.le.setText(str(calc_expr(parse_expr(self.le.text()))))\n elif str(sending_button.objectName()) == \"Bck\":\n tmp = str(self.le.text())\n tmp = tmp[:-1]\n self.le.setText(tmp)\n elif str(sending_button.objectName()) == \"Cls\":\n tmp = str(self.le.text())\n tmp = tmp[:0]\n self.le.setText(tmp)\n else:\n self.le.setText(self.le.text() + str(sending_button.objectName()))\n\n\nclass MyWindow(QMainWindow):\n def __init__(self):\n super().__init__()\n self.setWindowTitle(\"계산기\")\n MainWidget = MyWidget()\n self.setCentralWidget(MainWidget) # 반드시 필요함.\n\n self.setGeometry(300, 700, 350, 150)\n # move & resize로 대체 가능\n self.show()\n\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n mywindow = MyWindow()\n app.exec_()\n","repo_name":"m16khb/Calculator","sub_path":"Calculator.py","file_name":"Calculator.py","file_ext":"py","file_size_in_byte":3625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"30170587509","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n===============================================================================\n Python code for SCRE analysis\n Created by Combustion Research Center CRC at LETE - Sao Paulo, Brasil\n Laboratory of Environmental and Thermal Engineering - LETE\n Escola Politecnica da USP - EPUSP\n===============================================================================\nversion:0.0 - 04/2022: Helio Villanueva\nversion:0.1 - 05/2022: Helio Villanueva\nversion:0.2 - 01/2023: Helio Villanueva\nversion:0.3 - 02/2023: Helio Villanueva\nversion:0.4 - 03/2023: Helio Villanueva\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import image\nimport matplotlib.gridspec as gridspec\nfrom matplotlib.ticker import MultipleLocator\nfrom scipy import signal\nimport glob\nfrom tqdm import tqdm\nimport os\n\nplt.rcParams.update({\n \"text.usetex\": True,\n \"font.family\": \"Helvetica\",\n \"font.size\": 14\n})\nplt.rcParams['xtick.top'] = plt.rcParams['ytick.right'] = True\nplt.rcParams['xtick.minor.top'] = plt.rcParams['ytick.minor.right'] = True\n\n# ******************************************************************************\n# -- USER\n# ******************************************************************************\n\npaths = [\"inferior_DF02P01_221216\"]\n\ninputs = {'imFmt':'jpg',\n 'rMotor':2000, # rpm rotacao do motor\n 'fCAM':24000, # Hz taxa 
aquisicao cameras\n 'init0':0, # number of initial images discarted (eg 360)\n 'durationCADs':150, # imgs with combustion to save\n 'combCycles':21, # numero de ciclos com combustao\n 'saveImgs':True # save mean and stdDev images for each CAD\n }\n\n# *****************************************************************************\n# -- FUNCOES\n# *****************************************************************************\n\n\nclass Caso():\n '''Classe que organiza o caso\n '''\n\n def __init__(self,path, inputs):\n self.path = path\n self.inputs = inputs\n self.lins, self.cols, self.rCAD, self.CADs, self.cycles, self.combCycles, self.limgNames, self.infos = self.baseInfos()\n\n def baseInfos(self):\n '''Basic informations\n '''\n # List images in Dir 'path'\n imgNames = glob.glob(self.path + '/*[0-9].' + self.inputs['imFmt'])\n imgNames.sort()\n imgNames = imgNames[self.inputs['init0']:] # discard initial images\n\n img = image.imread(imgNames[0]) # read single img for general infos\n lins = img.shape[0] # y coord\n cols = img.shape[1] # x coord\n stepsOrig = len(imgNames) # tot of all imgs saved by the camera\n singleCycle = 2*360 # deg\n rCAD = (singleCycle*self.inputs['rMotor']/60)/self.inputs['fCAM'] # CAD / image\n CADs = int(719 / rCAD) # CADs observed in each cycle\n cycles = int(stepsOrig / CADs) # total steps / cycle\n\n steps = cycles * CADs\n imgNames = imgNames[:steps]\n limgNames = np.array(imgNames).reshape(cycles, CADs)\n\n infos = 'General Infos\\n'\n infos += 14*'-' + '\\nImage res: %.1f x %.1f\\n' %(lins, cols)\n infos += 'N tot images: %.2f\\n' %stepsOrig\n infos += 'CADs/image: %.2f\\n' %rCAD\n infos += 'CADs/cycle: %.1f\\n' %(CADs+1)\n infos += 'Cycles: %.1f\\n' %cycles\n infos += 'Cycles w/ combustion: %.1f\\n' %self.inputs['combCycles']\n infos += 14*'-'\n\n return lins, cols, rCAD, CADs, cycles, self.inputs['combCycles'], limgNames, infos\n\n def imgBackground(self):\n '''Background image for removal process\n '''\n imsCy = np.zeros((self.lins, self.cols, self.cycles))\n for cy in range(self.cycles): # loop over cycles\n # print(\"Cycle No: \", cy)\n imsCy[:, :, cy] = image.imread(self.limgNames[cy, 0])\n\n return np.mean(imsCy, 2)\n\n def imgProcess(self):\n '''For loops for each cycle and CAD\n '''\n imgBackground = self.imgBackground()\n\n try:\n print('Trying to read npy files')\n imsCycleMean = np.load(self.path + '/imsCycleMean.npy')\n imsCycleStdDev = np.load(self.path + '/imsCycleStdDev.npy')\n print('done')\n except Exception:\n print('Reading raw img files')\n imsCycleMean = np.zeros((self.lins, self.cols, self.CADs))\n imsCycleStdDev = np.zeros((self.lins, self.cols, self.CADs))\n\n for t in tqdm(range(self.CADs), desc=\"CAD calculations: \"):\n if t > self.inputs['durationCADs']:\n break\n imsCy = np.zeros((self.lins, self.cols, self.combCycles))\n for cy in range(self.combCycles): # loop over cycles w/ comb\n imsCy[:, :, cy] = image.imread(self.limgNames[cy, t])\n imsCy[:, :, cy] -= imgBackground\n\n # hole cycle calculation\n imsCy[imsCy<0] = 0 # No negative values after background removal\n imM = np.mean(imsCy, 2, keepdims=True)\n imsCycleMean[:, :, t] = imM[:, :, 0]\n imsCyFluct = np.sqrt((imsCy[:, :, :] - imM)**2)\n imsCycleStdDev[:, :, t] = np.mean(imsCyFluct, 2)\n print('Saving .npy arrays')\n np.save(self.path + '/imsCycleMean', imsCycleMean)\n np.save(self.path + '/imsCycleStdDev', imsCycleStdDev)\n\n return imsCycleMean, imsCycleStdDev\n\n def calcDerivada(self,imsCycleMean):\n print('Processing time derivative')\n # central differencing 2nd 
order\n # central differencing 4th order\n # - Fourth order CDS scheme from Ferziger Computational methods for\n # - fluid dynamics on page 44 eq 3.14\n scheme = np.array([-1,8,0,-8,1]).reshape(1,1,5)\n den = 12\n num = signal.convolve(imsCycleMean,scheme, mode='same')\n dt = 1/self.inputs['fCAM']\n ddt = num/(den*dt)\n\n return ddt\n\n def calcFlameArea(self,imsCycleMean):\n print('Processing Flame area')\n # binarization\n mask = imsCycleMean > 50\n flameArea = np.sum(mask,axis=(0,1))\n return flameArea,mask\n\n def Plots(self,imsCycleMean,imsCycleStdDev):\n '''\n '''\n # min max for plots\n vMeanMin = imsCycleMean.min()\n vMeanMax = imsCycleMean.max()\n vStdDevMin = imsCycleStdDev.min()\n vStdDevMax = imsCycleStdDev.max()\n\n # print('Saving images')\n if not os.path.exists(self.path + '/CADmean'):\n os.makedirs(self.path + '/CADmean')\n\n if not os.path.exists(self.path + '/CADstdDev'):\n os.makedirs(self.path + '/CADstdDev')\n\n for t in tqdm(range(self.CADs), desc=\"Saving mean/stdDev CAD imgs: \"):\n if t > self.inputs['durationCADs']:\n break\n cad = t*self.rCAD\n # Mean\n plt.figure(tight_layout=True)\n plt.imshow(imsCycleMean[:, :, t], cmap='hot', vmin=vMeanMin, vmax=vMeanMax)\n plt.title('mean CAD %3d' %cad)\n plt.colorbar(label='Intensidade luminosa I')\n figNameMean = self.path + '/CADmean/CADmean%04d' %t + '.png'\n plt.savefig(figNameMean)\n plt.close()\n # StdDev\n plt.figure(tight_layout=True)\n plt.imshow(imsCycleStdDev[:, :, t], cmap='hot', vmin=vStdDevMin, vmax=vStdDevMax)\n plt.title('stdDev CAD %3d' %cad)\n plt.colorbar(label='Intensidade luminosa I')\n figNameStdDev = self.path + '/CADstdDev/CADstdDev%04d' %t + '.png'\n plt.savefig(figNameStdDev)\n plt.close()\n\n return 0\n\n def PlotDerivada(self,imsCycleddt):\n '''\n '''\n # min max for plots\n vddtMin = imsCycleddt.min()\n vddtMax = imsCycleddt.max()\n\n # print('Saving images')\n\n if not os.path.exists(self.path + '/CADddt'):\n os.makedirs(self.path + '/CADddt')\n\n for t in tqdm(range(self.CADs), desc=\"Saving ddt CAD imgs: \"):\n if t > self.inputs['durationCADs']:\n break\n cad = t*self.rCAD\n # Ddt\n plt.figure(tight_layout=True)\n plt.imshow(imsCycleddt[:, :, t], cmap='jet', vmin=vddtMin, vmax=vddtMax)\n plt.title('ddt CAD %3d' %cad)\n plt.colorbar()\n figNameddt = self.path + '/CADddt/CADddt%04d' %t + '.png'\n plt.savefig(figNameddt)\n plt.close()\n\n return 0\n\n def PlotFlameArea(self, flameArea, flameAreaImg):\n '''\n '''\n # min max for plots\n fAMin = flameAreaImg.min()\n fAMax = flameAreaImg.max()\n\n # print('Saving images')\n\n if not os.path.exists(self.path + '/CADflameArea'):\n os.makedirs(self.path + '/CADflameArea')\n\n for t in tqdm(range(self.CADs), desc=\"Saving flame Area CAD imgs: \"):\n if t > self.inputs['durationCADs']:\n break\n cad = t*self.rCAD\n # Flame Area\n fig = plt.figure(tight_layout=True)\n gs = gridspec.GridSpec(1, 1)\n ax = fig.add_subplot(gs[0, 0])\n ax.imshow(flameAreaImg[:, :, t], cmap='gray', vmin=fAMin, vmax=fAMax)\n ax.set_title('Flame Area CAD %3d' %cad)\n bbox = dict(facecolor='w', alpha=0.7,boxstyle='Round')\n text = 'A = %0.2f $px^2$' %flameArea[t]\n ax.text(0.7,0.8,text,bbox=bbox,transform=ax.transAxes)\n # plt.colorbar()\n figNameFA = self.path + '/CADflameArea/CADflameArea%04d' %t + '.png'\n plt.savefig(figNameFA)\n plt.close()\n\n return 0\n\n def PlotFlameIntensity(self, flameArea):\n '''\n '''\n # min max for plots\n fIMin = flameArea.min()\n fIMax = flameArea.max()\n\n fig = plt.figure(tight_layout=True)\n gs = gridspec.GridSpec(1, 1)\n ax = 
fig.add_subplot(gs[0, 0])\n ax.plot(flameArea,'k')\n ax.set_ylim(fIMin,fIMax)\n ax.set_title('case: ')\n ax.set_xlabel('CAD')\n ax.set_ylabel('Flame Intensity')\n ax.xaxis.set_minor_locator(MultipleLocator(10))\n ax.yaxis.set_minor_locator(MultipleLocator(200))\n figNameFA = self.path + '/flameIntensity.png'\n plt.savefig(figNameFA)\n plt.close()\n\n return 0\n\n################################################################################\n\n# *****************************************************************************\n# -- MAIN\n# *****************************************************************************\n\n\ndef main():\n '''Funcao main caso arquivo seja disparado via terminal\n '''\n header = '\\n' + 70*\"=\" + '\\n' + '\\t\\tPython code for SCRE analysis\\n'\n header += 'Created by Combustion Research Center CRC at LETE - Sao Paulo, Brasil\\n'\n header += 'Laboratory of Environmental and Thermal Engineering - LETE\\n'\n header += 'Escola Politecnica da USP - EPUSP\\n'\n header += 70*\"=\" + '\\n'\n print(header)\n\n for path in paths:\n print('\\nProcessando caso %s' %path)\n case = Caso(path,inputs)\n print(case.infos)\n imgMean, imgDev = case.imgProcess()\n imgddt = case.calcDerivada(imgMean)\n flameArea, flameAreaImg = case.calcFlameArea(imgMean)\n case.Plots(imgMean,imgDev)\n case.PlotDerivada(imgddt)\n case.PlotFlameArea(flameArea, flameAreaImg)\n case.PlotFlameIntensity(flameArea)\n print('Done\\n')\n return 0\n\n\nif __name__ == \"__main__\":\n # execute only if run as a script\n main()\n","repo_name":"LETE-CRC/LETE-Experimental","sub_path":"SCRE/SCRE-Chemluminescence.py","file_name":"SCRE-Chemluminescence.py","file_ext":"py","file_size_in_byte":11591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"11067496095","text":"# (C) Datadog, Inc. 
2018-present\n# All rights reserved\n# Licensed under a 3-clause BSD style license (see LICENSE)\nimport re\nimport sys\n\nfrom ..errors import ManifestError\nfrom ..fs import chdir, file_exists, path_join, read_file, read_file_lines, write_file, write_file_lines\nfrom ..subprocess import run_command\nfrom .utils import get_version_file, load_manifest\n\n# Maps the Python platform strings to the ones we have in the manifest\nPLATFORMS_TO_PY = {'windows': 'win32', 'mac_os': 'darwin', 'linux': 'linux2'}\nALL_PLATFORMS = sorted(PLATFORMS_TO_PY)\nVERSION = re.compile(r'__version__ *= *(?:[\\'\"])(.+?)(?:[\\'\"])')\nDATADOG_PACKAGE_PREFIX = 'datadog-'\n\n\ndef get_release_tag_string(check_name, version_string):\n \"\"\"\n Compose a string to use for release tags\n \"\"\"\n if check_name == 'ddev':\n version_string = f'v{version_string}'\n\n if check_name:\n return f'{check_name}-{version_string}'\n else:\n return version_string\n\n\ndef update_version_module(check_name, old_ver, new_ver):\n \"\"\"\n Change the Python code in the __about__.py module so that `__version__`\n contains the new value.\n \"\"\"\n version_file = get_version_file(check_name)\n contents = read_file(version_file)\n\n contents = contents.replace(old_ver, new_ver)\n write_file(version_file, contents)\n\n\ndef get_package_name(folder_name):\n \"\"\"\n Given a folder name for a check, return the name of the\n corresponding Python package\n \"\"\"\n if folder_name == 'datadog_checks_base':\n return 'datadog-checks-base'\n elif folder_name == 'datadog_checks_downloader':\n return 'datadog-checks-downloader'\n elif folder_name == 'datadog_checks_dependency_provider':\n return 'datadog-checks-dependency-provider'\n elif folder_name == 'ddev':\n return 'ddev'\n\n return f\"{DATADOG_PACKAGE_PREFIX}{folder_name.replace('_', '-')}\"\n\n\ndef get_folder_name(package_name):\n \"\"\"\n Given a Python package name for a check, return the corresponding folder\n name in the git repo\n \"\"\"\n if package_name == 'datadog-checks-base':\n return 'datadog_checks_base'\n elif package_name == 'datadog-checks-downloader':\n return 'datadog_checks_downloader'\n elif package_name == 'datadog-checks-dependency-provider':\n return 'datadog_checks_dependency_provider'\n elif package_name == 'ddev':\n return 'ddev'\n\n return package_name.replace('-', '_')[len(DATADOG_PACKAGE_PREFIX) :]\n\n\ndef get_agent_requirement_line(check, version):\n \"\"\"\n Compose a text line to be used in a requirements.txt file to install a check\n pinned to a specific version.\n \"\"\"\n package_name = get_package_name(check)\n\n # no manifest\n if check in ('datadog_checks_base', 'datadog_checks_downloader', 'datadog_checks_dependency_provider', 'ddev'):\n return f'{package_name}=={version}'\n\n m = load_manifest(check)\n if 'tile' in m:\n platforms = []\n for classifier_tag in m['tile']['classifier_tags']:\n key, value = classifier_tag.split('::', 1)\n if key != 'Supported OS':\n continue\n elif value == 'macOS':\n value = 'mac_os'\n platforms.append(value.lower())\n platforms.sort()\n else:\n platforms = sorted(m.get('supported_os', []))\n\n # all platforms\n if platforms == ALL_PLATFORMS:\n return f'{package_name}=={version}'\n # one specific platform\n elif len(platforms) == 1:\n return f\"{package_name}=={version}; sys_platform == '{PLATFORMS_TO_PY.get(platforms[0])}'\"\n elif platforms:\n if 'windows' not in platforms:\n return f\"{package_name}=={version}; sys_platform != 'win32'\"\n elif 'mac_os' not in platforms:\n return f\"{package_name}=={version}; 
sys_platform != 'darwin'\"\n elif 'linux' not in platforms:\n return f\"{package_name}=={version}; sys_platform != 'linux2'\"\n\n raise ManifestError(f\"Can't parse the supported OS list for the check {check}: {platforms}\")\n\n\ndef update_agent_requirements(req_file, check, newline):\n \"\"\"\n Update the requirements lines for the given check\n \"\"\"\n package_name = get_package_name(check)\n lines = read_file_lines(req_file)\n\n pkg_lines = {line.split('==')[0]: line for line in lines}\n pkg_lines[package_name] = f'{newline}\\n'\n\n write_file_lines(req_file, sorted(pkg_lines.values()))\n\n\ndef build_package(package_path, sdist):\n with chdir(package_path):\n if file_exists(path_join(package_path, 'pyproject.toml')):\n command = [sys.executable, '-m', 'build']\n if not sdist:\n command.append('--wheel')\n\n result = run_command(command, capture='out')\n if result.code != 0:\n return result\n else:\n # Clean up: Files built previously and now deleted might still persist in build directory\n # and will be included in the final wheel. Cleaning up before avoids that.\n result = run_command([sys.executable, 'setup.py', 'clean', '--all'], capture='out')\n if result.code != 0:\n return result\n\n result = run_command([sys.executable, 'setup.py', 'bdist_wheel', '--universal'], capture='out')\n if result.code != 0:\n return result\n\n if sdist:\n result = run_command([sys.executable, 'setup.py', 'sdist'], capture='out')\n if result.code != 0:\n return result\n\n return result\n","repo_name":"DataDog/integrations-core","sub_path":"datadog_checks_dev/datadog_checks/dev/tooling/release.py","file_name":"release.py","file_ext":"py","file_size_in_byte":5523,"program_lang":"python","lang":"en","doc_type":"code","stars":820,"dataset":"github-code","pt":"83"} +{"seq_id":"35319763987","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 2022/09/04 13:10:44\n\n@author: josephlbailey@arizona.edu\n\nCode can also be found at: https://github.com/josephlbailey/phys141-lab\n\nThis file contains two functions. The first is read_frame which\nreads a specific frame from a video file. The second is called\ntrack_motion. This function tracks the largest object in the image\nthat is darker than the background. The track_motion function also\nassumes that there are scale markers included in the image that the\nuser will click on during the first frame to define the pixel to cm\nratio. 
The distance (dist) between these markers should be input in cm.\n\"\"\"\n\nimport cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef read_frame(vid_obj, frame_num):\n vid_obj.set(1, frame_num)\n ret, frame = vid_obj.read()\n return frame\n\n\ndef track_motion(filename, thresh, dist):\n vid_obj = cv2.VideoCapture(filename)\n\n # determine the size of the video images and the number of frames\n width = vid_obj.get(3)\n height = vid_obj.get(4)\n fps = vid_obj.get(5)\n num_frames = int(vid_obj.get(7)) # determines number of frames in video\n\n # setup arrays to store the coordinates\n # of the center of mass at each time point\n xcm = np.zeros(num_frames)\n ycm = np.zeros(num_frames)\n\n time = np.array([i for i in range(num_frames)], dtype='float') / fps\n\n # read in first frame and convert to float\n frame = read_frame(vid_obj, 0)\n\n # convert frame to RGB\n frame = frame[:, :, [2, 1, 0]]\n\n # have user measure the distance between lines in image\n fig = plt.figure()\n plt.imshow(frame)\n fig.suptitle('Click on two marker positions in the image that are Dist apart.')\n points = np.array(plt.ginput(2))\n\n # define the pixel to cm scale\n d = np.sqrt((points[1, 0] - points[0, 0]) ** 2 + (points[1, 1] - points[0, 1]) ** 2)\n pix2cm = dist / d\n\n # loop through the remaining frames of the video\n for i in range(0, num_frames):\n # read in first frame and convert to float\n frame = read_frame(vid_obj, i)\n frame = frame.astype('float')\n\n # compute the grayscale image\n gray = np.mean(frame, axis=2, dtype=float)\n blue = frame[:, :, 0]\n\n # find values of the grayscale image greater than Thresh\n mask = (blue - gray > thresh)\n\n # find the connected regions in the mask\n regions = cv2.connectedComponentsWithStats(mask.astype('uint8'))\n\n # determine which region has the largest area\n stats = regions[2]\n stats[0, 4] = 0\n can_label = np.argmax(stats[:, 4])\n\n # remove unwanted regions from the mask\n can_mask = mask\n can_mask[regions[1] != can_label] = 0\n\n # find the Center of Mass of the object\n xcm[i] = regions[3][can_label, 0]\n ycm[i] = regions[3][can_label, 1]\n\n # plot the mask and its center of mass\n plt.clf()\n plt.spy(can_mask)\n plt.plot(xcm[i], ycm[i], 'or')\n plt.pause(0.1)\n\n # convert xcm and ycm to centimeters\n xcm = pix2cm * xcm\n ycm = pix2cm * ycm\n\n # plot xcm as a function of time\n plt.figure()\n plt.plot(time, xcm)\n\n plt.xlabel('Time (s)', fontname='Arial', fontsize=16)\n plt.ylabel('Distance (cm)', fontname='Arial', fontsize=16)\n\n vid_obj.release()\n\n return xcm, ycm, time\n","repo_name":"josephlbailey/phys141-lab","sub_path":"lab2/AnalyzeVideo.py","file_name":"AnalyzeVideo.py","file_ext":"py","file_size_in_byte":3364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"72487193870","text":"import csv\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sb\nimport pandas as pd\n\nchoices = [0, 1]\nusers = [i for i in range(0, 54)]\ndata_45_couples_no_zeros = np.zeros((54, 1, 4), dtype='float32')\ndata_105_couples_no_zeros = np.zeros((54, 1, 4), dtype='float32')\ndata_210_couples_no_zeros = np.zeros((54, 1, 4), dtype='float32')\nlist_theory_45_couple = []\nlist_theory_105_couple = []\nlist_theory_210_couple = []\nfor choice in choices:\n if choice == 1:\n path = './Data8Component2Std/testOutput_original/results_zero_founded_parameters.csv'\n else:\n continue\n # temporally\n # path = './Data8Component2Std/testOutput/results_no_zero(variant).csv'\n 
user_counter = 0\n for user in users:\n # if user in [15, 3, 32, 7, 36, 4, 20, 29, 14, 11]:\n # continue\n parameter_base = 0\n test_not_inserted = True\n with open(path, newline='\\n') as csvFile:\n reader = csv.reader(csvFile, delimiter=\";\")\n for i, row in enumerate(reader):\n if i == 0:\n continue\n else:\n if int(row[4]) <= 45: # 45CouplesCase\n if int(row[1]) == 1 and int(row[2]) == 4:\n if int(row[0]) == user:\n data_45_couples_no_zeros[user_counter][0][0] = float(row[6]) # accuracy_percentage\n data_45_couples_no_zeros[user_counter][0][1] = float(row[7]) # precision_percentage\n data_45_couples_no_zeros[user_counter][0][2] = float(row[8]) # recall_percentage\n data_45_couples_no_zeros[user_counter][0][3] = float(row[9]) # train_time\n list_theory_45_couple.append(row[10]) # theory\n elif (int(row[4]) > 45) and (int(row[4]) <= 105): # 45CouplesCase\n if int(row[1]) == 1 and int(row[2]) == 5:\n if int(row[0]) == user:\n data_105_couples_no_zeros[user_counter][0][0] = float(row[6]) # accuracy_percentage\n data_105_couples_no_zeros[user_counter][0][1] = float(row[7]) # precision_percentage\n data_105_couples_no_zeros[user_counter][0][2] = float(row[8]) # recall_percentage\n data_105_couples_no_zeros[user_counter][0][3] = float(row[9]) # train_time\n list_theory_105_couple.append(row[10]) # theory\n elif (int(row[4]) > 105) and (int(row[4]) <= 210):\n if int(row[1]) == 1 and int(row[2]) == 5:\n if int(row[0]) == user:\n data_210_couples_no_zeros[user_counter][0][0] = float(row[6]) # accuracy_percentage\n data_210_couples_no_zeros[user_counter][0][1] = float(row[7]) # precision_percentage\n data_210_couples_no_zeros[user_counter][0][2] = float(row[8]) # recall_percentage\n data_210_couples_no_zeros[user_counter][0][3] = float(row[9]) # train_time\n list_theory_210_couple.append(row[10]) # theory\n user_counter+=1\n\n\n user_counter = 0\n insert_counter = 0\n final_accuracy_percentages = np.zeros((54, 3))\n final_precision_percentages = np.zeros((54, 3))\n final_recall_percentages = np.zeros((54, 3))\n final_training_times = np.zeros((54, 3))\n for user_matrix_45, user_matrix_105, user_matrix_210 in zip(data_45_couples_no_zeros, data_105_couples_no_zeros, data_210_couples_no_zeros):\n accuracy_percentages_45 = user_matrix_45[:, 0]\n precision_percentages_45 = user_matrix_45[:, 1]\n recall_percentages_45 = user_matrix_45[:, 2]\n accuracy_percentages_105 = user_matrix_105[:, 0]\n precision_percentages_105 = user_matrix_105[:, 1]\n recall_percentages_105 = user_matrix_105[:, 2]\n accuracy_percentages_210 = user_matrix_210[:, 0]\n precision_percentages_210 = user_matrix_210[:, 1]\n recall_percentages_210 = user_matrix_210[:, 2]\n accuracy_percentages_temp = np.concatenate((accuracy_percentages_45, accuracy_percentages_105))\n precision_percentages_temp = np.concatenate((precision_percentages_45, precision_percentages_105))\n recall_percentages_temp = np.concatenate((recall_percentages_45, recall_percentages_105))\n accuracy_percentages = np.concatenate((accuracy_percentages_temp, accuracy_percentages_210))\n precision_percentages = np.concatenate((precision_percentages_temp, precision_percentages_210))\n recall_percentages = np.concatenate((recall_percentages_temp, recall_percentages_210))\n train_times_45 = user_matrix_45[:, 3]\n train_times_105 = user_matrix_105[:, 3]\n train_times_210 = user_matrix_210[:, 3]\n train_times_temp = np.concatenate((train_times_45, train_times_105))\n train_times = np.concatenate((train_times_temp, train_times_210))\n 
final_accuracy_percentages[insert_counter] = accuracy_percentages\n final_precision_percentages[insert_counter] = precision_percentages\n final_recall_percentages[insert_counter] = recall_percentages\n final_training_times[insert_counter] = train_times\n insert_counter += 1\n user_counter += 1\n\n datasets = [\"Dataset_45_couples\", \"Dataset_105_couples\", \"Dataset_210_couples\"]\n parameters = [\"maxv= \" + str(i) + \"; maxp=\" + str(j) for i in range(1, 6) for j in range(1, 6)]\n parameters_for_graph = [str(i) + \";\" + str(j) for i in range(1, 6) for j in range(1, 6)]\n print(\"considered user id: all\")\n # print(\"considered user id: all\")\n print(\"test size: 105\")\n print(\"\")\n for dataset_counter in range(0,3):\n if dataset_counter == 0:\n mean_of_accuracy = np.mean(final_accuracy_percentages[:, 0])\n mean_of_precision = np.mean(final_precision_percentages[:, 0])\n mean_of_recall = np.mean(final_recall_percentages[:, 0])\n mean_of_training_time = np.mean(final_training_times[:, 0])\n print(\"On Dataset_45_couples with max_v=1;max_p=4 mean of accuracy \" + str(mean_of_accuracy) + \"; mean of precision \" + str(mean_of_precision) + \"; mean of recall \" + str(mean_of_recall) + \"; mean of training time \" + str(mean_of_training_time))\n print(\"\")\n elif dataset_counter == 1:\n mean_of_accuracy = np.mean(final_accuracy_percentages[:, 1])\n mean_of_precision = np.mean(final_precision_percentages[:, 1])\n mean_of_recall = np.mean(final_recall_percentages[:, 1])\n mean_of_training_time = np.mean(final_training_times[:, 1])\n print(\"On Dataset_105_couples with max_v=1;max_p=5 mean of accuracy \" + str(mean_of_accuracy) + \"; mean of precision \" + str(mean_of_precision) + \"; mean of recall \" + str(mean_of_recall) + \"; mean of training time \" + str(mean_of_training_time))\n print(\"\")\n else:\n mean_of_accuracy = np.mean(final_accuracy_percentages[:, 2])\n mean_of_precision = np.mean(final_precision_percentages[:, 2])\n mean_of_recall = np.mean(final_recall_percentages[:, 2])\n mean_of_training_time = np.mean(final_training_times[:, 2])\n print(\"On Dataset_190_couples with max_v=1;max_p=5 mean of accuracy \" + str(mean_of_accuracy) + \"; mean of precision \" + str(mean_of_precision) + \"; mean of recall \" + str(mean_of_recall) + \"; mean of training time \" + str(mean_of_training_time))\n print(\"\")\n","repo_name":"DanieleF198/ILASP-as-post-hoc-method-in-a-preference-system","sub_path":"ILASPcode/getStatistics_accuracy_precision_recall_founded_parameters.py","file_name":"getStatistics_accuracy_precision_recall_founded_parameters.py","file_ext":"py","file_size_in_byte":7834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"286250398","text":"def expect(xDistribution, function):\r\n fxProduct=[px*function(x) for x, px in xDistribution.items()]\r\n expectation=sum(fxProduct)\r\n return expectation\r\n\r\n\r\ndef forward(xT_1Distribution, eT, transitionTable, sensorTable):\r\n \r\n##################################################\r\n#\t\tYour code here\r\n################################################## \r\n\r\n # implementation of the formula\r\n unnormPx = {x:sensorTable[x][eT]*sum([transitionTable[xt][x]*xT_1Distribution[xt] for xt in xT_1Distribution]) for x in sensorTable}\r\n\r\n # normalization constant\r\n normConst = sum([unnormPx[x] for x in unnormPx])\r\n\r\n # return value is a dictionary representing belief distribution\r\n return {x: unnormPx[x]/normConst for x in unnormPx}\r\n\r\ndef main():\r\n 
\r\n pX0={0:0.3, 1:0.7}\r\n e=1\r\n transitionTable={0:{0:0.6, 1:0.4}, 1:{0:0.3, 1:0.7}}\r\n sensorTable={0:{0:0.6, 1:0.3, 2:0.1}, 1:{0:0, 1:0.5, 2:0.5}}\r\n \r\n xTDistribution=forward(pX0, e, transitionTable, sensorTable)\r\n print(xTDistribution)\r\n\r\nif __name__==\"__main__\":\r\n main()\r\n","repo_name":"richpaulyim/Learning-Reinforcement-Learning","sub_path":"ForwardBackwardAlgorithm/forward_Yim_Richard.py","file_name":"forward_Yim_Richard.py","file_ext":"py","file_size_in_byte":1069,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"3047575912","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n__title__ = ''\n__author__ = 'Administrator'\n__mtime__ = '2016/1/8'\n# code is far away from bugs with the god animal protecting\n I love animals. They taste delicious.\n\"\"\"\n\nimport urllib\nimport urllib2\n\nurl = 'http://blog.chromev.com'\n\nlogin_header = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.101 Safari/537.36',\n}\n\nrequest = urllib2.Request(url, headers=login_header)\n\nproxy_url = '121.34.195.34:9999'\nproxy = urllib2.ProxyHandler({'http': proxy_url})\n\nopener = urllib2.build_opener(proxy)\nurllib2.install_opener(opener)\n\nresp = urllib2.urlopen(request)\n\nprint(resp.getcode())\n\n","repo_name":"run100/python","sub_path":"scrapy/tbmm.py","file_name":"tbmm.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"18207054686","text":"import cv2 as cv\nimport numpy as np \n\nimg=cv.imread(\"euro.jpg\",0)\nimg=cv.resize(img,(600,300))\nlayer=img.copy()\ngp=[layer]\n\n#lr2=cv.pyrUp(img)\nlr=cv.pyrDown(img)\nfor i in range(3):\n\tlayer=cv.pyrDown(layer)\n\tgp.append(layer)\n\ncv.imshow(\"imagen\",layer)\n\ncv.waitKey(0)\ncv.destroyAllWindows()","repo_name":"Alberto-Arias-x64/Artificial_Vision","sub_path":"piramides.py","file_name":"piramides.py","file_ext":"py","file_size_in_byte":288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"20987481651","text":"from typing import List\n\n\nclass Action:\n method: str = None\n protocol: str = None\n args: List[str] = None\n include_values: dict = None\n active_if_not_root: bool = None\n\n def __init__(self, method: str, args: List[str], protocol: str = 'REST', include_values: dict = None,\n active_if_not_root: bool = True):\n self.method = method\n self.protocol = protocol.upper()\n self.args = args\n self.active_if_not_root = active_if_not_root\n self.include_values = include_values or {}\n\n def __str__(self):\n return \"\".format(self.protocol, self.method)\n\n def serialize(self):\n return {\n \"method\": self.method,\n \"protocol\": self.protocol,\n \"args\": self.args,\n \"activeIfNotRoot\": self.active_if_not_root,\n \"includeValues\": self.include_values\n }\n\n @classmethod\n def factory(cls, data):\n return cls(\n method=data['method'],\n protocol=data['protocol'],\n args=data['args'],\n active_if_not_root=data['activeIfNotRoot'],\n include_values=data['includeValues']\n )\n","repo_name":"orn0t/trood-sdk","sub_path":"trood/api/custodian/objects/actions.py","file_name":"actions.py","file_ext":"py","file_size_in_byte":1198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"83"} +{"seq_id":"25436612604","text":"from reportlab.lib import colors\nfrom reportlab.lib.enums import TA_CENTER, TA_LEFT\nfrom 
reportlab.lib.pagesizes import letter\nfrom reportlab.platypus import SimpleDocTemplate, Spacer, Paragraph, Table, TableStyle\nfrom reportlab.lib.styles import ParagraphStyle\nfrom reportlab.lib.units import inch\nimport pandas as pd\nfrom report_a import *\nimport numpy as np\nfrom itertools import groupby\n\n\ndef create_reportB(dynamodb, tokens):\n try:\n year = tokens[0]\n num_countries = get_num_countries(dynamodb, year)\n years = get_years(dynamodb, 'fhamid_economic')\n decades = get_decades(years)\n\n document = []\n document.append(Spacer(20, 20))\n document.append(Paragraph('Report B - Global Report',\n ParagraphStyle(name='Report name', fontSize=14, alignment=TA_LEFT)))\n document.append(Spacer(0, 15))\n document.append(Paragraph(\"Global Report\", ParagraphStyle(\n name='Name of Country', fontSize=14, alignment=TA_CENTER)))\n document.append(Spacer(0, 10))\n document.append(Paragraph(\n f\"Year: {year}\", ParagraphStyle(name='Year', fontSize=12, alignment=TA_LEFT)))\n document.append(Spacer(0, 5))\n document.append(Paragraph(f\"Number of Countries: {num_countries}\", ParagraphStyle(\n name='Number of Countries', fontSize=12, alignment=TA_LEFT)))\n document.append(Spacer(0, 20))\n document.append(Paragraph(\"Table of Countries Ranked by Population (largest to smallest)\",\n ParagraphStyle(name='Table 1 description', fontSize=12, alignment=TA_LEFT)))\n document.append(Spacer(0, 10))\n document.append(create_pop_table(dynamodb, year, num_countries))\n document.append(Spacer(0, 20))\n document.append(Paragraph(\"Table of Countries Ranked by Area (largest to smallest)\",\n ParagraphStyle(name='Table 2 description', fontSize=12, alignment=TA_LEFT)))\n document.append(Spacer(0, 10))\n document.append(create_area_table(dynamodb, num_countries))\n document.append(Spacer(0, 20))\n document.append(Paragraph(\"Table of Countries Ranked by Density (largest to smallest)\",\n ParagraphStyle(name='Table 3 description', fontSize=12, alignment=TA_LEFT)))\n document.append(Spacer(0, 10))\n document.append(create_density_table(dynamodb, year, num_countries))\n document.append(Spacer(0, 20))\n document.append(Paragraph(\"GDP Per Capita for all Countries\",\n ParagraphStyle(name=\"GDPPC Decades Header\", fontSize=12, alignment=TA_LEFT)))\n document.append(Spacer(0, 10))\n for decade in decades:\n document.append(Paragraph(f\"{decade[0]}'s Table\", ParagraphStyle(\n name=\"Decade header\", fontSize=12, alignment=TA_LEFT)))\n document.append(Spacer(0, 20))\n decade_data = get_decade_data(dynamodb, decade)\n table = create_decade_table(decade_data, decade)\n document.append(table)\n document.append(Spacer(0, 15))\n SimpleDocTemplate(f\"pdf/{year}.pdf\", pagesize=letter, rightMargin=40,\n leftMargin=40, topMargin=40, bottomMargin=40).build(document)\n\n except Exception as error:\n print(f\"{terminal.FAIL}Error:{error}{terminal.ENDC}\")\n return\n\n# Get the total number of countries from fhamid_people\n\n\ndef get_num_countries(dynamodb, year):\n response = dynamodb.Table('fhamid_people').scan(\n AttributesToGet=['Country', year])\n object = pd.DataFrame(response['Items']).to_dict(\"records\")\n return len(object)\n\n\n# Get the all the ranks for a table\ndef get_all_rank(dynamodb, table_name, key):\n response = dynamodb.Table(table_name).scan(\n AttributesToGet=['Country', key])\n object = pd.DataFrame(response['Items'])\n object[key] = object[key].astype(int)\n object = object.sort_values([key], ascending=False)\n return object\n\n\ndef create_pop_table(dynamodb, year, num_countries):\n data = []\n 
data.append([\"Country Name\", \"Population\", \"Rank\"])\n object = get_all_rank(dynamodb, 'fhamid_people', year).to_dict(\"records\")\n for i in range(0, num_countries):\n if (object[i][year] != 'nan'):\n pop_country = object[i]['Country']\n population = object[i][year]\n rank = i+1\n data.append([pop_country, population, rank])\n table = Table(data, colWidths=[2.4*inch])\n table.setStyle(TableStyle([\n ('INNERGRID', (0, 0), (-1, -1), 0.5, colors.black),\n ('BOX', (0, 0), (-1, -1), 0.5, colors.black),\n ('FONTSIZE', (0, 0), (-1, -1), 10)\n ]))\n return table\n\n\ndef get_all_density_rank(dynamodb, year, num_countries):\n response = dynamodb.Table('fhamid_people').scan(\n AttributesToGet=['Country', year, 'Area'])\n object = pd.DataFrame(response['Items'])\n object[year] = object[year].astype(int)\n object['Area'] = object['Area'].astype(int)\n object['Density'] = object[year]/object['Area']\n object = object.sort_values(['Density'], ascending=False)\n list = []\n for i in range(1, num_countries+1):\n list.append(i)\n object['Rank'] = list\n object.drop(year, axis=1, inplace=True)\n object.drop('Area', axis=1, inplace=True)\n return object\n\n\ndef create_area_table(dynamodb, num_countries):\n data = []\n data.append([\"Country Name\", \"Area (sq km)\", \"Rank\"])\n object = get_all_rank(dynamodb, 'fhamid_people', 'Area').to_dict(\"records\")\n for i in range(0, num_countries):\n if (object[i]['Area'] != 'nan'):\n country = object[i]['Country']\n area = object[i]['Area']\n rank = i+1\n data.append([country, area, rank])\n table = Table(data, colWidths=[2.4*inch])\n table.setStyle(TableStyle([\n ('INNERGRID', (0, 0), (-1, -1), 0.5, colors.black),\n ('BOX', (0, 0), (-1, -1), 0.5, colors.black),\n ('FONTSIZE', (0, 0), (-1, -1), 10)\n ]))\n return table\n\n\ndef create_density_table(dynamodb, year, num_countries):\n data = []\n data.append([\"Country Name\", \"Density (people / sq km)\", \"Rank\"])\n object = get_all_density_rank(\n dynamodb, year, num_countries).to_dict(\"records\")\n for i in range(0, num_countries):\n if (object[i]['Density'] != 'nan'):\n country = object[i]['Country']\n density = \"{:.2f}\".format(object[i]['Density'])\n rank = i+1\n data.append([country, density, rank])\n table = Table(data, colWidths=[2.4*inch])\n table.setStyle(TableStyle([\n ('INNERGRID', (0, 0), (-1, -1), 0.5, colors.black),\n ('BOX', (0, 0), (-1, -1), 0.5, colors.black),\n ('FONTSIZE', (0, 0), (-1, -1), 10)\n ]))\n return table\n\n\ndef get_decades(years):\n years = [int(i) for i in years] # Convert the years from str to int\n # Create a list of lists that contains for the decades\n decades = np.array([list(g) for k, g in groupby(years, lambda i: i // 10)])\n return decades\n\n\ndef get_decade_data(dynamodb, decade):\n # Convert all items in the decade list to strings\n decade = [str(i) for i in decade]\n response = dynamodb.Table('fhamid_economic').scan()\n df = pd.DataFrame(response['Items'])\n # Keep only the Country Name and the years for the specified decade\n df = df[['Country'] + decade]\n df = df.sort_values(['Country'])\n # Convert the 'nan' values for years to -1 and cast it as a float\n df[decade] = df[decade].fillna(-1).astype(float)\n # Cast the values to int after float cast\n df[decade] = df[decade].fillna(-1).astype(int)\n return df\n\n\ndef create_decade_table(df, decade):\n data = []\n # First item to append will be the column header names\n data.append(list(df))\n df = df.values.tolist() # Create a list from the df dict\n for item in df:\n temp = item[1:]\n result = all(element == 
-1 for element in temp)\n if (result == False): # If all the elements are equal to -1, then don't display it as they are 'nan' values obtained from DynamoDB\n data.append(item)\n\n # Creates the table using the data and styles it\n table = Table(data)\n table.setStyle(TableStyle([\n ('INNERGRID', (0, 0), (-1, -1), 0.5, colors.black),\n ('BOX', (0, 0), (-1, -1), 0.5, colors.black),\n ('FONTSIZE', (0, 0), (-1, -1), 10)\n ]))\n return table\n","repo_name":"Farid-Hamid-4/dynamodb-shell","sub_path":"report_b.py","file_name":"report_b.py","file_ext":"py","file_size_in_byte":8469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"21718524119","text":"import os\n\nimport pytest\n\n\n@pytest.fixture\ndef example():\n def _example(name):\n with open(\n os.path.join(os.path.dirname(__file__), \"examples\", name + \".toml\"),\n encoding=\"utf-8\",\n ) as f:\n return f.read()\n\n return _example\n\n\n@pytest.fixture\ndef json_example():\n def _example(name):\n with open(\n os.path.join(os.path.dirname(__file__), \"examples\", \"json\", name + \".json\"),\n encoding=\"utf-8\",\n ) as f:\n return f.read()\n\n return _example\n\n\n@pytest.fixture\ndef invalid_example():\n def _example(name):\n with open(\n os.path.join(\n os.path.dirname(__file__), \"examples\", \"invalid\", name + \".toml\"\n ),\n encoding=\"utf-8\",\n ) as f:\n return f.read()\n\n return _example\n\n\nTEST_DIR = os.path.join(os.path.dirname(__file__), \"toml-test\", \"tests\")\nIGNORED_TESTS = {\n \"valid\": [\n \"float/inf-and-nan\", # Can't compare nan\n ]\n}\n\n\ndef get_tomltest_cases():\n dirs = sorted(\n f for f in os.listdir(TEST_DIR) if os.path.isdir(os.path.join(TEST_DIR, f))\n )\n assert dirs == [\"invalid\", \"valid\"]\n rv = {\"invalid_encode\": {}}\n for d in dirs:\n rv[d] = {}\n ignored = IGNORED_TESTS.get(d, [])\n\n for root, _, files in os.walk(os.path.join(TEST_DIR, d)):\n relpath = os.path.relpath(root, os.path.join(TEST_DIR, d))\n if relpath == \".\":\n relpath = \"\"\n for f in files:\n try:\n bn, ext = f.rsplit(\".\", 1)\n except ValueError:\n bn, ext = f.rsplit(\"-\", 1)\n key = f\"{relpath}/{bn}\"\n if ext == \"multi\":\n continue\n if key in ignored:\n continue\n if d == \"invalid\" and relpath == \"encoding\":\n rv[\"invalid_encode\"][bn] = os.path.join(root, f)\n continue\n if key not in rv[d]:\n rv[d][key] = {}\n with open(os.path.join(root, f), encoding=\"utf-8\") as inp:\n rv[d][key][ext] = inp.read()\n return rv\n\n\ndef pytest_generate_tests(metafunc):\n test_list = get_tomltest_cases()\n if \"valid_case\" in metafunc.fixturenames:\n metafunc.parametrize(\n \"valid_case\",\n test_list[\"valid\"].values(),\n ids=list(test_list[\"valid\"].keys()),\n )\n elif \"invalid_decode_case\" in metafunc.fixturenames:\n metafunc.parametrize(\n \"invalid_decode_case\",\n test_list[\"invalid\"].values(),\n ids=list(test_list[\"invalid\"].keys()),\n )\n elif \"invalid_encode_case\" in metafunc.fixturenames:\n metafunc.parametrize(\n \"invalid_encode_case\",\n test_list[\"invalid_encode\"].values(),\n ids=list(test_list[\"invalid_encode\"].keys()),\n )\n","repo_name":"sdispater/tomlkit","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":2957,"program_lang":"python","lang":"en","doc_type":"code","stars":581,"dataset":"github-code","pt":"83"} +{"seq_id":"25701421591","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Feb 15 14:53:09 2021\n\n@author: ken\n\"\"\"\n\nimport pandas as pd \nimport 
itertools \nby_team = pd.read_csv(\"./Data/teamlevel.csv\")\n\nseason_matchups = {}\n\nszns = by_team.Season.unique()\nszns.sort()\n\nfor i in szns:\n szn_yr = by_team[by_team['Season'] == i]\n all_teams = szn_yr.loc[:,'TeamName'].unique().tolist()\n season_matchups[str(i)] = (i,list(itertools.combinations(all_teams,2))) \n\n","repo_name":"jeffmli/ncaab_bracket_hack","sub_path":"Matchup_combinations.py","file_name":"Matchup_combinations.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"74565203790","text":"##################################################################\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport os\nimport pyglet\nimport numpy as np\nimport math\n# ---Deng, Xiang, dxiang@ini.ethz.ch\n################################################################\n\nclass WptUtil:\n\n def __init__(self):\n print(\"WptUtil initialized\")\n self.dir_path = os.path.dirname(os.path.realpath(__file__))\n print(self.dir_path)\n self.wpts = []\n self.wpt_ref = []\n self.loadWpts()\n self.trajectory=np.append(self.wpts,[self.wpts[0,:]], axis=0)\n ########################################################\n self.diffs = self.trajectory[1:, :] - self.trajectory[:-1, :]\n self.l2s = self.diffs[:, 0]**2 + self.diffs[:, 1]**2\n\n self.wpts_opt=[]\n def loadWpts(self,filename='/Oschersleben_map_wpts'):\n path = self.dir_path+filename\n waypoints = pd.read_csv(path+'.csv', header=None).to_numpy()\n self.wpts = waypoints\n if True:\n # path2 = self.dir_path+'/nicholas_icra_global_wpnts'\n # waypoints2 = pd.read_csv(path2+'.csv', header=None).to_numpy()\n # waypoints2=waypoints2[0:-1:10,1:3] \n\n path2 = self.dir_path+'/Oschersleben_map_wpts_dense800_60'\n waypoints2 = pd.read_csv(path2+'.csv', header=None).to_numpy()\n waypoints2=waypoints2[0:-1:1,1:3] \n self.wpts_opt=waypoints2 \n self.wpts=self.wpts_opt\n def normalizeAngle(self,theta):\n \"\"\"return angle in [-pi, pi]\n \"\"\"\n while theta>np.pi:\n theta=theta-2.0*np.pi\n while theta<-np.pi:\n theta=theta+2.0*np.pi\n\n return theta \n def find_nearest_point_on_trajectory(self,point, trajectory):\n \"\"\" Acknowledgement: adapted from the original f1tenth code base##########################\n Return the nearest wpt given a point 2x1 dim\n trajectory: waypoints, Nx2\n \"\"\"\n # print(trajectory.shape)\n ######################################################## \n \n dots = np.empty((self.trajectory.shape[0]-1, ))\n tmp = point - self.trajectory \n dots = np.sum(np.multiply(tmp[range(dots.shape[0]),:],self.diffs[:,:]), axis=1)\n # deprecated, these stupid for-loop significantly slow down the code\n # for i in range(dots.shape[0]):\n # dots[i] = np.dot(tmp[i,:], self.diffs[i, :])\n t = dots / self.l2s\n t[t < 0.0] = 0.0\n t[t > 1.0] = 1.0\n projections = self.trajectory[:-1, :] + (t*self.diffs.T).T\n tmp=point-projections\n distssq=np.sum(np.multiply(tmp,tmp),axis=1)\n # deprecated, these stupid for-loop significantly slow down the code\n # for i in range(dists.shape[0]):\n # temp = point - projections[i]\n # dists[i] = np.sqrt(np.sum(temp*temp))\n min_dist_segment = np.argmin(distssq)\n return projections[min_dist_segment], np.sqrt(distssq[min_dist_segment]), t[min_dist_segment], min_dist_segment\n def get_wpt_ref(self,pose_x,pose_y,pose_theta, min_dist_segment,K=3):\n next_wpt_id = min_dist_segment+K\n if next_wpt_id >= self.wpts.shape[0]:\n next_wpt_id = next_wpt_id-self.wpts.shape[0]\n self.wpt_ref = self.wpts[next_wpt_id,:]\n 
wpt_diff=self.wpt_ref-[pose_x,pose_y]\n self.theta2wpt=self.normalizeAngle(np.arctan2(wpt_diff[1],wpt_diff[0])-pose_theta)\n return self.wpt_ref, self.theta2wpt\n def angleDistance(self, theta1, theta2):\n return np.abs(np.arctan2(np.sin(theta1-theta2),np.cos(theta1-theta2)))\n\n\n def suggestGap(self,gaps,largest_gap_index,distances,angles, steering_angle, max_distance,theta2wpt): \n if len(gaps)>0 and True:\n index_gapII=largest_gap_index\n if len(gaps)>1:\n gap_thetas = np.zeros((len(gaps),))\n gap_widths = np.zeros((len(gaps),))\n for i in range(len(gaps)):\n gap=gaps[i]\n gap_thetas[i]=(gap[0]+gap[1])/2\n gap_widths[i] = gap[5]\n \n theta_dist=self.angleDistance(gap_thetas,theta2wpt)\n vals=np.divide(theta_dist,gap_widths)\n index_gapII=np.argmin(vals) \n gap=gaps[index_gapII]\n gap_starting_index=gap[2]\n gap_closing_index=gap[3]\n distances_sub=distances[gap_starting_index+1:gap_closing_index] \n\n max_distance=np.nanmean(distances_sub) \n steering_angle=gap_thetas[index_gapII] \n good_gap = gaps[index_gapII]\n else:\n gap=gaps[index_gapII]\n gap_starting_index=gap[2]\n gap_closing_index=gap[3]\n distances_sub=distances[gap_starting_index+1:gap_closing_index]\n if np.array(distances_sub).size==0:\n distances_sub=distances[gap_starting_index:gap_closing_index+1]\n # print(distances_sub)\n max_distance=np.nanmean(distances_sub)\n gap=gaps[index_gapII]\n gap_starting_index=gap[2]\n gap_closing_index=gap[3]\n distances_sub=distances[gap_starting_index+1:gap_closing_index]\n if np.array(distances_sub).size==0:\n gap_starting_index-=1\n gap_closing_index+=1\n angles_sub=angles[gap_starting_index+1:gap_closing_index]\n distances_sub=distances[gap_starting_index+1:gap_closing_index]\n theta_dist=self.angleDistance(angles_sub,theta2wpt)\n isel= np.argmin(theta_dist)\n steering_angleII=angles_sub[isel]\n steering_angle=(steering_angleII+3*steering_angle)/4\n # max_distance=distances_sub[isel]\n return steering_angle, max_distance \n def normalizedProjection(self,pose_x,pose_y,targets,wpt_ref):\n diff_tar = targets-[pose_x,pose_y]\n diff_ref = wpt_ref-[pose.x,pose.y]\n diff_tar_norm = np.sqrt(np.sum(np.multiply(diff_tar,diff_tar),axis=1))\n diff_ref_norm = np.sqrt(np.sum(np.multiply(diff_ref,diff_ref),axis=1))\n diff_ref /= diff_ref_norm\n diff_tar = np.divide(diff_tar, [diff_tar_norm,diff_tar_norm])\n projection = np.sum(np.multiply(np.repeat(diff_ref,),diff_tar),axis=1)","repo_name":"F1Tenth-INI/f1tenth_development_gym","sub_path":"others/Controllers_obsolate/xiang/wptutils.py","file_name":"wptutils.py","file_ext":"py","file_size_in_byte":6483,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"83"} +{"seq_id":"72968892110","text":"from config.envvars import NODES_KEY, get_config_param\nfrom logger.logger import Logger\nimport random\nimport subprocess\nimport time\n\nlogger = Logger()\n\nTIME_BETWEEN_STOPS = 2\n\ndef main():\n nodes = get_config_param(NODES_KEY, logger).split(',')\n while True:\n node_to_stop = random.choice(nodes)\n result = subprocess.run(['docker', 'stop', node_to_stop],\n check=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n logger.info(f'Command executed. Result={result.returncode}. Output={result.stdout}. 
Error={result.stderr}')\n time.sleep(TIME_BETWEEN_STOPS)\n\nif __name__ == '__main__':\n main()\n","repo_name":"FrancoLiberali/age_of_empires_ii_data_analysis","sub_path":"randomizer/randomizer.py","file_name":"randomizer.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"40661541857","text":"import logging\nimport operator\nfrom tqdm import tqdm\nfrom sklearn.linear_model import *\n\n\nfrom recommenders.recommender import Recommender\nfrom util import _is_intersection\n\nfrom recommenders.recommender_weights_base import RecommenderWeightsBase\n\nlogging.basicConfig(filename=\"recommendations.log\", level=logging.INFO)\n\n\nclass RecommenderWeightsLinear(RecommenderWeightsBase):\n def train(self):\n X = []\n y = []\n\n for device_id in tqdm(self.recommend_input_done.keys()):\n test_device_events, tips = self.recommend_input_done[device_id]\n recommendations = {}\n for i in range(len(self.algorithms)):\n recommendations[i] = self.algorithms[i].recommend_with_scores(test_device_events, tips)\n recommendations[i] = Recommender.normalize(recommendations[i])\n\n for tip in tips:\n x_value = []\n for i in range(len(self.algorithms)):\n if tip in recommendations[i].keys():\n x_value.append(recommendations[i][tip])\n else:\n x_value.append(0)\n if _is_intersection(self.user_to_done_tips[device_id], [tip]):\n X.append(x_value)\n y.append(1)\n\n cnt = 0\n for device_id in tqdm(self.recommend_input_not_done.keys()):\n if cnt == 300:\n break\n cnt += 1\n test_device_events, tips = self.recommend_input_not_done[device_id]\n recommendations = {}\n for i in range(len(self.algorithms)):\n recommendations[i] = self.algorithms[i].recommend_with_scores(test_device_events, tips)\n recommendations[i] = Recommender.normalize(recommendations[i])\n\n for tip in tips:\n x_value = []\n for i in range(len(self.algorithms)):\n if tip in recommendations[i].keys():\n x_value.append(recommendations[i][tip])\n else:\n x_value.append(0)\n if _is_intersection(self.user_to_not_done_tips[device_id], [tip]):\n X.append(x_value)\n y.append(0)\n self.model.fit(X, y)\n\n def __init__(self, train_devices, event_types, train_events, is_logging=True):\n if is_logging:\n logging.info(\"RecommenderWeightsLinear:init: init started.\")\n self.model = BayesianRidge()\n\n super(RecommenderWeightsLinear, self).__init__(train_devices, event_types, train_events, is_logging)\n\n def recommend(self, test_device_events, tips):\n recommendations = {}\n for i in range(len(self.algorithms)):\n recommendations[i] = self.algorithms[i].recommend_with_scores(test_device_events, tips)\n recommendations[i] = Recommender.normalize(recommendations[i])\n\n tip_to_score = {}\n for tip in tips:\n x_value = []\n for i in range(len(self.algorithms)):\n if tip in recommendations[i].keys():\n x_value.append(recommendations[i][tip])\n else:\n x_value.append(0)\n tip_to_score[tip] = self.model.predict([x_value])[0]\n\n sorted_tips = sorted(tip_to_score.items(), key=operator.itemgetter(1), reverse=True)\n\n sorted_tips = [tip[0] for tip in sorted_tips]\n return list(sorted_tips)\n","repo_name":"JetBrains-Research/feature-recommendations","sub_path":"docker/recommenders/recommender_weights_lin_reg.py","file_name":"recommender_weights_lin_reg.py","file_ext":"py","file_size_in_byte":3450,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"83"} +{"seq_id":"29549038803","text":"import numpy as np\nfrom matplotlib import 
pyplot as plt\n\nfrom DecisionTree.id3 import DecisionTree\nfrom DecisionTree.utils.data import get_attributes_and_labels, apply_thresholding\n\n\nclass Adaboost:\n def __init__(self, dataframe, features, labels, number_of_trees, test_x, test_y, impurity_type='entropy'):\n self.dataframe = dataframe\n self.features = features\n self.labels = labels\n self.max_depth = 2\n self.number_of_trees = number_of_trees\n self.impurity_type = impurity_type\n self.test_x = test_x\n self.test_y = test_y\n self.stumps = []\n self.stump_training_errors = []\n self.stump_testing_errors = []\n self.build_trees()\n\n def build_trees(self, save_errors=True):\n weights = np.ones(len(self.dataframe)) / len(self.dataframe)\n for _ in range(self.number_of_trees):\n stump = DecisionTree(self.dataframe, self.features, self.labels, self.max_depth, self.impurity_type)\n predictions = stump.predictions(self.dataframe)\n error = np.sum(weights[predictions != self.labels])\n tree_weight = 0.5 * np.log((1 - error) / error) # alpha_t\n tmp = predictions.apply(lambda row: 1 if row == \"yes\" else -1).astype(float)\n weights *= np.exp(-tree_weight * tmp)\n weights /= np.sum(weights)\n self.stumps.append((stump, tree_weight))\n\n if save_errors:\n self.stump_training_errors.append(stump.training_error(\"y\"))\n self.stump_testing_errors.append(stump.evaluate(self.test_x, self.test_y))\n\n def predict(self, row):\n return np.sign(np.sum([tree.predict(row) * weight for tree, weight in self.stumps]))\n\n def predictions(self, data):\n return data.apply(self.predict, axis=1)\n\n def evaluate(self, data, label):\n predictions = self.predictions(data)\n return np.mean(predictions != data[label])\n\n def training_error(self, label: str):\n return self.evaluate(self.dataframe, label)\n\n\nif __name__ == \"__main__\":\n train_filename = \"../Data/Bank/train.csv\"\n test_filename = \"../Data/Bank/test.csv\"\n adaboost_file = open(\"Adaboost_logs.txt\", 'w')\n adaboost_file.write(\"iteration\\t training_error\\t testing_error\\n\")\n\n columns = ['age', 'job', 'marital', 'education', 'default', 'balance', 'housing', 'loan', 'contact',\n 'day', 'month', 'duration', 'campaign', 'pdays', 'previous', 'poutcome', 'y']\n all_train, x_train, y_train = get_attributes_and_labels(filename=train_filename, columns=columns)\n all_test, x_test, y_test = get_attributes_and_labels(filename=test_filename, columns=columns)\n\n non_categorical_columns = ['age', 'balance', 'day', 'duration', 'campaign', 'pdays', 'previous']\n all_train = apply_thresholding(\n all_train,\n threshold=all_train[non_categorical_columns].median(),\n columns=non_categorical_columns\n )\n all_test = apply_thresholding(\n all_test,\n threshold=all_test[non_categorical_columns].median(),\n columns=non_categorical_columns\n )\n y_train = y_train.apply(lambda row: 1 if row == \"yes\" else -1).astype(float)\n all_train[\"y\"] = y_train\n y_test = y_test.apply(lambda row: 1 if row == \"yes\" else -1).astype(float)\n all_test[\"y\"] = y_test\n number_of_iterations = 500\n adaboost_training_errors = []\n adaboost_testing_errors = []\n for i in range(number_of_iterations):\n boost_classifier = Adaboost(\n dataframe=all_train,\n number_of_trees=i,\n features=x_train,\n labels=y_train,\n test_x=all_test,\n test_y=columns[-1]\n )\n training_error = boost_classifier.training_error(columns[-1])\n testing_error = boost_classifier.evaluate(all_test, columns[-1])\n adaboost_training_errors.append(training_error)\n adaboost_testing_errors.append(testing_error)\n adaboost_file.write(f\"{i}\\t 
{training_error}\\t {testing_error}\\n\")\n\n fig1 = plt.figure(1)\n ax2 = plt.axes()\n ax2.plot(list(range(1, number_of_iterations))*2, adaboost_training_errors, c='b', label='Train Error')\n ax2.plot(list(range(1, number_of_iterations))*2, adaboost_testing_errors, c='r', label='Test Error')\n ax2.set_title(\"Random Forest, Feature Subset Size = 2\")\n plt.xlabel('Iteration', fontsize=18)\n plt.ylabel('Error Rate', fontsize=16)\n plt.legend(['train', 'test'])\n plt.savefig(\"adaboost.png\")\n plt.show()","repo_name":"zahidemon/CS-6350-HW","sub_path":"EnsembleLearning/adaboost.py","file_name":"adaboost.py","file_ext":"py","file_size_in_byte":4506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"74041880912","text":"\"\"\" Module provide functions to make data \"\"\"\nimport os\nimport re\nfrom typing import List, Callable, Tuple, TypeVar\nimport requests\nimport pygit2\nimport pandas as pd\nfrom bs4 import BeautifulSoup\nfrom markdown import markdown\nfrom settings import ROOTDIR, HEADERS, VALID_RN_NUM, VALID_LINK_NUM, CM, PR, IS\n\nTime = TypeVar(\"Time\")\nMarkdown = TypeVar(\"Markdown\")\n\nclass MyRemoteCallbacks(pygit2.RemoteCallbacks):\n \"\"\" Define function to show state of cloning process \"\"\"\n def transfer_progress(self, stats):\n print(f'{stats.indexed_objects}/{stats.total_objects}')\n\n\ndef crawl_repos(result_path: str) -> None:\n \"\"\" Crawl Github repo with highest star number \n (Assume that the higher star number the higher project quality) \n Store result in result_path \"\"\"\n\n result = []\n for i in range(50):\n print(i + 1)\n resp = requests.get(f\"https://gitstar-ranking.com/repositories?page={i + 1}\")\n soup = BeautifulSoup(resp.text, \"html.parser\")\n repos_container = soup.find(\"div\", {\"class\": \"row\"})\n repos = repos_container.find_all('a')\n for repo in repos:\n result.append('/'.join(repo[\"href\"].split('/')[-2:])) \n result = pd.DataFrame({\"Repo\": result})\n result.to_csv(result_path)\n\n\ndef traverse_repos(repo_list_path: str, func: Callable[[str, str], None]) -> None:\n \"\"\" This function do func in range of all repositories in repo list file\"\"\"\n\n repos = pd.read_csv(repo_list_path)\n error_log = open(\"error_log.txt\", \"a+\")\n for repo in repos[\"Repo\"]:\n try:\n func(repo)\n except Exception as e:\n error_log.write((f\"Repo {repo} encounter error: {e.message if hasattr(e, 'message') else e} \"\n f\"in function {func.__name__}\\n\"))\n error_log.close()\n\n\ndef github_api(repo: str, component: str, func: Callable, params: str=\"\") -> List[str]:\n \"\"\" Get all specific component of element has type is type using github_api \"\"\"\n\n page = 1\n all_els = []\n while True:\n url =f\"https://api.github.com/repos/{repo}/{component}?{params}&per_page=100&page={page}\"\n try:\n response = requests.get(url, headers=HEADERS)\n response.raise_for_status()\n except requests.HTTPError:\n if response.status_code == 422:\n break\n else:\n raise IOError(\"Http Error\")\n except requests.Timeout:\n raise IOError(\"Timeout Error\")\n els = response.json()\n els_per_page = [func(el) for el in els]\n all_els += els_per_page\n # 100 is the limit of per_page param in github api\n if len(els) < 100: \n break\n page += 1\n\n return all_els\n\n\ndef crawl_rn(repo: str) -> Callable[[str, str, str, Callable], List[str]]:\n \"\"\" Crawl all release notes at repo\"\"\"\n\n print(repo)\n \n return github_api(repo, component=\"releases\", func=lambda el: el)\n\n\ndef crawl_pr(repo: str) -> 
Callable[[str, str, str, Callable], List[str]]:\n \"\"\" Crawl all pull requests of repo \"\"\"\n\n print(repo)\n \n return github_api(repo, component=\"pulls\", params=\"state=all\", func=lambda el: el)\n\n\ndef crawl_issue(repo: str) -> Callable[[str, str, str, Callable], List[str]]:\n \"\"\" Crawl all issues of repo \"\"\"\n\n print(repo)\n\n return github_api(repo, component=\"issues\", params=\"state=all\", func=lambda el: el)\n\n\ndef crawl_cm(repo: str) -> List[str]:\n \"\"\" Crawl all commits in repo \"\"\"\n\n folder = repo.replace('/', '_')\n path = os.path.join(ROOTDIR, \"repos\", folder)\n assert os.path.exists(path)\n cmd = f\"\"\" cd {path}\n git branch -a\"\"\"\n all_branches = os.popen(cmd).read().split('\\n')[:-1]\n all_branches = [branch.strip() for branch in all_branches if \"HEAD ->\" not in branch]\n all_commit_shas = set()\n for branch in all_branches[1:]:\n try:\n cmd = f\"\"\"cd {path}\n git rev-list {branch}\"\"\"\n commit_shas = os.popen(cmd).read()\n # Each line is a commit sha and the last line is empty line\n commit_shas = commit_shas.split('\\n')[:-1]\n all_commit_shas.update(commit_shas)\n except Exception:\n continue\n repo = pygit2.Repository(path)\n # Get commit message from commit sha\n commits = [repo.revparse_single(commit_sha) for commit_sha in all_commit_shas]\n # Get all commit message and commit sha\n commits = [\n {\n \"message\": commit.message, \n \"sha\": commit.hex, \n \"author\": commit.author, \n \"commit_time\": commit.commit_time, \n \"committer\": commit.committer\n }\n for commit in commits\n ]\n commits = pd.DataFrame(commits)\n return commits\n\n\ndef cm_spliter(message: str) -> Tuple[str, str]:\n \"\"\" Split commit into commit summary (the first line) and follow by commit description \"\"\"\n\n try:\n # Convert markdown into html\n html = markdown(message)\n soup = BeautifulSoup(html, \"html.parser\")\n lines = [p.text.strip() for p in soup.find_all('p')]\n summary= lines[0]\n description = \"<.> \".join(lines[1:])\n\n return summary, description\n except Exception:\n return None, None\n\n\ndef clone_repos(repo: str) -> None:\n \"\"\" Clone github repository \"\"\"\n\n folder = repo.replace('/', '_')\n path = os.path.join(ROOTDIR, \"repos\", folder)\n if os.path.exists(path):\n return None\n print(repo)\n pygit2.clone_repository(f\"https://github.com/{repo}\", path, callbacks=MyRemoteCallbacks())\n\n\ndef build_rn_info(repo: str) -> None:\n \"\"\" Get information of release notes at repo and store into a csv file at data/[repo] \"\"\"\n\n folder = repo.replace('/', '_')\n print(\"Repo:\",repo)\n folder_path = os.path.join(ROOTDIR, \"data\", folder)\n if not os.path.exists(folder_path):\n os.mkdir(folder_path)\n rn_info_path = os.path.join(folder_path, \"rn_info.csv\")\n # Release note info path exists mean that this repo is processed so pass it\n if os.path.exists(rn_info_path):\n return None\n try:\n # Crawl changelogs\n print(\"Start crawl release notes\")\n rn_info = crawl_rn(repo)\n print(\"Crawl release notes done\")\n assert rn_info is not None\n rn_info = pd.DataFrame(rn_info)\n rn_info.to_csv(rn_info_path)\n except Exception as e:\n print(\"Wrong implement at build_rn_info\")\n raise e\n\n\ndef build_cm_info(repo: str) -> None:\n \"\"\" Get information of commits at repo and store into a csv file at data/[repo] \"\"\"\n\n folder = repo.replace('/', '_')\n print(\"Repo:\", repo)\n folder_path = os.path.join(ROOTDIR, \"data\", folder)\n if not os.path.exists(folder_path):\n os.mkdir(folder_path)\n commit_path = 
os.path.join(folder_path, \"commit.csv\")\n    # If the commit file already exists, this repo has been processed, so skip it\n    # if os.path.exists(commit_path):\n    #     return None\n    try:\n        print(\"Start loading commits\")\n        commits = crawl_cm(repo)\n        print(\"Commits loaded\")\n        assert commits is not None\n        # Get commit summaries and commit descriptions\n        summa_des = [cm_spliter(commit)\n                     for commit in commits.loc[:, \"message\"]]\n        summaries, descriptions = zip(*summa_des)\n        commit_df = pd.DataFrame({\n            \"Summary\": summaries, \n            \"Description\": descriptions,\n            \"Sha\": commits[\"sha\"],\n            \"Author\": commits[\"author\"],\n            \"Committer\": commits[\"committer\"],\n            \"Commit Time\": commits[\"commit_time\"]\n        })\n        # Check commit messages\n        print(\"Num commit messages:\", len(commit_df))\n        print(\"\\\\n\")\n        print(\"==============================================\")\n        print(\"\\\\n\")\n        commit_df.to_csv(commit_path)\n    except Exception as e:\n        print(\"Error in build_cm_info\")\n        raise e\n\n\ndef build_pr_info(repo: str) -> None:\n    \"\"\" Get information of pull requests at repo and store into a csv file at data/[repo] \"\"\"\n\n    folder = repo.replace('/', '_')\n    print(\"Repo:\", repo)\n    folder_path = os.path.join(ROOTDIR, \"data\", folder)\n    if not os.path.exists(folder_path):\n        os.mkdir(folder_path)\n    pr_info_path = os.path.join(folder_path, \"pr_info.csv\")\n    # If the pull request info file already exists, this repo has been processed, so skip it\n    if os.path.exists(pr_info_path):\n        return None\n    try:\n        # Crawl pull requests\n        print(\"Start crawling pull requests\")\n        pr_info = crawl_pr(repo)\n        print(\"Crawling pull requests done\")\n        assert pr_info is not None\n        pr_info = pd.DataFrame(pr_info)\n        pr_info.to_csv(pr_info_path)\n    except Exception as e:\n        print(\"Error in build_pr_info\")\n        raise e\n\n\ndef build_issue_info(repo: str) -> None:\n    \"\"\" Get information of issues at repo and store into a csv file at data/[repo] \"\"\"\n\n    folder = repo.replace('/', '_')\n    print(\"Repo:\", repo)\n    folder_path = os.path.join(ROOTDIR, \"data\", folder)\n    if not os.path.exists(folder_path):\n        os.mkdir(folder_path)\n    issue_info_path = os.path.join(folder_path, \"issue_info.csv\")\n    \n    # If the issue info file already exists, this repo has been processed, so skip it\n    if os.path.exists(issue_info_path):\n        return None\n    try:\n        # Crawl issues\n        print(\"Start crawling issues\")\n        issue_info = crawl_issue(repo)\n        print(\"Crawling issues done\")\n        issue_info = pd.DataFrame(issue_info)\n        issue_info.to_csv(issue_info_path)\n    except Exception as e:\n        print(\"Error in build_issue_info\")\n        raise e\n\n\ndef make_data() -> None:\n    \"\"\" Define a pipeline to get data from top repositories on Github (sorted by stars) that satisfy\n    some rules for a specific problem \"\"\"\n\n    # crawl_repos(\"raw_repos.csv\")\n    # traverse_repos(\"valid_repos.csv\", clone_repos)\n    # traverse_repos(\"valid_repos.csv\", build_rn_info)\n    # traverse_repos(\"valid_repos.csv\", build_cm_info)\n    # traverse_repos(\"valid_repos.csv\", build_pr_info)\n    # traverse_repos(\"valid_repos.csv\", build_issue_info)\n","repo_name":"lvdthieu/CC","sub_path":"make_data.py","file_name":"make_data.py","file_ext":"py","file_size_in_byte":10152,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"29163128870","text":"import random\nimport json\nimport os\n\nimport fire\nimport wandb\nimport torch\nimport numpy as np\nfrom transformers import AutoTokenizer, AutoModelForCausalLM, 
DataCollatorForTokenClassification, AutoConfig\nfrom transformers import Trainer, TrainingArguments, logging, TrainerCallback, TrainerState, TrainerControl, BitsAndBytesConfig\nfrom transformers.trainer_utils import PREFIX_CHECKPOINT_DIR\nfrom peft import get_peft_model, LoraConfig, prepare_model_for_kbit_training\n\nfrom src.dataset import ChatDataset\nfrom src.util.dl import set_random_seed, fix_tokenizer, fix_model\nfrom src.util.io import read_jsonl\n\nos.environ[\"TOKENIZERS_PARALLELISM\"] = \"false\"\n\n\nclass TrainerNoBaseSave(Trainer):\n def __init__(self, *args, **kwargs):\n return super().__init__(*args, **kwargs)\n\n def _save_checkpoint(self, model, trial, metrics=None):\n print(\"Running custom _save_checkpoint\")\n checkpoint_folder = f\"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}\"\n run_dir = self._get_output_dir(trial=trial)\n output_dir = os.path.join(run_dir, checkpoint_folder)\n\n if metrics is not None and self.args.metric_for_best_model is not None:\n metric_to_check = self.args.metric_for_best_model\n if not metric_to_check.startswith(\"eval_\"):\n metric_to_check = f\"eval_{metric_to_check}\"\n metric_value = metrics[metric_to_check]\n operator = np.greater if self.args.greater_is_better else np.less\n if (\n self.state.best_metric is None\n or self.state.best_model_checkpoint is None\n or operator(metric_value, self.state.best_metric)\n ):\n self.state.best_metric = metric_value\n self.state.best_model_checkpoint = output_dir\n\n os.makedirs(output_dir, exist_ok=True)\n if self.args.should_save:\n self._rotate_checkpoints(use_mtime=True, output_dir=run_dir)\n\n\nclass SavePeftModelCallback(TrainerCallback):\n def on_save(\n self,\n args: TrainingArguments,\n state: TrainerState,\n control: TrainerControl,\n **kwargs,\n ):\n checkpoint_path = f\"{PREFIX_CHECKPOINT_DIR}-{state.global_step}\"\n checkpoint_folder = os.path.join(args.output_dir, checkpoint_path)\n kwargs[\"model\"].save_pretrained(checkpoint_folder)\n return control\n\n\ndef custom_prepare_model_for_int8_training(\n model,\n output_embedding_layer_name=\"lm_head\",\n layer_norm_names=[\"layer_norm\"]\n):\n for name, param in model.named_parameters():\n param.requires_grad = False\n\n for name, param in model.named_parameters():\n if param.ndim == 1 and any(layer_norm_name in name for layer_norm_name in layer_norm_names):\n param.data = param.data.to(torch.float32)\n\n if hasattr(model, \"enable_input_require_grads\"):\n model.enable_input_require_grads()\n else:\n def make_inputs_require_grad(module, input, output):\n output.requires_grad_(True)\n model.get_input_embeddings().register_forward_hook(make_inputs_require_grad)\n\n if hasattr(model, output_embedding_layer_name):\n output_embedding_layer = getattr(model, output_embedding_layer_name)\n input_dtype = output_embedding_layer.weight.dtype\n\n class CastOutputToFloat(torch.nn.Sequential):\n def forward(self, x):\n return super().forward(x.to(input_dtype)).to(torch.float32)\n setattr(model, output_embedding_layer_name, CastOutputToFloat(output_embedding_layer))\n\n model.gradient_checkpointing_enable()\n\n return model\n\n\ndef train(\n config_file: str,\n train_file: str,\n val_file: str,\n output_dir: str,\n checkpoint: str = None,\n sample_rate: float = 1.0,\n report_to: str = \"wandb\",\n seed: int = 42,\n use_flash_attention_2: bool = False\n):\n set_random_seed(seed)\n logging.set_verbosity_info()\n with open(config_file, \"r\") as r:\n config = json.load(r)\n\n device_map = \"auto\"\n world_size = int(os.environ.get(\"WORLD_SIZE\", 
1))\n    ddp = world_size != 1\n\n    deepspeed_config = config.get(\"deepspeed\")\n    trainer_config = config.get(\"trainer\")\n    lora_config = config.get(\"lora\")\n    callbacks = [SavePeftModelCallback] if lora_config else []\n    if ddp:\n        # Split gradient accumulation across workers; this must happen before\n        # trainer_config is consumed by TrainingArguments below, otherwise the\n        # adjustment has no effect\n        device_map = {\"\": int(os.environ.get(\"LOCAL_RANK\") or 0)}\n        gradient_accumulation_steps = trainer_config[\"gradient_accumulation_steps\"]\n        gradient_accumulation_steps = gradient_accumulation_steps // world_size\n        trainer_config[\"gradient_accumulation_steps\"] = gradient_accumulation_steps\n    training_args = TrainingArguments(\n        output_dir=output_dir,\n        save_total_limit=1,\n        load_best_model_at_end=True,\n        report_to=report_to,\n        ddp_find_unused_parameters=False if ddp else None,\n        deepspeed=deepspeed_config,\n        **trainer_config\n    )\n    model_name = config[\"model_name\"]\n\n    tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=False)\n    model_config = AutoConfig.from_pretrained(model_name)\n    tokenizer = fix_tokenizer(tokenizer, model_config)\n    tokenizer.save_pretrained(output_dir)\n\n    train_records = read_jsonl(train_file)\n    val_records = read_jsonl(val_file)\n    random.shuffle(train_records)\n    print(train_records[0])\n\n    model_type = config.get(\"model_type\", \"causal\")\n    templates_path = config[\"templates_path\"]\n    only_target_loss = config.get(\"only_target_loss\", True)\n    mode = config.get(\"mode\", \"chat\")\n    assert mode == \"chat\", \"Only chat mode is supported in new versions!\"\n    assert model_type == \"causal\", \"Only causal models are supported in new versions!\"\n    max_tokens_count = config[\"max_tokens_count\"]\n\n    datasets = []\n    for records in (train_records, val_records):\n        datasets.append(ChatDataset(\n            records,\n            tokenizer,\n            max_tokens_count=max_tokens_count,\n            sample_rate=sample_rate,\n            templates_path=templates_path,\n            only_target_loss=only_target_loss\n        ))\n    train_dataset, val_dataset = datasets\n    data_collator = DataCollatorForTokenClassification(tokenizer, pad_to_multiple_of=8)\n\n    print(\"INPUT_IDS\")\n    print(data_collator([train_dataset[0], train_dataset[1]])[\"input_ids\"][0])\n    print(\"MASK\")\n    print(data_collator([train_dataset[0], train_dataset[1]])[\"attention_mask\"][0])\n    print(\"LABELS\")\n    print(data_collator([train_dataset[0], train_dataset[1]])[\"labels\"][0])\n\n    model_types = {\n        \"causal\": AutoModelForCausalLM,\n    }\n    load_in_8bit = bool(config.get(\"load_in_8bit\", False))\n    load_in_4bit = bool(config.get(\"load_in_4bit\", False))\n    use_bf16 = bool(trainer_config.get(\"bf16\", False))\n    torch_dtype = torch.bfloat16 if use_bf16 else torch.float16\n    if load_in_8bit:\n        assert not load_in_4bit\n        model = model_types[model_type].from_pretrained(\n            model_name,\n            load_in_8bit=True,\n            device_map=device_map,\n            torch_dtype=torch_dtype,\n            use_flash_attention_2=use_flash_attention_2\n        )\n        model = fix_model(model, tokenizer, use_resize=False)\n        model = custom_prepare_model_for_int8_training(model)\n\n    elif load_in_4bit:\n        assert not load_in_8bit\n        model = model_types[model_type].from_pretrained(\n            model_name,\n            load_in_4bit=True,\n            device_map=device_map,\n            quantization_config=BitsAndBytesConfig(\n                load_in_4bit=True,\n                llm_int8_threshold=6.0,\n                llm_int8_has_fp16_weight=False,\n                bnb_4bit_compute_dtype=torch_dtype,\n                bnb_4bit_use_double_quant=True,\n                bnb_4bit_quant_type=\"nf4\"\n            ),\n            torch_dtype=torch_dtype\n        )\n        model = fix_model(model, tokenizer, use_resize=False)\n        model = prepare_model_for_kbit_training(model)\n\n    else:\n        model = model_types[model_type].from_pretrained(model_name)\n        model = fix_model(model, tokenizer)\n\n    # 
Default model generation params\n model.config.num_beams = 5\n model.config.max_length = max_tokens_count\n\n if not ddp and torch.cuda.device_count() > 1:\n model.is_parallelizable = True\n model.model_parallel = True\n\n if lora_config:\n lora_config = LoraConfig(**lora_config)\n model = get_peft_model(model, lora_config)\n\n trainer = TrainerNoBaseSave(\n model=model,\n args=training_args,\n train_dataset=train_dataset,\n eval_dataset=val_dataset,\n callbacks=callbacks,\n data_collator=data_collator\n )\n\n if trainer_config.get(\"report_to\", \"wandb\") == \"wandb\":\n wandb.init(project=\"rulm_self_instruct\", name=config_file)\n\n trainer.train(checkpoint)\n model.save_pretrained(output_dir)\n\n\nif __name__ == \"__main__\":\n fire.Fire(train)\n","repo_name":"IlyaGusev/rulm","sub_path":"self_instruct/src/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":8869,"program_lang":"python","lang":"en","doc_type":"code","stars":291,"dataset":"github-code","pt":"83"} +{"seq_id":"5005774780","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution:\n # @param A : head node of linked list\n # @return the head node in the linked list\n def deleteDuplicates(self, A):\n if (A == None):\n return A\n \n ptr1, ptr2 = A, A.next\n if (ptr2 == None):\n return A\n \n while(ptr1 != None and ptr2 != None):\n if (ptr1.val == ptr2.val):\n ptr1.next = ptr2.next\n ptr2 = ptr1.next\n else:\n ptr1 = ptr1.next\n ptr2 = ptr1.next\n return A\n","repo_name":"neeraj2681/InterviewBit-Practice","sub_path":"programming/linked_lists/from_sorted_list/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"42425834775","text":"from util import *\n\n\n\n@apply(given=None)\ndef apply(given, index=None, reverse=False):\n p, q = given.of(Infer)\n if index is None:\n if p.is_Equal:\n old, new = p.args\n else:\n eqs = p.of(And)\n for eq in eqs:\n if eq.is_Equal:\n old, new = eq.args\n break\n else:\n eqs = p.of(And)\n old, new = axiom.is_Equal(eqs[index])\n\n if reverse:\n old, new = new, old\n q = q._subs(old, new)\n return Equivalent(given, Infer(p, q), evaluate=False)\n\n\n@prove\ndef prove(Eq):\n from axiom import algebra\n x, y = Symbol(real=True)\n A = Symbol(etype=dtype.real)\n f, g = Function(real=True)\n\n Eq << apply(Infer(Equal(f(x), x + 1) & Element(x, A), Equal(g(f(x)), y)))\n\n Eq.suffice, Eq.necessary = algebra.iff.given.et.apply(Eq[-1])\n\n Eq << Eq.suffice.this.lhs.apply(algebra.infer.imply.infer.et, index=0)\n\n Eq << Eq[-1].this.lhs.rhs.apply(algebra.eq.cond.imply.cond.subs)\n\n Eq << Eq.necessary.this.rhs.apply(algebra.infer.imply.infer.et, index=0)\n\n Eq << Eq[-1].this.rhs.rhs.apply(algebra.eq.cond.imply.cond.subs, reverse=True)\n\n\nif __name__ == '__main__':\n run()\n\nfrom . 
import bool\n# created on 2018-02-06\n","repo_name":"cosmosZhou/sympy","sub_path":"axiom/algebra/infer/subs/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"83"} +{"seq_id":"38660437496","text":"''' Multivariate Linear Regression using all features (all-in approach) '''\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndataset = pd.read_csv(\"50_Startups.csv\")\n\n# Step 1: Separate X (input) and Y (target)\nX_matrix = dataset.iloc[:, :-1].values\nY_matrix = dataset.iloc[:, [-1]].values\n\n# Step 2: Replace Missing Data (None)\n\n# Step 3: Encode Categorical Data\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\nencoder = LabelEncoder()\nX_matrix[:, -1] = encoder.fit_transform(X_matrix[:, -1])\n\nhot_encoder = OneHotEncoder(categorical_features=[3])\nX_matrix = hot_encoder.fit_transform(X_matrix).toarray()\n\n# Multiple Collinearity: select n-1 dummy variables\nX_matrix = X_matrix[:, 1:]\n\n# Step 4: train_test_split\nfrom sklearn.cross_validation import train_test_split\nX_train, X_test, Y_train, Y_test = train_test_split(X_matrix, Y_matrix, \\\n                                                    test_size=0.2, \\\n                                                    random_state=0)\n\n''' Multivariate Linear Regression w/ LinearRegression '''\nfrom sklearn.linear_model import LinearRegression\nregressor = LinearRegression()\n# As of now, this is an \"All-In\" approach -- we use all features\nregressor.fit(X_train, Y_train)\n\n# Predicting test set results\nY_test_predictions = regressor.predict(X_test)\n\n# Would need contour plot for multi-dimensional plotting","repo_name":"PSP17SCM45P/learn_ml","sub_path":"intro sci-kit/regression/multivariate linear regression/multiple_linear_regression.py","file_name":"multiple_linear_regression.py","file_ext":"py","file_size_in_byte":1390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"13942315166","text":"valid_pin=303030\ncurrent_account_bal=400000\nname=input(\"Please enter your name\")\npin=int(input(\"Please enter account pin.\"))\ndef atm():\n    if pin==valid_pin:\n        amount=int(input(\"How much are you withdrawing?\"))\n    else:\n        print(\"Your pin is incorrect.\")\n        # bail out early so amount is never referenced while undefined\n        return None\n    if amount < current_account_bal:\n        new_bal=current_account_bal-amount\n        print(name,\",\", amount,\"has been withdrawn from your account.\")\n        print(\"Balance is:\",new_bal)\n    else:\n        print(\"Your account Balance is insufficient.\")\n    return amount\natm()","repo_name":"bbagyema/py_practice","sub_path":"input_output.py","file_name":"input_output.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"10714755850","text":"\r\nimport vk_api\r\n\r\nsession = vk_api.VkApi(token=\"\")\r\nvk = session.get_api()\r\n\r\ndef send_msg(_user_id):\r\n    vk.messages.send(peer_id = _user_id, \r\n                    message = \"Match is 1\",\r\n                    random_id = 0 )\r\n\r\nsend_msg(344092825)\r\n","repo_name":"eastpriSs/Training_ofCode","sub_path":"autoAccept/AutoAccept_wMsg_vk/sources/send_msg.pyw","file_name":"send_msg.pyw","file_ext":"pyw","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"74337827720","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Mar  9 11:15:51 2022\n\n@author: user\n\"\"\"\nimport urllib\nimport requests\nfrom bs4 import BeautifulSoup\nmy_headers={'User-Agent':'Mozilla/5.0 (Windows NT 10.0; 
Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.51 Safari/537.36'}\nmy_params={'q':'寒流'}\n#data = requests.get('https://www.google.com/search?q=%E5%AF%92%E6%B5%81',headers= my_headers)\ndata = requests.get('https://www.google.com/search',headers= my_headers,params=my_params)\nif data.status_code==200:\n    soup = BeautifulSoup(data.text,'html.parser')\n    # print(soup)\n    items=soup.select('div.v7W49e h3')\n    itemslink=soup.select('div.v7W49e a')\n    for t,l in zip(items,itemslink):\n        print('Title: ',t.text.strip())\n        print('URL: '+urllib.parse.unquote(l.get('href'))) \n        # urllib.parse.unquote decodes the percent-encoded URL back into readable (Chinese) text\n    \n    \n    \n    \n    ","repo_name":"git-kenjr/test","sub_path":"google.py","file_name":"google.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"6041884301","text":"from flask import Flask, redirect, request\nfrom stravalib.client import Client\nimport requests\nimport os\nimport time\n\nCLIENTID = 50880\nCLIENTSECRET = \"179f538ea081f2553c200441892e8fde3dc5255e\"\n\napi = Flask(__name__)\nclient = Client()\n\n\n@api.route(\"/\")\ndef root():\n    return(\"Welcome to strava-oauth\")\n\n\n@api.route(\"/authorize\")\ndef authorize():\n    \"\"\"Redirect user to the Strava Authorization page\"\"\"\n    authorize_url = client.authorization_url(client_id=CLIENTID, redirect_uri='http://localhost:5000/authorization_successful')\n    return redirect(authorize_url)\n\n\n@api.route(\"/authorization_successful\")\ndef authorization_successful():\n    \"\"\"Exchange code for a user token\"\"\"\n    params = {\n        \"client_id\": CLIENTID,\n        \"client_secret\": CLIENTSECRET,\n        \"code\": request.args.get('code'),\n        \"grant_type\": \"authorization_code\"\n    }\n\n    r = requests.post(\"https://www.strava.com/oauth/token\", params)\n\n    client.token_expires_at = r.json()[\"expires_at\"]\n    client.access_token = r.json()[\"access_token\"]\n    client.refresh_token = r.json()[\"refresh_token\"]\n    return \"Authorization successful\"\n\n\n@api.route(\"/token\") \ndef token():\n    if time.time() > client.token_expires_at:\n        refresh_response = client.refresh_access_token(client_id=CLIENTID, client_secret=CLIENTSECRET,\n                                                       refresh_token=client.refresh_token)\n        access_token = refresh_response['access_token']\n        refresh_token = refresh_response['refresh_token']\n        expires_at = refresh_response['expires_at']\n        client.token_expires_at = expires_at\n        client.access_token = access_token\n        client.refresh_token = refresh_token\n    else:\n        print(client.access_token)\n\n    return \"{\\\\\"accessToken\\\\\": \\\\\"\" + client.access_token + \"\\\\\"}\"\n\nif __name__ == '__main__':\n    api.run()","repo_name":"mayamshah/raahi","sub_path":"strava/strava_server/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1888,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"35037361863","text":"from bge import logic, render\n\nfrom math import radians\n\n# Default look sensitivity, defined at module level so _mouselook_core can read it\n# (values match the defaults used in make_mouselook_fps below)\nsensitivity_x = 30\nsensitivity_y = 40\n\ndef _mouselook_core(self, id):\n\tdeadzone = 0.001 # used to prevent floating when mouse isn't moving\n\tscreen_x = 0.5 - logic.mouse.position[0]\n\tif -deadzone < screen_x < deadzone:\n\t\tscreen_x = 0\n\tscreen_y = 0.5 - logic.mouse.position[1]\n\tif -deadzone < screen_y < deadzone:\n\t\tscreen_y = 0\n\n\tself.applyRotation([screen_y*sensitivity_x, 0, screen_x*sensitivity_y], True, per_second=True)\n\n\trender.setMousePosition(int(render.getWindowWidth() / 2), 
int(render.getWindowHeight() / 2))\n\ndef mouselook_6dof(self, id):\n\t_mouselook_core(self, id)\n\n\ndef mouselook_6dof_planar(self, id):\n\teuler = self.worldOrientation.to_euler()\n\tif euler.y > radians(30):\n\t\teuler.y = radians(30)\n\telif euler.y < radians(-30):\n\t\teuler.y = radians(-30)\n\telif euler.y < -0.01:\n\t\teuler.y += 0.002\n\telif euler.y > 0.01:\n\t\teuler.y -= 0.002\n\tself.worldOrientation = euler.to_matrix()\n\n\t_mouselook_core(self, id)\n\n\ndef make_mouselook_fps():\n\tsensitivity_x = 30\n\tsensitivity_y = 40\n\tdef mouselook_fps_callback(self, id):\n\t\t_mouselook_core(self,id)\n\n\t\teuler = self.worldOrientation.to_euler()\n\t\teuler.y = 0\n\t\tself.worldOrientation = euler.to_matrix()\n\n\treturn mouselook_fps_callback\n","repo_name":"GalanCM/BGELive","sub_path":"live/components/control.py","file_name":"control.py","file_ext":"py","file_size_in_byte":1204,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"} +{"seq_id":"34663285159","text":"import turtle\n\nalex = turtle.Turtle()\n\ndot_distance = 30\nwidth = 5\nheight = 7\n\nalex.penup()  # Pen up: the turtle will no longer draw on the screen\n\nfor i in range(height):\n    for j in range(width):\n        alex.dot()  # Draw a dot on the screen\n        alex.forward(dot_distance)  # Move without touching the screen\n    # Go back to the start of the row\n    alex.backward(dot_distance * width)\n    # Move down to the next row\n    alex.right(90)\n    alex.forward(dot_distance)\n    alex.left(90)\n\nturtle.done()","repo_name":"LarissaRC/python-turtle","sub_path":"nested_loop.py","file_name":"nested_loop.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"4516769888","text":"from data import MidiDataTFRecordProvider\nfrom preprocessing import MidiPreprocessor\n\nfrom ddsp.colab import colab_utils\nimport ddsp\nfrom matplotlib import pyplot as plt\nimport numpy as np\nimport os\nimport tensorflow.compat.v2 as tf\nimport tensorboard as tb\nfrom absl import logging\nimport time\nfrom ddsp.training.preprocessing import DefaultPreprocessor\nfrom load_midi import load_midi\nimport io\nimport librosa\nfrom plot_prediction import plot_prediction\nimport collections\n\n\ndef tr(x): return '%.6f' % (x)\n\n\ndef write_audio_dict_to_summary(audio_dict: dict, summary_writer, step, SAMPLE_RATE):\n    with summary_writer.as_default():\n        for key, value in audio_dict.items():\n            tf.summary.audio(\n                key, value, SAMPLE_RATE, step=step, max_outputs=8)\n\n\ndef save_audio_from_dict(audio_dict: dict, save_path: str, SAMPLE_RATE: int, prefix=\"\"):\n    for key, value in audio_dict.items():\n        librosa.output.write_wav(\n            save_path+\"/\"+prefix+key+\".wav\", tf.squeeze(value[0, ...]).numpy(), SAMPLE_RATE, norm=False)\n\n\ndef generate_audio_examples(control_model, synthesis_model, batch, is_midi=False):\n\n    example_dict = {}\n\n    if not is_midi:\n        real_performance_batch = batch\n\n        real_performance = synthesis_model.call(batch)\n\n        example_dict = {**example_dict,\n                        \"real_performance_synthesized\": real_performance[..., None]}\n\n    target = batch[\"audio\"][..., None]\n\n    naive = naive_audio_example(\n        control_model, synthesis_model, batch)\n\n    generated_performance = generated_performance_audio_example(control_model,\n                                                                synthesis_model, batch)\n\n    # pre_vibrato_performance = generated_performance_audio_example(control_model,\n    #     synthesis_model, batch, vibrato_level=0.002, vibrato_hz=6.0)\n\n    # post_vibrato_performance = 
generated_performance_audio_example(control_model,\n # synthesis_model, batch, vibrato_level=0.002, vibrato_hz=6.0, vibrato_before_synthesis_model=False)\n\n # eighth_resample_performance = generated_performance_audio_example(control_model,\n # synthesis_model, batch, resample_ratio=0.125)\n\n # quarter_resample_performance = generated_performance_audio_example(control_model,\n # synthesis_model, batch, resample_ratio=0.25)\n\n if is_midi:\n\n naive = tf.math.reduce_sum(\n naive, axis=0, keepdims=True)\n\n generated_performance = tf.math.reduce_sum(\n generated_performance, axis=0, keepdims=True)\n\n example_dict = {**example_dict,\n \"naive\": naive,\n \"generated_performance\": generated_performance,\n \"target\": target,\n # \"generated_performance_w_previbrato\": pre_vibrato_performance,\n # \"generated_performance_w_postvibrato\": post_vibrato_performance,\n # \"eighth_resample\": eighth_resample_performance,\n # \"quarter_resample\": quarter_resample_performance\n }\n\n return example_dict\n\n\ndef naive_audio_example(control_model, synthesis_model, batch):\n\n batch = control_model.preprocess(batch)\n\n naive_synth_inputs = {\n \"ld_scaled\": batch[\"midi_velocity_scaled\"],\n \"f0_scaled\": batch[\"midi_pitch_scaled\"],\n \"f0_hz\": ddsp.core.midi_to_hz(batch[\"midi_pitch_scaled\"]*127.0)\n }\n\n naive_audio = synthesis_model.decode(naive_synth_inputs)\n\n return naive_audio[..., None]\n\n\ndef qt_f0_hz(f0_hz):\n\n f0_midi = ddsp.core.hz_to_midi(f0_hz)\n\n f0_midi = tf.math.round(f0_midi)\n\n f0_hz = ddsp.core.midi_to_hz(f0_midi)\n\n return f0_hz\n\n\ndef generated_performance_audio_example(control_model, synthesis_model, batch, vibrato_level=0.0, vibrato_hz=0.0, resample_ratio=1.0, vibrato_before_synthesis_model=True):\n\n performance_params = control_model(batch, training=False)\n\n n_frames = performance_params[\"predicted_ld_scaled\"].shape[1]\n\n MIDI_FRAME_RATE = 250.0\n vibrato_unit = tf.math.sin(tf.linspace(\n 0.0, n_frames/MIDI_FRAME_RATE, n_frames)*vibrato_hz*2.0*3.14)[None, ..., None]\n\n vibrato = vibrato_unit*vibrato_level\n\n vibrato_f0_scaled = vibrato + performance_params[\"predicted_f0_scaled\"]\n\n ld_scaled = performance_params[\"predicted_ld_scaled\"]\n\n f0_hz = ddsp.core.midi_to_hz(vibrato_f0_scaled*127.0)\n\n if vibrato_before_synthesis_model:\n f0_scaled = vibrato_f0_scaled\n else:\n f0_scaled = performance_params[\"predicted_f0_scaled\"]\n\n if resample_ratio < 1.0:\n\n ld_scaled = ddsp.core.resample(\n ld_scaled, int(ld_scaled.shape[1]*resample_ratio))\n f0_scaled = ddsp.core.resample(\n f0_scaled, int(f0_scaled.shape[1]*resample_ratio))\n f0_hz = ddsp.core.resample(\n f0_hz, int(f0_hz.shape[1]*resample_ratio))\n\n synth_inputs = {\n \"ld_scaled\": ld_scaled,\n \"f0_scaled\": f0_scaled,\n \"f0_hz\": f0_hz\n }\n\n performance_audio = synthesis_model.decode(synth_inputs)\n\n performance_params = control_model(batch, training=False)\n\n return performance_audio[..., None]\n\n\ndef save(model, optimizer, epoch, save_dir):\n \"\"\"Saves model and optimizer to a checkpoint.\"\"\"\n # Saving weights in checkpoint format because saved_model requires\n # handling variable batch size, which some synths and effects can't.\n start_time = time.time()\n checkpoint = tf.train.Checkpoint(\n model=model, optimizer=optimizer)\n manager = tf.train.CheckpointManager(\n checkpoint, directory=save_dir, max_to_keep=5)\n step = epoch\n manager.save(checkpoint_number=step)\n logging.info('Saved checkpoint to %s at step %s', save_dir, step)\n logging.info('Saving model took %.1f 
seconds',\n                 time.time() - start_time)\n\n\ndef restore(model, optimizer, epoch, checkpoint_path):\n    \"\"\"Restore model and optimizer from a checkpoint if it exists.\"\"\"\n    logging.info('Restoring from checkpoint...')\n    start_time = time.time()\n\n    # Restore from latest checkpoint.\n    checkpoint = tf.train.Checkpoint(\n        model=model, optimizer=optimizer)\n    latest_checkpoint = ddsp.training.train_util.get_latest_chekpoint(\n        checkpoint_path)\n    if latest_checkpoint is not None:\n        # checkpoint.restore must be within a strategy.scope() so that optimizer\n        # slot variables are mirrored.\n        checkpoint.restore(latest_checkpoint)\n        logging.info('Loaded checkpoint %s', latest_checkpoint)\n        logging.info('Loading model took %.1f seconds',\n                     time.time() - start_time)\n        epoch = int(latest_checkpoint.split(\"ckpt-\")[1])\n    else:\n        logging.info('No checkpoint, skipping.')\n        epoch = 0\n\n    return epoch\n\n\ndef plot_conditionings(out):\n\n    N_FT_FRAMES = 1000\n\n    out = {key: value[:, 0:N_FT_FRAMES] for key, value in out.items(\n    ) if isinstance(value, collections.Hashable)}\n\n    plots = []\n\n    for i in range(out[\"ld_scaled\"].shape[0]):\n\n        fig = plot_prediction(out[\"f0_scaled\"][i, :, 0], out[\"ld_scaled\"][i, :, 0], out[\"predicted_f0_scaled\"][i, :, 0],\n                              out[\"predicted_ld_scaled\"][i, :, 0], out[\"midi_pitch\"][i, :, 0], out[\"midi_velocity\"][i, :, 0])\n\n        plots.append(figure2tensor(fig))\n\n        plt.clf()\n\n    return plots\n\n\ndef figure2tensor(fig):\n    DPI = 100\n    io_buf = io.BytesIO()\n    fig.savefig(io_buf, format='png', dpi=DPI)\n    io_buf.seek(0)\n    im = tf.image.decode_png(io_buf.getvalue(), channels=4)\n    io_buf.close()\n    return im\n","repo_name":"erl-j/control-synthesis","sub_path":"trn_lib.py","file_name":"trn_lib.py","file_ext":"py","file_size_in_byte":7771,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"63"} +{"seq_id":"3380570462","text":"# -*- coding: utf-8 -*-\nimport scrapy, json\nimport math\nfrom lxml import etree\nfrom ..items import DataFangItem, DataDynamicJson, DataCommentJson, DataHouseapartment, ImageItem\nimport re, io\nfrom bs4 import BeautifulSoup\nfrom datetime import datetime\nfrom time import sleep\n\n\nclass FangtianxiaSpider(scrapy.Spider):\n    name = \"fangtianxia\"\n    allowed_domains = [\"fang.com\"]\n    # city_link_list = io.open(r\"/home/kevin/work/data_fang/data_fang/spiders/城市url\", \"r\", encoding=\"gbk\")\n    city_link_list = ['http://bj.fang.com/']\n    # city_link_list = city_link_list.readline()\n\n\n    for link in city_link_list:\n        start_urls = link.replace(\"\\\\n\", \"\")\n        start_urls = [start_urls]\n\n    def __init__(self):\n        super(FangtianxiaSpider, self).__init__()\n\n        self.dynamic_urls = []\n        self.dynamicJson = []\n        self.house_list = []\n\n    def parse(self, response):\n        # Parse each city\n        new_house = response.xpath(\n            \"//div[@class='newnav20141104nr']//div/a[contains(text(),'新房')]/@href\").extract()[0]  # get the new-house URL\n        new_house = re.sub(r\"\\\\?\\\\w+\\\\=\\\\w+\", \"\", new_house)  # strip everything after the '?'\n        print('New house info:', self.parse_all_house)\n        yield scrapy.Request(new_house, callback=self.parse_all_house)\n\n    def parse_all_house(self, response):\n        # Parse all house listings\n        url = response.url\n\n        all_house = response.xpath(\"//*[@class='clearfix']/div/a/@href \").extract()  # get all listing URLs on the current page\n        for one_house in all_house:\n            house = u\"https:\" + one_house\n            house = re.sub(r\"\\\\?\\\\w+=\\\\w+_\\\\w+\", \"\", house)  # strip everything after the '?'\n            self.house_list.append(house)\n\n        # The_next_page = response.xpath(\n        #     '//li[@class=\"floatr rankWrap\"]/div/a[contains(text(),\">\")]/@href').extract_first()  # 
get the next page\n        # if The_next_page != None:\n        #     The_next_page = url + The_next_page\n        #     yield scrapy.Request(The_next_page, callback=self.parse_all_house)\n\n        the_next_page = response.xpath(\n            '//li[@class=\"floatr rankWrap\"]/div/a[contains(text(),\">\")]/@href').extract_first()  # get the next page\n        if the_next_page is None:\n            url = self.house_list.pop()\n            yield scrapy.Request(url, callback=self.home_page)\n\n        else:\n            the_next_page_url = url + the_next_page\n            yield scrapy.Request(the_next_page_url, callback=self.parse_all_house)\n\n    def home_page(self, response):\n        # Parse a listing's home page to reach its detail pages\n        item = DataFangItem()\n        item['_id'] = response.url\n        item['subarea'] = response.xpath('//div[@class=\"br_left\"]//ul[@class=\"tf f12\"]//li[3]/a/text()').extract()\n        item['subarea'] = \"\".join(item['subarea'])  # slice the string: drop the last two characters\n        item['subarea'] = item['subarea'][:-2]\n        item['area'] = response.xpath('//div[@class=\"s2\"]/div/a/text()').extract()  # current city\n\n\n\n\n        positioning = response.xpath(\"//div[@class='mapbox_dt']/iframe/@src\").extract_first()  # get the building's map-location URL\n        positioning = u\"https:\" + positioning\n\n        particulars = response.xpath(\"//*[@class='navleft tf']//a[contains(text(),'详情')]/@href|\"\n                                     \"//*[@class='navleft tf']//a[contains(text(),'详细')]/@href\").extract()  # building details\n        particulars = \"\".join(particulars)\n        particulars = u\"https:\" + particulars\n\n        # yield scrapy.Request(positioning, meta={\"item\": item, \"xiangqing\": particulars}, callback=self.positioning)  # uncomment to crawl details\n\n        try:\n            dynamic = response.xpath(\"//*[@class='navleft tf']//a[contains(text(),'动态')]/@href\").extract()  # building updates\n            dynamic = \"\".join(dynamic)\n            dynamic = u\"https:\" + dynamic\n            yield scrapy.Request(dynamic, callback=self.parse_dynamic)\n        except Exception as e:\n            pass\n        url = self.house_list.pop()\n        yield scrapy.Request(url, callback=self.home_page)\n\n        \"\"\"Uncomment to crawl reviews\"\"\"\n        # try:\n        #     comments = response.xpath(\n        #         \"//*[@class='navleft tf']//a[contains(text(),'点评')]/@href\").extract_first()  # building reviews\n        #     comments = \"\".join(comments)\n        #     comments = u\"https:\" + comments\n        #     yield scrapy.Request(comments, callback=self.parse_comments)\n        # except Exception as e:\n        #     pass\n\n        \"\"\"Uncomment to crawl floor plans\"\"\"\n        # try:\n        #     houseapartment = response.xpath(\n        #         \"//*[@class='navleft tf']//a[contains(text(),'户型')]/@href\").extract_first()  # building floor plans\n        #     # houseapartment = \"\".join(houseapartment)\n        #     houseapartment = u\"https:\" + houseapartment\n        #     yield scrapy.Request(houseapartment, callback=self.parse_houseapartment)\n        # except Exception as e:\n        #     pass\n\n        \"\"\"Uncomment to crawl photo albums\"\"\"\n        # try:\n        #     houseImage = response.xpath(\n        #         \"//*[@class='navleft tf']//a[contains(text(),'相册')]/@href\").extract_first()  # building photo album\n        #     if not houseImage:\n        #         yield {\"_id\": item['_id'], \"houseImage\": json.dumps([])}\n        #     houseImage = u\"https:\" + houseImage\n        #     yield scrapy.Request(houseImage, meta={\"_id\": response.url}, callback=self.parse_image_base)\n        # except Exception as e:\n        #     pass\n\n    def parse_image_base(self, response):\n        \"\"\"\n        Match the image URLs with regexes and call the ajax interface directly\n        :return:\n        \"\"\"\n        html = response.text\n        _id = response.meta[\"_id\"]\n        building_name = re.search(r\"//(\\\\w+)\\\\.\", response.url).group(1)\n        building_number = re.search(r\"(\\\\d+)\\\\.htm\", response.url).group(1)\n        module_dict = {}\n        image_list = []\n        re_effect_image = re.compile(r\"\\\\<span\\\\>效果图\\\\<\\\\/span\\\\>.*?\\\\<\\\\/a\\\\>\")\n        re_realsight_image = re.compile(r\"\\\\<span\\\\>实景图\\\\<\\\\/span\\\\>.*?\\\\<\\\\/a\\\\>\")\n        re_traffic_image = re.compile(r\"\\\\<span\\\\>交通图\\\\<\\\\/span\\\\>.*?\\\\<\\\\/a\\\\>\")\n        re_prototype_room = re.compile(r\"\\\\<span\\\\>样板间\\\\<\\\\/span\\\\>.*?\\\\<\\\\/a\\\\>\")\n\n        
effect_image = re_effect_image.findall(html)\n        realsight_image = re_realsight_image.findall(html)\n        traffic_image = re_traffic_image.findall(html)\n        prototype_room = re_prototype_room.findall(html)\n\n        pattern_url = re.compile(r\"//.*?htm\")\n        pattern_num = re.compile(r\"<em>(\\\\d+)<\\\\/em>\")\n\n        # use interface to get data directly\n        effect_number, realsight_number, traffic_number, prototype_number = (0, 0, 0, 0)\n        if effect_image:\n            try:\n                effect_image_url = \"http:\" + pattern_url.findall(effect_image[0])[0]\n                effect_number = int(pattern_num.search(effect_image[0]).group(1))\n                module_dict.update({\"xiaoguotu\": [effect_number, effect_image_url, 904]})\n            except Exception as e:\n                print(str(e))\n                effect_number = 0\n        if realsight_image:\n            try:\n                realsight_image_url = \"http:\" + pattern_url.findall(realsight_image[0])[0]\n                realsight_number = int(pattern_num.search(realsight_image[0]).group(1))\n                module_dict.update({\"shijingtu\": [realsight_number, realsight_image_url, 903]})\n            except Exception as e:\n                print(str(e))\n                realsight_number = 0\n        if traffic_image:\n            try:\n                traffic_image_url = \"http:\" + pattern_url.findall(traffic_image[0])[0]\n                traffic_number = int(pattern_num.search(traffic_image[0]).group(1))\n                module_dict.update({\"jiaotongtu\": [traffic_number, traffic_image_url, 901]})\n            except Exception as e:\n                print(str(e))\n                traffic_number = 0\n        if prototype_room:\n            try:\n                prototype_room_url = \"http:\" + pattern_url.findall(prototype_room[0])[0]\n                prototype_number = int(pattern_num.search(prototype_room[0]).group(1))\n                module_dict.update({\"yangbanjian\": [prototype_number, prototype_room_url, 905]})\n            except Exception as e:\n                print(str(e))\n                prototype_number = 0\n\n        # if there are no images of any kind, return right away\n        if effect_number + realsight_number + traffic_number + prototype_number == 0:\n            print(\"No images at all\")\n            yield {\"_id\": _id, \"picJson\": json.dumps(\"\", ensure_ascii=False)}\n        else:\n            full_image_interface_list = []\n            for key, value in module_dict.items():\n                base_url = \"http://\" + building_name + \".fang.com/house/ajaxrequest/photolist_get.php?newcode=\" + building_number + \"&type=\" + str(\n                    value[2]) + \"&room=&nextpage=\"\n                page_number = value[0] // 6 + 2 if value[0] % 6 else value[0] // 6 + 1\n                [full_image_interface_list.append([base_url + str(i), key]) for i in range(1, page_number)]\n            first_url = full_image_interface_list.pop()\n            meta = {\"_id\": _id, \"request_list\": full_image_interface_list, \"type\": first_url[1], \"json_data\": []},\n            yield scrapy.Request(first_url[0], meta={\"item\": meta}, callback=self.parse_images)\n\n    def parse_images(self, response):\n        \"\"\"\n        :param total_number: total number of images of effect\n        :param style_number: total number of images of effect\n        :return:\n        \"\"\"\n        item = dict(response.meta[\"item\"][0])\n        TuUrl = ImageItem()\n        data_list = item[\"json_data\"]\n        image_type = item[\"type\"]\n        _id = item[\"_id\"]\n        full_image_interface_list = item[\"request_list\"]\n\n        if full_image_interface_list:\n            data = json.loads(response.body)\n            # print(\"album interface\", data)\n            [data_list.append({\"picUrl\": \"http:\" + re.sub(r\"\\\\d+x\\\\d+\\\\.\", \"880x600.\", i[\"url_s\"]), \"type\": image_type})\n             for i in data]\n\n            first_url = full_image_interface_list.pop()\n            meta = {\"_id\": _id, \"request_list\": full_image_interface_list, \"type\": first_url[1],\n                    \"json_data\": data_list},\n            yield scrapy.Request(first_url[0], meta={\"item\": meta}, callback=self.parse_images)\n        else:\n            yield {\"_id\": _id, \"picJson\": json.dumps(data_list)}\n\n        data = json.loads(response.text)\n        for images 
in data:\n            TuUrl[\"image_urls\"] = [\"http:\" + re.sub(r\"\\\\d+x\\\\d+\\\\.\", \"880x600.\", images[\"url_s\"])]\n            yield TuUrl\n\n    def parse_houseapartment(self, response):\n        # Parse the floor-plan page: data is fetched by building the ajax interface URL\n        data_url = response.url\n        building_name = re.sub(r\"\\\\w+\\\\/\\\\w+_\\\\d+_\\\\d+.htm\", \"\", data_url)\n        building_number = re.search(r\"(\\\\d+)\\\\.htm\", data_url).group(1)\n\n        jiekou_url = building_name + \"house/ajaxrequest/householdlist_get.php?newcode=\" + building_number + \"&room=all\"\n        yield scrapy.Request(jiekou_url, meta={\"house_url\": building_name}, callback=self.house_interface)\n\n    def house_interface(self, response):\n        # floor-plan interface\n        item = DataHouseapartment()\n        house_url = response.meta[\"house_url\"]\n        all_comment_dict = {\"_id\": house_url}\n        houseapartment = []\n        datas = json.loads(response.text)\n        for data in datas:\n            # item[\"houseUrl\"]\n            images = []\n            imag = \"http:\" + re.sub(\"220x150\", \"748x600\", data[\"houseimageurl\"])\n            images.append({\"picUrl\": imag})\n            item[\"imgs\"] = images  # floor-plan images\n            # item[\"_id\"] = house_url  # floor-plan url\n            item[\"name\"] = data[\"housetitle\"]  # floor-plan name\n            item[\"houseUrl\"] = house_url + \"photo/d_house_\" + data[\"picID\"] + \".htm\"\n            item[\"salesStatus\"] = data[\"status\"]  # sale status\n            item[\"roomNum\"] = data[\"room\"]  # layout (bedrooms)\n            item[\"hallNum\"] = data[\"hall\"]  # layout (living rooms)\n            item[\"toiletNum\"] = data[\"toilet\"]  # layout (bathrooms)\n            item[\"constructSpace\"] = data[\"buildingarea\"]\n            # item[\"price\"] = data[\"toilet\"]\n            # item[\"propertyType\"] = data[\"toilet\"]\n            # item[\"remark\"] = data[\"toilet\"]\n\n            try:\n                if \"-\" in data[\"reference_price\"]:\n                    lower_price, high_price = data[\"reference_price\"].split(\"-\")\n                    data[\"reference_price\"] = str((float(lower_price) + float(high_price)) / 2)\n            except Exception as e:\n                print(str(e))\n            try:\n                item[\"price\"] = int(float(data[\"reference_price\"]) / float(data[\"buildingarea\"]) * 10000) if \\\n                    data[\"reference_price\"] != \"待定\" and data[\"buildingarea\"] != \"待定\" \\\n                    and data[\"reference_price\"] and data[\"buildingarea\"] \\\n                    and float(data[\"reference_price\"]) and \\\n                    float(data[\"buildingarea\"]) else None  # reference average price\n            except Exception as e:\n                print(str(e))\n            if not data[\"reference_price\"]:\n                item[\"totalPrices\"] = \"\"\n            elif data[\"reference_price\"] == \"待定\":\n                item[\"totalPrices\"] = data[\"reference_price\"]\n            else:\n                item[\"totalPrices\"] = data[\"reference_price\"] + \"万元/套\"\n            houseapartment.append(dict(item))\n\n        houseapartment = json.dumps(houseapartment, ensure_ascii=False)\n        all_comment_dict.update({\"houseapartment\": houseapartment})\n        yield all_comment_dict\n\n    def parse_comments(self, response):\n        # Parse reviews\n        url = response.url\n        house = re.sub(r\"dianping/\", \"\", url)\n\n        particulars = response.xpath(\"//*[@class='navleft tf']//a[contains(text(),'详情')]/@href|\"\n                                     \"//*[@class='navleft tf']//a[contains(text(),'详细')]/@href\").extract_first()\n        particulars = u\"https:\" + particulars\n        parameter = re.search(r\"/(\\\\d+)/\", particulars).group(1)\n\n        comments_data = response.xpath(\"//*[@id='dpCount']/text()\").extract_first()\n        comments_data = re.search(r\"(\\\\d+)\", comments_data).group(1)\n        comments_data = int(comments_data)\n        port_url = house + \"house/ajaxrequest/dianpingList_201501.php\"  # desktop-site interface\n        port = {\n            \"dianpingNewcode\": str(parameter),\n            \"ifjiajing\": \"0\",\n            # \"page\": \"1\",\n            \"tid\": \"null\",\n            \"pagesize\": str(comments_data),\n            \"starnum\": \"6\",\n            \"shtag\": \"-1\",\n\n        }\n        yield scrapy.FormRequest(url=port_url, method=\"POST\", formdata=port, callback=self.comment_port)  # 
send the POST request\n\n    def comment_port(self, response):\n        item = DataCommentJson()\n        url = response.url\n        url = re.sub(r\"house\\\\/\\\\w+\\\\/\\\\w+_\\\\d+.php\", \"\", url)\n        all_comment_dict = {\"_id\": url}\n        commentJson = []\n        # datas = json.loads(response.body)[\"list\"]\n        datas = json.loads(response.text)[\"list\"]\n        # datas = json.loads(response.body.decode(\"gb18030\"))\n        for data in datas:\n            item[\"source\"] = \"房天下\"\n            item[\"userNick\"] = data[\"nickname\"]\n            if item[\"userNick\"] == \"\":\n                item[\"userNick\"] = data[\"username\"]\n            item[\"content\"] = data[\"content\"]\n            item[\"sourceUrl\"] = url + \"dianping/\"\n            item[\"createDate\"] = data[\"create_time\"]\n            commentJson.append(dict(item))\n        # commentJson = json.dumps(commentJson, ensure_ascii=False)\n        all_comment_dict.update({\"commentJson\": commentJson})\n        yield all_comment_dict\n\n    def parse_dynamic(self, response):\n        # Parse building updates\n        # dynamic_urls = []\n\n        dynamic = response.xpath(\"//*[@class='navleft tf']//a[contains(text(),'首页')]/@href\").extract()  # building home page\n        dynamic = \"\".join(dynamic)\n        _id = u\"https:\" + dynamic\n\n        try:\n            dynamic_url = response.xpath(\n                '//div[@id=\"gushi_all\"]//a[contains(text(),\"详情\")]/@href').extract()  # get the detail links inside the updates\n            # dynamic_url = \"\".join(dynamic_url)\n            if dynamic_url != None:\n                for one_dynamic_url in dynamic_url:\n                    one_dynamic_url = u\"https:\" + one_dynamic_url\n                    # yield scrapy.Request(one_dynamic_url, callback=self.dynamic_particulars)\n                    self.dynamic_urls.append(one_dynamic_url)\n\n            the_next_page = response.xpath(\n                '//div[@id=\"gushi_all\"]//li[@class=\"clearfix dbib\"]//a[contains(text(),\"下一页\")]/@href').extract_first()  # next page\n\n            if the_next_page is None:\n                url = self.dynamic_urls.pop()\n                yield scrapy.Request(url, callback=self.dynamic_particulars)\n            else:\n                the_next_page = _id + the_next_page\n                yield scrapy.Request(the_next_page, callback=self.parse_dynamic)\n        except Exception as e:\n            pass\n\n        # the_next_page = response.xpath('//div[@id=\"gushi_all\"]//li[@class=\"clearfix dbib\"]//a[contains(text(),\"下一页\")]/@href').extract_first()  # next page\n        # print(\"0033\",the_next_page)\n        # if the_next_page != None:\n        #     the_next_page = _id + the_next_page\n        #     yield scrapy.Request(the_next_page,callback=self.parse_dynamic)\n\n    def dynamic_particulars(self, response):\n        item = DataDynamicJson()\n        dynamic = response.xpath(\"//*[@class='navleft tf']//a[contains(text(),'首页')]/@href\").extract()  # building home page\n        dynamic = \"\".join(dynamic)\n        _id = u\"https:\" + dynamic\n        all_comment_dict = {\"_id\": _id}\n        # dynamicJson = []\n        url = response.url\n        url = re.sub(r\"\\\\d+_\\\\d+\\\\.htm\", \"\", url)\n        dynamic_content = response.xpath(\"//div[@class='atc-wrapper']\")\n        for i in dynamic_content:\n            item[\"soutse\"] = \"房天下\"\n            item[\"title\"] = i.xpath(\"./h1/text()\").extract_first()\n            item[\"publishDate\"] = i.xpath(\"./h2/text()[3]\").extract_first()\n            item['publishDate'] = re.search(r\"\\\\d+.*\", item[\"publishDate\"], re.S).group()  # timestamp\n            # item[\"publishDate\"] = item[\"publishDate\"].replace(\" \", \"\")\n            item[\"publishDate\"] = item[\"publishDate\"].replace(\"\\\\n\", \"\")\n            item[\"publishDate\"] = item[\"publishDate\"].replace(\"\\\\t\", \"\")\n            item[\"publishDate\"] = item[\"publishDate\"].replace(\"\\\\r\", \"\")\n            # time = \"\".join(time)\n            # data[\"publishDate\"] =re.search(r\"/d+.*\",time,re.S).group()\n            item[\"content\"] = i.xpath(\n                \".//p[@style='text-indent:2em;']//text()|//div[@class='leftboxcom']//text()\").extract()\n            item[\"content\"] = \"\".join(item[\"content\"])\n            item[\"content\"] = item[\"content\"].replace(\" \", 
\"\")\n item[\"content\"] = item[\"content\"].replace(\"\\n\", \"\")\n item[\"content\"] = item[\"content\"].replace(\"\\t\", \"\")\n item[\"content\"] = item[\"content\"].replace(\"\\r\", \"\")\n self.dynamicJson.append(dict(item))\n\n the_next_page1 = response.xpath('//div[@class=\"fy-wrapper\"]/a[@class=\"xyp\"]/@href').extract_first()\n the_next_page = url + the_next_page1\n\n if self.dynamic_urls == []:\n # if the_next_page1 == \"javascript:void(0);\":\n all_comment_dict.update({\"dynamicJson\": self.dynamicJson})\n yield all_comment_dict\n self.dynamicJson.clear()\n else:\n url = self.dynamic_urls.pop()\n yield scrapy.Request(url, callback=self.dynamic_particulars)\n\n def positioning(self, response):\n item = response.meta[\"item\"]\n particulars = response.meta[\"xiangqing\"]\n ditu = response.body.decode(\"utf8\")\n # re_search = re.search(r'\"mapx\":\"(.*?)\",\"mapy\":\"(.*?)\"', ditu, re.DOTALL)\n re_search = re.search(r'\"mapx\":\"(\\d+\\.\\d+)\",\"mapy\":\"(\\d+\\.\\d+)\"', ditu, re.DOTALL)\n housecoord = re_search.group(2) + \",\" + re_search.group(1)\n item[\"housecoord\"] = housecoord\n yield scrapy.Request(particulars, meta={\"item\": item}, callback=self.parse_particulars)\n\n def parse_particulars(self, response):\n # 解析详情页\n url = re.sub(r\"house/\\d+/\\w+.htm\", \"\", response.url)\n pattern = re.compile(r'\\W+', re.S)\n html = response.body.decode(\"gb18030\")\n soup = BeautifulSoup(html, \"html.parser\")\n html = etree.HTML(html)\n item = response.meta['item']\n item['housename'] = response.xpath('//*[@id=\"daohang\"]//h1/a/text()').extract() # 楼盘名称\n item['housename'] = \"\".join(item['housename'])\n try:\n housename2 = response.xpath('//*[@id=\"daohang\"]//div/span/text()').extract() # 楼盘别名\n housename2 = \"\".join(housename2)\n item['housename2'] = housename2[3:] # 字符串切片去掉前面三个字符\n if not item['housename2']:\n item['housename2'] = \"\"\n except Exception as e:\n item['housename2'] = None\n houseproperty = response.xpath('//div[@class=\"lpicon tf\"]//text()').extract() # 楼盘标签\n houseproperty = [pattern.sub('', i) for i in houseproperty]\n re_houseproperty = []\n [re_houseproperty.append(i)\n for i in houseproperty if i]\n houseproperty = \",\".join(re_houseproperty) # 空格替换逗号\n houseproperty = \"\".join(houseproperty)\n item[\"houseproperty\"] = houseproperty\n # ---------------------------预售证------------------------\n try:\n basic_information = response.xpath(\n \"//div//h3[contains(text(),'销售信息')]/..//div[@class='table-all']//tr[position()>1]\")\n if basic_information == []:\n basic_information = response.xpath(\n \"//div//h3[contains(text(),'销售信息')]/..//div[@class='table-part']//tr[position()>1]\")\n except Exception as e:\n basic_information = None\n pass\n all_comment_dict = {\"_id\": url}\n presale = []\n for i in basic_information:\n # 基本信息\n # data_lists = []\n data = {}\n # data = DataPresale()\n budgetLicence = i.xpath(\".//td[1]/text()\").extract()\n data['budgetLicence'] = \"\".join(budgetLicence)\n licenceDate = i.xpath(\".//td[2]/text()\").extract()\n data[\"licenceDate\"] = \"\".join(licenceDate) # 获取时间\n pattern = re.compile(r'(\\d{4}).*?(\\d{1,2}).*?(\\d{1,2})')\n pattern_without_day = re.compile(r'(\\d{4}).*?(\\d{1,2})')\n if data[\"licenceDate\"]:\n re_serch = pattern.search(data[\"licenceDate\"])\n if re_serch:\n start_year, start_month, start_day = re_serch.group(1), re_serch.group(2), re_serch.group(3)\n start_month, start_day = start_month.rjust(2, '0'), start_day.rjust(2, '0')\n data[\"licenceDate\"] = start_year + \"-\" + start_month + \"-\" 
+ start_day + \" 00:00:00\"\n else:\n try:\n re_serch = pattern_without_day.search(data[\"licenceDate\"])\n start_year, start_month = re_serch.group(1), re_serch.group(2)\n start_month = start_month.rjust(2, '0')\n data[\"licenceDate\"] = start_year + \"-\" + start_month + \"-01 00:00:00\"\n except:\n pass\n # data['bindingHouse'] = i.find_element_by_xpath(\".//td[3]\").text\n bindingHouse = i.xpath(\".//td[3]/text()\").extract()\n data['bindingHouse'] = \"\".join(bindingHouse)\n # if not data['bindingHouse'] and not data[\"licenceDate\"] and not data['budgetLicence']:\n # continue\n # data_lists.append(data)\n presale.append(data)\n # presale = json.dumps(presale, ensure_ascii=False)\n all_comment_dict.update({\"presale\": presale})\n yield all_comment_dict\n # ---------------预售证----------------------\n\n basic_information = response.xpath(\"//div[@class='main-left']\")\n for i in basic_information:\n # 基本信息\n # item['_id'] = url # 楼盘url\n item['source'] = \"房天下\" # 来源\n item['allstatus'] = \"1\" # 采集状态\n price = i.xpath('./div[1]//em/text()').extract() # 均价\n price = ''.join(price)\n try:\n price = price.replace(\"\\n\", \"\")\n price = price.replace(\"\\t\", \"\")\n price = price.replace(\" \", \"\")\n except Exception as e:\n pass\n try:\n item['houseprice'] = re.search(r\"\\d+.*\", price, re.S).group() # 取出数字及后面的字\n except Exception as e:\n item['houseprice'] = \"待定\"\n book_list = soup.find(attrs={\"class\": \"main-left\"})\n book_list_name = book_list.find_all(\"li\")\n data_dict = {}\n for i in book_list_name:\n key = i.find(attrs={\"class\": \"list-left\"})\n try:\n key = key.text\n except Exception as e:\n pass\n value = i.find(attrs={\"class\": [\"list-right\", \"list-right-text\", \"list-right-floor\"]}) # 获取两个class名\n try:\n value = value.text\n except Exception as e:\n pass\n try:\n key = key.replace(\" \", \"\")\n key = key.replace(\"\\n\", \"\")\n key = key.replace(\"\\t\", \"\")\n except Exception as e:\n pass\n try:\n value = value.replace(\"\\n\", \"\")\n # value = value.replace(\" \", \",\")\n value = value.replace(\"\\t\", \"\")\n except Exception as e:\n pass\n data_dict.update({key: value})\n # 基本信息\n if \"物业类别:\" in data_dict.keys():\n item['houseatr'] = data_dict[\"物业类别:\"]\n item['houseatr'] = item['houseatr'].replace(\",\", \"\")\n item['houseatr'] = item['houseatr'].replace(\" \", \"\")\n if \"建筑类别:\" in data_dict.keys():\n item['housetype'] = data_dict[\"建筑类别:\"]\n item['housetype'] = item['housetype'].replace(\" \", \",\")\n elif \"写字楼级别:\" in data_dict.keys():\n item['housetype'] = data_dict[\"写字楼级别:\"]\n # item['housetype'] = item['housetype'].replace(\" \", \",\")\n if \"产权年限:\" in data_dict.keys():\n item['years'] = data_dict[\"产权年限:\"]\n item['years'] = item['years'].replace(\",\", \"\")\n if \"装修状况:\" in data_dict.keys():\n item['decoration'] = data_dict[\"装修状况:\"]\n if \"开发商:\" in data_dict.keys():\n item['developer'] = data_dict[\"开发商:\"]\n if \"楼盘地址:\" in data_dict.keys():\n item['houseaddress'] = data_dict[\"楼盘地址:\"]\n # 销售信息\n if \"销售状态:\" in data_dict.keys():\n item['salestatus'] = data_dict[\"销售状态:\"]\n item['salestatus'] = item['salestatus'].replace(\" \", \"\")\n if \"开盘时间:\" in data_dict.keys():\n item['startSaleString'] = data_dict[\"开盘时间:\"]\n if \"交房时间:\" in data_dict.keys():\n item['endSaleString'] = data_dict[\"交房时间:\"]\n if \"售楼地址:\" in data_dict.keys():\n item['saleaddress'] = data_dict[\"售楼地址:\"]\n # 小区规划\n if \"占地面积:\" in data_dict.keys():\n landarea = data_dict[\"占地面积:\"]\n data_re = re.findall(r\"\\d+\", landarea, re.S) # 取出数字\n 
item['landarea'] = (\"\".join(data_re))  # list to string\n            if \"建筑面积:\" in data_dict.keys():\n                housearea = data_dict[\"建筑面积:\"]\n                data_re = re.findall(r\"[\\\\d\\\\.]+\", housearea, re.S)  # extract the digits\n                item['housearea'] = (\"\".join(data_re))  # list to string\n            if \"容积率:\" in data_dict.keys():\n                item['plotratio'] = data_dict[\"容积率:\"]\n                item['plotratio'] = ''.join(item['plotratio'].split())\n            if \"绿化率:\" in data_dict.keys():\n                item['greenrate'] = re.sub(r'\\\\%', '', data_dict[\"绿化率:\"])  # strip the %\n                if item['greenrate'] == \"暂无资料\":\n                    item['greenrate'] = None\n            if \"停车位:\" in data_dict.keys():\n                item['carsite'] = data_dict[\"停车位:\"]\n                try:\n                    item['carsite'] = item['carsite'].replace(\"\\\\r\", \"\")\n                    item['carsite'] = item['carsite'].replace(\"\\\\n\", \"\")\n                    item['carsite'] = item['carsite'].replace(\"\\\\t\", \"\")\n                    item['carsite'] = item['carsite'].replace(\" \", \"\")\n                except Exception as e:\n                    pass\n            elif \"停车位配置:\" in data_dict.keys():\n                item['carsite'] = data_dict[\"停车位配置:\"]\n                try:\n                    item['carsite'] = item['carsite'].replace(\"\\\\r\", \"\")\n                    item['carsite'] = item['carsite'].replace(\"\\\\n\", \"\")\n                    item['carsite'] = item['carsite'].replace(\"\\\\t\", \"\")\n                    item['carsite'] = item['carsite'].replace(\" \", \"\")\n                except Exception as e:\n                    pass\n            if \"楼栋总数:\" in data_dict.keys():\n                housecount = data_dict[\"楼栋总数:\"]\n                data_re = re.findall(r\"\\\\d+\", housecount, re.S)  # extract the digits\n                item['housecount'] = (\"\".join(data_re))  # list to string\n                item['housecount'] = item['housecount'].replace(\" \", \"\")\n            elif \"楼栋情况:\" in data_dict.keys():\n                item['housecount'] = data_dict[\"楼栋情况:\"]\n                item['housecount'] = item['housecount'].replace(\" \", \"\")\n            if \"总户数:\" in data_dict.keys():\n                allcount = data_dict[\"总户数:\"]\n                data_re = re.findall(r\"\\\\d+\", allcount, re.S)  # extract the digits\n                item['allcount'] = (\"\".join(data_re))  # list to string\n            if \"物业公司:\" in data_dict.keys():\n                item['managecompany'] = data_dict[\"物业公司:\"]\n            if \"物业费:\" in data_dict.keys():\n                item['managefee'] = data_dict[\"物业费:\"]\n                item['managefee'] = \"\".join(item['managefee'].split())  # strip \\\\xa0 characters\n            if \"楼层状况:\" in data_dict.keys():\n                item['floorCondition'] = data_dict[\"楼层状况:\"]\n\n        item['fetch_time'] = str(datetime.now())  # record the current time\n        pattern = re.compile(r'(\\\\d{4}).*?(\\\\d{1,2}).*?(\\\\d{1,2})')\n        pattern_without_day = re.compile(r'(\\\\d{4}).*?(\\\\d{1,2})')\n\n        if item['startSaleString']:\n            re_serch = pattern.search(item[\"startSaleString\"])\n            if re_serch:\n                start_year, start_month, start_day = re_serch.group(1), re_serch.group(2), re_serch.group(3)\n                start_month, start_day = start_month.rjust(2, '0'), start_day.rjust(2, '0')\n                item[\"startsaletime\"] = start_year + \"-\" + start_month + \"-\" + start_day + \" 00:00:00\"\n            else:\n                try:\n                    re_serch = pattern_without_day.search(item[\"startSaleString\"])\n                    start_year, start_month = re_serch.group(1), re_serch.group(2)\n                    start_month = start_month.rjust(2, '0')\n                    item[\"startsaletime\"] = start_year + \"-\" + start_month + \"-01 00:00:00\"\n                except:\n                    pass\n        if item[\"endSaleString\"]:\n            re_serch = pattern.search(item[\"endSaleString\"])\n            if re_serch:\n                start_year, start_month, start_day = re_serch.group(1), re_serch.group(2), re_serch.group(3)\n                start_month, start_day = start_month.rjust(2, '0'), start_day.rjust(2, '0')\n                item[\"endsaletime\"] = start_year + \"-\" + start_month + \"-\" + start_day + \" 00:00:00\"\n            else:\n                try:\n                    re_serch = pattern_without_day.search(item[\"endSaleString\"])\n                    start_year, start_month = re_serch.group(1), re_serch.group(2)\n                    start_month = start_month.rjust(2, '0')\n                    item[\"endsaletime\"] = start_year + \"-\" + 
start_month + \"-\" + \"-01 00:00:00\"\n except:\n pass\n\n for key, value in item.items():\n if value and value.endswith(\",\"):\n item[key] = value[:-1]\n if value and type(value) == str and '[' in value: # 去掉[]内的内容\n item[key] = re.sub(r'[^\\w]?\\[.*?\\]', '', value)\n yield item\n","repo_name":"maidougit/python-test","sub_path":"first_scrapy/first_scrapy/spiders/fangtianxia.py","file_name":"fangtianxia.py","file_ext":"py","file_size_in_byte":33474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"29799036167","text":"import numpy as np\nimport tensorflow as tf\nimport tensorlayer as tl\nexporter = tf.contrib.session_bundle.exporter\n\ndef classify():\n sess_config = tf.ConfigProto()\n sess_config.gpu_options.allow_growth = True\n sess = tf.InteractiveSession(config = sess_config)\n\n X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape = (-1, 784))\n\n x = tf.placeholder(tf.float32, shape = [None, 784], name = 'x')\n y_ = tf.placeholder(tf.int64, shape = [None, ], name = 'y_')\n\n network = tl.layers.InputLayer(x, name = 'input_layer')\n network = tl.layers.DropoutLayer(network, keep = 0.8, name = 'drop1')\n network = tl.layers.DenseLayer(network, n_units = 800, act = tf.nn.relu, name = 'relu1')\n network = tl.layers.DropoutLayer(network, keep = 0.5, name = 'drop2')\n network = tl.layers.DenseLayer(network, n_units = 800, act = tf.nn.relu, name = 'relu2')\n network = tl.layers.DropoutLayer(network, keep = 0.5, name = 'drop3')\n network = tl.layers.DenseLayer(network, n_units = 10, act =tf.identity, name = 'output_layer')\n\n y = network.outputs\n cost = tl.cost.cross_entropy(y, y_, name = 'cost')\n correct_prediction = tf.equal(tf.argmax(y, 1), y_)\n acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n y_op = tf.argmax(tf.nn.softmax(y))\n\n train_params = network.all_params\n train_op = tf.train.AdamOptimizer(learning_rate = 0.0001, beta1 = 0.9, beta2 = 0.999, \n epsilon = 1e-08, use_locking = False).minimize(cost, var_list = train_params)\n\n tl.layers.initialize_global_variables(sess)\n\n network.print_params()\n network.print_layers()\n\n tl.utils.fit(sess, network, train_op, cost, X_train, y_train, x, y_, acc = acc, batch_size = 500, \n n_epoch = 10, print_freq = 1, X_val = X_val, y_val = y_val, eval_train = False)\n\n tl.utils.test(sess, network, acc, X_test, y_test, x, y_, batch_size = None, cost = cost)\n\n tl.files.save_npz(network.all_params, name = 'ckpts/model.npz')\n sess.close()\n\ndef linear():\n sess_config = tf.ConfigProto()\n sess_config.gpu_options.allow_growth = True\n sess = tf.InteractiveSession(config = sess_config)\n \n trX = np.linspace(-1, 1, 101) \n # trY = 2 * trX + np.ones(*trX.shape) * 4 + np.random.randn(*trX.shape) * 0.03\n trY = 2 * trX + np.random.randn(*trX.shape) * 0.03\n trX = trX.reshape([-1, 1])\n trY = trY.reshape([-1])\n X = tf.placeholder(tf.float32, shape = [None, 1])\n Y = tf.placeholder(tf.float32, shape = [None])\n \n def model(X, w, b): \n # return X * w + b\n return tf.matmul(X, w) + b\n \n w = tf.Variable(0.0, name=\"weights\") \n b = tf.Variable(0.0, name=\"biases\") \n y_model = model(X, [[w]], b) \n \n cost = tf.square(Y - y_model)\n train_op = tf.train.GradientDescentOptimizer(0.01).minimize(cost)\n init = tf.initialize_all_variables() \n sess.run(init) \n\n for i in range(10):\n for (x, y) in zip(trX, trY):\n _, w_val, b_val = sess.run([train_op, w, b], feed_dict={X: [x], Y: [y]})\n print('[*] epoch: {:2d}, w: {:.6f}, b: 
 print('[*] epoch: {:2d}, w: {:.6f}, b: {:.6f}'.format(i, w_val, b_val))\n\n print('[*] test begins')\n x = np.array([-0.4, -0.8, 1.0, 0.5, 0.4, -0.8, 0.0, 0.7]).reshape([-1, 1])\n y_val = sess.run(y_model, feed_dict={X: x})\n print('x: \\n{}'.format(x))\n print('y: \\n{}'.format(y_val))\n return\n # below to convert to serving model (unreachable while the early return above is in place)\n saver = tf.train.Saver()\n saver.save(sess, 'ckpts/model.ckpt')\n saver.restore(sess, 'ckpts/model.ckpt')\n model_exporter = exporter.Exporter(saver)\n model_exporter.init(\n sess.graph.as_graph_def(),\n named_graph_signatures = {\n 'inputs': exporter.generic_signature({'x': X}),\n 'outputs': exporter.generic_signature({'y': y_model})})\n model_exporter.export('ckpts', \n tf.constant(1.1), sess)\n return\n\ndef launch():\n linear()\n\ndef main(_):\n linear()\n\n\nif __name__ == '__main__':\n tf.app.run()","repo_name":"lamia482/practices","sub_path":"example/example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":3829,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"17938358036","text":"from django.http import JsonResponse\r\n\r\nfrom account.models import User\r\nfrom blog.models import ArticlePage, Category\r\n\r\n\r\ndef tagify_query(request):\r\n\tall_articles = ArticlePage.objects.live().public()\r\n\tauthors = [user.first_name + ' ' + user.last_name for user in User.objects.filter(is_staff=True)]\r\n\ttags = list(dict.fromkeys(all_articles.values_list('tags__name', flat=True)))\r\n\tarticles = list(all_articles.values_list('title', flat=True))\r\n\tcategories = list(Category.objects.all().values_list('name', flat=True))\r\n\treturn JsonResponse({\r\n\t\t'articles': articles,\r\n\t\t'authors': authors,\r\n\t\t'categories': categories,\r\n\t\t'tags': tags,\r\n\t})\r\n","repo_name":"calenwu/webshop","sub_path":"blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"7135379898","text":"matr=[[3,7,5,3,4,5],[4,5,2,6,5,4],[7,4,9,7,8,3]]\ndef matrix(mat):\n tom=0\n jerry=0\n stage_1=[]\n for j in range(len(mat[0])):\n temp=[]\n for i in range(len(mat)):\n temp.append(mat[i][j])\n stage_1.append(sorted(temp, reverse=True)) # each column, best value first\n stage_2=sorted(stage_1, key=lambda x: x[0], reverse=True) # columns ordered by their best value\n print(stage_2)\n for i in range(len(stage_2)):\n if i%2==0:\n tom+=stage_2[i][0]\n else:\n jerry+=stage_2[i][0]\n return tom-jerry\n \nprint(matrix(matr))\n\n\n\n\n\n","repo_name":"doyin315/Algorithms","sub_path":"matrix_game.py","file_name":"matrix_game.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"} +{"seq_id":"24815493676","text":"import requests\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nfrom openpyxl import load_workbook\n\npage = requests.get(\"https://jeopardylabs.com/play/5th-grade-science-review80\")\nsoup = BeautifulSoup(page.content, 'html.parser')\ngrid=soup.find('div', id='question-grid')\ntable=grid.find_all('div', class_='table-cell-inner')\n\n\n#first_row=table[1]\nprint(type(table))\nprint((len(table)))\nprint(table)\nquestion=[]\nanswer=[]\nfor rows in table:\n q=rows.find('div', class_='question').text\n question.append(q)\n \n a=rows.find('div', class_='answer').text\n answer.append(a)\n \n#print(question)\n#print(answer)\n#row_1=soup.find_all('div', class_=\"table-cell-inner\")\n#print(row_1)\n#q=grid.find_all('div', class_='table-row')\n#print(q.text)\n
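# note: the question/answer lists gathered above are written to a DataFrame below and appended under the existing rows of temp.xlsx (startrow=row_count+2)\n#a=q.find_all('div', 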
class_='answer')\n#print(a.text)\n\ntest_df = pd.DataFrame({'question': question,\n 'answer': answer,})\n#print(test_df)\nbook = load_workbook('D:\\\\Projects\\\\Python\\\\science-jeapordy-2\\\\temp.xlsx')\nsheet=book.worksheets[0]\nrow_count = sheet.max_row\nprint(row_count)\nwriter = pd.ExcelWriter('D:\\\\Projects\\\\Python\\\\science-jeapordy-2\\\\temp.xlsx', engine='openpyxl')\nwriter.book = book\nwriter.sheets = dict((ws.title, ws) for ws in book.worksheets)\n#test_df.to_excel(writer, \"Main\", startrow=len(test_df)+2)\n\ntest_df.to_excel(writer,header=False,startrow =row_count+2)\n#test_df.to_excel(writer, index=False)\nwriter.save()\n#test_df.to_excel('D:\\\\Projects\\\\Python\\\\science-jeapordy-2\\\\temp.xls')","repo_name":"saujla1/Github","sub_path":"science-jeapordy-2/science_jeapordy_2.py","file_name":"science_jeapordy_2.py","file_ext":"py","file_size_in_byte":1476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"10183397381","text":"import os\nfrom config import MONGO_URI\nfrom pymongo import MongoClient\nbot = \"ExtremeProuserbot\"\nMONGOCLIENT = MongoClient(MONGO_URI, 27017, serverSelectionTimeoutMS=1)\n\nMONGO = MONGOCLIENT.userbot\n\nBOTLOG = (os.environ.get(\"BOTLOG\") == 'True')\n\nBOTLOG_CHATID = int(os.environ.get(\"BOTLOG_CHATID\")) if BOTLOG else 0\n\nPM_AUTO_BAN = (os.environ.get(\"PM_AUTO_BAN\") == 'True')\n\ndef is_mongo_alive():\n try:\n MONGOCLIENT.server_info()\n except BaseException as e:\n print(e)\n return False\n return True\n\n\nCOUNT_MSG = 0\n\nUSERS = {}\n\nCOUNT_PM = {}\n\nLASTMSG = {}\n\nCMD_HELP = {}\n","repo_name":"TeamExtremePro/ExtremeProUserbot","sub_path":"extreme.py","file_name":"extreme.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"63"} +{"seq_id":"40952139238","text":"import threading as th\nfrom random import randint, expovariate, choice\nimport time\n\n\nclass Laberinto:\n with open(\"registros.txt\", \"w\"):\n pass\n\n def __init__(self):\n self.start = None\n self.end = None\n self.nodos = {}\n self.salvados = []\n\n def add_node(self, n, start=False, end=False):\n if n not in self.nodos:\n new_node = Nodo(n)\n self.nodos[n] = new_node\n if start:\n self.start = new_node\n self.start.start = True\n if end:\n self.end = new_node\n self.end.end = True\n\n def add_connection(self, x, y):\n for i in self.nodos:\n if i == x:\n if y not in self.nodos[i].siguientes:\n self.nodos[i].agregar_conexion(y)\n\n def agregar_persona(self, persona):\n self.start.entrar(persona)\n\n\n\n\nclass Nodo:\n def __init__(self, valor, start=False, end=False):\n self.valor = valor\n self.siguientes = []\n self.persona = None\n self.lock_pieza = th.Lock()\n self.start = start\n self.end = end\n\n def agregar_conexion(self, nodo):\n self.siguientes.append(nodo)\n\n def entrar(self, persona):\n #if self.end or self.start:\n #if isinstance(self.persona, list):\n # self.persona.append(persona)\n # else:\n # self.persona = [persona]\n #else:\n with self.lock_pieza:\n self.persona = persona\n persona.pieza_actual = self\n time.sleep(randint(1, 3))\n\n\ndef set_id():\n contador = 0\n while True:\n yield contador\n contador += 1\na = set_id()\n\n\nclass Persona(th.Thread):\n\n def __init__(self, grafo, start=1, hp=None):\n super().__init__()\n if not hp:\n self.hp = randint(80, 120)\n else:\n self.hp = hp\n self.id = next(a)\n self.resistencia = randint(1, 3)\n self.pieza_actual = start\n self.grafo = grafo\n self.daemon = 
True\n\n @property\n def muerto(self):\n if self.hp <= 0:\n self.hp = 0\n return True\n else:\n return False\n\n @property\n def vivo(self):\n if self.hp > 0:\n return True\n else:\n return False\n\n def sufrir(self):\n self.hp = self.hp - (6 - self.resistencia)\n print(\"Person no. {} lost health\".format(self.id))\n\n def run(self):\n while self.vivo:\n if len(self.grafo.salvados) >= 3:\n exit()\n time.sleep(1)\n self.sufrir()\n #print(self.grafo.end.valor)\n if self.pieza_actual is self.grafo.end:\n print(\"REACHED THE LAST ROOM !!! -------------\")\n if len(self.grafo.salvados) <= 3:\n self.grafo.salvados.append(self)\n self.pieza_actual = None\n print(\"Person no. {} WAS SAVED\".format(self.id))\n return\n else:\n exit()\n\n elif self.pieza_actual is self.grafo.start:\n print(\"Was in the first room\")\n valor = choice(self.grafo.start.siguientes)\n self.grafo.nodos[valor].entrar(self)\n print(\"Person no. {0} changed room\".format(self.id))\n else:\n for nodo in self.grafo.nodos.values():\n if nodo is self.pieza_actual:\n if nodo.siguientes:\n valor = choice(nodo.siguientes)\n self.grafo.nodos[valor].entrar(self)\n print(\"Person no. {0} changed room again\".format(self.id))\n\n print(\"---------{}----------\".format(self.pieza_actual.valor))\n print(\"Person no. {} died\".format(self.id))\n\n\nclass CreadorPersonas(th.Thread):\n def __init__(self, laberinto):\n super().__init__()\n self.daemon = False\n self.lab = laberinto\n\n def run(self):\n while len(self.lab.salvados) < 3:\n time.sleep(expovariate(0.2))\n person = Persona(self.lab, start=self.lab.start)\n print(\"Person no. {} entered the maze\".format(person.id))\n self.lab.agregar_persona(person)\n person.start()\n\n\nclass Barrendero(th.Thread):\n def __init__(self, laberinto):\n super().__init__()\n self.lab = laberinto\n self.daemon = True\n\n def run(self):\n while True:\n for nodo in self.lab.nodos.values():\n try:\n if nodo.persona.muerto:\n print(\"Removed a dead person\")\n nodo.persona = None\n except AttributeError:\n pass\n\n\nif __name__ == \"__main__\":\n laberinto = Laberinto()\n with open(\"laberinto.txt\", \"r\") as lab:\n laberinto.add_node(int(lab.readline()), start=True)\n laberinto.add_node(int(lab.readline()), end=True)\n for line in lab:\n laberinto.add_node(int(line.split(\",\")[0]))\n laberinto.add_connection(int(line.split(\",\")[0]), int(line.split(\",\")[1]))\n barrendero = Barrendero(laberinto)\n Dios = CreadorPersonas(laberinto)\n Dios.start()\n barrendero.start()\n\n\n\n","repo_name":"felipesilvadv/felipesilvadv-iic2233-2017-1","sub_path":"Actividades/AC10/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5307,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"2798497500","text":"import unittest\nimport time\nimport threading\n\nfrom coilmq.util.concurrency import CoilThreadingTimer\n\n\nclass CoilTimerTestCase(unittest.TestCase):\n\n def setUp(self):\n class CountedCallback(object):\n\n def __init__(self):\n self.lock = threading.Lock()\n self.n_called = 0\n\n def __call__(self, *args, **kwargs):\n with self.lock:\n self.n_called += 1\n\n self.counter = CountedCallback\n\n def test_periodic_callback(self):\n period = 0.1\n factor = 10\n counter = self.counter()\n timer = CoilThreadingTimer()\n timer.schedule(period, counter)\n with timer:\n time.sleep(period * factor)\n
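 # period=0.1s and factor=10, so the callback should fire about 10 times during the sleep window\n self.assertAlmostEqual(counter.n_called, factor, delta=factor * 0.5, msg='Should provide 50% 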
accuracy')\n","repo_name":"hozn/coilmq","sub_path":"tests/test_util.py","file_name":"test_util.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"63"} +{"seq_id":"43258537328","text":"#Read the corrupted file\nembeddingCorrupted=open('pre_trainedVectorEmbeddings/pre_emb_glove.txt','r')\n\n\n#Open the new file\nembeddingFixed=open('pre_trainedVectorEmbeddings/pre_emb_glove_fixed.txt','w')\n\n\n\nfor line in embeddingCorrupted:\n line_mod=line.replace(\"\\n\",\"\")\n line_mod=line_mod.split(\"\\t\")\n word=line_mod[0]\n vector=line_mod[1].split(\" \")\n new_line=word+\"\\t\"\n if (len(vector)==300):\n embeddingFixed.write(line)\n else:\n print (len(vector))\n flag=0\n for element in vector:\n if element=='' and flag==0:\n new_line=new_line+\" \"\n flag=1\n elif element=='' and flag==1:\n flag=0\n else:\n new_line=new_line+str(element)\n embeddingFixed.write(new_line+\"\\n\")\n\nembeddingFixed.close()\n\n\nembeddingCorrupted=open('pre_trainedVectorEmbeddings/pre_emb_glove_fixed.txt','r')\n\nfor line in embeddingCorrupted:\n line_mod=line.replace(\"\\n\",\"\")\n line_mod=line_mod.split(\"\\t\")\n word=line_mod[0]\n vector=line_mod[1].split(\" \")\n print (vector)\n","repo_name":"inigo-jauregi/healthNER","sub_path":"Bidirectional_LSTM-CRF/fixGlove.py","file_name":"fixGlove.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"63"} +{"seq_id":"19010226348","text":"__author__ = 'lachesis'\n\nimport os, sys, subprocess\nfrom Bio import SeqIO\n\n\ndef sanity_check_cdhit():\n if os.system(\"cd-hit --version\")!=256:\n raise Exception(\"Cannot find cd-hit! Abort!\")\n\ndef sanity_check_ATCG(func):\n def g(input_string, *arg):\n if type(input_string) is not str:\n raise TypeError(\"Input string must be of string type!\")\n for s in input_string:\n if s not in ('A','T','C','G'):\n raise TypeError(\"Input string must consist of only ATCG!\")\n return func(input_string, *arg)\n return g\n\ndef convert_non_ATCG(seq, replace_with='A'):\n new_seq = ''\n for x in seq.upper():\n if x not in ('A','T','C','G'):\n new_seq += replace_with\n else:\n new_seq += x\n\n return new_seq\n\n\n@sanity_check_ATCG\ndef test(seq):\n print(\"just a test. input is\", seq)\n\n\ndef format_ORF_id(name, type, frame, start, end, strand):\n return \"{name} type:{t} len:{l} strand:{strand} pos:{s}-{e}\".format(\\\n name=name, t=type, l=(end-start)/3, strand=strand, s=start+1, e=end)\n\ndef write_CDS_n_PEP(ORFs, output_prefix, min_utr_length=50, append_file=False, starting_index=1):\n \"\"\"\n ORFs --- list of (Bio.SeqRecord, result, strand) where\n result is dict of frame --> list of (type, start, end)\n \"\"\"\n index = starting_index\n f_cds = open(output_prefix + '.cds', 'w' if not append_file else 'a')\n f_pep = open(output_prefix + '.pep', 'w' if not append_file else 'a')\n f_utr = open(output_prefix + '.utr', 'w' if not append_file else 'a')\n for rec, result, strand in ORFs:\n seq_len = len(rec.seq)\n for frame, orfs in result.items():\n for type, start, end in orfs:\n name = rec.id + '|m.' 
+ str(index)\n index += 1\n if strand == '+':\n orf_id = format_ORF_id(name, type, frame, start, end, strand)\n f_cds.write(\">{0}\\n{1}\\n\".format(orf_id, rec.seq[start:end]))\n f_pep.write(\">{0}\\n{1}\\n\".format(orf_id, rec.seq[start:end].translate()))\n if start >= min_utr_length:\n utr_id = format_ORF_id(name, '5UTR', 'NA', 0, start, strand)\n f_utr.write(\">{0}\\n{1}\\n\".format(utr_id, rec.seq[:start]))\n if seq_len - end + 1 >= min_utr_length:\n utr_id = format_ORF_id(name, '3UTR', 'NA', end, seq_len, strand)\n f_utr.write(\">{0}\\n{1}\\n\".format(utr_id, rec.seq[end:]))\n else: # strand == '-', need to adjust start, end, and seq\n r_seq = rec.seq.reverse_complement()\n orf_id = format_ORF_id(name, type, frame, seq_len-end, seq_len-start, strand)\n f_cds.write(\">{0}\\n{1}\\n\".format(orf_id, r_seq[start:end]))\n f_pep.write(\">{0}\\n{1}\\n\".format(orf_id, r_seq[start:end].translate()))\n if start >= min_utr_length: # 0:start\n utr_id = format_ORF_id(name, '5UTR', 'NA', seq_len-start, seq_len-0, strand)\n f_utr.write(\">{0}\\n{1}\\n\".format(utr_id, r_seq[:start]))\n if seq_len - end + 1 >= min_utr_length: #end:seq_len\n utr_id = format_ORF_id(name, '3UTR', 'NA', 0, seq_len-end, strand)\n f_utr.write(\">{0}\\n{1}\\n\".format(utr_id, r_seq[end:]))\n\n f_cds.close()\n f_pep.close()\n f_utr.close()\n return index\n\ndef selective_write(input_filename, output_filename, selected_ids, append_file=False):\n \"\"\"\n .cds, .utr, .pep MUST exist!\n For each of the entries in the above, output to .cds, .utr, .pep ONLY if in \n \"\"\"\n f_out = open(output_filename, 'w' if not append_file else 'a')\n for r in SeqIO.parse(open(input_filename), 'fasta'):\n if r.id in selected_ids:\n f_out.write(\">{0}\\n{1}\\n\".format(r.description, r.seq))\n f_out.close()\n\n\n\nif __name__ == \"__main__\":\n test(sys.argv[1])","repo_name":"PacificBiosciences/ANGEL","sub_path":"src/ORFutils.py","file_name":"ORFutils.py","file_ext":"py","file_size_in_byte":4055,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"63"} +{"seq_id":"17487444162","text":"############################################# IMPORTING ################################################\nimport tkinter as tk\nfrom tkinter import ttk\nfrom tkinter import messagebox as mess\nimport tkinter.simpledialog as tsd\nimport cv2\nimport os\nimport csv\nimport numpy as np\nfrom PIL import Image\nimport pandas as pd\nimport datetime\nimport time\nimport json\n\nimport pyodbc\n# Some other example server values are\n# server = 'localhost\\sqlexpress' # for a named instance\n# server = 'myserver,port' # to specify an alternate port\n\nwith open(\"config.json\") as config:\n data = json.load(config)\n\nmssql = data[\"mssql\"]\n\nserver = mssql[\"server\"]\ndatabase = mssql[\"database\"]\nusername = mssql[\"username\"]\npassword = mssql[\"password\"]\n\ncnxn = pyodbc.connect('DRIVER={ODBC Driver 17 for SQL Server};SERVER=' +\n server+';DATABASE='+database+';UID='+username+';PWD=' + password)\ncursor = cnxn.cursor()\n\ncursor.execute(\"\"\"if not exists (select * from sysobjects where name='Students' and xtype='U')\n CREATE TABLE Students\n\t(\n\t\tId int identity(1,1),\n\t\tUserId int,\n\t\t[Name] nvarchar(500),\n\t\t[Date] nvarchar(100)\n\t);\"\"\")\nprint(\"create table successfully\")\n\n############################################# FUNCTIONS ################################################\n\ndef assure_path_exists(path):\n dir = os.path.dirname(path)\n if not os.path.exists(dir):\n 
os.makedirs(dir)\n\n##################################################################################\n\n\ndef tick():\n time_string = time.strftime('%H:%M:%S')\n clock.config(text=time_string)\n clock.after(200, tick)\n\n###################################################################################\n\n\ndef contact():\n mess._show(title='Contact us',\n message=\"Please contact us on : 'shubhamkumar8180323@gmail.com' \")\n\n###################################################################################\n\n\ndef check_haarcascadefile():\n exists = os.path.isfile(\"haarcascade_frontalface_default.xml\")\n if exists:\n pass\n else:\n mess._show(title='Một số file yêu cầu bị lỗi, vui lòng kiểm tra',\n message='Vui lòng liên hệ để được hỗ trợ')\n window.destroy()\n\n###################################################################################\n\n\ndef save_pass():\n assure_path_exists(\"TrainingImageLabel/\")\n exists1 = os.path.isfile(\"TrainingImageLabel\\psd.txt\")\n if exists1:\n tf = open(\"TrainingImageLabel\\psd.txt\", \"r\")\n key = tf.read()\n else:\n master.destroy()\n new_pas = tsd.askstring('Mật khẩu cũ không được để trống',\n 'Vui lòng nhập mật khẩu mới', show='*')\n if new_pas == None:\n mess._show(title='Không có mật khẩu nào được nhập',\n message='Vui lòng thử lại')\n else:\n tf = open(\"TrainingImageLabel\\psd.txt\", \"w\")\n tf.write(new_pas)\n mess._show(title='Đổi mật khẩu',\n message='Đổi thành công!!')\n return\n op = (old.get())\n newp = (new.get())\n nnewp = (nnew.get())\n if (op == key):\n if(newp == nnewp):\n txf = open(\"TrainingImageLabel\\psd.txt\", \"w\")\n txf.write(newp)\n else:\n mess._show(title='Lỗi', message='Nhập lại mật khẩu!!!')\n return\n else:\n mess._show(title='Wrong Password',\n message='Mật khẩu cũ sai.')\n return\n mess._show(title='Password Changed',\n message='Đổi thành công!!')\n master.destroy()\n\n###################################################################################\n\n\ndef change_pass():\n global master\n master = tk.Tk()\n master.geometry(\"400x160\")\n master.resizable(False, False)\n master.title(\"Change Password\")\n master.configure(background=\"white\")\n lbl4 = tk.Label(master, text=' Nhập mật khẩu mới',\n bg='white', font=('times', 12, ' bold '))\n lbl4.place(x=10, y=10)\n global old\n old = tk.Entry(master, width=25, fg=\"black\", relief='solid',\n font=('times', 12, ' bold '), show='*')\n old.place(x=180, y=10)\n lbl5 = tk.Label(master, text=' Nhập mật khẩu mới',\n bg='white', font=('times', 12, ' bold '))\n lbl5.place(x=10, y=45)\n global new\n new = tk.Entry(master, width=25, fg=\"black\", relief='solid',\n font=('times', 12, ' bold '), show='*')\n new.place(x=180, y=45)\n lbl6 = tk.Label(master, text='Nhập lại mật khẩu mới',\n bg='white', font=('times', 12, ' bold '))\n lbl6.place(x=10, y=80)\n global nnew\n nnew = tk.Entry(master, width=25, fg=\"black\", relief='solid',\n font=('times', 12, ' bold '), show='*')\n nnew.place(x=180, y=80)\n cancel = tk.Button(master, text=\"Cancel\", command=master.destroy, fg=\"black\", bg=\"red\",\n height=1, width=25, activebackground=\"white\", font=('times', 10, ' bold '))\n cancel.place(x=200, y=120)\n save1 = tk.Button(master, text=\"Save\", command=save_pass, fg=\"black\", bg=\"#3ece48\",\n height=1, width=25, activebackground=\"white\", font=('times', 10, ' bold '))\n save1.place(x=10, y=120)\n master.mainloop()\n\n#####################################################################################\n\n\ndef psw():\n 
assure_path_exists(\"TrainingImageLabel/\")\n exists1 = os.path.isfile(\"TrainingImageLabel\\psd.txt\")\n if exists1:\n tf = open(\"TrainingImageLabel\\psd.txt\", \"r\")\n key = tf.read()\n else:\n new_pas = tsd.askstring('Không tìm thấy mật khẩu cũ',\n 'Vui lòng nhập lại mật khẩu cũ', show='*')\n if new_pas == None:\n mess._show(title='Không có mật khẩu nào được nhập',\n message='Mật khẩu mới trống')\n else:\n tf = open(\"TrainingImageLabel\\psd.txt\", \"w\")\n tf.write(new_pas)\n mess._show(title='Password Registered',\n message='Cập nhật mật khẩu mới thành công!!')\n return\n password = tsd.askstring('Password', 'Enter Password', show='*')\n if (password == key):\n TrainImages()\n elif (password == None):\n pass\n else:\n mess._show(title='Sai mật khẩu',\n message='Bạn đã nhập sai mật khẩu')\n\n######################################################################################\n\n\ndef clear():\n txt.delete(0, 'end')\n res = \"Xin chụp ảnh trước khi điểm danh nếu bạn chưa chụp trước đây\"\n message1.configure(text=res)\n\n\ndef clear2():\n txt2.delete(0, 'end')\n res = \"1)Chụp ảnh >>> 2)Lưu thông tin\"\n message1.configure(text=res)\n\n#######################################################################################\n\n\ndef TakeImages():\n check_haarcascadefile()\n columns = ['SERIAL NO.', '', 'ID', '', 'NAME']\n assure_path_exists(\"StudentDetails/\")\n assure_path_exists(\"TrainingImage/\")\n serial = 0\n exists = os.path.isfile(\"StudentDetails\\StudentDetails.csv\")\n if exists:\n with open(\"StudentDetails\\StudentDetails.csv\", 'r') as csvFile1:\n reader1 = csv.reader(csvFile1)\n for l in reader1:\n serial = serial + 1\n serial = (serial // 2)\n csvFile1.close()\n else:\n with open(\"StudentDetails\\StudentDetails.csv\", 'a+') as csvFile1:\n writer = csv.writer(csvFile1)\n writer.writerow(columns)\n serial = 1\n csvFile1.close()\n Id = (txt.get())\n name = (txt2.get())\n if ((name.isalpha()) or (' ' in name)):\n cam = cv2.VideoCapture(0)\n harcascadePath = \"haarcascade_frontalface_default.xml\"\n detector = cv2.CascadeClassifier(harcascadePath)\n sampleNum = 0\n while (True):\n ret, img = cam.read()\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n faces = detector.detectMultiScale(gray, 1.3, 5)\n for (x, y, w, h) in faces:\n cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)\n # incrementing sample number\n sampleNum = sampleNum + 1\n # saving the captured face in the dataset folder TrainingImage\n cv2.imwrite(\"TrainingImage\\ \" + name + \".\" + str(serial) + \".\" + Id + '.' 
+ str(sampleNum) + \".jpg\",\n gray[y:y + h, x:x + w])\n # display the frame\n cv2.imshow('Taking Images', img)\n # wait for 100 miliseconds\n if cv2.waitKey(100) & 0xFF == ord('q'):\n break\n # break if the sample number is morethan 100\n elif sampleNum > 100:\n break\n cam.release()\n cv2.destroyAllWindows()\n res = \"Images Taken for ID : \" + Id\n row = [serial, '', Id, '', name]\n with open('StudentDetails\\StudentDetails.csv', 'a+') as csvFile:\n writer = csv.writer(csvFile)\n writer.writerow(row)\n csvFile.close()\n message1.configure(text=res)\n else:\n if (name.isalpha() == False):\n res = \"Enter Correct name\"\n message.configure(text=res)\n\n########################################################################################\n\n\ndef TrainImages():\n check_haarcascadefile()\n assure_path_exists(\"TrainingImageLabel/\")\n recognizer = cv2.face_LBPHFaceRecognizer.create()\n harcascadePath = \"haarcascade_frontalface_default.xml\"\n detector = cv2.CascadeClassifier(harcascadePath)\n faces, ID = getImagesAndLabels(\"TrainingImage\")\n try:\n recognizer.train(faces, np.array(ID))\n except:\n mess._show(title='Chưa có ai đăng kí',\n message='Vui lòng đăng kí trước khi điểm danh!!!')\n return\n recognizer.save(\"TrainingImageLabel\\Trainner.yml\")\n res = \"Lưu thông tin thành công\"\n message1.configure(text=res)\n message.configure(text='Tổng số đã đăng kí: ' + str(ID[0]))\n\n# 3\n\n\ndef getImagesAndLabels(path):\n # get the path of all the files in the folder\n imagePaths = [os.path.join(path, f) for f in os.listdir(path)]\n # create empth face list\n faces = []\n # create empty ID list\n Ids = []\n # now looping through all the image paths and loading the Ids and the images\n for imagePath in imagePaths:\n # loading the image and converting it to gray scale\n pilImage = Image.open(imagePath).convert('L')\n # Now we are converting the PIL image into numpy array\n imageNp = np.array(pilImage, 'uint8')\n # getting the Id from the image\n ID = int(os.path.split(imagePath)[-1].split(\".\")[1])\n # extract the face from the training image sample\n faces.append(imageNp)\n Ids.append(ID)\n return faces, Ids\n\n###########################################################################################\n\n\ndef TrackImages():\n check_haarcascadefile()\n assure_path_exists(\"Attendance/\")\n assure_path_exists(\"StudentDetails/\")\n for k in tv.get_children():\n tv.delete(k)\n msg = ''\n i = 0\n j = 0\n recognizer = cv2.face.LBPHFaceRecognizer_create() # cv2.createLBPHFaceRecognizer()\n exists3 = os.path.isfile(\"TrainingImageLabel\\Trainner.yml\")\n if exists3:\n recognizer.read(\"TrainingImageLabel\\Trainner.yml\")\n else:\n mess._show(title='Data Missing',\n message='Nhấn Lưu thông tin để đặt lại!!')\n return\n harcascadePath = \"haarcascade_frontalface_default.xml\"\n faceCascade = cv2.CascadeClassifier(harcascadePath)\n\n cam = cv2.VideoCapture(0)\n font = cv2.FONT_HERSHEY_SIMPLEX\n col_names = ['Id', '', 'Name', '', 'Date', '', 'Time']\n exists1 = os.path.isfile(\"StudentDetails\\StudentDetails.csv\")\n if exists1:\n df = pd.read_csv(\"StudentDetails\\StudentDetails.csv\")\n else:\n mess._show(title='Details Missing',\n message='Thông tin chưa đủ')\n cam.release()\n cv2.destroyAllWindows()\n window.destroy()\n while True:\n ret, im = cam.read()\n gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)\n faces = faceCascade.detectMultiScale(gray, 1.2, 5)\n for (x, y, w, h) in faces:\n cv2.rectangle(im, (x, y), (x + w, y + h), (225, 0, 0), 2)\n serial, conf = recognizer.predict(gray[y:y + h, 
x:x + w])\n if (conf < 50):\n ts = time.time()\n date = datetime.datetime.fromtimestamp(ts).strftime('%d-%m-%Y')\n timeStamp = datetime.datetime.fromtimestamp(\n ts).strftime('%H:%M:%S')\n aa = df.loc[df['SERIAL NO.'] == serial]['NAME'].values\n ID = df.loc[df['SERIAL NO.'] == serial]['ID'].values\n ID = str(ID)\n ID = ID[1:-1]\n bb = str(aa)\n bb = bb[2:-2]\n attendance = [str(ID), '', bb, '', str(date),\n '', str(timeStamp)]\n\n else:\n Id = 'Unknown'\n bb = str(Id)\n cv2.putText(im, str(bb), (x, y + h), font, 1, (255, 255, 255), 2)\n cv2.imshow('Điểm danh', im)\n if (cv2.waitKey(1) == ord('q')):\n break\n ts = time.time()\n date = datetime.datetime.fromtimestamp(ts).strftime('%d-%m-%Y')\n exists = os.path.isfile(\"Attendance\\Attendance_\" + date + \".csv\")\n if exists:\n with open(\"Attendance\\Attendance_\" + date + \".csv\", 'a+') as csvFile1:\n writer = csv.writer(csvFile1)\n writer.writerow(attendance)\n csvFile1.close()\n print(attendance[0], attendance[2], attendance[4] + attendance[6])\n cursor.execute(\"\"\"\n INSERT INTO Students (UserId, Name, Date)\n VALUES (?,?,?)\"\"\", attendance[0], str(attendance[2]), str(attendance[4]) + ' ' + str(attendance[6]))\n cnxn.commit()\n print(\"insert success\")\n else:\n with open(\"Attendance\\Attendance_\" + date + \".csv\", 'a+') as csvFile1:\n writer = csv.writer(csvFile1)\n writer.writerow(col_names)\n writer.writerow(attendance)\n csvFile1.close()\n cursor.execute(\"\"\"\n INSERT INTO Students (UserId, Name, Date) \n VALUES (?,?,?)\"\"\",attendance[0], str(attendance[2]), str(attendance[4]) + ' ' + str(attendance[6]))\n\n cnxn.commit()\n print(\"insert success\")\n print(attendance[0], attendance[2], attendance[4] + attendance[6])\n with open(\"Attendance\\Attendance_\" + date + \".csv\", 'r') as csvFile1:\n reader1 = csv.reader(csvFile1)\n for lines in reader1:\n i = i + 1\n if (i > 1):\n if (i % 2 != 0):\n iidd = str(lines[0]) + ' '\n tv.insert('', 0, text=iidd, values=(\n str(lines[2]), str(lines[4]), str(lines[6])))\n csvFile1.close()\n cam.release()\n cv2.destroyAllWindows()\n\n######################################## USED STUFFS ############################################\n\nglobal key\nkey = ''\n\nts = time.time()\ndate = datetime.datetime.fromtimestamp(ts).strftime('%d-%m-%Y')\nday,month,year = date.split(\"-\")\n\nmont ={'01':'January',\n '02': 'February',\n '03': 'March',\n '04': 'April',\n '05': 'May',\n '06': 'June',\n '07': 'July',\n '08': 'August',\n '09': 'September',\n '10': 'October',\n '11': 'November',\n '12': 'December'\n }\n\n######################################## GUI FRONT-END ###########################################\n\nwindow = tk.Tk()\nwindow.geometry(\"1280x720\")\nwindow.resizable(True, False)\nwindow.title(\"Attendance System\")\nwindow.configure(background='#262523')\n\nframe1 = tk.Frame(window, bg=\"#00aeff\")\nframe1.place(relx=0.11, rely=0.17, relwidth=0.39, relheight=0.80)\n\nframe2 = tk.Frame(window, bg=\"#00aeff\")\nframe2.place(relx=0.51, rely=0.17, relwidth=0.38, relheight=0.80)\n\nmessage3 = tk.Label(window, text=\"Hệ thống điểm danh bằng nhận diện khuôn mặt\" , fg=\"white\",bg=\"#262523\" ,width=55 ,height=1,font=('times', 29, ' bold '))\nmessage3.place(x=10, y=10)\n\nframe3 = tk.Frame(window, bg=\"#c4c6ce\")\nframe3.place(relx=0.52, rely=0.09, relwidth=0.09, relheight=0.07)\n\nframe4 = tk.Frame(window, bg=\"#c4c6ce\")\nframe4.place(relx=0.36, rely=0.09, relwidth=0.16, relheight=0.07)\n\ndatef = tk.Label(frame4, text = day+\"-\"+mont[month]+\"-\"+year+\" | \", fg=\"orange\", bg=\"#262523\" 
,width=55 ,height=1,font=('times', 22, ' bold '))\ndatef.pack(fill='both', expand=1)\n\nclock = tk.Label(frame3, fg=\"orange\",bg=\"#262523\" ,width=55 ,height=1,font=('times', 22, ' bold '))\nclock.pack(fill='both', expand=1)\ntick()\n\nhead2 = tk.Label(frame2, text=\" Tạo mới thông tin \", fg=\"black\", bg=\"#3ece48\" ,font=('times', 17, ' bold ') )\nhead2.grid(row=0, column=0)\n\nhead1 = tk.Label(frame1, text=\" Đã đăng kí \", fg=\"black\", bg=\"#3ece48\" ,font=('times', 17, ' bold ') )\nhead1.place(x=0, y=0)\n\nlbl = tk.Label(frame2, text=\"ID\", width=20 ,height=1 ,fg=\"black\" ,bg=\"#00aeff\" ,font=('times', 17, ' bold ') )\nlbl.place(x=80, y=55)\n\ntxt = tk.Entry(frame2, width=32 ,fg=\"black\",font=('times', 15, ' bold '))\ntxt.place(x=30, y=88)\n\nlbl2 = tk.Label(frame2, text=\"Họ và tên\", width=20 ,fg=\"black\" ,bg=\"#00aeff\" ,font=('times', 17, ' bold '))\nlbl2.place(x=80, y=140)\n\ntxt2 = tk.Entry(frame2, width=32 ,fg=\"black\",font=('times', 15, ' bold ') )\ntxt2.place(x=30, y=173)\n\nmessage1 = tk.Label(frame2, text=\"Vui lòng chụp ảnh trước khi điểm danh\" , bg=\"#00aeff\" ,fg=\"black\" ,width=39 ,height=1, activebackground = \"yellow\" ,font=('times', 15, ' bold '))\nmessage1.place(x=7, y=230)\n\nmessage = tk.Label(frame2, text=\"\" , bg=\"#00aeff\" ,fg=\"black\" ,width=39,height=1, activebackground = \"yellow\" ,font=('times', 16, ' bold '))\nmessage.place(x=7, y=450)\n\nlbl3 = tk.Label(frame1, text=\"Điểm danh\", width=20 ,fg=\"black\" ,bg=\"#00aeff\" ,height=1 ,font=('times', 17, ' bold '))\nlbl3.place(x=100, y=115)\n\nres = 0\nexists = os.path.isfile(\"StudentDetails\\StudentDetails.csv\")\nif exists:\n with open(\"StudentDetails\\StudentDetails.csv\", 'r') as csvFile1:\n reader1 = csv.reader(csvFile1)\n for l in reader1:\n res = res + 1\n res = (res // 2) - 1\n csvFile1.close()\nelse:\n res = 0\nmessage.configure(text='Tổng số học sinh : '+str(res))\n\n##################### MENUBAR #################################\n\nmenubar = tk.Menu(window, relief='ridge')\nfilemenu = tk.Menu(menubar, tearoff=0)\nfilemenu.add_command(label='Đổi mật khẩu', command= change_pass)\nfilemenu.add_command(label='Liên hệ', command= contact)\nfilemenu.add_command(label='Thoát', command = window.destroy)\nmenubar.add_cascade(label='Help', font=('times', 29, ' bold '),menu=filemenu)\n\n################## TREEVIEW ATTENDANCE TABLE ####################\n\ntv = ttk.Treeview(frame1,height =13,columns = ('name','date','time'))\ntv.column('#0', width=82)\ntv.column('name', width=130)\ntv.column('date', width=133)\ntv.column('time', width=133)\ntv.grid(row=2, column=0,padx=(0,0),pady=(150,0),columnspan=4)\ntv.heading('#0', text ='ID')\ntv.heading('name', text ='Tên')\ntv.heading('date', text ='Ngày')\ntv.heading('time', text ='Giờ')\n\n###################### SCROLLBAR ################################\n\nscroll = ttk.Scrollbar(frame1,orient='vertical',command=tv.yview)\nscroll.grid(row=2, column=4,padx=(0,100),pady=(150,0),sticky='ns')\ntv.configure(yscrollcommand=scroll.set)\n\n###################### BUTTONS ##################################\n\nclearButton = tk.Button(frame2, text=\"Xóa\", command=clear , fg=\"black\" ,bg=\"#ea2a2a\" ,width=11 ,activebackground = \"white\" ,font=('times', 11, ' bold '))\nclearButton.place(x=335, y=86)\nclearButton2 = tk.Button(frame2, text=\"Xóa\", command=clear2 , fg=\"black\" ,bg=\"#ea2a2a\" ,width=11 , activebackground = \"white\" ,font=('times', 11, ' bold '))\nclearButton2.place(x=335, y=172)\ntakeImg = tk.Button(frame2, text=\"Chụp ảnh\", command=TakeImages , 
fg=\"white\" ,bg=\"blue\" ,width=34 ,height=1, activebackground = \"white\" ,font=('times', 15, ' bold '))\ntakeImg.place(x=30, y=300)\ntrainImg = tk.Button(frame2, text=\"Lưu ảnh\", command=psw , fg=\"white\" ,bg=\"blue\" ,width=34 ,height=1, activebackground = \"white\" ,font=('times', 15, ' bold '))\ntrainImg.place(x=30, y=380)\ntrackImg = tk.Button(frame1, text=\"Điểm danh\", command=TrackImages , fg=\"black\" ,bg=\"yellow\" ,width=35 ,height=1, activebackground = \"white\" ,font=('times', 15, ' bold '))\ntrackImg.place(x=30, y=50)\nquitWindow = tk.Button(frame1, text=\"Thoát\", command=window.destroy , fg=\"black\" ,bg=\"red\" ,width=35 ,height=1, activebackground = \"white\" ,font=('times', 15, ' bold '))\nquitWindow.place(x=30, y=450)\n\n##################### END ######################################\n\nwindow.configure(menu=menubar)\nwindow.mainloop()\n\n####################################################################################################\n","repo_name":"ThanhThuan2k/Face_Detection_Python","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":21220,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"35147385629","text":"import json\r\n\r\n\r\nclass Anagram:\r\n def __init__(self, a_type=\"country\"):\r\n self.type = a_type\r\n self.__load_database()\r\n\r\n def __load_database(self):\r\n import json\r\n\r\n with open(\"./{}-database.json\".format(self.type), \"r\") as fp:\r\n li = json.loads(fp.read())\r\n self.database = li\r\n\r\n def match(self, inputword):\r\n for con in self.database:\r\n if sorted(str(con).lower()) == sorted(inputword.lower()):\r\n return True, con\r\n return False, \"No match!\"\r\n\r\n def pro_match(self, inputword):\r\n database = [\r\n con for con in self.__load_compiled_database() if len(con) == len(inputword)\r\n ]\r\n\r\n result_list = []\r\n # print(database)\r\n for con_name in database:\r\n points = 0\r\n list_input = list(inputword.lower())\r\n for letter in con_name:\r\n if letter in list_input:\r\n points += 1\r\n # print(list_input)\r\n # print(letter)\r\n list_input.remove(letter)\r\n if len(con_name) > len(inputword):\r\n\r\n result_list.append(((points / len(con_name)) * 100, con_name))\r\n else:\r\n result_list.append(((points / len(inputword)) * 100, con_name))\r\n return sorted(result_list, key=lambda x: x[0], reverse=True)\r\n\r\n def __get_maximum(self, list_of_data):\r\n lista = list_of_data\r\n maximum_matched = lista[0][1]\r\n n_list = []\r\n n_list.append(lista.pop(0))\r\n for x in lista:\r\n if x[1] != maximum_matched:\r\n break\r\n else:\r\n n_list.append(x[0])\r\n return n_list\r\n\r\n def pro_match_max(self, inputword):\r\n return self.__get_maximum(self.pro_match(inputword))\r\n\r\n def __load_compiled_database(self):\r\n return [x.lower() for x in self.database]\r\n\r\n def ultra_pro_match(self, inputword):\r\n database = self.__load_compiled_database()\r\n\r\n result_list = []\r\n # print(database)\r\n for con_name in database:\r\n points = 0\r\n list_input = list(inputword.lower())\r\n for letter in con_name:\r\n if letter in list_input:\r\n points += 1\r\n # print(list_input)\r\n # print(letter)\r\n list_input.remove(letter)\r\n if len(con_name) > len(inputword):\r\n\r\n result_list.append(((points / len(con_name)) * 100, con_name))\r\n else:\r\n result_list.append(((points / len(inputword)) * 100, con_name))\r\n return sorted(result_list, key=lambda x: x[0], reverse=True)\r\n\r\n def ultra_pro_match_max(self, inputword):\r\n 
return self.__get_maximum(self.ultra_pro_match(inputword))\r\n\r\n def similarity_match(self, inputword):\r\n n_li = []\r\n for x in self.__load_compiled_database():\r\n points = 0\r\n\r\n wordforlen = min(len(inputword), len(x))\r\n\r\n for y in range(0, wordforlen):\r\n # print(y)\r\n if inputword[y] == x[y]:\r\n points += 1\r\n n_li.append((x, (points / wordforlen) * 100))\r\n return sorted(n_li, key=lambda x: x[1], reverse=True)\r\n\r\n\r\nsolver = Anagram()\r\nprint(solver.match(\"Bnladshage\"))\r\nprint(solver.pro_match(\"artinagen\"))\r\nprint(solver.pro_match_max(\"madasagcar\"))\r\nsolver = Anagram(\"capital\")\r\nprint(solver.pro_match(\"hakad\"))\r\nprint(solver.ultra_pro_match_max(\"hakad\"))\r\nprint(solver.pro_match_max(\"hakad\"))\r\nprint(solver.similarity_match(\"hakad\"))\r\n","repo_name":"Shuddho11288/anagram-solver","sub_path":"anagram-solver.py","file_name":"anagram-solver.py","file_ext":"py","file_size_in_byte":3653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"72736559241","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom db.models import Course\n\ndef get_courses_on_page(url):\n # Load the HTML document into BeautifulSoup\n r = requests.get(url)\n soup = BeautifulSoup(r.text, 'html.parser')\n\n courses = []\n\n # Get all \"courseblock\" divs\n courseblocks = soup.find_all('div', 'courseblock')\n for courseblock in courseblocks:\n # Get the title and description elements for each courseblock\n title_el = courseblock.find('p', 'courseblocktitle')\n desc_el = courseblock.find('p', 'courseblockdesc')\n\n # Split the title string to get the course code, name, and credits\n course_title_split = title_el.string.split('.')\n course_code_split = course_title_split[0].split('\\xa0')\n course_credits_split = course_title_split[2].split()\n \n course_subject = course_code_split[0]\n course_num = course_code_split[1]\n course_name = course_title_split[1].strip()\n try:\n course_credits = int(course_credits_split[0])\n except:\n course_credits = 0\n \n # Get the course description if it is provided\n if len(desc_el.contents) == 0:\n course_comments = None\n else:\n course_comments = ''\n for string in desc_el.strings:\n course_comments += string\n course_comments = course_comments.strip()\n course_comments = course_comments.replace('\\xa0', ' ')\n \n courses.append(Course(\n subject=course_subject,\n num=course_num,\n course=course_name,\n comments=course_comments,\n credits=course_credits\n ))\n\n return courses\n\ndef get_all_courses():\n # Load the GW A-Z course page\n r = requests.get('http://bulletin.gwu.edu/courses/')\n soup = BeautifulSoup(r.text, 'html.parser')\n\n urls = []\n courses = []\n\n # Find the active 'courses' tab\n courses_tab = soup.find('li', 'active self')\n # Get all course tabs in this list\n course_list = courses_tab.find('ul', 'nav levelone')\n # Get all url's in this list\n course_links = course_list.find_all('a')\n for link in course_links:\n href = link.get('href')\n full_url = f'http://bulletin.gwu.edu{href}'\n urls.append(full_url)\n \n # Get all courses for each url\n for i in range(len(urls)):\n url = urls[i]\n print(f'Getting courses at {url}... 
{i+1}/{len(urls)}')\n new_courses = get_courses_on_page(url)\n courses.extend(new_courses)\n \n return courses","repo_name":"kevindweb/curriculum-utility","sub_path":"scripts/datafetching/courses.py","file_name":"courses.py","file_ext":"py","file_size_in_byte":2579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"16833489105","text":"import matplotlib.pyplot as plt\nimport matplotlib.pylab as pylab\n\nimport requests\nfrom io import BytesIO\nfrom PIL import Image\nimport numpy as np\n\nimport cv2\n\n# this makes our figures bigger\npylab.rcParams['figure.figsize'] = 20, 12\n\nfrom maskrcnn_benchmark.config import cfg\nfrom predictor import COCODemo\n\nconfig_file = \"configs/e2e_ms_rcnn_R_50_FPN_1x.yaml\"\n\n# update the config options with the config file\ncfg.merge_from_file(config_file)\n# manual override some options\ncfg.merge_from_list([\"MODEL.DEVICE\", \"cpu\"])\n\n\ncoco_demo = COCODemo(\n cfg,\n min_image_size=800,\n confidence_threshold=0.7,\n)\n\ndef load(url):\n \"\"\"\n Given an url of an image, downloads the image and\n returns a PIL image\n \"\"\"\n response = requests.get(url)\n pil_image = Image.open(BytesIO(response.content)).convert(\"RGB\")\n # convert to BGR format\n image = np.array(pil_image)[:, :, [2, 1, 0]]\n return image\n\ndef imshow(img):\n plt.imshow(img[:, :, [2, 1, 0]])\n plt.axis(\"off\")\n\n# from http://cocodataset.org/#explore?id=345434\nimage = load(\"http://farm3.staticflickr.com/2469/3915380994_2e611b1779_z.jpg\")\ncv2.imshow('ori_img', image)\n\n# compute predictions\npredictions = coco_demo.run_on_opencv_image(image)\ncv2.imshow('result', predictions)\n\ncv2.waitKey(0)\n\n","repo_name":"XiaoLaoDi/maskscoring_rcnn","sub_path":"maskrcnn_benchmark/demo/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":1273,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"63"} +{"seq_id":"34345569337","text":"import numpy as np\nimport scipy.interpolate as interpolate\n\n\ndef wave_match(w1,s1,w2,s2,verbose=False):\n \"\"\"Match template and observation wavelengths\n\n Parameters\n ----------\n w1, s1 : ndarray\n Observed spectrum\n w2, s2 : ndarray\n Template spectrum\n verbose: bool, optional\n If True, print additional information statements\n \n Returns\n -------\n w1_fine, s1_fine : ndarray\n Re-scaled observed spectrum\n w2_fine, s2_fine : ndarray\n Re-scaled template spectrum\n \"\"\"\n dw1 = w1[1:] - w1[:-1]\n wmid = (w1[0] + w1[-1])/2.\n \n dw2 = w2[1:] - w2[:-1]\n \n # Determine new uniformly spaced model wavelength scale\n min_disp = min(dw2) # Smallest spacing in template spectrum\n deltaw = w2[-1] - w2[0] # Wavelength range\n nx = (int(deltaw/float(min_disp))+2) # Number of pixels in uniform scale\n if nx%2==0: nx+=1 # nx should be odd\n w2_fine = w2[0] + deltaw * np.arange(nx)/(nx-1.) 
# new wavelength scale (x_seg)\n \n s1_range=np.logical_and(w2_fine>min(w1), w2_fine<max(w1))\n # ... (the remainder of wave_match and the header of the helper below were lost in extraction; the helper name is assumed)\ndef interp_onto_grid(wvl1, flx1, targetwvl):\n wvlrng = np.where((targetwvl >= np.min(wvl1)) & (targetwvl <= np.max(wvl1)))[0]\n f = interpolate.interp1d(wvl1, flx1, kind=\"cubic\", fill_value=0)\n return f(targetwvl[wvlrng])\n","repo_name":"parkerholzer/safe_statistic","sub_path":"App_to_EXPRES/wave_match.py","file_name":"wave_match.py","file_ext":"py","file_size_in_byte":2271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"35284876902","text":"\"\"\"Script to convert datasets to a numpy binary.\n\nCurrently only works in Linux because the 'split' command is used.\n\"\"\"\n\nimport argparse\nimport os\nimport sys\nimport numpy as np\nfrom tokenizer import GPT2BPETokenizer\n\nTMP = \"tmp\"\n\ndef _parse_args():\n \"\"\"Command line arguments.\"\"\"\n parser = argparse.ArgumentParser(description=\"Command line arguments for converting large txt files into numpy binaries. Currently the GPT2 tokenizer will always be used.\")\n\n parser.add_argument(\"--input-path\", type=str, help=\"Path to the txt file that will be converted to a binary.\")\n parser.add_argument(\"--output-directory\", type=str, required=True)\n parser.add_argument(\"--n-shards\", type=int, default=10, help=\"Number of shards the txt file is split in before conversion to binaries and subsequent merging. Python has a large memory overhead for strings, so for very large txt files a lot of shards might be needed during the conversion.\")\n parser.add_argument(\"--test\", action=\"store_true\", help=\"Use to test whether the script is working. This will generate a binary, and decode it and check it is equal to the original text. Will not work for very large text files that do not fit into memory.\")\n # TODO: Potentially add other tokenizers and appropriate data types for their resulting binaries.\n\n return parser.parse_args()\n\ndef _tmp_dir(output_directory: str):\n \"\"\"Temporary directory for binary shards.\"\"\"\n return os.path.join(output_directory, TMP)\n\ndef _clean_dir(path: str):\n \"\"\"Clean files from a temporary directory.\"\"\"\n for root, _, files in os.walk(path):\n for f in files:\n os.remove(os.path.join(root, f))\n\ndef _shard_txt_file(txt_path: str, num_shards: int, dir_path: str):\n \"\"\"Shard a txt file into multiple small files with the 'split' command in linux.\"\"\"\n os.system(f\"split -n {num_shards} {txt_path} {dir_path}/\")\n\n\ndef _convert_txt_shards_to_binary(tokenizer, dir_path):\n \"\"\"Convert a directory containing a number of txt files to binary files.\n\n This will be run on the directory where the split shards of a big txt file are stored.\n \"\"\"\n # WARNING: It is crucial to keep the correct order here.\n for i, f in enumerate(sorted(os.listdir(dir_path))):\n fp = os.path.join(dir_path, f)\n \n with open(fp) as txt_file:\n array = np.array(tokenizer.encode(txt_file.read()), dtype=np.uint16)\n\n # Save the full encoded array as a numpy binary.\n out_name = (os.path.splitext(f)[0] + f\"_binary_shard_{i}.npy\")\n out_path = os.path.join(dir_path, out_name)\n np.save(out_path, array)\n\n\ndef _merge_binaries(input_directory, merged_path):\n \"\"\"Merge all numpy binaries in a temporary directory into a single file.\"\"\"\n # WARNING: It is crucial that the order of the files is maintained here.\n def _extract_index(path):\n return int(os.path.splitext(path)[0].split(\"_\")[-1])\n file_list = [(f, _extract_index(f)) for f in os.listdir(input_directory) if f.endswith(\".npy\")]\n
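 # e.g. \"..._binary_shard_10.npy\" -> 10; sorting on this numeric suffix keeps shard 10 after shard 9, which a plain lexicographic sort would not\n file_list = sorted(file_list, 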
key=lambda x: x[1])\n file_list = [f[0] for f in file_list]\n\n arrays = []\n for f in file_list:\n arrays.append(np.load(os.path.join(input_directory, f)))\n np.save(merged_path, np.concatenate(arrays, axis=0))\n\nif __name__ == \"__main__\":\n args = _parse_args()\n\n # Ensure the temporary directory for the binary shards exists.\n tmp_dir = _tmp_dir(args.output_directory)\n os.makedirs(tmp_dir, exist_ok=True)\n\n # Clean up the current temporary directory.\n _clean_dir(tmp_dir)\n\n # Shard the input text file.\n _shard_txt_file(args.input_path, args.n_shards, tmp_dir)\n\n # Tokenize the text files and store them as binaries.\n tokenizer = GPT2BPETokenizer()\n _convert_txt_shards_to_binary(tokenizer, tmp_dir)\n\n # Merge the binaries into a single file.\n output_name = os.path.splitext(args.input_path)[0] + \".npy\"\n output_path = os.path.join(args.output_directory, output_name)\n _merge_binaries(tmp_dir, output_path)\n\n # Clean the temporary directory.\n _clean_dir(tmp_dir)\n\n # Test the functionality if requested.\n # We verify that decoding the binary file results in the original text.\n if not args.test:\n sys.exit()\n\n with open(args.input_path) as f:\n original = f.read()\n decoded = tokenizer.decode(np.load(output_path))\n assert decoded == original, \"Decoded and original text must be the same!\"\n","repo_name":"wverbeke/ShitGPT","sub_path":"text_to_encoded_binary.py","file_name":"text_to_encoded_binary.py","file_ext":"py","file_size_in_byte":4437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"72476575561","text":"import cv2\n\n\ndef extract_video(length, frame_height, frame_width, fps):\n count = 0\n out = cv2.VideoWriter('output.avi', cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), fps, (int(frame_width), int(frame_height)))\n while(1):\n img = cv2.imread('frames/frame%d.tif' % count)\n if img is None:\n break;\n print('phase 3: saving video %d%%' % int(100*count/length))\n out.write(img)\n count += 1\n out.release()\n cv2.destroyAllWindows()","repo_name":"ashrafsrv/motion_estimation","sub_path":"extract_video.py","file_name":"extract_video.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"8646188966","text":"#!/usr/bin/env python\n\nimport numpy as np\nimport scipy.stats as stats\nimport matplotlib.pyplot as plt\n\n\nsigma = 1\nmu = 0\nn_points = 1000\n\ny_values = mu + sigma * np.random.randn( n_points )\nplt.hist(y_values, density= True)\n\nx_values = np.linspace( mu - 3*sigma, mu + 3*sigma, n_points )\nplt.plot( x_values, stats.norm.pdf ( x_values, mu, sigma ) )\n\n\nplt.show()\n\n","repo_name":"krsheshu/python_libs","sub_path":"libs/normal_distribution/normDist.py","file_name":"normDist.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"} +{"seq_id":"5188898670","text":"from nf_common_source.code.nf.types.nf_column_types import NfColumnTypes\nfrom nf_common_source.code.services.dataframe_service.dataframe_helpers.dataframe_filter_and_renamer import dataframe_filter_and_rename\n\nfrom nf_ea_common_tools_source.b_code.nf_ea_common.common_knowledge.column_types.ea_t.ea_t_connector_column_types import \\\n EaTConnectorColumnTypes\nfrom nf_ea_common_tools_source.b_code.nf_ea_common.common_knowledge.ea_collection_types import EaCollectionTypes\nfrom pandas import DataFrame\n\nfrom 
nf_ea_common_tools_source.b_code.services.general.nf_ea.com.common_knowledge.column_types.nf_ea_com_column_types import \\\n NfEaComColumnTypes\n\n\nclass ThinEaConnectorsFactories:\n def __init__(\n self,\n nf_ea_com_universe):\n self.nf_ea_com_universe = \\\n nf_ea_com_universe\n\n def create(\n self) \\\n -> DataFrame:\n ea_connectors = \\\n self.__create_ea_connectors()\n\n return \\\n ea_connectors\n\n def __create_ea_connectors(\n self) \\\n -> DataFrame:\n extended_t_connector_dataframe = \\\n self.nf_ea_com_universe.ea_tools_session_manager.nf_ea_sql_stage_manager.nf_ea_sql_universe_manager.get_extended_ea_t_table_dataframe(\n ea_repository=self.nf_ea_com_universe.ea_repository,\n ea_collection_type=EaCollectionTypes.EXTENDED_T_CONNECTOR)\n\n nf_uuids_column_name = \\\n NfColumnTypes.NF_UUIDS.column_name\n\n ea_connector_place1_supplier_element_column_name = \\\n NfEaComColumnTypes.ELEMENTS_SUPPLIER_PLACE1_END_CONNECTORS.column_name\n\n ea_connector_place2_client_element_column_name = \\\n NfEaComColumnTypes.ELEMENTS_CLIENT_PLACE2_END_CONNECTORS.column_name\n\n ea_connector_direction_type_name_column_name = \\\n NfEaComColumnTypes.CONNECTORS_DIRECTION_TYPE_NAME.column_name\n\n ea_connector_element_type_name_column_name = \\\n NfEaComColumnTypes.CONNECTORS_ELEMENT_TYPE_NAME.column_name\n\n ea_connector_supplier_cardinality_column_name = \\\n NfEaComColumnTypes.CONNECTORS_SOURCE_CARDINALITY.column_name\n\n ea_connector_client_cardinality_column_name = \\\n NfEaComColumnTypes.CONNECTORS_DEST_CARDINALITY.column_name\n\n ea_connectors = \\\n dataframe_filter_and_rename(\n dataframe=extended_t_connector_dataframe,\n filter_and_rename_dictionary=\n {\n nf_uuids_column_name: nf_uuids_column_name,\n 'start_t_object_nf_uuids': ea_connector_place1_supplier_element_column_name,\n 'end_t_object_nf_uuids': ea_connector_place2_client_element_column_name,\n EaTConnectorColumnTypes.T_CONNECTOR_DIRECTIONS.nf_column_name: ea_connector_direction_type_name_column_name,\n EaTConnectorColumnTypes.T_CONNECTOR_TYPES.nf_column_name: ea_connector_element_type_name_column_name,\n EaTConnectorColumnTypes.T_CONNECTOR_SOURCE_CARDINALITIES.nf_column_name: ea_connector_supplier_cardinality_column_name,\n EaTConnectorColumnTypes.T_CONNECTOR_DEST_CARDINALITIES.nf_column_name: ea_connector_client_cardinality_column_name\n })\n\n return \\\n ea_connectors\n","repo_name":"boro-alpha/nf_ea_common_tools","sub_path":"nf_ea_common_tools_source/b_code/services/general/nf_ea/com/factories/objects/explicit_objects/thin/thin_ea_connectors_factories.py","file_name":"thin_ea_connectors_factories.py","file_ext":"py","file_size_in_byte":3333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"40698152286","text":"#!/usr/bin/python\r\n#coding=utf-8\r\n#Filename:IfcVectorOrDirection.py\r\n\r\n#TYPE IfcVectorOrDirection = SELECT\r\n\t#(IfcDirection,IfcVector);\r\n#END_TYPE;\r\n\r\nclass IfcVectorOrDirection(object):\r\n \"\"\"\"\"\"\r\n def __init__(self,obj):\r\n super(IfcVectorOrDirection,self).__init__()\r\n self.lid=obj.lid\r\n self.m_type=obj.type\r\n \r\n","repo_name":"chenxiaohui/BimCenter","sub_path":"IFCPythonSDK/ifclib/ifcvectorordirection.py","file_name":"ifcvectorordirection.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"63"} +{"seq_id":"74273537480","text":"#\r\n# Copyright (c) nexB Inc. and others. 
All rights reserved.\r\n# ScanCode is a trademark of nexB Inc.\r\n# SPDX-License-Identifier: Apache-2.0\r\n# See http://www.apache.org/licenses/LICENSE-2.0 for the license text.\r\n# See https://github.com/nexB/ahocode for support or download.\r\n# See https://aboutcode.org for more information about nexB OSS projects.\r\n#\r\n# Tests are taken from: WojciechMula/pyahocorasick\r\n# https://github.com/WojciechMula/pyahocorasick/blob/master/tests\r\n\r\nimport unittest\r\n\r\nfrom ahocode import ahocode\r\n\r\n\r\nclass test_automaton_methods(unittest.TestCase):\r\n def test_find_all(self):\r\n automaton = ahocode.Automaton()\r\n words = \"he e hers his she hi him man he\".split()\r\n # 0 1 2 3 4 5 6 7 8\r\n for i, w in enumerate(words):\r\n automaton.add_word(w, (i, w))\r\n query = \"he rshershidamanza \"\r\n # 01234567890123\r\n automaton.make_automaton()\r\n\r\n assert query[2:8] == ' rsher'\r\n results = list(automaton.iter(string=query, start=2, end=8))\r\n assert results == [(6, (4, 'she')), (6, (8, 'he')), (6, (1, 'e'))]\r\n\r\n res = []\r\n\r\n def callback(index, item):\r\n res.append(dict(index=index, item=item))\r\n\r\n assert query[2:11] == ' rshershi'\r\n automaton.find_all(query, callback, 2, 11)\r\n\r\n expected = [\r\n {'index': 6, 'item': (4, 'she')},\r\n {'index': 6, 'item': (8, 'he')},\r\n {'index': 6, 'item': (1, 'e')},\r\n {'index': 8, 'item': (2, 'hers')},\r\n {'index': 10, 'item': (5, 'hi')},\r\n ]\r\n\r\n assert res == expected\r\n\r\n def test_item_keys_values(self):\r\n automaton = ahocode.Automaton()\r\n words = 'he e hers his she hi him man he'.split()\r\n # 0 1 2 3 4 5 6 7 8\r\n for i, w in enumerate(words):\r\n automaton.add_word(w, (i, w))\r\n\r\n expected_keys = ['man', 'she', 'e', 'hi', 'him', 'his', 'he', 'hers']\r\n\r\n expected_values = [\r\n (7, 'man'),\r\n (4, 'she'),\r\n (1, 'e'),\r\n (5, 'hi'),\r\n (6, 'him'),\r\n (3, 'his'),\r\n (8, 'he'),\r\n (2, 'hers'),\r\n ]\r\n\r\n assert sorted(automaton.keys()) == sorted(expected_keys)\r\n assert sorted(automaton.values()) == sorted(expected_values)\r\n assert sorted(dict(automaton.items()).values()) == sorted(expected_values)\r\n assert sorted(dict(automaton.items()).keys()) == sorted(expected_keys)\r\n\r\n automaton.make_automaton()\r\n\r\n assert sorted(automaton.keys()) == sorted(expected_keys)\r\n assert sorted(automaton.values()) == sorted(expected_values)\r\n assert sorted(dict(automaton.items()).values()) == sorted(expected_values)\r\n assert sorted(dict(automaton.items()).keys()) == sorted(expected_keys)\r\n\r\n\r\nclass TestCase(unittest.TestCase):\r\n\r\n def assertEmpty(self, collection):\r\n self.assertEqual(0, len(collection))\r\n\r\n def assertNotEmpty(self, collection):\r\n self.assertGreater(len(collection), 0)\r\n\r\n\r\nclass TestTrieStorePyObjectsBase(TestCase):\r\n\r\n def setUp(self):\r\n self.A = ahocode.Automaton()\r\n self.words = \"word python aho corasick \\x00\\x00\\x00\".split()\r\n self.inexisting = \"test foo bar dword\".split()\r\n\r\n\r\nclass TestTrieMethods(TestTrieStorePyObjectsBase):\r\n \"Test basic methods related to trie structure\"\r\n\r\n def test_add_word(self):\r\n A = self.A\r\n self.assertTrue(A.kind == ahocode.EMPTY)\r\n\r\n n = 0\r\n for word in self.words:\r\n n += 1\r\n A.add_word(word, None)\r\n self.assertEqual(A.kind, ahocode.TRIE)\r\n self.assertEqual(len(A), n)\r\n\r\n # duplicated entry\r\n A.add_word(self.words[0], None)\r\n self.assertTrue(A.kind == ahocode.TRIE)\r\n self.assertTrue(len(A) == n)\r\n\r\n def test_add_empty_word(self):\r\n
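 # an empty key must be rejected: add_word returns a falsy value and the automaton stays EMPTY\r\n if 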
ahocode.unicode:\r\n self.assertFalse(self.A.add_word(\"\", None))\r\n else:\r\n self.assertFalse(self.A.add_word(b\"\", None))\r\n\r\n self.assertEqual(len(self.A), 0)\r\n self.assertEqual(self.A.kind, ahocode.EMPTY)\r\n\r\n def test_clear(self):\r\n A = self.A\r\n self.assertTrue(A.kind == ahocode.EMPTY)\r\n\r\n for w in self.words:\r\n A.add_word(w, w)\r\n\r\n self.assertEqual(len(A), len(self.words))\r\n\r\n A.clear()\r\n self.assertEqual(A.kind, ahocode.EMPTY)\r\n self.assertEqual(len(A), 0)\r\n\r\n def test_exists(self):\r\n A = self.A\r\n\r\n for w in self.words:\r\n A.add_word(w, w)\r\n\r\n for w in self.words:\r\n self.assertTrue(A.exists(w))\r\n\r\n for w in self.inexisting:\r\n self.assertFalse(A.exists(w))\r\n\r\n def test_contains(self):\r\n A = self.A\r\n for w in self.words:\r\n A.add_word(w, w)\r\n\r\n for w in self.words:\r\n self.assertTrue(w in A)\r\n\r\n for w in self.inexisting:\r\n self.assertTrue(w not in A)\r\n\r\n def test_get1(self):\r\n A = self.A\r\n for i, w in enumerate(self.words):\r\n A.add_word(w, i + 1)\r\n\r\n for i, w in enumerate(self.words):\r\n self.assertEqual(A.get(w), i + 1)\r\n\r\n def test_get2(self):\r\n A = self.A\r\n for i, w in enumerate(self.words):\r\n A.add_word(w, i + 1)\r\n\r\n for w in self.inexisting:\r\n self.assertEqual(A.get(w, None), None)\r\n\r\n def test_get3(self):\r\n A = self.A\r\n for i, w in enumerate(self.words):\r\n A.add_word(w, i + 1)\r\n\r\n for w in self.inexisting:\r\n with self.assertRaises(KeyError):\r\n A.get(w)\r\n\r\n def test_get_from_an_empty_automaton(self):\r\n A = ahocode.Automaton()\r\n\r\n r = A.get('foo', None)\r\n self.assertEqual(r, None)\r\n","repo_name":"nexB/ahocode","sub_path":"tests/test_ahocode.py","file_name":"test_ahocode.py","file_ext":"py","file_size_in_byte":5870,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"32369535686","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('nameserver', '0007_server_keyname'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='staticrecord',\n name='expire',\n field=models.DateField(null=True, default=None),\n ),\n ]\n","repo_name":"ntnusky/shiftleader","sub_path":"nameserver/migrations/0008_staticrecord_expire.py","file_name":"0008_staticrecord_expire.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"991498068","text":"import dash \r\nimport dash_core_components as dcc\r\nimport dash_html_components as html\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom dash.dependencies import Output, Input\r\nimport dash_table\r\nfrom openrouteservice import client\r\nimport json\r\nimport configparser\r\nimport os\r\nimport folium\r\nimport plotly.express as px\r\nimport random\r\nfrom backend.path_planning import path_planning\r\nfrom backend.zone_splitting import kmeans_subdivision\r\nfrom backend.prediction import MakePrediction\r\nimport tensorflow as tf\r\n\r\nrandom.seed(123)\r\nnp.random.seed(123)\r\n\r\nphysical_devices = tf.config.list_physical_devices('GPU') \r\ntf.config.experimental.set_memory_growth(physical_devices[0], True)\r\n\r\ncurr_dir = os.getcwd()\r\nconfig_file = os.path.join(curr_dir, 'configs.ini')\r\nconfig = configparser.ConfigParser()\r\nconfig.read(config_file)\r\nAPI_KEY = config['GENERAL'].get('API_KEY')\r\nUPDATE_INTERVAL = 
config['GENERAL'].getint('UPDATE_INTERVAL')\r\n\r\n# APP PROPERTIES\r\napp = dash.Dash(name='UnWaste! FrontEnd')\r\napp.title = \"UnWaste! Project\"\r\n\r\n# EXTERNAL SETTINGS\r\n# should be loaded elsewhere and imported here / in a submodule\r\n\r\nSTART_COORDS = (41.89117549369146, 12.502362854652286)\r\n# positions are divided in groups for each garbage-truck\r\ngarbage_bins = pd.read_csv('./DATABASE/coords_groups.csv')\r\nPOSITIONS = garbage_bins[['latitude','longitude']].values\r\ngarbage_trucks = pd.read_csv('DATABASE/trucks_coords.csv')\r\nGARBAGE_TRUCKS = garbage_trucks[['latitude','longitude']].values\r\navailable_garbage_trucks = garbage_trucks['available'].sum() # only available garbage trucks\r\n\r\npredictor = MakePrediction('.')\r\n\r\nGARBAGE_LABELS = []\r\nfor k in garbage_trucks['truck_id']:\r\n    GARBAGE_LABELS.append({'label': f'Truck #{k + 1}', 'value': str(k)})\r\nSHOW_ROUTES = {0: False, 1: False, 2: False, 3: False, 4: False, 5: False}\r\n\r\n# precompute paths\r\nbins_full = predictor.prediction()\r\nclusters, centers = kmeans_subdivision(bins_full, '.', available_garbage_trucks)\r\npaths = path_planning(clusters, centers, '.')\r\n\r\nclnt = client.Client(key=API_KEY) # Create client with api key\r\n\r\n# theoretically, the paths / markups should be loaded in real time from some service\r\n# in this demo, however, we can just add them from a database of precomputed positions\r\n@app.callback(Output('map', 'srcDoc'),\r\n              Input('interval-component', 'n_intervals')) # add an input here to load the path on the map at a user's notice\r\ndef update_map(n):\r\n    global START_COORDS, POSITIONS, GARBAGE_TRUCKS, SHOW_ROUTES, paths\r\n\r\n    rome_map = folium.Map(location = START_COORDS, title = \"Rome\", zoom_start = 16, min_zoom = 16, max_zoom = 18)\r\n\r\n    # add garbage bin markers (bins predicted to be full get the blue icon)\r\n    for i, p in enumerate(POSITIONS):\r\n        if i not in bins_full:\r\n            folium.Marker(location=[p[0], p[1]],\r\n                          icon = folium.features.CustomIcon(\"assets/dustbin.png\",\r\n                          icon_size=(20, 20)),\r\n                          popup=f'Garbage bin #{int(i)}'\r\n                          ).add_to(rome_map)\r\n        else:\r\n            folium.Marker(location=[p[0], p[1]],\r\n                          icon = folium.features.CustomIcon(\"assets/bluebin.png\",\r\n                          icon_size=(20, 20)),\r\n                          popup=f'Garbage bin #{int(i)}'\r\n                          ).add_to(rome_map)\r\n\r\n    # draw active trucks\r\n    active_trucks_pos = garbage_trucks.loc[garbage_trucks['available'] == 1, ['truck_id', 'latitude','longitude']].values\r\n    for pos in active_trucks_pos:\r\n        folium.Marker(location=[pos[1], pos[2]],\r\n                      icon = folium.features.CustomIcon(\"assets/garbagetruck.png\",\r\n                      icon_size=(35, 35)),\r\n                      popup=f'Garbage truck #{int(pos[0] + 1)}'\r\n                      ).add_to(rome_map)\r\n    # draw inactive trucks\r\n    inactive_trucks_pos = garbage_trucks.loc[garbage_trucks['available'] == 0, ['truck_id', 'latitude','longitude']].values\r\n    for pos in inactive_trucks_pos:\r\n        folium.Marker(location=[pos[1], pos[2]],\r\n                      icon = folium.features.CustomIcon(\"assets/garbagetruck_off.png\",\r\n                      icon_size=(35, 35)),\r\n                      popup=f'Garbage truck #{int(pos[0] + 1)}'\r\n                      ).add_to(rome_map)\r\n\r\n    # draw the planned route of the truck toggled on via SHOW_ROUTES\r\n    for truck in garbage_trucks['truck_id'].values:\r\n\r\n        # get directions\r\n        if truck in paths['trucks'] and SHOW_ROUTES[truck]: # uncomment to use API; TODO: add checkbox to toggle route drawing\r\n            bins_ids = paths['zone_' + str(paths['trucks'].index(truck))]\r\n            coordinates = [[GARBAGE_TRUCKS[truck][1], GARBAGE_TRUCKS[truck][0]]] + [[POSITIONS[idx][1], POSITIONS[idx][0]] for idx in bins_ids] \r\n            route = 
clnt.directions(coordinates=coordinates,\r\n                                    profile='driving-car',\r\n                                    format='geojson',\r\n                                    preference='fastest',\r\n                                    geometry=True,\r\n                                    geometry_simplify=True)\r\n            # swap lat/long for folium\r\n            points = [[p[1], p[0]] for p in route['features'][0]['geometry']['coordinates']]\r\n\r\n            folium.PolyLine(points, color='red', weight=10, opacity=0.8).add_to(rome_map)\r\n\r\n    return rome_map._repr_html_()\r\n\r\n@app.callback(\r\n    dash.dependencies.Output('ignore-me', 'children'),\r\n    [dash.dependencies.Input('submit-val', 'n_clicks')],\r\n    [dash.dependencies.State('input-on-submit', 'value')])\r\ndef update_output(n_clicks, value):\r\n    global SHOW_ROUTES\r\n    truck_n = int(value)\r\n    if truck_n in SHOW_ROUTES.keys():\r\n        for k in SHOW_ROUTES.keys():\r\n            SHOW_ROUTES[k] = False\r\n        SHOW_ROUTES[truck_n] = True\r\n    return ''\r\n\r\n##### callback for left-stats #####\r\n@app.callback(\r\n    Output(\"pie-chart\", \"figure\"), \r\n    [Input(\"names\", \"value\"), \r\n    Input(\"values\", \"value\")])\r\ndef generate_chart(names, values):\r\n    fig = px.pie(df_left, values= values, names= names)\r\n    return fig\r\n\r\n\r\n##### callback for right- stats ###\r\n@app.callback(\r\n    Output(\"histo\", \"figure\"), \r\n    [Input(\"waste\", \"value\")])\r\ndef display_color(waste):\r\n    waste_type = df_right['waste'] == waste\r\n    fig = px.bar(df_right[waste_type], x ='day', y = 'total_waste')\r\n    return fig\r\n\r\n\r\n\r\n\r\n\r\n############### Dataframe ############################## \r\ndata = {\"Report_id\":[000,111,222,333], \r\n        \"Truck_id\":[234,567,876,766],\r\n        \"Type\":['Report', \"Issue\", \"NaN\", \"Injury\"],\r\n        \"Operator\": ['Anil',\"Giuliano\", \"Marco\", \"Alberto\" ]}\r\n\r\ndf = pd.DataFrame(data)\r\n\r\nchoose_len = 20\r\n\r\n##### Trucks Dataframe ###\r\ncondition_fuel_mount =['Empty', 'Full', '50%']\r\nTIME = [\"morning\", \"evening\", \"afternoon\"]\r\n\r\ndata_left = {'Fuel_L':[random.randrange(1,80,1) for i in range(choose_len)],\r\n             \"mount_mc\": [random.randrange(1,15,1) for i in range(choose_len)],\r\n             \"truck_fuel_situation\": [random.choice(condition_fuel_mount) for i in range(choose_len)],\r\n             \"time\": [random.choice(TIME) for i in range(choose_len)]}\r\n\r\ndf_left = pd.DataFrame(data_left)\r\n\r\n####### Waste dataframe \r\nwaste_type = ['PET', 'aluminium', \"paper\", \"glassware\", \"metalware\", \"undifferentiated\"]\r\nTIME_week = ['Monday',\"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\", \"Saturday\", \"Sunday\"]\r\n\r\ndata_right = {\"waste\": [random.choice(waste_type) for i in range(choose_len)],\r\n              \"total_waste\":[random.randrange(1,648,1) for i in range(choose_len)],\r\n              \"day\": [random.choice(TIME_week) for i in range(choose_len)]}\r\n\r\n\r\ndf_right = pd.DataFrame(data_right)\r\nwastes = df_right.waste.unique()\r\n###########################################################\r\n\r\n\r\napp.layout = html.Div(\r\n    #MAIN\r\n    children = [ \r\n        #header#\r\n        html.Div(children = [\r\n            html.H1(\"UnWaste!\", className = \"header-title\"),\r\n            html.P('Demo dashboard', className = 'header-description')\r\n            ],\r\n            className = 'header'\r\n        ),\r\n        #Body1#\r\n        html.Div(children = [\r\n            #Map#\r\n            html.Div([\r\n                html.H3('Path Map', className = 'wintitle'),\r\n                html.Iframe(id = 'map', srcDoc = None, className = 'inframe_map' ),\r\n                #Button div\r\n                html.Div([dcc.Dropdown(id='input-on-submit', options = GARBAGE_LABELS, value='0'),\r\n                          html.Button('Submit', id='submit-val', n_clicks=0),\r\n                          html.Div(id='ignore-me', hidden=True)\r\n                          ])\r\n                ],\r\n                \r\n                className = \"Map\"),\r\n            \r\n            
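# NOTE: the Map iframe above is refreshed automatically -- the dcc.Interval\r\n            # component declared further below fires every UPDATE_INTERVAL ms and\r\n            # re-triggers the update_map callback bound to Output('map', 'srcDoc').\r\n            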
#Report#\r\n            html.Div([\r\n                # reportTitle\r\n                html.H3('Real-Time Reports', className = 'wintitle'),\r\n                #Table\r\n                dash_table.DataTable(id='table',columns=[{\"name\": i, \"id\": i} for i in df.columns],data=df.to_dict('records')),\r\n                \r\n                ],\r\n                className=\"Report\"\r\n            ),\r\n            html.Div([\r\n                dcc.Interval(\r\n                    id='interval-component',\r\n                    interval=UPDATE_INTERVAL,\r\n                    n_intervals=0)\r\n            ]),\r\n\r\n            \r\n            \r\n            ],\r\n            \r\n            className = 'wrapper'),\r\n        ## body 2\r\n        html.Div(children = [\r\n            ##left-stats\r\n            html.Div([ \r\n                html.H3('Real-time Trucks stats', className = 'wintitle'),\r\n                html.P(\"Names:\"),\r\n                dcc.Dropdown(\r\n                    id='names', \r\n                    value='truck_fuel_situation', \r\n                    options=[{'value': x, 'label': x} for x in ['truck_fuel_situation', 'time']],\r\n                    clearable=False\r\n                ),\r\n                html.P(\"Values:\"),\r\n                dcc.Dropdown(\r\n                    id='values', \r\n                    value='Fuel_L', \r\n                    options=[{'value': x, 'label': x} for x in ['Fuel_L', 'mount_mc']],\r\n                    clearable=False\r\n                ),\r\n                \r\n                dcc.Graph(id=\"pie-chart\"),\r\n                \r\n                ],\r\n                className = \"left-stats\"),\r\n            #right Stats\r\n            html.Div([\r\n                html.H3('Real-Time bin stats', className = 'wintitle'),\r\n                \r\n                html.P(\"Waste:\"),\r\n                dcc.Dropdown(\r\n                    id='waste', \r\n                    value='PET', \r\n                    options=[{'value': x, 'label': x} for x in wastes],\r\n                    clearable=False\r\n                ),\r\n\r\n                dcc.Graph(id=\"histo\"),\r\n                \r\n\r\n                ],\r\n                className=\"right-stats\")\r\n            \r\n\r\n            \r\n\r\n\r\n\r\n            ], \r\n            className = \"wrapper\"),\r\n        \r\n    ],\r\nclassName=\"HTML\"\r\n)\r\n\r\n\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    app.run_server(debug=True)","repo_name":"lorenzoloretucci/Impact_challenge","sub_path":"prova.py","file_name":"prova.py","file_ext":"py","file_size_in_byte":14312,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"63"} +{"seq_id":"4295054791","text":"class Solution:\n    def longestCommonSubsequence(self, text1: str, text2: str) -> int:\n        text1 = tuple(reversed(text1))\n        text2 = tuple(reversed(text2))\n\n        m = len(text1)\n        n = len(text2)\n\n        memo = [0] * (n + 1)\n        for i in range(1, m + 1):\n            diag = 0\n            for j in range(1, n + 1):\n                tmp = memo[j]\n                memo[j] = diag + 1 if text1[i - 1] == text2[j - 1] else max(memo[j], memo[j - 1])\n                diag = tmp\n\n        return memo[n]\n\n# Tests\nif __name__ == '__main__':\n    Sol = Solution()\n    Solve = Sol.longestCommonSubsequence(text1 = \"abc\", text2 = \"abc\" ) \n    # text1 = \"abc\", text2 = \"abc\" -> 3\n    print(Solve)\n    Solve = Sol.longestCommonSubsequence(text1 = \"abc\", text2 = \"def\" ) \n    # text1 = \"abc\", text2 = \"def\" -> 0\n    print(Solve)\n","repo_name":"aurimas13/Solutions-To-Problems","sub_path":"LeetCode/Python Solutions/Longest Common Subsequence/longest.py","file_name":"longest.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","stars":81,"dataset":"github-code","pt":"63"} +{"seq_id":"19719097841","text":"\nfrom django.core.management import BaseCommand\nfrom problema_1.phizz import Phizz\nimport sys\n\n'''\nClass that manages the arguments passed through manage.py\n'''\nclass Command(BaseCommand):\n    '''\n    Adds a positional argument to the terminal command\n    '''\n    def add_arguments(self, hpo): \n        hpo.add_argument('hpo', nargs='+', type=str)\n    '''\n    Runs the actions triggered once the command is typed\n    '''\n    def handle(self, *args, **options): \n        hpo = sys.argv[2]\n        '''\n        Checks whether the input parameter matches the expected format\n        '''\n        if hpo[0:2] == 'HP':\n            if hpo[2:3] == ':':\n                try:\n                    int(hpo[3:])\n                    Phizz(hpo)\n                except ValueError:\n                    
self.error()\n else:\n self.error()\n else:\n self.error()\n\n def error(self):\n self.stdout.write('Termo incorreto, informe no formato HP:HPO_ID')\n","repo_name":"uchoavaz/estagio_genomika","sub_path":"estagio_genomika/problema_1/management/commands/phizz.py","file_name":"phizz.py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"8956749382","text":"from ecomment.cli import read_program\n\nimport contextlib\nimport io\n\n\ndef test_read_inline():\n cli_args = \"read tests/example-files/inline.py\".split()[1:]\n\n string_io = io.StringIO()\n with contextlib.redirect_stdout(string_io):\n read_program(cli_args)\n\n inline_example_markup = string_io.getvalue()\n with open(\"tests/example-files/inline.ecomment\", \"r\") as f:\n assert inline_example_markup == f.read()\n","repo_name":"ecomment/ecomment","sub_path":"tests/test_cli.py","file_name":"test_cli.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"20528279428","text":"from pathlib import Path\nfrom argparse import ArgumentParser\nimport sqlite3\n\nimport tweepy\n\nimport secrets\n\nparser = ArgumentParser(description='Downloads a bunch of tweets')\nparser.add_argument('account', type=str, help='the account to scrape')\nparser.add_argument('-d', '--dry-run', dest='dry_run', action='store_true', default=False, help='do not hit the twitter api')\nparser.add_argument('-r', '--resume', dest='resume', action='store_true', default=False, help='try to resume a scrape')\nparser.add_argument('--db', dest='db', type=str, default='./tweets.db', help='what database to use (sqlite3 .db file)')\n\ndef create_tweets_table_if_not_exists(db):\n c = db.cursor()\n c.execute(\"\"\"CREATE TABLE IF NOT EXISTS tweets (\n status_id INTEGER PRIMARY KEY,\n timestamp DATE,\n user TEXT,\n text TEXT\n )\"\"\")\n db.commit()\n\ndef get_min_id_for_user(db: sqlite3.Connection, user: str):\n params = (user,)\n c = db.cursor()\n c.execute('SELECT min(status_id) FROM tweets WHERE user = ?', params)\n res = c.fetchone()\n return res[0]\n\ndef get_twitter_api():\n auth = tweepy.OAuthHandler(secrets.TW_API_KEY, secrets.TW_API_SECRET)\n auth.set_access_token(secrets.TW_TOKEN, secrets.TW_SECRET)\n return tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)\n\ndef status_to_tuple(status):\n status_id = status.id\n timestamp = status.created_at\n user = status.author.screen_name\n text = status.text\n return status_id, timestamp, user, text\n\ndef save_tweet(db: sqlite3.Connection, tweet):\n c = db.cursor()\n c.execute(\"INSERT OR IGNORE INTO tweets (status_id, timestamp, user, text) VALUES(?, ?, ?, ?)\", tweet)\n db.commit()\n\nif __name__ == \"__main__\":\n args = parser.parse_args()\n if args.dry_run:\n print(\"** Dry run - one tweet will be loaded and not saved\")\n\n print(\"* Will collect tweets from user {}\".format(args.account))\n\n outfile_path = Path(args.db).absolute()\n print(\"* Will save tweets to {}\".format(outfile_path))\n db = sqlite3.connect(str(outfile_path))\n create_tweets_table_if_not_exists(db)\n\n max_id = None\n if args.resume:\n res = get_min_id_for_user(db, args.account)\n if res != None:\n print(\"* Resuming from id {}\".format(res))\n max_id = res\n\n api = get_twitter_api()\n\n cursor = tweepy.Cursor(api.user_timeline, id=args.account, max_id=max_id, include_rts=False)\n count = 0\n for status in cursor.items():\n tweet = 
status_to_tuple(status)\n\n        if args.dry_run:\n            print(tweet)\n            break\n        \n        save_tweet(db, tweet)\n        count += 1\n        print('.', end=('\\n' if count % 60 == 0 else ''), flush=True)\n    \n    print('\\n')\n    print('* done: {} tweet(s)'.format(count))","repo_name":"FLamparski/dril-or-no-dril","sub_path":"download-tweets.py","file_name":"download-tweets.py","file_ext":"py","file_size_in_byte":2786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"74533551880","text":"#!/usr/bin/env python3\r\n# -*- coding:utf-8 -*-\r\n'''\r\n# File: plots.py\r\n# Created Date: Wednesday March 25th 2020\r\n# Author: Debora Antunes\r\n# -----\r\n# Last Modified: Thursday, April 15th 2021, 11:28:42 am\r\n# -----\r\n'''\r\nimport numpy as np\r\nimport pandas as pd\r\n\r\nimport models\r\n\r\nfrom rpy2.robjects.packages import importr\r\nimport rpy2.robjects as ro\r\nfrom rpy2.robjects import pandas2ri\r\nfrom rpy2.rinterface_lib.callbacks import logger as rpy2_logger\r\nimport logging\r\nrpy2_logger.setLevel(logging.ERROR)\r\n\r\nfrom sklearn.model_selection import StratifiedKFold\r\nfrom sklearn.metrics import confusion_matrix, roc_curve, auc, plot_roc_curve\r\nfrom scipy.interpolate import interpn\r\n\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\n\r\n\r\n\r\ndef plotManhattan(data, name):\r\n\t\"\"\"Uses an R script to create a Manhattan plot\r\n\t\r\n\tArguments:\r\n\t\tdata {pandas.Dataframe} -- Dataset used to plot\r\n\t\tname {string} -- Suffix for plot file\r\n\t\"\"\"\t\r\n\tprint('>>> Creating figure...')\r\n\tpandas2ri.activate()\r\n\tr_script = ro.r\r\n\tr_script['source'](r'../R/plotManhattan.R')\r\n\tr_script.fun(data, name)\r\n\r\ndef plotConfusionMatrix(real,prev,dir,model):\r\n\t\"\"\"Image for the confusion matrix\r\n\t\r\n\tArguments:\r\n\t\treal {list} -- Real labels\r\n\t\tprev {list} -- Predicted labels\r\n\t\tdir {string} -- Name of the directory\r\n\t\tmodel {string} -- add the model name to the file name\r\n\t\"\"\"\t\r\n\tprint('>>> Creating figure...')\r\n\r\n\tfig = plt.figure()\r\n\tplt.plot([2,2,4]) \r\n\tax0 = plt.subplot(2, 2, 1)\r\n\tax0 = sns.heatmap(confusion_matrix(real[0],prev[0]), annot=True, cbar = False, cmap=sns.light_palette('#dea369'))\r\n\tax0.set(xlabel='Predicted', ylabel='Real')\r\n\tax0.title.set_text('Confusion Matrix Fold 1')\r\n\tax1 = plt.subplot(2, 2, 2) \r\n\tax1 = sns.heatmap(confusion_matrix(real[1],prev[1]), annot=True, cbar = False, cmap=sns.light_palette('#dea369'))\r\n\tax1.set(xlabel='Predicted', ylabel='Real')\r\n\tax1.title.set_text('Confusion Matrix Fold 2')\r\n\tax2 = plt.subplot(2, 2, 3) \r\n\tax2 = sns.heatmap(confusion_matrix(real[2],prev[2]), annot=True, cbar = False, cmap=sns.light_palette('#dea369'))\r\n\tax2.set(xlabel='Predicted', ylabel='Real')\r\n\tax2.title.set_text('Confusion Matrix Fold 3')\r\n\tax3 = plt.subplot(2, 2, 4) \r\n\tax3 = sns.heatmap(confusion_matrix(real[3],prev[3]), annot=True, cbar = False, cmap=sns.light_palette('#dea369'))\r\n\tax3.set(xlabel='Predicted', ylabel='Real')\r\n\tax3.title.set_text('Confusion Matrix Fold 4')\r\n\r\n\tfig.tight_layout()\r\n\tprint('>>> Saving figure...')\r\n\tplt.savefig('../../data/figures/{}/matrix_{}.png'.format(dir, model))\r\n\r\ndef plotRocCurve(X,y, name, dir, best=False):\r\n\t\"\"\"Image for the ROC curve\r\n\t\r\n\tArguments:\r\n\t\tX {numpy.ndarray} -- Dataset to train\r\n\t\ty {numpy.ndarray} -- Labels for the dataset\r\n\t\tname {string} -- Name for the file\r\n\t\tdir {string} -- Name of the directory\r\n\t\tbest 
{list} -- Hyperparameters\r\n\t\"\"\"\t\r\n\tprint(dir, name)\r\n\tskf = StratifiedKFold(n_splits=5, shuffle = True)\r\n\ttprs, aucs, real, prev = [], [], [], []\r\n\tmean_fpr = np.linspace(0, 1, 100)\r\n\r\n\t# Parse hyperparameters if provided\r\n\tif best:\r\n\t\thyper = {}\r\n\t\tfor i in best:\r\n\t\t\tkey, val = i.split(':')[0], i.split(':')[1]\r\n\t\t\ttry: \r\n\t\t\t\tif val == str(float(val)): val = float(val)\r\n\t\t\t\telse: val = int(float(val))\r\n\t\t\texcept: pass\r\n\t\t\tfinally: hyper[key]=val\r\n\t\tbest = hyper\r\n\t\tprint(best)\r\n\r\n\t# Cross-Validation\r\n\tfig, ax = plt.subplots()\r\n\tfor i, (train, test) in enumerate(skf.split(X, y)):\r\n\t\tif name == 'svm': model, p = models.trainSvm(X[train], y[train], X[test], y[test], best)\r\n\t\telif name == 'tree': model, p = models.trainTree(X[train], y[train], X[test], y[test], best)\r\n\t\telif name == 'knn': model, p, best = models.trainKnn(X[train], y[train], X[test], y[test], best)\r\n\t\telif name == 'log': model, p = models.trainLog(X[train], y[train], X[test], y[test], best)\r\n\t\telif name == 'rf': model, p, best = models.trainRf(X[train], y[train], X[test], y[test], best)\r\n\t\telif name == 'nb': model, p = models.trainNb(X[train], y[train], X[test], y[test])\r\n\t\tprev.append(p)\r\n\t\treal.append(list(y[test]))\r\n\t\tviz = plot_roc_curve(model, X[test], y[test],\r\n name='ROC fold {}'.format(i+1),\r\n alpha=0.3, lw=1, ax=ax)\r\n\t\tinterp_tpr = np.interp(mean_fpr, viz.fpr, viz.tpr)\r\n\t\tinterp_tpr[0] = 0.0\r\n\t\ttprs.append(interp_tpr)\r\n\t\taucs.append(viz.roc_auc)\r\n\r\n\t# ROC Curves\r\n\tprint('>>> Creating figure...')\r\n\tax.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r', label='Chance', alpha=.8)\r\n\r\n\tmean_tpr = np.mean(tprs, axis=0)\r\n\tmean_tpr[-1] = 1.0\r\n\tmean_auc = auc(mean_fpr, mean_tpr)\r\n\tstd_auc = np.std(aucs)\r\n\tax.plot(mean_fpr, mean_tpr, color='b',\r\n\t\t\tlabel=r'Mean ROC (AUC = %0.2f $\\pm$ %0.2f)' % (mean_auc, std_auc),\r\n\t\t\tlw=2, alpha=.8)\r\n\r\n\tstd_tpr = np.std(tprs, axis=0)\r\n\ttprs_upper = np.minimum(mean_tpr + std_tpr, 1)\r\n\ttprs_lower = np.maximum(mean_tpr - std_tpr, 0)\r\n\tax.fill_between(mean_fpr, tprs_lower, tprs_upper, color='grey', alpha=.2,\r\n\t\t\t\t\tlabel=r'$\\pm$ 1 std. 
dev.')\r\n\r\n\tax.set(xlim=[-0.05, 1.05], ylim=[-0.05, 1.05], title=\"Receiver operating characteristic\")\r\n\tax.legend(loc=\"lower right\")\r\n\tprint('>>> Saving figure...')\r\n\tplt.savefig('../../data/figures/{}/roc_auc_{}.png'.format(dir, name))\r\n\tplt.close()\r\n\r\n\tplotConfusionMatrix(real,prev, dir, name)\r\n\r\n\r\n\r\ndef plotFeatures(labels, values, name):\r\n\t\"\"\"Image for the top features frequencies\r\n\t\r\n\tArguments:\r\n\t\tlabels {list} -- Selected features\r\n\t\tvalues {list} -- Frequency of the selected features\r\n\t\tname {string} -- add the dataset name to the file name\r\n\t\"\"\"\t\r\n\tprint('>>> Creating figure...')\r\n\tfig, ax = plt.subplots()\r\n\tax.bar(labels, values)\r\n\tax.set_xticklabels(labels, rotation = 90)\r\n\tfig.set_size_inches(20, 12)\r\n\tprint('>>> Saving figure...')\r\n\tax.figure.savefig('../../data/figures/features_{}.png'.format(name))\r\n\tplt.close()","repo_name":"Daantunes/ComplexDiseases_Pipeline","sub_path":"python/plots.py","file_name":"plots.py","file_ext":"py","file_size_in_byte":5734,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"24370661895","text":"from django.shortcuts import render,redirect,get_object_or_404\nfrom .forms import RegisterForm,UserEditForm,ProfileEditForm,MessageForm  # MessageForm is assumed to be defined in .forms; CreateMessage below requires it\nfrom django.views import generic\nfrom twitterapp.models import Tweet,Profile\nfrom django.contrib.auth.models import User\nfrom django.urls import reverse\nfrom django.http import HttpResponseRedirect\n\n\ndef register(request):\n\tif request.method == \"POST\":\n\t\tform = RegisterForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tform.save()\n\t\t\tusername = form.cleaned_data.get('username')\n\t\t\treturn redirect('login')\n\telse:\n\t\tform = RegisterForm()\n\n\treturn render(request,'user/register.html',{'form':form})\nclass UserDetailView(generic.ListView):\n\tmodel = Tweet\n\tpaginate_by = 10\n\tcontext_object_name='user_tweets'\n\ttemplate_name = 'user/user_detail.html'\n\n\tdef get_queryset(self):\n\t\tuser = get_object_or_404(User,username = self.kwargs.get('username'))\n\t\treturn Tweet.objects.filter(user=user).order_by('-date_created')\n\tdef get_context_data(self,**kwargs):\n\t\tcontext = super(UserDetailView, self).get_context_data(**kwargs)\n\t\tuser = get_object_or_404(User,username = self.kwargs.get('username'))\n\t\tcontext['profiles']=Profile.objects.filter(user=user)\n\t\tcontext['following'] = Profile.objects.filter(follow=user)\n\t\treturn context\n\t\t\nclass UserLikesView(generic.ListView):\n\tmodel = Tweet\n\tpaginate_by = 10\n\tcontext_object_name = \"user_likes\"\n\ttemplate_name = 'user/user_likes.html'\n\n\tdef get_queryset(self):\n\t\tuser_likes = get_object_or_404(User,username=self.kwargs.get('username'))\n\t\treturn Tweet.objects.filter(likes=user_likes).order_by('-date_created')\n\tdef get_context_data(self,**kwargs):\n\t\tcontext = super(UserLikesView, self).get_context_data(**kwargs)\n\t\tuser = get_object_or_404(User,username = self.kwargs.get('username'))\n\t\tcontext['profiles']=Profile.objects.filter(user=user)\n\t\treturn context\n\ndef CreateMessage(request):\n\tif request.method == \"POST\":\n\t\tform = MessageForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tmessage = form.save(commit=False)\n\t\t\tmessage.sender = request.user\n\t\t\tmessage.save()\n\t\t\treturn redirect('home')\n\telse:\n\t\tform= MessageForm()\n\treturn render(request,'user/create_message.html',{'form':form})\n\n\t\ndef ProfileEditView(request):\n\tif request.method == 
'POST':\n\t\tu_form = UserEditForm(request.POST,instance=request.user)\n\t\tp_form = ProfileEditForm(request.POST,request.FILES,instance=request.user.profile)\n\t\tif u_form.is_valid() and p_form.is_valid():\n\t\t\tu_form.save()\n\t\t\tp_form.save()\n\t\t\treturn redirect('user-detail',request.user)\n\telse:\n\t\tu_form = UserEditForm(instance=request.user)\n\t\tp_form = ProfileEditForm(instance = request.user.profile)\n\t# fall through here so an invalid POST re-renders the form with its validation errors\n\tcontext = {\n\t\t'u_form':u_form,\n\t\t'p_form':p_form\n\t}\n\treturn render(request,'user/profile_edit.html',context)\n","repo_name":"orbirpinar/Twitter-Like-App","sub_path":"twitter/user/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2705,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"29066879963","text":"from django import forms\n\n# -*- coding: utf-8 -*-\nclass WordForm(forms.Form):\n    your_word = forms.CharField(label='',required=True,widget=forms.Textarea(attrs=\n        {\n            'placeholder': 'Digite uma palavra...', \n            'style': 'position: relative', \n            'class': 'form-word',}\n    ))\n    convert_choice = forms.ChoiceField(choices=[('upper', 'Maiscula'), ('lower', 'Minuscula'), ('capitalize', 'Primeira Letra')],\n        widget=forms.RadioSelect(attrs={'class': 'convert_choice'}), label='Escolha como quer converter',\n\n)\n    \n\n# class WordForm(forms.Form):\n#     your_word = forms.CharField(max_length=100,required=True)\n#     lower_case = forms.BooleanField(required=False, widget=forms.HiddenInput)\n\n# class UpperForm(forms.Form):\n#     upper_word = forms.CharField(widget=forms.Textarea(attrs={'name': 'upper'}))\n\n# class NameForm(forms.Form):\n#     anyword = forms.CharField(max_length=100, label=\"word\")","repo_name":"AthanaS20/web_site","sub_path":"Polls/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"8851035081","text":"Mot1=input()\nMot2=input()\n\ndef buildMatrice(chaine1, chaine2):\n    N=len(chaine1)\n    M=len(chaine2)\n    Matrice=[]\n\n    Matrice.append([\" \"]+[\" \"]+ [chaine2[i] for i in range(M)])\n    Matrice.append([\" \"]+[3*i for i in range (M+1) ])\n\n    for i in range (N):\n        Matrice.append([chaine1[i]]+[3*(i+1)]+[0 for k in range(2,M+2)] )\n\n    return Matrice\n\n'''\ndef buildMatriceCout(Matrice):\n    Cout=[]\n    Cout.append(Matrice[0])\n    for i in range(2, len(Matrice)):\n        Cout.append([Matrice[i][0]]+[2*int(not Matrice[i][0]==Matrice[0][j]) for j in range(len(Matrice[0]))])\n    return Cout\n'''\n\ndef editDistance(chaine1, chaine2):\n    Matrice=buildMatrice(chaine1,chaine2)\n    #Cout=buildMatriceCout(Matrice)\n    lenLigneMatrice=len(Matrice)\n    lenColMatrice=len(Matrice[0])\n    #print(*Matrice,sep=\"\\n\")\n    #print(Matrice)\n    for i in range(2, lenLigneMatrice):\n        for j in range(2, lenColMatrice):\n            Matrice[i][j]=min(Matrice[i-1][j]+3, Matrice[i][j-1]+3, Matrice[i-1][j-1]+2*int(not Matrice[i][0]==Matrice[0][j]))#Cout[i-1][j-1])\n    \n    print(Matrice[lenLigneMatrice-1][lenColMatrice-1])\n\ndef main():\n    editDistance(Mot1,Mot2)\nmain()","repo_name":"Amissan/prepaConcours","sub_path":"td3/D3/D3.py","file_name":"D3.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"73266721159","text":"\"\"\"\nThe nth term of the sequence of triangle numbers is given by t_n = n(n+1)/2;\nso the first ten triangle numbers are:\n\n1, 3, 6, 10, 15, 21, 28, 36, 45, 55, ...\n\nBy converting each letter in a word 
to a number corresponding to its\nalphabetical position and adding these values we form a word value.\nFor example, the word value for SKY is 19 + 11 + 25 = 55 = t10.\nIf the word value is a triangle number then we shall call\nthe word a triangle word.\n\nUsing words.txt, a 16K text file\ncontaining nearly two-thousand common English words,\nhow many are triangle words?\n\"\"\"\n\nimport math\n\n#Up to 23rd term (10 z's)\n\ndef triangle(n):\n    \"\"\" Returns a list of triangle numbers up to the nth term. \"\"\"\n    return list(map(lambda x: int(0.5*x * (x+1)), range(1, n+1)))\n\nseq = triangle(23)\nvalues = dict([(chr(x), x - 96) for x in range(97, 123)])\ntriWords = 0\n\n# Parse the file\nf = open(\"p042_words.txt\")\nf = f.read()\nf = list(f.split(\",\"))\nfor i in range(len(f)):\n    f[i] = f[i].lower()\n\nfor i in f:\n    score = 0\n    for j in i:\n        score += values[j]\n    if (score in seq):\n        triWords += 1\n\nprint(triWords)\n    \n","repo_name":"daki0607/Project-Euler","sub_path":"problem 42/problem 42.py","file_name":"problem 42.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"6344181038","text":"# -*- coding: utf-8 -*-\nimport werkzeug\nimport requests\nfrom werkzeug.exceptions import NotFound\nfrom urllib.parse import unquote\n\nfrom odoo import fields, http, _\nfrom odoo.http import request\nfrom odoo.exceptions import ValidationError\nfrom odoo.addons.payment_jetcheckout.controllers.main import PayloxController as Controller\n\n\nclass PayloxApiController(Controller):\n\n    def _confirm_bank_webhook(self, tx):\n        try:\n            url = tx.jetcheckout_api_bank_webhook_url\n            data = {'id': tx.jetcheckout_api_id}\n            response = requests.post(url, data=data)\n            if response.status_code == 200:\n                tx.write({\n                    'state': 'pending',\n                    'last_state_change': fields.Datetime.now(),\n                })\n            else:\n                raise ValidationError('%s (Error Code: %s)' % (response.reason, response.status_code))\n        except Exception as e:\n            raise ValidationError(e)\n        except:\n            raise ValidationError('%s (Error Code: %s)' % ('Server Error', '-1'))\n\n    def _set_hash(self, raise_exception=True, **kwargs):\n        hash = None  # avoid falling back to the hash() builtin when neither key is present\n        if '' in kwargs:\n            hash = unquote(kwargs[''])\n            self._set('hash', hash)\n        elif 'hash' in kwargs:\n            hash = unquote(kwargs['hash'])\n            self._set('hash', hash)\n\n        if not hash:\n            if raise_exception:\n                raise NotFound()\n            return False\n        return hash\n\n    def _get_transaction(self):\n        hash = self._get('hash')\n        if not hash:\n            return False\n\n        tx = request.env['payment.transaction'].sudo().search([\n            ('jetcheckout_api_hash', '!=', False),\n            ('jetcheckout_api_hash', '=', hash),\n            ('state', '=', 'draft')\n        ], limit=1)\n        if not tx:\n            raise ValidationError(_('An error occurred. 
Please restart your payment transaction.'))\n        return tx\n\n    def _prepare(self, transaction=None, partner=None, **kwargs):\n        values = super()._prepare(transaction=transaction, partner=partner, **kwargs)\n        if transaction and transaction.jetcheckout_api_ok:\n            values.update({\n                'partner_name': transaction.partner_name,\n            })\n        return values\n\n    def _process(self, **kwargs):\n        url, tx, status = super()._process(**kwargs)\n        if not status and tx.jetcheckout_api_hash:\n            status = True\n            self._del('hash')\n            url = tx.jetcheckout_api_card_return_url\n        return url, tx, status\n\n    @http.route(['/payment'], type='http', methods=['GET', 'POST'], auth='public', csrf=False, sitemap=False, website=True)\n    def page_api(self, **kwargs):\n        hash = self._set_hash(raise_exception=False, **kwargs)\n        tx = request.env['payment.transaction'].sudo().search([\n            ('jetcheckout_api_hash', '!=', False),\n            ('jetcheckout_api_hash', '=', hash),\n            ('state', 'in', ('draft', 'cancel', 'expired'))\n        ], limit=1)\n        if not tx:\n            raise NotFound()\n\n        if tx.jetcheckout_api_method:\n            return werkzeug.utils.redirect('/payment/%s' % tx.jetcheckout_api_method)\n\n        acquirers = Controller._get_acquirer()\n        order = request.env['payment.transaction'].sudo().search([\n            ('jetcheckout_api_hash', '!=', False),\n            ('jetcheckout_api_hash', '!=', hash),\n            ('state', '=', 'pending'),\n            ('jetcheckout_api_order', '=', tx.jetcheckout_api_order)\n        ], limit=1)\n        values = {\n            'acquirers': acquirers,\n            'tx': tx,\n            'order': order,\n        }\n        return request.render('payment_jetcheckout_api.payment_page', values, headers={\n            'Cache-Control': 'no-cache, no-store, must-revalidate',\n            'Pragma': 'no-cache',\n            'Expires': '0'\n        })\n\n    @http.route(['/payment/card'], type='http', methods=['GET'], auth='public', csrf=False, sitemap=False, website=True)\n    def page_api_card(self, **kwargs):\n        hash = self._set_hash(raise_exception=False, **kwargs)\n        tx = request.env['payment.transaction'].sudo().search([\n            ('jetcheckout_api_hash', '!=', False),\n            ('jetcheckout_api_hash', '=', hash),\n            ('state', 'in', ('draft', 'cancel', 'expired'))\n        ], limit=1)\n        if not tx:\n            raise NotFound()\n        elif tx.jetcheckout_api_method and tx.jetcheckout_api_method != 'card':\n            raise NotFound()\n\n        acquirer = request.env['payment.acquirer']._get_acquirer(\n            company=tx.company_id,\n            website=request.website,\n            providers=['jetcheckout'],\n            limit=1,\n        )\n        values = self._prepare(acquirer=acquirer, company=tx.company_id, transaction=tx, balance=False)\n        values.update({'tx': tx})\n        return request.render('payment_jetcheckout_api.page_card', values, headers={\n            'Cache-Control': 'no-cache, no-store, must-revalidate',\n            'Pragma': 'no-cache',\n            'Expires': '0'\n        })\n\n    @http.route(['/payment/bank'], type='http', methods=['GET'], auth='public', csrf=False, sitemap=False, website=True)\n    def page_api_bank(self, **kwargs):\n        hash = self._set_hash(**kwargs)\n        tx = request.env['payment.transaction'].sudo().search([\n            ('state', '=', 'draft'),\n            ('jetcheckout_api_hash', '!=', False),\n            ('jetcheckout_api_hash', '=', hash),\n        ], limit=1)\n        if not tx:\n            raise NotFound()\n        elif tx.jetcheckout_api_method and tx.jetcheckout_api_method != 'bank':\n            raise NotFound()\n\n        order = request.env['payment.transaction'].sudo().search([\n            ('state', '=', 'pending'),\n            ('jetcheckout_api_hash', '!=', False),\n            ('jetcheckout_api_hash', '!=', hash),\n            ('jetcheckout_api_order', '=', tx.jetcheckout_api_order),\n        ], limit=1)\n        if order:\n            
self._confirm_bank_webhook(tx)\n return werkzeug.utils.redirect('/payment/bank/result')\n\n acquirer = request.env['payment.acquirer']._get_acquirer(\n company=tx.company_id,\n website=request.website,\n providers=['transfer'],\n limit=1,\n )\n values = self._prepare(\n acquirer=acquirer,\n company=tx.company_id,\n balance=False\n )\n values.update({'tx': tx})\n return request.render('payment_jetcheckout_api.payment_bank_page', values, headers={\n 'Cache-Control': 'no-cache, no-store, must-revalidate',\n 'Pragma': 'no-cache',\n 'Expires': '0'\n })\n\n @http.route(['/payment/bank/result'], type='http', methods=['GET'], auth='public', csrf=False, sitemap=False, website=True)\n def page_api_bank_result(self, **kwargs):\n hash = self._set_hash(**kwargs)\n tx = request.env['payment.transaction'].sudo().search([\n ('state', '=', 'pending'),\n ('jetcheckout_api_hash', '!=', False),\n ('jetcheckout_api_hash', '=', hash),\n ], limit=1)\n if not tx:\n raise NotFound()\n elif tx.jetcheckout_api_method and tx.jetcheckout_api_method != 'bank':\n raise NotFound()\n\n acquirer = request.env['payment.acquirer']._get_acquirer(\n company=tx.company_id,\n website=request.website,\n providers=['transfer'],\n limit=1,\n )\n values = self._prepare(\n acquirer=acquirer,\n company=tx.company_id,\n balance=False\n )\n values.update({'tx': tx})\n return request.render('payment_jetcheckout_api.payment_bank_page', values, headers={\n 'Cache-Control': 'no-cache, no-store, must-revalidate',\n 'Pragma': 'no-cache',\n 'Expires': '0'\n })\n\n @http.route(['/payment/bank/confirm'], type='json', auth='public')\n def page_api_confirm(self, **kwargs):\n hash = self._set_hash(raise_exception=False, **kwargs)\n tx = request.env['payment.transaction'].sudo().search([\n ('state', '=', 'draft'),\n ('jetcheckout_api_hash', '!=', False),\n ('jetcheckout_api_hash', '=', hash),\n ], limit=1)\n if not tx:\n return '/404'\n elif tx.jetcheckout_api_method and tx.jetcheckout_api_method != 'bank':\n return '/404'\n\n self._confirm_bank_webhook(tx)\n return '/payment/bank/result'\n\n @http.route(['/payment/bank/return'], type='json', auth='public')\n def page_api_return(self, **kwargs):\n hash = self._set_hash(raise_exception=False, **kwargs)\n tx = request.env['payment.transaction'].sudo().search([\n ('jetcheckout_api_hash', '!=', False),\n ('jetcheckout_api_hash', '=', hash)\n ], limit=1)\n if not tx:\n return '/404'\n elif tx.jetcheckout_api_method and tx.jetcheckout_api_method != 'bank':\n return '/404'\n\n self._del()\n return tx.jetcheckout_api_bank_return_url\n","repo_name":"projetgrup/payment-service","sub_path":"payment_jetcheckout_api/controllers/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9224,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"63"} +{"seq_id":"9109681267","text":"import matplotlib.pyplot as plot\r\nimport numpy as np\r\nimport random\r\n\r\nn = int(input(\"Podaj n: \"))\r\n\r\nx = [round(random.uniform(0, 1), 2) for _ in range(n)]\r\ny = [round(random.uniform(0, -1), 2) for _ in range(n)]\r\nindex = np.arange(n)\r\n\r\nfig, ax = plot.subplots()\r\nbarX = ax.bar(index, x, 0.3, color=\"blue\")\r\nbarY = ax.bar(index, y, 0.3, color=\"red\")\r\noffset = {'center': 0.5, 'right': 0.57, 'left': 0.43} # x_txt = x + w*off\r\nfor rect in barX:\r\n height = rect.get_height()\r\n ax.text(rect.get_x() + rect.get_width() * offset['center'], 1.01 * height,\r\n '{}'.format(height), ha='center', va='bottom')\r\nfor rect in barY:\r\n height = rect.get_height()\r\n 
ax.text(rect.get_x() + rect.get_width() * offset['center'], 1.01 * height,\r\n            '{}'.format(height), ha='center', va='bottom')\r\nplot.show()","repo_name":"mniesiolowski/WSB_LAB3","sub_path":"Zad5.py","file_name":"Zad5.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"30515658020","text":"#!/usr/bin/env python3\n\nimport matplotlib.pyplot as plt\n\nlog_file_paths = \\\n    [\n        '../output/small_log.txt',\n        '../output/med_small_log.txt',\n        '../output/med_large_log.txt',\n        '../output/large_log.txt'\n    ]\n\nNUM_EVALS_PERFORMED = 2000\n\nfor file_index in range(len(log_file_paths)):\n    with open(log_file_paths[file_index], 'r') as log_file:\n        # Create a list of lines from the log file, disregarding all config parameters and empty lines\n        log_file = log_file.read().split('\\n')\n        log_file = [line for line in log_file[log_file.index('Run 1'):] if not line == '']\n\n        # Get the index of each run header\n        run_header_indices = []\n        for index, line in enumerate(log_file):\n            if 'Run' in line:\n                run_header_indices.append(index)\n\n        # Determine the last best score\n        last_best_score_run_index = 0\n        best_score = 0\n        for i, run_header_index in enumerate(run_header_indices[1:] + [len(log_file)]):\n            last_line_index = run_header_index - 1\n\n            last_score = int(log_file[last_line_index].split('\\t')[1])\n\n            if last_score > best_score:\n                best_score = last_score\n                last_best_score_run_index = i\n\n        # Get data for the run containing the last best score\n        log_file = log_file[run_header_indices[last_best_score_run_index]:run_header_indices[last_best_score_run_index + 1]]\n\n        # Get evals and fitnesses for the best run\n        evals = []\n        fits = []\n\n        for line in log_file[1:]:\n            eval_num, fitness = line.split('\\t')\n            evals.append(int(eval_num))\n            fits.append(int(fitness))\n\n        # Adjust evals and fitnesses to be the correct length\n        evals += [NUM_EVALS_PERFORMED]\n        fits += [fits[-1]]\n\n        # Plot the results\n        plt.step(evals, fits, '-b')\n\n        # Include necessary labels\n        plt.xlabel('Evaluations')\n        plt.ylabel('Fitness')\n\n        # Save and close the plot\n        plt.savefig(log_file_paths[file_index][:log_file_paths[file_index].find('log')] + 'graph.png')\n        plt.close()\n\n","repo_name":"wwlorey/pacman-random-search","sub_path":"analysis/gen_fitness_graphs.py","file_name":"gen_fitness_graphs.py","file_ext":"py","file_size_in_byte":2145,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"36698580060","text":"# variable-length arguments\n# *numbers lets the caller pass any number of extra positional values\ndef total(initial=5,*numbers):\n    count=initial\n    for num in numbers:\n        count+=num\n    return count\n\n\nprint(total(10,1,2,3,4))","repo_name":"ravituduru/itversity_pyspark","sub_path":"4-All-about-functions/variable_length_arguments.py","file_name":"variable_length_arguments.py","file_ext":"py","file_size_in_byte":211,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"26302566570","text":"import os\nimport sys\n\nfrom setuptools import find_packages, setup\n\n# obtain version of SoS\nwith open('src/sos_rmarkdown/_version.py') as version:\n    for line in version:\n        if line.startswith('__version__'):\n            __version__ = eval(line.split('=')[1])\n            break\n\nCURRENT_DIR = os.path.abspath(os.path.dirname(__file__))\n\n\ndef get_long_description():\n    with open(os.path.join(CURRENT_DIR, \"README.md\"), \"r\") as ld_file:\n        return ld_file.read()\n\n\nsetup(\n    
name=\"sos-rmarkdown\",\n version=__version__,\n description='A Rmarkdown to SoS Notebook converter',\n long_description=get_long_description(),\n long_description_content_type=\"text/markdown\",\n author='Bo Peng',\n url='https://github.com/vatlab/sos-rmarkdown',\n author_email='bpeng@mdanderson.org',\n maintainer='Bo Peng',\n maintainer_email='bpeng@mdanderson.org',\n license='3-clause BSD',\n include_package_data=True,\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Console',\n 'License :: OSI Approved :: BSD License',\n 'Natural Language :: English',\n 'Operating System :: POSIX :: Linux',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Microsoft :: Windows',\n 'Intended Audience :: Information Technology',\n 'Intended Audience :: Science/Research',\n 'Programming Language :: Python :: 3 :: Only',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: Implementation :: CPython',\n ],\n zip_safe=False,\n packages=find_packages('src'),\n package_dir={'': 'src'},\n python_requires='>=3.6',\n install_requires=[\n 'sos>=0.20.9',\n 'sos-notebook>=0.20.9',\n 'sos-r',\n 'markdown-kernel',\n 'papermill',\n 'sos-papermill',\n 'nbformat',\n 'nbconvert>=5.1.1', \n ],\n entry_points='''\n[sos_converters]\nrmd-ipynb = sos_rmarkdown.converter:RmarkdownToNotebookConverter\nrmd-html = sos_rmarkdown.converter:RmarkdownToHTMLConverter\n''')\n","repo_name":"vatlab/sos-rmarkdown","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2056,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"83"} +{"seq_id":"70970744592","text":"from django.shortcuts import render, redirect\n\n# Create your views here.\nfrom django.views.generic import TemplateView\n\nfrom customers.models import Customer, Seller\nfrom expenses.models import Revenue, Expense, InsurancePrimary\nfrom orders.models import CustomerOrder, SellerOrder\nfrom settings.models import StartBalance\n\n\nclass ReportView(TemplateView):\n template_name = 'reports/index.html'\n\n def dispatch(self, request, *args, **kwargs):\n if not request.user.is_authenticated or not request.user.is_superuser:\n return redirect('/')\n return super().dispatch(request, args, kwargs)\n\n def get_context_data(self, **kwargs):\n ctx = super().get_context_data()\n month = self.request.GET.get('month', 12)\n year = self.request.GET.get('year', 2019)\n\n ctx['total_selling'] = sum([item.total() for item in CustomerOrder.objects.filter(approved=True, created_at__month=month, created_at__year=year)])\n\n ctx['total_buying'] = sum([item.total() for item in SellerOrder.objects.filter(approved=True, created_at__month=month, created_at__year=year)])\n\n ctx['total_customer_payments'] = sum([item.total_payments(month, year) for item in Customer.objects.all()])\n ctx['total_seller_payments'] = sum([item.total_payments(month, year) for item in Seller.objects.all()])\n\n ctx['total_revenues'] = sum([item.value for item in Revenue.objects.filter(created_at__month=month, created_at__year=year)])\n ctx['total_expenses'] = sum([item.value for item in Expense.objects.filter(created_at__month=month, created_at__year=year)])\n\n ctx['total_selling_cost'] = sum([item.total_cost() for item in CustomerOrder.objects.filter(approved=True, created_at__month=month, created_at__year=year)])\n\n ctx['insurance_primary'] = sum([item.value for item in InsurancePrimary.objects.filter(refunded=True)])\n\n ctx['cash_balance'] = sum([item.total_cash_payments(month, year) for 
item in Customer.objects.all()]) + StartBalance.get_solo().cash\n ctx['bank_balance'] = sum(\n [item.total_bank_payments(month, year) for item in Customer.objects.all()]) + StartBalance.get_solo().bank\n return ctx\n","repo_name":"ashrafemad/django-e-commerce-simple-dashboard","sub_path":"reports/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2199,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"83"} +{"seq_id":"26968239351","text":"import argparse\nimport pandas as pd\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport random\nfrom model import LSTM\nimport time, math\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as anim\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"player_w_underscore\")\nparser.add_argument(\"speed\", nargs='?')\nparser.add_argument('train', nargs='?')\nargs = parser.parse_args()\ninit_speed = False\ninit_train = False\nif args.speed: init_speed = True\nif args.train: init_train = True\n\n\n########################\n\n# Load data\n\n#######################\n\ninput_tensors = []\ngold_values = []\n\n\n\ndfs = []\n\nhardcoded_ids = [args.player_w_underscore]\nfor id in hardcoded_ids:\n df1 = pd.read_csv('data/csv/' + id + '.csv')\n dfs.append(df1)\n\n\n\nfor df in dfs:\n df_list = np.split(df, df[df.isnull().all(1)].index)[:-1] # split into moments through splitting by empty rows\n if init_speed:\n for df in df_list:\n df.drop(df.index[0], inplace=True) # drop first row, which will be empty\n df = df[df.index % 3 == 0]\n input_numpy = df.values[:,:-2]\n gold_numpy = df.values[1:, :2]\n input_numpy = input_numpy[:-1, :]\n\n temp1, temp2 = np.zeros_like(input_numpy), np.zeros_like(gold_numpy)\n temp1[:], temp2[:] = input_numpy, gold_numpy\n input_numpy, gold_numpy = temp1, temp2\n\n input_numpy.resize(input_numpy.shape[0],1,input_numpy.shape[1])\n gold_numpy.resize(gold_numpy.shape[0],1,gold_numpy.shape[1])\n\n input_tensor = torch.from_numpy(input_numpy).float()\n gold_value = torch.from_numpy(gold_numpy).float()\n\n input_tensors.append(input_tensor)\n gold_values.append(gold_value)\n else:\n for df in df_list:\n df.drop(df.index[0], inplace=True) # drop first row, which will be empty\n input_numpy = df.values[:, :-2]\n input_numpy.resize(input_numpy.shape[0], 1, input_numpy.shape[1])\n gold_numpy = df.values[:, -2:]\n gold_numpy.resize(gold_numpy.shape[0], 1, gold_numpy.shape[1])\n\n input_tensor = torch.from_numpy(input_numpy).float()\n gold_value = torch.from_numpy(gold_numpy).float()\n\n input_tensors.append(input_tensor)\n gold_values.append(gold_value)\n\n\n\n\nassert(len(input_tensors) == len(gold_values))\n\n\ndef randomTrainingExample():\n rand = random.randint(0, len(input_tensors) - 1)\n return input_tensors[rand], gold_values[rand]\n\n\n########################\n\n# Helper functions\n\n#######################\n\n\ndef train(input_tensor, gold_value):\n hidden = rnn.initHidden()\n\n rnn.zero_grad()\n\n loss = 0\n\n for i in range(input_tensor.size(0)):\n output, hidden = rnn(input_tensor[i], hidden)\n l = criterion(output, gold_value[i])\n\n\n # testing additions to loss function: punish no movement, moving off screen\n piq_x_gold,piq_y_gold = float(gold_value[i][0,0]), float(gold_value[i][0,1])\n bh_x,bh_y = float(input_tensor[i][0,6]),float(input_tensor[i][0,7])\n\n if torch.all(torch.eq(input_tensor[i][0,:2],output[0])):\n # no movement\n l *= 10\n if 0 > float(output[0,0]) > 47 or 0 > float(output[0,1]) > 50:\n # moving off screen\n l*=1000\n 
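# NOTE: the triple-quoted block below is a third, currently disabled loss penalty --\n        # it would scale the loss up whenever the predicted defender position ends up\n        # farther from the hoop than the ball handler (input feature 11 holds the ball\n        # handler's distance from the hoop, see createInput below).\n        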
'''\n        if distanceFromHoop(float(output[0,0]),float(output[0,1])) > float(input_tensor[i][0,11]):\n            # further from basket than ball handler\n            l *= 10\n        '''\n\n        loss += l\n\n    loss.backward()\n    torch.nn.utils.clip_grad_norm_(rnn.parameters(), 0.25)\n\n    for p in rnn.parameters():\n        p.data.add_(p.grad.data, alpha=-learning_rate)  # manual SGD step\n\n    return output, loss.item() / input_tensor.size(0)\n\n\ndef timeSince(since):\n    now = time.time()\n    s = now - since\n    m = math.floor(s / 60)\n    s -= m * 60\n    return '%dm %ds' % (m, s)\n\n\ndef velocity(x1,y1,x2,y2,time):\n    v = 1000*distance(x1,x2,y1,y2)/time\n    return v\n\n\ndef distance(x1,x2,y1,y2):\n    return math.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)\n\n\ndef distanceFromHoop(x,y):\n    return math.sqrt((x - 5) ** 2 + (y - 25) ** 2)\n\n\ndef createInput(piq_x_loc, piq_y_loc, prev_piq_x_loc, prev_piq_y_loc, x_loc, y_loc, prev_x_loc, prev_y_loc):\n    input = torch.ones(12,)\n    input[0], input[1] = piq_x_loc,piq_y_loc\n    input[2] = velocity(piq_x_loc,piq_y_loc,prev_piq_x_loc,prev_piq_y_loc,40)\n    input[3], input[4] = (piq_x_loc - prev_piq_x_loc), (piq_y_loc - prev_piq_y_loc)\n    input[5] = distanceFromHoop(piq_x_loc,piq_y_loc)\n    input[6], input[7] = x_loc, y_loc\n    input[8] = velocity(x_loc,y_loc,prev_x_loc,prev_y_loc,40)\n    input[9], input[10] = (x_loc - prev_x_loc), (y_loc - prev_y_loc)\n    input[11] = distanceFromHoop(x_loc,y_loc)\n    input.unsqueeze_(0)\n    return input\n\n\ndef predict(input_tensors, gold_values):\n    with torch.no_grad(): # no need to track history when predicting\n        input = input_tensors[0]\n        hidden = rnn.initHidden()\n\n        diff = 0\n\n        prev_x_loc, prev_y_loc = input[0,6], input[0,7]\n        x_loc, y_loc = input[0,6], input[0,7]\n        prev_piq_x_loc, prev_piq_y_loc = input[0,0], input[0,1]\n        piq_x_loc, piq_y_loc = input[0,0], input[0,1]\n\n        # for animation:\n\n        bh_xy = []\n        piq_xy = []\n        piq_gold = [(float(gold_values[i][0,0]),float(gold_values[i][0,1])) for i in range(len(input_tensors))]\n\n        for i in range(len(input_tensors)):\n            output, hidden = rnn(input, hidden)\n\n            # for new input vector\n            prev_x_loc, prev_y_loc = x_loc, y_loc\n            prev_piq_x_loc, prev_piq_y_loc = piq_x_loc, piq_y_loc\n            piq_x_loc, piq_y_loc = output[0,0], output[0,1]\n            x_loc, y_loc = input_tensors[i][0,6], input_tensors[i][0,7]\n\n            # for animation\n            piq_xy.append((float(piq_x_loc),float(piq_y_loc)))\n            bh_xy.append((float(x_loc),float(y_loc)))\n\n            if i == len(input_tensors)-1:\n                diff = output - gold_values[i]\n                x_diff,y_diff = float(diff[0,0]), float(diff[0,1])\n                diff = (math.fabs(x_diff) + math.fabs(y_diff))/2\n\n            input = createInput(piq_x_loc, piq_y_loc, prev_piq_x_loc, prev_piq_y_loc, x_loc, y_loc, prev_x_loc, prev_y_loc)\n\n        return diff, len(input_tensors), bh_xy, piq_xy, piq_gold\n\ndef init():\n    line.set_offsets([])\n    return line,\n\ndef animate(i, num_frames):\n    colors = []\n    x_positions = []\n    y_positions = []\n    count = 0\n    for player_coord in player_coords:\n        x = player_coord[i][0]\n        y = player_coord[i][1]\n        x_positions.append(x)\n        y_positions.append(y)\n        if count == 0:\n            colors.append('black')\n        elif count == 1:\n            colors.append('red')\n        elif count == 2:\n            colors.append('gold')\n        count += 1\n    x_positions = np.asarray(x_positions)\n    y_positions = np.asarray(y_positions)\n    positions = np.vstack((x_positions, y_positions)).T\n    line.set_offsets(positions)\n    line.set_color(c=colors)\n    if i == num_frames-1:\n        plt.close()\n    return line,\n\n\n########################\n\n# Train or evaluate\n\n#######################\n\n\nif init_train:\n\n    rnn = LSTM(12, 300, 2)\n\n    criterion = nn.MSELoss()\n\n    learning_rate = 
0.0005\n    #learning_rate = 0.0001\n\n    n_iters = 25000\n    print_every = 5000\n    plot_every = 100\n    all_losses = []\n    total_loss = 0 # Reset every plot_every iters\n\n    start = time.time()\n\n    plt.ion()\n    ax = plt.gca()\n    ax.set_xlim([0, 5])\n    ax.set_ylim([0, 300])\n    plt.title(\"Loss over time\")\n    plt.xlabel(\"(100x) Num Iterations\")\n    plt.ylabel(\"Loss\")\n\n    for iter in range(1, n_iters + 1):\n        output, loss = train(*randomTrainingExample())\n        total_loss += loss\n\n        if iter % print_every == 0:\n            print('%s (%d %d%%) %.4f' % (timeSince(start), iter, iter / n_iters * 100, loss))\n\n        if iter % plot_every == 0:\n            all_losses.append(total_loss / plot_every)\n            print(total_loss / plot_every)\n            total_loss = 0\n            ax.set_xlim([0, len(all_losses) + 10])\n            ax.plot(all_losses)\n            plt.draw()\n            plt.pause(0.0001)\n\n    torch.save(rnn.state_dict(), './data/' + args.player_w_underscore + '.model')\n\nelse:\n\n    rnn = LSTM(12, 300, 2)\n    rnn.load_state_dict(torch.load('./data/' + args.player_w_underscore + '.model'))\n\n    num_test_examples = 100\n\n\n    for i in range(3):\n        court = plt.imread('halfcourt.png')\n        fig = plt.figure(figsize=(15, 11.5))\n        ax = plt.axes(xlim=(-10, 60), ylim=(-10, 60))\n        line = ax.scatter([], [], s=50)\n        colors = []\n\n        with torch.no_grad():\n            ex_input, ex_gold = torch.tensor([0]),torch.tensor([0])\n            while float(ex_input.size()[0]) < num_test_examples:\n                ex_input, ex_gold = randomTrainingExample()\n            diff, num_frames, bh_xy, piq_xy, piq_gold = predict(ex_input, ex_gold)\n\n            assert(len(bh_xy) == len(piq_xy) == len(piq_gold))\n\n            player_coords = [bh_xy,piq_xy,piq_gold]\n\n\n        animation = anim.FuncAnimation(fig, animate, init_func=init, frames=num_frames, fargs=[num_frames], interval=50, repeat=False, blit=True)\n\n        plt.imshow(court, zorder=0, extent=[0, 47, 50, 0])\n        plt.show()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"mogryzko/NBA-Defensive-AI","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9322,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"47805415134","text":"A = list()\nfirst_num = int(input())\nA.append(first_num)\nsecond_num = int(input())\nA.append(second_num)\nthird_num = int(input())\nA.append(third_num)\nfourth_num = int(input())\nA.append((fourth_num))\n\nminimal = first_num\nfor item in A:\n    if item < minimal:\n        minimal = item\nprint(minimal)\n\n# up to 13 inclusive - childhood;\n# 14 to 24 - youth;\n# 25 to 59 - maturity;\n# from 60 - old age.\nage = int(input(\"Введите свой возраст: \"))\nif age <= 13:\n    print('детство')\nelif 14 <= age <= 24:\n    print('молодость')\nelif 25 <= age <= 59:\n    print('зрелость')\nelse:\n    print('старость')\n\nfirst_num = int(input())\n\nsecond_num = int(input())\n\nthird_num = int(input())\n\nsum = 0\n\nif (first_num > -1):\n\n    sum += first_num\n\nif (second_num > -1):\n\n    sum +=second_num\n\nif (third_num > -1):\n\n    sum += third_num\n\nprint(sum)\n\n\nnum1 = 34\nnum2 = 81\nif num1 % 9 == 0 or num2 % 9 == 0:  # divisibility by 9 is assumed to be the intended check here\n    print('число', num1, 'выиграло')\nelse:\n    print('число', num2, 'выиграло')\n","repo_name":"YaroslavBaienko/002_coursera_Python_Functions_Files_Dictionaries","sub_path":"003_stepik.py","file_name":"003_stepik.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"26083468760","text":"print(f'convertisseur: {__name__}')\n\ndef deCversF(tc):\n    tf = tc * 9 / 5 + 32\n    return tf\n\ndef deFversC(tf):\n    tc 
def deFversC(tf):\n    tc = (tf - 32) * 5 / 9\n    return tc\n\nif __name__ == \"__main__\":\n    tc_str = input(\"Temperature in Celsius? \")\n    tc = float(tc_str)\n    tf = deCversF(tc)\n    print(f\"Temperature in Fahrenheit: {tf} °F\")","repo_name":"christophesaintjean/IntroProgS1_2020","sub_path":"code/Solutions TP11/convertisseur.py","file_name":"convertisseur.py","file_ext":"py","file_size_in_byte":323,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"35175730711","text":"# -*- coding: utf-8 -*-\n\nfrom base.custom_model import GridModel\nfrom django.utils.translation import ugettext as _\nfrom mysite import sql_utils\nfrom mysite.att.report_utils import parse_grid_arg\n\nclass CardTimes(GridModel):\n    verbose_name=_(u'Statistics result details')\n    app_menu =\"att\"\n    menu_index=3\n    visible = False\n    template = 'grid_data.html'\n    head = (('userid',u'User ID'),('DeptName',u'Department'),('badgenumber',u'Badge number'),('name',u'Name'),('card_date',u'Date'),('times',u'Punch count'),('card_times',u'Punch times'))\n    option = {\n        \"usepager\": True,\n        \"useRp\": True,\n        \"rp\": 20,\n        \"height\":300,\n        \"width\":1286,\n        'checkbox' : False,\n        \"showTableToggleBtn\":True,\n        \"buttons\":[{\"name\": 'Export xls', \"bclass\": 'export_xls', \"onpress\" : '$do_export$'},\n                   {\"name\": 'Export pdf', \"bclass\": 'export_pdf', \"onpress\" : '$do_export$'},\n                   {\"name\": 'Export csv', \"bclass\": 'export_csv', \"onpress\" : '$do_export$'}\n                   ],\n        }\n    def __init__(self,request):\n        super(CardTimes, self).__init__()\n        self.SetHide(\"userid\")\n        self.grid.fields[\"DeptName\"][\"width\"]=120\n        self.grid.fields[\"badgenumber\"][\"width\"]=120\n        self.grid.fields[\"name\"][\"width\"]=120\n        self.grid.fields[\"times\"][\"width\"]=120\n        self.grid.fields[\"card_date\"][\"width\"]=180\n        self.grid.fields[\"card_times\"][\"width\"]=380\n    def MakeData(self,request,**arg):\n        userids,d1,d2 = parse_grid_arg(request)\n        if userids and d1 and d2:\n            params={\"userids\": ','.join(userids),\"st\":d1,\"et\":d2}\n            self.grid.sql = sql_utils.get_sql('sql',sqlid='cardtimes',app='att',params=params)\n        else:\n            self.SetBlank()\n","repo_name":"xianglei0610/zktime_wlm","sub_path":"units/adms/mysite/att/models/grid_models/card_times.py","file_name":"card_times.py","file_ext":"py","file_size_in_byte":1844,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"83"} +{"seq_id":"23613790519","text":"import os\nimport cPickle\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport calculate_QG10_var_shear_normalization as calcnorm\n\nNORMFILE = os.path.join('results', 'normalizationv2_G10_QuadPS_N1000_noise_sigma0.05.pkl')\n# Load up the tuple of results\nnorm_data = cPickle.load(open(NORMFILE, 'rb'))\n\nqG10unnorm = norm_data[0]\nqQuadPSunnorm = norm_data[1]\n\nctest = calcnorm.CTEST\nmtest = calcnorm.MTEST\n\nplt.clf()\nif not os.path.isdir(os.path.join('plots', 'v3')):\n    os.mkdir(os.path.join('plots', 'v3'))\n\n# First plot the histograms of QG10 for each of the ctest values for m = mtest[0] (fiducial)\nplotfile1 = os.path.join('plots', 'v3', 'normalization_QG10_vs_c.png')\nfor c, i in zip(ctest, range(len(ctest))):\n    plt.hist(qG10unnorm[i, 0, :].flatten(), bins=30, range=(0., 6.e7), label=r'c$_i$='+str(c))\nplt.xlabel(r'QG10 (unnormalized fiducial m$_i$='+str(mtest[0])+')')\nplt.legend()\nplt.savefig(plotfile1)\n\n# Then plot the histograms of QQuadPS for each of the ctest values for m = mtest[0] (fiducial)\nplotfile2 = os.path.join('plots', 'v3', 'normalization_QQuadPS_vs_c.png')\nplt.clf()\nfor c, i in zip(ctest, range(len(ctest))):\n    
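# Same layout as the QG10 histograms above, now for the QQuadPS metric.\n    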
plt.hist(qQuadPSunnorm[i, 0, :].flatten(), bins=30, range=(0., 6.e7), label=r'c$_i$='+str(c))\nplt.xlabel(r'QQuadPS (unnormalized fiducial m$_i$='+str(mtest[0])+')')\nplt.legend()\nplt.savefig(plotfile2)\n\n# Then calculate the normalization factors for the QG10 and QQuadPS metrics\nnormQG10 = np.mean(qG10unnorm[:, 0, :].flatten())\nnormQQuadPS = np.mean(qQuadPSunnorm[:, 0, :].flatten())\n# Use this to make normalized arrays of test case metric values\nqG10 = 1000. * qG10unnorm / normQG10\nqQuadPS = 1000. * qQuadPSunnorm / normQQuadPS\n\n# Then plot histograms of normalized values as a function of m\n# QG10\nplt.clf()\nplotfile3 = os.path.join('plots', 'v3', 'hists_QG10_vs_m.png')\nfor m, j in zip(mtest, range(len(mtest))):\n plt.hist(qG10[:, j, :].flatten(), bins=75, range=(0., 2500.), label=r'm$_i$='+str(m))\nplt.legend()\nplt.xlabel('QG10')\nplt.savefig(plotfile3)\n\n# QQuadPS\nplt.clf()\nplotfile4 = os.path.join('plots', 'v3', 'hists_QQuadPS_vs_m.png')\nfor m, j in zip(mtest, range(len(mtest))):\n plt.hist(qQuadPS[:, j, :].flatten(), bins=75, range=(0., 2500.), label=r'm$_i$='+str(m))\nplt.legend()\nplt.xlabel('QQuadPS')\nplt.savefig(plotfile4)\n\n# Then plot means and standard deviations of these histograms\n# QG10\nplt.clf()\nplotfile5 = os.path.join('plots', 'v3', 'QG10_vs_m.png')\nmeanQG10 = []\nstdQG10 = []\nfor m, j in zip(mtest, range(len(mtest))):\n meanQG10.append(np.mean(qG10[:, j, :].flatten()))\n stdQG10.append(np.std(qG10[:, j, :].flatten()))\nplt.errorbar(np.log10(mtest), meanQG10, yerr=stdQG10)\nplt.xlim(-.5, -3.5)\nplt.xlabel(r'log$_{10}$(m$_i$)')\nplt.ylabel('QG10')\nplt.savefig(plotfile5)\n\n# QQuadPS\nplt.clf()\nplotfile6 = os.path.join('plots', 'v3', 'QQuadPS_vs_m.png')\nmeanQQuadPS = []\nstdQQuadPS = []\nfor m, j in zip(mtest, range(len(mtest))):\n meanQQuadPS.append(np.mean(qQuadPS[:, j, :].flatten()))\n stdQQuadPS.append(np.std(qQuadPS[:, j, :].flatten()))\nplt.errorbar(np.log10(mtest), meanQQuadPS, yerr=stdQQuadPS)\nplt.xlim(-.5, -3.5)\nplt.xlabel(r'log$_{10}$(m$_i$)')\nplt.ylabel('QQuadPS')\nplt.savefig(plotfile6)\n\n","repo_name":"barnabytprowe/great3-public","sub_path":"metrics/plot_PS_normalization_results.py","file_name":"plot_PS_normalization_results.py","file_ext":"py","file_size_in_byte":3185,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"83"} +{"seq_id":"14817659992","text":"def sortCodesignalUsers(users):\n res = [CodeSignalUser(*user) for user in users]\n res.sort(reverse=True)\n return list(map(str, res))\n\n\nclass CodeSignalUser:\n def __init__(self, name, _id, score):\n self.name = name\n self.id = _id\n self.score = score\n\n def __lt__(self, other):\n if int(self.score) < int(other.score):\n return True\n elif int(self.score) == int(other.score):\n print(f'equal {self.name}, {other.name}')\n return int(self.id) > int(other.id)\n return False\n\n\n def __repr__(self):\n return self.name\n\nusers = [[\"warrior\", \"1\", \"1050\"],\n [\"Ninja!\", \"21\", \"995\"],\n [\"recruit\", \"3\", \"995\"]]\n\n\n# c = CodeSignalUser(\"warrior\", \"1\", \"1050\")\nprint(sortCodesignalUsers(users))\n","repo_name":"djaychela/playground","sub_path":"codefights/arcade/python/showing_class/sort_codesignal_users.py","file_name":"sort_codesignal_users.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"25729462235","text":"from dataclasses import dataclass, field\nimport json\n\nimport requests\n\nfrom .converters 
import xml_to_json\nfrom .config import PrtgConfig\nfrom .constants import PRTG_REQUEST_CONFIGS\n\n\n@dataclass\nclass Prtg:\n    config: PrtgConfig\n    _resources: dict = field(default=None, repr=False)\n\n    def __post_init__(self):\n        self._resources = {}\n\n    def version(self):\n        status = self.status()\n        return status[\"status\"][\"Version\"]\n\n    def status(self):\n        request_resource = \"status\"\n        req_status_code, content = self._request(request_resource)\n        status_data = xml_to_json(content)\n        return status_data\n\n    def sensor_types(self):\n        request_resource = \"sensor_types\"\n        status_code, content = self._request(request_resource)\n        data = json.loads(content)\n        return data\n\n    def passhash(self):\n        if self.config.passhash:\n            return self.config.passhash\n\n        request_resource = \"passhash\"\n        status_code, content = self._request(request_resource)\n        return content.decode()\n\n    def _request(self, request_resource, special_params=None):\n        # None default avoids the mutable-default-argument pitfall.\n        special_params = special_params or {}\n        route = PRTG_REQUEST_CONFIGS[request_resource][\"route\"]\n        uri = f\"{self.config.host}{route}\"\n        external_params = PRTG_REQUEST_CONFIGS[request_resource].get(\"params\", {})\n        params = {**self.config.auth, **external_params, **special_params}\n        res = requests.get(uri, params=params)\n        return res.status_code, res.content\n\n    def _get_by_name_without_exception(self, resource, name):\n        data = self._get(resource)\n        for item in data[resource][\"item\"]:\n            if item[\"name\"] == name:\n                return item\n\n        return None\n\n    def _get_by_name(self, resource, name):\n        data = self._get_by_name_without_exception(resource, name)\n        if data is None:\n            raise Exception(\n                f\"Could not find item '{name}' in resource '{resource}'\"\n            )\n\n        return data\n\n    def _clear_cache_resources(self, resource=\"*\"):\n        if resource == \"*\":\n            self._resources = {}\n            return\n        self._resources[resource] = None\n\n    def _get(self, resource):\n        cache_data = self._resources.get(resource)\n        if cache_data:\n            return cache_data\n\n        status_code, content = self._request(resource)\n        data = xml_to_json(content)\n        self._resources[resource] = data\n        return data\n\n    def get(self, resource, name):\n        if name == \"*\":\n            return self._get(resource)\n\n        return self._get_by_name(resource, name)\n\n    def _check_existence_of_item(self, resource, item_id_or_name):\n        if not item_id_or_name.isdigit():\n            item = self._get_by_name(resource, item_id_or_name)\n            return item[\"objid\"]\n\n        return item_id_or_name\n\n    def object_state(self, state, resource, item):\n        obj_id = self._check_existence_of_item(resource, item)\n        request_resource = state\n        status_code, content = self._request(\n            request_resource, special_params={\"id\": obj_id}\n        )\n        return status_code\n\n    def duplicate_device(\n        self, source, target_group, target_name, target_host, force_recreate=False\n    ):\n        source_id = self._check_existence_of_item(\"devices\", source)\n        target_id = self._check_existence_of_item(\"groups\", target_group)\n\n        device = self._get_by_name_without_exception(\"devices\", target_name)\n        if device:\n            if not force_recreate:\n                return (\n                    f\"The device '{target_name}' already exists in the 'devices' resource\",\n                    False,\n                )\n\n            device_id = device[\"objid\"]\n            status_code = self.object_state(\"delete\", \"devices\", device_id)\n\n        special_params = {\n            \"id\": source_id,\n            \"targetid\": target_id,\n            \"name\": target_name,\n            \"host\": target_host,\n        }\n        request_resource = \"duplicate\"\n        status_code, content = self._request(\n            request_resource, special_params=special_params\n        )\n        return status_code, True\n\n
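    # duplicate_group mirrors duplicate_device, but refuses to overwrite an existing target group.\n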
    def duplicate_group(self, source, target, target_name):\n        source_id = self._check_existence_of_item(\"groups\", source)\n        target_id = self._check_existence_of_item(\"groups\", target)\n\n        group = self._get_by_name_without_exception(\"groups\", target_name)\n        if group is not None:\n            raise Exception(f\"The group '{target_name}' already exists in the 'groups' resource\")\n\n        special_params = {\"id\": source_id, \"targetid\": target_id, \"name\": target_name}\n        request_resource = \"duplicate\"\n        status_code, content = self._request(\n            request_resource, special_params=special_params\n        )\n        return status_code\n\n    def duplicate_sensor(self, source, target_device, target_name):\n        raise NotImplementedError(\"duplicate_sensor is still a @TODO stub\")\n","repo_name":"f9n/prtg-cli","sub_path":"prtg_cli/core/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4886,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"83"} +{"seq_id":"15426280565","text":"from dataloader import EvalDataset, TrainDataset, NewBidirectionalOneShotIterator\nfrom dataloader import get_dataset\n\nimport argparse\nimport os\nimport logging\nimport time\n\nbackend = os.environ.get('DGLBACKEND', 'pytorch')\nif backend.lower() == 'mxnet':\n    import multiprocessing as mp\n    from train_mxnet import load_model\n    from train_mxnet import train\n    from train_mxnet import test\nelse:\n    import torch.multiprocessing as mp\n    from train_pytorch import load_model\n    from train_pytorch import train\n    from train_pytorch import test\n\nclass ArgParser(argparse.ArgumentParser):\n    def __init__(self):\n        super(ArgParser, self).__init__()\n\n        self.add_argument('--model_name', default='TransE',\n                          choices=['TransE', 'TransE_l1', 'TransE_l2', 'TransR',\n                                   'RESCAL', 'DistMult', 'ComplEx', 'RotatE'],\n                          help='model to use')\n        self.add_argument('--data_path', type=str, default='data',\n                          help='root path of all datasets')\n        self.add_argument('--dataset', type=str, default='FB15k',\n                          help='dataset name, under data_path')\n        self.add_argument('--format', type=str, default='1',\n                          help='the format of the dataset.')\n        self.add_argument('--save_path', type=str, default='ckpts',\n                          help='place to save models and logs')\n        self.add_argument('--save_emb', type=str, default=None,\n                          help='save the embeddings in the specific location.')\n\n        self.add_argument('--max_step', type=int, default=80000,\n                          help='train xx steps')\n        self.add_argument('--warm_up_step', type=int, default=None,\n                          help='for learning rate decay')\n        self.add_argument('--batch_size', type=int, default=1024,\n                          help='batch size')\n        self.add_argument('--batch_size_eval', type=int, default=8,\n                          help='batch size used for eval and test')\n        self.add_argument('--neg_sample_size', type=int, default=128,\n                          help='negative sampling size')\n        self.add_argument('--neg_chunk_size', type=int, default=-1,\n                          help='chunk size of the negative edges.')\n        self.add_argument('--neg_deg_sample', action='store_true',\n                          help='negative sample proportional to vertex degree in the training')\n        self.add_argument('--neg_deg_sample_eval', action='store_true',\n                          help='negative sampling proportional to vertex degree in the evaluation')\n        self.add_argument('--neg_sample_size_valid', type=int, default=1000,\n                          help='negative sampling size for validation')\n        self.add_argument('--neg_chunk_size_valid', type=int, default=-1,\n                          help='chunk size of the negative edges.')\n        self.add_argument('--neg_sample_size_test', type=int, default=-1,\n                          help='negative sampling size for testing')\n        self.add_argument('--neg_chunk_size_test', type=int, default=-1,\n                          help='chunk size of the negative edges.')\n        
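# Model capacity and optimization hyperparameters.\n        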
self.add_argument('--hidden_dim', type=int, default=256,\n                          help='hidden dim used by relation and entity embeddings')\n        self.add_argument('--lr', type=float, default=0.0001,\n                          help='learning rate')\n        self.add_argument('-g', '--gamma', type=float, default=12.0,\n                          help='margin value')\n        self.add_argument('--eval_percent', type=float, default=1,\n                          help='sample some percentage for evaluation.')\n        self.add_argument('--no_eval_filter', action='store_true',\n                          help='do not filter positive edges among negative edges for evaluation')\n\n        self.add_argument('--gpu', type=int, default=[-1], nargs='+',\n                          help='a list of active gpu ids, e.g. 0 1 2 4')\n        self.add_argument('--mix_cpu_gpu', action='store_true',\n                          help='mix CPU and GPU training')\n        self.add_argument('-de', '--double_ent', action='store_true',\n                          help='double entity dim for complex number')\n        self.add_argument('-dr', '--double_rel', action='store_true',\n                          help='double relation dim for complex number')\n        self.add_argument('--seed', type=int, default=0,\n                          help='set random seed for reproducibility')\n        self.add_argument('-log', '--log_interval', type=int, default=1000,\n                          help='print training logs after every x steps')\n        self.add_argument('--eval_interval', type=int, default=10000,\n                          help='do evaluation after every x steps')\n        self.add_argument('-adv', '--neg_adversarial_sampling', action='store_true',\n                          help='if use negative adversarial sampling')\n        self.add_argument('-a', '--adversarial_temperature', default=1.0, type=float)\n\n        self.add_argument('--valid', action='store_true',\n                          help='if validating the model during training')\n        self.add_argument('--test', action='store_true',\n                          help='if testing the model after training')\n        self.add_argument('-rc', '--regularization_coef', type=float, default=0.000002,\n                          help='set value > 0.0 if regularization is used')\n        self.add_argument('-rn', '--regularization_norm', type=int, default=3,\n                          help='norm used in regularization')\n        self.add_argument('--num_worker', type=int, default=16,\n                          help='number of workers used for loading data')\n        self.add_argument('--non_uni_weight', action='store_true',\n                          help='if use non-uniform edge weights when computing loss')\n        self.add_argument('--init_step', type=int, default=0,\n                          help='DONT SET MANUALLY, used for resume')\n        self.add_argument('--step', type=int, default=0,\n                          help='DONT SET MANUALLY, track current step')\n        self.add_argument('--pickle_graph', action='store_true',\n                          help='pickle built graph, building a huge graph is slow.')\n        self.add_argument('--num_proc', type=int, default=1,\n                          help='number of processes used')\n        self.add_argument('--rel_part', action='store_true',\n                          help='enable relation partitioning')\n\n\ndef get_logger(args):\n    if not os.path.exists(args.save_path):\n        os.mkdir(args.save_path)\n\n    folder = '{}_{}_'.format(args.model_name, args.dataset)\n    n = len([x for x in os.listdir(args.save_path) if x.startswith(folder)])\n    folder += str(n)\n    args.save_path = os.path.join(args.save_path, folder)\n\n    if not os.path.exists(args.save_path):\n        os.makedirs(args.save_path)\n    log_file = os.path.join(args.save_path, 'train.log')\n\n    logging.basicConfig(\n        format='%(asctime)s %(levelname)-8s %(message)s',\n        level=logging.INFO,\n        datefmt='%Y-%m-%d %H:%M:%S',\n        filename=log_file,\n        filemode='w'\n    )\n\n    logger = logging.getLogger(__name__)\n    print(\"Logs are being recorded at: {}\".format(log_file))\n    return logger\n\n\ndef run(args, logger):\n    # load dataset and samplers\n    dataset = get_dataset(args.data_path, args.dataset, args.format)\n    n_entities = dataset.n_entities\n    n_relations = dataset.n_relations\n    if args.neg_sample_size_test < 
0:\n args.neg_sample_size_test = n_entities\n args.eval_filter = not args.no_eval_filter\n if args.neg_deg_sample_eval:\n assert not args.eval_filter, \"if negative sampling based on degree, we can't filter positive edges.\"\n\n # When we generate a batch of negative edges from a set of positive edges,\n # we first divide the positive edges into chunks and corrupt the edges in a chunk\n # together. By default, the chunk size is equal to the negative sample size.\n # Usually, this works well. But we also allow users to specify the chunk size themselves.\n if args.neg_chunk_size < 0:\n args.neg_chunk_size = args.neg_sample_size\n if args.neg_chunk_size_valid < 0:\n args.neg_chunk_size_valid = args.neg_sample_size_valid\n if args.neg_chunk_size_test < 0:\n args.neg_chunk_size_test = args.neg_sample_size_test\n\n train_data = TrainDataset(dataset, args, ranks=args.num_proc)\n if args.num_proc > 1:\n train_samplers = []\n for i in range(args.num_proc):\n train_sampler_head = train_data.create_sampler(args.batch_size, args.neg_sample_size,\n args.neg_chunk_size,\n mode='chunk-head',\n num_workers=args.num_worker,\n shuffle=True,\n exclude_positive=True,\n rank=i)\n train_sampler_tail = train_data.create_sampler(args.batch_size, args.neg_sample_size,\n args.neg_chunk_size,\n mode='chunk-tail',\n num_workers=args.num_worker,\n shuffle=True,\n exclude_positive=True,\n rank=i)\n train_samplers.append(NewBidirectionalOneShotIterator(train_sampler_head, train_sampler_tail,\n args.neg_chunk_size,\n True, n_entities))\n else:\n train_sampler_head = train_data.create_sampler(args.batch_size, args.neg_sample_size,\n args.neg_chunk_size,\n mode='chunk-head',\n num_workers=args.num_worker,\n shuffle=True,\n exclude_positive=True)\n train_sampler_tail = train_data.create_sampler(args.batch_size, args.neg_sample_size,\n args.neg_chunk_size,\n mode='chunk-tail',\n num_workers=args.num_worker,\n shuffle=True,\n exclude_positive=True)\n train_sampler = NewBidirectionalOneShotIterator(train_sampler_head, train_sampler_tail,\n args.neg_chunk_size,\n True, n_entities)\n\n # for multiprocessing evaluation, we don't need to sample multiple batches at a time\n # in each process.\n num_workers = args.num_worker\n if args.num_proc > 1:\n num_workers = 1\n if args.valid or args.test:\n eval_dataset = EvalDataset(dataset, args)\n if args.valid:\n # Here we want to use the regualr negative sampler because we need to ensure that\n # all positive edges are excluded.\n if args.num_proc > 1:\n valid_sampler_heads = []\n valid_sampler_tails = []\n for i in range(args.num_proc):\n valid_sampler_head = eval_dataset.create_sampler('valid', args.batch_size_eval,\n args.neg_sample_size_valid,\n args.neg_chunk_size_valid,\n args.eval_filter,\n mode='chunk-head',\n num_workers=num_workers,\n rank=i, ranks=args.num_proc)\n valid_sampler_tail = eval_dataset.create_sampler('valid', args.batch_size_eval,\n args.neg_sample_size_valid,\n args.neg_chunk_size_valid,\n args.eval_filter,\n mode='chunk-tail',\n num_workers=num_workers,\n rank=i, ranks=args.num_proc)\n valid_sampler_heads.append(valid_sampler_head)\n valid_sampler_tails.append(valid_sampler_tail)\n else:\n valid_sampler_head = eval_dataset.create_sampler('valid', args.batch_size_eval,\n args.neg_sample_size_valid,\n args.neg_chunk_size_valid,\n args.eval_filter,\n mode='chunk-head',\n num_workers=num_workers,\n rank=0, ranks=1)\n valid_sampler_tail = eval_dataset.create_sampler('valid', args.batch_size_eval,\n args.neg_sample_size_valid,\n args.neg_chunk_size_valid,\n 
args.eval_filter,\n mode='chunk-tail',\n num_workers=num_workers,\n rank=0, ranks=1)\n if args.test:\n # Here we want to use the regualr negative sampler because we need to ensure that\n # all positive edges are excluded.\n if args.num_proc > 1:\n test_sampler_tails = []\n test_sampler_heads = []\n for i in range(args.num_proc):\n test_sampler_head = eval_dataset.create_sampler('test', args.batch_size_eval,\n args.neg_sample_size_test,\n args.neg_chunk_size_test,\n args.eval_filter,\n mode='chunk-head',\n num_workers=num_workers,\n rank=i, ranks=args.num_proc)\n test_sampler_tail = eval_dataset.create_sampler('test', args.batch_size_eval,\n args.neg_sample_size_test,\n args.neg_chunk_size_test,\n args.eval_filter,\n mode='chunk-tail',\n num_workers=num_workers,\n rank=i, ranks=args.num_proc)\n test_sampler_heads.append(test_sampler_head)\n test_sampler_tails.append(test_sampler_tail)\n else:\n test_sampler_head = eval_dataset.create_sampler('test', args.batch_size_eval,\n args.neg_sample_size_test,\n args.neg_chunk_size_test,\n args.eval_filter,\n mode='chunk-head',\n num_workers=num_workers,\n rank=0, ranks=1)\n test_sampler_tail = eval_dataset.create_sampler('test', args.batch_size_eval,\n args.neg_sample_size_test,\n args.neg_chunk_size_test,\n args.eval_filter,\n mode='chunk-tail',\n num_workers=num_workers,\n rank=0, ranks=1)\n\n # We need to free all memory referenced by dataset.\n eval_dataset = None\n dataset = None\n # load model\n model = load_model(logger, args, n_entities, n_relations)\n\n if args.num_proc > 1:\n model.share_memory()\n\n # train\n start = time.time()\n if args.num_proc > 1:\n procs = []\n for i in range(args.num_proc):\n rel_parts = train_data.rel_parts if args.rel_part else None\n valid_samplers = [valid_sampler_heads[i], valid_sampler_tails[i]] if args.valid else None\n proc = mp.Process(target=train, args=(args, model, train_samplers[i], i, rel_parts, valid_samplers))\n procs.append(proc)\n proc.start()\n for proc in procs:\n proc.join()\n else:\n valid_samplers = [valid_sampler_head, valid_sampler_tail] if args.valid else None\n train(args, model, train_sampler, valid_samplers)\n print('training takes {} seconds'.format(time.time() - start))\n\n if args.save_emb is not None:\n if not os.path.exists(args.save_emb):\n os.mkdir(args.save_emb)\n model.save_emb(args.save_emb, args.dataset)\n\n # test\n if args.test:\n start = time.time()\n if args.num_proc > 1:\n queue = mp.Queue(args.num_proc)\n procs = []\n for i in range(args.num_proc):\n proc = mp.Process(target=test, args=(args, model, [test_sampler_heads[i], test_sampler_tails[i]],\n i, 'Test', queue))\n procs.append(proc)\n proc.start()\n\n total_metrics = {}\n for i in range(args.num_proc):\n metrics = queue.get()\n for k, v in metrics.items():\n if i == 0:\n total_metrics[k] = v / args.num_proc\n else:\n total_metrics[k] += v / args.num_proc\n for k, v in metrics.items():\n print('Test average {} at [{}/{}]: {}'.format(k, args.step, args.max_step, v))\n\n for proc in procs:\n proc.join()\n else:\n test(args, model, [test_sampler_head, test_sampler_tail])\n print('test:', time.time() - start)\n\nif __name__ == '__main__':\n args = ArgParser().parse_args()\n print(\"Args: \", args)\n logger = get_logger(args)\n run(args, logger)\n","repo_name":"prempv/dgl_kge","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":19605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"29240113982","text":"\"\"\"\n# Name: No-Reference 
Multiscale Autocorrelation (NRMAC)\n# Purpose: To quantify the image quality based on a No-Reference Multiscale Autocorrelation Metric.\n# References: This method is a modification of the Vollath's correlation (Santos, 1997) metric\n# Function: This NRMAC is a focus measure based on image autocorrelation from multiple scales\n# The output with a lower value indicates poorer quality of the input image. \n The NRMAC has been tested on RGB geotif images and an empirical threshold is set be 15 and needs to be further evaluated. This value is subject to be changed based on sensors setting and user requirements. \n# Version: 1.0\n#\n# Created: 03/20/2018\n\"\"\"\nimport os\nimport shutil\nimport tempfile\nimport cv2\nimport numpy as np\nfrom osgeo import gdal\nfrom PIL import Image, ImageFilter\n\nfrom pyclowder.utils import CheckMessage\nfrom pyclowder.datasets import download_metadata, upload_metadata, remove_metadata, submit_extraction\nfrom terrautils.extractors import TerrarefExtractor, build_metadata, upload_to_dataset, \\\n is_latest_file, contains_required_files, file_exists, load_json_file, check_file_in_dataset\nfrom terrautils.metadata import get_extractor_metadata, get_terraref_metadata\nfrom terrautils.formats import create_geotiff\nfrom terrautils.spatial import geojson_to_tuples\n\n\ndef MAC(im1, im2, im): # main function: Multiscale Autocorrelation (MAC)\n h, v, c = im1.shape\n if c>1:\n im = np.matrix.round(rgb2gray(im))\n im1 = np.matrix.round(rgb2gray(im1))\n im2 = np.matrix.round(rgb2gray(im2))\n # multiscale parameters\n scales = np.array([2, 3, 5])\n FM = np.zeros(len(scales))\n for s in range(len(scales)):\n im1[0: h-1,:] = im[1:h,:]\n im2[0: h-scales[s], :]= im[scales[s]:h,:]\n dif = im*(im1 - im2)\n FM[s] = np.mean(dif)\n NRMAC = np.mean(FM)\n return NRMAC\n\ndef rgb2gray(rgb):\n r, g, b = rgb[:,:,0], rgb[:,:,1], rgb[:,:,2]\n gray = 0.2989 * r + 0.5870 * g + 0.1140 * b\n return gray\n\ndef getImageQuality(imgfile):\n # Some RGB Geotiffs have issues with Image library...\n #img = Image.open(imgfile)\n #img = np.array(img)\n img = np.rollaxis(gdal.Open(imgfile).ReadAsArray().astype(np.uint8), 0, 3)\n NRMAC = MAC(img, img, img)\n return NRMAC\n\n\ndef add_local_arguments(parser):\n # add any additional arguments to parser\n parser.add_argument('--left', type=bool, default=os.getenv('LEFT_ONLY', True),\n help=\"only generate a mask for the left image\")\n\nclass RGB_NRMAC(TerrarefExtractor):\n def __init__(self):\n super(RGB_NRMAC, self).__init__()\n\n add_local_arguments(self.parser)\n\n # parse command line and load default logging configuration\n self.setup(sensor='rgb_nrmac')\n\n # assign local arguments\n self.leftonly = self.args.left\n\n def check_message(self, connector, host, secret_key, resource, parameters):\n if \"rulechecked\" in parameters and parameters[\"rulechecked\"]:\n return CheckMessage.download\n\n self.start_check(resource)\n\n if not is_latest_file(resource):\n self.log_skip(resource, \"not latest file\")\n return CheckMessage.ignore\n\n # Check for a left and right BIN file - skip if not found\n if not contains_required_files(resource, ['_left.tif', '_right.tif']):\n self.log_skip(resource, \"missing required files\")\n # Check for raw_data_source in metadata and resumbit to bin2tif if available...\n md = download_metadata(connector, host, secret_key, resource['id'])\n terra_md = get_terraref_metadata(md)\n if 'raw_data_source' in terra_md:\n raw_id = str(terra_md['raw_data_source'].split(\"/\")[-1])\n self.log_info(resource, \"submitting raw source 
%s to bin2tif\" % raw_id)\n submit_extraction(connector, host, secret_key, raw_id, \"terra.stereo-rgb.bin2tif\")\n return CheckMessage.ignore\n\n # Check metadata to verify we have what we need\n md = download_metadata(connector, host, secret_key, resource['id'])\n if get_terraref_metadata(md):\n if get_extractor_metadata(md, self.extractor_info['name'], self.extractor_info['version']):\n # Make sure outputs properly exist\n timestamp = resource['dataset_info']['name'].split(\" - \")[1]\n left_nrmac_tiff = self.sensors.create_sensor_path(timestamp, opts=['left'])\n right_nrmac_tiff = self.sensors.create_sensor_path(timestamp, opts=['right'])\n if (self.leftonly and file_exists(left_nrmac_tiff)) or (\n not self.leftonly and file_exists(left_nrmac_tiff) and file_exists(right_nrmac_tiff)):\n if contains_required_files(resource, [os.path.basename(left_nrmac_tiff)]):\n self.log_skip(resource, \"metadata v%s and outputs already exist\" % self.extractor_info['version'])\n return CheckMessage.ignore\n else:\n self.log_info(resource, \"output file exists but not yet uploaded\")\n # Have TERRA-REF metadata, but not any from this extractor\n return CheckMessage.download\n else:\n self.log_skip(resource, \"no terraref metadata found\")\n return CheckMessage.ignore\n\n def process_message(self, connector, host, secret_key, resource, parameters):\n self.start_message(resource)\n\n # Get left/right files and metadata\n img_left, img_right, metadata = None, None, None\n for fname in resource['local_paths']:\n if fname.endswith('_dataset_metadata.json'):\n all_dsmd = load_json_file(fname)\n terra_md_full = get_terraref_metadata(all_dsmd, 'stereoTop')\n elif fname.endswith('_left.tif'):\n img_left = fname\n elif fname.endswith('_right.tif'):\n img_right = fname\n if None in [img_left, img_right, terra_md_full]:\n raise ValueError(\"could not locate all files & metadata in processing\")\n\n timestamp = resource['dataset_info']['name'].split(\" - \")[1]\n target_dsid = resource['id']\n left_nrmac_tiff = self.sensors.create_sensor_path(timestamp, opts=['left'])\n right_nrmac_tiff = self.sensors.create_sensor_path(timestamp, opts=['right'])\n uploaded_file_ids = []\n\n self.log_info(resource, \"determining image quality\")\n left_qual = getImageQuality(img_left)\n if not self.leftonly:\n right_qual = getImageQuality(img_right)\n\n left_bounds = geojson_to_tuples(terra_md_full['spatial_metadata']['left']['bounding_box'])\n right_bounds = geojson_to_tuples(terra_md_full['spatial_metadata']['right']['bounding_box'])\n\n if not file_exists(left_nrmac_tiff) or self.overwrite:\n self.log_info(resource, \"creating %s\" % left_nrmac_tiff)\n create_geotiff(np.array([[left_qual, left_qual],[left_qual, left_qual]]), left_bounds,\n left_nrmac_tiff, None, True, self.extractor_info, terra_md_full, compress=True)\n self.created += 1\n self.bytes += os.path.getsize(left_nrmac_tiff)\n found_in_dest = check_file_in_dataset(connector, host, secret_key, target_dsid, left_nrmac_tiff,\n remove=self.overwrite)\n if not found_in_dest or self.overwrite:\n self.log_info(resource, \"uploading %s\" % left_nrmac_tiff)\n fileid = upload_to_dataset(connector, host, self.clowder_user, self.clowder_pass, target_dsid,\n left_nrmac_tiff)\n uploaded_file_ids.append(host + (\"\" if host.endswith(\"/\") else \"/\") + \"files/\" + fileid)\n\n\n if not self.leftonly:\n if (not file_exists(right_nrmac_tiff) or self.overwrite):\n self.log_info(resource, \"creating %s\" % right_nrmac_tiff)\n create_geotiff(np.array([[right_qual, right_qual],[right_qual, 
right_qual]]), right_bounds,\n right_nrmac_tiff, None, True, self.extractor_info, terra_md_full, compress=True)\n self.created += 1\n self.bytes += os.path.getsize(right_nrmac_tiff)\n found_in_dest = check_file_in_dataset(connector, host, secret_key, target_dsid, right_nrmac_tiff,\n remove=self.overwrite)\n if not found_in_dest or self.overwrite:\n self.log_info(resource, \"uploading %s\" % right_nrmac_tiff)\n fileid = upload_to_dataset(connector, host, self.clowder_user, self.clowder_pass, target_dsid,\n right_nrmac_tiff)\n uploaded_file_ids.append(host + (\"\" if host.endswith(\"/\") else \"/\") + \"files/\" + fileid)\n\n # Tell Clowder this is completed so subsequent file updates don't daisy-chain\n md = {\n \"files_created\": uploaded_file_ids,\n \"left_quality_score\": left_qual\n }\n if not self.leftonly:\n md[\"right_quality_score\"] = right_qual\n extractor_md = build_metadata(host, self.extractor_info, resource['id'], md, 'file')\n self.log_info(resource, \"uploading extractor metadata to Lv1 dataset\")\n remove_metadata(connector, host, secret_key, resource['id'], self.extractor_info['name'])\n upload_metadata(connector, host, secret_key, resource['id'], extractor_md)\n\n self.end_message(resource)\n\n\nif __name__ == \"__main__\":\n extractor = RGB_NRMAC()\n extractor.start()\n","repo_name":"terraref/quality-metrics","sub_path":"rgb_qc/terra_nrmac.py","file_name":"terra_nrmac.py","file_ext":"py","file_size_in_byte":9705,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"12491791838","text":"# Python script\nimport maya.cmds as cmd\n\nif cmd.window(\"clothCleanup\", ex=True):\n cmd.deleteUI(\"clothCleanup\", window=True)\n\ncmd.window(\"clothCleanup\", title=\"nCloth cleanup tool\")\ncmd.columnLayout(adj=True, rs=10)\n\ncmd.text(label=\"This tool makes the nCloth into a normal mesh.\", align=\"center\")\ncmd.checkBox(\"rigid\", label=\"Remove rigidbody?\", value=True)\ncmd.checkBox(\"nucleus\", label=\"Remove nucleus?\", value=True)\ncmd.button(label=\"Cleanup\", command=\"import nClothCleanup as g\\ng.cleanup()\")\ncmd.showWindow(\"clothCleanup\")\n\ndef cleanup():\n selection = cmd.ls(sl=True)\n print(selection)\n # deletes history and clears\n cmd.delete(selection, ch=True)\n cmd.makeIdentity(selection, apply=True, jointOrient=True, rotate=True, translate=True, scale=True)\n cmd.delete(\"nCloth*\")\n if cmd.checkBox(\"rigid\", query=True, value=True):\n cmd.delete(\"nRigid*\")\n shapes = cmd.listRelatives(shapes=True)\n for item in shapes:\n if \"outputCloth\" in item:\n pass\n else:\n cmd.delete(item)\n print(shapes)\n\n if cmd.checkBox(\"nucleus\", query=True, value=True):\n cmd.delete(\"*nucleus*\")\n\n if cmd.window(\"clothCleanup\", ex=True):\n cmd.deleteUI(\"clothCleanup\", window=True)\n","repo_name":"andesyv/MayaScripts","sub_path":"nClothCleanup.py","file_name":"nClothCleanup.py","file_ext":"py","file_size_in_byte":1245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"22035425030","text":"#Takes a user input of two numbers and those two numbers multiplied are over 1000 then instead it adds them. 
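(For example, inputs 20 and 60: 20 * 60 = 1200 > 1000, so the program prints 20 + 60 = 80.)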
\n#Solving some basic python exercises with this\n\nx = int(input(\"Type a number\\n\"))\ny = int(input(\"Type a second number\\n\"))\n\nif x * y > 1000:\n print(x+y)\nelse:\n print(x*y)\n","repo_name":"RiggsWebDev/MiscScripts","sub_path":"PythonScripts/Exercises/exercise1.py","file_name":"exercise1.py","file_ext":"py","file_size_in_byte":287,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"28028895596","text":"\"\"\"\nQuery system Windows fonts with pure Python.\n\nPublic domain work by anatoly techtonik \nUse MIT License if public domain doesn't make sense for you.\n\n\n\nThe task: Get monospace font for an application in the order of\npreference.\n\nA problem: Font ID in Windows is its name. Windows doesn't provide\nany information about filenames they contained in. From two different\nfiles with the same font name you can get only one.\n\nWindows also doesn't have a clear concept of _generic font family_\nfamiliar from CSS specification. Here is how fontquery maps Windows\nLOGFONT properties to generic CSS font families:\n\n serif - (LOGFONT.lfPitchAndFamily >> 4) == FF_ROMAN\n sans-serif - (LOGFONT.lfPitchAndFamily >> 4) == FF_SWISS\n cursive - (LOGFONT.lfPitchAndFamily >> 4) == FF_SCRIPT\n fantasy - (LOGFONT.lfPitchAndFamily >> 4) == FF_DECORATIVE\n monospace - (lf.lfPitchAndFamily & 0b11) == FIXED_PITCH\n\nNOTE: ATM, May 2015, the Microsoft documentation related to monospace\nis misleading due to poor wording:\n - FF_MODERN in the description of LOGFONT structure tells\n \"Fonts with constant stroke width (monospace), with or without serifs.\n Monospace fonts are usually modern.\n Pica, Elite, and CourierNew are examples.\n \"\n \n Stroke width is the 'pen width', not glyph width. 
It should read\n\n \"Fonts with constant stroke width, with or without serifs.\n Monospace fonts are usually modern, but not all modern are monospace\n \"\n\nPYGLET NOTE:\nExamination of all fonts in a windows xp machine shows that all fonts\nwith\n\n fontentry.vector and fontentry.family != FF_DONTCARE\n\nare rendered fine.\n\n\nUse cases:\n [x] get the list of all available system font names\n [ ] get the list of all fonts for generic family\n [ ] get the list of all fonts for specific charset\n [ ] check if specific font is available\n\nConsiderations:\n - performance of querying all system fonts is not measured\n - Windows doesn't allow to get filenames of the fonts, so if there\n are two fonts with the same name, one will be missing\n\nMSDN:\n\n If you request a font named Palatino, but no such font is available\non the system, the font mapper will substitute a font that has similar\nattributes but a different name.\n\n [ ] check if font chosen by the system has required family\n\n To get the appropriate font, call EnumFontFamiliesEx with the\ndesired font characteristics in the LOGFONT structure, then retrieve the\nappropriate typeface name and create the font using CreateFont or\nCreateFontIndirect.\n\n\"\"\"\nfrom pyglet.libs.win32.context_managers import device_context\n\nDEBUG = False\n\n__all__ = ['have_font', 'font_list']\n\n__version__ = '0.3'\n__url__ = 'https://bitbucket.org/techtonik/fontquery'\n\n\n# -- INTRO: MAINTAIN CACHED FONTS DB --\n\n# [ ] make it Django/NDB style model definition\nclass FontEntry:\n \"\"\"\n Font classification.\n Level 0:\n - name\n - vector (True if font is vector, False for raster fonts)\n - format: ttf | ...\n \"\"\"\n\n def __init__(self, name, vector, format, monospace, family):\n self.name = name\n self.vector = vector\n self.format = format\n self.monospace = monospace\n self.family = family\n\n\n# List of FontEntry objects\nFONTDB = []\n\n# -- CHAPTER 1: GET ALL SYSTEM FONTS USING EnumFontFamiliesEx FROM GDI --\n\n\"\"\"\nQ: Why GDI? Why not GDI+? 
\nA: Wikipedia:\n\n Because of the additional text processing and resolution independence\ncapabilities in GDI+, text rendering is performed by the CPU [2] and it\nis nearly an order of magnitude slower than in hardware accelerated GDI.[3]\nChris Jackson published some tests indicating that a piece of text\nrendering code he had written could render 99,000 glyphs per second in GDI,\nbut the same code using GDI+ rendered 16,600 glyphs per second.\n\"\"\"\n\nimport ctypes\nfrom ctypes import wintypes\nfrom pyglet.libs.win32 import LOGFONT, LOGFONTW\n\nuser32 = ctypes.windll.user32\ngdi32 = ctypes.windll.gdi32\n\n# --- define necessary data structures from wingdi.h\n\n# for calling ANSI functions of Windows API (end with A) TCHAR is\n# defined as single char, for Unicode ones (end witn W) it is WCHAR\nCHAR = ctypes.c_char # Python 2.7 compatibility\nTCHAR = CHAR\nBYTE = ctypes.c_ubyte # http://bugs.python.org/issue16376\n\n# charset codes for LOGFONT structure\nANSI_CHARSET = 0\nARABIC_CHARSET = 178\nBALTIC_CHARSET = 186\nCHINESEBIG5_CHARSET = 136\nDEFAULT_CHARSET = 1\n# - charset for current system locale -\n# means function can be called several times\n# for the single font (for each charset)\nEASTEUROPE_CHARSET = 238\nGB2312_CHARSET = 134\nGREEK_CHARSET = 161\nHANGUL_CHARSET = 129\nHEBREW_CHARSET = 177\nJOHAB_CHARSET = 130\nMAC_CHARSET = 77\nOEM_CHARSET = 255 # OS dependent system charset\nRUSSIAN_CHARSET = 204\nSHIFTJIS_CHARSET = 128\nSYMBOL_CHARSET = 2\nTHAI_CHARSET = 222\nTURKISH_CHARSET = 162\nVIETNAMESE_CHARSET = 163\n\n# build lookup dictionary to get charset name from its code\nCHARSET_NAMES = {}\nfor (name, value) in locals().copy().items():\n if name.endswith('_CHARSET'):\n CHARSET_NAMES[value] = name\n\n# font pitch constants ('fixed pitch' means 'monospace')\nDEFAULT_PITCH = 0\nFIXED_PITCH = 1\nVARIABLE_PITCH = 2\n\n# Windows font family constants\nFF_DONTCARE = 0 # Don't care or don't know\nFF_ROMAN = 1 # with serifs, proportional\nFF_SWISS = 2 # w/out serifs, proportional\nFF_MODERN = 3 # constant stroke width\nFF_SCRIPT = 4 # handwritten\nFF_DECORATIVE = 5 # novelty\n\n\nclass FONTSIGNATURE(ctypes.Structure):\n # supported code pages and Unicode subranges for the font\n # needed for NEWTEXTMETRICEX structure\n _fields_ = [\n ('sUsb', wintypes.DWORD * 4), # 128-bit Unicode subset bitfield (USB)\n ('sCsb', wintypes.DWORD * 2)] # 64-bit, code-page bitfield (CPB)\n\n\nclass NEWTEXTMETRIC(ctypes.Structure):\n # physical font attributes for True Type fonts\n # needed for NEWTEXTMETRICEX structure\n _fields_ = [\n ('tmHeight', wintypes.LONG),\n ('tmAscent', wintypes.LONG),\n ('tmDescent', wintypes.LONG),\n ('tmInternalLeading', wintypes.LONG),\n ('tmExternalLeading', wintypes.LONG),\n ('tmAveCharWidth', wintypes.LONG),\n ('tmMaxCharWidth', wintypes.LONG),\n ('tmWeight', wintypes.LONG),\n ('tmOverhang', wintypes.LONG),\n ('tmDigitizedAspectX', wintypes.LONG),\n ('tmDigitizedAspectY', wintypes.LONG),\n ('mFirstChar', TCHAR),\n ('mLastChar', TCHAR),\n ('mDefaultChar', TCHAR),\n ('mBreakChar', TCHAR),\n ('tmItalic', BYTE),\n ('tmUnderlined', BYTE),\n ('tmStruckOut', BYTE),\n ('tmPitchAndFamily', BYTE),\n ('tmCharSet', BYTE),\n ('tmFlags', wintypes.DWORD),\n ('ntmSizeEM', wintypes.UINT),\n ('ntmCellHeight', wintypes.UINT),\n ('ntmAvgWidth', wintypes.UINT)]\n\nclass NEWTEXTMETRICW(ctypes.Structure):\n _fields_ = [\n ('tmHeight', wintypes.LONG),\n ('tmAscent', wintypes.LONG),\n ('tmDescent', wintypes.LONG),\n ('tmInternalLeading', wintypes.LONG),\n ('tmExternalLeading', wintypes.LONG),\n 
('tmAveCharWidth', wintypes.LONG),\n ('tmMaxCharWidth', wintypes.LONG),\n ('tmWeight', wintypes.LONG),\n ('tmOverhang', wintypes.LONG),\n ('tmDigitizedAspectX', wintypes.LONG),\n ('tmDigitizedAspectY', wintypes.LONG),\n ('mFirstChar', wintypes.WCHAR),\n ('mLastChar', wintypes.WCHAR),\n ('mDefaultChar', wintypes.WCHAR),\n ('mBreakChar', wintypes.WCHAR),\n ('tmItalic', BYTE),\n ('tmUnderlined', BYTE),\n ('tmStruckOut', BYTE),\n ('tmPitchAndFamily', BYTE),\n ('tmCharSet', BYTE),\n ('tmFlags', wintypes.DWORD),\n ('ntmSizeEM', wintypes.UINT),\n ('ntmCellHeight', wintypes.UINT),\n ('ntmAvgWidth', wintypes.UINT)]\n\nclass NEWTEXTMETRICEX(ctypes.Structure):\n # physical font attributes for True Type fonts\n # needed for FONTENUMPROC callback function\n _fields_ = [\n ('ntmTm', NEWTEXTMETRIC),\n ('ntmFontSig', FONTSIGNATURE)]\n\nclass NEWTEXTMETRICEXW(ctypes.Structure):\n _fields_ = [\n ('ntmTm', NEWTEXTMETRICW),\n ('ntmFontSig', FONTSIGNATURE)]\n\n# type for a function that is called by the system for\n# each font during execution of EnumFontFamiliesEx\nFONTENUMPROC = ctypes.WINFUNCTYPE(\n ctypes.c_int, # return non-0 to continue enumeration, 0 to stop\n ctypes.POINTER(LOGFONT),\n ctypes.POINTER(NEWTEXTMETRICEX),\n wintypes.DWORD, # font type, a combination of\n # DEVICE_FONTTYPE\n # RASTER_FONTTYPE\n # TRUETYPE_FONTTYPE\n wintypes.LPARAM\n)\n\nFONTENUMPROCW = ctypes.WINFUNCTYPE(\n ctypes.c_int, # return non-0 to continue enumeration, 0 to stop\n ctypes.POINTER(LOGFONTW),\n ctypes.POINTER(NEWTEXTMETRICEXW),\n wintypes.DWORD,\n wintypes.LPARAM\n)\n\n\n# When running 64 bit windows, some types are not 32 bit, so Python/ctypes guesses wrong\ngdi32.EnumFontFamiliesExA.argtypes = [\n wintypes.HDC,\n ctypes.POINTER(LOGFONT),\n FONTENUMPROC,\n wintypes.LPARAM,\n wintypes.DWORD]\n\n\ngdi32.EnumFontFamiliesExW.argtypes = [\n wintypes.HDC,\n ctypes.POINTER(LOGFONTW),\n FONTENUMPROCW,\n wintypes.LPARAM,\n wintypes.DWORD]\n\ndef _enum_font_names(logfont, textmetricex, fonttype, param):\n \"\"\"callback function to be executed during EnumFontFamiliesEx\n call for each font name. 
it stores names in global variable\n \"\"\"\n global FONTDB\n\n lf = logfont.contents\n name = lf.lfFaceName\n\n # detect font type (vector|raster) and format (ttf)\n # [ ] use Windows constant TRUETYPE_FONTTYPE\n if fonttype & 4:\n vector = True\n fmt = 'ttf'\n else:\n vector = False\n # [ ] research Windows raster format structure\n fmt = 'unknown'\n\n pitch = lf.lfPitchAndFamily & 0b11\n family = lf.lfPitchAndFamily >> 4\n\n # [ ] check FIXED_PITCH, VARIABLE_PITCH and FF_MODERN\n # combination\n #\n # FP T NM 400 CHARSET: 0 DFKai-SB\n # FP T NM 400 CHARSET: 136 DFKai-SB\n # FP T NM 400 CHARSET: 0 @DFKai-SB\n # FP T NM 400 CHARSET: 136 @DFKai-SB\n # VP T M 400 CHARSET: 0 OCR A Extended\n\n monospace = (pitch == FIXED_PITCH)\n\n charset = lf.lfCharSet\n\n FONTDB.append(FontEntry(name, vector, fmt, monospace, family))\n\n if DEBUG:\n info = ''\n\n if pitch == FIXED_PITCH:\n info += 'FP '\n elif pitch == VARIABLE_PITCH:\n info += 'VP '\n else:\n info += ' '\n\n # [ ] check exact fonttype values meaning\n info += '%s ' % {0: 'U', 1: 'R', 4: 'T'}[fonttype]\n\n if monospace:\n info += 'M '\n else:\n info += 'NM '\n\n style = [' '] * 3\n if lf.lfItalic:\n style[0] = 'I'\n if lf.lfUnderline:\n style[1] = 'U'\n if lf.lfStrikeOut:\n style[2] = 'S'\n info += ''.join(style)\n\n info += ' %s' % lf.lfWeight\n\n # if pitch == FIXED_PITCH:\n if 1:\n # print('%s CHARSET: %3s %s' % (info, lf.lfCharSet, lf.lfFaceName))\n print(f'{info} CHARSET: {lf.lfCharSet} {lf.lfFaceName}')\n\n return 1 # non-0 to continue enumeration\n\n\nenum_font_names = FONTENUMPROCW(_enum_font_names)\n\n\n# --- /define\n\n\n# --- prepare and call EnumFontFamiliesEx\n\ndef query(charset=DEFAULT_CHARSET):\n \"\"\"\n Prepare and call EnumFontFamiliesEx.\n\n query()\n - return tuple with sorted list of all available system fonts\n query(charset=ANSI_CHARSET)\n - return tuple sorted list of system fonts supporting ANSI charset\n\n \"\"\"\n global FONTDB\n\n # 1. Get device context of the entire screen\n with device_context(None) as hdc:\n\n # 2. Call EnumFontFamiliesExA (ANSI version)\n\n # 2a. Call with empty font name to query all available fonts\n # (or fonts for the specified charset)\n #\n # NOTES:\n #\n # * there are fonts that don't support ANSI charset\n # * for DEFAULT_CHARSET font is passed to callback function as\n # many times as charsets it supports\n\n # [ ] font name should be less than 32 symbols with terminating \\0\n # [ ] check double purpose - enumerate all available font names\n # - enumerate all available charsets for a single font\n # - other params?\n\n logfont = LOGFONTW(0, 0, 0, 0, 0, 0, 0, 0, charset, 0, 0, 0, 0, '')\n FONTDB = [] # clear cached FONTDB for enum_font_names callback\n res = gdi32.EnumFontFamiliesExW(\n hdc, # handle to device context\n ctypes.byref(logfont),\n enum_font_names, # pointer to callback function\n 0, # lParam - application-supplied data\n 0) # dwFlags - reserved = 0\n # res here is the last value returned by callback function\n\n return FONTDB\n\n\n# --- Public API ---\n\ndef have_font(name, refresh=False):\n \"\"\"\n Return True if font with specified `name` is present. The result\n of querying system font names is cached. 
Set `refresh` parameter\n to True to purge cache and reload font information.\n \"\"\"\n if not FONTDB or refresh:\n query()\n if any(f.name == name for f in FONTDB):\n return True\n else:\n return False\n\n\ndef font_list(vector_only=False, monospace_only=False):\n \"\"\"Return list of system installed font names.\"\"\"\n\n if not FONTDB:\n query()\n\n fonts = FONTDB\n if vector_only:\n fonts = [f for f in fonts if f.vector]\n if monospace_only:\n fonts = [f for f in fonts if f.monospace]\n\n return sorted([f.name for f in fonts])\n\n\n# TODO: move this into tests/\nif __name__ == '__main__':\n import sys\n\n if sys.argv[1:] == ['debug']:\n DEBUG = True\n\n if sys.argv[1:] == ['test'] or DEBUG:\n print('Running tests..')\n # test have_font (Windows)\n test_arial = have_font('Arial')\n print('Have font \"Arial\"? %s' % test_arial)\n print('Have font \"missing-one\"? %s' % have_font('missing-one'))\n # test cache is not rebuilt\n FONTDB = [FontEntry('stub', False, '', False, FF_MODERN)]\n assert (have_font('Arial') != test_arial)\n # test cache is rebiult\n assert (have_font('Arial', refresh=True) == test_arial)\n if not DEBUG:\n sys.exit()\n\n if sys.argv[1:] == ['vector']:\n fonts = font_list(vector_only=True)\n elif sys.argv[1:] == ['mono']:\n fonts = font_list(monospace_only=True)\n elif sys.argv[1:] == ['vector', 'mono']:\n fonts = font_list(vector_only=True, monospace_only=True)\n else:\n fonts = font_list()\n print('\\n'.join(fonts))\n\n if DEBUG:\n print(f\"Total: {len(font_list())}\")\n\n\n# -- CHAPTER 2: WORK WITH FONT DIMENSIONS --\n#\n# Essential info about font metrics http://support.microsoft.com/kb/32667\n# And about logical units at http://www.winprog.org/tutorial/fonts.html\n\n# x. Convert desired font size from points into logical units (pixels)\n\n# By default logical for the screen units are pixels. This is defined\n# by default MM_TEXT mapping mode.\n\n# Point is ancient unit of measurement for physical size of a font.\n# 10pt is equal to 3.527mm. To make sure a char on screen has physical\n# size equal to 3.527mm, we need to know display size to calculate how\n# many pixels are in 3.527mm, and then fetch font that best matches\n# this size.\n\n# Essential info about conversion http://support.microsoft.com/kb/74299\n\n# x.1 Get pixels per inch using GetDeviceCaps() or ...\n\n\n# -- CHAPTER 3: LAYERED FONT API --\n#\n# y. Font object with several layers of info\n\n# Font object should contains normalized font information. This\n# information is split according to usage. For example, level 0 property\n# is font id - its name. 
Level 1 can be information about loaded font\n# characters - in pyglet it could be cached/used glyphs and video memory\n# taken by those glyphs.\n\n# [ ] (pyglet) investigate if it is possible to get video memory size\n# occupied by the font glyphs\n\n# [ ] (pyglet) investigate if it is possible to unload font from video\n# memory if it's unused\n","repo_name":"pyglet/pyglet","sub_path":"pyglet/font/win32query.py","file_name":"win32query.py","file_ext":"py","file_size_in_byte":16070,"program_lang":"python","lang":"en","doc_type":"code","stars":1621,"dataset":"github-code","pt":"83"} +{"seq_id":"27103616451","text":"from typing import List\n\n\nclass Solution:\n    def minimumTime(self, time: List[int], totalTrips: int) -> int:\n\n        def condition(curtime):\n            # Total trips all buses can finish within curtime.\n            return sum(curtime // t for t in time) >= totalTrips\n\n        # Binary search for the smallest time that completes totalTrips trips.\n        ans = -1\n        left = 0\n        right = totalTrips * min(time)\n        while left <= right:\n            mid = left + (right - left) // 2\n            if condition(mid):\n                ans = mid\n                right = mid - 1\n            else:\n                left = mid + 1\n\n        return ans","repo_name":"ChengTsungPao/LeetCode","sub_path":"2187_Minimum_Time_to_Complete_Trips/code1.py","file_name":"code1.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"83"} +{"seq_id":"41074774281","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Nov 30 21:45:23 2018\n\n@author: sai\n\"\"\"\nimport cv2 as cv\nimport numpy as np\n\n\ndef erosion(img, se):\n    img_h = len(img)\n    img_w = len(img[0])\n    img1 = [[0 for i in range(len(img[0]))] for j in range(len(img))]\n    se_h = len(se)\n    se_w = len(se[0])\n    for i in range(0, img_h-se_h):\n        for j in range(0, img_w-se_w):\n            count = 0\n            for m in range(se_h):\n                for n in range(se_w):\n                    if img[i+m][j+n] == se[m][n]:\n                        count += 1\n            if count == se_h * se_w:  # keep the pixel only if the whole SE matches\n                img1[i+1][j+1] = 1\n    return np.array(img1)\n\ndef dilation(img, se):\n    img_h = len(img)\n    img_w = len(img[0])\n    img1 = [[0 for i in range(len(img[0]))] for j in range(len(img))]\n    se_h = len(se)\n    se_w = len(se[0])\n    for i in range(0, img_h-se_h):\n        for j in range(0, img_w-se_w):\n            count = 0\n            for m in range(se_h):\n                for n in range(se_w):\n                    if img[i+m][j+n] == se[m][n]:\n                        count += 1\n            if count > 0:  # any overlap with the SE sets the pixel\n                img1[i+1][j+1] = 1\n    return np.array(img1)\n\ndef opening(img, se):\n    res = erosion(img, se)\n    res1 = dilation(res, se)\n    return res1\n\ndef closing(img, se):\n    res = dilation(img, se)\n    res1 = erosion(res, se)\n    return res1\n\n\nimg = cv.imread(\"C:/Users/sai/Downloads/CVIP/Project_3/original_imgs/noise.jpg\", 0)\nimg = np.array(img)\nimg2 = img\nimg = img/255\nse = [[1,1,1],[1,1,1],[1,1,1]]\nse = np.array(se)\n#operation 1\nopening_1 = opening(img,se)\nclosing_1 = closing(opening_1,se)\n\n#operation 2\nclosing_2 = closing(img,se)\nopening_2 = opening(closing_2,se)\n\n#boundaries 1\nboundary1 = erosion(closing_1,se)\noutput1 = closing_1-boundary1\n\n#boundaries 2\nboundary2 = erosion(opening_2,se)\noutput2 = opening_2-boundary2\n\n\ncv.imwrite('res_noise1.jpg',closing_1*255)\ncv.imwrite('res_noise2.jpg',opening_2*255)\ncv.imwrite('res_bound1.jpg',output1*255)\ncv.imwrite('res_bound2.jpg',output2*255)\n","repo_name":"skalyan1210/ComputerVision","sub_path":"Project_3/task1.py","file_name":"task1.py","file_ext":"py","file_size_in_byte":2001,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"29214211810","text":"import sys\nsys.stdin = open('input.txt', 'r')\n\nT = int(input())  # test case\n\nfor tc in range(1, T + 1):\n    iteration = int(input())  # number of commands\n    arr = [list(map(int, input().split())) for _ in range(iteration)]\n\n
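    # Each command row is [op, value]: op 1 = accelerate by value, 2 = decelerate by value, 0 = keep speed.\n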
    # 0. set up speed and distance variables\n    velocity, distance = 0, 0\n\n    # 1. scan arr[y][0] to see which action it is\n    for y in range(len(arr)):\n        # 1-1. acceleration\n        if arr[y][0] == 1:\n            velocity += arr[y][1]  # apply the acceleration\n            distance += velocity\n        # 1-2. deceleration\n        elif arr[y][0] == 2:\n            # if the deceleration exceeds the current speed, clamp velocity to 0\n            if arr[y][1] > velocity:\n                velocity = 0\n            else:\n                velocity -= arr[y][1]\n            distance += velocity\n        # 1-3. keep the current speed\n        elif arr[y][0] == 0:\n            distance += velocity\n\n    print('#{} {}'.format(tc, distance))","repo_name":"qsoo/algo","sub_path":"SWEA/d2_1940/RC_car.py","file_name":"RC_car.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"72775919632","text":"import functools\n\nfrom .models import *\nfrom rest_framework.response import Response\nfrom rest_framework import status\n\n\ndef is_user_in_project(user_id, project_id):\n    user = User.objects.filter(id=user_id, is_deleted=False).first()\n    if user is None:\n        return {\n            'result': False,\n            'detail': 'User does not exist'\n        }\n    project = Project.objects.filter(id=project_id).first()\n    if project is None:\n        return {\n            'result': False,\n            'detail': 'Project does not exist'\n        }\n    project_member = ProjectMember.objects.filter(user=user, project=project).first()\n    if project_member is None:\n        return {\n            'result': False,\n            'detail': 'User is not in project',\n        }\n    else:\n        return {\n            'result': True\n        }\n\n\ndef auth(func):\n\n    @functools.wraps(func)\n    def execute(view_set, request, *args, **kwargs):\n        user = User.objects.filter(email=request.user, is_deleted=False).first()\n        if user is None:\n            return Response({}, status.HTTP_401_UNAUTHORIZED)\n        return func(view_set, request, *args, **kwargs)\n\n    return execute\n\n\ndef is_project_member(func):\n\n    @functools.wraps(func)\n    @auth\n    def execute(view_set, request, *args, **kwargs):\n        user = User.objects.filter(email=request.user).first()\n        project = Project.objects.filter(pk=request.data['project_id']).first()\n        if project is None:\n            return Response({'error': 'Project does not exist.'}, status.HTTP_404_NOT_FOUND)\n        project_member = ProjectMember.objects.filter(user=user, project=project).first()\n        if project_member is None:\n            return Response({'error': 'User is not a project member.'}, status.HTTP_403_FORBIDDEN)\n        return func(view_set, request, *args, **kwargs)\n\n    return execute\n\n\ndef is_project_owner(func):\n\n    @functools.wraps(func)\n    @is_project_member\n    def execute(view_set, request, *args, **kwargs):\n        user = User.objects.filter(email=request.user).first()\n        project = Project.objects.filter(pk=request.data['project_id']).first()\n        if project.owner != user:\n            return Response({'errors': 'This action is for the project owner only.'}, status.HTTP_403_FORBIDDEN)\n        return func(view_set, request, *args, **kwargs)\n\n    return execute\n\n\ndef is_admin(func):\n\n    @functools.wraps(func)\n    @auth\n    def execute(view_set, request, *args, **kwargs):\n        user = User.objects.filter(email=request.user).first()\n        if not user.is_superuser:\n            return Response({'errors': 'This site is for admins only'}, status.HTTP_403_FORBIDDEN)\n        return func(view_set, request, *args, **kwargs)\n\n    return execute\n\n\ndef update_model(obj, data, fields):\n    for field in fields:\n        if data.get(field) is not None:\n            setattr(obj, field, data[field])\n    
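# Persist once, after all requested fields have been applied.\n    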
obj.save()\n","repo_name":"thienanh1999/final-thesis","sub_path":"labeling_tool/server/app/api/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2887,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"71445157712","text":"\"\"\"mirgecom driver for the Y0 demonstration.\n\nNote: this example requires a *scaled* version of the Y0\ngrid. A working grid example is located here:\ngithub.com:/illinois-ceesd/data@y0scaled\n\"\"\"\n\n__copyright__ = \"\"\"\nCopyright (C) 2020 University of Illinois Board of Trustees\n\"\"\"\n\n__license__ = \"\"\"\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n\"\"\"\nimport yaml\nimport logging\nimport numpy as np\nimport pyopencl as cl\nimport numpy.linalg as la # noqa\nimport pyopencl.array as cla # noqa\nfrom functools import partial\n\nfrom meshmode.dof_array import thaw\nfrom meshmode.array_context import PyOpenCLArrayContext\nfrom meshmode.mesh import BTAG_ALL, BTAG_NONE # noqa\nfrom grudge.dof_desc import DTAG_BOUNDARY\nfrom grudge.eager import EagerDGDiscretization\nfrom grudge.shortcuts import make_visualizer\n\nfrom mirgecom.profiling import PyOpenCLProfilingArrayContext\n\nfrom mirgecom.navierstokes import ns_operator\nfrom mirgecom.simutil import (\n check_step,\n get_sim_timestep,\n generate_and_distribute_mesh,\n write_visfile,\n check_naninf_local,\n check_range_local\n)\nfrom mirgecom.restart import (\n write_restart_file\n)\nfrom mirgecom.io import make_init_message\nfrom mirgecom.mpi import mpi_entry_point\nimport pyopencl.tools as cl_tools\n# from mirgecom.checkstate import compare_states\nfrom mirgecom.integrators import (\n rk4_step,\n lsrk54_step,\n lsrk144_step,\n euler_step\n)\nfrom mirgecom.steppers import advance_state\nfrom mirgecom.boundary import (\n PrescribedViscousBoundary\n)\nfrom mirgecom.fluid import make_conserved\nfrom mirgecom.initializers import (\n PlanarDiscontinuity,\n MixtureInitializer\n)\nfrom mirgecom.transport import SimpleTransport\nfrom mirgecom.eos import PyrometheusMixture\nimport cantera\nimport pyrometheus as pyro\n\nfrom logpyle import IntervalTimer, set_dt\nfrom mirgecom.euler import extract_vars_for_logging, units_for_logging\nfrom mirgecom.logging_quantities import (\n initialize_logmgr, logmgr_add_many_discretization_quantities,\n logmgr_add_cl_device_info, logmgr_set_time, LogUserQuantity,\n set_sim_state\n)\nlogger = logging.getLogger(__name__)\n\n\nclass MyRuntimeError(RuntimeError):\n \"\"\"Simple exception to kill the simulation.\"\"\"\n\n 
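# no extra behavior is needed; the subclass exists so drivers can catch fatal simulation errors specifically\n    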
pass\n\n\n@mpi_entry_point\ndef main(ctx_factory=cl.create_some_context, casename="flame1d",\n         user_input_file=None, restart_file=None, use_profiling=False,\n         use_logmgr=False, use_lazy_eval=False):\n    \"\"\"Drive the 1D Flame example.\"\"\"\n\n    from mpi4py import MPI\n    comm = MPI.COMM_WORLD\n    rank = 0\n    rank = comm.Get_rank()\n    nparts = comm.Get_size()\n\n    restart_path = \"restart_data/\"\n    viz_path = \"viz_data/\"\n    vizname = viz_path+casename\n    snapshot_pattern = restart_path+\"{cname}-{step:06d}-{rank:04d}.pkl\"\n\n    logmgr = initialize_logmgr(use_logmgr, filename=(f\"{casename}.sqlite\"),\n                               mode=\"wo\", mpi_comm=comm)\n\n    cl_ctx = ctx_factory()\n    if use_profiling:\n        if use_lazy_eval:\n            raise RuntimeError(\"Cannot run lazy with profiling.\")\n        queue = cl.CommandQueue(cl_ctx,\n            properties=cl.command_queue_properties.PROFILING_ENABLE)\n        actx = PyOpenCLProfilingArrayContext(queue,\n            allocator=cl_tools.MemoryPool(cl_tools.ImmediateAllocator(queue)),\n            logmgr=logmgr)\n    else:\n        queue = cl.CommandQueue(cl_ctx)\n        if use_lazy_eval:\n            from meshmode.array_context import PytatoPyOpenCLArrayContext\n            actx = PytatoPyOpenCLArrayContext(queue)\n        else:\n            actx = PyOpenCLArrayContext(queue,\n                allocator=cl_tools.MemoryPool(cl_tools.ImmediateAllocator(queue)))\n\n    # discretization and model control\n    order = 1\n    char_len = 0.0001\n    fuel = \"C2H4\"\n\n    if user_input_file:\n        if rank == 0:\n            with open(user_input_file) as f:\n                input_data = yaml.load(f, Loader=yaml.FullLoader)\n        else:\n            input_data = None\n        input_data = comm.bcast(input_data, root=0)\n\n        try:\n            order = int(input_data[\"order\"])\n        except KeyError:\n            pass\n        try:\n            char_len = float(input_data[\"char_len\"])\n        except KeyError:\n            pass\n        try:\n            fuel = input_data[\"fuel\"]\n        except KeyError:\n            pass\n\n    allowed_fuels = [\"H2\", \"C2H4\"]\n    if(fuel not in allowed_fuels):\n        error_message = \"Invalid fuel selection: {}\".format(fuel)\n        raise RuntimeError(error_message)\n\n    if rank == 0:\n        print(\"#### Simulation control data: ####\")\n        print(f\"\\torder = {order}\")\n        print(f\"\\tFuel: {fuel}\")\n        print(\"#### Simulation control data: ####\")\n\n    dim = 2\n    current_cfl = 1.0\n    current_t = 0\n    current_dt = 1.e-9\n    constant_cfl = False\n    current_step = 0\n    t_final = 0.\n\n    vel_burned = np.zeros(shape=(dim,))\n    vel_unburned = np.zeros(shape=(dim,))\n\n    # {{{ Set up initial state using Cantera\n\n    # Use Cantera for initialization\n    # -- Pick up a CTI for the thermochemistry config\n    # --- Note: Users may add their own CTI file by dropping it into\n    # --- mirgecom/mechanisms alongside the other CTI files.\n\n    from mirgecom.mechanisms import get_mechanism_cti\n    if fuel == \"C2H4\":\n        mech_cti = get_mechanism_cti(\"uiuc\")\n    elif fuel == \"H2\":\n        mech_cti = get_mechanism_cti(\"sanDiego\")\n        # mech_cti = get_mechanism_cti(\"sanDiego_trans\")\n\n    cantera_soln = cantera.Solution(phase_id=\"gas\", source=mech_cti)\n    nspecies = cantera_soln.n_species\n\n    # Initial temperature, pressure, and mixture mole fractions are needed to\n    # set up the initial state in Cantera.\n    temp_unburned = 300.0\n    temp_ignition = 1500.0\n    # Parameters for calculating the amounts of fuel, oxidizer, and inert species\n    if fuel == \"C2H4\":\n        stoich_ratio = 3.0\n    if fuel == \"H2\":\n        stoich_ratio = 0.5\n    equiv_ratio = 1.0\n    ox_di_ratio = 0.21\n    # Grab the array indices for the specific species, ethylene, oxygen, and nitrogen\n    i_fu = cantera_soln.species_index(fuel)\n    i_ox = cantera_soln.species_index(\"O2\")\n    i_di = cantera_soln.species_index(\"N2\")\n    x = np.zeros(nspecies)\n    # Set the 
species mole fractions according to our desired fuel/air mixture\n    x[i_fu] = (ox_di_ratio*equiv_ratio)/(stoich_ratio+ox_di_ratio*equiv_ratio)\n    x[i_ox] = stoich_ratio*x[i_fu]/equiv_ratio\n    x[i_di] = (1.0-ox_di_ratio)*x[i_ox]/ox_di_ratio\n    # Uncomment next line to make pylint fail when it can't find cantera.one_atm\n    one_atm = cantera.one_atm # pylint: disable=no-member\n    # one_atm = 101325.0\n    pres_unburned = one_atm\n\n    # Let the user know about how Cantera is being initialized\n    print(f\"Input state (T,P,X) = ({temp_unburned}, {pres_unburned}, {x})\")\n    # Set Cantera internal gas temperature, pressure, and mole fractions\n    cantera_soln.TPX = temp_unburned, pres_unburned, x\n    # Pull temperature, total density, mass fractions, and pressure from Cantera\n    # We need total density, and mass fractions to initialize the fluid/gas state.\n    y_unburned = np.zeros(nspecies)\n    can_t, rho_unburned, y_unburned = cantera_soln.TDY\n\n    # *can_t*, *can_p* should not differ (significantly) from user's initial data,\n    # but we want to ensure that we use exactly the same starting point as Cantera,\n    # so we use Cantera's version of these data.\n\n    # now find the conditions for the burned gas\n    cantera_soln.equilibrate(\"TP\")\n    temp_burned, rho_burned, y_burned = cantera_soln.TDY\n    pres_burned = cantera_soln.P\n\n    pyrometheus_mechanism = pyro.get_thermochem_class(cantera_soln)(actx.np)\n\n    kappa = 1.6e-5  # Pr = mu*rho/alpha = 0.75\n    mu = 1.e-5\n    species_diffusivity = 1.e-5 * np.ones(nspecies)\n    transport_model = SimpleTransport(viscosity=mu, thermal_conductivity=kappa,\n                                      species_diffusivity=species_diffusivity)\n\n    eos = PyrometheusMixture(pyrometheus_mechanism, temperature_guess=temp_unburned,\n                             transport_model=transport_model)\n    species_names = pyrometheus_mechanism.species_names\n\n    print(f\"Pyrometheus mechanism species names {species_names}\")\n    print(f\"Unburned (T,P,Y) = ({temp_unburned}, {pres_unburned}, {y_unburned})\")\n    print(f\"Burned (T,P,Y) = ({temp_burned}, {pres_burned}, {y_burned})\")\n\n    flame_start_loc = 0.10\n\n    # use the burned conditions with a lower temperature\n    bulk_init = PlanarDiscontinuity(dim=dim,\n                                    disc_location=flame_start_loc,\n                                    sigma=0.0005,\n                                    nspecies=nspecies,\n                                    temperature_right=temp_ignition,\n                                    temperature_left=temp_unburned,\n                                    pressure_right=pres_burned,\n                                    pressure_left=pres_unburned,\n                                    velocity_right=vel_burned,\n                                    velocity_left=vel_unburned,\n                                    species_mass_right=y_burned,\n                                    species_mass_left=y_unburned)\n    #inflow_init = MixtureInitializer(dim=dim,\n                                     #nspecies=nspecies,\n                                     #pressure=pres_burned,\n                                     #temperature=temp_ignition,\n                                     #massfractions=y_burned,\n                                     #velocity=vel_burned)\n    #outflow_init = MixtureInitializer(dim=dim,\n                                      #nspecies=nspecies,\n                                      #pressure=pres_unburned,\n                                      #temperature=temp_unburned,\n                                      #massfractions=y_unburned,\n                                      #velocity=vel_unburned)\n#\n    #def symmetry(nodes, eos, cv=None, **kwargs):\n        #dim = len(nodes)\n#\n        #if cv is not None:\n            #mass = cv.mass\n            #momentum = cv.momentum\n            #momentum[1] = -1.0 * momentum[1]\n            #energy = cv.energy\n            #species_mass = cv.species_mass\n            #return make_conserved(dim=dim,\n                                  #mass=mass,\n                                  #momentum=momentum,\n                                  #energy=energy,\n                                  #species_mass=species_mass)\n#\n    #def dummy(nodes, eos, cv=None, **kwargs):\n        #dim = len(nodes)\n#\n        #if cv is not None:\n            #mass = cv.mass\n            #momentum = cv.momentum\n            #energy = cv.energy\n            #species_mass = cv.species_mass\n            #return make_conserved(dim=dim,\n                                  #mass=mass,\n                                  #momentum=momentum,\n                                  #energy=energy,\n                                  #species_mass=species_mass)\n#\n    #inflow = PrescribedViscousBoundary(q_func=inflow_init)\n    #outflow = 
PrescribedViscousBoundary(q_func=outflow_init)\n #wall_symmetry = PrescribedViscousBoundary(q_func=symmetry)\n#\n #boundaries = {DTAG_BOUNDARY(\"Inflow\"): inflow,\n #DTAG_BOUNDARY(\"Outflow\"): outflow,\n ##DTAG_BOUNDARY(\"Wall\"): wall}\n ##DTAG_BOUNDARY(\"Wall\"): wall_dummy}\n #DTAG_BOUNDARY(\"Wall\"): wall_symmetry}\n#\n box_ll = (0.0, 0.0)\n box_ur = (0.2, 0.00125)\n num_elements = (int((box_ur[0]-box_ll[0])/char_len),\n int((box_ur[1]-box_ll[1])/char_len))\n\n from meshmode.mesh.generation import generate_regular_rect_mesh\n generate_mesh = partial(generate_regular_rect_mesh,\n a=box_ll,\n b=box_ur,\n n=num_elements,\n boundary_tag_to_face={\n \"Inflow\": [\"+x\"],\n \"Outflow\": [\"-x\"],\n \"Wall\": [\"+y\", \"-y\"]})\n local_mesh, global_nelements = (\n generate_and_distribute_mesh(comm, generate_mesh))\n local_nelements = local_mesh.nelements\n\n\n if rank == 0:\n logging.info(\"Making discretization\")\n discr = EagerDGDiscretization(actx,\n local_mesh,\n order=order,\n mpi_communicator=comm)\n nodes = thaw(actx, discr.nodes())\n visualizer = make_visualizer(discr)\n\n current_state = bulk_init(x_vec=nodes, eos=eos, time=0.)\n\n def my_get_timestep(t, dt, state):\n t_remaining = max(0, t_final - t)\n if constant_cfl:\n from mirgecom.viscous import get_viscous_timestep\n ts_field = current_cfl * get_viscous_timestep(discr, eos=eos, cv=state)\n from grudge.op import nodal_min\n dt = nodal_min(discr, \"vol\", ts_field)\n cfl = current_cfl\n else:\n from mirgecom.viscous import get_viscous_cfl\n ts_field = get_viscous_cfl(discr, eos=eos, dt=dt, cv=state)\n from grudge.op import nodal_max\n cfl = nodal_max(discr, \"vol\", ts_field)\n\n return ts_field, cfl, min(t_remaining, dt)\n\n def my_write_viz(step, t, dt, state, dv=None,\n reaction_rates=None, ts_field=None):\n if dv is None:\n dv = eos.dependent_vars(state)\n if reaction_rates is None:\n reaction_rates = eos.get_production_rates(state)\n if ts_field is None:\n ts_field, cfl, dt = my_get_timestep(t, dt, state)\n viz_fields = [(\"CV_rho\", state.mass),\n (\"CV_rhoU\", state.momentum[0]),\n (\"CV_rhoV\", state.momentum[1]),\n (\"CV_rhoE\", state.energy),\n (\"DV\", dv),\n (\"reaction_rates\", reaction_rates),\n (\"dt\" if constant_cfl else \"cfl\", ts_field)]\n # species mass fractions\n viz_fields.extend(\n (\"Y_\"+species_names[i], state.species_mass[i]/state.mass)\n for i in range(nspecies))\n write_visfile(discr, viz_fields, visualizer, vizname=vizname,\n step=step, t=t, overwrite=True)\n\n def my_write_restart(step, t, state):\n rst_fname = snapshot_pattern.format(cname=casename, step=step, rank=rank)\n if rst_fname != restart_file:\n rst_data = {\n \"local_mesh\": local_mesh,\n \"state\": state,\n \"t\": t,\n \"step\": step,\n \"order\": order,\n \"global_nelements\": global_nelements,\n \"num_parts\": nparts\n }\n write_restart_file(actx, rst_data, rst_fname, comm)\n\n # Dump the final data\n if rank == 0:\n logger.info(\"Checkpointing final state ...\")\n final_dv = eos.dependent_vars(current_state)\n my_write_viz(step=current_step, t=current_t, dt=current_dt, state=current_state,\n dv=final_dv)\n my_write_restart(step=current_step, t=current_t, state=current_state)\n\n if logmgr:\n logmgr.close()\n elif use_profiling:\n print(actx.tabulate_profiling_data())\n\n exit()\n\n\nif __name__ == \"__main__\":\n import sys\n logging.basicConfig(format=\"%(message)s\", level=logging.INFO)\n\n import argparse\n parser = argparse.ArgumentParser(description=\"MIRGE-Com 1D Flame Driver\")\n parser.add_argument(\"-r\", 
\"--restart_file\", type=ascii,\n dest=\"restart_file\", nargs=\"?\", action=\"store\",\n help=\"simulation restart file\")\n parser.add_argument(\"-i\", \"--input_file\", type=ascii,\n dest=\"input_file\", nargs=\"?\", action=\"store\",\n help=\"simulation config file\")\n parser.add_argument(\"-c\", \"--casename\", type=ascii,\n dest=\"casename\", nargs=\"?\", action=\"store\",\n help=\"simulation case name\")\n parser.add_argument(\"--profile\", action=\"store_true\", default=False,\n help=\"enable kernel profiling [OFF]\")\n parser.add_argument(\"--log\", action=\"store_true\", default=True,\n help=\"enable logging profiling [ON]\")\n parser.add_argument(\"--lazy\", action=\"store_true\", default=False,\n help=\"enable lazy evaluation [OFF]\")\n\n args = parser.parse_args()\n\n # for writing output\n casename = \"flame1d\"\n if(args.casename):\n print(f\"Custom casename {args.casename}\")\n casename = (args.casename).replace(\"'\", \"\")\n else:\n print(f\"Default casename {casename}\")\n\n restart_file = None\n if args.restart_file:\n restart_file = (args.restart_file).replace(\"'\", \"\")\n print(f\"Restarting from file: {restart_file}\")\n\n input_file = None\n if(args.input_file):\n input_file = (args.input_file).replace(\"'\", \"\")\n print(f\"Reading user input from {args.input_file}\")\n else:\n print(\"No user input file, using default values\")\n\n print(f\"Running {sys.argv[0]}\\n\")\n main(restart_file=restart_file, user_input_file=input_file,\n use_profiling=args.profile, use_lazy_eval=args.lazy, use_logmgr=args.log)\n","repo_name":"anderson2981/mirgecom_parsl_sample","sub_path":"local_execution/flame_init.py","file_name":"flame_init.py","file_ext":"py","file_size_in_byte":18093,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"16730570264","text":"import time\nfrom plyer import notification\n\nicon = \"battery.jpg\"\n\nif __name__ == \"__main__\":\n\n notification.notify(\n title = \"Notification System\",\n message = \"Your Notification System is Working\",\n app_name = \"1 New Notice\",\n\n\n # displaying time\n timeout = 10\n )\n\n","repo_name":"furkanbicici/NotificationApp","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":306,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"22068218906","text":"import time, os, shutil, glob, subprocess, sys, json\nfrom copy import deepcopy\nimport pytest\nimport pkgutil, pyclbr\nimport importlib\nimport inspect\nimport numpy as np\n\nfrom flare.learners.otf import OTF\nfrom flare.md.fake import FakeDFT\n\nfrom ase import units\nimport ase.calculators as ase_calculators\nfrom ase.md.velocitydistribution import (\n MaxwellBoltzmannDistribution,\n Stationary,\n ZeroRotation,\n)\nfrom ase import io\nfrom ase.symbols import symbols2numbers\n\nimport yaml\n\n\ndef get_super_cell(atoms_config):\n \"\"\"\n Set up Supercell from ASE Atoms\n \"\"\"\n # parse parameters\n atoms_file = atoms_config.get(\"file\")\n atoms_format = atoms_config.get(\"format\", None)\n atoms_index = atoms_config.get(\"index\", -1)\n replicate = atoms_config.get(\"replicate\", [1, 1, 1])\n jitter = atoms_config.get(\"jitter\", 0)\n\n if atoms_format is not None:\n super_cell = io.read(atoms_file, format=atoms_format, index=atoms_index)\n else:\n super_cell = io.read(atoms_file)\n super_cell *= replicate\n\n # jitter positions to give nonzero force on first frame\n super_cell.positions += (2 * 
np.random.rand(len(super_cell), 3) - 1) * jitter\n    return super_cell\n\n\ndef get_dft_calc(dft_config):\n    \"\"\"\n    Set up ASE DFT calculator\n    \"\"\"\n    dft_calc_name = dft_config.get(\"name\", \"LennardJones\")\n    dft_calc_kwargs = dft_config.get(\"kwargs\", {})\n    dft_calc_params = dft_config.get(\"params\", {})\n\n    if dft_calc_name == \"FakeDFT\":\n        dft_calc = FakeDFT(**dft_calc_kwargs)\n        dft_calc.set(**dft_calc_params)\n        return dft_calc\n\n    # find the module including the ASE DFT calculator class by name\n    dft_module_name = \"\"\n    for importer, modname, ispkg in pkgutil.iter_modules(ase_calculators.__path__):\n        module_info = pyclbr.readmodule(\"ase.calculators.\" + modname)\n        if dft_calc_name in module_info:\n            dft_module_name = modname\n            break\n\n    # if the module is not found in the current folder, then search the sub-directory\n    if not dft_module_name:\n        for calc_dir in os.listdir(ase_calculators.__path__[0]):\n            dir_path = ase_calculators.__path__[0] + \"/\" + calc_dir\n            if os.path.isdir(dir_path):\n                for importer, modname, ispkg in pkgutil.iter_modules([dir_path]):\n                    module_info = pyclbr.readmodule(\n                        \"ase.calculators.\" + calc_dir + \".\" + modname\n                    )\n                    if dft_calc_name in module_info:\n                        dft_module_name = calc_dir + \".\" + modname\n                        break\n\n    # import ASE DFT calculator module, and build a DFT calculator class object\n    dft_calc = None\n    dft_module = importlib.import_module(\"ase.calculators.\" + dft_module_name)\n    for name, obj in inspect.getmembers(dft_module, inspect.isclass):\n        if name == dft_calc_name:\n            dft_calc = obj(**dft_calc_kwargs)\n            dft_calc.set(**dft_calc_params)\n    return dft_calc\n\n\ndef get_flare_calc(flare_config):\n    \"\"\"\n    Set up ASE flare calculator\n    \"\"\"\n    gp_name = flare_config.get(\"gp\")\n    if gp_name == \"GaussianProcess\":\n        return get_gp_calc(flare_config)\n    elif gp_name == \"SGP_Wrapper\":\n        return get_sgp_calc(flare_config)\n    else:\n        raise NotImplementedError(f\"{gp_name} is not implemented\")\n\n\ndef get_gp_calc(flare_config):\n    \"\"\"\n    Return a FLARE_Calculator with gp from GaussianProcess\n    \"\"\"\n    from flare.bffs.gp import GaussianProcess\n    from flare.bffs.mgp import MappedGaussianProcess\n    from flare.bffs.gp.calculator import FLARE_Calculator\n    from flare.utils.parameter_helper import ParameterHelper\n\n    gp_file = flare_config.get(\"file\", None)\n\n    # Load GP from file\n    if gp_file is not None:\n        with open(gp_file, \"r\") as f:\n            gp_dct = json.loads(f.readline())\n            if gp_dct.get(\"class\", None) == \"FLARE_Calculator\":\n                flare_calc = FLARE_Calculator.from_file(gp_file)\n            else:\n                gp, _ = GaussianProcess.from_file(gp_file)\n                flare_calc = FLARE_Calculator(gp)\n        # return the kernels too, so this branch matches the (calculator, kernels)\n        # pair returned by the fresh-model branch below and by get_sgp_calc\n        return flare_calc, flare_calc.gp_model.kernels\n\n    # Create gaussian process model\n    kernels = flare_config.get(\"kernels\")\n    hyps = flare_config.get(\"hyps\", \"random\")\n    opt_algorithm = flare_config.get(\"opt_algorithm\", \"BFGS\")\n    max_iterations = flare_config.get(\"max_iterations\", 20)\n    bounds = flare_config.get(\"bounds\", None)\n\n    gp_parameters = flare_config.get(\"gp_parameters\")\n    n_cpus = flare_config.get(\"n_cpus\", 1)\n    use_mapping = flare_config.get(\"use_mapping\", False)\n\n    # set up GP hyperparameters\n    pm = ParameterHelper(\n        kernels=kernels,\n        random=True,\n        parameters=gp_parameters,\n    )\n    hm = pm.as_dict()\n    if hyps == \"random\":\n        hyps = hm[\"hyps\"]\n\n    gp_model = GaussianProcess(\n        kernels=kernels,\n        component=\"mc\",\n        hyps=hyps,\n        cutoffs=hm[\"cutoffs\"],\n        hyps_mask=None,\n        hyp_labels=hm[\"hyp_labels\"],\n        opt_algorithm=opt_algorithm,\n        maxiter=max_iterations,\n        parallel=n_cpus > 1,\n        
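# parallel hyperparameter optimization and prediction are enabled only when n_cpus > 1\n        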
per_atom_par=flare_config.get(\"per_atom_par\", True),\n        n_cpus=n_cpus,\n        n_sample=flare_config.get(\"n_sample\", 100),\n        output=None,\n        name=flare_config.get(\"name\", \"default_gp\"),\n        energy_noise=flare_config.get(\"energy_noise\", 0.01),\n    )\n\n    # create mapped gaussian process\n    if use_mapping:\n        grid_params = flare_config.get(\"grid_params\")\n        var_map = flare_config.get(\"var_map\", \"pca\")\n        unique_species = flare_config.get(\"unique_species\")\n        coded_unique_species = symbols2numbers(unique_species)\n        mgp_model = MappedGaussianProcess(\n            grid_params=grid_params,\n            unique_species=coded_unique_species,\n            n_cpus=n_cpus,\n            var_map=var_map,\n        )\n    else:\n        mgp_model = None\n\n    flare_calc = FLARE_Calculator(\n        gp_model=gp_model,\n        mgp_model=mgp_model,\n        par=n_cpus > 1,\n        use_mapping=use_mapping,\n    )\n    return flare_calc, kernels\n\n\ndef get_sgp_calc(flare_config):\n    \"\"\"\n    Return a SGP_Calculator with sgp from SparseGP\n    \"\"\"\n    from flare.bffs.sgp._C_flare import NormalizedDotProduct, SquaredExponential\n    from flare.bffs.sgp._C_flare import B2, B3, TwoBody, ThreeBody, FourBody\n    from flare.bffs.sgp import SGP_Wrapper\n    from flare.bffs.sgp.calculator import SGP_Calculator\n\n    sgp_file = flare_config.get(\"file\", None)\n\n    # Load sparse GP from file\n    if sgp_file is not None:\n        with open(sgp_file, \"r\") as f:\n            gp_dct = json.loads(f.readline())\n            if gp_dct.get(\"class\", None) == \"SGP_Calculator\":\n                flare_calc, kernels = SGP_Calculator.from_file(sgp_file)\n            else:\n                sgp, kernels = SGP_Wrapper.from_file(sgp_file)\n                flare_calc = SGP_Calculator(sgp)\n        return flare_calc, kernels\n\n    kernels = flare_config.get(\"kernels\")\n    opt_algorithm = flare_config.get(\"opt_algorithm\", \"BFGS\")\n    max_iterations = flare_config.get(\"max_iterations\", 20)\n    bounds = flare_config.get(\"bounds\", None)\n    use_mapping = flare_config.get(\"use_mapping\", False)\n\n    # Define kernels.\n    kernels = []\n    for k in flare_config[\"kernels\"]:\n        if k[\"name\"] == \"NormalizedDotProduct\":\n            kernels.append(NormalizedDotProduct(k[\"sigma\"], k[\"power\"]))\n        elif k[\"name\"] == \"SquaredExponential\":\n            kernels.append(SquaredExponential(k[\"sigma\"], k[\"ls\"]))\n        else:\n            raise NotImplementedError(f\"{k['name']} kernel is not implemented\")\n\n    # Define descriptor calculators.\n    n_species = len(flare_config[\"species\"])\n    cutoff = flare_config[\"cutoff\"]\n    descriptors = []\n    cutoff_hyps = []  # defined before the loop so the TwoBody/ThreeBody/FourBody branches can use it even when no B2/B3 descriptor came first\n    for d in flare_config[\"descriptors\"]:\n        if \"cutoff_matrix\" in d: # multiple cutoffs\n            assert np.allclose(np.array(d[\"cutoff_matrix\"]).shape, (n_species, n_species)),\\\n                \"cutoff_matrix needs to be of shape (n_species, n_species)\"\n\n        if d[\"name\"] == \"B2\":\n            radial_hyps = [0.0, cutoff]\n            cutoff_hyps = []\n            descriptor_settings = [n_species, d[\"nmax\"], d[\"lmax\"]]\n            if \"cutoff_matrix\" in d: # multiple cutoffs\n                desc_calc = B2(\n                    d[\"radial_basis\"],\n                    d[\"cutoff_function\"],\n                    radial_hyps,\n                    cutoff_hyps,\n                    descriptor_settings,\n                    d[\"cutoff_matrix\"],\n                )\n            else:\n                desc_calc = B2(\n                    d[\"radial_basis\"],\n                    d[\"cutoff_function\"],\n                    radial_hyps,\n                    cutoff_hyps,\n                    descriptor_settings,\n                )\n\n        elif d[\"name\"] == \"B3\":\n            radial_hyps = [0.0, cutoff]\n            cutoff_hyps = []\n            descriptor_settings = [n_species, d[\"nmax\"], d[\"lmax\"]]\n            desc_calc = B3(\n                d[\"radial_basis\"],\n                d[\"cutoff_function\"],\n                radial_hyps,\n                cutoff_hyps,\n                descriptor_settings,\n            )\n\n        elif d[\"name\"] == \"TwoBody\":\n            desc_calc = TwoBody(cutoff, n_species, d[\"cutoff_function\"], cutoff_hyps)\n\n        elif d[\"name\"] == \"ThreeBody\":\n            desc_calc = 
ThreeBody(cutoff, n_species, d[\"cutoff_function\"], cutoff_hyps)\n\n elif d[\"name\"] == \"FourBody\":\n desc_calc = FourBody(cutoff, n_species, d[\"cutoff_function\"], cutoff_hyps)\n\n else:\n raise NotImplementedError(f\"{d['name']} descriptor is not supported\")\n\n descriptors.append(desc_calc)\n\n # Define remaining parameters for the SGP wrapper.\n species_map = {flare_config.get(\"species\")[i]: i for i in range(n_species)}\n sae_dct = flare_config.get(\"single_atom_energies\", None)\n if sae_dct is not None:\n assert n_species == len(\n sae_dct\n ), \"'single_atom_energies' should be the same length as 'species'\"\n single_atom_energies = {i: sae_dct[i] for i in range(n_species)}\n else:\n single_atom_energies = {i: 0 for i in range(n_species)}\n\n sgp = SGP_Wrapper(\n kernels=kernels,\n descriptor_calculators=descriptors,\n cutoff=cutoff,\n sigma_e=flare_config.get(\"energy_noise\"),\n sigma_f=flare_config.get(\"forces_noise\"),\n sigma_s=flare_config.get(\"stress_noise\"),\n species_map=species_map,\n variance_type=flare_config.get(\"variance_type\", \"local\"),\n single_atom_energies=single_atom_energies,\n energy_training=flare_config.get(\"energy_training\", True),\n force_training=flare_config.get(\"force_training\", True),\n stress_training=flare_config.get(\"stress_training\", True),\n max_iterations=max_iterations,\n opt_method=opt_algorithm,\n bounds=bounds,\n )\n\n flare_calc = SGP_Calculator(sgp, use_mapping)\n return flare_calc, kernels\n\n\ndef fresh_start_otf(config):\n \"\"\"\n Set up MD and OTF training engine\n \"\"\"\n\n super_cell = get_super_cell(config[\"supercell\"])\n dft_calc = get_dft_calc(config[\"dft_calc\"])\n flare_calc, kernels = get_flare_calc(config[\"flare_calc\"])\n otf_config = config.get(\"otf\")\n\n # intialize velocity\n # The \"file\" option uses the velocities read from the supercell file.\n initial_velocity = otf_config.get(\"initial_velocity\", \"file\")\n if initial_velocity != \"file\":\n # Otherwise, the initial_velocity is a number specifying the temperature\n # to initialize the velocity with Boltzmann distribution\n init_temp = float(initial_velocity)\n MaxwellBoltzmannDistribution(super_cell, init_temp * units.kB)\n Stationary(super_cell)\n ZeroRotation(super_cell)\n\n otf = OTF(\n super_cell,\n flare_calc=flare_calc,\n dft_calc=dft_calc,\n **otf_config,\n )\n\n otf.run()\n\n\ndef restart_otf(config):\n \"\"\"\n Set up MD and OTF training engine. 
Restart with checkpoint files\n \"\"\"\n\n otf_config = config.get(\"otf\")\n checkpoint = otf_config.get(\"checkpoint\")\n otf = OTF.from_checkpoint(checkpoint)\n\n # allow modification of some parameters\n for attr in [\n \"number_of_steps\",\n \"rescale_steps\",\n \"rescale_temps\",\n \"write_model\",\n \"freeze_hyps\",\n \"store_dft_output\",\n ]:\n if attr in otf_config:\n setattr(otf, attr, otf_config.get(attr))\n\n otf.run()\n\n\ndef main():\n with open(sys.argv[1], \"r\") as f:\n config = yaml.safe_load(f)\n\n mode = config.get(\"otf\").get(\"mode\", \"fresh\")\n if mode == \"fresh\":\n fresh_start_otf(config)\n elif mode == \"restart\":\n restart_otf(config)\n","repo_name":"mir-group/flare","sub_path":"flare/scripts/otf_train.py","file_name":"otf_train.py","file_ext":"py","file_size_in_byte":12701,"program_lang":"python","lang":"en","doc_type":"code","stars":252,"dataset":"github-code","pt":"83"} +{"seq_id":"44084192947","text":"import random\nimport torch\nimport numpy as np\n\ndef set_seeds(seed):\n np.random.seed(seed)\n random.seed(seed)\n torch.manual_seed(seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\n return [\n np.random.rand(1),\n torch.randn(1)\n ]","repo_name":"nv-tlabs/meta-sim","sub_path":"utils/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":279,"program_lang":"python","lang":"en","doc_type":"code","stars":167,"dataset":"github-code","pt":"83"} +{"seq_id":"33737678311","text":"from absradiooption import AbstractRadio\nfrom workmode import WORK_MODE, WorkMode\nfrom ProjectConstants import *\nfrom clickable import Clickable\nfrom printable import Printable\n\nclass RadioButton(AbstractRadio, Clickable):\n\n class CircleOption(AbstractRadio.Option):\n\n def __init__(self, name : str, mode, x_pos : int, y_pos : int, button_size = radio_button_size):\n super().__init__(name, mode, x_pos, y_pos)\n self.button_size = button_size\n \n def is_in(self, x : int, y : int):\n return ((x - self.x_pos) ** 2 + (y - self.y_pos) ** 2) <= self.button_size ** 2\n\n def blit(self, chosen : bool, x : int, y : int):\n\n color = WHITE\n if self.is_in(x, y):\n color = darken(color)\n pygame.draw.circle(Printable.surface, color,(self.x_pos,self.y_pos), self.button_size)\n\n text = Printable.font.render(self.opt_name, True, WHITE, BLACK)\n text_rect = text.get_rect()\n text_rect.center = (self.x_pos , self.y_pos)\n text_rect.left = self.x_pos + self.button_size * 1.5\n Printable.surface.blit(text, text_rect)\n\n if chosen:\n pygame.draw.circle(Printable.surface, BLACK,(self.x_pos,self.y_pos), self.button_size * radio_inner_circle_factor)\n\n\n class RectOption(AbstractRadio.Option):\n\n def __init__(self, name : str, mode, x_pos : int, y_pos : int, rect : pygame.Rect):\n super().__init__(name, mode, x_pos, y_pos)\n self.rect = rect\n \n def is_in(self, x : int, y : int):\n return self.rect.collidepoint(x, y)\n \n def blit(self, chosen : bool, x : int, y : int):\n color = WHITE\n \n if chosen:\n color = YELLOW\n\n if self.is_in(x, y):\n color = darken(color)\n\n pygame.draw.rect(Printable.surface, color, self.rect)\n\n txt = Printable.font.render(str(self.text), True, BLACK, color)\n text_rect = txt.get_rect()\n text_rect.center = self.rect.center\n Printable.surface.blit(txt, text_rect)\n\n\n def __init__(self, option_list = [], def_choice = -1, inactive_modes = [WorkMode.STERILE, WorkMode.PLAY]):\n super().__init__( option_list, def_choice)\n self.inactive_modes = inactive_modes\n Printable.PRINTABLES.append(self)\n 
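# the widget is registered with both global registries: Printable for drawing, Clickable for input\n        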
Clickable.CLICKABLES.append(self)\n \n def handle_click(self):\n if WORK_MODE.get_mode() not in self.inactive_modes :\n x, y = pygame.mouse.get_pos()\n for opt in self.options:\n if opt.is_in(x, y):\n self.choice = opt\n\n def blit(self):\n x,y = pygame.mouse.get_pos()\n for opt in self.options:\n opt.blit(self.choice == opt, x , y)\n \n\n","repo_name":"ItayHirschel/Path-Finding-Project","sub_path":"radiobutton.py","file_name":"radiobutton.py","file_ext":"py","file_size_in_byte":2848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"29397161481","text":"import pandas as pd\n\nfrom bs4 import BeautifulSoup\n\nwith open('index.html', 'r') as arquivo:\n conteudo = arquivo.read()\n\nsoup = BeautifulSoup(conteudo, 'html.parser')\n\ntabela = soup.find('table')\n\nlinhas = tabela.find_all('tr')\n\nprodutos = []\nprecos = []\n\nquantidades = []\nfaturamentos = []\n\nfor linha in linhas[1:]:\n\n colunas = linha.find_all('td')\n nome = colunas[0].get_text()\n\n preco = float(colunas[1].get_text())\n quantidade = int(colunas[2].get_text())\n faturamento = preco * quantidade\n\n produtos.append(nome)\n precos.append(preco)\n quantidades.append(quantidade)\n faturamentos.append(faturamento)\n\ndf = pd.DataFrame({'Produto': produtos, 'Preço': precos, 'Quantidade': quantidades, 'Faturamento': faturamentos})\n\nprint(df)","repo_name":"xDef4lt/lendo-tabelas-com-python","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"26256441144","text":"from region import Region\nimport random\nimport numpy as np\nfrom dataclasses import dataclass\n\n\n@dataclass\nclass Child:\n\n def __init__(self, cromossome, parentIndex):\n self.cromossome = cromossome\n self.parentIndex = parentIndex\n\nclass Cromossome:\n\n\n def __init__(self, region):\n self.size = region.getNumOfCities()\n self.defaultCromossome = []\n self.region = region\n\n for i in range(0, self.size):\n self.defaultCromossome.append(i+1)\n\n \n # create objective Function Value\n def objectiveFunction(self, cromossome, region):\n result = 0.\n size = len(cromossome)\n\n for genIndex in range(0, size-1):\n result += region.getDistance(cromossome[genIndex],\n cromossome[genIndex+1])\n\n return result\n\n # print cromossome\n def printCromossome(self, cromossome, region):\n # print(\"ALL CROMOSSOMES (PATH)\")\n for i in cromossome:\n print(i, end=\"-\")\n print() \n print(\"objective fuction value: {}\".format(self.objectiveFunction(cromossome, region)))\n\n # return one cromossome\n def createRandomCromossome(self):\n # random.seed(0)\n copy = self.defaultCromossome.copy()\n random.shuffle(copy)\n return copy\n\n\n # mutation method\n # probability - [0, 1]\n def mutation(self, cromossome, probability):\n copy_cromossome = cromossome.copy()\n size = len(cromossome)\n\n for genIndex in range(0, size):\n\n value = random.uniform(0, 1)\n\n if value <= probability:\n indexChange = int((random.uniform(0, 1) * 100) % size)\n \n copy_cromossome[genIndex], copy_cromossome[indexChange] = copy_cromossome[indexChange], copy_cromossome[genIndex]\n\n if self.objectiveFunction(copy_cromossome, self.region) < self.objectiveFunction(cromossome, self.region):\n cromossome = copy_cromossome\n\n\n return cromossome\n\n \n\n\n # crossover method between two cromossome\n def crossover(self, cromossomeA, cromossomeB, pcross):\n size = len(cromossomeA)\n child = np.zeros(size, 
dtype=np.int8)\n        indexC = 0\n        indexF = 0\n        \n        # before the cross point\n        for indexP in range(0, pcross):\n            child[indexC] = cromossomeA[indexP] \n            indexC += 1\n            indexF = indexP\n\n        indexF += 1\n        # after the cross point\n        while indexC < size:\n            gen = cromossomeB[int(indexF % size)]\n            \n            # '==' rather than 'is': identity comparison against an int literal is unreliable\n            if np.where(child == gen)[0].shape[0] == 0:\n                child[indexC] = gen\n                indexC += 1\n            indexF += 1\n\n        return child\n\n\n\n\n","repo_name":"neemiasbsilva/tsp-genetic-algorithm","sub_path":"cromossome.py","file_name":"cromossome.py","file_ext":"py","file_size_in_byte":2767,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"38867209721","text":"import consts\nimport game_field\nimport TP\n\n\n# parameters: field\n# return: list of the soldier position\n# this function returns the positions of the soldier in the field\ndef get_soldier_position(field):\n    found_soldier = []\n    tmp_list = []\n    for row in range(len(field)):\n        for col in range(len(field[row])):\n            if field[row][col] == consts.SOLDIER:\n                tmp_list = [row, col]\n                found_soldier.append(tmp_list)\n    return found_soldier\n\n\n\n\ndef chose_direction(direct):\n    if direct==consts.LEFT:\n        return (0,-1)\n    elif direct==consts.RIGHT:\n        return (0,1)\n    elif direct==consts.DOWN:\n        return (1,0)\n    elif direct==consts.UP:\n        return (-1,0)\n\n\ndef actual_move(field,soldier_position,pos):\n    new_pos = []\n    for i in range(len(soldier_position)):\n        new_pos.append([soldier_position[i][0] + pos[0], soldier_position[i][1] + pos[1]])\n\n    for i in range(len(soldier_position)):\n        field[soldier_position[i][0]][soldier_position[i][1]] = consts.EMPTY\n    for i in range(len(soldier_position)):\n        field[new_pos[i][0]][new_pos[i][1]] = consts.SOLDIER\n\n\ndef move(field,direction,field_copy):\n    pos=chose_direction(direction)\n    count = 0\n    soldier_position = get_soldier_position(field)\n    stat=\"\"\n    for i in soldier_position:\n        next_position_info = field_copy[i[0] + pos[0]][i[1]+pos[1]]\n        if 0 <= i[0] + pos[0] < consts.BOARD_GRID_ROW and 0 <= i[1]+pos[1] < consts.BOARD_GRID_COLS:\n            if count >= 6:\n                if next_position_info == consts.MINE:\n                    stat = consts.SOLDIER_MINE_HIT\n                    break\n                if next_position_info == consts.TELEPORT:\n                    stat = consts.SOLDIER_TELEPORT\n                    break\n\n                if next_position_info == consts.FLAG:\n                    stat = consts.SOLDIER_FLAG_HIT\n                    break\n                if next_position_info == consts.GUARD:\n                    stat = consts.SOLDIER_GUARD_HIT\n                    break\n        else:\n            return consts.SOLDIER_OUT_OF_BOUNDS\n        count += 1\n\n    # go in reverse order to not change the position of the soldier\n\n    actual_move(field,soldier_position,pos)\n    if stat==\"\":\n        stat = consts.SOLDIER_MOVE\n    return stat\n\n\n# parameters: state, field, tp_list\n# return: None\n# this function checks the status of the soldier and changes the state accordingly\ndef check_soldier_status(state, field, tp_list):\n    if state[\"player_status\"] == consts.SOLDIER_MINE_HIT:\n        state[\"state\"] = consts.LOSE_STATE\n    elif state[\"player_status\"] == consts.SOLDIER_TELEPORT:\n        TP.teleport_the_player(field, tp_list)\n        state[\"state\"] = consts.RUNNING_STATE\n    elif state[\"player_status\"] == consts.SOLDIER_FLAG_HIT:\n        state[\"state\"] = consts.WIN_STATE\n    elif state[\"player_status\"] == consts.SOLDIER_OUT_OF_BOUNDS:\n        state[\"state\"] = consts.RUNNING_STATE\n    elif state[\"player_status\"] == consts.SOLDIER_MOVE:\n        state[\"state\"] = consts.RUNNING_STATE\n    elif state[\"player_status\"] == consts.SOLDIER_GUARD_HIT:\n        state[\"state\"] = consts.LOSE_STATE\n\n\ndef check_if_suldier_guard_hit(field,state):\n    if 
len(get_soldier_position(field)) < consts.SOLDIER_PIXALES:\n        state[\"state\"] = consts.LOSE_STATE\n        state[\"object_hitted\"]=consts.GUARD","repo_name":"forteen-14/finel_bagira","sub_path":"soldier.py","file_name":"soldier.py","file_ext":"py","file_size_in_byte":3342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"4812264168","text":"import psycopg2\nimport pandas as pd\nimport numpy as np\nfrom scipy import stats\nfrom scipy.stats import norm\nimport matplotlib.pyplot as plt\n\ndef access(dbname, user, edge_id, week_day):\n    conn = psycopg2.connect(\"dbname=\"+dbname+\" user=\"+user)\n    curs = conn.cursor()\n    curs.execute(\"SELECT * FROM public.speed_observations_june where avg_speed_ms < 100 and edge_id_gh=\"+edge_id+\" and day_of_week=\"+week_day+\" order by edge_id_gh\")\n    x= curs.fetchall()\n    df = pd.DataFrame(x, columns=['traj_id','edge','date','speed','week_day'])\n    return df\n\ndef access_hour(dbname, user, edge_id, week_day, min_hour, max_hour):\n    conn = psycopg2.connect(\"dbname=\"+dbname+\" user=\"+user)\n    curs = conn.cursor()\n    curs.execute(\"SELECT * FROM public.speed_observations_june where avg_speed_ms < 100 and edge_id_gh=\"+edge_id+\" and day_of_week=\"+week_day+\" and date_part('hour' , start_time) <\"+max_hour+\" and date_part('hour' , start_time)>=\"+min_hour+\" order by edge_id_gh\")\n    x= curs.fetchall()\n    df = pd.DataFrame(x, columns=['traj_id','edge','date','speed','week_day'])\n    return df\n\n\ndef plot_pdf(speed_df):\n    mean = np.mean(speed_df.speed)\n    std = np.std(speed_df.speed)\n    r = speed_df.speed\n    norm.pdf(r.unique(), loc=mean, scale=std)\n    count, bins, ignored = plt.hist(r, normed=True, bins=100)\n    plt.show()\n    plt.plot(bins, norm.pdf(bins, loc=mean, scale=std), 'r-', alpha=0.6, label='norm pdf')\n    plt.show()\n    \n    \ndef plot_pdf2(mean, std, bins):\n    plt.plot(bins, norm.pdf(bins, loc=mean, scale=std), alpha=0.6, label='norm pdf')\n    plt.show()\n    \n    \n\n\n\n\n\n","repo_name":"liviaalmada/jup-notebooks","sub_path":"road-network-analysis/fortaleza-dataset/speed_db.py","file_name":"speed_db.py","file_ext":"py","file_size_in_byte":1591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"23513953865","text":"\"\"\"This file is our main simulation file; it includes the set-up and the time loop.\"\"\"\n\nimport random\nfrom stockmarket.limitorderbook import *\nfrom stockmarket import setup, marketmechanisms\nfrom stockmarket.functions import div0\n\ndef stockMarketSimulation(seed,\n                          simulation_time,\n                          init_backward_simulated_time,\n                          number_of_agents,\n                          share_chartists,\n                          share_mean_reversion,\n                          amount_of_firms,\n                          initial_total_money,\n                          initial_profit,\n                          discount_rate,\n                          init_price_to_earnings_window,\n                          order_expiration_time,\n                          agent_order_price_variability,\n                          agent_order_variability,\n                          agent_ma_short,\n                          agent_ma_long,\n                          agents_hold_thresholds,\n                          agent_volume_risk_aversion,\n                          agent_propensity_to_switch,\n                          firm_profit_mu,\n                          firm_profit_delta,\n                          firm_profit_sigma,\n                          profit_announcement_working_days,\n                          mean_reversion_memory_divider,\n                          printProgress=False):\n    \"\"\"Returns a set of agents at time stockMarketSimulationParameterSet['simulation_time'] and the values\n    of their state variables for every time step in stockMarketSimulationParameterSet['simulation_time'].\n\n    Arguments\n    ----------\n    seed : integer\n        Seed used to set the pseudo number generator for experiment reproduction\n    simulation_time : integer\n        Amount of periods over which the simulation takes 
place\n init_backward_simulated_time: integer\n Amount of pre-simulated periods\n amount_fundamentalists: integer\n Sets the amount of fundamentalist type agents\n amount_chartists : integer\n Sets the amount of chartist type agents\n amount_firms: integer\n Sets the amount of firms\n initial_money: tuple\n Integer range (low, high) for the initial amount of money for every trader\n initial_bid_ask: tuple\n Integer range (Low, high) for the initial bid-ask spread for every trader\n initial_memory: tuple\n Integer range (Low, high) for the initial memory size for every trader\n initial_ma_short: tuple\n Integer range (Low, high) for the MA memory size for every trader who uses the chartist strategy\n initial_ma_long: tuple\n Integer range (Low, high) for the MA memory size for every trader who uses the chartist strategy\n initial_profit: tuple\n Integer range (Low, high) for the initial profit for every firm\n initial_book_value: tuple\n Integer range (Low, high) for the initial book_value for every firm\n initial_stock_amount: tuple\n Integer range (Low, high) for the initial amount of stocks distributed for every firm\n observable_set_size: integer\n The amount of suppliers observable in the market mechanism\n record_data: boolean\n This can be switched of for bigger models so that no data is recorder\n database_name: string\n Name of the database saved\n\n Returns\n -------\n list\n agents, firms, stocks, and orderbooks\n \"\"\"\n\n random.seed(seed)\n np.random.seed(seed)\n\n \"\"\"\n Setup\n \"\"\"\n\n # divide total money into (approximately) money per agent\n initial_money = (int(initial_total_money[0] / number_of_agents), int(initial_total_money[1] / number_of_agents))\n # calculate the amount of different types of traders\n amount_technical_traders = int(number_of_agents * (share_chartists))\n amount_noise_traders = number_of_agents - amount_technical_traders\n amount_mean_reversion = int(share_mean_reversion * amount_technical_traders)\n amount_momentum = amount_technical_traders - amount_mean_reversion\n\n agents = setup.setup_agents_with_noise_traders(init_money=initial_money,\n init_bid_ask_spread=agent_order_price_variability,\n init_ma_s=agent_ma_short,\n init_ma_l=agent_ma_long,\n trader_volume_risk_aversion=agent_volume_risk_aversion,\n momentum_traders=amount_momentum,\n reversion_traders=amount_mean_reversion,\n noise_traders=amount_noise_traders,\n init_propensity_to_switch=agent_propensity_to_switch,\n init_price_to_earnings_window=init_price_to_earnings_window)\n\n\n firms = setup.setup_firms(init_book_value=(10000,10000),\n init_profit=initial_profit,\n firm_profit_mu=firm_profit_mu,\n firm_profit_delta=firm_profit_delta,\n firm_profit_sigma=firm_profit_sigma,\n backward_simulated_time=init_backward_simulated_time,\n amount_of_firms=amount_of_firms\n )\n\n # initialise the amount of stocks so that buy and sell orders are roughly equal for noise trader baseline\n init_firm_value = initial_profit[1] / discount_rate\n initial_stock_amount = np.sqrt(initial_total_money[1] * init_firm_value * number_of_agents)\n stocks = setup.setup_stocks(firms, amount=initial_stock_amount)\n\n order_books = []\n for stock in stocks:\n order_books.append(LimitOrderBook(stock, stock.price_history[-1], order_expiration_time, agent_order_price_variability))\n\n setup.distribute_initial_stocks(stocks, agents)\n\n # TODO fill market returns history with stock market prices\n #previous_period_stock_prices = stock.price_history[-(profit_announcement_working_days + 1):]\n #market_returns_history 
= list(np.diff(previous_period_stock_prices))\n market_returns_history = []\n\n \"\"\"\n Simulation\n\n Process overview and scheduling from the ODD\n 1. Update profits\n 2. Update expected price and spread\n 3. Market mechanism\n 4. Store market prices t-1\n \"\"\"\n\n for day in range(simulation_time):\n if printProgress:\n print('period: ', day)\n # 1 update profits after a number of working days or update profit history with the current profit\n for firm in firms:\n if day % profit_announcement_working_days == 0:\n profit = firm.determine_profit()\n else:\n profit = firm.profit\n firm.update_profits(profit)\n\n # 2-3 continuous double auction market mechanism - market maker quotes, traders trade\n market_returns = []\n\n for idx, stock in enumerate(stocks):\n current_market_price = stock.price_history[-1]\n earnings_per_stock = stock.firm.profit / stock.amount\n current_price_to_earnings_ratio = current_market_price / earnings_per_stock\n stock.price_to_earnings_history.append(current_price_to_earnings_ratio)\n agents, stock, order_books[idx] = marketmechanisms.continuous_double_auction(agents, stock,\n order_books[idx],\n marketmechanisms.orders_based_on_sentiment_and_fundamentals,\n agents_hold_thresholds, agent_order_variability,\n current_price_to_earnings_ratio, mean_reversion_memory_divider)\n current = stock.price_history[-1]\n previous = stock.price_history[-2]\n diff = div0((current - previous), previous) if current != 0 else 0.0\n\n market_returns.append(diff)\n\n av_market_return = np.mean(market_returns)\n market_returns_history.append(av_market_return)\n\n # 4 record and update variables + switching strategies\n for agent in agents:\n # record agent stocks\n agent.portfolio_history.append(agent.stocks.copy())\n # evaluate and record agent returns\n money = agent.money\n portfolio_value = 0\n for stock in stocks:\n portfolio_value += agent.stocks[stock] * stock.price_history[-1]\n income = money - agent.money_history[-1] + portfolio_value - agent.portfolio_value_history[-1]\n average_total_assets = np.mean([money,agent.money_history[-1]]) + \\\n np.mean([portfolio_value, agent.portfolio_value_history[-1]])\n agent.return_on_assets.append(income / average_total_assets)\n agent.money_history.append(money)\n agent.portfolio_value_history.append(portfolio_value)\n agent.function_history.append(agent.function)\n # 4 update strategies\n if (day % profit_announcement_working_days == 0) and (day != 0):\n av_market_return_previous_period = np.mean(market_returns_history[-profit_announcement_working_days:])\n agent.update_strategy(av_market_return_previous_period, profit_announcement_working_days)\n\n\n return agents, firms, stocks, order_books\n\n\n\n\n\n\n\n","repo_name":"Zarate1997/abm","sub_path":"stockmarket/baselinemodel.py","file_name":"baselinemodel.py","file_ext":"py","file_size_in_byte":9698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"83"} +{"seq_id":"4311574546","text":"from src.util.tree.builders import node_tree_from_sequence\nfrom src.util.tree.cnf import binarization, revert_binarization\nfrom src.util.tree.node import Node\n\n\ndef write_tree(node: Node) -> str:\n \"\"\"\n Transform tree to a sequence of tags.\n :param node: Tree's head\n :return : A string representation of the parse tree in bracketed notation.\n \"\"\"\n if not node.children:\n return node.tag\n return \"({} {})\".format(node.tag, \" \".join([write_tree(child) for child in node.children]))\n\n\nif __name__ == '__main__':\n sent = \"(TOP (FRAGQ (NP (WDT AIZH) 
(NP (NN PCWEIM))) (yyQM yyQM)))\"\n # sent = \"(TOP (S (yyQUOT yyQUOT) (S (VP (VB THIH)) (NP (NN NQMH)) (CC W) (ADVP (RB BGDWL))) (yyDOT yyDOT)))\"\n head = node_tree_from_sequence(sent)\n print(sent)\n orig = write_tree(head)\n print(write_tree(head))\n binarization(head,2,2)\n reversed_tree = write_tree(head)\n print(write_tree(head))\n revert_binarization(head)\n reverted = write_tree(head)\n print(write_tree(head))\n assert reverted == orig\n assert orig != reversed_tree\n","repo_name":"dannykh/PCFG_mmn13_openu","sub_path":"src/util/tree/writer.py","file_name":"writer.py","file_ext":"py","file_size_in_byte":1079,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"18389288737","text":"from sqlalchemy.orm import Session\n\nimport models, schemas\n\ndef get_persons(db: Session, skip: int = 0, limit: int = 100):\n return (\n db.query(models.Person)\n .order_by(models.Person.id)\n .offset(skip)\n .limit(limit)\n .all()\n )\n\ndef create_person(db: Session, person: schemas.PersonCreate):\n db_person = models.Person(**person.dict())\n db.add(db_person)\n db.commit()\n db.refresh(db_person)\n return db_person\n","repo_name":"Kanchan-Wakchaure/FastAPI-python-mssql-tutorial","sub_path":"crud.py","file_name":"crud.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"9096804788","text":"from typing import Tuple\nimport torch\nfrom torch import nn, Tensor\nimport torch.nn.functional as F\nimport math\nimport numpy as np\nimport random\n\nclass ArcFace(nn.Module):\n def __init__(self, emdsize: int ,class_num: int, s: float, m:float, device: str, lst_file: str) -> None:\n super(ArcFace, self).__init__()\n self.class_num = class_num\n self.emdsize = emdsize\n self.s = s\n self.m = m\n self.cos_m = math.cos(m)\n self.sin_m = math.sin(m)\n self.th = math.cos(math.pi - m)\n self.mm = math.sin(math.pi - m) * m\n\n self.weight = nn.Parameter(torch.empty([self.class_num, self.emdsize], device=device))\n nn.init.xavier_uniform_(self.weight)\n\n self.CEL = torch.nn.CrossEntropyLoss()\n self.device = device\n\n self.genders = self.read_genders(lst_file)\n self.gender_vector = torch.ones([self.weight.shape[0],1]).to(self.device)\n self.gender_vector[self.genders==0] = -1\n self.idx = set(range(0,class_num))\n \n def read_genders(self, lst_file):\n genders = -1 * np.ones(self.class_num)\n with open(lst_file, 'r') as f:\n for line in f:\n _, iden, gender, _ = line.strip().split('\\t')\n iden = int(iden)\n gender = int(gender)\n genders[iden] = gender\n f.close()\n if (genders == -1).any():\n raise ValueError('Arcface loss initializing failed.')\n else:\n return genders\n \n def random_sample(self, num, iden_unique):\n if num - len(iden_unique) > 0:\n idx = random.sample(self.idx - iden_unique, num - len(iden_unique))\n else:\n idx = []\n idx = idx + list(iden_unique)\n return self.weight[idx], self.genders[idx]\n\n def forward(self, input: Tensor, label: Tensor) -> Tensor:\n\n normed_input = nn.functional.normalize(input, p=2, dim=1, eps=1e-12)\n # normed_weight = nn.functional.normalize(torch.cat([self.weight,self.gender_vector], dim=1), p=2, dim=1, eps=1e-12)\n normed_weight = nn.functional.normalize(self.weight, p=2, dim=1, eps=1e-12)\n\n cosine = F.linear(normed_input, normed_weight).clamp(-1, 1)\n\n one_hot = torch.zeros(cosine.size(), device=self.device)\n one_hot.scatter_(1, label.view(-1, 1), 1)\n one_hot = one_hot.type(dtype=torch.bool)\n 
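# the boolean mask selects, per sample, the cosine with its ground-truth class\n        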
cosine_t = cosine[one_hot]\n\n sine_t = torch.sqrt(1.0 - torch.pow(cosine_t, 2))\n phi_t = cosine_t * self.cos_m - sine_t * self.sin_m\n\n phi = torch.where(cosine_t > self.th, phi_t, cosine_t - self.mm)\n\n cosine[one_hot] = phi\n\n return self.CEL(self.s * cosine, label)\n","repo_name":"XudongOliverShen/2021-fair-representation","sub_path":"lib/VGGFace2/face_loss.py","file_name":"face_loss.py","file_ext":"py","file_size_in_byte":2697,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"83"} +{"seq_id":"25098347883","text":"import csv\nimport datetime\n\ndef main():\n content = get_csv_content()\n born_after = datetime.date(1999, 12, 31)\n print(count_people_born_after(content, born_after))\n for name in get_people_that_names_end_with(content, 'a'):\n print(name)\n\ndef get_csv_content():\n with open('test.csv', 'r') as file:\n reader = csv.reader(file)\n\n #skip header\n next(reader)\n content = list(reader)\n\n return content\n\ndef count_people_born_after(people_list, born_after):\n count = 0\n for person in people_list:\n person_birthday = datetime.datetime.strptime(person[2], '%d.%m.%Y').date()\n if(person_birthday > born_after):\n count += 1\n return count\n\ndef get_people_that_names_end_with(people_list, letter):\n names = []\n for person in people_list:\n if(person[0][-1:] == letter):\n names.append(person[0])\n names = list(set(names))\n return names\n\nif __name__ == '__main__':\n main()","repo_name":"PoliPyc/enigmaTasks","sub_path":"task1/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"14126225160","text":"#!/usr/bin/env python\nimport os\nimport matplotlib.pyplot as plt\n\neg={};\ncl={};\npw={};\nfiles= [f for f in os.listdir('.') if os.path.isfile(f) if 'medicion' in f]\nmaxEquipos=0;\nmaxPromedio=0;\nfor f in files:\n archivo=open(f,'r');\n archivo.readline();\n archivo.readline();\n print(archivo);\n promedio= float(archivo.readline().split(':')[1]);\n nombre=f.split('.')[1].split('_');\n modo=nombre[1];\n equipos=int(nombre[0]);\n if(maxEquipos nbacks:\n delete = len(backups) + 1 - nbacks\n delete = backups[:delete]\n for file in delete:\n os.unlink(os.path.join(dir, file[1]))\n\n def maybeOptimize(self):\n # have two weeks passed?\n if (intTime() - self.pm.profile['lastOptimize']) < 86400*14:\n return\n self.progress.start(label=_(\"Optimizing...\"), immediate=True)\n self.col.optimize()\n self.pm.profile['lastOptimize'] = intTime()\n self.pm.save()\n self.progress.finish()\n\n # State machine\n ##########################################################################\n\n def moveToState(self, state, *args):\n #print(\"-> move from\", self.state, \"to\", state)\n oldState = self.state or \"dummy\"\n cleanup = getattr(self, \"_\"+oldState+\"Cleanup\", None)\n if cleanup:\n cleanup(state)\n self.state = state\n runHook('beforeStateChange', state, oldState, *args)\n getattr(self, \"_\"+state+\"State\")(oldState, *args)\n if state != \"resetRequired\":\n self.bottomWeb.show()\n runHook('afterStateChange', state, oldState, *args)\n\n def _deckBrowserState(self, oldState):\n self.deckBrowser.show()\n\n def _colLoadingState(self, oldState):\n \"Run once, when col is loaded.\"\n self.enableColMenuItems()\n # ensure cwd is set if media dir exists\n self.col.media.dir()\n runHook(\"colLoading\", self.col)\n self.moveToState(\"overview\")\n\n def _selectedDeck(self):\n did = self.col.decks.selected()\n if not 
self.col.decks.nameOrNone(did):\n showInfo(_(\"Please select a deck.\"))\n return\n return self.col.decks.get(did)\n\n def _overviewState(self, oldState):\n if not self._selectedDeck():\n return self.moveToState(\"deckBrowser\")\n self.col.reset()\n self.overview.show()\n\n def _reviewState(self, oldState):\n self.reviewer.show()\n\n def _reviewCleanup(self, newState):\n if newState != \"resetRequired\" and newState != \"review\":\n self.reviewer.cleanup()\n\n def noteChanged(self, nid):\n \"Called when a card or note is edited (but not deleted).\"\n runHook(\"noteChanged\", nid)\n\n # Resetting state\n ##########################################################################\n\n def reset(self, guiOnly=False):\n \"Called for non-trivial edits. Rebuilds queue and updates UI.\"\n if self.col:\n if not guiOnly:\n self.col.reset()\n runHook(\"reset\")\n self.maybeEnableUndo()\n self.moveToState(self.state)\n\n def requireReset(self, modal=False):\n \"Signal queue needs to be rebuilt when edits are finished or by user.\"\n self.autosave()\n self.resetModal = modal\n if self.interactiveState():\n self.moveToState(\"resetRequired\")\n\n def interactiveState(self):\n \"True if not in profile manager, syncing, etc.\"\n return self.state in (\"overview\", \"review\", \"deckBrowser\")\n\n def maybeReset(self):\n self.autosave()\n if self.state == \"resetRequired\":\n self.state = self.returnState\n self.reset()\n\n def delayedMaybeReset(self):\n # if we redraw the page in a button click event it will often crash on\n # windows\n self.progress.timer(100, self.maybeReset, False)\n\n def _resetRequiredState(self, oldState):\n if oldState != \"resetRequired\":\n self.returnState = oldState\n if self.resetModal:\n # we don't have to change the webview, as we have a covering window\n return\n self.web.resetHandlers()\n self.web.onBridgeCmd = lambda url: self.delayedMaybeReset()\n i = _(\"Waiting for editing to finish.\")\n b = self.button(\"refresh\", _(\"Resume Now\"), id=\"resume\")\n self.web.stdHtml(\"\"\"\n
<center><div style=\"height: 100%%\">\n<div style=\"position:relative; vertical-align: middle;\">\n%s<br>\n%s</div></div></center>
\n\"\"\" % (i, b), css=self.sharedCSS)\n self.bottomWeb.hide()\n self.web.setFocus()\n self.web.eval(\"$('#resume').focus()\")\n\n # HTML helpers\n ##########################################################################\n\n sharedCSS = \"\"\"\nbody {\nbackground: #f3f3f3;\nmargin: 2em;\n}\nh1 { margin-bottom: 0.2em; }\n\"\"\"\n\n def button(self, link, name, key=None, class_=\"\", id=\"\", extra=\"\"):\n class_ = \"but \"+ class_\n if key:\n key = _(\"Shortcut key: %s\") % key\n else:\n key = \"\"\n return '''\n<button id=\"%s\" class=\"%s\" onclick=\"pycmd('%s');return false;\"\ntitle=\"%s\" %s>%s</button>''' % (\n id, class_, link, key, extra, name)\n\n # Main window setup\n ##########################################################################\n\n def setupMainWindow(self):\n # main window\n self.form = aqt.forms.main.Ui_MainWindow()\n self.form.setupUi(self)\n # toolbar\n tweb = aqt.webview.AnkiWebView()\n tweb.title = \"top toolbar\"\n tweb.setFocusPolicy(Qt.WheelFocus)\n self.toolbar = aqt.toolbar.Toolbar(self, tweb)\n self.toolbar.draw()\n # main area\n self.web = aqt.webview.AnkiWebView()\n self.web.title = \"main webview\"\n self.web.setFocusPolicy(Qt.WheelFocus)\n self.web.setMinimumWidth(400)\n # bottom area\n sweb = self.bottomWeb = aqt.webview.AnkiWebView()\n sweb.title = \"bottom toolbar\"\n sweb.setFocusPolicy(Qt.WheelFocus)\n # add in a layout\n self.mainLayout = QVBoxLayout()\n self.mainLayout.setContentsMargins(0,0,0,0)\n self.mainLayout.setSpacing(0)\n self.mainLayout.addWidget(tweb)\n self.mainLayout.addWidget(self.web)\n self.mainLayout.addWidget(sweb)\n self.form.centralwidget.setLayout(self.mainLayout)\n\n def closeAllCollectionWindows(self):\n return aqt.dialogs.closeAll()\n\n # Components\n ##########################################################################\n\n def setupSignals(self):\n signal.signal(signal.SIGINT, self.onSigInt)\n\n def onSigInt(self, signum, frame):\n # interrupt any current transaction and schedule a rollback & quit\n self.col.db.interrupt()\n def quit():\n self.col.db.rollback()\n self.close()\n self.progress.timer(100, quit, False)\n\n def setupProgress(self):\n self.progress = aqt.progress.ProgressManager(self)\n\n def setupErrorHandler(self):\n import aqt.errors\n self.errorHandler = aqt.errors.ErrorHandler(self)\n\n def setupAddons(self):\n import aqt.addons\n self.addonManager = aqt.addons.AddonManager(self)\n\n def setupThreads(self):\n self._mainThread = QThread.currentThread()\n\n def inMainThread(self):\n return self._mainThread == QThread.currentThread()\n\n def setupDeckBrowser(self):\n from aqt.deckbrowser import DeckBrowser\n self.deckBrowser = DeckBrowser(self)\n\n def setupOverview(self):\n from aqt.overview import Overview\n self.overview = Overview(self)\n\n def setupReviewer(self):\n from aqt.reviewer import Reviewer\n self.reviewer = Reviewer(self)\n\n # Syncing\n ##########################################################################\n\n def onSync(self, auto=False, reload=True):\n if not auto or (self.pm.profile['syncKey'] and\n self.pm.profile['autoSync'] and\n not self.safeMode):\n from aqt.sync import SyncManager\n if not self.unloadCollection():\n return\n # set a sync state so the refresh timer doesn't fire while deck\n # unloaded\n self.state = \"sync\"\n self.syncer = SyncManager(self, self.pm)\n self.syncer.sync()\n if reload:\n if not self.col:\n self.loadCollection()\n\n # Tools\n ##########################################################################\n\n def raiseMain(self):\n if not self.app.activeWindow():\n # make sure window is shown\n self.setWindowState(self.windowState() & 
~Qt.WindowMinimized)\n return True\n\n def setStatus(self, text, timeout=3000):\n self.form.statusbar.showMessage(text, timeout)\n\n def setupStyle(self):\n applyStyles(self)\n\n # Key handling\n ##########################################################################\n\n def setupKeys(self):\n self.keyHandler = None\n # debug shortcut\n self.debugShortcut = QShortcut(QKeySequence(\"Ctrl+Shift+;\"), self)\n self.debugShortcut.activated.connect(self.onDebug)\n\n def keyPressEvent(self, evt):\n # do we have a delegate?\n if self.keyHandler:\n # did it eat the key?\n if self.keyHandler(evt):\n return\n # run standard handler\n QMainWindow.keyPressEvent(self, evt)\n # check global keys\n key = str(evt.text())\n if key == \"d\":\n self.moveToState(\"deckBrowser\")\n elif key == \"s\":\n if self.state == \"overview\":\n self.col.startTimebox()\n self.moveToState(\"review\")\n else:\n self.moveToState(\"overview\")\n elif key == \"a\":\n self.onAddCard()\n elif key == \"b\":\n self.onBrowse()\n elif key == \"S\":\n self.onStats()\n elif key == \"y\":\n self.onSync()\n # temp accelerators to work around bug\n elif key == \"/\":\n self.onStudyDeck()\n elif key == \"f\":\n self.onCram()\n elif evt.matches(QKeySequence.Undo):\n self.onUndo()\n\n # App exit\n ##########################################################################\n\n def closeEvent(self, event):\n \"User hit the X button, etc.\"\n event.accept()\n self.onClose(force=True)\n\n def onClose(self, force=False):\n \"Called from a shortcut key. Close current active window.\"\n aw = self.app.activeWindow()\n if not aw or aw == self or force:\n self.unloadProfile(browser=False)\n self.app.closeAllWindows()\n else:\n aw.close()\n\n # Undo & autosave\n ##########################################################################\n\n def onUndo(self):\n n = self.col.undoName()\n if not n:\n return\n cid = self.col.undo()\n if cid and self.state == \"review\":\n card = self.col.getCard(cid)\n self.reviewer.cardQueue.append(card)\n else:\n tooltip(_(\"Reverted to state prior to '%s'.\") % n.lower())\n self.reset()\n self.maybeEnableUndo()\n\n def maybeEnableUndo(self):\n if self.col and self.col.undoName():\n self.form.actionUndo.setText(_(\"Undo %s\") %\n self.col.undoName())\n self.form.actionUndo.setEnabled(True)\n runHook(\"undoState\", True)\n else:\n self.form.actionUndo.setText(_(\"Undo\"))\n self.form.actionUndo.setEnabled(False)\n runHook(\"undoState\", False)\n\n def checkpoint(self, name):\n self.col.save(name)\n self.maybeEnableUndo()\n\n def autosave(self):\n saved = self.col.autosave()\n self.maybeEnableUndo()\n if saved:\n self.doGC()\n\n # Other menu operations\n ##########################################################################\n\n def onAddCard(self):\n aqt.dialogs.open(\"AddCards\", self)\n\n def onBrowse(self):\n aqt.dialogs.open(\"Browser\", self)\n\n def onEditCurrent(self):\n aqt.dialogs.open(\"EditCurrent\", self)\n\n def onDeckConf(self, deck=None):\n if not deck:\n deck = self.col.decks.current()\n if deck['dyn']:\n import aqt.dyndeckconf\n aqt.dyndeckconf.DeckConf(self, deck=deck)\n else:\n import aqt.deckconf\n aqt.deckconf.DeckConf(self, deck)\n\n def onOverview(self):\n self.col.reset()\n self.moveToState(\"overview\")\n\n def onStats(self):\n deck = self._selectedDeck()\n if not deck:\n return\n aqt.stats.DeckStats(self)\n\n def onPrefs(self):\n import aqt.preferences\n aqt.preferences.Preferences(self)\n\n def onNoteTypes(self):\n import aqt.models\n aqt.models.Models(self, self, fromMain=True)\n\n def 
onAbout(self):\n import aqt.about\n aqt.about.show(self)\n\n def onDonate(self):\n openLink(aqt.appDonate)\n\n def onDocumentation(self):\n openHelp(\"\")\n\n # Importing & exporting\n ##########################################################################\n\n def handleImport(self, path):\n import aqt.importing\n if not os.path.exists(path):\n return showInfo(_(\"Please use File>Import to import this file.\"))\n\n aqt.importing.importFile(self, path)\n\n def onImport(self):\n import aqt.importing\n aqt.importing.onImport(self)\n\n def onExport(self, did=None):\n import aqt.exporting\n aqt.exporting.ExportDialog(self, did=did)\n\n # Cramming\n ##########################################################################\n\n def onCram(self, search=\"\"):\n import aqt.dyndeckconf\n n = 1\n deck = self.col.decks.current()\n if not search:\n if not deck['dyn']:\n search = 'deck:\"%s\" ' % deck['name']\n decks = self.col.decks.allNames()\n while _(\"Filtered Deck %d\") % n in decks:\n n += 1\n name = _(\"Filtered Deck %d\") % n\n did = self.col.decks.newDyn(name)\n diag = aqt.dyndeckconf.DeckConf(self, first=True, search=search)\n if not diag.ok:\n # user cancelled first config\n self.col.decks.rem(did)\n self.col.decks.select(deck['id'])\n else:\n self.moveToState(\"overview\")\n\n # Menu, title bar & status\n ##########################################################################\n\n def setupMenus(self):\n m = self.form\n m.actionSwitchProfile.triggered.connect(lambda b: self.unloadProfile())\n m.actionImport.triggered.connect(self.onImport)\n m.actionExport.triggered.connect(self.onExport)\n m.actionExit.triggered.connect(self.close)\n m.actionPreferences.triggered.connect(self.onPrefs)\n m.actionAbout.triggered.connect(self.onAbout)\n m.actionUndo.triggered.connect(self.onUndo)\n m.actionFullDatabaseCheck.triggered.connect(self.onCheckDB)\n m.actionCheckMediaDatabase.triggered.connect(self.onCheckMediaDB)\n m.actionDocumentation.triggered.connect(self.onDocumentation)\n m.actionDonate.triggered.connect(self.onDonate)\n m.actionStudyDeck.triggered.connect(self.onStudyDeck)\n m.actionCreateFiltered.triggered.connect(self.onCram)\n m.actionEmptyCards.triggered.connect(self.onEmptyCards)\n m.actionNoteTypes.triggered.connect(self.onNoteTypes)\n\n def updateTitleBar(self):\n self.setWindowTitle(\"Anki\")\n\n # Auto update\n ##########################################################################\n\n def setupAutoUpdate(self):\n import aqt.update\n self.autoUpdate = aqt.update.LatestVersionFinder(self)\n self.autoUpdate.newVerAvail.connect(self.newVerAvail)\n self.autoUpdate.newMsg.connect(self.newMsg)\n self.autoUpdate.clockIsOff.connect(self.clockIsOff)\n self.autoUpdate.start()\n\n def newVerAvail(self, ver):\n if self.pm.meta.get('suppressUpdate', None) != ver:\n aqt.update.askAndUpdate(self, ver)\n\n def newMsg(self, data):\n aqt.update.showMessages(self, data)\n\n def clockIsOff(self, diff):\n diffText = ngettext(\"%s second\", \"%s seconds\", diff) % diff\n warn = _(\"\"\"\\\nIn order to ensure your collection works correctly when moved between \\\ndevices, Anki requires your computer's internal clock to be set correctly. 
\\\nThe internal clock can be wrong even if your system is showing the correct \\\nlocal time.\n\nPlease go to the time settings on your computer and check the following:\n\n- AM/PM\n- Clock drift\n- Day, month and year\n- Timezone\n- Daylight savings\n\nDifference to correct time: %s.\"\"\") % diffText\n showWarning(warn)\n self.app.closeAllWindows()\n\n # Count refreshing\n ##########################################################################\n\n def setupRefreshTimer(self):\n # every 10 minutes\n self.progress.timer(10*60*1000, self.onRefreshTimer, True)\n\n def onRefreshTimer(self):\n if self.state == \"deckBrowser\":\n self.deckBrowser.refresh()\n elif self.state == \"overview\":\n self.overview.refresh()\n\n # Permanent libanki hooks\n ##########################################################################\n\n def setupHooks(self):\n addHook(\"modSchema\", self.onSchemaMod)\n addHook(\"remNotes\", self.onRemNotes)\n addHook(\"odueInvalid\", self.onOdueInvalid)\n\n def onOdueInvalid(self):\n showWarning(_(\"\"\"\\\nInvalid property found on card. Please use Tools>Check Database, \\\nand if the problem comes up again, please ask on the support site.\"\"\"))\n\n # Log note deletion\n ##########################################################################\n\n def onRemNotes(self, col, nids):\n path = os.path.join(self.pm.profileFolder(), \"deleted.txt\")\n existed = os.path.exists(path)\n with open(path, \"ab\") as f:\n if not existed:\n f.write(b\"nid\\tmid\\tfields\\n\")\n for id, mid, flds in col.db.execute(\n \"select id, mid, flds from notes where id in %s\" %\n ids2str(nids)):\n fields = splitFields(flds)\n f.write((\"\\t\".join([str(id), str(mid)] + fields)).encode(\"utf8\"))\n f.write(b\"\\n\")\n\n # Schema modifications\n ##########################################################################\n\n def onSchemaMod(self, arg):\n return askUser(_(\"\"\"\\\nThe requested change will require a full upload of the database when \\\nyou next synchronize your collection. If you have reviews or other changes \\\nwaiting on another device that haven't been synchronized here yet, they \\\nwill be lost. 
Continue?\"\"\"))\n\n # Advanced features\n ##########################################################################\n\n def onCheckDB(self):\n \"True if no problems\"\n self.progress.start(immediate=True)\n ret, ok = self.col.fixIntegrity()\n self.progress.finish()\n if not ok:\n showText(ret)\n else:\n tooltip(ret)\n self.reset()\n return ret\n\n def onCheckMediaDB(self):\n self.progress.start(immediate=True)\n (nohave, unused, invalid) = self.col.media.check()\n self.progress.finish()\n # generate report\n report = \"\"\n if invalid:\n report += _(\"Invalid encoding; please rename:\")\n report += \"\\n\" + \"\\n\".join(invalid)\n if unused:\n if report:\n report += \"\\n\\n\\n\"\n report += _(\n \"In media folder but not used by any cards:\")\n report += \"\\n\" + \"\\n\".join(unused)\n if nohave:\n if report:\n report += \"\\n\\n\\n\"\n report += _(\n \"Used on cards but missing from media folder:\")\n report += \"\\n\" + \"\\n\".join(nohave)\n if not report:\n tooltip(_(\"No unused or missing files found.\"))\n return\n # show report and offer to delete\n diag = QDialog(self)\n diag.setWindowTitle(\"Anki\")\n layout = QVBoxLayout(diag)\n diag.setLayout(layout)\n text = QTextEdit()\n text.setReadOnly(True)\n text.setPlainText(report)\n layout.addWidget(text)\n box = QDialogButtonBox(QDialogButtonBox.Close)\n layout.addWidget(box)\n b = QPushButton(_(\"Delete Unused\"))\n b.setAutoDefault(False)\n box.addButton(b, QDialogButtonBox.ActionRole)\n b.clicked.connect(\n lambda c, u=unused, d=diag: self.deleteUnused(u, d))\n box.rejected.connect(diag.reject)\n diag.setMinimumHeight(400)\n diag.setMinimumWidth(500)\n restoreGeom(diag, \"checkmediadb\")\n diag.exec_()\n saveGeom(diag, \"checkmediadb\")\n\n def deleteUnused(self, unused, diag):\n if not askUser(\n _(\"Delete unused media?\")):\n return\n mdir = self.col.media.dir()\n for f in unused:\n path = os.path.join(mdir, f)\n if os.path.exists(path):\n send2trash(path)\n tooltip(_(\"Deleted.\"))\n diag.close()\n\n def onStudyDeck(self):\n from aqt.studydeck import StudyDeck\n ret = StudyDeck(\n self, dyn=True, current=self.col.decks.current()['name'])\n if ret.name:\n self.col.decks.select(self.col.decks.id(ret.name))\n self.moveToState(\"overview\")\n\n def onEmptyCards(self):\n self.progress.start(immediate=True)\n cids = self.col.emptyCids()\n if not cids:\n self.progress.finish()\n tooltip(_(\"No empty cards.\"))\n return\n report = self.col.emptyCardReport(cids)\n self.progress.finish()\n part1 = ngettext(\"%d card\", \"%d cards\", len(cids)) % len(cids)\n part1 = _(\"%s to delete:\") % part1\n diag, box = showText(part1 + \"\\n\\n\" + report, run=False,\n geomKey=\"emptyCards\")\n box.addButton(_(\"Delete Cards\"), QDialogButtonBox.AcceptRole)\n box.button(QDialogButtonBox.Close).setDefault(True)\n def onDelete():\n saveGeom(diag, \"emptyCards\")\n QDialog.accept(diag)\n self.checkpoint(_(\"Delete Empty\"))\n self.col.remCards(cids)\n tooltip(ngettext(\"%d card deleted.\", \"%d cards deleted.\", len(cids)) % len(cids))\n self.reset()\n box.accepted.connect(onDelete)\n diag.show()\n\n # Debugging\n ######################################################################\n\n def onDebug(self):\n d = self.debugDiag = QDialog()\n frm = aqt.forms.debug.Ui_Dialog()\n frm.setupUi(d)\n s = self.debugDiagShort = QShortcut(QKeySequence(\"ctrl+return\"), d)\n s.activated.connect(lambda: self.onDebugRet(frm))\n s = self.debugDiagShort = QShortcut(\n QKeySequence(\"ctrl+shift+return\"), d)\n s.activated.connect(lambda: self.onDebugPrint(frm))\n 
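# Usage sketch for the console below (these names are bound in onDebugRet: mw is this\n # window, card() returns the current review card's __dict__, pp is pprint.pprint), e.g.:\n # pp(card())\n # mw.reset()\n 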
d.show()\n\n def _captureOutput(self, on):\n mw = self\n class Stream(object):\n def write(self, data):\n mw._output += data\n if on:\n self._output = \"\"\n self._oldStderr = sys.stderr\n self._oldStdout = sys.stdout\n s = Stream()\n sys.stderr = s\n sys.stdout = s\n else:\n sys.stderr = self._oldStderr\n sys.stdout = self._oldStdout\n\n def _debugCard(self):\n return self.reviewer.card.__dict__\n\n def _debugBrowserCard(self):\n return aqt.dialogs._dialogs['Browser'][1].card.__dict__\n\n def onDebugPrint(self, frm):\n frm.text.setPlainText(\"pp(%s)\" % frm.text.toPlainText())\n self.onDebugRet(frm)\n\n def onDebugRet(self, frm):\n import pprint, traceback\n text = frm.text.toPlainText()\n card = self._debugCard\n bcard = self._debugBrowserCard\n mw = self\n pp = pprint.pprint\n self._captureOutput(True)\n try:\n exec(text)\n except:\n self._output += traceback.format_exc()\n self._captureOutput(False)\n buf = \"\"\n for c, line in enumerate(text.strip().split(\"\\n\")):\n if c == 0:\n buf += \">>> %s\\n\" % line\n else:\n buf += \"... %s\\n\" % line\n try:\n frm.log.appendPlainText(buf + (self._output or \"\"))\n except UnicodeDecodeError:\n frm.log.appendPlainText(_(\"\"))\n frm.log.ensureCursorVisible()\n\n # System specific code\n ##########################################################################\n\n def setupSystemSpecific(self):\n self.hideMenuAccels = False\n if isMac:\n # mac users expect a minimize option\n self.minimizeShortcut = QShortcut(\"Ctrl+M\", self)\n self.minimizeShortcut.activated.connect(self.onMacMinimize)\n self.hideMenuAccels = True\n self.maybeHideAccelerators()\n self.hideStatusTips()\n elif isWin:\n # make sure ctypes is bundled\n from ctypes import windll, wintypes\n _dummy = windll\n _dummy = wintypes\n\n def maybeHideAccelerators(self, tgt=None):\n if not self.hideMenuAccels:\n return\n tgt = tgt or self\n for action in tgt.findChildren(QAction):\n txt = str(action.text())\n m = re.match(\"^(.+)\\(&.+\\)(.+)?\", txt)\n if m:\n action.setText(m.group(1) + (m.group(2) or \"\"))\n\n def hideStatusTips(self):\n for action in self.findChildren(QAction):\n action.setStatusTip(\"\")\n\n def onMacMinimize(self):\n self.setWindowState(self.windowState() | Qt.WindowMinimized)\n\n # Single instance support\n ##########################################################################\n\n def setupAppMsg(self):\n self.app.appMsg.connect(self.onAppMsg)\n\n def onAppMsg(self, buf):\n if self.state == \"startup\":\n # try again in a second\n return self.progress.timer(1000, lambda: self.onAppMsg(buf), False)\n elif self.state == \"profileManager\":\n # can't raise window while in profile manager\n if buf == \"raise\":\n return\n self.pendingImport = buf\n return tooltip(_(\"Deck will be imported when a profile is opened.\"))\n if not self.interactiveState() or self.progress.busy():\n # we can't raise the main window while in profile dialog, syncing, etc\n if buf != \"raise\":\n showInfo(_(\"\"\"\\\nPlease ensure a profile is open and Anki is not busy, then try again.\"\"\"),\n parent=None)\n return\n # raise window\n if isWin:\n # on windows we can raise the window by minimizing and restoring\n self.showMinimized()\n self.setWindowState(Qt.WindowActive)\n self.showNormal()\n else:\n # on osx we can raise the window. 
on unity the icon in the tray will just flash.\n self.activateWindow()\n self.raise_()\n if buf == \"raise\":\n return\n # import\n self.handleImport(buf)\n\n # GC\n ##########################################################################\n # ensure gc runs in main thread\n\n def setupDialogGC(self, obj):\n obj.finished.connect(lambda o=obj: self.gcWindow(obj))\n\n def gcWindow(self, obj):\n obj.deleteLater()\n\n def disableGC(self):\n gc.collect()\n gc.disable()\n\n def doGC(self):\n assert not self.progress.inDB\n gc.collect()\n\n # Crash log\n ##########################################################################\n\n def setupCrashLog(self):\n p = os.path.join(self.pm.base, \"crash.log\")\n self._crashLog = open(p, \"ab\", 0)\n faulthandler.enable(self._crashLog)\n\n # Media server\n ##########################################################################\n # prevent malicious decks from accessing the local filesystem\n\n def setupMediaServer(self):\n self.mediaServer = aqt.mediasrv.MediaServer()\n self.mediaServer.start()\n\n def baseHTML(self):\n return '<base href=\"http://localhost:%d/\">' % self.mediaServer.port\n","repo_name":"lengxiaoxuan/test","sub_path":"aqt/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":39809,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"}
{"seq_id":"4894812300","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[3]:\n\n\nimport tensorflow as tf\nimport numpy as np\ntf.__version__\n\n# In[4]:\n\n\ntrain, _ = tf.keras.datasets.fashion_mnist.load_data()\nimages, labels = train\nx = images/255\n\ny=labels.astype(np.int32)\n\n# In[5]:\n\n\nfeature_columns = [tf.feature_column.numeric_column(\"x\", shape=[784])]\n\n# In[6]:\n\n\ntrain_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(x={\"x\":x},y=y,num_epochs=None,shuffle=True)\n\n# In[12]:\n\n\n#classifier = tf.estimator.LinearClassifier(feature_columns=feature_columns, n_classes=10)\n\nclassifier = tf.estimator.DNNClassifier(feature_columns=feature_columns,hidden_units=[500, 500, 500], n_classes=10)\n\n# In[13]:\n\n\nclassifier.train(input_fn=train_input_fn, steps=2000)\n\n# In[ ]:\n\n\n\n","repo_name":"RIMEL-UCA/RIMEL-UCA.github.io","sub_path":"chapters/2023/Qualité logicielle dans les notebooks Jupyter/assets/python-scripts/tf.estimator.py","file_name":"tf.estimator.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"85"}
{"seq_id":"5415321838","text":"from __future__ import print_function\nimport os\nimport logging\nimport numpy as np\nimport matplotlib\nfrom matplotlib import pyplot as plt\nimport morphsnakes as ms\nfrom morphsnakes import _curvop\nimport copy\nimport mcubes\nimport morphsnakes_aux_fns as msaux\nfrom scipy import ndimage as ndi\nimport h5py\ntry:\n from PIL import Image\nexcept:\n print('Could not import PIL. Tiff handling will be limited.')\n\n'''Extract levelset from 2d or volumetric data. This is a wrapper script for using the functions contained in the morphsnakes module.\n\n\ncopyright via MIT license\nNPMitchell 2019 npmitchell@kitp.ucsb.edu\n'''\n\n\n# in case you are running on a machine without a display, e.g. a server\nif os.environ.get('DISPLAY', '') == '':\n logging.warning('No display found. 
Using non-interactive Agg backend.')\n matplotlib.use('Agg')\n\nPATH_IMG_NODULE = 'images/mama07ORI.bmp'\nPATH_IMG_STARFISH = 'images/seastar2.png'\nPATH_IMG_LAKES = 'images/lakes3.jpg'\nPATH_IMG_CAMERA = 'images/camera.png'\nPATH_IMG_COINS = 'images/coins.png'\nPATH_ARRAY_CONFOCAL = 'images/confocal.npy'\n\n\ndef visual_callback_2d(background, fig=None):\n \"\"\"\n Returns a callback that can be passed as the argument `iter_callback`\n of `morphological_geodesic_active_contour` and\n `morphological_chan_vese` for visualizing the evolution\n of the levelsets. Only works for 2D images.\n\n Parameters\n ----------\n background : (M, N) array\n Image to be plotted as the background of the visual evolution.\n fig : matplotlib.figure.Figure\n Figure where results will be drawn. If not given, a new figure\n will be created.\n\n Returns\n -------\n callback : Python function\n A function that receives a levelset and updates the current plot\n accordingly. This can be passed as the `iter_callback` argument of\n `morphological_geodesic_active_contour` and\n `morphological_chan_vese`.\n\n \"\"\"\n\n # Prepare the visual environment.\n if fig is None:\n fig = plt.figure()\n fig.clf()\n ax1 = fig.add_subplot(1, 2, 1)\n ax1.imshow(background, cmap=plt.cm.gray)\n\n ax2 = fig.add_subplot(1, 2, 2)\n ax_u = ax2.imshow(np.zeros_like(background), vmin=0, vmax=1)\n plt.pause(0.001)\n\n def callback(levelset):\n\n if ax1.collections:\n del ax1.collections[0]\n ax1.contour(levelset, [0.5], colors='r')\n ax_u.set_data(levelset)\n fig.canvas.draw()\n plt.pause(0.001)\n\n return callback\n\n\ndef visual_callback_3d(fig=None, plot_each=1, show=False, save=True, impath=None, rmimages=True,\n comparison_mesh=None, fig2=None,\n img=None, axis_order='yxz', alpha=0.5, compare_mesh_slices=False, sz=5,\n thres=0.5, plot_diff=False, plot_mesh3d=False, labelcheckax=False):\n \"\"\"\n Returns a callback that can be passed as the argument `iter_callback`\n of `morphological_geodesic_active_contour` and\n `morphological_chan_vese` for visualizing the evolution\n of the levelsets. Only works for 3D images.\n\n Parameters\n ----------\n fig : matplotlib.figure.Figure\n Figure where results will be drawn. If not given, a new figure\n will be created.\n plot_each : positive integer\n The plot will be updated once every `plot_each` calls to the callback\n function.\n show : bool\n make the figure appear to show intermediate results\n save : bool\n save the intermediate results to disk\n impath : None or string\n path to the directory for saving images\n rmimages : bool\n Remove the images from disk after making them\n comparison_mesh : mesh or None\n A mesh to compare the intermediate results to, if desired\n fig2 : matplotlib.figure.Figure instance\n Second figure where results will be drawn. If not given, a new figure will be created if a mesh comparison is\n desired.\n plot_diff : bool\n plot the difference between current ls and previous\n\n\n Returns\n -------\n callback : Python function\n A function that receives a levelset and updates the current plot\n accordingly. 
This can be passed as the `iter_callback` argument of\n `morphological_geodesic_active_contour` and\n `morphological_chan_vese`.\n\n \"\"\"\n # Prepare the visual environment.\n if fig is None:\n fig = plt.figure(1)\n else:\n fig.clf()\n\n if fig2 is None:\n fig2 = plt.figure(2)\n else:\n fig2.clf()\n\n try:\n ax = fig.add_subplot(111, projection='3d')\n except ValueError:\n from mpl_toolkits.mplot3d import Axes3D\n ax = fig.add_subplot(111, projection='3d')\n\n if img is not None:\n # Also show cross sections of the volume\n ax2 = fig2.add_subplot(131)\n ax3 = fig2.add_subplot(132)\n ax4 = fig2.add_subplot(133)\n nx = int(np.shape(img)[0] * 0.5)\n ny = int(np.shape(img)[1] * 0.5)\n nz = int(np.shape(img)[2] * 0.5)\n xslice = img[nx, :, :]\n yslice = img[:, ny, :]\n zslice = img[:, :, nz]\n\n if plot_diff:\n fig3 = plt.figure(3)\n fig3.clf()\n # Also show cross sections of the difference in ls\n ax5 = fig3.add_subplot(131)\n ax6 = fig3.add_subplot(132)\n ax7 = fig3.add_subplot(133)\n # Get indices if not already done (will have been done if img is not None)\n if img is None:\n nx = int(np.shape(img)[0] * 0.5)\n ny = int(np.shape(img)[1] * 0.5)\n nz = int(np.shape(img)[2] * 0.5)\n\n counter = [-1]\n if show or save:\n def callback(levelset, ls_prev=None, force=False):\n \"\"\"\n\n Parameters\n ----------\n levelset\n ls_prev\n force : bool\n force callback to run without returning prematurely\n plot_mesh3d : bool\n\n Returns\n -------\n\n \"\"\"\n counter[0] += 1\n if (counter[0] % plot_each) != 0 and not force:\n return\n\n if ax.collections:\n del ax.collections[0]\n\n coords, triangles = mcubes.marching_cubes(levelset, 0.5)\n\n ################################################################################################\n # Plot the level set mesh in 3d space\n if plot_mesh3d:\n # Plot the level set\n ax.set_aspect('equal')\n ax.plot_trisurf(coords[:, 0], coords[:, 1], coords[:, 2], # change axes!!!!\n triangles=triangles, alpha=alpha)\n\n if comparison_mesh is not None:\n mmm = comparison_mesh\n ax.plot_trisurf(mmm.points[:, 1], mmm.points[:, 0], mmm.points[:, 2],\n triangles=mmm.triangles, alpha=0.3)\n ax.set_xlabel(r'x [$\\mu$m]')\n ax.set_ylabel(r'y [$\\mu$m]')\n ax.set_zlabel(r'z [$\\mu$m]')\n\n title = 'Morphological Chan-Vese level set'\n title += '\\n' + r'$t=$' + '{0:d}'.format(counter[0])\n ax.set_title(title)\n\n if save:\n if impath is not None and counter[0] == 0:\n # ensure the directory exists\n d = os.path.dirname(impath)\n if not os.path.exists(d):\n print('run_morphsnakes.py: creating dir: ', d)\n os.makedirs(d)\n\n # set axes equal\n limits = np.array([\n ax.get_xlim3d(),\n ax.get_ylim3d(),\n ax.get_zlim3d(),\n ])\n origin = np.mean(limits, axis=1)\n radius = 0.5 * np.max(np.abs(limits[:, 1] - limits[:, 0]))\n ax.set_xlim3d([origin[0] - radius, origin[0] + radius])\n ax.set_ylim3d([origin[1] - radius, origin[1] + radius])\n ax.set_zlim3d([origin[2] - radius, origin[2] + radius])\n\n # ax.view_init(0, 30)\n # imfn = impath + '{0:06d}'.format(counter[0]) + '.png'\n # print 'saving ', imfn\n # fig.savefig(imfn)\n ax.view_init(0, 0)\n imfn = impath + 'viewx_{0:06d}'.format(counter[0]) + '.png'\n print('saving ', imfn)\n fig.savefig(imfn)\n ax.view_init(0, 90)\n imfn = impath + 'viewy_{0:06d}'.format(counter[0]) + '.png'\n print('saving ', imfn)\n fig.savefig(imfn)\n ax.view_init(-90, 90)\n imfn = impath + 'viewz_{0:06d}'.format(counter[0]) + '.png'\n print('saving ', imfn)\n fig.savefig(imfn)\n\n # plt.show()\n # plt.close('all')\n ax.cla()\n # raise RuntimeError\n elif 
show:\n plt.pause(0.1)\n\n ################################################################################################\n # Show 2d slices of the 3D volume\n if img is not None:\n ax2.imshow(xslice.T)\n ax3.imshow(yslice.T)\n ax4.imshow(zslice.T)\n\n # Show the comparison mesh boundary\n ax2.set_aspect('equal')\n ax3.set_aspect('equal')\n ax4.set_aspect('equal')\n if compare_mesh_slices:\n mbxpts = np.where(np.abs(comparison_mesh.points[:, 0] - nx) < thres)[0]\n mbypts = np.where(np.abs(comparison_mesh.points[:, 1] - ny) < thres)[0]\n mbzpts = np.where(np.abs(comparison_mesh.points[:, 2] - nz) < thres)[0]\n ax2.scatter(comparison_mesh.points[mbxpts, 1], comparison_mesh.points[mbxpts, 2], s=sz, alpha=alpha)\n ax3.scatter(comparison_mesh.points[mbypts, 0], comparison_mesh.points[mbypts, 2], s=sz, alpha=alpha)\n ax4.scatter(comparison_mesh.points[mbzpts, 0], comparison_mesh.points[mbzpts, 1], s=sz, alpha=alpha)\n\n # Show the level set\n lsxpts = np.where(np.abs(coords[:, 0] - nx) < thres)[0]\n lsypts = np.where(np.abs(coords[:, 1] - ny) < thres)[0]\n lszpts = np.where(np.abs(coords[:, 2] - nz) < thres)[0]\n ax2.scatter(coords[lsxpts, 1], coords[lsxpts, 2], s=sz, alpha=alpha)\n ax3.scatter(coords[lsypts, 0], coords[lsypts, 2], s=sz, alpha=alpha)\n ax4.scatter(coords[lszpts, 0], coords[lszpts, 1], s=sz, alpha=alpha)\n ax2.set_xlabel('y')\n ax3.set_xlabel('x')\n ax4.set_xlabel('x')\n ax2.set_ylabel('z')\n ax3.set_ylabel('z')\n ax4.set_ylabel('y')\n # Remove tick labels if labelcheckax == False\n if not labelcheckax:\n for axx in [ax2, ax3, ax4]:\n axx.xaxis.set_ticks([])\n axx.yaxis.set_ticks([])\n\n title = 'Morphological Chan-Vese level set'\n title += '\\n' + r'$t=$' + '{0:d}'.format(counter[0])\n ax3.text(0.5, 0.9, title, va='center', ha='center', transform=fig2.transFigure)\n\n if save:\n print('impath = ', impath)\n if impath is not None and counter[0] == 0:\n # ensure the directory exists\n d = os.path.dirname(impath)\n if not os.path.exists(d):\n print('run_morphsnakes.py: creating dir: ', d)\n os.makedirs(d)\n\n ax2.set_aspect('equal')\n ax3.set_aspect('equal')\n imfn = impath + 'slices_{0:06d}'.format(counter[0]) + '.png'\n print('saving ', imfn)\n fig2.savefig(imfn, dpi=250)\n # plt.show()\n # plt.close('all')\n ax2.cla()\n ax3.cla()\n ax4.cla()\n # raise RuntimeError\n elif show:\n plt.pause(0.1)\n\n ################################################################################################\n # Plot difference in ls from previous timepoint\n if plot_diff and ls_prev is not None:\n lsxslice = levelset[nx, :, :]\n lsyslice = levelset[:, ny, :]\n lszslice = levelset[:, :, nz]\n lspx = ls_prev[nx, :, :]\n lspy = ls_prev[:, ny, :]\n lspz = ls_prev[:, :, nz]\n ax5.imshow(lsxslice - lspx)\n ax6.imshow(lsyslice - lspy)\n ax7.imshow(lszslice - lspz)\n\n # Show the comparison mesh boundary\n for axx in [ax5, ax6, ax7]:\n axx.set_aspect('equal')\n if not labelcheckax:\n axx.xaxis.set_ticks([])\n axx.yaxis.set_ticks([])\n\n # Show the level set\n ax5.set_xlabel('y')\n ax6.set_xlabel('x')\n ax7.set_xlabel('x')\n ax5.set_ylabel('z')\n ax6.set_ylabel('z')\n ax7.set_ylabel('y')\n\n title = r'Morphological Chan-Vese level set: $\\Delta u$'\n title += '\\n' + r'$t=$' + '{0:d}'.format(counter[0])\n ax5.text(0.5, 0.9, title, va='center', ha='center', transform=fig3.transFigure)\n\n for axx in [ax5, ax6, ax7]:\n axx.set_aspect('equal')\n\n if save:\n print('impath = ', impath)\n if impath is not None and counter[0] == 0:\n # ensure the directory exists\n d = os.path.dirname(impath)\n if not os.path.exists(d):\n print('run_morphsnakes.py: creating dir: ', d)\n os.makedirs(d)\n\n imfn = impath + 
'diff_slices_{0:06d}'.format(counter[0]) + '.png'\n print('saving ', imfn)\n fig3.savefig(imfn)\n for axx in [ax5, ax6, ax7]:\n axx.cla()\n\n elif show:\n plt.pause(0.1)\n\n else:\n print('Images will be neither saved nor shown, so passing empty definition for callback.')\n\n def callback(*args, **kwargs):\n counter[0] += 1\n if (counter[0] % plot_each) != 0:\n return\n else:\n print('counter = ' + str(counter[0]), end='\\r')\n\n return callback\n\n\ndef rgb2gray(img):\n \"\"\"Convert a RGB image to gray scale.\"\"\"\n return 0.2989 * img[..., 0] + 0.587 * img[..., 1] + 0.114 * img[..., 2]\n\n\ndef read_multipage_tiff(path):\n \"\"\"Load multipage tiff file as numpy array\n\n Parameters\n ----------\n path - Path to the multipage-tiff file\n\n Returns\n -------\n numpy array of the multipage tiff\n \"\"\"\n img = Image.open(path)\n images = []\n for i in range(img.n_frames):\n img.seek(i)\n images.append(np.array(img))\n return np.array(images)\n\n\ndef load_img(fn, channel, dset_name='exported_data', axis_order='xyzc'):\n \"\"\"Load a 2d or 3d grid of intensities from disk\n\n Parameters\n ----------\n fn : str\n the filename to load as an image\n channel : int\n Channel number to use in Chan-Vese from the h5 file\n dset_name : str\n The name of the dataset within the h5 file\n axis_order : str ('xyzc', 'xyz', or permutations of these characters)\n axis_order is the order in which they are STORED. This function resorts the axes to process the image as XYZ\n\n Returns\n -------\n img : numpy float array\n intensity values in 2d or 3d grid (dataset)\n \"\"\"\n print('loading ' + fn + ' with axis order ' + axis_order)\n if fn[-3:] == 'npy':\n if channel is not None:\n img = np.load(fn)[:, :, :, channel]\n else:\n # no channel specified: load the full array\n img = np.load(fn)\n\n elif fn[-3:] == 'tif':\n img = read_multipage_tiff(fn)\n print('Converting to float...')\n img = img.astype(float)\n\n elif fn[-2:] == 'h5':\n # filename = file_architecture.os_i(filename)\n if os.path.exists(fn):\n hfn = h5py.File(fn, 'r')\n else:\n print(\"File \" + fn + \" does not exist\")\n hfn = h5py.File(fn, 'w')\n\n # ilastik internally swaps axes. 
1: class, 2: y, 3: x 4 : z\n # so flip the axes to select channel, y, x, z\n if channel is None and len(np.shape(hfn[dset_name])) == 4:\n print('4D data but no channel specified, assuming channel is 1...')\n channel = 1\n\n print('size of data is ', np.shape(hfn[dset_name]))\n if channel is not None:\n if len(axis_order) == 3:\n img = hfn[dset_name][:, :, :]\n elif axis_order[3] == 'c':\n axis_order = axis_order[0:3]\n img = hfn[dset_name][:, :, :, channel]\n elif axis_order[0] == 'c':\n axis_order = axis_order[1:]\n img = hfn[dset_name][channel, :, :, :]\n elif axis_order[1] == 'c':\n # axis_order is a string, so drop the channel character by slicing\n axis_order = axis_order[0] + axis_order[2:]\n img = hfn[dset_name][:, channel, :, :]\n elif axis_order[2] == 'c':\n axis_order = axis_order[:2] + axis_order[3]\n img = hfn[dset_name][:, :, channel, :]\n else:\n raise RuntimeError(\"Cannot parse this axis order\")\n else:\n print('Converting h5 data into a numpy array')\n img = np.array(hfn[dset_name])\n print('Converting to float...')\n img = img.astype(float)\n\n # Now sort the spatial dimensions\n # Note: axis_order is the order in which they are STORED, but resort to process as XYZ\n if axis_order == 'xyz':\n pass\n elif axis_order == 'xzy':\n img = np.swapaxes(img, 1, 2)\n print(axis_order + '-> xyz', np.shape(img))\n elif axis_order == 'yzx':\n # make zyx, then make xyz\n img = np.swapaxes(img, 0, 1)\n img = np.swapaxes(img, 0, 2)\n print(axis_order + '-> xyz', np.shape(img))\n elif axis_order == 'yxz':\n img = np.swapaxes(img, 0, 1)\n print(axis_order + '-> xyz', np.shape(img))\n elif axis_order == 'zyx':\n img = np.swapaxes(img, 0, 2)\n print(axis_order + '-> xyz', np.shape(img))\n elif axis_order == 'zxy':\n # make yxz, then make xyz\n img = np.swapaxes(img, 0, 2)\n img = np.swapaxes(img, 0, 1)\n print(axis_order + '-> xyz', np.shape(img))\n else:\n raise RuntimeError(\"Did not recognize axis order:\" + axis_order)\n\n # plt.hist(img.ravel())\n # print(np.max(img.ravel()))\n # plt.savefig('/Users/npmitchell/Desktop/test.png')\n # raise RuntimeError('Exiting now')\n print('Constructing deep copy')\n img = copy.deepcopy(img)\n # file_is_open = True\n print('Closing the h5 file')\n hfn.close()\n else:\n raise RuntimeError('Could not find filename for img: ' + fn)\n\n return img\n\n\ndef extract_levelset(fn, iterations=150, smoothing=0, lambda1=1, lambda2=1, nu=None, post_smoothing0=0,\n post_smoothing=1, post_nu=1,\n channel=0, init_ls=None, show_callback=False, save_callback=False, exit_thres=5e-6,\n center_guess=None, radius_guess=None, impath=None, dset_name='exported_data',\n plot_each=5, comparison_mesh=None, axis_order='xyzc', plot_diff=True,\n plot_mesh3d=False, clip=None, clip_floor=None, labelcheckax=False, mask=None,\n volumetric=False, target_volume=1000, nu_max=5):\n \"\"\"Extract the level set from a 2d or 3d image.\n\n Parameters\n ----------\n fn : str\n path to the image (possibly 3D) to load. Can be h5, png, or npy file\n iterations : int\n How many iterations to perform\n nu : float or None, optional\n pressure to apply periodically. If float and < 1, applies one pressure step every 1/nu iterations\n smoothing : uint, optional\n Number of times the smoothing operator is applied per iteration.\n Reasonable values are around 1-4. Larger values lead to smoother\n segmentations.\n lambda1 : float, optional\n Weight parameter for the outer region. 
If `lambda1` is larger than\n `lambda2`, the outer region will contain a larger range of values than\n the inner region.\n lambda2 : float, optional\n Weight parameter for the inner region. If `lambda2` is larger than\n `lambda1`, the inner region will contain a larger range of values than\n the outer region.\n post_smoothing : int, optional\n channel : int or None\n If not None, select this channel (last index of the array) of the image to analyze.\n init_ls : int binary array of inside/outside in 3D or None\n Initial guess for the level set\n center_guess : length 3 float array\n Center of mass of the spherical guess if init_ls not supplied.\n radius_guess : float\n Radius of initial spherical guess if init_ls not supplied.\n (An argument for creating an initial guess via ms.circle_level_set() if init_ls not supplied)\n plot_each : int\n How often (in #iterations) to save a snapshot png of the morphological process.\n comparison_mesh : mesh.Mesh class instance or None\n If supplied, use this mesh (with attrs mesh.points and mesh.triangles) to compare to the morphsnakes output\n clip : float or None\n If not None, clip all values above this in the image on which to run morphsnakes\n clip_floor : float or None\n If not None, clip all values below this in the image on which to run morphsnakes\n\n Returns\n -------\n ls : L x W x H binary int array\n The output level set computed after iterations\n \"\"\"\n logging.info('Running MorphACWE...')\n\n # Load the image.\n print('loading ' + fn)\n img = load_img(fn, channel, dset_name=dset_name, axis_order=axis_order)\n\n if clip is not None:\n img[img > clip] = clip\n\n if clip_floor is not None:\n img[img < clip_floor] = clip_floor\n img -= clip_floor\n\n if mask is not None:\n img *= mask\n\n # Initialization of the level-set.\n if init_ls is None:\n print('No initial levelset supplied, creating a default sphere as initial condition...')\n if center_guess is None:\n print('No initial levelset position supplied, using center of data')\n center_guess = (np.shape(img)[0]*0.5, np.shape(img)[1]*0.5, np.shape(img)[2]*0.5)\n if radius_guess is None:\n print('No initial levelset radius supplied, using half minimum coordinate size')\n radius_guess = min(np.abs(center_guess)) * 0.5\n\n init_ls = ms.circle_level_set(img.shape, center_guess, radius_guess)\n\n # Callback for visual plotting\n print('Defining visual callback for 3d visualization')\n callback = visual_callback_3d(show=show_callback, save=save_callback,\n plot_each=plot_each, impath=impath, comparison_mesh=comparison_mesh,\n img=img, plot_diff=plot_diff,\n plot_mesh3d=plot_mesh3d, labelcheckax=labelcheckax)\n\n # Morphological Chan-Vese (or ACWE)\n print('Running morphological Chan-Vese (ACWE)')\n if volumetric:\n ls = ms.volumetric_morphological_chan_vese(img, iterations=iterations,\n init_level_set=init_ls,\n smoothing=smoothing, lambda1=lambda1, lambda2=lambda2, nu=nu,\n post_smoothing0=post_smoothing0,\n post_smoothing=post_smoothing, post_nu=post_nu,\n iter_callback=callback, exit_thres=exit_thres,\n volume0=target_volume, nu_max=nu_max)\n else:\n ls = ms.morphological_chan_vese(img, iterations=iterations,\n init_level_set=init_ls,\n smoothing=smoothing, lambda1=lambda1, lambda2=lambda2, nu=nu,\n post_smoothing0=post_smoothing0,\n post_smoothing=post_smoothing, post_nu=post_nu,\n iter_callback=callback, exit_thres=exit_thres)\n return ls\n\n\ndef load_data(fn, dataset_name=None):\n \"\"\"Load a numpy or hdf5 file from disk\n\n Parameters\n ----------\n fn : str\n filename of the\n\n 
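dataset_name : str or None\n name of the dataset to read out of the file, used when fn points to an hdf5 file\n\n 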
Returns\n -------\n data : numpy array\n dataset loaded from file\n \"\"\"\n # use the function arguments rather than the global argparse namespace\n if fn[-3:] == 'npy':\n data = np.load(fn)\n elif fn[-3:] in ['.h5', 'df5']:\n f = h5py.File(fn, 'r')\n data = f[dataset_name][:]\n f.close()\n return data\n\n\nif __name__ == '__main__':\n import mesh\n import argparse\n import glob\n \"\"\"Show example of finding mesh from raw image and initial guess.\n \n Example usage\n python run_morphsnakes.py \\\n -o /Users/npmitchell/Dropbox/Soft_Matter/UCSB/gut_morphogenesis/data/48Ygal4-UAShistRFP/201901021550_folded_2part/morphsnakes_testing/test_out.ply \\\n -i /Users/npmitchell/Dropbox/Soft_Matter/UCSB/gut_morphogenesis/data/48Ygal4-UAShistRFP/201901021550_folded_2part/Time_000001_c1_Probabilities.h5 \\\n -ofn_ply mesh_apical_ms \\\n -ofn_ls msls_apical_ -l1 1 -l2 1 -nu 2 -smooth 1 -postsmooth 1 -postnu 2 -n 35 -n0 100 -exit 5e-6\n \n python /mnt/data/code/gut_python/run_morphsnakes.py \\\n -dataset -o ./ -i ./ \\\n -ofn_ply mesh_apical_ms_ \\\n -ofn_ls msls_apical_ -l1 1 -l2 1 -nu 2 -smooth 1 -postnu 2 -postsmooth 1 -n 35 -n0 100 -exit 1e-6\n \n \n python run_morphsnakes.py \\\n -dataset -o /Users/npmitchell/Dropbox/Soft_Matter/UCSB/gut_morphogenesis/data/48Ygal4-UAShistRFP/201901021550_folded_2part/ \\\n -i /Users/npmitchell/Dropbox/Soft_Matter/UCSB/gut_morphogenesis/data/48Ygal4-UAShistRFP/201901021550_folded_2part/ \\\n -ofn_ply mesh_apical_ms_ \\\n -ofn_ls msls_apical_ -l1 1 -l2 1 -nu 2 -smooth 1 -postnu 2 -postsmooth 1 -n 35 -n0 100 -exit 1e-6\n \n python run_morphsnakes.py -dataset \\\n -o /Users/npmitchell/Dropbox/Soft_Matter/UCSB/gut_morphogenesis/data/48Ygal4-UAShistRFP/201901021550_folded_2part/ \\\n -i /Users/npmitchell/Dropbox/Soft_Matter/UCSB/gut_morphogenesis/data/48Ygal4-UAShistRFP/201901021550_folded_2part/ \\\n -ofn_ply mesh_apical_ms_ \\\n -ofn_ls msls_apical_ -l1 1 -l2 1 -nu 2 -smooth 1 -postsmooth 5 -postnu 5 -n 26 -n0 76 -exit 5e-6\n \n \n python run_morphsnakes.py -dataset \\\n -o /Users/npmitchell/Dropbox/Soft_Matter/UCSB/gut_morphogenesis/data/48Ygal4-UAShistRFP/201901021550_folded_2part/ \\\n -i /Users/npmitchell/Dropbox/Soft_Matter/UCSB/gut_morphogenesis/data/48Ygal4-UAShistRFP/201901021550_folded_2part/ \\\n -ofn_ply mesh_apical_ms_ \\\n -ofn_ls msls_apical_ -l1 1 -l2 1 -nu 0.1 -smooth 0.3 -postsmooth 3 -postnu 2 -n 25 -n0 76 -exit 5e-6\n \n \n python /mnt/data/code/morphsnakes_wrapper/morphsnakes_wrapper/run_morphsnakes.py -i \\\n Time_000104_c1_stab_Probabilities.h5 \\\n -init_ls msls_apical_stab_000103.h5 -o \\\n /mnt/crunch/48YGal4UasLifeActRuby/201904021800_great/Time6views_60sec_1p4um_25x_1p0mW_exp0p150_3/data/deconvolved_16bit/msls_output/ \\\n -prenu -4 -presmooth 1 -ofn_ply mesh_apical_ms_stab_000104.ply -ofn_ls \\\n msls_apical_stab_000104.h5 -l1 1 -l2 1 -nu 0.1 -postnu 2 -channel 1 -smooth 0.3 -postsmooth 4 \\\n -exit 0.000100000 -channel 0 -dtype h5 -permute zyxc -ss 4 -include_boundary_faces -save \\\n -center_guess 200,75,75 -volumetric -rad0 40 -n 15\n \n cd morphsnakes_wrapper/\n datDir=\"/mnt/crunch/48YGal4UasLifeActRuby/201904021800_great/Time6views_60sec_1p4um_25x_1p0mW_exp0p150_3/data/deconvolved_16bit/\"\n mslsDir=\"${datDir}msls_output/\"\n idx=$(printf \"%03d\" $(( num )))\n prev=$(printf \"%03d\" $(( num-1 )))\n python /mnt/data/code/morphsnakes_wrapper/morphsnakes_wrapper/run_morphsnakes.py -i \\\n ${datDir}Time_000${idx}_c1_stab_Probabilities.h5 \\\n -init_ls ${mslsDir}msls_apical_stab_000${prev}.h5 \\\n -o $mslsDir \\\n -prenu -4 -presmooth 1 -ofn_ply 
mesh_apical_ms_stab_000${idx}.ply -ofn_ls \\\n msls_apical_stab_000${idx}.h5 -l1 1 -l2 1 -nu 4 -postnu 2 -channel 1 -smooth 0.3 -postsmooth 3 \\\n -exit 0.000100000 -channel 0 -dtype h5 -permute zyxc -ss 4 -include_boundary_faces -save \\\n -center_guess 200,75,75 -volumetric -rad0 40 -n 15\n \n python ./run_morphsnakes.py -i \\\n ${datDir}Time_000104_c1_stab_Probabilities.h5 \\\n -init_ls ${mslsDir}msls_initguess.h5 -o $mslsDir \\\n -prenu -4 -presmooth 1 -ofn_ply mesh_apical_ms_stab_000104.ply -ofn_ls \\\n msls_apical_stab_000104.h5 -l1 1 -l2 1 -nu 0.1 -postnu 2 -channel 1 -smooth 0.3 -postsmooth 3 \\\n -exit 0.000100000 -channel 0 -dtype h5 -permute zyxc -ss 4 -include_boundary_faces -save \\\n -center_guess 200,75,75 -volumetric -rad0 40 -n 15\n\n \n \"\"\"\n parser = argparse.ArgumentParser(description='Compute level set using Morphological Chan-Vese ACWE.')\n\n # Procedure options\n parser.add_argument('-dataset', '--dataset', help='Turn hdf5 dataset sequence into level sets', action='store_true')\n parser.add_argument('-sweep', '--parameter_sweep', help='Sweep over nu and smooth to compare', action='store_true')\n\n # Parameter options\n parser.add_argument('-i', '--input', help='Path to folder or file to read hdf5 probabilities. ' +\n 'If dataset == True, then this is dir, else is filename.',\n type=str, default='./')\n parser.add_argument('-clip', '--clip', help='Maximum intensity value for the image before evolution, if positive',\n type=float, default=-1)\n parser.add_argument('-clip_floor', '--clip_floor',\n help='Minimum intensity value for the image before evolution, if positive',\n type=float, default=-1)\n parser.add_argument('-channel', '--channel', help='Which channel of the loaded data/image to use',\n type=int, default=0)\n parser.add_argument('-o', '--outputdir', help='Path to folder to which to write meshes',\n type=str, default='./morphsnakes_output/')\n parser.add_argument('-ofn_ply', '--outputfn_ply', help='Name of file in output dir to write ply',\n type=str, default='mesh_apical_ms_')\n parser.add_argument('-ofn_ls', '--outputfn_ls', help='Name of file in output dir to write level set as numpy array',\n type=str, default='msls_apical_')\n parser.add_argument('-init_ls', '--init_ls_fn', help='Path to numpy file to use as initial level set, if any',\n type=str, default='empty_string')\n parser.add_argument('-prenu', '--init_ls_nu',\n help='Number of dilation (nu > 0) or erosion (nu < 0) passes before passing init_ls to MCV',\n type=int, default=-8)\n parser.add_argument('-presmooth', '--pre_smoothing',\n help='Number of smoothing passes before passing init_ls to MCV', type=int, default=0)\n parser.add_argument('-l1', '--lambda1',\n help='Weight parameter for the outer region. If `lambda1` is larger than `lambda2`, the outer '\n 'region will contain a larger range of values than the inner region',\n type=float, default=1)\n parser.add_argument('-l2', '--lambda2',\n help='Weight parameter for the inner region. If `lambda2` is larger than '\n '`lambda1`, the inner region will contain a larger range of values than '\n 'the outer region.', type=float, default=2)\n parser.add_argument('-nu', '--nu',\n help='If not None and nonzero, applies pressure to the surface. If negative, ' +\n 'applies negative pressure at each iteration. 
int(nu) is the number of ' +\n 'times to apply a dilation or erosion at each timestep', type=float, default=0.1)\n parser.add_argument('-smooth', '--smoothing', help='Number of smoothing passes per iteration',\n type=float, default=1)\n parser.add_argument('-postnu', '--post_nu',\n help='Number of dilation (nu > 0) or erosion (nu < 0) passes after iterations completed',\n type=int, default=5)\n parser.add_argument('-postsmooth', '--post_smoothing', help='Number of final smoothing passes applied after iterations are completed, following post_nu',\n type=int, default=5)\n parser.add_argument('-postsmooth0', '--post_smoothing0', help='Number of initial smoothing passes applied after iterations are completed, before post_nu',\n type=int, default=0)\n parser.add_argument('-exit', '--exit_thres', help='Exit threshold: stop iterating early once the change in the level set per iteration falls below this value', type=float,\n default=5e-6)\n parser.add_argument('-n', '--niters', help='Number of iterations per timepoint', type=int, default=26)\n parser.add_argument('-n0', '--niters0', help='Number of iterations for the first timepoint', type=int, default=76)\n parser.add_argument('-rad0', '--radius_guess',\n help='If positive, specifies the radius of the initial implicit level set guess',\n type=float, default=-1)\n parser.add_argument('-center_guess', '--center_guess',\n help='If not empty_string, specifies the center of the initial level set guess. '\n 'The delimiter between each positional value is a comma',\n type=str, default=\"empty_string\")\n parser.add_argument('-ss', '--subsampling_factor', help='Factor to multiply the coordinates of the extracted ls',\n type=int, default=4)\n\n # IO options\n parser.add_argument('-save', '--save_callback', help='Save images of ls meshes during MS', action='store_true')\n parser.add_argument('-show', '--show_callback', help='Display images of ls meshes during MS', action='store_true')\n parser.add_argument('-plot_mesh3d', '--plot_mesh3d', help='Plot the evolving 3d mesh', action='store_true')\n parser.add_argument('-dtype', '--saved_datatype', help='Filetype for output implicit level sets',\n type=str, default='h5')\n parser.add_argument('-dset_name', '--dset_name', help='Name of dataset to load from hdf5 input file on which to run',\n type=str, default='exported_data')\n parser.add_argument('-permute', '--permute_axes', help='Axes order of training data (xyzc, cxyz, cyxz, etc)',\n type=str, default='xyzc')\n parser.add_argument('-permute_mesh', '--permute_mesh', help='Axes order of output mesh (xyz, yxz, zyx, etc)',\n type=str, default='xyz')\n parser.add_argument('-invert', '--invert_probabilities', help='Invert the loaded probabilities before extracting the level set',\n action='store_true')\n parser.add_argument('-hide_ticks', '--hide_check_axis_ticks',\n help='Hide the axis labels (numbers, ticks) on the check images',\n action='store_true')\n parser.add_argument('-prob', '--probabilities_search_string', help='Seek this file name for probabilities.',\n type=str, default='stab_Probabilities.h5')\n parser.add_argument('-mask', '--mask_filename', help='Seek this file name for masking the probabilities.',\n type=str, default='empty_string')\n parser.add_argument('-volumetric', '--volumetric', help='Use volumetric pressure in Chan-Vese (Energy~p*V).',\n action='store_true')\n parser.add_argument('-volume0', '--target_volume', help='Target volume for pressure in Chan-Vese (Energy~p*V).',\n type=float, default=40000.0)\n parser.add_argument('-nu_max', '--nu_max', help='Maximum pressure steps per iteration in Chan-Vese (Energy~p*V).',\n type=int, default=5)\n 
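# A hedged usage sketch (hypothetical paths), tying together the pressure flags above:\n # python run_morphsnakes.py -i probs.h5 -volumetric -volume0 40000 -nu_max 3 -n 15\n # per the help text, -volumetric applies pressure with Energy~p*V toward the target\n # -volume0, with at most -nu_max pressure steps per iteration.\n 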
parser.add_argument('-include_boundary_faces', '--include_boundary_faces',\n help='Do not remove boundary faces from the mesh representation of level set',\n action='store_true')\n parser.add_argument('-adjust_for_MATLAB_indexing', '--adjust_for_MATLAB_indexing',\n help='Adjust the output mesh coordinates to reflect 1-indexing instead of zero-indexing',\n action='store_true')\n args = parser.parse_args()\n logging.basicConfig(level=logging.DEBUG)\n\n if args.dataset:\n \"\"\" \n python run_morphsnakes.py -dataset \\\n -o /Users/npmitchell/Dropbox/Soft_Matter/UCSB/gut_morphogenesis/data/48Ygal4-UAShistRFP/201901021550_folded_2part/ \\\n -i /Users/npmitchell/Dropbox/Soft_Matter/UCSB/gut_morphogenesis/data/48Ygal4-UAShistRFP/201901021550_folded_2part/ \\\n -ofn_ply mesh_apical_ms_ \\\n -ofn_ls msls_apical_ -l1 1 -l2 1 -nu 2 -smooth 1 -postsmooth 5 -postnu 5 -n 26 -n0 76 -exit 5e-6\n \n python run_morphsnakes.py -dataset \\\n -o /Users/npmitchell/Dropbox/Soft_Matter/UCSB/gut_morphogenesis/data/48Ygal4UasCAAXmCherry/48Ygal4UasCAAXmCherry_20190207200_excellent/cells_h5/ \\\n -i /Users/npmitchell/Dropbox/Soft_Matter/UCSB/gut_morphogenesis/data/48Ygal4UasCAAXmCherry/48Ygal4UasCAAXmCherry_20190207200_excellent/cells_h5/ \\\n -ofn_ply mesh_apical_ms_ -rad0 10 -save -prenu -5 -presmooth 1 \\\n -ofn_ls msls_apical_ -l1 1 -l2 1 -nu 0 -smooth 0.1 -postsmooth 4 -postnu 4 -n 26 -n0 126 -exit 5e-4 -prob *Probabilities_cells.h5 \\\n -init_ls /Users/npmitchell/Dropbox/Soft_Matter/UCSB/gut_morphogenesis/data/48Ygal4UasCAAXmCherry/48Ygal4UasCAAXmCherry_20190207200_excellent/cells_h5/msls_apical_init.npy\n \n python /mnt/data/code/morphsnakes_wrapper/morphsnakes_wrapper/run_morphsnakes.py -dataset -volumetric -i \\\n /mnt/crunch/48YGal4UasLifeActRuby/201904021800_great/Time6views_60sec_1p4um_25x_1p0mW_exp0p150_3/data/deconvolved_16bit/ \\\n -prob _stab_Probabilities.h5 -n0 5 -o \\\n /mnt/crunch/48YGal4UasLifeActRuby/201904021800_great/Time6views_60sec_1p4um_25x_1p0mW_exp0p150_3/data/deconvolved_16bit/msls_output/ \\\n -prenu -4 -presmooth 1 -ofn_ply mesh_apical_ms_stab_ -ofn_ls msls_apical_stab_ -l1 1 -l2 1 -nu 4 -postnu 2 \\\n -channel 1 -smooth 0.3 -postsmooth 4 -exit 0.000100000 -channel 0 -dtype h5 -permute zyxc -ss 4 \\\n -include_boundary_faces -save -center_guess 200,75,75 -rad0 40 -init_ls \\\n /mnt/crunch/48YGal4UasLifeActRuby/201904021800_great/Time6views_60sec_1p4um_25x_1p0mW_exp0p150_3/data/deconvolved_16bit/msls_output/msls_initguess.h5 -n 15\n\n \n \"\"\"\n print('Running in dataset mode.')\n indir = args.input\n outdir = args.outputdir\n # Run over a dataset of hdf5 files\n print('Seeking files ', indir + '*' + args.probabilities_search_string)\n todo = sorted(glob.glob(indir + '*' + args.probabilities_search_string))\n if len(todo) == 0:\n print('None found > seeking files ', indir + '*' + args.probabilities_search_string + args.saved_datatype)\n todo = sorted(glob.glob(indir + '*' + args.probabilities_search_string + args.saved_datatype))\n if not todo:\n print('None found > seeking files ', indir + '*' + args.probabilities_search_string + '.' + args.saved_datatype)\n todo = sorted(glob.glob(indir + '*' + args.probabilities_search_string + '.' 
+ args.saved_datatype))\n\n for (name, dmyk) in zip(todo, range(len(todo))):\n print('todo[' + str(dmyk) + '] = ' + name)\n\n for (fn, kk) in zip(todo, range(len(todo))):\n timepoint = fn.split('/')[-1].split('_c')[0].split('Time_')[-1]\n outdir_k = outdir + 'morphsnakes_check_' + timepoint + '/'\n\n # Ensure that the directory exists\n if args.save_callback:\n d = os.path.dirname(outdir_k)\n if not os.path.exists(d):\n print('run_morphsnakes.py: creating dir: ', d)\n os.makedirs(d)\n\n print(args.outputfn_ply)\n outfn_ply = outdir + args.outputfn_ply + timepoint + '.ply'\n olsfn = outdir\n\n # Load/define levelset if this is the first timestep\n if kk == 0:\n # Get initial guess by levelset analysis with different number of iterations\n niters = args.niters0\n # Load init_ls if path is supplied\n if args.init_ls_fn == 'empty_string':\n init_ls = None\n # Start with sphere in the middle of image. Obtain the radius of the sphere\n if args.radius_guess > 0:\n radius_guess = args.radius_guess\n else:\n radius_guess = None\n\n if not args.center_guess == 'empty_string':\n if ',' in args.center_guess:\n center_guess = tuple(float(value) for value in args.center_guess.split(','))\n else:\n center_guess = None\n else:\n center_guess = None\n\n # Target volume is supplied in args\n target_volume = args.target_volume\n else:\n # The initial level set filename was supplied. Figure out what file type it is\n if args.init_ls_fn[-3:] == 'npy':\n init_ls = np.load(args.init_ls_fn)\n elif args.init_ls_fn[-3:] in ['.h5', 'df5']:\n print('Extracting init_ls from ', args.init_ls_fn)\n f = h5py.File(args.init_ls_fn, 'r')\n init_ls = f['implicit_levelset'][:]\n f.close()\n print('Extracted init_ls with shape ', np.shape(init_ls))\n\n # Target volume is raw volume from loaded levelset\n if init_ls is not None:\n target_volume = sum(init_ls.ravel())\n else:\n target_volume = args.target_volume\n\n # Since there is an initial set, don't use the default spherical guess\n radius_guess = None\n center_guess = None\n\n # Erode/Dilate the init_ls to avoid spilling out of ROI on next round or avoid collapse, etc\n for _ in range(abs(args.init_ls_nu)):\n if args.init_ls_nu > 0:\n init_ls = ndi.binary_dilation(init_ls)\n else:\n init_ls = ndi.binary_erosion(init_ls)\n\n for _ in range(args.pre_smoothing):\n init_ls = _curvop(init_ls)\n else:\n niters = args.niters\n\n # Declare how many iterations to do before exit\n print('niters = ', niters)\n\n # Clip the image if the parameter clip was given as positive\n if args.clip > 0:\n clip = args.clip\n else:\n clip = None\n\n # Clip the image if the parameter clip_floor was given as positive\n if args.clip_floor > 0:\n clip_floor = args.clip_floor\n else:\n clip_floor = None\n\n # mask the data if mask filename is given\n if '.' in args.mask_filename:\n print('Since . appears in mask_filename, assuming full filename with path is given for mask file...')\n maskfn = args.mask_filename\n if not os.path.exists(maskfn):\n raise RuntimeError('Mask filename does not exist! Sought file ' + maskfn)\n elif args.mask_filename != 'none' and args.mask_filename != 'empty_string':\n print('No . in mask_filename, building the mask filename from the timepoint and saved datatype...')\n maskfn = args.mask_filename + timepoint + '.' + args.saved_datatype\n if not os.path.exists(maskfn):\n raise RuntimeError('Mask filename does not exist! 
Sought file ' + maskfn)\n else:\n maskfn = None\n mask = None\n\n if maskfn is not None:\n # load the mask\n mask = load_data(maskfn)\n\n # prepare for volumetric pressure\n do_volumetric = args.volumetric\n print('target_volume = ', target_volume)\n\n # Perform the levelset calculation\n ls = extract_levelset(fn, iterations=niters, channel=args.channel, init_ls=init_ls,\n smoothing=args.smoothing, lambda1=args.lambda1, lambda2=args.lambda2,\n nu=args.nu, post_smoothing0=args.post_smoothing0,\n post_smoothing=args.post_smoothing, post_nu=args.post_nu,\n exit_thres=args.exit_thres, dset_name=args.dset_name,\n impath=outdir_k, plot_each=10, save_callback=args.save_callback,\n show_callback=args.show_callback, axis_order=args.permute_axes,\n plot_mesh3d=args.plot_mesh3d, mask=mask,\n comparison_mesh=None, radius_guess=radius_guess,\n center_guess=center_guess, clip=clip, clip_floor=clip_floor,\n labelcheckax=not args.hide_check_axis_ticks, volumetric=do_volumetric,\n target_volume=target_volume, nu_max=args.nu_max)\n\n # Extract edges of level set and store them in a mesh\n mm = mesh.Mesh()\n\n # If desired, avoid chopping the boundaries by converting all boundary pixels to zero\n if args.include_boundary_faces:\n # expand ls into padded zeros\n ls2 = np.zeros(np.shape(ls) + np.array([2, 2, 2]))\n ls2[1:-1, 1:-1, 1:-1] = ls\n coords, triangles = mcubes.marching_cubes(ls2, 0.5)\n coords -= np.array([1, 1, 1])\n else:\n coords, triangles = mcubes.marching_cubes(ls, 0.5)\n\n # Note that the mesh is such that the corner of the corner voxel would be at -0.5, -0.5, -0.5\n # EXAMPLE TO SHOW THIS:\n # sz = 10\n # X, Y, Z = np.mgrid[:sz, :sz, :sz]\n # ww = sz * 0.5\n # u = np.zeros_like(X) # (X - ww) ** 2 + (Y - ww) ** 2 + (Z - ww) ** 2 - (ww*0.5) ** 2\n # u[np.where(X > 5)] = 1\n # vtx, tri = mcubes.marching_cubes(u, 0)\n\n # Swap axes of coords if desired\n if args.permute_mesh != 'xyz':\n if args.permute_mesh == 'xzy':\n coords = np.swapaxes(coords, 1, 2)\n # preserve triangle orientation\n triangles = triangles[:, [0, 2, 1]]\n elif args.permute_mesh == 'yxz':\n coords = np.swapaxes(coords, 0, 1)\n # preserve triangle orientation\n triangles = triangles[:, [0, 2, 1]]\n elif args.permute_mesh == 'zxy':\n coords = np.swapaxes(coords, 0, 1)\n coords = np.swapaxes(coords, 0, 2)\n elif args.permute_mesh == 'zyx':\n coords = np.swapaxes(coords, 0, 2)\n # preserve triangle orientation\n triangles = triangles[:, [0, 2, 1]]\n elif args.permute_mesh == 'yzx':\n coords = np.swapaxes(coords, 0, 2)\n coords = np.swapaxes(coords, 0, 1)\n else:\n raise RuntimeError('Have not coded for this permute_mesh option. 
Do so here.')\n\n mm.points = coords * float(args.subsampling_factor)\n mm.triangles = triangles\n print('saving ', outfn_ply)\n mm.save(outfn_ply)\n\n if args.adjust_for_MATLAB_indexing:\n coords += 0.5 * np.ones([1, 3])\n\n # Save the level set data as a numpy or h5 file\n if args.saved_datatype == 'npy':\n # Save ls for this timepoint as npy file\n outfn_ls = outdir + args.outputfn_ls + timepoint + '.npy'\n print('saving ', outfn_ls)\n np.save(outfn_ls, ls)\n elif args.saved_datatype in ['h5', 'hdf5']:\n # Save ls for this timepoint as an hdf5 file\n outfn_ls = outdir + args.outputfn_ls + timepoint + '.h5'\n print('saving ', outfn_ls)\n msaux.save_ls_as_h5(outfn_ls, ls)\n else:\n print('skipping save of implicit ls...')\n\n # Make current level set into next iteration's guess\n init_ls = ls\n\n # Next iteration's target volume is raw output volume from extracted levelset\n target_volume = sum(init_ls.ravel())\n\n # Erode the init_ls several times to avoid spilling out of ROI on next round\n for _ in range(abs(args.init_ls_nu)):\n if args.init_ls_nu > 0:\n init_ls = ndi.binary_dilation(init_ls)\n else:\n init_ls = ndi.binary_erosion(init_ls)\n\n elif args.parameter_sweep:\n \"\"\"\n Example usage \n python ~/Dropbox/Soft_Matter/UCSB/gut_morphogenesis/gut_python/gut_python/run_morphsnakes.py -sweep -init_ls ./msls_apical_nu0p1_s1_pn1_ps1_l1_l2_000080.npy \\\n -l1 1. -l2 2. -postnu 1 -postsmooth 1 \\\n -o ./ -i ./Time_000090_c1_Probabilities.h5 \\\n -ofn_ply mesh_apical_ms_000090 \\\n -ofn_ls mesh_apical_ms_000090 -save\n \"\"\"\n fn = args.input\n outputdir = os.path.join(args.outputdir, '')\n imdir = outputdir + 'morphsnakes_check/'\n ofn_ply_base = args.outputfn_ply\n ofn_ls_base = args.outputfn_ls\n nus = np.arange(0.0, 0.6, 0.1)[::-1]\n smooths = np.arange(3)\n\n if args.init_ls_fn == 'empty_string':\n init_ls = None\n else:\n init_ls = np.load(args.init_ls_fn)\n # Erode the init_ls several times to avoid spilling out of ROI on next round\n for _ in range(abs(args.init_ls_nu)):\n if args.init_ls_nu > 0:\n init_ls = ndi.binary_dilation(init_ls)\n else:\n init_ls = ndi.binary_erosion(init_ls)\n\n # check that it has loaded\n # print('init_ls = ', init_ls)\n\n for nu in nus:\n for smooth in smooths:\n name = '_nu{0:0.2f}'.format(nu).replace('.', 'p') + '_s{0:02d}'.format(smooth)\n name += '_pnu{0:02d}'.format(args.post_nu) + '_p1s{0:02d}'.format(args.post_smoothing0)\n name += '_p2s{0:02d}'.format(args.post_smoothing)\n name += '_l{0:0.2f}'.format(args.lambda1).replace('.', 'p')\n name += '_l{0:0.2f}'.format(args.lambda2).replace('.', 'p')\n\n ofn_ply = outputdir + ofn_ply_base + name + '.ply'\n ofn_ls = outputdir + ofn_ls_base + name\n\n # prepare for volumetric: target volume\n target_volume = sum(init_ls.ravel())\n\n ls = extract_levelset(fn, iterations=args.niters, channel=args.channel, init_ls=init_ls,\n smoothing=smooth, lambda1=args.lambda1, lambda2=args.lambda2,\n nu=nu, post_smoothing0=args.post_smoothing0,\n post_smoothing=args.post_smoothing, post_nu=args.post_nu,\n exit_thres=args.exit_thres,\n impath=imdir, plot_each=10, save_callback=args.save_callback,\n show_callback=args.show_callback,\n comparison_mesh=None, plot_diff=True, volumetric=args.volumetric,\n target_volume=args.target_volume, nu_max=args.nu_max)\n\n # Extract edges of level set\n coords, triangles = mcubes.marching_cubes(ls, 0.5)\n if args.adjust_for_MATLAB_indexing:\n coords += 0.5 * np.ones([1, 3])\n\n mm = mesh.Mesh()\n mm.points = coords\n mm.triangles = triangles\n print('saving ', ofn_ply)\n 
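                # 0.5 is the isolevel halfway between the binary level set's 0/1
                # values, so marching_cubes above traces the region boundary.
                # A minimal sketch of the HDF5 layout this script expects when it
                # later reloads a level set (the real save below goes through
                # msaux.save_ls_as_h5, whose internals are not shown here):
                #     with h5py.File(ofn_ls + '.h5', 'w') as fh:
                #         fh.create_dataset('implicit_levelset', data=ls)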
mm.save(ofn_ply)\n\n # Overwrite the extension if specified in the outputfn\n if ofn_ls[-4:] == '.npy':\n args.saved_datatype = 'npy'\n outfn_ls = ofn_ls + '.npy'\n elif ofn_ls[-3:] == '.h5' or ofn_ls[-5:] == '.hdf5':\n args.saved_datatype = 'h5'\n outfn_ls = ofn_ls + '.' + ofn_ls.split('.')[-1]\n\n # Now save it\n if args.saved_datatype == 'npy':\n # Save ls for this timepoint as npy file\n print('saving ', outfn_ls)\n np.save(outfn_ls, ls)\n elif args.saved_datatype in ['h5', 'hdf5']:\n # Save ls for this timepoint as an hdf5 file\n print('saving ', outfn_ls)\n msaux.save_ls_as_h5(outfn_ls, ls)\n else:\n print('skipping save of implicit ls...')\n\n # Save the result as an image\n img = load_img(fn, args.channel, dset_name=args.dset_name)\n plt.close('all')\n msaux.plot_levelset_result(ls, img, name='ms' + name, imdir='./param_sweep/', fig=None, fig2=None,\n ax=None, title=None, comparison_mesh=None, save=True, show=False)\n plt.close('all')\n\n else:\n \"\"\"Run morphological snakes on a single image to create implicit surface.\n Example usage: \n \n python run_morphsnakes.py \\\n -o /Users/npmitchell/Dropbox/Soft_Matter/UCSB/gut_morphogenesis/data/48Ygal4-UAShistRFP/201901021550_folded_2part/morphsnakes_testing/test_out.ply \\\n -ols /Users/npmitchell/Dropbox/Soft_Matter/UCSB/gut_morphogenesis/data/48Ygal4-UAShistRFP/201901021550_folded_2part/morphsnakes_testing/test_out.npy \\\n -i /Users/npmitchell/Dropbox/Soft_Matter/UCSB/gut_morphogenesis/data/48Ygal4-UAShistRFP/201901021550_folded_2part/Time_000001_c1_Probabilities.h5 \\\n -rootdir /Users/npmitchell/Dropbox/Soft_Matter/UCSB/gut_morphogenesis/data/48Ygal4-UAShistRFP/201901021550_folded_2part/\n \n python run_morphsnakes.py -i /Users/npmitchell/Dropbox/Soft_Matter/UCSB/qbio-vip8_shared/tolls/TP0_Ch3_Ill0_Ang0,45,90,135,180,225,270,315.h5\n -o /Users/npmitchell/Dropbox/Soft_Matter/UCSB/qbio-vip8_shared/tolls/msls_output_nu0p10_s1_pn4_ps4_l1_l1/\n -prenu 0 -presmooth 0 -ofn_ply mesh_apical_ms_000000.ply -ofn_ls msls_apical_000000.npy -l1 1 -nu 0.1\n -postnu -2 -channel -1 -smooth 1 -postsmooth 4 -exit 0.000001000 -dset_name inputData -rad0 30 -n 35 -save\n -dtype h5 -init_ls /Users/npmitchell/Dropbox/Soft_Matter/UCSB/qbio-vip8_shared/tolls/Time_000000_c3_levelset.h5 -l2 2 -clip 500\n \n ## LifeAct settings\n datDir=\"/mnt/crunch/48YGal4UasLifeActRuby/201904021800_great/Time6views_60sec_1p4um_25x_1p0mW_exp0p150_3/data/deconvolved_16bit/\"\n datDir=\"/mnt/data/48YGal4UasLifeActRuby/201902201200_unusualfolds/Time6views_60sec_1p4um_25x_obis1_exp0p35_3/data/deconvolved_16bit/\"\n for (( num=1; num<=190; num++ ))\n do\n mslsDir=\"${datDir}msls_output/\"\n idx=$(printf \"%03d\" $(( num )))\n prev=$(printf \"%03d\" $(( num-1 )))\n initls=${mslsDir}msls_apical_stab_000${prev}.h5\n #initls=${mslsDir}msls_initguess.h5\n python /mnt/data/code/morphsnakes_wrapper/morphsnakes_wrapper/run_morphsnakes.py -i \\\n ${datDir}Time_000${idx}_c1_stab_Probabilities.h5 \\\n -init_ls $initls \\\n -o $mslsDir \\\n -prenu -4 -presmooth 0 -ofn_ply mesh_apical_ms_stab_000${idx}.ply -ofn_ls \\\n msls_apical_stab_000${idx}.h5 -l1 1 -l2 1 -postnu 3 -channel 1 -smooth 0.2 -postsmooth 2 \\\n -exit 0.000100000 -channel 0 -dtype h5 -permute zyxc -ss 4 -include_boundary_faces \\\n -center_guess 150,95,95 -rad0 20 -n 15 -volumetric -nu_max 2 -nu 4\n # -save -nu 0.3\n #\n # -volumetric -nu_max 1 -nu 4\n done\n \n ## VIP 10 CAAX\n datDir=\"/mnt/crunch/48YGal4UasLifeActRuby/201904021800_great/Time6views_60sec_1p4um_25x_1p0mW_exp0p150_3/data/deconvolved_16bit/\"\n 
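        # only the last datDir assignment takes effect; the lines above are
        # kept as alternative dataset roots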
datDir=\"/mnt/data/48YGal4UasLifeActRuby/201902201200_unusualfolds/Time6views_60sec_1p4um_25x_obis1_exp0p35_3/data/deconvolved_16bit/\"\n datDir=\"/mnt/crunch/gut/48YGal4klarUASCAAXmChHiFP/202001221000_60sec_1p4um_25x_1mW_2mW_exp0p25_exp0p7/Time3views_1017/data/deconvolved_16bit/\"\n for (( num=0; num<=196; num++ ))\n do\n mslsDir=\"${datDir}msls_output/\"\n idx=$(printf \"%03d\" $(( num )))\n prev=$(printf \"%03d\" $(( num-1 )))\n if (($num>0))\n then\n initls=${mslsDir}msls_apical_stab_000${prev}.h5\n else\n initls=${mslsDir}msls_initguess.h5\n fi\n python3 /mnt/data/code/morphsnakes_wrapper/run_morphsnakes.py -i \\\n Time_000${idx}_c1_stab_Probabilities.h5 \\\n -init_ls $initls \\\n -o $mslsDir -prenu -4 -presmooth 0 -ofn_ply mesh_apical_ms_stab_000${idx}.ply \\\n -ofn_ls msls_apical_stab_000${idx}.h5 -l1 1 -l2 1 -nu 0.2 \\\n -postnu 3 -channel 1 -smooth 0.2 -postsmooth 1 \\\n -exit 0.000010 -channel 0 -dtype h5 -permute zyxc -ss 4 \\\n -include_boundary_faces -save -center_guess 200,75,75 -rad0 40 -n 45\n done\n \"\"\"\n fn = args.input\n outputdir = os.path.join(args.outputdir, '')\n outfn_ply = outputdir + args.outputfn_ply\n\n if outfn_ply[-4:] != '.ply':\n outfn_ply += '.ply'\n outfn_ls = outputdir + args.outputfn_ls\n\n print('We will save output as ', outfn_ls)\n\n imdir = outputdir + 'morphsnakes_check/'\n\n if args.init_ls_fn == 'empty_string':\n load_init = False\n elif os.path.exists(args.init_ls_fn) and os.path.isfile(args.init_ls_fn):\n load_init = True\n else:\n load_init = False\n\n print(args.init_ls_fn)\n print('Load init? ' + str(load_init))\n if not load_init:\n init_ls = None\n\n if args.radius_guess > 0:\n radius_guess = args.radius_guess\n else:\n radius_guess = None\n\n if not args.center_guess == 'empty_string':\n if ',' in args.center_guess:\n center_guess = tuple(float(value) for value in args.center_guess.split(','))\n else:\n center_guess = None\n else:\n center_guess = None\n else:\n # Load the initial level set\n if args.init_ls_fn[-3:] == 'npy':\n init_ls = np.load(args.init_ls_fn)\n elif args.init_ls_fn[-3:] in ['.h5', 'df5']:\n print('Loading init_ls from ' + args.init_ls_fn)\n f = h5py.File(args.init_ls_fn, 'r')\n init_ls = f['implicit_levelset'][:]\n f.close()\n elif args.init_ls_fn[-3:] == 'tif' or args.init_ls_fn[-4:] == 'tiff':\n init_ls = read_multipage_tiff(args.init_ls_fn)\n else:\n raise RuntimeError(\"Initial guess must be npy, h5, or tif\")\n\n radius_guess = None\n center_guess = None\n\n # Erode the init_ls several times to avoid spilling out of ROI on next round\n for _ in range(abs(args.init_ls_nu)):\n if args.init_ls_nu > 0:\n init_ls = ndi.binary_dilation(init_ls)\n else:\n init_ls = ndi.binary_erosion(init_ls)\n\n for _ in range(args.pre_smoothing):\n init_ls = _curvop(init_ls)\n\n if args.channel < 0:\n channel = None\n else:\n channel = args.channel\n\n if args.clip > 0:\n clip = args.clip\n else:\n clip = None\n\n # Clip the image if the parameter clip_floor was given as positive\n if args.clip_floor > 0:\n clip_floor = args.clip_floor\n else:\n clip_floor = None\n\n # prepare for volumetric pressure: target volume\n if init_ls is not None:\n target_volume = sum(init_ls.ravel())\n else:\n target_volume = args.target_volume\n\n ls = extract_levelset(fn, iterations=args.niters, channel=channel, init_ls=init_ls,\n smoothing=args.smoothing, lambda1=args.lambda1, lambda2=args.lambda2,\n nu=args.nu, post_smoothing0=args.post_smoothing0,\n post_smoothing=args.post_smoothing, post_nu=args.post_nu,\n exit_thres=args.exit_thres, 
dset_name=args.dset_name,\n impath=imdir, plot_each=10, save_callback=args.save_callback,\n show_callback=args.show_callback, axis_order=args.permute_axes,\n comparison_mesh=None, radius_guess=radius_guess, center_guess=center_guess,\n plot_mesh3d=args.plot_mesh3d, clip=clip, clip_floor=clip_floor,\n labelcheckax=not args.hide_check_axis_ticks, volumetric=args.volumetric,\n target_volume=target_volume, nu_max=args.nu_max)\n print('Extracted level set')\n\n # Extract edges of level set\n mm = mesh.Mesh()\n # If desired, avoid chopping the boundaries by converting all boundary pixels to zero\n if args.include_boundary_faces:\n # expand ls into padded zeros\n ls2 = np.zeros(np.shape(ls) + np.array([2, 2, 2]))\n ls2[1:-1, 1:-1, 1:-1] = ls\n coords, triangles = mcubes.marching_cubes(ls2, 0.5)\n coords -= np.array([1, 1, 1])\n else:\n coords, triangles = mcubes.marching_cubes(ls, 0.5)\n\n # Swap axes of coords if desired\n if args.permute_mesh != 'xyz':\n if args.permute_mesh == 'xzy':\n coords = np.swapaxes(coords, 1, 2)\n # preserve triangle orientation\n triangles = triangles[:, [0, 2, 1]]\n elif args.permute_mesh == 'yxz':\n coords = np.swapaxes(coords, 0, 1)\n # preserve triangle orientation\n triangles = triangles[:, [0, 2, 1]]\n elif args.permute_mesh == 'zxy':\n coords = np.swapaxes(coords, 0, 1)\n coords = np.swapaxes(coords, 0, 2)\n elif args.permute_mesh == 'zyx':\n coords = np.swapaxes(coords, 0, 2)\n # preserve triangle orientation\n triangles = triangles[:, [0, 2, 1]]\n elif args.permute_mesh == 'yzx':\n coords = np.swapaxes(coords, 0, 2)\n coords = np.swapaxes(coords, 0, 1)\n else:\n raise RuntimeError('Have not coded for this permute_mesh option. Do so here.')\n\n mm.points = coords * float(args.subsampling_factor)\n mm.triangles = triangles\n print('saving ', outfn_ply)\n mm.save(outfn_ply)\n\n if args.adjust_for_MATLAB_indexing:\n coords += 0.5 * np.ones([1, 3])\n\n # Save ls for this timepoint as npy or hdf5 file\n if args.saved_datatype == 'npy':\n # Save ls for this timepoint as npy file\n if outfn_ls[-4:] != '.npy':\n outfn_ls += '.npy'\n\n print('saving ', outfn_ls)\n np.save(outfn_ls, ls)\n elif args.saved_datatype in ['h5', 'hdf5']:\n # Save ls for this timepoint as an hdf5 file\n if outfn_ls[-3:] != '.h5' and outfn_ls[-5:] != '.hdf5':\n outfn_ls = outfn_ls + '.h5'\n\n print('saving ', outfn_ls)\n msaux.save_ls_as_h5(outfn_ls, ls)\n else:\n print('skipping save of implicit ls...')\n\n logging.info(\"Done.\")\n","repo_name":"npmitchell/morphsnakes_wrapper","sub_path":"run_morphsnakes.py","file_name":"run_morphsnakes.py","file_ext":"py","file_size_in_byte":65643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"7224099435","text":"# Define a Python function called format_price that:\n# (a) Has two integer parameters, indicating the cost in dollars\n# and cents of an item\n# (b) Returns a single string in the format \"$D.C\".\n# (c) Example: if called with arguments 9 and 99, the function\n# should return the string $9.99\n\ndef format_price(dollar, cent):\n \"\"\" format price as \"$D.C.\" with the amount of dollar and cent and return the formatted string\n :param dollar: integer, dollar amount\n :param cent: integer, cent amount\n :return: str, formatted price\n \"\"\"\n formatted = \"$\" + str(dollar) + \".\" + str(cent)\n print(formatted)\n return formatted\n\n\nprint(\"print outside of the function:\", format_price(9, 
99))","repo_name":"ganglix/CMPT141_lecture_code_2023_fall","sub_path":"week_2/ch04_ex1_ex2.py","file_name":"ch04_ex1_ex2.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"32873838545","text":"import os\nimport random\nimport pandas as pd\nimport tkinter as tk\nfrom tkinter import ttk\n\n\nclass Gui:\n def __init__(self, master, db):\n \n self.db = db\n\n self.mainFrame = tk.Frame(master)\n self.mainFrame.pack()\n self.filterFrame = tk.Frame(self.mainFrame)\n self.filterFrame.pack()\n\n # Auteur\n self.authorLabel = tk.Label(self.filterFrame, text=\"author\")\n self.authorLabel.pack()\n self.jtpButton = tk.Checkbutton(self.filterFrame, text ='jtp')\n self.jtpButton.pack()\n self.pomButton = tk.Checkbutton(self.filterFrame, text='pom')\n self.pomButton.pack()\n\n year = [str(y) for y in range(2012, 2020)]\n\n # Date\n self.dateFrameFrom = tk.Frame(self.filterFrame)\n self.dateFrameFrom.pack()\n self.dateLabel = tk.Label(self.dateFrameFrom, text=\"From\")\n self.dateLabel.pack()\n self.year_from = ttk.Combobox(self.dateFrameFrom, values=[str(y) for y in range(2012, 2020)], width=10)\n self.year_from.pack(side=tk.LEFT)\n self.month_from = tk.ttk.Combobox(self.dateFrameFrom, values=[str(m) for m in range(1, 13)], width=5)\n self.month_from.pack(side=tk.LEFT)\n self.day_from = tk.ttk.Combobox(self.dateFrameFrom, values=[str(d) for d in range(1, 31)], width=5)\n self.day_from.pack(side=tk.LEFT)\n\n self.dateFrameTo = tk.Frame(self.filterFrame)\n self.dateFrameTo.pack()\n self.dateLabel = tk.Label(self.dateFrameTo, text=\"To\")\n self.dateLabel.pack()\n self.year_to = ttk.Combobox(self.dateFrameTo, values=[str(y) for y in range(2012, 2020)], width=10)\n self.year_to.pack(side=tk.LEFT)\n self.month_to = tk.ttk.Combobox(self.dateFrameTo, values=[str(m) for m in range(1, 13)], width=5)\n self.month_to.pack(side=tk.LEFT)\n self.day_to = tk.ttk.Combobox(self.dateFrameTo, values=[str(d) for d in range(1, 31)], width=5)\n self.day_to.pack(side=tk.LEFT)\n\n # Include words\n self.tagFrame = tk.Frame(self.filterFrame)\n self.tagFrame.pack()\n self.tagLabel = tk.Label(self.tagFrame, text=\"Include words:\")\n self.tagLabel.pack()\n self.tagEntry = tk.Entry(self.tagFrame)\n self.tagEntry.pack()\n\n # Search button\n self.searchButton = tk.Button(self.filterFrame, text='Search', command=self.searchFilter)\n self.searchButton.pack()\n\n def searchFilter(self):\n return 0\n \n \n \nclass Gui_labelling:\n def __init__(self, master, db):\n \n self.master = master\n self.db = db\n\n self.input_file = open('chatbot/input.from', 'a+')\n self.reply_file = open('chatbot/reply.to', 'a+')\n self.members = self.db['members']\n self.members = {member['name'] : member['pseudo'] for member in self.members.find()}\n\n self.messages = self.db['messages']\n\n self.data = [ [], [], [] ]\n cond = {\"$and\": [ {'content': {'$exists': True, '$ne': None } }, {'author': {\"$in\" : list(self.members.keys())} } ] }\n for msg in self.messages.find(cond):\n self.data[0].append(msg['timestamp'])\n self.data[1].append(msg['content'])\n self.data[2].append(self.members[msg['author']])\n self.df = { 'timestamp' : self.data[0], 'txt_msg' : self.data[1], 'author' : self.data[2] }\n self.df = pd.DataFrame.from_dict(self.df)\n self.df = self.df.sort_values(['timestamp'], ascending=[True])\n self.df = self.df.reset_index(drop=True)\n\n self.msg_n = random.randint(5, len(self.df.index)-5)\n self.next_msg = self.msg_n + 10\n\n self.common_reply 
= [\"oumff\", \"moua\", \"ouais ouais supère\", \"J'aime bien le froumage\", \"inks\", \"ceci être bruh moment\", \"sa ses vraies\", \"ceci être ma naturelle position\", \"oker\", \"gros jeu\", \"Fais pas ta tapet\", \"Criss de centriste\", \"Ses vraies\", \"Ferme ta criss de gueule\", \"Tayeule gros fif\", \"T'es juste une moumoune\", \"icksder\"]\n\n # Main frame\n self.mainFrame = tk.Frame(master)\n self.mainFrame.pack(expand=True)\n\n # Current message\n self.messageFrame = tk.Frame(self.mainFrame)\n self.messageFrame.grid(row=0, columnspan=3, sticky='n')\n self.current_messageFrame = tk.Frame(self.messageFrame)\n self.current_messageFrame.grid(row=0, column=0)\n self.current_messageLabel= tk.Label(self.current_messageFrame, text=\"Message #{}\".format(self.msg_n), width=20)\n self.current_messageLabel.grid(row=0, columnspan=2)\n self.current_authorLabel= tk.Label(self.current_messageFrame, text=\"{}:\".format(self.df.loc[self.msg_n, 'author']), width=12)\n self.current_authorLabel.grid(row=1, column=0)\n self.current_message = tk.Text(self.current_messageFrame, height=5, width=30, wrap=tk.WORD)\n self.current_message.insert(tk.END, self.df.loc[self.msg_n, 'txt_msg'])\n self.current_message.grid(row=1, column=1)\n\n # Preceding messages\n self.preceding_messageFrame = tk.Frame(self.mainFrame)\n self.preceding_messageFrame.grid(row=3, column=0, sticky='e', padx=20, pady=20)\n self.preceding_messageLabel= tk.Label(self.preceding_messageFrame, text=\"Réponse à\")\n self.preceding_messageLabel.grid(row=0, columnspan=3)\n self.preceding_authorLabel = []\n self.preceding_messageCanvas = []\n self.preceding_box = []\n self.preceding_var = []\n self.preceding_author_var = []\n for i in range(5):\n self.preceding_author_var.append(tk.StringVar())\n self.preceding_author_var[i].set(\"{}:\".format(self.df.loc[self.msg_n-(i+1), 'author']))\n self.preceding_authorLabel.append(tk.Label(self.preceding_messageFrame, textvariable=self.preceding_author_var[i], width=12))\n self.preceding_authorLabel[i].grid(row=i+1, column=0)\n self.preceding_messageCanvas.append(tk.Text(self.preceding_messageFrame, height=3, width=40, wrap=tk.WORD))\n self.preceding_messageCanvas[i].insert(tk.END, self.df.loc[self.msg_n-(i+1), 'txt_msg'])\n self.preceding_messageCanvas[i].grid(row=i+1, column=1)\n self.preceding_var.append(tk.BooleanVar())\n self.preceding_box.append(tk.Checkbutton(self.preceding_messageFrame, variable = self.preceding_var[i]))\n self.preceding_box[i].grid(row=i+1, column=2)\n \n # Following messages\n self.following_messageFrame = tk.Frame(self.mainFrame)\n self.following_messageFrame.grid(row=3, column=1, sticky='w', padx=20, pady=20)\n self.following_messageLabel= tk.Label(self.following_messageFrame, text=\"Réponses\")\n self.following_messageLabel.grid(row=0, columnspan=3)\n self.following_authorLabel = [] \n self.following_messageCanvas = []\n self.following_box = []\n self.following_var = []\n self.following_author_var = []\n for i in range(5):\n self.following_author_var.append(tk.StringVar())\n self.following_author_var[i].set(\"{}:\".format(self.df.loc[self.msg_n+(i+1), 'author']))\n self.following_authorLabel.append(tk.Label(self.following_messageFrame, textvariable=self.following_author_var[i], width=12))\n self.following_authorLabel[i].grid(row=i+1, column=0)\n self.following_messageCanvas.append(tk.Text(self.following_messageFrame, height=3, width=40, wrap=tk.WORD))\n self.following_messageCanvas[i].insert(tk.END, self.df.loc[self.msg_n+(i+1), 'txt_msg'])\n 
self.following_messageCanvas[i].grid(row=i+1, column=1)\n self.following_var.append(tk.BooleanVar())\n self.following_box.append(tk.Checkbutton(self.following_messageFrame, variable=self.following_var[i]))\n self.following_box[i].grid(row=i+1, column=2)\n\n # Common following messages\n self.common_following_messageFrame = tk.Frame(self.mainFrame)\n self.common_following_messageFrame.grid(row=3, column=2, sticky='w', padx=20, pady=20)\n self.common_following_messageLabel= tk.Label(self.common_following_messageFrame, text=\"Réponses courantes\")\n self.common_following_messageLabel.grid(row=0, columnspan=2)\n self.common_following_messageCanvas = []\n self.common_following_box = []\n self.common_following_var = []\n self.random_common_msg = random.sample(self.common_reply, 5)\n for i in range(5):\n self.common_following_messageCanvas.append(tk.Text(self.common_following_messageFrame, height=3, width=40, wrap=tk.WORD))\n self.common_following_messageCanvas[i].insert(tk.END, self.random_common_msg[i])\n self.common_following_messageCanvas[i].grid(row=i+1, column=0)\n self.common_following_var.append(tk.BooleanVar())\n self.common_following_box.append(tk.Checkbutton(self.common_following_messageFrame, variable=self.common_following_var[i]))\n self.common_following_box[i].grid(row=i+1, column=1)\n\n # Other input\n self.other_msgFrame = tk.Frame(self.mainFrame)\n self.other_msgFrame.grid(row=4, columnspan=3, pady=10)\n self.other_inputFrame = tk.Frame(self.other_msgFrame)\n self.other_inputFrame.grid(row=0, column=0, padx=10)\n self.other_inputLabel= tk.Label(self.other_inputFrame, text=\"Autres réponses à possibles (Séparer les réponses avec ; )\")\n self.other_inputLabel.grid()\n self.input_input = tk.Text(self.other_inputFrame, height=5, width=40)\n self.input_input.grid()\n # Other reply\n self.other_replyFrame = tk.Frame(self.other_msgFrame)\n self.other_replyFrame.grid(row=0, column=1, padx=10)\n self.other_replyLabel= tk.Label(self.other_replyFrame, text=\"Autres réponses possibles (Séparer les réponses avec ; )\")\n self.other_replyLabel.grid()\n self.input_reply = tk.Text(self.other_replyFrame, height=5, width=40)\n self.input_reply.grid()\n \n # Next message\n self.buttonsFrame = tk.Frame(self.mainFrame)\n self.buttonsFrame.grid(row=5, columnspan=3, sticky='s')\n self.next_msg_Button = tk.Button(self.buttonsFrame, text='Confirmer\\net passer au\\nmessage #{}'.format(self.next_msg), command=self.nextMsgCmd, width=20)\n self.next_msg_Button.grid(row=0, column=0, padx=10)\n self.rand_msg_Button = tk.Button(self.buttonsFrame, text='Confirmer\\net passer à un\\nmessage aléatoire', command=self.randMsgCmd, width=20)\n self.rand_msg_Button.grid(row=0, column=1, padx=10)\n # Message selection\n self.selection_messageFrame = tk.Frame(self.buttonsFrame)\n self.selection_messageFrame.grid(row=0, column=2, padx=10, pady=10)\n self.selection_Label = tk.Label(self.selection_messageFrame, text=\"Sélection du\\nprochain message\")\n self.selection_Label.grid(row=0, columnspan=3)\n self.selectionLabel = tk.Label(self.selection_messageFrame, text=\"msg# :\")\n self.selectionLabel.grid(row=1, column=0, padx=2, pady=2)\n self.selectionInput = tk.Entry(self.selection_messageFrame, width=10)\n self.selectionInput.grid(row=1, column=1, padx=2, pady=2)\n self.selectionButton = tk.Button(self.selection_messageFrame, text='Ok', command=self.selectMsgCmd)\n self.selectionButton.grid(row=1, column=2, padx=2, pady=2)\n \n\n def __del__(self):\n self.input_file.close()\n self.reply_file.close()\n \n\n def 
selectMsgCmd(self):\n try: \n self.next_msg = int(self.selectionInput.get())\n self.next_msg_Button.config(text='Confirmer\\net passer au\\nmessage #{}'.format(self.next_msg))\n except ValueError:\n self.master.title(random.sample(['Fais pas chier', 'Press ok to ok'], 1)[0])\n self.selectionInput.delete(\"0\",\"end\")\n\n\n def nextMsgCmd(self):\n self.log_label()\n self.update_msg()\n self.next_msg += 10\n self.next_msg_Button.config(text='Confirmer\\net passer au\\nmessage #{}'.format(self.next_msg))\n\n\n def randMsgCmd(self):\n self.next_msg = random.randint(5, len(self.df.index)-5)\n self.log_label()\n self.update_msg()\n self.next_msg = self.msg_n + 10\n self.next_msg_Button.config(text='Confirmer\\net passer au\\nmessage #{}'.format(self.next_msg))\n \n\n def log_label(self):\n for i in range(len(self.following_var)):\n if self.preceding_var[i].get():\n self.reply_file.write(self.current_message.get(\"1.0\",tk.END))\n self.input_file.write(self.preceding_messageCanvas[i].get(\"1.0\",tk.END))\n \n if self.following_var[i].get():\n self.reply_file.write(self.following_messageCanvas[i].get(\"1.0\",tk.END))\n self.input_file.write(self.current_message.get(\"1.0\",tk.END))\n \n if self.common_following_var[i].get():\n self.reply_file.write(self.common_following_messageCanvas[i].get(\"1.0\",tk.END))\n self.input_file.write(self.current_message.get(\"1.0\",tk.END))\n \n if self.input_input.get(\"1.0\",tk.END):\n for query in self.input_input.get(\"1.0\", tk.END).split(';'):\n reply = self.current_message.get(\"1.0\",tk.END).strip('\\n')\n if (query != '\\n'and reply != '\\n'):\n if repr(query[-2:-1]) != '\\n':\n query += '\\n'\n if repr(reply[-2:-1]) != '\\n':\n reply += '\\n'\n self.reply_file.write(reply)\n self.input_file.write(query)\n\n if self.input_reply.get(\"1.0\",tk.END):\n for reply in self.input_reply.get(\"1.0\", tk.END).split(';'):\n query = self.current_message.get(\"1.0\",tk.END).strip('\\n')\n if (query != '\\n'and reply != '\\n'):\n if repr(query[-2:-1]) != '\\n':\n query += '\\n'\n if repr(reply[-2:-1]) != '\\n':\n reply += '\\n'\n self.reply_file.write(reply)\n self.input_file.write(query)\n\n def update_msg(self):\n \n self.msg_n = self.next_msg\n self.current_authorLabel.config(text=\"{}:\".format(self.df.loc[self.msg_n, 'author']))\n self.current_messageLabel.config(text=\"Message #{}\".format(self.msg_n))\n self.current_message.delete(\"1.0\", \"end\")\n self.current_message.insert(tk.END, self.df.loc[self.msg_n, 'txt_msg'])\n self.random_common_msg = random.sample(self.common_reply, 5)\n\n for i in range(5):\n self.preceding_author_var[i].set(\"{}:\".format(self.df.loc[self.msg_n-(i+1), 'author']))\n self.preceding_messageCanvas[i].delete(\"1.0\", \"end\")\n self.preceding_messageCanvas[i].insert(tk.END, self.df.loc[self.msg_n-(i+1), 'txt_msg'])\n self.preceding_var[i].set(False)\n\n self.following_author_var[i].set(\"{}:\".format(self.df.loc[self.msg_n+(i+1), 'author']))\n self.following_messageCanvas[i].delete(\"1.0\", \"end\")\n self.following_messageCanvas[i].insert(tk.END, self.df.loc[self.msg_n+(i+1), 'txt_msg'])\n self.following_var[i].set(False)\n\n self.common_following_messageCanvas[i].delete(\"1.0\", \"end\")\n self.common_following_messageCanvas[i].insert(tk.END, self.random_common_msg[i])\n self.common_following_var[i].set(False)\n\n self.input_input.delete(\"1.0\", \"end\")\n self.input_reply.delete(\"1.0\", 
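        # Tkinter text indices: "1.0" means line 1, character 0 (widget start)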
\"end\")","repo_name":"jtpaquet/Touka-Analytics","sub_path":"database/GUI.py","file_name":"GUI.py","file_ext":"py","file_size_in_byte":15519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"19338105559","text":"\r\n\"\"\"\r\nCe programme est le programme principal du projet,\r\nil execute l'application dash et fait appel aux différents autres packages\r\n\"\"\"\r\n\r\nfrom dash import Dash, dcc, html\r\nfrom dash.dependencies import Input, Output, State\r\nfrom dash.exceptions import PreventUpdate\r\nimport plotly.express as px\r\nfrom twitter_collect import fetch_tweets\r\nfrom twitter_analyse import emotions_fct\r\nfrom twitter_analyse import word_cloud\r\nfrom twitter_analyse import fetch_words\r\nfrom twitter_analyse import mosaic\r\n#import pandas as pd\r\nimport dash_daq as daq\r\n\r\nexternal_stylesheets = ['assets/style.css']\r\n\r\napp = Dash(__name__, external_stylesheets=external_stylesheets)\r\n\r\n# Structure de la page\r\napp.layout = html.Div([\r\n\r\n # Header\r\n html.Div([\r\n html.Div([\r\n html.Img(className='app-header--logo', src=app.get_asset_url(\r\n 'logo-blanc.svg')),\r\n html.H1(\"Centrale Analytica\", style={'textAlign': 'center'}),\r\n ], className='box head1'),\r\n\r\n # Searchbox\r\n html.Div([dcc.RadioItems(id='research-type', options=[\r\n {'label': 'Subject', 'value': 'subject'},\r\n {'label': 'Hashtag', 'value': 'hashtag'},\r\n {'label': 'User', 'value': 'user'},\r\n ], inline=True, style={'margin': '10px'}, value='subject'),\r\n dcc.Input(id='input-text', type='text',\r\n value='Type here', className='input'),\r\n html.Button(id='submit-button-text', n_clicks=0,\r\n children='Submit', style={'backgroundColor': '#1A8CD8', 'margin': '10px'}),\r\n html.Div(id='wainting-text'),\r\n html.Div(id='ending-text'),\r\n dcc.Checklist(\r\n ['Feelings Proportions', 'Wordcloud', 'Mosaic', 'Gauge'],\r\n id='dropdown-to-show-or-hide-element',\r\n inline=True, value=[],\r\n className=\"checkbox\"\r\n )], className='box head2'),\r\n\r\n\r\n ], className='app-header', style={'background-image': 'url(assets/banniere2.jpg)',\r\n 'background-size': 'cover'}),\r\n\r\n html.Div([\r\n html.Div([\r\n html.Div([dcc.Graph(id='barchart_feelings')],\r\n className='box graph', id='barchart_box'),\r\n html.Div([dcc.Graph(id='wordcloud', className='image1')],\r\n className='box graph', id='wordcloud_box')], className='top flex'),\r\n html.Div([\r\n html.Div([dcc.Graph(id='mosaic', className='image2')],\r\n className='box graph', id='mosaic_box'),\r\n html.Div([daq.Gauge(\r\n id='my-gauge-1',\r\n label=\"Negative-Positive\",\r\n color={\"gradient\": True, \"ranges\": {\r\n \"red\": [0, 0.35], \"yellow\":[0.35, 0.7], \"green\":[0.7, 1]}},\r\n value=0.5,\r\n min=0,\r\n max=1,\r\n size=120\r\n ),\r\n daq.Gauge(\r\n id='my-gauge-2',\r\n label=\"Objective-Subjective\",\r\n color={\"gradient\": True, \"ranges\": {\r\n \"blue\": [0, 0.5], \"red\":[0.5, 1]}},\r\n value=0.5,\r\n min=0,\r\n max=1,\r\n size=120\r\n ),\r\n daq.Gauge(\r\n id='my-gauge-3',\r\n label=\"Unknown-Famous\",\r\n color={\"gradient\": True, \"ranges\": {\r\n \"purple\": [0, 0.5], \"orange\":[0.5, 1]}},\r\n value=0.5,\r\n min=0,\r\n max=1,\r\n size=120\r\n )], className='box graph', id='gauge_box')], className='bottom flex')\r\n ])\r\n], className='page')\r\n\r\n\r\n# Appelé lors de l'appui du boutton, affiche la phrase d'attente\r\n@ app.callback(Output('wainting-text', 'children'),\r\n Input('submit-button-text', 'n_clicks'),\r\n State('input-text', 
'value'))\r\ndef start_analyse(n_clicks, request):\r\n \"\"\"Affiche la phrase d'attente lorsque l'analyse est lancée\r\n\r\n Args:\r\n n_clicks (integer): nombre de fois que le bouton a été appuyé\r\n input (string): texte entré par l'utilisateur\r\n\r\n Raises:\r\n PreventUpdate: si le bouton n'a pas été appué ie au chargement de la page\r\n\r\n Returns:\r\n strin: texte à afficher\r\n \"\"\"\r\n if n_clicks == 0:\r\n raise PreventUpdate\r\n\r\n return f'''Your request about \"{request}\" is being treated'''\r\n\r\n\r\n@ app.callback(Output('barchart_feelings', 'figure'),\r\n Output('wordcloud', 'figure'),\r\n Output('mosaic', 'figure'),\r\n Output('my-gauge-1', 'value'),\r\n Output('my-gauge-2', 'value'),\r\n Output('my-gauge-3', 'value'),\r\n Output('ending-text', 'children'),\r\n Input('submit-button-text', 'n_clicks'),\r\n Input('research-type', 'value'),\r\n State('input-text', 'value'))\r\ndef analyse(n_clicks, rtype, request):\r\n \"\"\"Analyse les tweets et crée plusieurs représentations\r\n\r\n Args:\r\n n_clicks (integer): nombre de fois que le bouton a été appuyé\r\n type (string): type de la recherche (subject/hashtag/user)\r\n request (string): texte entré par l'utilisateur\r\n\r\n Raises:\r\n PreventUpdate: si le bouton n'a pas été appué ie au chargement de la page\r\n\r\n Returns:\r\n figure: barchart associé au sentiment détéctés par l'analyse\r\n \"\"\"\r\n\r\n if n_clicks == 0:\r\n raise PreventUpdate\r\n\r\n # Récuperation des données\r\n sub, hashtag, use = False, False, False\r\n if rtype == 'subject':\r\n sub = True\r\n elif rtype == 'hashtag':\r\n hashtag = True\r\n else:\r\n use = True\r\n fetch_tweets.create_data(request, subject=sub,\r\n user=use, hashtag=hashtag, tweet_limit=100)\r\n dataframe = fetch_tweets.get_data(\"twitter_data.json\")\r\n\r\n # analyse des données\r\n datas = emotions_fct.emo_analysis(dataframe)\r\n # création du wordcloud\r\n\r\n feeling = str(datas[0][\"Emotion\"].iloc[datas[0][\"Frequency\"].idxmax()])\r\n print(feeling)\r\n wordcloud = word_cloud.generate_wordcloud(dataframe, feeling=feeling, contour_width=1,\r\n coutor_color='blue', background_color='black')\r\n fig2 = px.imshow(wordcloud)\r\n fig2.update_layout(template=None, paper_bgcolor='rgba(0,0,0,0)',\r\n plot_bgcolor='rgba(0,0,0,0)')\r\n fig2.update_xaxes(showgrid=False, showticklabels=False, zeroline=False)\r\n fig2.update_yaxes(showgrid=False, showticklabels=False, zeroline=False)\r\n\r\n # création de la mosaïque\r\n mots = fetch_words.frequent_words(dataframe, 12)\r\n mosaic_image = mosaic.fetch_image(mots, request)\r\n fig3 = px.imshow(mosaic_image)\r\n fig3.update_layout(template=None, paper_bgcolor='rgba(0,0,0,0)',\r\n plot_bgcolor='rgba(0,0,0,0)')\r\n fig3.update_xaxes(showgrid=False, showticklabels=False, zeroline=False)\r\n fig3.update_yaxes(showgrid=False, showticklabels=False, zeroline=False)\r\n\r\n # Création du barchart\r\n fig = px.bar(datas[0], x='Emotion', y='Frequency', text_auto=True,\r\n title=\"Feelings proportions\")\r\n fig.update_traces(textfont_size=14, textangle=0,\r\n textposition=\"inside\", cliponaxis=False)\r\n fig.update_layout(paper_bgcolor='rgba(0,0,0,0)',\r\n plot_bgcolor='rgba(0,0,0,0)',\r\n font_color='white',\r\n title_font_color='white')\r\n\r\n freqp, freqs, freqf = datas[1]['Frequency'][0], datas[1]['Frequency'][1], datas[1]['Frequency'][2]\r\n return fig, fig2, fig3, freqp, freqs, freqf, f'''Your request about \"{request}\" has been treated'''\r\n\r\n\r\n@ app.callback(\r\n Output(component_id='barchart_box', 
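    # each Output below toggles the CSS 'display' style of one graph container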
component_property='style'),\r\n Output(component_id='wordcloud_box', component_property='style'),\r\n Output(component_id='mosaic_box', component_property='style'),\r\n Output(component_id='gauge_box', component_property='style'),\r\n [Input(component_id='dropdown-to-show-or-hide-element', component_property='value')])\r\ndef show_hide_element(visibility_state):\r\n \"\"\"Change l'état de certains div pour qu'ils soient visibles ou non\r\n\r\n Args:\r\n visibility_state (list): liste des id dont le state doit être\r\n changé pour block, les autres doivent être changés pour none\r\n\r\n Returns:\r\n dict: type de display des quatres fenêtres\r\n \"\"\"\r\n if 'Feelings Proportions' in visibility_state:\r\n barc = {'display': 'block'}\r\n else:\r\n barc = {'display': 'none'}\r\n\r\n if 'Wordcloud' in visibility_state:\r\n wor = {'display': 'block'}\r\n else:\r\n wor = {'display': 'none'}\r\n\r\n if 'Mosaic' in visibility_state:\r\n mos = {'display': 'block'}\r\n else:\r\n mos = {'display': 'none'}\r\n\r\n if 'Gauge' in visibility_state:\r\n gau = {'display': 'block'}\r\n else:\r\n gau = {'display': 'none'}\r\n\r\n return barc, wor, mos, gau\r\n\r\n\r\nif __name__ == '__main__':\r\n app.run_server(debug=True)\r\n","repo_name":"fotisk07/CentraleAnalytica","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":9115,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"85"} +{"seq_id":"45253578928","text":"# coding=utf8\n# Please place imports here.\n# BEGIN IMPORTS\n\nimport cv2\nimport numpy as np\nimport scipy\nfrom scipy import ndimage, linalg\nfrom scipy.sparse import csr_matrix\n\n\n# END IMPORTS\ndef compute_photometric_stereo_impl(lights, images):\n \"\"\"\n Given a set of images taken from the same viewpoint and a corresponding set\n of directions for light sources, this function computes the albedo and\n normal map of a Lambertian scene.\n\n If the computed albedo for a pixel has an L2 norm less than 1e-7, then set\n the albedo to black and set the normal to the 0 vector.\n\n Normals should be unit vectors.\n\n Input:\n lights -- N x 3 array. Rows are normalized and are to be interpreted\n as lighting directions.\n images -- list of N images. 
Each image is of the same scene from the\n same viewpoint, but under the lighting condition specified in\n lights.\n Output:\n albedo -- float32 height x width x 3 image with dimensions matching the\n input images.\n normals -- float32 height x width x 3 image with dimensions matching\n the input images.\n\"\"\"\n\n L = lights # N行3列,每行代表光的方向向量\n L_T = L.T # 转置一下\n albedo = np.zeros((images[0].shape[0], images[0].shape[1], images[0].shape[2]), dtype=np.float32)\n normals = np.zeros((images[0].shape[0], images[0].shape[1], 3), dtype=np.float32) # 法向量对每张图、每个通道都一样\n term1 = np.linalg.inv(L_T.dot(L))\n for channel in range(images[0].shape[2]): # 哪个通道\n for row in range(images[0].shape[0]):\n for col in range(images[0].shape[1]):\n I = [(images[i][row][col][channel]).T for i in range(len(images))]\n term2 = L_T.dot(I) # LT*I\n G = term1.dot(term2) # G=(LT*L)^-1*(LT*I)\n k = np.round(np.linalg.norm(G), 5) # k等于G的模长\n if k < 1e-7:\n k = 0\n else:\n normals[row][col] += G / k\n albedo[row][col][channel] = k\n normals /= images[0].shape[2]\n return albedo, normals\n\ndef pyrdown_impl(image):\n \"\"\"\n Prefilters an image with a gaussian kernel and then downsamples the result\n by a factor of 2.\n\n The following 1D convolution kernel should be used in both the x and y\n directions.\n K = 1/16 [ 1 4 6 4 1 ]\n\n Functions such as cv2.GaussianBlur and\n scipy.ndimage.filters.gaussian_filter are prohibited. You must implement\n the separable kernel. However, you may use functions such as cv2.filter2D\n or scipy.ndimage.filters.correlate to do the actual\n correlation / convolution.\n\n Filtering should mirror the input image across the border.\n For scipy this is mode = mirror.\n For cv2 this is mode = BORDER_REFLECT_101.\n\n Downsampling should take the even-numbered coordinates with coordinates\n starting at 0.\n\n Input:\n image -- height x width [x channels] image of type float32.\n Output:\n down -- ceil(height/2) x ceil(width/2) [x channels] image of type\n float32.\n \"\"\"\n # Creating kernel, K = 1/16 [ 1 4 6 4 1 ]\n gauss_filter = np.zeros((1, 5))\n gauss_filter[0, 0] = 1\n gauss_filter[0, 1] = 4\n gauss_filter[0, 2] = 6\n gauss_filter[0, 3] = 4\n gauss_filter[0, 4] = 1\n\n Ky = 1.0 / 16 * gauss_filter\n Kx = np.transpose(Ky)\n\n # Filter image in x and y directions using gaussian kernel\n filtered_img = cv2.filter2D(image, -1, Ky, borderType=cv2.BORDER_REFLECT_101)\n filtered_img = cv2.filter2D(filtered_img, -1, Kx, borderType=cv2.BORDER_REFLECT_101)\n\n # Downsampling filtered image\n down = filtered_img[::2, ::2]\n\n return down\n\n\ndef pyrup_impl(image):\n \"\"\"\n Upsamples an image by a factor of 2 and then uses a gaussian kernel as a\n reconstruction filter.\n\n The following 1D convolution kernel should be used in both the x and y\n directions.\n K = 1/8 [ 1 4 6 4 1 ]\n Note: 1/8 is not a mistake. 
The additional factor of 4 (applying this 1D\n kernel twice) scales the solution according to the 2x2 upsampling factor.\n\n Filtering should mirror the input image across the border.\n For scipy this is mode = mirror.\n For cv2 this is mode = BORDER_REFLECT_101.\n\n Upsampling should produce samples at even-numbered coordinates with\n coordinates starting at 0.\n\n Input:\n image -- height x width [x channels] image of type float32.\n Output:\n up -- 2 height x 2 width [x channels] image of type float32.\n \"\"\"\n # Upsampling given image\n up_height = image.shape[0] * 2\n up_width = image.shape[1] * 2\n if (len(image.shape) == 3):\n up_channels = image.shape[2]\n unfiltered_up = np.zeros((up_height, up_width, up_channels))\n else:\n unfiltered_up = np.zeros((up_height, up_width))\n\n unfiltered_up[::2, ::2] = image\n\n # Creating gaussian kernel, K = 1/8 [ 1 4 6 4 1 ]\n gauss_filter = np.zeros((1, 5))\n gauss_filter[0, 0] = 1\n gauss_filter[0, 1] = 4\n gauss_filter[0, 2] = 6\n gauss_filter[0, 3] = 4\n gauss_filter[0, 4] = 1\n\n Ky = 1.0 / 8 * gauss_filter\n Kx = np.transpose(Ky)\n\n # Filter image in x and y directions using gaussian kernel\n up = cv2.filter2D(unfiltered_up, -1, Ky, borderType=cv2.BORDER_REFLECT_101)\n up = cv2.filter2D(up, -1, Kx, borderType=cv2.BORDER_REFLECT_101)\n\n return up\n\ndef project_impl(K, Rt, points):\n \"\"\"\n Project 3D points into a calibrated camera.\n\n If the point has a depth < 1e-7 from the camera or is located behind the\n camera, then set the projection to [np.nan, np.nan].\n\n Input:\n K -- camera intrinsics calibration matrix\n Rt -- 3 x 4 camera extrinsics calibration matrix\n points -- height x width x 3 array of 3D points\n Output:\n projections -- height x width x 2 array of 2D projections\n \"\"\"\n proj_height = points.shape[0]\n proj_width = points.shape[1]\n projections = np.ndarray((proj_height, proj_width, 2))\n\n # Calculating 2D projections\n for row in range(proj_height):\n for col in range(proj_width):\n # Change points to 3D array\n coord = np.ones((1, 4))\n coord[0, :3] = points[row, col, :3]\n\n # xp = K * Rt * X\n X = np.transpose(coord)\n xp = np.dot(K, np.dot(Rt, X))\n\n if (xp[2, 0] < 1e-7):\n projections[row, col, :2] = np.nan\n else:\n xp /= xp[2, 0]\n projections[(row, col)] = xp[:2, 0]\n\n return projections\n\ndef unproject_corners_impl(K, width, height, depth, Rt):\n \"\"\"\n Undo camera projection given a calibrated camera and the depth for each\n corner of an image.\n\n The output points array is a 2x2x3 array arranged for these image\n coordinates in this order:\n\n (0, 0) | (width, 0)\n -------------+------------------\n (0, height) | (width, height)\n\n Each of these contains the 3 vector for the corner's corresponding\n point in 3D.\n\n Tutorial:\n Say you would like to unproject the pixel at coordinate (x, y)\n onto a plane at depth z with camera intrinsics K and camera\n extrinsics Rt.\n\n (1) Convert the coordinates from homogeneous image space pixel\n coordinates (2D) to a local camera direction (3D):\n (x', y', 1) = K^-1 * (x, y, 1)\n the camera center. 
Multiply it by z to get the point at depth z\n from the camera center.\n (z * x', z * y', z) = z * (x', y', 1)\n (3) Use the inverse of the extrinsics matrix, Rt, to move this point\n from the local camera coordinate system to a world space\n coordinate.\n Note:\n | R t |^-1 = | R' -R't |\n | 0 1 | | 0 1 |\n\n p = R' * (z * x', z * y', z, 1) - R't\n\n Input:\n K -- camera intrinsics calibration matrix\n width -- camera width\n height -- camera height\n depth -- depth of plane with respect to camera\n Rt -- 3 x 4 camera extrinsics calibration matrix\n Output:\n points -- 2 x 2 x 3 array of 3D points\n \"\"\"\n # (1) 2D -> 3D, (x', y', 1) = K^-1 * (x, y, 1)\n corners = np.ones((3, 4))\n corners[:2, 0] = 0\n corners[0, 1] = width\n corners[1, 1] = 0\n corners[0, 2] = 0\n corners[1, 2] = height\n corners[0, 3] = width\n corners[1, 3] = height\n\n K_inv = np.linalg.inv(K)\n cam_dir = np.dot(K_inv, corners)\n\n # (2) Include depth, (z * x', z * y', z) = z * (x', y', 1)\n cam_dir = depth * cam_dir\n\n # (3) Camera coord. system -> world space coord., p = R' * (z * x', z * y', z, 1) - R't\n Rt_matrix = np.zeros((4, 4))\n Rt_matrix[:3, :] = Rt\n Rt_matrix[3, 3] = 1\n Rt_inv = np.linalg.inv(Rt_matrix)\n\n cam_pts = np.ones((4, 4))\n cam_pts[:3, :] = cam_dir\n p = np.dot(Rt_inv, cam_pts)\n\n points = np.zeros((2, 2, 3))\n points[0, 0] = p[:3, 0]\n points[0, 1] = p[:3, 1]\n points[1, 0] = p[:3, 2]\n points[1, 1] = p[:3, 3]\n\n return points\n\ndef preprocess_ncc_impl(image, ncc_size):\n \"\"\"\n Prepare normalized patch vectors according to normalized cross\n correlation.\n\n This is a preprocessing step for the NCC pipeline. It is expected that\n 'preprocess_ncc' is called on every input image to preprocess the NCC\n vectors and then 'compute_ncc' is called to compute the dot product\n between these vectors in two images.\n\n NCC preprocessing has two steps.\n (1) Compute and subtract the mean.\n (2) Normalize the vector.\n\n The mean is per channel. i.e. For an RGB image, over the ncc_size**2\n patch, compute the R, G, and B means separately. The normalization\n is over all channels. i.e. For an RGB image, after subtracting out the\n RGB mean, compute the norm over the entire (ncc_size**2 * channels)\n vector and divide.\n\n If the norm of the vector is < 1e-6, then set the entire vector for that\n patch to zero.\n\n Patches that extend past the boundary of the input image at all should be\n considered zero. Their entire vector should be set to 0.\n\n Patches of shape channels x height x width (e.g. 3 x ncc_size x ncc_size)\n are to be flattened into vectors with the default numpy row major order.\n For example, given the following 2 (channels) x 2 (height) x 2 (width)\n patch, here is how the output vector should be arranged.\n\n channel1 channel2\n +------+------+ +------+------+ height\n | x111 | x121 | | x112 | x122 | |\n +------+------+ +------+------+ |\n | x211 | x221 | | x212 | x222 | |\n +------+------+ +------+------+ v\n width ------->\n\n v = [ x111, x121, x211, x221, x112, x122, x212, x222 ]\n\n Input:\n image -- height x width x channels image of type float32\n ncc_size -- integer width and height of NCC patch region. 
ncc_size\n will always be odd.\n Output:\n normalized -- heigth x width x (channels * ncc_size**2) array\n \"\"\"\n halfPatch = np.floor(ncc_size / 2)\n height = image.shape[0]\n width = image.shape[1]\n channels = image.shape[2]\n normalized = np.zeros((height, width, ncc_size, ncc_size, channels))\n for row in range(int(np.floor(ncc_size / 2)), int(image.shape[0] - np.floor(ncc_size / 2))):\n for col in range(int(np.floor(ncc_size / 2)), int(image.shape[1] - np.floor(ncc_size / 2))):\n normalized[row][col] = np.copy(\n image[row - int(halfPatch):row + int(halfPatch) + 1, col - int(halfPatch):col + int(halfPatch) + 1, :])\n '''check rows and columns here'''\n normalized = np.reshape(normalized, (height, width, ncc_size ** 2, channels))\n normalized = np.swapaxes(normalized, 2, 3)\n normalized -= np.mean(normalized, axis=3, keepdims=True)\n norms = np.linalg.norm(normalized, axis=(2, 3))\n norms = np.reshape(norms, (height, width, 1, 1))\n normalized /= np.maximum(1e-6, norms)\n normalized = np.reshape(normalized, (height, width, channels * ncc_size ** 2))\n return normalized\n\n\ndef compute_ncc_impl(image1, image2):\n \"\"\"\n Compute normalized cross correlation between two images that already have\n normalized vectors computed for each pixel with preprocess_ncc.\n\n Input:\n image1 -- height x width x (channels * ncc_size**2) array\n image2 -- height x width x (channels * ncc_size**2) array\n Output:\n ncc -- height x width normalized cross correlation between image1 and\n image2.\n \"\"\"\n vectors = image1 * image2\n ncc = np.sum(vectors, axis=2)\n return ncc\n\ndef form_poisson_equation_impl(height, width, alpha, normals, depth_weight, depth):\n \"\"\"\n Creates a Poisson equation given the normals and depth at every pixel in image.\n The solution to Poisson equation is the estimated depth.\n When the mode, is 'depth' in 'combine.py', the equation should return the actual depth.\n When it is 'normals', the equation should integrate the normals to estimate depth.\n When it is 'both', the equation should weight the contribution from normals and actual depth,\n using parameter 'depth_weight'.\n\n Input:\n height -- height of input depth,normal array\n width -- width of input depth,normal array\n alpha -- stores alpha value of at each pixel of image.\n If alpha = 0, then the pixel normal/depth should not be\n taken into consideration for depth estimation\n normals -- stores the normals(nx,ny,nz) at each pixel of image\n None if mode is 'depth' in combine.py\n depth_weight -- parameter to tradeoff between normals and depth when estimation mode is 'both'\n High weight to normals mean low depth_weight.\n Giving high weightage to normals will result in smoother surface, but surface may be very different from\n what the input depthmap shows.\n depth -- stores the depth at each pixel of image\n None if mode is 'normals' in combine.py\n Output:\n constants for equation of type Ax = b\n A -- left-hand side coefficient of the Poisson equation\n note that A can be a very large but sparse matrix so csr_matrix is used to represent it.\n b -- right-hand side constant of the the Poisson equation\n \"\"\"\n\n assert alpha.shape == (height, width)\n assert normals is None or normals.shape == (height, width, 3)\n assert depth is None or depth.shape == (height, width)\n\n '''\n Since A matrix is sparse, instead of filling matrix, we assign values to a non-zero elements only.\n For each non-zero element in matrix A, if A[i,j] = v, there should be some index k such that, \n row_ind[k] = i\n col_ind[k] = 
j\n data_arr[k] = v\n Fill these values accordingly\n '''\n row_ind = []\n col_ind = []\n data_arr = []\n '''\n For each row in the system of equation fill the appropriate value for vector b in that row\n '''\n b = []\n if depth_weight is None:\n depth_weight = 1\n\n '''\n TODO\n Create a system of linear equation to estimate depth using normals and crude depth Ax = b\n\n x is a vector of depths at each pixel in the image and will have shape (height*width)\n A: ( k, height)\n x: ( height, width, 3)\n b: ( k, width)\n\n If mode is 'depth':\n > Each row in A and b corresponds to an equation at a single pixel\n > For each pixel k, \n if pixel k has alpha value zero do not add any new equation.\n else, fill row in b with depth_weight*depth[k] and fill column k of the corresponding\n row in A with depth_weight.\n\n Justification: \n Since all the elements except k in a row is zero, this reduces to \n depth_weight*x[k] = depth_weight*depth[k]\n you may see that, solving this will give x with values exactly same as the depths, \n at pixels where alpha is non-zero, then why do we need 'depth_weight' in A and b?\n The answer to this will become clear when this will be reused in 'both' mode\n\n Note: The normals in image are +ve when they are along an +x,+y,-z axes, if seen from camera's viewpoint.\n If mode is 'normals':\n > Each row in A and b corresponds to an equation of relationship between adjacent pixels\n > For each pixel k and its immideate neighbour along x-axis l\n if any of the pixel k or pixel l has alpha value zero do not add any new equation.\n else, fill row in b with nx[k] (nx is x-component of normal), fill column k of the corresponding\n row in A with -nz[k] and column k+1 with value nz[k]\n > Repeat the above along the y-axis as well, except nx[k] should be -ny[k].\n\n Justification: Assuming the depth to be smooth and almost planar within one pixel width.\n The normal projected in xz-plane at pixel k is perpendicular to tangent of surface in xz-plane.\n In other word if n = (nx,ny,-nz), its projection in xz-plane is (nx,nz) and if tangent t = (tx,0,tz),\n then n.t = 0, therefore nx/-nz = -tz/tx\n Therefore the depth change with change of one pixel width along x axis should be proporational to tz/tx = -nx/nz\n In other words (depth[k+1]-depth[k])*nz[k] = nx[k]\n This is exactly what the equation above represents.\n The negative sign in ny[k] is because the indexing along the y-axis is opposite of +y direction.\n\n If mode is 'both':\n > Do both of the above steps.\n\n Justification: The depth will provide a crude estimate of the actual depth. The normals do the smoothing of depth map\n This is why 'depth_weight' was used above in 'depth' mode. 
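    As a concrete illustration (a hypothetical 1 x 2 image with both alphas
    nonzero, mode 'both'): the system stacks two depth rows,
        depth_weight * x[0] = depth_weight * depth[0]
        depth_weight * x[1] = depth_weight * depth[1]
    plus one normal row for the adjacent pixel pair along x,
        -nz[0] * x[0] + nz[0] * x[1] = nx[0]
    so the least-squares solution trades off matching the input depths against
    matching the slope implied by the normals.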
\n If the 'depth_weight' is very large, we are going to give preference to input depth map.\n If the 'depth_weight' is close to zero, we are going to give preference normals.\n '''\n #TODO Block Begin\n #fill row_ind,col_ind,data_arr,b\n rn = 0\n for row_i in range(height):\n for col_j in range(width):\n k = row_i * width + col_j\n if alpha[row_i, col_j] != 0:\n if depth is not None:\n b.append(depth_weight * depth[row_i, col_j]) # depth\n row_ind.append(rn) # depth\n col_ind.append(k) # depth\n data_arr.append(depth_weight) # depth\n rn += 1\n\n if normals is not None:\n if col_j + 1 <= width - 1 and alpha[row_i, col_j + 1] != 0:\n # normals x-axis\n b.append(normals[row_i, col_j, 0])\n row_ind.append(rn)\n col_ind.append(k)\n data_arr.append(-normals[row_i, col_j, 2])\n row_ind.append(rn)\n col_ind.append(k + 1)\n data_arr.append(normals[row_i, col_j, 2])\n rn += 1\n if row_i + 1 <= height - 1 and alpha[row_i + 1, col_j] != 0:\n # normals mode y-axis\n b.append(-normals[row_i, col_j, 1])\n row_ind.append(rn)\n col_ind.append(k)\n data_arr.append(-normals[row_i, col_j, 2])\n row_ind.append(rn)\n col_ind.append(k + width)\n data_arr.append(normals[row_i, col_j, 2])\n rn += 1\n row = rn\n\n #TODO Block end\n # Convert all the lists to numpy array\n row_ind = np.array(row_ind, dtype=np.int32)\n col_ind = np.array(col_ind, dtype=np.int32)\n data_arr = np.array(data_arr, dtype=np.float32)\n b = np.array(b, dtype=np.float32)\n\n # Create a compressed sparse matrix from indices and values\n A = csr_matrix((data_arr, (row_ind, col_ind)), shape=(row, width * height))\n\n return A, b\n","repo_name":"SincereXIA/XidianCS","sub_path":"专业选修课/计算机视觉/Exp 实验/第三次上机实验/16030199025张俊华+16030140077郁张超exp4/student.py","file_name":"student.py","file_ext":"py","file_size_in_byte":19934,"program_lang":"python","lang":"en","doc_type":"code","stars":384,"dataset":"github-code","pt":"85"} +{"seq_id":"24899371207","text":"# Defining Empty Array\narray = []\n\n# getting Array Size\nn = int(input(\"Enter Array Size\"))\n\n# Getting Array Elements\nprint(\"Enter Array Elements\")\nfor i in range(n):\n x=int(input())\n array.append(x)\n\n# Printing Array Elements\nprint(\"the array is\")\nfor i in range(n):\n print(array[i])\n\nres = 0\ncurrent = 0\n\nfor i in range(n):\n if array[i] == 0:\n current=0\n else:\n current+=1\n res = max(res, current)\n\nprint(f\"The Maximum Consecutive Number of Ones are {res}\")","repo_name":"musthafa-vakkayil/python_intermediate_projects","sub_path":"snake_game/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"13485553254","text":"# Reverse A Linked List\n# ********************** #\n\n# Write a function for reversing a linked list. ↴ Do it in place. 
↴\n\n# Your function will have one input: the head of the list.\n\n# Your function should return the new head of the list.\n\n# Took me 3 minutes to solve this problem without writing test cases\n\nimport unittest\n\n\ndef reverse(head_of_list):\n\n # Reverse the linked list in place\n prev = None \n curr = head_of_list\n \n while curr:\n nextNode = curr.next\n curr.next = prev\n prev = curr\n curr = nextNode\n\n return prev\n\n\ndef reverse_recur(head):\n if head.next is None:\n return head\n next_node = head.next\n head.next = None\n rev_list = reverse_recur(next_node)\n next_node.next = head\n return rev_list\n\n# Tests\nclass Test(unittest.TestCase):\n\n class LinkedListNode(object):\n\n def __init__(self, value, next=None):\n self.value = value\n self.next = next\n\n def get_values(self):\n node = self\n values = []\n while node is not None:\n values.append(node.value)\n node = node.next\n return values\n\n def test_short_linked_list(self):\n second = Test.LinkedListNode(2)\n first = Test.LinkedListNode(1, second)\n\n result = reverse(first)\n self.assertIsNotNone(result)\n\n actual = result.get_values()\n expected = [2, 1]\n self.assertEqual(actual, expected)\n\n def test_long_linked_list(self):\n sixth = Test.LinkedListNode(6)\n fifth = Test.LinkedListNode(5, sixth)\n fourth = Test.LinkedListNode(4, fifth)\n third = Test.LinkedListNode(3, fourth)\n second = Test.LinkedListNode(2, third)\n first = Test.LinkedListNode(1, second)\n\n result = reverse(first)\n self.assertIsNotNone(result)\n\n actual = result.get_values()\n expected = [6, 5, 4, 3, 2, 1]\n self.assertEqual(actual, expected)\n\n def test_one_element_linked_list(self):\n first = Test.LinkedListNode(1)\n\n result = reverse(first)\n self.assertIsNotNone(result)\n\n actual = result.get_values()\n expected = [1]\n self.assertEqual(actual, expected)\n\n def test_empty_linked_list(self):\n result = reverse(None)\n self.assertIsNone(result)\n\n\nunittest.main(verbosity=2)","repo_name":"bajracha71/CodingChallenges","sub_path":"InterviewCake/reverseLinkedList.py","file_name":"reverseLinkedList.py","file_ext":"py","file_size_in_byte":2372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"21365313689","text":"from dlm.models.components.lookuptable import LookupTable\nfrom dlm.models.components.linear import Linear\nfrom dlm.models.components.activation import Activation\nfrom dlm.models import classifier\nimport dlm.utils as U\nimport dlm.io.logging as L\nimport theano.tensor as T\nimport theano\nimport numpy\nimport math\n\nclass MLP(classifier.Classifier):\n\n\tdef __init__(self, args=None, model_path=None):\n\n\t\t######################################################################\n\t\t## Parameters\n\t\t#\n\t\t\n\t\tU.xassert((args or model_path) and not (args and model_path), \"args or model_path are mutually exclusive\")\n\t\t\n\t\tif model_path:\n\t\t\targs, loaded_params = self.load_model(model_path)\n\t\t\n\t\temb_dim = args.emb_dim\n\t\tnum_hidden_list = map(int, args.num_hidden.split(','))\n\t\tif num_hidden_list[0] <= 0:\n\t\t\tnum_hidden_list = []\n\n\t\tvocab_size = args.vocab_size\n\t\tself.ngram_size = args.ngram_size\n\t\tnum_classes = args.num_classes\n\t\tactivation_name = args.activation_name\n\t\tself.args = args\n\t\tself.L1 = 0\n\t\tself.L2_sqr = 0\n\t\tself.params = []\n\t\t\n\t\temb_path, vocab = None, None\n\t\ttry:\n\t\t\temb_path = args.emb_path\n\t\t\tvocab = args.vocab\n\t\texcept AttributeError:\n\t\t\tpass\n\t\t\n\t\trng = 
numpy.random.RandomState(1234)\n\t\tself.input = T.imatrix('input')\n\n\t\t######################################################################\n\t\t## Lookup Table Layer\n\t\t#\n\t\t\n\t\tlookupTableLayer = LookupTable(\n\t\t\trng=rng,\n\t\t\tinput=self.input,\n\t\t\tvocab_size=vocab_size,\n\t\t\temb_dim=emb_dim,\n\t\t\temb_path=emb_path,\n\t\t\tvocab_path=vocab,\n\t\t\tadd_weights=args.weighted_emb\n\t\t)\n\t\tlast_layer_output = lookupTableLayer.output\n\t\tlast_layer_output_size = (self.ngram_size - 1) * emb_dim\n\t\tself.params += lookupTableLayer.params\n\t\t\n\t\t######################################################################\n\t\t## Hidden Layer(s)\n\t\t#\n\t\t\n\t\tfor i in range(0, len(num_hidden_list)):\n\t\t\tlinearLayer = Linear(\n\t\t\t\trng=rng,\n\t\t\t\tinput=last_layer_output,\n\t\t\t\tn_in=last_layer_output_size,\n\t\t\t\tn_out=num_hidden_list[i],\n\t\t\t\tsuffix=i\n\t\t\t)\n\t\t\tlast_layer_output = linearLayer.output\n\t\t\tlast_layer_output_size = num_hidden_list[i]\n\t\t\tself.params += linearLayer.params\n\t\t\t\n\t\t\tactivation = Activation(\n\t\t\t\tinput=last_layer_output,\n\t\t\t\tfunc_name=activation_name\n\t\t\t)\n\t\t\tlast_layer_output = activation.output\n\t\t\t\n\t\t\tself.L1 = self.L1 + abs(linearLayer.W).sum()\n\t\t\tself.L2_sqr = self.L2_sqr + (linearLayer.W ** 2).sum()\n\t\t\n\t\t######################################################################\n\t\t## Output Linear Layer\n\t\t#\n\t\t\n\t\tlinearLayer = Linear(\n\t\t\trng=rng,\n\t\t\tinput=last_layer_output,\n\t\t\tn_in=last_layer_output_size,\n\t\t\tn_out=num_classes,\n\t\t\t#b_values = numpy.zeros(num_classes) - math.log(num_classes)\n\t\t\tb_values = numpy.full(shape=(num_classes),fill_value=(-math.log(num_classes)),dtype=theano.config.floatX),\n\t\t\tsuffix='out'\n\t\t)\n\t\tlast_layer_output = linearLayer.output\n\t\tself.params += linearLayer.params\n\t\t\n\t\tself.L1 = self.L1 + abs(linearLayer.W).sum()\n\t\tself.L2_sqr = self.L2_sqr + (linearLayer.W ** 2).sum()\n\t\t\n\t\t######################################################################\n\t\t## Model Output\n\t\t#\n\t\t\n\t\tself.output = last_layer_output\n\t\tself.p_y_given_x_matrix = T.nnet.softmax(last_layer_output)\n\t\t\n\t\t# Log Softmax\n\t\tlast_layer_output_shifted = last_layer_output - last_layer_output.max(axis=1, keepdims=True)\n\t\tself.log_p_y_given_x_matrix = last_layer_output_shifted - T.log(T.sum(T.exp(last_layer_output_shifted),axis=1,keepdims=True))\n\n\n\t\t#self.log_Z_sqr = T.log(T.mean(T.sum(T.exp(last_layer_output), axis=1))) ** 2\n\t\t#self.log_Z_sqr = T.sum(T.log(T.sum(T.exp(last_layer_output), axis=1))) ** 2\n\t\tself.log_Z_sqr = T.mean(T.log(T.sum(T.exp(last_layer_output), axis=1)) ** 2)\n\n\t\t######################################################################\n\t\t## Model Predictions\n\n\t\tself.y_pred = T.argmax(self.p_y_given_x_matrix, axis=1)\n\t\t\n\t\t######################################################################\n\t\t## Loading parameters from file (if given)\n\t\t#\n\t\t\n\t\tif model_path:\n\t\t\tself.set_params(loaded_params)\n\t\t\n\t######################################################################\n\t## Model Functions\n\t#\n\t\n\tdef p_y_given_x(self, y):\n\t\treturn self.p_y_given_x_matrix[T.arange(y.shape[0]), y]\n\n\tdef log_p_y_given_x(self, y):\n\t\treturn self.log_p_y_given_x_matrix[T.arange(y.shape[0]), y]\n\t\n\tdef unnormalized_p_y_given_x(self, y):\n\t\treturn self.output[T.arange(y.shape[0]), y]\n\t\n\tdef negative_log_likelihood(self, y, 
weights=None):\n\t\tif weights:\n\t\t\treturn -T.sum(T.log(self.p_y_given_x(y)) * weights) / T.sum(weights)\n\t\telse:\n\t\t\t#return -T.mean( T.log(self.p_y_given_x(y)))\t\t\t\t\t\t# Unstable : can lead to NaN\n\t\t\treturn -T.mean(self.log_p_y_given_x(y))\t\t\t\t\t\t\t\t# Stable Version\n\n\tdef errors(self, y):\n\t\tif y.ndim != self.y_pred.ndim:\n\t\t\traise TypeError('y should have the same shape as self.y_pred', ('y', y.type, 'y_pred', self.y_pred.type))\n\t\tif y.dtype.startswith('int'):\n\t\t\treturn T.sum(T.neq(self.y_pred, y))\n\t\telse:\n\t\t\traise NotImplementedError()\n\n","repo_name":"nusnlp/corelm","sub_path":"dlm/models/mlp.py","file_name":"mlp.py","file_ext":"py","file_size_in_byte":4896,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"85"} +{"seq_id":"15305303284","text":"from PySide6.QtCore import Signal\nfrom PySide6.QtCore import Qt\nfrom PySide6.QtWidgets import (\n QWidget,\n QFrame,\n QHBoxLayout,\n QVBoxLayout,\n QLabel,\n QTextEdit,\n QGridLayout\n)\n\nfrom GUI.ProjectViewModel import LineItem\n\nclass TreeViewItemWidget(QFrame):\n def __init__(self, content, parent=None):\n super(TreeViewItemWidget, self).__init__(parent)\n\n properties = content.get('properties', {})\n\n layout = QVBoxLayout()\n if content.get('heading'):\n header_widget = WidgetHeader(content['heading'], parent=self)\n self._set_properties(header_widget, properties)\n layout.addWidget(header_widget)\n\n if content.get('subheading'):\n subheading_widget = WidgetSubheading(content['subheading'], parent=self)\n self._set_properties(subheading_widget, properties)\n layout.addWidget(subheading_widget)\n\n if content.get('body'):\n body_widget = WidgetBody(content['body'], parent=self)\n self._set_properties(body_widget, properties)\n layout.addWidget(body_widget)\n\n self._set_properties(self, properties)\n self.setLayout(layout)\n\n def _set_properties(self, widget : QWidget, properties : dict):\n for key, value in properties.items():\n widget.setProperty(key, value)\n\n\nclass WidgetHeader(QLabel):\n def __init__(self, text, parent=None):\n super(WidgetHeader, self).__init__(parent)\n self.setText(text)\n\nclass WidgetSubheading(QLabel):\n def __init__(self, text, parent=None):\n super(WidgetSubheading, self).__init__(parent)\n self.setText(text)\n\nclass WidgetBody(QLabel):\n def __init__(self, text, parent=None):\n super(WidgetBody, self).__init__(parent)\n self.setText(text)\n self.setAlignment(Qt.AlignmentFlag.AlignTop)\n self.setWordWrap(True)\n\nclass LineItemView(QWidget):\n def __init__(self, line, parent=None):\n super(LineItemView, self).__init__(parent)\n\n layout = QVBoxLayout()\n layout.addWidget(LineItemHeader(line, parent=self))\n layout.addWidget(LineItemBody(line, parent=self))\n\n self.setLayout(layout)\n\nclass LineItemHeader(QLabel):\n def __init__(self, line: LineItem, parent=None):\n super(LineItemHeader, self).__init__(parent)\n self.setText(f\"[{str(line.number)}] {str(line.start)} --> {str(line.end)}\")\n\nclass LineItemBody(QLabel):\n def __init__(self, line: LineItem, parent=None):\n super(LineItemBody, self).__init__(parent)\n self.setText(line.text)\n self.setAlignment(Qt.AlignmentFlag.AlignTop)\n self.setWordWrap(True)\n\nclass OptionsGrid(QGridLayout):\n \"\"\"\n Grid layout for options (styling class)\n \"\"\"\n def __init__(self, parent = None) -> None:\n super().__init__(parent)\n\nclass TextBoxEditor(QTextEdit):\n \"\"\"\n Multi-line editor that provides a signal when text contents change\n \"\"\"\n editingFinished = 
Signal(str)\n\n _original = None\n\n def focusInEvent(self, e) -> None:\n self._original = self.toPlainText()\n return super().focusInEvent(e)\n\n def focusOutEvent(self, e) -> None:\n text = self.toPlainText()\n if text != self._original:\n self.editingFinished.emit(text)\n return super().focusOutEvent(e)\n \n def SetText(self, text):\n self.setText(text)\n self.setPlainText(text)\n\n","repo_name":"machinewrapped/gpt-subtrans","sub_path":"GUI/Widgets/Widgets.py","file_name":"Widgets.py","file_ext":"py","file_size_in_byte":3426,"program_lang":"python","lang":"en","doc_type":"code","stars":193,"dataset":"github-code","pt":"85"} +{"seq_id":"7950030833","text":"\"\"\"add timestamp to each trial (Performance)\n\nRevision ID: 062aa076bfa6\nRevises: 7576f76db98b\nCreate Date: 2018-10-03 22:51:49.640188\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '062aa076bfa6'\ndown_revision = '7576f76db98b'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('performance', sa.Column('timestamp', sa.DateTime(), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('performance', 'timestamp')\n # ### end Alembic commands ###\n","repo_name":"jmcaine/justpractice","sub_path":"alembic/versions/062aa076bfa6_add_timestamp_to_each_trial_performance.py","file_name":"062aa076bfa6_add_timestamp_to_each_trial_performance.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"19513490862","text":"\n# coding: utf-8\n\n# In[1]:\n\n\nimport keras\nfrom keras.applications.inception_resnet_v2 import InceptionResNetV2\nfrom keras.preprocessing import image\nfrom keras.engine import Layer\nfrom keras.applications.inception_resnet_v2 import preprocess_input\nfrom keras.layers import Conv2D, UpSampling2D, InputLayer, Conv2DTranspose, Input, Reshape, merge, concatenate\nfrom keras.layers import Activation, Dense, Dropout, Flatten\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.callbacks import TensorBoard \nfrom keras.models import Sequential, Model\nfrom keras.layers.core import RepeatVector, Permute\nfrom keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img\nfrom skimage.color import rgb2lab, lab2rgb, rgb2gray, gray2rgb\nfrom skimage.transform import resize\nfrom skimage.io import imsave\nimport numpy as np\nimport os\nimport random\nimport tensorflow as tf\nfrom IPython.display import SVG\nfrom keras.utils.vis_utils import model_to_dot\nfrom keras.utils import plot_model\n\n\n# In[2]:\n\n\n# Get images\nX = []\nfor filename in os.listdir('C:/Users/carmel/Desktop/train/'):\n curr= (load_img('C:/Users/carmel/Desktop/train/'+filename))\n #curr=curr.resize((256,256))\n #imsave(\"C:/Users/carmel/Desktop/result/\"+'1'+\".jpg\", curr)\n curr=img_to_array(curr)\n X.append(curr)\nX = np.array(X, dtype=float)\nXtrain = 1.0/255*X\n\n\n\n# In[3]:\n\n\n#Load weights\ninception = InceptionResNetV2(weights=None, include_top=True)\ninception.load_weights('C:/Users/carmel/Desktop/weights/inception_resnet_v2_weights_tf_dim_ordering_tf_kernels.h5')\ninception.graph = tf.get_default_graph()\n\n\n# In[4]:\n\n\nembed_input = Input(shape=(1000,))\n\n#Encoder\nencoder_input = Input(shape=(96, 96, 1,))\nencoder_output = Conv2D(8, (3,3), 
activation='relu', padding='same', strides=2)(encoder_input)\nencoder_output = Conv2D(16, (3,3), activation='relu', padding='same')(encoder_output)\nencoder_output = Conv2D(64, (3,3), activation='relu', padding='same', strides=2)(encoder_output)\nencoder_output = Conv2D(96, (3,3), activation='relu', padding='same')(encoder_output)\nencoder_output = Conv2D(96, (3,3), activation='relu', padding='same', strides=2)(encoder_output)\nencoder_output = Conv2D(192, (3,3), activation='relu', padding='same')(encoder_output)\nencoder_output = Conv2D(192, (3,3), activation='relu', padding='same')(encoder_output)\nencoder_output = Conv2D(96, (3,3), activation='relu', padding='same')(encoder_output)\n\n#Fusion\nfusion_output = RepeatVector(12 * 12)(embed_input) \nfusion_output = Reshape(([12, 12, 1000]))(fusion_output)\nfusion_output = concatenate([encoder_output, fusion_output], axis=3) \nfusion_output = Conv2D(96, (1, 1), activation='relu', padding='same')(fusion_output) \n\n#Decoder\ndecoder_output = Conv2D(48, (3,3), activation='relu', padding='same')(fusion_output)\ndecoder_output = UpSampling2D((2, 2))(decoder_output)\ndecoder_output = Conv2D(24, (3,3), activation='relu', padding='same')(decoder_output)\ndecoder_output = UpSampling2D((2, 2))(decoder_output)\ndecoder_output = Conv2D(12, (3,3), activation='relu', padding='same')(decoder_output)\ndecoder_output = Conv2D(6, (3,3), activation='relu', padding='same')(decoder_output)\ndecoder_output = Conv2D(2, (3, 3), activation='tanh', padding='same')(decoder_output)\ndecoder_output = UpSampling2D((2, 2))(decoder_output)\n\nmodel = Model(inputs=[encoder_input, embed_input], outputs=decoder_output)\n#SVG(model_to_dot(model).create(prog='dot', format='svg'))\n#plot_model(model, to_file='model.png')\n\n\n# In[5]:\n\n\n#Create embedding\ndef create_inception_embedding(grayscaled_rgb):\n grayscaled_rgb_resized = []\n for i in grayscaled_rgb:\n i = resize(i, (299, 299, 3), mode='constant')\n grayscaled_rgb_resized.append(i)\n grayscaled_rgb_resized = np.array(grayscaled_rgb_resized)\n grayscaled_rgb_resized = preprocess_input(grayscaled_rgb_resized)\n with inception.graph.as_default():\n embed = inception.predict(grayscaled_rgb_resized)\n return embed\n\n# Image transformer\ndatagen = ImageDataGenerator(\n shear_range=0.2,\n zoom_range=0.2,\n rotation_range=20,\n horizontal_flip=True)\n\n#Generate training data\nbatch_size = 20\n\ndef image_a_b_gen(batch_size):\n for batch in datagen.flow(Xtrain, batch_size=batch_size):\n grayscaled_rgb = gray2rgb(rgb2gray(batch))\n embed = create_inception_embedding(grayscaled_rgb)\n lab_batch = rgb2lab(batch)\n X_batch = lab_batch[:,:,:,0]\n X_batch = X_batch.reshape(X_batch.shape+(1,))\n Y_batch = lab_batch[:,:,:,1:] / 128\n yield ([X_batch, create_inception_embedding(grayscaled_rgb)], Y_batch)\n \n#Train model \n#tensorboard = TensorBoard(log_dir=\"/output\")\n#Train model \nmodel.compile(optimizer='adam', loss='mse')\nmodel.fit_generator(image_a_b_gen(batch_size), epochs=150, steps_per_epoch=21,shuffle=True)\n\n\n\n\n# In[8]:\n\n\ncolor_me = []\nfor filename in os.listdir('C:/Users/carmel/Desktop/test/'):\n curr= (load_img('C:/Users/carmel/Desktop/test/'+filename))\n #curr=curr.resize((256,256))\n #imsave(\"C:/Users/carmel/Desktop/result/\"+'1'+\".jpg\", curr)\n curr=img_to_array(curr)\n color_me.append(curr)\n\n #color_me.append(img_to_array(load_img('C:/Users/carmel/Desktop/test/'+filename)))\ncolor_me = np.array(color_me, dtype=float)\ncolor_me = 1.0/255*color_me\ncolor_me = 
gray2rgb(rgb2gray(color_me))\ncolor_me_embed = create_inception_embedding(color_me)\ncolor_me = rgb2lab(color_me)[:,:,:,0]\ncolor_me = color_me.reshape(color_me.shape+(1,))\n\n\n# Test model\noutput = model.predict([color_me, color_me_embed])\noutput = output * 128\n\n# Output colorizations\nfor i in range(len(output)):\n cur = np.zeros((96, 96,3))\n cur[:,:,0] = color_me[i][:,:,0]\n cur[:,:,1:] = output[i]\n imsave(\"C:/Users/carmel/Desktop/result/\"+str(i)+\".jpg\", lab2rgb(cur))\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"SegevHaviv/Image-Proccessing-Project","sub_path":"Part 3/AutoColoring.py","file_name":"AutoColoring.py","file_ext":"py","file_size_in_byte":5818,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"72774312918","text":"#!/usr/bin/python3\n\n# run with '--index' to name images with the critterpedia index number\n# useful for getting things to sort properly for sprite sheet creation\n\nfrom urllib.request import urlopen\nfrom bs4 import BeautifulSoup\nimport os\nimport re\nimport requests\nimport json\nimport argparse\n\nsprites_dir = 'sprites'\nurls = {\n 'bugs' : 'https://animalcrossing.fandom.com/wiki/Bugs_(New_Horizons)',\n 'fish' : 'https://animalcrossing.fandom.com/wiki/Fish_(New_Horizons)',\n 'creatures' : 'https://animalcrossing.fandom.com/wiki/Deep-sea_creatures_(New_Horizons)'\n} \n\n# for finding index number by critter name\nwith open('critters.json') as f:\n critters_json = json.load(f)\ncritters={'fish':{},'bugs':{},'creatures':{}}\nfor i in critters_json:\n critters[i['type']].update({i['id']:i['index']})\n\ndef find_images(url):\n html = urlopen(url)\n bs = BeautifulSoup(html, 'html.parser')\n images = bs.find_all('img', {'data-src':re.compile('NH-Icon-.*?.png')})\n return images\n\ndef download_images(critter_type,use_index=False):\n if not os.path.exists(f'{sprites_dir}/{critter_type}'):\n os.makedirs(f'{sprites_dir}/{critter_type}')\n images = find_images(urls[critter_type])\n for image in images:\n filename = image['data-image-key'].lower().replace('nh-icon-','').replace('.png','')\n filename = re.sub(r'[^a-z]+', '', filename) #remove special characters\n if use_index:\n filename = str(critters[critter_type][filename]).zfill(2)\n source = (image['data-src'].split('.png')[0]) + '.png'\n print(source)\n img_data = requests.get(source).content\n with open(f'{sprites_dir}/{critter_type}/{filename}.png', 'wb') as handler:\n handler.write(img_data)\n print(f'{filename}.png')\n\ndef main():\n arg_parser = argparse.ArgumentParser()\n arg_parser.add_argument(\"--index\", help=\"write results to csv\", action=\"store_true\")\n args = arg_parser.parse_args()\n\n if args.index:\n download_images('bugs',True) \n download_images('fish',True)\n download_images('creatures',True)\n return\n else:\n download_images('bugs') \n download_images('fish')\n download_images('creatures')\n\nmain()\n","repo_name":"iyre/critter-tracker","sub_path":"setup/sprites.py","file_name":"sprites.py","file_ext":"py","file_size_in_byte":2231,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"85"} +{"seq_id":"32447317454","text":"from typing import List\nfrom enum import IntEnum\nfrom ....delivery_domain import abstract_domain_models\nfrom .delivery_package import DeliveryPackage\n\n\nclass DeliveryStatus(IntEnum):\n PENDING_ACCEPTANCE = 4\n PICKUP_IN_PROGRESS = 5\n PICKED = 6\n ITEM_PICKED_UP = 7\n DELIVERED = 8\n UNDELIVERED = 9\n\n\nclass Delivery(abstract_domain_models.AggregateRoot):\n\n def 
__init__(self,\n order_id: str,\n delivery_type: int,\n delivery_price: int,\n delivery_packages: List[DeliveryPackage],\n delivery_remark: str\n ):\n \n if not order_id:\n raise ValueError(\"Missing order id!\")\n\n if not delivery_type:\n raise ValueError(\"Missing delivery type!\")\n\n self._delivery_package = set()\n for delivery_package in delivery_packages:\n self._delivery_package.add(delivery_package)\n\n self._delivery_status = DeliveryStatus.PENDING_ACCEPTANCE\n self._delivery_type = delivery_type\n\n def set_as_pick_up_in_progress(self):\n self._delivery_status = DeliveryStatus.PICKUP_IN_PROGRESS\n\n def set_as_picked_up(self):\n self._delivery_status = DeliveryStatus.ITEM_PICKED_UP\n\n \n\n\n\n\n","repo_name":"sfayn2/sfayn_gqlserver","sub_path":"services/delivery/delivery_domain/aggregates_model/delivery_aggregate/delivery.py","file_name":"delivery.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"85"}
+{"seq_id":"73877669076","text":"from requirement import *\n# batches layers and objects\nfrom nnfs.datasets import spiral_data\n\nnnfs.init()\n# np.random.seed(0)\nX= [[1,2,3,2.5],\n [2.0,5.0,-1.0,2.0],\n [-1.5,2.7,3.3,-0.8]] #initialising inputs\n\nX, y = spiral_data(100, 3)\n\nclass Layer_Dense:\n def __init__(self, n_inputs, n_neurons):\n self.weights = 0.10*np.random.randn(n_inputs, n_neurons)\n self.biases = np.zeros((1, n_neurons))\n\n\n def forward(self, inputs):\n self.output = np.dot(inputs, self.weights) + self.biases\n\nclass Activation_ReLU:\n def forward(self,inputs):\n self.output = np.maximum(0,inputs)\n\n\nlayer1 = Layer_Dense(2, 5)\n# layer2 = Layer_Dense(5, 2)\nactivation1 = Activation_ReLU()\n\nlayer1.forward(X)\n# activation1.forward(layer1.output)\nactivation1.forward(layer1.output)\nprint(layer1.output)\n\n\n\n","repo_name":"extinctsion/neuralNetworkScratch","sub_path":"stages/neuralnetwork7.py","file_name":"neuralnetwork7.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"}
+{"seq_id":"19982220679","text":"import csv\r\nimport os\r\nimport pandas as pd\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nplay=True\r\nwhile(play==True):\r\n\tprint(\"Enter your knowledge:\")\r\n\tprint()\r\n\told_pri=pd.read_csv(\"users.csv\")\r\n\tda_key=input(\"Key: \")\r\n\tda_val=input(\"Value: \")\r\n\r\n\tmy_pri=pd.DataFrame({\"mykey\":[da_key], \"myvalue\":[da_val]})\r\n\r\n\tnew_pri=old_pri.append(my_pri)\r\n\r\n\tprint(new_pri)\r\n\r\n\tnew_pri.to_csv(\"users.csv\", index=False)\r\n\r\n\tos.system(\"cls\")\r\n","repo_name":"lsav00/privacy","sub_path":"mypri.py","file_name":"mypri.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"}
+{"seq_id":"70694099158","text":"from datetime import datetime\n\ndef get_timestamp():\n return datetime.now().strftime((\"%Y-%m-%d %H:%M:%S\"))\n\n# Data to serve with our API\nPOSTS = {\n \"0\": {\n \"id\": 0,\n \"location\": \"north\",\n \"user_id\": 0,\n \"username\": \"guy\",\n \"timestamp\": get_timestamp(),\n \"details\": \"none\"\n },\n \"1\": {\n \"id\": 1,\n \"location\": \"south\",\n \"user_id\": 1,\n \"username\": \"gal\",\n \"timestamp\": get_timestamp(),\n \"details\": \"none\"\n },\n}\n\n# Create a handler for our read (GET) people\ndef read():\n \"\"\"\n This function responds to a request for /api/people\n with the complete list of people\n\n :return: sorted list of people\n \"\"\"\n # Create the list 
of people from our data\n return [POSTS[key] for key in sorted(POSTS.keys())]","repo_name":"blaq/Coursework","sub_path":"CS407 Microservice Architecture/hw8/people.py","file_name":"people.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"19721812625","text":"import csv\nimport numpy as np\n\nyear = str(input('From what year do you want to know the highest amount of track per person?'))\nfilename1 = 'rail_lines.csv'\nfile1 = open(filename1, encoding='utf-8')\ntable1 = csv.reader(file1, delimiter=',', quotechar='\"')\ntable1 = list(table1)\nheader = table1[4:5][0][4:]\n\nrail = {}\nfor i, row in enumerate(table1[5:]):\n for j, col in enumerate(row[4:]):\n if col != '':\n if row[1] not in rail:\n rail[row[1]] = {}\n rail[row[1]][header[j]] = float(col)\n else:\n if row[1] not in rail:\n rail[row[1]] = {}\n rail[row[1]][header[j]] = 0\n\nfilename2 = 'population.csv'\nfile2 = open(filename2, encoding='utf-8')\ntable2 = csv.reader(file2, delimiter=',', quotechar='\"')\ntable2 = list(table2)\nheader = table2[4:5][0][4:]\n\npopulation = {}\nfor i, row in enumerate(table2[5:]):\n for j, col in enumerate(row[4:]):\n if col != '':\n if row[1] not in population:\n population[row[1]] = {}\n population[row[1]][header[j]] = float(col)\n else:\n if row[1] not in population:\n population[row[1]] = {}\n population[row[1]][header[j]] = 0\n\nif year == 'all':\n yeet = np.linspace(1960, 2020, 61)\n for y in yeet:\n j = str(y)[:-2]\n maximum = float(0)\n maximum_i = ''\n for i in rail:\n if rail[i][j] != 0 and population[i][j] != 0:\n track_per_pop = (rail[i][j]) * 1000 / (population[i][j])\n if track_per_pop > maximum:\n maximum = track_per_pop\n maximum_i = i\n if maximum_i != '':\n print('In the year', j, 'the country with the most track per person is:\\n', str(maximum_i), str(maximum))\nelse:\n maximum = float(0)\n maximum_i = ''\n for i in rail:\n if rail[i][year] != 0 and population[i][year] != 0:\n track_per_pop = (rail[i][year]) * 1000 / (population[i][year])\n if track_per_pop > maximum:\n maximum = track_per_pop\n maximum_i = i\n print('In the year', year, 'the country with the most track per person is:\\n', str(maximum_i), str(maximum))\n","repo_name":"Mario-bgt/python_for_physics","sub_path":"scientific_computing/railway_track_per_capita/railway_track_per_cap.py","file_name":"railway_track_per_cap.py","file_ext":"py","file_size_in_byte":2222,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"8011559279","text":"import model\n\ndef izpis_igre(igra):\n tekst = (\n 'Stevilo preostalih poskusov: {stevilo_preostalih_poskusov} \\n\\n'\n '{pravilni_del_gesla} \\n\\n'\n 'Neuspeli poskusi: {neuspeli_poskusi} \\n\\n'\n \n ).format(\n stevilo_preostalih_poskusov = model.STEVILO_DOVOLJENIH_NAPAK - igra.stevilo_napak() + 1,\n pravilni_del_gesla = igra.pravilni_del_gesla(),\n neuspeli_poskusi = igra.nepravilni_ugibi()\n )\n\n return tekst\n\ndef izpis_zmage(igra):\n tekst = (\n 'Jupiiiiii, zmaga! Geslo je bilo: {geslo}'\n ).format(\n geslo = igra.pravilni_del_gesla()\n )\n return tekst\n\n\ndef izpis_poraza(igra):\n tekst = (\n 'Buuu, poraz! 
Geslo je bilo: {geslo}'\n ).format(\n geslo = igra.geslo\n )\n return tekst\n\ndef zahtevaj_vnos():\n return input('Crka: ')\n\ndef pozeni_vmesnik():\n\n igra = model.nova_igra()\n\n while True:\n print(izpis_igre(igra))\n poskus = zahtevaj_vnos()\n igra.ugibaj(poskus)\n if igra.zmaga():\n print(izpis_zmage(igra))\n break\n elif igra.poraz():\n print(izpis_poraza(igra))\n break\n return\n\npozeni_vmesnik()","repo_name":"lueticana/Vislice","sub_path":"tekstovni_vmesnik.py","file_name":"tekstovni_vmesnik.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"sl","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"16327132061","text":"# -*- coding: utf-8 -*-\n#------------------------------------------------------------\nimport sys\nPY3 = False\nif sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int\n\nif PY3:\n import urllib.parse as urlparse # Es muy lento en PY2. En PY3 es nativo\nelse:\n import urlparse # Usamos el nativo de PY2 que es más rápido\n\nimport re\n\nfrom platformcode import config, logger\nfrom core import scrapertools\nfrom core.item import Item\nfrom core import servertools\nfrom core import httptools\nfrom bs4 import BeautifulSoup\nfrom modules import autoplay\n\nIDIOMAS = {'vo': 'VO'}\nlist_language = list(IDIOMAS.values())\nlist_quality = ['default']\nlist_servers = ['vidlox']\n\ncanonical = {\n 'channel': 'ipornovideos', \n 'host': config.get_setting(\"current_host\", 'ipornovideos', default=''), \n 'host_alt': [\"https://ipornovideos.com/\"], \n 'host_black_list': [], \n 'CF': False, 'CF_test': False, 'alfa_s': True\n }\nhost = canonical['host'] or canonical['host_alt'][0]\n\n# solo los mas nuevos resto K2C\n\ndef mainlist(item):\n logger.info()\n itemlist = []\n\n autoplay.init(item.channel, list_servers, list_quality)\n\n itemlist.append(Item(channel=item.channel, title=\"Nuevos\" , action=\"lista\", url=host + \"page/1/\"))\n itemlist.append(Item(channel=item.channel, title=\"Canal\" , action=\"categorias\", url=host))\n itemlist.append(Item(channel=item.channel, title=\"Categorias\" , action=\"categorias\", url=host))\n itemlist.append(Item(channel=item.channel, title=\"Buscar\", action=\"search\"))\n\n autoplay.show_option(item.channel, itemlist)\n\n return itemlist\n\n\ndef search(item, texto):\n logger.info()\n texto = texto.replace(\" \", \"+\")\n item.url = \"%s?s=%s\" % (host,texto)\n try:\n return lista(item)\n except:\n import sys\n for line in sys.exc_info():\n logger.error(\"%s\" % line)\n return []\n\n\ndef categorias(item):\n logger.info()\n itemlist = []\n soup = create_soup(item.url)\n if \"Canal\" in item.title:\n matches = soup.find('li', id='tag_cloud-4').find_all('a')\n else:\n matches = soup.find('li', id='categories-2').find_all('a')\n for elem in matches:\n url = elem['href']\n title = elem.text.strip()\n url = urlparse.urljoin(item.url,url)\n thumbnail = \"\"\n plot = \"\"\n itemlist.append(Item(channel=item.channel, action=\"lista\", title=title, url=url,\n thumbnail=thumbnail , plot=plot) )\n return itemlist\n\n\ndef create_soup(url, referer=None, unescape=False):\n logger.info()\n if referer:\n data = httptools.downloadpage(url, headers={'Referer': referer}, canonical=canonical).data\n else:\n data = httptools.downloadpage(url, canonical=canonical).data\n if unescape:\n data = scrapertools.unescape(data)\n soup = BeautifulSoup(data, \"html5lib\", from_encoding=\"utf-8\")\n return soup\n\n\ndef lista(item):\n logger.info()\n itemlist = []\n soup = create_soup(item.url)\n 
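# each entry on the page sits in a div whose class begins with post-<number>; the regex below matches those containers\n    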
matches = soup.find_all('div', class_=re.compile(r\"^post-\\d+\"))\n for elem in matches:\n url = elem.a['href']\n title = elem.a.text.strip()\n thumbnail = elem.img['src']\n plot = \"\"\n itemlist.append(Item(channel=item.channel, action=\"findvideos\", title=title, url=url, thumbnail=thumbnail,\n plot=plot, language=\"VO\", fanart=thumbnail, contentTitle=title ))\n next_page = soup.find('div', class_='nav-previous')\n if next_page:\n next_page = next_page.a['href']\n next_page = urlparse.urljoin(item.url,next_page)\n itemlist.append(Item(channel=item.channel, action=\"lista\", title=\"[COLOR blue]Página Siguiente >>[/COLOR]\", url=next_page) )\n return itemlist\n\n\ndef findvideos(item):\n logger.info()\n itemlist = []\n soup = create_soup(item.url).find('div', class_='entry-content')\n match = re.compile('data-fo=\"([^\"]+)\"\\s*data-id=\"([^\"]+)\">', re.DOTALL).findall(str(soup))\n blocks = re.compile('\\[data-id=\"([^\"]+)\"\\]\\{display:block;\\}', re.DOTALL).findall(str(soup))\n for m in match:\n if m[0] in ('k2s.cc', 'tezfiles.com' ) : continue #, 'www.xmegadrive.com'\n if m[1] not in blocks: continue\n id = m[1]\n fo = m[0]\n url = \"%s/wp-content/themes/twentyten/ajax.php\" %host\n headers = {\"Content-Type\": \"application/x-www-form-urlencoded; charset=UTF-8\", \"X-Requested-With\": \"XMLHttpRequest\"}\n post= \"type=link&id=%s&fo=%s\" %(id, fo)\n data = httptools.downloadpage(url, post=post, headers=headers).data\n url = scrapertools.find_single_match(data, \"> ~/.sync-log\n\n\"\"\"\n\nfor direc in dirs:\n docs = []\n for doc in os.listdir(direc):\n if not (doc.endswith(\".tex\")) or (doc == \"header.tex\"):\n continue\n f.write(TARGET_FORMAT.format(dir=direc, tex=doc[:-4]))\n\n docs.append(direc + \"/\" + doc[:-4] + \"_trim.pdf\")\n f.write(\"{dir}: {deps}\\n\\n\".format(dir=direc, deps=\" \".join(docs)))\n\nf.write(\"\"\"\\\n.PHONY: clean\nclean:\n\\trm -vf */*~ */*.log */*.bbl */*.blg */*.toc */*.aux */*.out */*.idx */*.ilg */*.ind\n\\trm -vf */*_html.tex */*_trim.tex */*_def.tex */*_def_thm.tex */*_thm_proof.tex\n\\trm -vrf */*_output */*_tmp\n\"\"\")\n\nf.close()\n","repo_name":"b-mehta/cam-notes","sub_path":"make_make.py","file_name":"make_make.py","file_ext":"py","file_size_in_byte":1143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"85"} +{"seq_id":"6986811773","text":"class Solution:\n def removeSubfolders(self, folder: List[str]) -> List[str]:\n \n # Sort, will guarantee the \"/a\" will comes before \"/a/b\" and they appear in sequence\n folder.sort()\n res = []\n res.append(folder[0])\n \n for i in range(1, len(folder)):\n # Append \"/\" to the previous is for the case: [\"/a/b/c\",\"/a/b/ca\"]\n if folder[i].startswith(res[-1] + \"/\"):\n continue\n res.append(folder[i])\n \n return res\n","repo_name":"mmkhaque/LeetCode_Practice","sub_path":"1233. Remove Sub-Folders from the Filesystem.py","file_name":"1233. 
Remove Sub-Folders from the Filesystem.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"}
+{"seq_id":"3548660979","text":"import pandas as pd\nimport numpy as np\n\n\ndef g(df):\n for i in range(len(df)):\n tot = 0\n if i != 0:\n if df.loc[i, \"UserId\"] == df.loc[i - 1, \"UserId\"]:\n continue\n for j in range(len(df)):\n if df.loc[i, \"UserId\"] == df.loc[j, \"UserId\"]:\n tot += 1\n l = int(0.2 * tot)\n dfupdate = df.iloc[i : i + tot].sample(l, random_state=0)\n dfupdate.Quantity = 0\n df.update(dfupdate)\n return df\n\n\ndef define_test_input(args):\n if args.test_case == 1:\n df = pd.DataFrame(\n {\n \"UserId\": [1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3],\n \"ProductId\": [1, 4, 7, 4, 2, 1, 1, 4, 7, 4, 2, 1, 1, 4, 7],\n \"Quantity\": [6, 1, 3, 2, 7, 2, 6, 1, 3, 2, 7, 2, 6, 1, 3],\n }\n )\n if args.test_case == 2:\n df = pd.DataFrame(\n {\n \"UserId\": [1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3],\n \"ProductId\": [6, 1, 3, 2, 7, 2, 6, 1, 3, 2, 7, 2, 6, 1, 3],\n \"Quantity\": [1, 4, 7, 4, 2, 1, 1, 4, 7, 4, 2, 1, 1, 4, 7],\n }\n )\n return df\n\n\nif __name__ == \"__main__\":\n import argparse\n import os\n import pickle\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--test_case\", type=int, default=1)\n args = parser.parse_args()\n\n df = define_test_input(args)\n\n with open(\"input/input{}.pkl\".format(args.test_case), \"wb\") as f:\n pickle.dump(df, f)\n\n result = g(df)\n with open(\"ans/ans{}.pkl\".format(args.test_case), \"wb\") as f:\n pickle.dump(result, f)\n","repo_name":"BoyuanJackChen/transformers-v4.29","sub_path":"evaluations/ds_1000/ds1000_data/Pandas/Insertion/q129/test_generate_pickle.py","file_name":"test_generate_pickle.py","file_ext":"py","file_size_in_byte":1626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"}
+{"seq_id":"4453464092","text":"import cv2 \r\nimport numpy as np\r\nfrom os import listdir\r\nfrom os.path import isfile, join\r\n\r\ndata_path = 'final_project/faces/'\r\nonlyfiles = [f for f in listdir(data_path) if isfile(join(data_path, f))]\r\nTraining_Data, Labels = [], []\r\n\r\n#loop as many times as there are files \r\nfor i, files in enumerate(onlyfiles):\r\n image_path = data_path + onlyfiles[i]\r\n images = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)\r\n\r\n if images is None:\r\n continue\r\n Training_Data.append(np.asarray(images, dtype = np.uint8))\r\n Labels.append(i)\r\n\r\n#exit if there is no data to train on.\r\nif len(Labels)==0:\r\n print(\"There is no data to train\")\r\n exit()\r\n\r\nLabels = np.asarray(Labels, dtype=np.int32)\r\n\r\n#create the model \r\nmodel = cv2.face.LBPHFaceRecognizer_create()\r\n\r\n#start training \r\nmodel.train(np.asarray(Training_Data), np.asarray(Labels))\r\nprint(\"Model Training Complete!!!\")\r\n\r\n#########################################\r\n\r\nface_classifier = cv2.CascadeClassifier('final_project/haarcascade_frontalface_default.xml')\r\n#crop only the face region out of the whole photo and return it \r\ndef face_extractor(img, size=0.5):\r\n gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\r\n faces = face_classifier.detectMultiScale(gray,1.3,5)\r\n if len(faces) == 0:\r\n return img, []\r\n for(x,y,w,h) in faces:\r\n cv2.rectangle(img, (x,y), (x+w, y+h), (0, 255, 255), 2)\r\n roi = img[y:y+h, x:x+w]\r\n roi = cv2.resize(roi, (200, 200)) \r\n return img, roi\r\n #draw a rectangle at the detected coordinates (img) and pass the cropped detected region (roi)\r\n\r\n##########################################\r\n\r\n\r\nframe=cv2.imread('final_project/yjg3.jpg', cv2.IMREAD_COLOR) \r\n\r\nimage, face = face_extractor(frame)\r\ntry:\r\n face = cv2.cvtColor(face,cv2.COLOR_BGR2GRAY)\r\n result = model.predict(face)\r\n #result[1] is the confidence; the closer it is to 0, the closer the match to the enrolled user.\r\n if result[1] < 500:\r\n confidence = int(100*(1-(result[1])/300))\r\n display_string = str(confidence)+'% confidence it is user'\r\n cv2.putText(image, display_string, (100, 120), cv2.FONT_HERSHEY_COMPLEX,\r\n 1,(250, 120, 255),2)\r\n\r\n #if greater than 80, treat it as the same person \r\n if confidence>=80:\r\n cv2.putText(image, \"Matched\", (250, 450), cv2.FONT_HERSHEY_COMPLEX,\r\n 1, (0, 255, 0), 2 )\r\n cv2.imshow('Face Cropper', image)\r\n else:\r\n cv2.putText(image, \"Un Matched\", (250, 450), cv2.FONT_HERSHEY_COMPLEX, \r\n 1, (0, 0, 255), 2)\r\n cv2.imshow('Face Cropper', image)\r\n print('@')\r\n \r\n\r\nexcept:\r\n cv2.putText(image, \"Face Not Found\", (250, 450), cv2.FONT_HERSHEY_COMPLEX,\r\n 1, (255, 0, 0), 2)\r\n cv2.imshow('Face Cropper', image)\r\n pass\r\n\r\ncv2.waitKey(0)","repo_name":"sunmin1990/Finel_Project_CNN","sub_path":"part3.py","file_name":"part3.py","file_ext":"py","file_size_in_byte":2922,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"}
+{"seq_id":"22201898385","text":"#!/usr/bin/env python3\n\nfrom TrelloWarehouse import trello_warehouse\nimport logging\nimport tempfile\nimport os\nimport yaml\n\nimport httplib2\nfrom apiclient import discovery\n\ndef main():\n logger = logging.getLogger(\"sysengreporting\")\n logger.setLevel(logging.DEBUG)\n\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n\n _stdlog = logging.StreamHandler()\n _stdlog.setLevel(logging.DEBUG)\n _stdlog.setFormatter(formatter)\n\n logger.addHandler(_stdlog)\n\n\n with open(\"config/report.yml\", 'r') as stream:\n report_config = yaml.load(stream)\n\n with open(\"config/trello_secret.yml\", 'r') as stream:\n trello_secret_config = yaml.load(stream)\n\n warehouse = trello_warehouse.TrelloWarehouse(report_config, trello_secret_config)\n logger.info('Welcome to the Warehouse!')\n\n\n \n if not warehouse.get_granular_report():\n return False\n warehouse.write_gspreadsheet()\n\nif __name__ == '__main__':\n\n main()\n","repo_name":"t0ffel/mo-reporting","sub_path":"reporting/report.py","file_name":"report.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"85"}
+{"seq_id":"24608792025","text":"import torch\nimport numpy as np\nimport argparse\nimport soundfile as sf\n#from datasets.lr_musdb import musdb\nimport museval\n#import norbert\nfrom pathlib import Path\nimport scipy.signal\n#import resampy\nfrom asteroid.complex_nn import torch_complex_from_magphase\nimport os\nimport warnings\nimport sys\nimport pandas as pd\nimport torch.nn.functional as F\nimport torchaudio\n\nimport os\nimport glob\nimport tqdm\nimport subprocess\n\n#separator = torch.hub.load('sigsep/open-unmix-pytorch', 'umxl')\n\nfrom eval import inference_args\n\n#this uses the folder structure in my split samples\ndef load_from_tcl_sample(root_dir):\n ls_contents = os.listdir(root_dir)\n all_files = []\n directories = [] #parallel array with allfiles just to keep track of directory names\n for item in ls_contents:\n for root, dirs, files in os.walk(os.path.join(root_dir, item)):\n for test_file in files:\n all_files.append(os.path.join(test_file))\n directories.append(item)\n return all_files, directories\n\n\n#input: 1x2xsamples, in a torch tensor\n#output: (with zero padding if needed), and number of padded samples\ndef 
frame_cutter(audio_tensor, frame_len_s, sample_rate):\n frame_size = frame_len_s * sample_rate\n \n #padding (second modulo keeps remainder at 0 when the tensor is already frame-aligned, instead of padding a full extra frame):\n remainder = (frame_size - (audio_tensor.shape[2] % frame_size)) % frame_size\n if remainder != 0:\n audio_tensor = F.pad(input=audio_tensor, pad=(0, remainder, 0, 0, 0, 0), value=0)\n \n split_tensor = torch.split(audio_tensor, frame_size, dim=2)\n #split_tensor = torch.cat(split_tensor, dim=0)\n \n return split_tensor, remainder\n\n#input nx4x2xsamples, start padding\n#output 1x2xsamples, without the previously applied padding\ndef frame_gluer(prediction, remainder):\n #maybe no need to remove padding since it is applied at the end; I can just cut the audio like I did in the data loaders after merging\n #torch_seq = torch.split(prediction, 1, dim=0) #commented as now we pass ready tuples\n return torch.cat(prediction, dim=3)\n\n\ndef eval_main(\n root,\n samplerate=44100,\n niter=1,\n alpha=1.0,\n softmask=False,\n residual_model=False,\n model_path='.',\n outdir=None,\n start=0.0,\n duration=-1.0,\n no_cuda=False,\n eval_data_path=None, \n instrument='drums',\n variant='no_concat',\n):\n\n\n if os.path.exists(outdir):\n print(\"Results of previous run saved in your chosen outdir: {}, please choose another location\".format(outdir), file=sys.stderr)\n else:\n outdir = os.path.abspath(outdir)\n\n Path(outdir).mkdir(exist_ok=True, parents=True)\n print(\"Evaluated results will be saved in:\\n {}\".format(outdir), file=sys.stderr)\n\n if not eval_data_path:\n print(\"No location given for test data, please set one in cfg/eval.yml\", file=sys.stderr)\n exit()\n\n torch.cuda.empty_cache()\n use_cuda = not no_cuda and torch.cuda.is_available()\n device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n\n test_dataset, directories = load_from_tcl_sample(eval_data_path)\n \n for track, directory in zip(test_dataset, directories):\n output_path = Path(os.path.join(outdir, directory))\n output_path.mkdir(exist_ok=True, parents=True)\n\n track_path = os.path.join(eval_data_path, directory, track)\n #p_out = subprocess.run([\"python3\", \"-m\", \"demucs.separate\", \"-n\", \n # \"tasnet\",\"-o\", output_path, \"-f\", track[:-4], \"--mp3\", track_path], capture_output=True)\n\n subprocess.run([\"python3\", \"-m\", \"demucs.separate\", \"-n\", \"demucs\", \"-o\", \n os.path.join(output_path, track[:-4]), \"--mp3\", track_path])\n\n #print(p_out.returncode)\n #print(p_out.stdout.decode())\n #print(p_out.stderr.decode())\n # for the version used to generate the metrics, read the files and put\n # them in these dicts.\n '''estimates = {}\n estimates['vocals'] = prediction[0][0] \n estimates['drums'] = prediction[0][1]\n estimates['bass'] = prediction[0][2]\n estimates['other'] = prediction[0][3]\n\n #adapt the output of openunmix: it outputs 22050, our gt loaded from the musdb package is\n #in 44100. 
so, we resample\n if instrument == 'bass':\n estimates['degraded_instrument_track'] = estimates['bass']\n estimates['degraded_backing_track'] = estimates['vocals'] + estimates['other'] + estimates['drums']\n\n elif instrument == 'drums':\n estimates['degraded_instrument_track'] = estimates['drums']\n estimates['degraded_backing_track'] = estimates['vocals'] + estimates['other'] + estimates['bass']\n\n #for key, val in estimates.items():\n # estimates[key] = resampy.resample(estimates[val], 22050, 44100, axis=0)\n\n output_path = Path(os.path.join(outdir, directory, track))\n output_path.mkdir(exist_ok=True, parents=True)\n\n for target, estimate in estimates.items():\n sf.write(str(output_path / Path(target).with_suffix(\".wav\")), estimate.T, samplerate)\n '''\n \nif __name__ == \"__main__\":\n # Training settings\n parser = argparse.ArgumentParser(description=\"OSU Inference\", add_help=False)\n\n parser.add_argument(\"--root\", type=str, help=\"The path to the MUSDB18 dataset\")\n \n parser.add_argument(\n \"--outdir\",\n type=str,\n default=\"./results_using_pre-trained\",\n help=\"Results path where \" \"best_model.pth\" \" is stored\",\n )\n\n parser.add_argument(\"--start\", type=float, default=0.0, help=\"Audio chunk start in seconds\")\n\n parser.add_argument(\n \"--duration\",\n type=float,\n default=-1.0,\n help=\"Audio chunk duration in seconds, negative values load full track\",\n )\n\n parser.add_argument(\n \"--no-cuda\", action=\"store_true\", default=False, help=\"disables CUDA inference\"\n )\n\n args, _ = parser.parse_known_args()\n args = inference_args(parser, args)\n # Somehow these are not getting called at all.\n\n import yaml\n from asteroid.utils import prepare_parser_from_dict, parse_args_as_dict\n\n with open(\"cfg/eval_tcl_tasnet.yml\") as f:\n eval_conf = yaml.safe_load(f)\n eval_parser = prepare_parser_from_dict(eval_conf, parser=parser)\n\n arg_dic, plain_args = parse_args_as_dict(eval_parser, return_plain_args=True)\n\n model = os.path.join(plain_args.model_path, plain_args.model_name)\n \n #model = os.path.join(\"test.pth\")\n\n eval_main(\n root='', #musdb.__path__[0],\n samplerate=plain_args.samplerate,\n alpha=args.alpha,\n softmask=args.softmask,\n niter=args.niter,\n residual_model=args.residual_model,\n model_path=plain_args.model_path, \n outdir=plain_args.output_path,\n start=args.start,\n duration=args.duration,\n no_cuda=args.no_cuda,\n eval_data_path = plain_args.test_data_path,\n instrument=plain_args.instrument,\n variant=plain_args.model\n )\n","repo_name":"Alia-morsi/leakage_removal","sub_path":"eval_tcl_tasnet.py","file_name":"eval_tcl_tasnet.py","file_ext":"py","file_size_in_byte":6922,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"22832579646","text":"import glvars\nimport katagames_sdk as katasdk\n\nkataen = katasdk.engine\n\npygame = kataen.import_pygame()\nEventReceiver = kataen.EventReceiver\nEngineEvTypes = kataen.EngineEvTypes\n\nclass Avatar:\n def __init__(self):\n self.pos = [240, 135]\n self.direct = 0\n\nclass AvatarView(EventReceiver):\n def __init__(self, avref):\n super().__init__()\n self.avref = avref\n self.img = pygame.image.load('assets/rock.png')\n self.img.set_colorkey((255,0,255))\n self.offset = list(self.img.get_size())\n self.offset[0] -= self.offset[0]//2\n self.offset[1] -= self.offset[1]//2\n self.offset[0] *= -1\n self.offset[1] *= -1\n def proc_event(self, ev, source):\n if ev.type == EngineEvTypes.PAINT:\n 
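# paint event: clear the backbuffer, then blit the avatar image shifted by its precomputed centering offset\n            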
ev.screen.fill(pygame.color.Color('antiquewhite2'))\n ev.screen.blit(\n self.img,\n (self.avref.pos[0]+self.offset[0], self.avref.pos[1]+self.offset[1])\n )\n\nclass AvatarCtrl(EventReceiver):\n def __init__(self, avref):\n super().__init__()\n self.avref = avref\n def proc_event(self, ev, source):\n if ev.type == EngineEvTypes.LOGICUPDATE:\n avdir = self.avref.direct\n self.avref.pos[1] = (self.avref.pos[1] + avdir) % glvars.scr_size[1]\n elif ev.type == pygame.KEYDOWN:\n if ev.key == pygame.K_UP:\n self.avref.direct = -1\n elif ev.key == pygame.K_DOWN:\n self.avref.direct = 1\n elif ev.type == pygame.KEYUP:\n prkeys = pygame.key.get_pressed()\n if not(prkeys[pygame.K_UP] or prkeys[pygame.K_DOWN]):\n self.avref.direct = 0\n\n","repo_name":"gaudiatech/sdk-testsuite","sub_path":"evotests/minimumMultifileProject/mon_package/compo.py","file_name":"compo.py","file_ext":"py","file_size_in_byte":1520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"37835395523","text":"'''\nRadix Sort is a non-comparison-based sorting algorithm that works by distributing elements into \nbuckets according to their digits, from the least significant digit (LSD) to the most significant digit (MSD), \nand then collecting them back into a single array. Radix Sort can be implemented using counting sort as a subroutine \nfor each digit position.\n'''\n\ndef countingSort(arr, exp): # O(n * k) where k is range of input and space complexity O(n)\n n = len(arr)\n output = [0] * n\n count = [0] * 10\n\n # Count occurrences of each digit at the specified exponent\n for i in range(n):\n index = arr[i] // exp\n count[index % 10] += 1\n\n # Update count array to store actual positions\n for i in range(1, 10):\n count[i] += count[i - 1]\n\n # Build the output array using the positions from count array\n i = n - 1\n while i >= 0:\n index = arr[i] // exp\n output[count[index % 10] - 1] = arr[i]\n count[index % 10] -= 1\n i -= 1\n\n # Copy the output array back to the original array\n for i in range(n):\n arr[i] = output[i]\n\ndef radixSort(arr):\n max_element = max(arr)\n\n # Do counting sort for each digit position, from LSD to MSD\n exp = 1\n while max_element // exp > 0:\n countingSort(arr, exp)\n exp *= 10\n\nnums = [4,8,2,0,4,142,74625,85,6,2,4,8,88,52,2]\nradixSort(nums)\nprint(nums)","repo_name":"jonazz1995/leetcode","sub_path":"Sorting/radixSort.py","file_name":"radixSort.py","file_ext":"py","file_size_in_byte":1384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"28172444845","text":"import numpy as np\r\nfrom ikpy.chain import Chain\r\nfrom ikpy.link import OriginLink, URDFLink\r\nimport matplotlib.pyplot as plt\r\nimport ikpy.utils.plot as plot_utils\r\nimport matplotlib.widgets as widgets\r\n\r\n\r\nrobot_arm_chain = Chain.from_urdf_file(\"J:\\\\Documents\\\\CRS_Robot_Arm\\\\urdf-test.xml\")\r\n\r\n\r\n# Define the robot arm kinematic chain\r\nrobot_arm_chain = Chain(name='5_dof_robot_arm', links=[\r\n \r\n OriginLink(),\r\n URDFLink(\r\n name=\"base_link\",\r\n bounds=[-np.pi, np.pi],\r\n origin_translation=[0, 0, 0.1],\r\n origin_orientation=[0, 0, 0],\r\n rotation=[0, 0, 1],\r\n ),\r\n URDFLink(\r\n name=\"waist\",\r\n bounds=[-np.pi, np.pi],\r\n origin_translation=[0, 0, 1],\r\n origin_orientation=[0, 0, 0],\r\n rotation=[0, 1, 0],\r\n ),\r\n URDFLink(\r\n name=\"shoulder\",\r\n bounds=[-np.pi, np.pi],\r\n origin_translation=[0, 0, 1],\r\n origin_orientation=[0, 0, 0],\r\n rotation=[0, 1, 
0],\r\n ),\r\n URDFLink(\r\n name=\"elbow\",\r\n bounds=[-np.pi, np.pi],\r\n origin_translation=[0, 0, 1],\r\n origin_orientation=[0, 0, 0],\r\n rotation=[0, 1, 0],\r\n ),\r\n URDFLink(\r\n name=\"wrist_bend\",\r\n bounds=[-np.pi, np.pi],\r\n origin_translation=[0, 0, 0.5],\r\n origin_orientation=[0, 0, 0],\r\n rotation=[0, 0, 1],\r\n ),\r\n URDFLink(\r\n name=\"tool_head\",\r\n bounds=[-np.pi, np.pi],\r\n origin_translation=[0, 0, 0],\r\n origin_orientation=[0, 0, 0],\r\n rotation=[0, 0, 1],\r\n )\r\n], active_links_mask=[False, True, True, True, True, True, True])\r\n\r\n\r\ndef move_to_position(target_position):\r\n # Calculate the inverse kinematics\r\n joint_angles = robot_arm_chain.inverse_kinematics(target_position)\r\n print(\"Joint angles for the target position are:\", joint_angles)\r\n\r\n # Compute the forward kinematics to verify the result\r\n calculated_position = robot_arm_chain.forward_kinematics(joint_angles)[:3, 3]\r\n print(\"Calculated position for the given joint angles is:\", calculated_position)\r\n\r\n fig, ax = plot_utils.init_3d_figure()\r\n ax.set_xlim(-3, 3)\r\n ax.set_ylim(-3, 3)\r\n ax.set_zlim(0, 6)\r\n robot_arm_chain.plot(joint_angles, ax)\r\n plt.show()\r\n\r\ndef on_slider_change(val):\r\n joint_angles = np.array([s_joint.value for s_joint in sliders])\r\n ax_3d.clear()\r\n robot_arm_chain.plot(joint_angles, ax_3d)\r\n fig_3d.canvas.draw_idle()\r\n\r\n\r\n# Initialize the figure and create sliders for each joint\r\nfig, ax = plt.subplots()\r\nplt.subplots_adjust(bottom=0.3)\r\nsliders = []\r\n\r\nfor i in range(len(robot_arm_chain.links) - 1):\r\n ax_slider = plt.axes([0.15, 0.1 + 0.05 * i, 0.7, 0.03])\r\n min_angle, max_angle = robot_arm_chain.links[i + 1].bounds\r\n slider = widgets.Slider(ax_slider, f\"Joint {i+1}\", min_angle, max_angle, valinit=0, valstep=0.01)\r\n slider.on_changed(on_slider_change)\r\n sliders.append(slider)\r\n\r\nfig_3d, ax_3d = plot_utils.init_3d_figure()\r\nax_3d.set_xlim(-3, 3)\r\nax_3d.set_ylim(-3, 3)\r\nax_3d.set_zlim(0, 6)\r\ninitial_joint_angles = np.zeros(sum(robot_arm_chain.active_links_mask))\r\nrobot_arm_chain.plot(robot_arm_chain.active_to_full(initial_joint_angles, initial_position=np.zeros(3)), ax_3d)\r\n\r\n\r\nplt.show()\r\n# Example usage:\r\ntarget_position = [3, 1, 1]\r\nmove_to_position(target_position)\r\n\r\n# Define the target position for the end effector\r\ntarget_position = [3, 1, 1]\r\n\r\n# Calculate the inverse kinematics\r\njoint_angles = robot_arm_chain.inverse_kinematics(target_position)\r\nprint(\"Joint angles for the target position are:\", joint_angles)\r\n\r\n# Compute the forward kinematics to verify the result\r\ncalculated_position = robot_arm_chain.forward_kinematics(joint_angles)[:3, 3]\r\nprint(\"Calculated position for the given joint angles is:\", calculated_position)\r\n\r\nfig, ax = plot_utils.init_3d_figure()\r\nax.set_xlim(-3, 3)\r\nax.set_ylim(-3, 3)\r\nax.set_zlim(0, 6)\r\nrobot_arm_chain.plot(joint_angles, ax)\r\nplt.show()\r\n","repo_name":"lobad/CRSA250_TERMINATOR","sub_path":"kinematics-test.py","file_name":"kinematics-test.py","file_ext":"py","file_size_in_byte":3957,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"18479062221","text":"import sublime, sublime_plugin\n\nfrom threading import Timer, Lock\nfrom .complete.complete import find_uses, get_completions, get_diagnostics, get_definition, get_type, reparse, free_tu, free_all\nimport os, re, sys, bisect, json, fnmatch, functools\n\ndef get_settings():\n return 
sublime.load_settings(\"ClangComplete.sublime-settings\")\n\ndef get_setting(view, key, default=None):\n s = view.settings()\n if s.has(\"clangcomplete_%s\" % key):\n return s.get(\"clangcomplete_%s\" % key)\n return get_settings().get(key, default)\n\ndef get_project_path(view):\n try:\n return view.window().folders()[0]\n except:\n pass\n return \"\"\n\n\ndef get_unsaved_buffer(view):\n buffer = None\n if view.is_dirty():\n buffer = view.substr(sublime.Region(0, view.size()))\n return buffer\n\ndef debug_print(*args):\n if get_settings().get(\"debug\", False): print(*args)\n\n#\n#\n# Retrieve options from cmake \n#\n#\ndef parse_flags(f):\n flags = []\n for line in open(f).readlines():\n if line.startswith(('CXX_FLAGS', 'CXX_DEFINES', 'CXX_INCLUDES', 'C_FLAGS', 'C_DEFINES', 'C_INCLUDES')):\n words = line[line.index('=')+1:].split()\n flags.extend(words)\n return flags\n\ndef canonicalize_path(path, root):\n if path.startswith('-I'): return '-I'+os.path.normpath(os.path.join(root, path[2:])) # rel or abs path\n else: return path\n\ndef parse_compile_commands(root, f):\n flags = []\n compile_commands = json.load(open(os.path.join(root, f)))\n for obj in compile_commands:\n for key, value in obj.items():\n if key == \"command\":\n for string in value.split()[1:]:\n # ninja adds local paths as -I. and -I..\n # make adds full paths as i flags\n flags.append(canonicalize_path(string, root))\n return flags\n\ndef merge_flags(flags, pflags):\n result = []\n def append_result(f):\n if f.startswith(('-I', '-D', '-isystem', '-include', '-isysroot', '-W', '-std', '-pthread', '-f', '-pedantic', '-arch', '-m', '-hc')):\n if f not in pflags and f not in result: result.append(f)\n elif not f.startswith(('-O', '-o', '-c', '-g')) and f.startswith('-'): result.append(f)\n flag = \"\"\n for f in flags:\n if f.startswith('-'):\n append_result(flag)\n flag = f\n else: flag = flag + ' ' + f\n append_result(flag)\n return result\n\ndef filter_flag(f, exclude_options):\n for pat in exclude_options:\n if fnmatch.fnmatch(f, pat): return False\n return True\n\nordered_std_flags = ['-std=c++0x', '-std=gnu++0x', '-std=c++11', '-std=gnu++11', '-std=c++1y', '-std=gnu++1y', '-std=c++14', '-std=gnu++14', '-std=c++1z', '-std=gnu++1z', '-std=c++17', '-std=gnu++17']\ndef find_index(l, elem):\n for i,x in enumerate(l): \n if x == elem: return i\n return -1\n\ndef std_flag_rank(x):\n return find_index(ordered_std_flags, x)\n\n\ndef max_std(x, y):\n if (std_flag_rank(x) > std_flag_rank(y)): return x\n else: return y\n\ndef split_flags(flags, exclude_options):\n result = []\n std_flags = []\n for f in flags:\n if f.startswith('-std'): std_flags.append(f)\n elif filter_flag(f, exclude_options): result.extend(f.split())\n if len(std_flags) > 0: result.append(functools.reduce(max_std, std_flags))\n return result\n\ndef accumulate_options(path, exclude_options):\n flags = []\n for root, dirs, filenames in os.walk(path):\n for f in filenames:\n if f.endswith('compile_commands.json'):\n flags.extend(merge_flags(parse_compile_commands(root, f), flags))\n return split_flags(flags, exclude_options)\n if f.endswith('flags.make'): \n flags.extend(merge_flags(parse_flags(os.path.join(root, f)), flags))\n return split_flags(flags, exclude_options)\n\nproject_options = {}\n\ndef clear_options():\n global project_options\n project_options = {}\n\ndef get_build_dir(view):\n result = get_setting(view, \"build_dir\", [\"build\"])\n if isinstance(result, str): return [result]\n else: return result \n\ndef get_options(project_path, 
additional_options, exclude_options, build_dirs, default_options):\n if project_path in project_options: return project_options[project_path]\n\n build_dir = next((build_dir for d in build_dirs for build_dir in [os.path.join(project_path, d)] if os.path.exists(build_dir)), None)\n if build_dir != None:\n project_options[project_path] = ['-x', 'c++'] + accumulate_options(build_dir, exclude_options) + additional_options\n else:\n project_options[project_path] = ['-x', 'c++'] + default_options + additional_options\n\n # debug_print(project_path, project_options[project_path])\n return project_options[project_path]\n\ndef get_args(view):\n project_path = get_project_path(view)\n additional_options = get_setting(view, \"additional_options\", [])\n exclude_options = get_setting(view, \"exclude_options\", [])\n build_dir = get_build_dir(view)\n debug_print(\"build dirs:\", build_dir)\n default_options = get_setting(view, \"default_options\", [\"-std=c++11\"])\n debug_print(get_options(project_path, additional_options, exclude_options, build_dir, default_options))\n return get_options(project_path, additional_options, exclude_options, build_dir, default_options)\n\n#\n#\n# Retrieve include files\n#\n#\ndef find_any_of(s, items):\n for item in items:\n i = s.find(item)\n if (i != -1): return i\n return -1\n\ndef bisect_right_prefix(a, x, lo=0, hi=None):\n if lo < 0:\n raise ValueError('lo must be non-negative')\n if hi is None:\n hi = len(a)\n while lo < hi:\n mid = (lo+hi)//2\n if x < a[mid] and not a[mid].startswith(x): hi = mid\n else: lo = mid+1\n return lo\n\ndef find_prefix(items, prefix):\n return items[bisect.bisect_left(items, prefix): bisect_right_prefix(items, prefix)]\n\nproject_includes = {}\nincludes_lock = Lock()\n\ndef clear_includes_impl():\n global project_includes\n if includes_lock.acquire(timeout=10000):\n project_includes = {}\n includes_lock.release()\n else:\n debug_print(\"Can't clear includes\")\n\ndef clear_includes():\n sublime.set_timeout(lambda:clear_includes_impl() , 1)\n\ndef search_include(path):\n start = len(path)\n if path[-1] is not '/': start = start + 1\n result = []\n for root, dirs, filenames in os.walk(path):\n for f in filenames:\n full_name = os.path.join(root, f)\n result.append(full_name[start:])\n return result\n\ndef find_includes(view, project_path):\n result = set()\n is_path = False\n for option in get_args(view):\n if option.startswith('-I'): result.update(search_include(option[2:]))\n if is_path: result.update(search_include(option))\n if option == '-isystem': is_path = True\n else: is_path = False \n for path in get_setting(view, \"default_include_paths\", [\"/usr/include\", \"/usr/local/include\"]):\n result.update(search_include(path))\n return sorted(result)\n\ndef get_includes(view):\n global project_includes\n result = []\n if includes_lock.acquire(blocking=False):\n try:\n project_path = get_project_path(view)\n if project_path not in project_includes:\n project_includes[project_path] = find_includes(view, project_path)\n result = project_includes[project_path]\n except:\n pass\n finally:\n includes_lock.release()\n else:\n debug_print(\"Includes locked: return nothing\")\n return result\n\ndef parse_slash(path, index):\n last = path.find('/', index)\n if last == -1: return path[index:]\n return path[index:last + 1]\n\ndef complete_includes(view, prefix):\n slash_index = prefix.rfind('/') + 1\n paths = find_prefix(get_includes(view), prefix)\n return sorted(set([parse_slash(path, slash_index) for path in paths]))\n\n\n\n#\n#\n# Error 
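find_prefix above extracts every entry sharing a prefix from a sorted list with two binary searches. An equivalent stdlib-only sketch, using the "increment the last character" trick for the upper bound instead of the record's custom bisect_right_prefix:

import bisect

items = sorted(['algorithm', 'array', 'iosfwd', 'iostream', 'vector'])
prefix = 'ios'
lo = bisect.bisect_left(items, prefix)
# Smallest string greater than every possible 'ios...' string.
hi = bisect.bisect_left(items, prefix[:-1] + chr(ord(prefix[-1]) + 1))
print(items[lo:hi])  # ['iosfwd', 'iostream']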
panel\n#\n#\nclass ClangTogglePanel(sublime_plugin.WindowCommand):\n def run(self, **args):\n show = args[\"show\"] if \"show\" in args else None\n\n if show or (show == None and not clang_error_panel.is_visible(self.window)):\n clang_error_panel.open(self.window)\n else:\n clang_error_panel.close()\n\n\nclass ClangErrorPanelFlush(sublime_plugin.TextCommand):\n def run(self, edit, data):\n self.view.erase(edit, sublime.Region(0, self.view.size()))\n self.view.insert(edit, 0, data)\n\ndef is_view_visible(view, window=None):\n ret = view != None and view.window() != None\n if ret and window:\n ret = view.window().id() == window.id()\n return ret\n\nclass ClangErrorPanel(object):\n def __init__(self):\n self.view = None\n self.data = \"\"\n\n def set_data(self, data):\n self.data = data\n if self.is_visible(): self.flush()\n\n def get_view(self):\n return self.view\n\n def is_visible(self, window=None):\n return is_view_visible(self.view, window)\n\n def set_view(self, view):\n self.view = view\n\n def flush(self):\n self.view.set_read_only(False)\n self.view.set_scratch(True)\n self.view.run_command(\"clang_error_panel_flush\", {\"data\": self.data})\n self.view.set_read_only(True)\n\n def open(self, window=None):\n if window == None:\n window = sublime.active_window()\n if not self.is_visible(window):\n self.view = window.get_output_panel(\"clangcomplete\")\n self.view.settings().set(\"result_file_regex\", \"^(..[^:\\n]*):([0-9]+):?([0-9]+)?:? (.*)$\")\n self.view.set_syntax_file('Packages/ClangComplete/ErrorPanel.tmLanguage')\n self.flush()\n\n window.run_command(\"show_panel\", {\"panel\": \"output.clangcomplete\"})\n\n def close(self):\n sublime.active_window().run_command(\"hide_panel\", {\"panel\": \"output.clangcomplete\"})\n\n\nclang_error_panel = ClangErrorPanel()\n\n#\n#\n# Get language from sublime \n#\n#\n\nlanguage_regex = re.compile(\"(?<=source\\.)[\\w+#]+\")\n\ndef get_language(view):\n try:\n caret = view.sel()[0].a\n language = language_regex.search(view.scope_name(caret))\n if language == None:\n return None\n return language.group(0)\n except:\n return None\n\ndef is_supported_language(view):\n language = get_language(view)\n if language == None or (language != \"c++\" and\n language != \"c\" and\n language != \"objc\" and\n language != \"objc++\"):\n return False\n return True\n\n\n\nmember_regex = re.compile(r\"(([a-zA-Z_]+[0-9_]*)|([\\)\\]])+)((\\.)|(->))$\")\nnot_code_regex = re.compile(\"(string.)|(comment.)\")\n\ndef convert_completion(x):\n if '\\n' in x:\n c = x.split('\\n', 1)\n return (c[0], c[1])\n else:\n return (x, x)\n\ndef convert_completions(completions):\n return [convert_completion(x) for x in completions]\n\n# def is_member_completion(view, caret):\n# line = view.substr(sublime.Region(view.line(caret).a, caret))\n# if member_regex.search(line) != None:\n# return True\n# elif get_language(view).startswith(\"objc\"):\n# return re.search(r\"\\[[\\.\\->\\s\\w\\]]+\\s+$\", line) != None\n# return False\n\nclass ClangCompleteClearCache(sublime_plugin.TextCommand):\n def run(self, edit):\n sublime.status_message(\"Clearing cache...\")\n clear_includes()\n clear_options()\n free_all()\n\nclass ClangCompleteFindUses(sublime_plugin.TextCommand):\n def run(self, edit):\n debug_print(\"Find Uses\")\n filename = self.view.file_name()\n # The view hasnt finsished loading yet\n if (filename is None): return\n\n row, col = self.view.rowcol(self.view.sel()[0].begin())\n uses = find_uses(filename, get_args(self.view), row+1, col+1, None)\n 
self.view.window().show_quick_panel(uses, self.on_done, sublime.MONOSPACE_FONT, 0, lambda index:self.quick_open(uses, index))\n\n def quick_open(self, uses, index):\n self.view.window().open_file(uses[index], sublime.ENCODED_POSITION | sublime.TRANSIENT)\n\n def on_done(self):\n pass\n\nclass ClangCompleteGotoDef(sublime_plugin.TextCommand):\n def run(self, edit):\n filename = self.view.file_name()\n # The view hasnt finsished loading yet\n if (filename is None): return\n\n reparse(filename, get_args(self.view), get_unsaved_buffer(self.view))\n\n pos = self.view.sel()[0].begin()\n row, col = self.view.rowcol(pos)\n target = get_definition(filename, get_args(self.view), row+1, col+1)\n\n if (len(target) is 0): sublime.status_message(\"Cant find definition\")\n else: self.view.window().open_file(target, sublime.ENCODED_POSITION)\n\nclass ClangCompleteShowType(sublime_plugin.TextCommand):\n def run(self, edit):\n filename = self.view.file_name()\n # The view hasnt finsished loading yet\n if (filename is None): return\n\n reparse(filename, get_args(self.view), get_unsaved_buffer(self.view))\n\n pos = self.view.sel()[0].begin()\n row, col = self.view.rowcol(pos)\n type = get_type(filename, get_args(self.view), row+1, col+1)\n\n sublime.status_message(type)\n\nclass ClangCompleteComplete(sublime_plugin.TextCommand):\n def show_complete(self):\n self.view.run_command(\"hide_auto_complete\")\n sublime.set_timeout(lambda: self.view.run_command(\"auto_complete\"), 1)\n\n def run(self, edit, characters):\n debug_print(\"ClangCompleteComplete\")\n regions = [a for a in self.view.sel()]\n self.view.sel().clear()\n for region in reversed(regions):\n pos = 0\n region.end() + len(characters)\n if region.size() > 0:\n self.view.replace(edit, region, characters)\n pos = region.begin() + len(characters)\n else:\n self.view.insert(edit, region.end(), characters)\n pos = region.end() + len(characters)\n\n self.view.sel().add(sublime.Region(pos, pos))\n caret = self.view.sel()[0].begin()\n word = self.view.substr(self.view.word(caret)).strip()\n debug_print(\"Complete\", word)\n triggers = ['->', '::', '.']\n if word in triggers:\n debug_print(\"Popup completions\")\n self.show_complete()\n\nbuild_panel_window_id = None\n\ndef is_build_panel_visible(window):\n return build_panel_window_id != None and window != None and window.id() == build_panel_window_id\n\nclass ClangCompleteAutoComplete(sublime_plugin.EventListener):\n def complete_at(self, view, prefix, location, timeout):\n debug_print(\"complete_at\", prefix)\n filename = view.file_name()\n # The view hasnt finsished loading yet\n if (filename is None): return []\n if not is_supported_language(view):\n return []\n\n completions = []\n\n line = view.substr(view.line(location))\n\n if line.startswith(\"#include\") or line.startswith(\"# include\"):\n row, col = view.rowcol(location - len(prefix))\n start = find_any_of(line, ['<', '\"'])\n if start != -1: completions = convert_completions(complete_includes(view, line[start+1:col] + prefix))\n else:\n r = view.word(location - len(prefix))\n word = view.substr(r)\n # Skip completions for single colon or dash, since we want to\n # optimize for completions after the `::` or `->` characters\n if word == ':' or word == '-': return []\n p = 0\n if re.match('^\\w+$', word): p = r.begin()\n else: p = r.end() - 1\n row, col = view.rowcol(p)\n # debug_print(\"complete: \", row, col, word)\n completions = convert_completions(get_completions(filename, get_args(view), row+1, col+1, \"\", timeout, 
get_unsaved_buffer(view)))\n\n return completions\n\n\n def diagnostics(self, view):\n filename = view.file_name() \n # The view hasnt finsished loading yet\n if (filename is None): return []\n diagnostics = get_diagnostics(filename, get_args(view))\n # If there are errors in the precompiled headers, then we will free\n # the tu, and reload the diagnostics\n for diag in diagnostics:\n if \"has been modified since the precompiled header\" in diag or \"modified since it was first processed\" in diag:\n free_tu(filename)\n diagnostics = get_diagnostics(filename, get_args(view))\n break\n return [diag for diag in diagnostics if \"#pragma once in main file\" not in diag]\n\n def show_diagnostics(self, view):\n output = '\\n'.join(self.diagnostics(view))\n clang_error_panel.set_data(output)\n window = view.window()\n if not window is None and len(output) > 1:\n window.run_command(\"clang_toggle_panel\", {\"show\": True})\n\n def on_window_command(self, window, command_name, args):\n global build_panel_window_id\n debug_print(command_name, args)\n if command_name == 'show_panel' and args['panel'] == 'output.exec':\n if 'toggle' in args and args['toggle'] == True and build_panel_window_id != None: build_panel_window_id=None\n else: build_panel_window_id = window.id()\n if command_name == 'hide_panel':\n if build_panel_window_id != None or args != None and ('panel' in args and args['panel'] == 'output.exec'):\n build_panel_window_id = None\n return None\n\n\n def on_post_text_command(self, view, name, args):\n if not is_supported_language(view): return\n \n if 'delete' in name: return\n \n pos = view.sel()[0].begin()\n self.complete_at(view, \"\", pos, 0)\n \n\n def on_query_completions(self, view, prefix, locations):\n if not is_supported_language(view):\n return []\n \n completions = self.complete_at(view, prefix, locations[0], get_setting(view, \"timeout\", 200))\n debug_print(\"on_query_completions:\", prefix, len(completions))\n if (get_setting(view, \"inhibit_sublime_completions\", True)):\n return (completions, sublime.INHIBIT_WORD_COMPLETIONS | sublime.INHIBIT_EXPLICIT_COMPLETIONS)\n else:\n return (completions)\n\n def on_activated_async(self, view):\n debug_print(\"on_activated_async\")\n if not is_supported_language(view): return\n \n debug_print(\"on_activated_async: get_includes\")\n get_includes(view)\n debug_print(\"on_activated_async: complete_at\")\n self.complete_at(view, \"\", view.sel()[0].begin(), 0)\n\n\n def on_post_save_async(self, view):\n if not is_supported_language(view): return\n\n show_panel = None\n show_diagnostics_on_save = get_setting(view, \"show_diagnostics_on_save\", \"no_build\")\n if show_diagnostics_on_save == 'always': show_panel = True\n elif show_diagnostics_on_save == 'never': show_panel = False\n else: show_panel = not is_build_panel_visible(view.window())\n \n if show_panel: self.show_diagnostics(view)\n \n pos = view.sel()[0].begin()\n self.complete_at(view, \"\", pos, 0)\n\n def on_close(self, view):\n if is_supported_language(view):\n free_tu(view.file_name())\n\n def on_query_context(self, view, key, operator, operand, match_all):\n if key == \"clangcomplete_supported_language\":\n if view == None: view = sublime.active_window().active_view()\n return is_supported_language(view)\n elif key == \"clangcomplete_is_code\":\n return not_code_regex.search(view.scope_name(view.sel()[0].begin())) == None\n elif key == \"clangcomplete_panel_visible\":\n return 
clang_error_panel.is_visible()\n","repo_name":"pfultz2/ClangComplete","sub_path":"clangcomplete.py","file_name":"clangcomplete.py","file_ext":"py","file_size_in_byte":19771,"program_lang":"python","lang":"en","doc_type":"code","stars":110,"dataset":"github-code","pt":"85"} +{"seq_id":"39527478226","text":"#!/usr/bin/env python\n# coding=utf-8\n\n\nimport os\nimport time\nimport pprint\nimport shutil\nimport sys\nimport numpy as np\n\nimport cv2\nimport torch\nimport torch.backends.cudnn as cudnn\nimport torch.nn.parallel\nimport torch.nn.parallel\nimport torch.optim\nimport torch.utils.data\nimport torch.utils.data.distributed\nimport torchvision.transforms as transforms\n\nfrom torch.utils.tensorboard import SummaryWriter\n\nimport libs.core.config as lib_config\nfrom libs.core.config import get_model_name\n\nimport libs.core.trainval as lib_function\nfrom libs.core.inference import get_final_preds\n\nimport libs.core.loss as lib_loss\n\nimport libs.utils.utils as lib_util\nfrom libs.utils.transforms import get_affine_transform\n\nimport libs.dataset as lib_dataset\nimport libs.models.vdn_model as vdn_model\n\nimport utils.vis.util as vis_util\nfrom PIL import ImageDraw\n\nfrom typing import Optional\n\n\nsys.path.append(\".\")\nsave = False\n\nroot_dir = '/VDN'\n\n\nclass VectorDetectionNetwork:\n \"\"\"\n \"\"\"\n\n def __init__(self, train=False, backbone='resnet50'):\n if train:\n vdn_config = os.path.join(root_dir, f\"cfgs/{backbone}/train.yaml\")\n else:\n vdn_config = os.path.join(root_dir, f\"cfgs/{backbone}/eval.yaml\")\n\n lib_config.update_config(vdn_config)\n\n cudnn.benchmark = lib_config.config.CUDNN.BENCHMARK\n torch.backends.cudnn.deterministic = lib_config.config.CUDNN.DETERMINISTIC\n torch.backends.cudnn.enabled = lib_config.config.CUDNN.ENABLED\n\n if not train:\n model = vdn_model.get_vdn_resnet(lib_config.config, is_train=False)\n model_path = lib_config.config.MODEL.PRETRAINED\n model.load_state_dict({k.replace('module.', ''): v for k, v in torch.load(model_path).items()})\n else:\n model = vdn_model.get_vdn_resnet(lib_config.config, is_train=True)\n\n self.gpus = [int(i) for i in lib_config.config.GPUS.split(',')]\n self.model = torch.nn.DataParallel(model, device_ids=self.gpus).cuda()\n\n self.output_dir = os.path.join(root_dir, f'output/demo-{backbone}')\n if not os.path.exists(self.output_dir):\n os.makedirs(self.output_dir)\n\n def train(self):\n \"\"\"\n \"\"\"\n cfgs = lib_config.config\n\n logger, final_output_dir, tb_log_dir = lib_util.create_logger(cfgs, 'train')\n logger.info(pprint.pformat(cfgs))\n\n # copy model file for reference\n this_dir = os.path.dirname(__file__)\n shutil.copy2(os.path.join(this_dir, '../libs/models', cfgs.MODEL.NAME + '.py'), final_output_dir)\n\n writer_dict = {\n 'writer': SummaryWriter(log_dir=tb_log_dir),\n 'train_global_steps': 0,\n 'valid_global_steps': 0,\n }\n\n # define loss function (criterion) and optimizer\n # crit_heatmap = lib_loss.JointsMSELoss(use_target_weight=cfgs.LOSS.USE_TARGET_WEIGHT).cuda()\n # crit_vector = lib_loss.OrientsMSELoss().cuda()\n crit_heatmap = lib_loss.MSELoss().cuda()\n crit_vector = lib_loss.MSELoss().cuda()\n\n optimizer = lib_util.get_optimizer(cfgs, self.model)\n\n lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(\n optimizer, cfgs.TRAIN.LR_STEP, cfgs.TRAIN.LR_FACTOR\n )\n\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n\n train_dataset = lib_dataset.CoCo(\n cfgs,\n cfgs.DATASET.ROOT,\n cfgs.DATASET.TRAIN_SET,\n True,\n 
transform=transforms.Compose([\n transforms.ToTensor(),\n normalize, ])\n )\n valid_dataset = lib_dataset.CoCo(\n cfgs,\n cfgs.DATASET.ROOT,\n cfgs.DATASET.TEST_SET,\n False,\n transforms.Compose([\n transforms.ToTensor(),\n normalize,\n ])\n )\n train_loader = torch.utils.data.DataLoader(\n train_dataset,\n batch_size=cfgs.TRAIN.BATCH_SIZE * len(self.gpus),\n shuffle=cfgs.TRAIN.SHUFFLE,\n num_workers=cfgs.WORKERS,\n pin_memory=True,\n\n )\n valid_loader = torch.utils.data.DataLoader(\n valid_dataset,\n batch_size=cfgs.TEST.BATCH_SIZE * len(self.gpus),\n shuffle=False,\n num_workers=cfgs.WORKERS,\n pin_memory=True\n )\n\n best_perf = 0.0\n for epoch in range(cfgs.TRAIN.BEGIN_EPOCH, cfgs.TRAIN.END_EPOCH):\n # train for one epoch\n lib_function.train(cfgs, train_loader, self.model, crit_heatmap, crit_vector,\n optimizer, epoch, final_output_dir, writer_dict)\n\n # In PyTorch 1.1.0 and later, you should call optimizer.step() before lr_scheduler.step().\n lr_scheduler.step()\n\n # evaluate on validation set\n perf_indicator = lib_function.validate(cfgs, valid_loader, valid_dataset, self.model,\n crit_heatmap, crit_vector, epoch, final_output_dir, writer_dict)\n\n if perf_indicator > best_perf:\n best_perf = perf_indicator\n is_best_model = True\n else:\n is_best_model = False\n\n lib_util.save_checkpoint({\n 'epoch': epoch + 1,\n 'model': get_model_name(cfgs),\n 'state_dict': self.model.state_dict(),\n 'perf': perf_indicator,\n 'optimizer': optimizer.state_dict(),\n }, is_best_model, final_output_dir)\n\n final_model_state_file = os.path.join(final_output_dir, 'final_state.pth.tar')\n logger.info('saving final model state to {}'.format(final_model_state_file))\n torch.save(self.model.module.state_dict(), final_model_state_file)\n\n def eval(self):\n cfgs = lib_config.config\n\n logger, final_output_dir, tb_log_dir = lib_util.create_logger(cfgs, 'eval')\n logger.info(pprint.pformat(cfgs))\n\n # define loss function (criterion) and optimizer\n crit_heatmap = lib_loss.MSELoss().cuda()\n crit_vector = lib_loss.MSELoss().cuda()\n\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n\n valid_dataset = lib_dataset.CoCo(\n cfgs,\n cfgs.DATASET.ROOT,\n cfgs.DATASET.TEST_SET,\n False,\n transforms.Compose([\n transforms.ToTensor(),\n normalize,\n ])\n )\n\n valid_loader = torch.utils.data.DataLoader(\n valid_dataset,\n batch_size=cfgs.TEST.BATCH_SIZE * len(self.gpus),\n shuffle=False,\n num_workers=cfgs.WORKERS,\n pin_memory=True\n )\n\n # evaluate on validation or test set (depending on the cfg)\n lib_function.validate(cfgs, valid_loader, valid_dataset, self.model,\n crit_heatmap, crit_vector, cfgs.TRAIN.END_EPOCH, final_output_dir)\n\n def get_vectors(self, roi_image: np.ndarray, verbose: Optional[str] = None):\n \"\"\"Given roi_image of pointer-type meter dial face, return vectors represented by 2 points [[[ps_x, ps_y],\n [pe_x, pe_y]], ...]. 
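The training loop in the VDN record above carries a comment about the PyTorch 1.1+ ordering rule: optimizer.step() must run before lr_scheduler.step() within each epoch. A minimal sketch of that ordering with MultiStepLR:

import torch

params = [torch.nn.Parameter(torch.zeros(1))]
opt = torch.optim.SGD(params, lr=0.1)
sched = torch.optim.lr_scheduler.MultiStepLR(opt, milestones=[2], gamma=0.1)
for epoch in range(4):
    opt.step()    # stands in for a full epoch of gradient updates
    sched.step()  # learning rate decays once the milestone epoch is reached
    print(epoch, opt.param_groups[0]['lr'])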
Here ps is for start point, and pe is for end point.\n\n :param roi_image: \n :param verbose: result image name\n :return:\n \"\"\"\n model = self.model\n cfgs = lib_config.config\n\n image_height = roi_image.shape[0]\n image_width = roi_image.shape[1]\n\n center = np.array([image_width * 0.5, image_height * 0.5], dtype=np.float32)\n\n shape = np.array([image_width / 200.0, image_height / 200.0], dtype=np.float32)\n rotation = 0\n\n trans = get_affine_transform(center, shape, rotation, cfgs.MODEL.IMAGE_SIZE)\n\n net_input = cv2.warpAffine(roi_image, trans,\n (int(cfgs.MODEL.IMAGE_SIZE[0]), int(cfgs.MODEL.IMAGE_SIZE[1])),\n flags=cv2.INTER_LINEAR)\n\n transform = transforms.Compose([transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225]),\n ])\n\n net_input = transform(net_input).unsqueeze(0)\n # switch to evaluate mode\n model.eval()\n\n start = time.time()\n with torch.no_grad():\n # compute output heat map\n output_hm, output_vm = model(net_input)\n preds_start, preds_end, _, maxvals = get_final_preds(output_hm.clone().cpu().numpy(),\n output_vm.clone().cpu().numpy(),\n np.asarray([center]), np.asarray([shape]))\n\n spent = time.time() - start\n print('inference time (s): ', spent)\n\n # squeeze the batch and joint dims\n preds_start = np.squeeze(preds_start, (0, 1))\n preds_end = np.squeeze(preds_end, (0, 1))\n maxvals = np.squeeze(maxvals, (0, 1))\n\n if verbose is not None:\n roi_pil = vis_util.cv_img_to_pil(roi_image)\n draw = ImageDraw.Draw(roi_pil)\n\n for i, (start_point, end_point) in enumerate(zip(preds_start, preds_end)):\n # start_point (k, 2); end_point (k)\n print(\"initial_point\", start_point, \"end_point\", end_point)\n vis_util.apply_dot(draw, start_point, image_width, image_height, idx=i)\n vis_util.apply_line(draw, start_point, end_point, image_width, image_height, idx=i)\n\n output_image = vis_util.pil_img_to_cv(roi_pil)\n cv2.imwrite(os.path.join(self.output_dir, f'res_{verbose}.jpg'), output_image)\n\n vis_util.save_batch_heatmaps(net_input, output_hm,\n os.path.join(self.output_dir, f'hmap_{verbose}.jpg'))\n vis_util.save_batch_vectormaps(net_input, output_vm,\n os.path.join(self.output_dir, f'vmap_{verbose}.jpg'))\n\n return preds_start, preds_end, maxvals, spent\n","repo_name":"DrawZeroPoint/VectorDetectionNetwork","sub_path":"modules/vdn.py","file_name":"vdn.py","file_ext":"py","file_size_in_byte":10386,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"85"} +{"seq_id":"21671042951","text":"from django.urls import path\nfrom main.views import *\n\napp_name = 'main'\n\nurlpatterns = [\n path('', HomePage.as_view(), name='home'),\n \n path('products/', product_list, name='products'),\n path('product//', product_detail, name='product'),\n path('like/product//', like_product, name='like'),\n\n path('categories/', CategoryListView.as_view(), name='categories'),\n path('category//', category_detail, name='category'),\n\n path('search/', search, name='search'),\n]","repo_name":"imhassane/karimvente","sub_path":"main/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"9504000866","text":"import torch.nn as nn\nimport torch\n\n\nclass AdaptiveConcatPool2d(nn.Module):\n def __init__(self):\n super().__init__()\n self.avg = nn.AdaptiveAvgPool2d(output_size=(1, 1))\n self.max = nn.AdaptiveMaxPool2d(output_size=(1, 1))\n\n def forward(self, x):\n avg_x = 
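get_vectors above normalizes an arbitrary ROI to the network's fixed input size by warping rather than resizing, via a project helper (get_affine_transform). A plain-OpenCV sketch of the same resize-by-warp step, with made-up sizes:

import cv2
import numpy as np

roi = np.zeros((480, 640, 3), dtype=np.uint8)       # stand-in ROI image
dst_w, dst_h = 256, 256
src = np.float32([[0, 0], [640, 0], [0, 480]])      # three ROI corners...
dst = np.float32([[0, 0], [dst_w, 0], [0, dst_h]])  # ...mapped to input corners
M = cv2.getAffineTransform(src, dst)
net_input = cv2.warpAffine(roi, M, (dst_w, dst_h), flags=cv2.INTER_LINEAR)
print(net_input.shape)  # (256, 256, 3)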
self.avg(x)\n max_x = self.max(x)\n return torch.cat([avg_x, max_x], dim=1).squeeze(2).squeeze(2)\n\n\nclass Flatten(nn.Module):\n def forward(self, x):\n return x.view(x.shape[0], -1)\n\n\nclass EfficientModel(nn.Module):\n\n def __init__(self, c_out=5, n_tiles=36, tile_size=224, name='efficientnet-b0', strategy='stitched', head='basic'):\n super().__init__()\n\n from efficientnet_pytorch import EfficientNet\n m = EfficientNet.from_pretrained(name, advprop=True, num_classes=c_out, in_channels=3)\n c_feature = m._fc.in_features\n m._fc = nn.Identity()\n self.feature_extractor = m\n self.n_tiles = n_tiles\n self.tile_size = tile_size\n\n if strategy == 'stitched':\n if head == 'basic':\n self.head = nn.Linear(c_feature, c_out)\n elif head == 'concat':\n m._avg_pooling = AdaptiveConcatPool2d()\n self.head = nn.Linear(c_feature * 2, c_out)\n elif head == 'gem':\n m._avg_pooling = GeM()\n self.head = nn.Linear(c_feature, c_out)\n elif strategy == 'bag':\n if head == 'basic':\n self.head = BasicHead(c_feature, c_out, n_tiles)\n elif head == 'attention':\n self.head = AttentionHead(c_feature, c_out, n_tiles)\n\n self.strategy = strategy\n\n def forward(self, x):\n if self.strategy == 'bag':\n x = x.view(-1, 3, self.tile_size, self.tile_size)\n h = self.feature_extractor(x)\n h = self.head(h)\n return h\n\nclass ResnetModel(nn.Module):\n\n def __init__(self, c_out=5, n_tiles=36, tile_size=224, pretrained=True, strategy='stitched', head='basic'):\n super().__init__()\n\n from torchvision.models import resnet34\n m = resnet34(pretrained=pretrained)\n c_feature = m.fc.in_features\n m.fc = nn.Identity()\n self.feature_extractor = m\n self.n_tiles = n_tiles\n self.tile_size = tile_size\n\n if strategy == 'stitched':\n if head == 'basic':\n self.head = nn.Linear(c_feature, c_out)\n elif head == 'concat':\n m._avg_pooling = AdaptiveConcatPool2d()\n self.head = nn.Linear(c_feature * 2, c_out)\n elif head == 'gem':\n m._avg_pooling = GeM()\n self.head = nn.Linear(c_feature, c_out)\n elif strategy == 'bag':\n if head == 'basic':\n self.head = BasicHead(c_feature, c_out, n_tiles)\n elif head == 'attention':\n self.head = AttentionHead(c_feature, c_out, n_tiles)\n\n self.strategy = strategy\n\n def forward(self, x):\n if self.strategy == 'bag':\n x = x.view(-1, 3, self.tile_size, self.tile_size)\n h = self.feature_extractor(x)\n h = self.head(h)\n return h\n\n\nclass BasicHead(nn.Module):\n\n def __init__(self, c_in, c_out, n_tiles):\n self.n_tiles = n_tiles\n super().__init__()\n self.fc = nn.Sequential(AdaptiveConcatPool2d(),\n nn.Linear(c_in * 2, c_out))\n\n def forward(self, x):\n\n bn, c = x.shape\n h = x.view(-1, self.n_tiles, c, 1, 1).permute(0, 2, 1, 3, 4) \\\n .contiguous().view(-1, c, 1 * self.n_tiles, 1)\n h = self.fc(h)\n return h\n\n\nclass AttentionHead(nn.Module):\n\n def __init__(self, c_in, c_out, n_tiles):\n self.n_tiles = n_tiles\n super().__init__()\n self.attention_pool = AttentionPool(c_in, c_in//2)\n self.fc = nn.Linear(c_in, c_out)\n\n def forward(self, x):\n\n bn, c = x.shape\n h = x.view(-1, self.n_tiles, c)\n h = self.attention_pool(h)\n h = self.fc(h)\n return h\n\n\nclass AttentionPool(nn.Module):\n\n def __init__(self, c_in, d):\n super().__init__()\n self.lin_V = nn.Linear(c_in, d)\n self.lin_w = nn.Linear(d, 1)\n\n def compute_weights(self, x):\n key = self.lin_V(x) # b, n, d\n weights = self.lin_w(torch.tanh(key)) # b, n, 1\n weights = torch.softmax(weights, dim=1)\n return weights\n\n def forward(self, x):\n weights = self.compute_weights(x)\n pooled = 
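A quick shape check for the AttentionPool pattern above: per-tile features (B, N, C) are reduced to a single bag feature (B, C) by softmax weights computed over the N tiles. Standalone, with toy sizes:

import torch

B, N, C, d = 2, 5, 8, 4
x = torch.randn(B, N, C)
lin_V = torch.nn.Linear(C, d)
lin_w = torch.nn.Linear(d, 1)
w = torch.softmax(lin_w(torch.tanh(lin_V(x))), dim=1)   # (B, N, 1) weights
pooled = torch.matmul(x.transpose(1, 2), w).squeeze(2)  # (B, C) bag feature
print(pooled.shape)  # torch.Size([2, 8])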
torch.matmul(x.transpose(1, 2), weights).squeeze(2) # b, c, n x b, n, 1 => b, c, 1\n return pooled\n\n\nclass GeM(nn.Module):\n\n def __init__(self, p=3, eps=1e-6):\n super(GeM, self).__init__()\n self.p = torch.nn.parameter.Parameter(torch.ones(1) * p)\n self.eps = eps\n\n def forward(self, x):\n return torch.nn.functional.avg_pool2d(x.clamp(min=self.eps).pow(self.p), (x.size(-2), x.size(-1))).pow(1./self.p)\n\n def __repr__(self):\n return self.__class__.__name__ + '(' + 'p=' + '{:.4f}'.format(self.p.data.tolist()[0]) + ', ' + \\\n 'eps=' + str(self.eps) + ')'\n\n\nclass QWKLoss(nn.Module):\n\n def __init__(self, n_class):\n super().__init__()\n self.n_class = n_class\n grid = torch.repeat_interleave(torch.arange(0, n_class).reshape(n_class, 1), repeats=n_class, dim=1)\n self.weights = ((grid - grid.T) ** 2) / float((n_class - 1) ** 2)\n\n def forward(self, logits, y_true):\n y_pred = logits.softmax(1)\n\n weights = self.weights.to(logits.device)\n\n nom = torch.matmul(y_true, weights) # N, C * C, C = N, C\n nom = nom * y_pred # N, C * C, N = N, N\n nom = nom.sum()\n\n denom = y_pred.sum(0, keepdims=True)\n n_hat = y_true.sum(0, keepdims=True) / y_true.shape[0]\n denom = torch.matmul(n_hat.T, denom) * weights\n denom = denom.sum()\n\n # gradient descent minimizes therefore return 1 - kappa instead of kappa = 1 - nom/denom\n return nom / denom\n","repo_name":"ChienYiChi/kaggle-panda-challenge","sub_path":"src/modules.py","file_name":"modules.py","file_ext":"py","file_size_in_byte":6019,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"85"} +{"seq_id":"73399304598","text":"#Em uma competição de ginástica, cada atleta recebe votos de sete jurados. A melhor e a pior nota são eliminadas. A sua nota fica sendo a média dos votos restantes. Você deve fazer um programa que receba o nome do ginasta e as notas dos sete jurados alcançadas pelo atleta em sua apresentação e depois informe a sua média, conforme a descrição acima informada (retirar o melhor e o pior salto e depois calcular a média com as notas restantes). As notas não são informados ordenadas. 
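QWKLoss above builds the standard quadratic-kappa penalty matrix w[i][j] = (i - j)^2 / (K - 1)^2. A numeric check for K = 5 classes (repeat is used here in place of the record's repeat_interleave; the resulting matrix is identical):

import torch

K = 5
grid = torch.arange(K).reshape(K, 1).repeat(1, K)  # row i holds the value i
weights = (grid - grid.T).float() ** 2 / (K - 1) ** 2
print(weights[0])  # tensor([0.0000, 0.0625, 0.2500, 0.5625, 1.0000])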
Um exemplo de saída do programa deve ser conforme o exemplo abaixo:\nnome_atleta = input(\"Digite o nome do atleta: \")\nnotas = []\n\nfor i in range(7):\n nota = float(input(f\"Digite a nota do jurado {i + 1}: \"))\n notas.append(nota)\n \nnotas.sort()\n\nprint(f\"Resultado final: \")\nprint(f\"Atleta: {nome_atleta}\")\nprint(f\"A pior nota é: {notas[0]}\")\nprint(f\"A melhor nota é: {notas[6]}\")\n\nnotas = notas[1:6]\n\nmedia = sum(notas) / len(notas)\n\nprint(f\"A média é: {media}\")\n","repo_name":"lauralima6/pc-imd","sub_path":"unidade3/lista2/q5.py","file_name":"q5.py","file_ext":"py","file_size_in_byte":972,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"12817814653","text":"import requests\r\nfrom requests.exceptions import HTTPError\r\n\r\n\r\nclass Api:\r\n def __init__(self, id):\r\n self.id = id\r\n self.method = \"https://api.vk.com/method/\"\r\n self.user = \"users.get?user_ids=\"\r\n self.fhotos = \"photos.getAlbums?owner_id=\"\r\n self.albums = \"&album_ids=title\"\r\n self.friends = \"friends.get?user_id=\"\r\n self.fields = \"&fields=nickname\"\r\n self.token = \"&v=5.107&access_token=c5955299c5955299c595529924c5e72fd8cc595c59552999b481ea4a5e5e0d9b055d64f\"\r\n self.request()\r\n\r\n def request(self):\r\n try:\r\n user = requests.get(f'{self.method}{self.user}{self.id}{self.token}')\r\n info = user.json()['response'][0]\r\n self.id = info['id']\r\n print(\"User:\")\r\n print(info[\"first_name\"], info['last_name'], '\\n')\r\n except HTTPError as error:\r\n print(error)\r\n\r\n try:\r\n fhotos = requests.get(f'{self.method}{self.fhotos}{self.id}{self.albums}{self.token}')\r\n print(\"Albums:\")\r\n info = fhotos.json()[\"response\"][\"items\"]\r\n for elem in info:\r\n print(elem['title'])\r\n print('\\n')\r\n except HTTPError as error:\r\n print(error)\r\n\r\n try:\r\n friends = requests.get(f'{self.method}{self.friends}{self.id}{self.fields}{self.token}')\r\n print(\"Friends:\")\r\n info = friends.json()[\"response\"][\"items\"]\r\n for elem in info:\r\n print(elem[\"first_name\"], elem['last_name'])\r\n except HTTPError as error:\r\n print(error)\r\n\r\n\r\nif __name__ == '__main__':\r\n while True:\r\n print(\"Введите id пользователя\")\r\n request = input()\r\n ex = Api(request)\r\n\r\n","repo_name":"Lizaveta17/API","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1788,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"38952240481","text":"import configparser\nimport os\n\nfrom pyutil.subcommand import Subcommand\nimport argparse\nimport shlex\n\nfrom save import get_current_display_settings, get_config_name\n\n\nclass Fix(Subcommand):\n NAME = 'fix'\n\n def on_parser_init(self, parser: argparse.ArgumentParser):\n pass\n\n def on_command(self, args):\n cp = configparser.ConfigParser()\n\n if os.path.isfile(args.config):\n cp.read(args.config)\n else:\n print('error: could not load config from ' + str(args.config))\n return\n\n ds = get_current_display_settings(args.displayplacer)\n\n name = get_config_name(ds)\n\n if not cp.has_section(name):\n print('error: could not load config for displays ' + str(name))\n return\n\n print('resetting displays ...', end='', flush=True)\n args.displayplacer(list(cp[name].values()))\n 
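A worked check of the judging rule in the record above (statement, translated: each athlete receives seven scores; the best and the worst are discarded and the remaining five are averaged). With the scores sorted, that is the mean of elements 1 through 5:

notas = [9.5, 8.0, 9.0, 10.0, 8.5, 9.0, 7.5]  # seven judges, unsorted
notas.sort()  # [7.5, 8.0, 8.5, 9.0, 9.0, 9.5, 10.0]
media = sum(notas[1:6]) / 5
print(media)  # (8.0 + 8.5 + 9.0 + 9.0 + 9.5) / 5 = 8.8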
print('done')\n\n","repo_name":"dhkim09a/config-disp","sub_path":"fix.py","file_name":"fix.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"8867740550","text":"import re\nimport os\nimport time\nimport math\nimport socket\nimport threading\nimport datetime\nimport random\nimport traceback\nfrom collections import defaultdict\nfrom queue import Queue, Empty\nfrom Utils import timeoutSecs, Beep\n\ntry:\n\tfrom pyllrp.pyllrp import *\nexcept ImportError:\n\tfrom pyllrp import *\nfrom TagGroup import TagGroup, QuadraticRegressionMethod, StrongestReadMethod, FirstReadMethod, MostReadsChoice, DBMaxChoice\n\ngetTimeNow = datetime.datetime.now\ntOld = getTimeNow() - datetime.timedelta( days=200 )\n\nHOME_DIR = os.path.expanduser(\"~\")\n\nConnectionTimeoutSecondsDefault\t= 3\t\t# Interval for connection timeout\nKeepaliveSecondsDefault\t\t\t= 2\t\t# Interval to request a Keepalive message\nRepeatSecondsDefault\t\t\t= 3\t\t# Interval in which a tag is considered a repeat read.\nProcessingMethodDefault \t\t= QuadraticRegressionMethod\nAntennaChoiceDefault\t\t\t= MostReadsChoice\nRemoveOutliersDefault = True\n\nConnectionTimeoutSeconds\t= ConnectionTimeoutSecondsDefault\nKeepaliveSeconds\t\t\t= KeepaliveSecondsDefault\nRepeatSeconds\t\t\t\t= RepeatSecondsDefault\n\nReconnectDelaySeconds\t\t= 2\t\t# Interval to wait before reattempting a connection\nReaderUpdateMessageSeconds\t= 5\t\t# Interval to print we are waiting for input.\n\nTagPopulation = None\t\t# Size of a group to read.\nTagPopulationDefault = 4\n\nReceiverSensitivity = None\nTransmitPower = None\n\t\nInventorySession = 2\t\t# LLRP inventory session.\nTagTransitTime = None\t\t# Time (seconds) expected for tag to cross read field. 
Default=3\n\nProcessingMethod = ProcessingMethodDefault\nAntennaChoice = AntennaChoiceDefault\nRemoveOutliers = RemoveOutliersDefault\n\ntAntennaConnectedLast = getTimeNow() - datetime.timedelta( days=200 )\ntAntennaConnectedLastLock = threading.Lock()\n\ndef ResetAntennaConnectionsCheck():\n\tglobal tAntennaConnectedLast, tAntennaConnectedLastLock\n\twith tAntennaConnectedLastLock:\n\t\ttAntennaConnectedLast -= datetime.timedelta( days=200 )\n\n#------------------------------------------------------\n\nImpinjDebug = False\ndef GetAddRospecRSSIMessage( MessageID = None, ROSpecID = 123, inventoryParameterSpecID = 1234,\n\t\tantennas = None, modeIdentifiers = None, maxNumberOfAntennaSupported = None,\n\t):\n\t#-----------------------------------------------------------------------------\n\t# Create a read everything Operation Spec message\n\t#\n\tif not antennas:\t# Default to all antennas if unspecified.\n\t\tantennas = [0]\n\t\n\tif maxNumberOfAntennaSupported:\n\t\tantennas = [a for a in antennas if a <= maxNumberOfAntennaSupported]\n\t\n\tif not modeIdentifiers:\t# Default to ModeIndex=1000 as this is common.\n\t\tmodeIdentifiers = [1000]\n\t\n\t# Pick a mode index from those available in the reader.\n\tfor modeIndex in (0,1000):\n\t\tif modeIndex in modeIdentifiers:\n\t\t\tbreak\n\telse:\n\t\tmodeIndex = modeIdentifiers[0]\t# If we can't find the ones we are looking for, pick a valid one.\n\t\n\trospecMessage = ADD_ROSPEC_Message( MessageID = MessageID, Parameters = [\n\t\t# Initialize to disabled.\n\t\tROSpec_Parameter(\n\t\t\tROSpecID = ROSpecID,\n\t\t\tCurrentState = ROSpecState.Disabled,\n\t\t\tParameters = [\n\t\t\t\tROBoundarySpec_Parameter(\t\t# Configure boundary spec (start and stop triggers for the reader).\n\t\t\t\t\tParameters = [\n\t\t\t\t\t\t# Start immediately.\n\t\t\t\t\t\tROSpecStartTrigger_Parameter(ROSpecStartTriggerType = ROSpecStartTriggerType.Immediate),\n\t\t\t\t\t\t# No stop trigger.\n\t\t\t\t\t\tROSpecStopTrigger_Parameter(ROSpecStopTriggerType = ROSpecStopTriggerType.Null),\n\t\t\t\t\t]\n\t\t\t\t),\n\t\t\t\t\n\t\t\t\tAISpec_Parameter(\t\t\t\t# Antenna Inventory Spec (specifies which antennas and protocol to use).\n\t\t\t\t\tAntennaIDs = antennas,\t\t# Use specified antennas.\n\t\t\t\t\tParameters = [\n\t\t\t\t\t\tAISpecStopTrigger_Parameter(\n\t\t\t\t\t\t\tAISpecStopTriggerType = AISpecStopTriggerType.Null,\n\t\t\t\t\t\t),\n\t\t\t\t\t\tInventoryParameterSpec_Parameter(\n\t\t\t\t\t\t\tInventoryParameterSpecID = inventoryParameterSpecID,\n\t\t\t\t\t\t\tProtocolID = AirProtocols.EPCGlobalClass1Gen2,\n\t\t\t\t\t\t\tParameters = [\n\t\t\t\t\t\t\t\tAntennaConfiguration_Parameter(\n\t\t\t\t\t\t\t\t\tAntennaID = 0,\t# All antennas\n\t\t\t\t\t\t\t\t\tParameters = [\n\t\t\t\t\t\t\t\t\t\tC1G2InventoryCommand_Parameter(\n\t\t\t\t\t\t\t\t\t\t\tTagInventoryStateAware = False,\n\t\t\t\t\t\t\t\t\t\t\tParameters = [\n\t\t\t\t\t\t\t\t\t\t\t\tC1G2RFControl_Parameter(\n\t\t\t\t\t\t\t\t\t\t\t\t\tModeIndex = modeIndex,\n\t\t\t\t\t\t\t\t\t\t\t\t\tTari = 0,\n\t\t\t\t\t\t\t\t\t\t\t\t),\n\t\t\t\t\t\t\t\t\t\t\t\tC1G2SingulationControl_Parameter(\n\t\t\t\t\t\t\t\t\t\t\t\t\tSession = 2,\n\t\t\t\t\t\t\t\t\t\t\t\t\tTagPopulation = 32,\n\t\t\t\t\t\t\t\t\t\t\t\t\tTagTransitTime = 0,\n\t\t\t\t\t\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\t\t\t\t\t],\n\t\t\t\t\t\t\t\t\t\t),\n\t\t\t\t\t\t\t\t\t],\n\t\t\t\t\t\t\t\t),\n\t\t\t\t\t\t\t],\n\t\t\t\t\t\t),\n\t\t\t\t\t]\n\t\t\t\t),\n\t\t\t\t\n\t\t\t\tROReportSpec_Parameter(\t\t\t# Report spec (specifies what to send from the reader).\n\t\t\t\t\tROReportTrigger = 
ROReportTriggerType.Upon_N_Tags_Or_End_Of_ROSpec,\n\t\t\t\t\tN = 1,\n\t\t\t\t\tParameters = [\n\t\t\t\t\t\tTagReportContentSelector_Parameter(\n\t\t\t\t\t\t\tEnableAntennaID = True,\n\t\t\t\t\t\t\tEnableFirstSeenTimestamp = True,\n\t\t\t\t\t\t\tEnablePeakRSSI = True,\n\t\t\t\t\t\t),\n\t\t\t\t\t]\n\t\t\t\t),\n\t\t\t]\n\t\t)\t# ROSpec_Parameter\n\t])\t# ADD_ROSPEC_Message\n\treturn rospecMessage\n\t\nclass Impinj:\n\n\tdef __init__( self, dataQ, strayQ, messageQ, shutdownQ, impinjHost, impinjPort, antennaStr, statusCB ):\n\t\tself.impinjHost = impinjHost\n\t\tself.impinjPort = impinjPort\n\t\tself.statusCB = statusCB\n\t\tif not antennaStr:\n\t\t\tself.antennas = [0]\n\t\telse:\n\t\t\tself.antennas = [int(a) for a in antennaStr.split()]\n\t\tself.tagGroup = None\n\t\tself.tagGroupTimer = None\n\t\tself.dataQ = dataQ\t\t\t# Queue to write tag reads.\n\t\tself.strayQ = strayQ\t\t# Queue to write stray reads.\n\t\tself.messageQ = messageQ\t# Queue to write operational messages.\n\t\tself.shutdownQ = shutdownQ\t# Queue to listen for shutdown.\n\t\tself.logQ = Queue()\n\t\tself.rospecID = 123\n\t\tself.readerSocket = None\n\t\tself.timeCorrection = None\t# Correction between the reader's time and the computer's time.\n\t\tself.connectedAntennas = []\n\t\tself.antennaReadCount = defaultdict(int)\n\t\tself.lastReadTime = {}\n\t\tself.start()\n\t\t\n\tdef start( self ):\n\t\t# Create a log file name.\n\t\ttNow = getTimeNow()\n\t\tdataDir = os.path.join( HOME_DIR, 'ImpinjData' )\n\t\tif not os.path.isdir( dataDir ):\n\t\t\tos.makedirs( dataDir )\n\t\tself.fname = os.path.join( dataDir, tNow.strftime('Impinj-%Y-%m-%d-%H-%M-%S.txt') )\n\t\t\n\t\t# Create a log queue and start a thread to write the log.\n\t\tself.logQ.put( 'msg', 'Tag ID,Discover Time' )\n\t\tself.logFileThread = threading.Thread( target=self.handleLogFile )\n\t\tself.logFileThread.daemon = True\n\t\tself.logFileThread.start()\n\t\n\t\tself.keepGoing = True\n\t\tself.tagCount = 0\n\t\t\n\t#-------------------------------------------------------------------------\n\t\n\tdef checkKeepGoing( self ):\n\t\tif not self.keepGoing:\n\t\t\treturn False\n\t\t\t\n\t\ttry:\n\t\t\t# Check the shutdown queue for a message. 
If there is one, shutdown.\n\t\t\td = self.shutdownQ.get( False )\n\t\t\tself.keepGoing = False\n\t\t\treturn False\n\t\texcept Empty:\n\t\t\treturn True\n\t\t\t\n\tdef reconnectDelay( self ):\n\t\tif self.checkKeepGoing():\n\t\t\ttime.sleep( ReconnectDelaySeconds )\n\t\t\t\n\t#-------------------------------------------------------------------------\n\t\n\tdef sendCommand( self, message ):\n\t\tself.messageQ.put( ('Impinj', '-----------------------------------------------------') )\n\t\tself.messageQ.put( ('Impinj', 'Sending Message:\\n{}\\n'.format(message)) )\n\t\ttry:\n\t\t\tmessage.send( self.readerSocket )\n\t\texcept Exception as e:\n\t\t\tself.messageQ.put( ('Impinj', 'Send command fails: {}'.format(e)) )\n\t\t\treturn False\n\t\t\t\n\t\ttry:\n\t\t\tresponse = WaitForMessage( message.MessageID, self.readerSocket )\n\t\texcept Exception as e:\n\t\t\tself.messageQ.put( ('Impinj', 'Get response fails: {}'.format(e)) )\n\t\t\treturn False\n\t\t\t\n\t\tself.messageQ.put( ('Impinj', 'Received Response:\\n{}\\n'.format(response)) )\n\t\treturn True, response\n\t\t\n\tdef sendCommands( self ):\n\t\tself.connectedAntennas = []\n\t\tself.antennaReadCount = defaultdict(int)\n\t\t\n\t\tself.messageQ.put( ('Impinj', 'Connected to: ({}:{})'.format(self.impinjHost, self.impinjPort) ) )\n\t\t\n\t\tself.messageQ.put( ('Impinj', 'Waiting for READER_EVENT_NOTIFICATION...') )\n\t\tresponse = UnpackMessageFromSocket( self.readerSocket )\n\t\tself.messageQ.put( ('Impinj', '\\nReceived Response:\\n{}\\n'.format(response)) )\n\t\t\n\t\t# Compute a correction between the reader's time and the computer's time.\n\t\treaderTime = response.getFirstParameterByClass(UTCTimestamp_Parameter).Microseconds\n\t\treaderTime = datetime.datetime.utcfromtimestamp( readerTime / 1000000.0 )\n\t\tself.timeCorrection = getTimeNow() - readerTime\n\t\t\n\t\tself.messageQ.put( ('Impinj', '\\nReader time is {} seconds different from computer time\\n'.format(self.timeCorrection.total_seconds())) )\n\t\t\n\t\t# Reset to factory defaults.\n\t\tsuccess, response = self.sendCommand( SET_READER_CONFIG_Message(ResetToFactoryDefault = True) )\n\t\tif not success:\n\t\t\treturn False\n\t\t\t\n\t\t# Get the connected antennas.\n\t\tself.getConnectedAntennas()\n\t\t\n\t\t# Configure a periodic Keepalive message.\n\t\t# Change receiver sensitivity (if specified). This value is reader dependent.\n\t\treceiverSensitivityParameter = []\n\t\tif ReceiverSensitivity is not None:\n\t\t\treceiverSensitivityParameter.append(\n\t\t\t\tRFReceiver_Parameter( \n\t\t\t\t\tReceiverSensitivity = ReceiverSensitivity\n\t\t\t\t)\n\t\t\t)\n\t\t\n\t\t# Change transmit power (if specified). 
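checkKeepGoing in the Impinj record above polls a shutdown queue without blocking; the arrival of any message at all is the stop signal. The pattern in isolation:

from queue import Queue, Empty

shutdown_q = Queue()

def keep_going():
    try:
        shutdown_q.get(False)  # non-blocking; raises Empty when no message
        return False           # any message means: stop
    except Empty:
        return True

print(keep_going())   # True
shutdown_q.put('stop')
print(keep_going())   # False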
This value is reader dependent.\n\t\ttransmitPowerParameter = []\n\t\tif TransmitPower is not None:\n\t\t\ttransmitPowerParameter.append(\n\t\t\t\tRFTransmitter_Parameter( \n\t\t\t\t\tHopTableID = 1,\n\t\t\t\t\tChannelIndex = 0,\n\t\t\t\t\tTransmitPower = TransmitPower,\n\t\t\t\t)\n\t\t\t)\n\t\t\n\t\t# Change Inventory Control (if specified).\n\t\tinventoryCommandParameter = []\n\t\tif any(v is not None for v in [InventorySession, TagPopulation, TagTransitTime]):\n\t\t\tinventoryCommandParameter.append(\n\t\t\t\tC1G2InventoryCommand_Parameter( Parameters = [\n\t\t\t\t\t\tC1G2SingulationControl_Parameter(\n\t\t\t\t\t\t\tSession = InventorySession or 0,\n\t\t\t\t\t\t\tTagPopulation = TagPopulation or TagPopulationDefault,\n\t\t\t\t\t\t\tTagTransitTime = (TagTransitTime or 3)*1000,\n\t\t\t\t\t\t),\n\t\t\t\t\t],\n\t\t\t\t)\n\t\t\t)\n\t\t\n\t\tsuccess, response = self.sendCommand(\n\t\t\tSET_READER_CONFIG_Message( Parameters = [\n\t\t\t\t\tAntennaConfiguration_Parameter(\n\t\t\t\t\t\tAntennaID = 0,\n\t\t\t\t\t\tParameters = receiverSensitivityParameter + transmitPowerParameter + inventoryCommandParameter,\n\t\t\t\t\t),\n\t\t\t\t\tKeepaliveSpec_Parameter(\n\t\t\t\t\t\tKeepaliveTriggerType = KeepaliveTriggerType.Periodic,\n\t\t\t\t\t\tPeriodicTriggerValue = int(KeepaliveSeconds*1000),\n\t\t\t\t\t),\n\t\t\t\t],\n\t\t\t),\n\t\t)\n\t\tif not success:\n\t\t\treturn False\n\t\t\n\t\t# Disable all rospecs in the reader.\n\t\tsuccess, response = self.sendCommand( DISABLE_ROSPEC_Message(ROSpecID = 0) )\n\t\tif not success:\n\t\t\treturn False\n\t\t\n\t\t# Delete our old rospec.\n\t\tsuccess, response = self.sendCommand( DELETE_ROSPEC_Message(ROSpecID = self.rospecID) )\n\t\tif not success:\n\t\t\treturn False\n\t\t\t\n\t\t# Get the C1G2UHFRFModeTable and extract available mode identifiers.\n\t\tmodeIdentifiers = None\n\t\tmaxNumberOfAntennaSupported = 4\n\t\ttry:\n\t\t\tsuccess, response = self.sendCommand(GET_READER_CAPABILITIES_Message(RequestedData = GetReaderCapabilitiesRequestedData.All))\n\t\t\tif success:\n\t\t\t\tmodeIdentifiers = [e.ModeIdentifier for e in response.getFirstParameterByClass(C1G2UHFRFModeTable_Parameter).Parameters]\n\t\t\t\tgdc = response.getFirstParameterByClass(GeneralDeviceCapabilities_Parameter)\n\t\t\t\tmaxNumberOfAntennaSupported = gdc.MaxNumberOfAntennaSupported\n\t\t\telse:\n\t\t\t\tself.messageQ.put( ('Impinj', 'GET_READER_CAPABILITIES fails.') )\n\t\texcept Exception as e:\n\t\t\tself.messageQ.put( ('Impinj', 'GET_READER_CAPABILITIES Exception: {}:\\n{}'.format(e, traceback.format_exc())) )\n\t\t\t\t\n\t\t# Configure our new rospec.\n\t\tif ProcessingMethod == FirstReadMethod:\n\t\t\tcmd = GetBasicAddRospecMessage(ROSpecID = self.rospecID, antennas = self.antennas)\n\t\telse:\n\t\t\tcmd = GetAddRospecRSSIMessage(\n\t\t\t\tROSpecID = self.rospecID, antennas = self.antennas,\n\t\t\t\tmodeIdentifiers=modeIdentifiers, maxNumberOfAntennaSupported=maxNumberOfAntennaSupported\n\t\t\t)\n\t\tsuccess, response = self.sendCommand(cmd)\n\t\tif not success:\n\t\t\treturn False\n\t\t\t\n\t\t# Enable our new rospec.\n\t\tsuccess, response = self.sendCommand( ENABLE_ROSPEC_Message(ROSpecID = self.rospecID) )\n\t\tif not success:\n\t\t\treturn False\n\t\t\n\t\tsuccess = (success and isinstance(response, ENABLE_ROSPEC_RESPONSE_Message) and response.success())\n\t\treturn success\n\t\n\tdef getConnectedAntennas( self ):\n\t\tsuccess, response = self.sendCommand( GET_READER_CONFIG_Message(RequestedData=GetReaderConfigRequestedData.AntennaProperties) )\n\t\tif 
success:\n\t\t\tself.connectedAntennas = [p.AntennaID for p in response.Parameters\n\t\t\t\tif isinstance(p, AntennaProperties_Parameter) and p.AntennaConnected and p.AntennaID <= 4]\n\t\treturn success\n\t\t\n\tdef reportTag( self, tagID, discoveryTime, sampleSize=1, antennaID=0, quadReg=False ):\n\t\tlrt = self.lastReadTime.get(tagID, tOld)\n\t\tif discoveryTime > lrt:\n\t\t\tself.lastReadTime[tagID] = discoveryTime\n\t\t\n\t\tif (discoveryTime - lrt).total_seconds() < RepeatSeconds:\n\t\t\tself.messageQ.put( (\n\t\t\t\t'Impinj',\n\t\t\t\t'Received {}. tag={} Skipped (<{} secs ago). {}'.format(self.tagCount, tagID, RepeatSeconds,\n\t\t\t\tdiscoveryTime.strftime('%H:%M:%S.%f')),\n\t\t\t\tself.antennaReadCount,\n\t\t\t\t)\n\t\t\t)\n\t\t\treturn False\n\t\t\t\n\t\tself.dataQ.put( (tagID, discoveryTime) )\n\t\t\n\t\tself.logQ.put( (\n\t\t\t\t'log',\n\t\t\t\t'{},{}'.format(\n\t\t\t\t\ttagID,\n\t\t\t\t\tdiscoveryTime.strftime('%a %b %d %H:%M:%S.%f %Z %Y-%m-%d'),\n\t\t\t\t)\n\t\t\t)\n\t\t)\n\t\t\n\t\tself.messageQ.put( (\n\t\t\t'Impinj',\n\t\t\t'{} {}. {} - {}{}{}'.format(\n\t\t\t\t\t'QuadReg' if quadReg else 'FirstRead',\n\t\t\t\t\tself.tagCount,\n\t\t\t\t\ttagID,\n\t\t\t\t\tdiscoveryTime.strftime('%H:%M:%S.%f'),\n\t\t\t\t\t' samples={}'.format(sampleSize) if sampleSize > 1 else '',\n\t\t\t\t\t' antennaID={}'.format(antennaID) if antennaID else '',\n\t\t\t),\n\t\t\tself.antennaReadCount,\n\t\t\t)\n\t\t)\n\t\tBeep()\n\t\treturn True\n\t\n\tdef handleTagGroup( self ):\n\t\tif not self.tagGroup:\n\t\t\treturn\n\t\treads, strays = self.tagGroup.getReadsStrays( method=ProcessingMethod, antennaChoice=AntennaChoice )\n\t\tfor tagID, discoveryTime, sampleSize, antennaID in reads:\n\t\t\tself.reportTag( tagID, discoveryTime, sampleSize, antennaID, True )\n\t\t\t\n\t\tself.strayQ.put( ('strays', strays) )\n\t\tself.tagGroupTimer = threading.Timer( 1.0, self.handleTagGroup )\n\t\tself.tagGroupTimer.start()\n\t\n\tdef handleLogFile( self ):\n\t\twhile True:\n\t\t\tmsg = self.logQ.get()\n\t\t\tself.logQ.task_done()\n\t\t\t\n\t\t\tif msg[0] == 'shutdown':\n\t\t\t\treturn\n\t\t\ttry:\n\t\t\t\tpf = open( self.fname, 'a' )\n\t\t\texcept Exception:\n\t\t\t\tcontinue\n\t\t\t\n\t\t\tpf.write( msg[1] if msg[1].endswith('\\n') else msg[1] + '\\n' )\n\t\t\twhile True:\n\t\t\t\ttry:\n\t\t\t\t\tmsg = self.logQ.get( False )\n\t\t\t\texcept Empty:\n\t\t\t\t\tbreak\n\t\t\t\tself.logQ.task_done()\n\t\t\t\t\n\t\t\t\tif msg[0] == 'shutdown':\n\t\t\t\t\treturn\n\t\t\t\tpf.write( msg[1] if msg[1].endswith('\\n') else msg[1] + '\\n' )\n\t\t\tpf.close()\n\t\t\ttime.sleep( 0.1 )\n\t\n\tdef runServer( self ):\n\t\tglobal tAntennaConnectedLast, tAntennaConnectedLastLock\n\t\t\n\t\tself.messageQ.put( ('BackupFile', self.fname) )\n\t\t\n\t\tself.messageQ.put( ('Impinj', '*****************************************' ) )\n\t\tself.messageQ.put( ('Impinj', 'Reader Server Started: ({}:{})'.format(self.impinjHost, self.impinjPort) ) )\n\t\t\t\n\t\t# Create an old default time for last tag read.\n\t\ttOld = getTimeNow() - datetime.timedelta( days = 200 )\n\t\tutcfromtimestamp = datetime.datetime.utcfromtimestamp\n\t\t\n\t\twhile self.checkKeepGoing():\n\t\t\tself.readerSocket = None\t# Voodoo to ensure that the socket is reset properly.\n\t\t\t\n\t\t\t#------------------------------------------------------------\n\t\t\t# Connect Mode.\n\t\t\t#\n\t\t\t# Create a socket to connect to the reader.\n\t\t\tself.readerSocket = socket.socket( socket.AF_INET, socket.SOCK_STREAM )\n\t\t\tself.readerSocket.settimeout( ConnectionTimeoutSeconds 
)\n\t\t\t\n\t\t\tself.messageQ.put( ('Impinj', 'state', False) )\n\t\t\tself.messageQ.put( ('Impinj', '') )\n\t\t\tself.messageQ.put( ('Impinj', 'Trying to Connect to Reader: ({}:{})...'.format(self.impinjHost, self.impinjPort) ) )\n\t\t\tself.messageQ.put( ('Impinj', 'ConnectionTimeout={:.2f} seconds'.format(ConnectionTimeoutSeconds) ) )\n\t\t\t\n\t\t\ttry:\n\t\t\t\tself.readerSocket.connect( (self.impinjHost, self.impinjPort) )\n\t\t\texcept Exception as e:\n\t\t\t\tself.messageQ.put( ('Impinj', 'Reader Connection Failed: {}'.format(e) ) )\n\t\t\t\tself.readerSocket.close()\n\t\t\t\tself.messageQ.put( ('Impinj', 'Attempting Reconnect in {} seconds...'.format(ReconnectDelaySeconds)) )\n\t\t\t\tself.reconnectDelay()\n\t\t\t\tcontinue\n\n\t\t\tself.messageQ.put( ('Impinj', 'state', True) )\n\t\t\t\n\t\t\t#------------------------------------------------------------\n\t\t\t# Initialize the reader.\n\t\t\t#\n\t\t\ttry:\n\t\t\t\tsuccess = self.sendCommands()\n\t\t\texcept Exception as e:\n\t\t\t\tself.messageQ.put( ('Impinj', 'Send Command Error={}'.format(e)) )\n\t\t\t\tsuccess = False\n\t\t\t\t\n\t\t\tif not success:\n\t\t\t\tself.messageQ.put( ('Impinj', 'Reader Initialization Failed.') )\n\t\t\t\tself.messageQ.put( ('Impinj', 'Disconnecting Reader.' ) )\n\t\t\t\tself.messageQ.put( ('Impinj', 'state', False) )\n\t\t\t\tself.readerSocket.close()\n\t\t\t\tself.messageQ.put( ('Impinj', 'Attempting Reconnect in {} seconds...'.format(ReconnectDelaySeconds)) )\n\t\t\t\tself.reconnectDelay()\n\t\t\t\tself.statusCB()\n\t\t\t\tcontinue\n\t\t\t\t\n\t\t\tself.statusCB(\n\t\t\t\tconnectedAntennas = self.connectedAntennas,\n\t\t\t\ttimeCorrection = self.timeCorrection,\n\t\t\t)\n\t\t\t\n\t\t\tself.tagGroup = TagGroup()\n\t\t\tself.handleTagGroup()\n\t\t\t\t\n\t\t\ttUpdateLast = tKeepaliveLast = getTimeNow()\n\t\t\tantennaCheckInterval = 10.0\n\t\t\twith tAntennaConnectedLastLock:\n\t\t\t\ttAntennaConnectedLast = tUpdateLast - datetime.timedelta( seconds=antennaCheckInterval-2.0 )\t# Force the antenna status to be updated at start.\n\t\t\t\n\t\t\tself.tagCount = 0\n\t\t\tlastDiscoveryTime = None\n\t\t\twhile self.checkKeepGoing():\n\t\t\t\n\t\t\t\t#------------------------------------------------------------\n\t\t\t\t# Read Mode.\n\t\t\t\t#\n\t\t\t\t\n\t\t\t\tt = getTimeNow()\n\t\t\t\t\t\n\t\t\t\t#------------------------------------------------------------\n\t\t\t\t# Check on the antenna connection status.\n\t\t\t\t#\n\t\t\t\tif (t - tAntennaConnectedLast).total_seconds() >= antennaCheckInterval:\n\t\t\t\t\tself.messageQ.put( ('Impinj', 'Checking Antenna Connections...') )\n\t\t\t\t\ttry:\n\t\t\t\t\t\tGET_READER_CONFIG_Message(RequestedData=GetReaderConfigRequestedData.AntennaProperties).send( self.readerSocket )\n\t\t\t\t\t\twith tAntennaConnectedLastLock:\n\t\t\t\t\t\t\ttAntennaConnectedLast = t\n\t\t\t\t\texcept Exception as e:\n\t\t\t\t\t\tself.messageQ.put( ('Impinj', 'GET_READER_CONFIG send fails: {}'.format(e)) )\n\t\t\t\t\t\tself.readerSocket.close()\n\t\t\t\t\t\tself.messageQ.put( ('Impinj', 'Attempting Reconnect...') )\n\t\t\t\t\t\tbreak\n\t\t\t\t\n\t\t\t\t#------------------------------------------------------------\n\t\t\t\t# Messages from the reader.\n\t\t\t\t# Handle connection/timeout errors here.\n\t\t\t\t#\n\t\t\t\ttry:\n\t\t\t\t\tresponse = UnpackMessageFromSocket( self.readerSocket )\n\t\t\t\t\n\t\t\t\texcept socket.timeout:\n\t\t\t\t\tif (t - tKeepaliveLast).total_seconds() > KeepaliveSeconds * 2:\n\t\t\t\t\t\tself.messageQ.put( ('Impinj', 'Reader Connection Lost (missing 
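The connect phase of runServer above is a timeout-connect-retry loop. A freestanding sketch of the same shape (function name, retry count, and delays are illustrative, not the record's):

import socket
import time

def connect_with_retry(host, port, timeout_s=3.0, delay_s=2.0, attempts=3):
    for _ in range(attempts):
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.settimeout(timeout_s)
        try:
            s.connect((host, port))
            return s             # caller owns the connected socket
        except OSError:
            s.close()
            time.sleep(delay_s)  # back off before the next attempt
    return None                  # every attempt failed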
Keepalive).') )\n\t\t\t\t\t\tself.readerSocket.close()\n\t\t\t\t\t\tself.messageQ.put( ('Impinj', 'Attempting Reconnect...') )\n\t\t\t\t\t\tbreak\n\t\t\t\t\t\n\t\t\t\t\tif (t - tUpdateLast).total_seconds() >= ReaderUpdateMessageSeconds:\n\t\t\t\t\t\tself.messageQ.put( ('Impinj', 'Listening for Impinj reader data...') )\n\t\t\t\t\t\ttUpdateLast = t\n\t\t\t\t\tcontinue\n\t\t\t\t\n\t\t\t\texcept Exception as e:\n\t\t\t\t\tif (t - tKeepaliveLast).total_seconds() > KeepaliveSeconds * 2:\n\t\t\t\t\t\tself.messageQ.put( ('Impinj', 'Reader Connection Lost (Check your network adapter).') )\n\t\t\t\t\t\tself.readerSocket.close()\n\t\t\t\t\t\tself.messageQ.put( ('Impinj', 'Attempting Reconnect...') )\n\t\t\t\t\t\tbreak\n\t\t\t\t\t\n\t\t\t\t\tif (t - tUpdateLast).total_seconds() >= ReaderUpdateMessageSeconds:\n\t\t\t\t\t\tself.messageQ.put( ('Impinj', 'Listening for Impinj reader data...') )\n\t\t\t\t\t\ttUpdateLast = t\n\t\t\t\t\tcontinue\n\t\t\t\t\n\t\t\t\t#------------------------------------------------------------\n\t\t\t\t# Keepalive.\n\t\t\t\t#\n\t\t\t\tif isinstance(response, KEEPALIVE_Message):\n\t\t\t\t\t# Respond to the KEEP_ALIVE message with KEEP_ALIVE_ACK.\n\t\t\t\t\ttry:\n\t\t\t\t\t\tKEEPALIVE_ACK_Message().send( self.readerSocket )\n\t\t\t\t\texcept socket.timeout:\n\t\t\t\t\t\tself.messageQ.put( ('Impinj', 'Reader Connection Lost (Keepalive_Ack timeout).') )\n\t\t\t\t\t\tself.readerSocket.close()\n\t\t\t\t\t\tself.messageQ.put( ('Impinj', 'Attempting Reconnect...') )\n\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\n\t\t\t\t\ttKeepaliveLast = getTimeNow()\n\t\t\t\t\tcontinue\n\t\t\t\t\t\n\t\t\t\t#------------------------------------------------------------\n\t\t\t\t# Reader config (to get antenna connection status).\n\t\t\t\t#\n\t\t\t\tif isinstance(response, GET_READER_CONFIG_RESPONSE_Message):\n\t\t\t\t\tself.connectedAntennas = sorted( p.AntennaID for p in response.Parameters\n\t\t\t\t\t\tif isinstance(p, AntennaProperties_Parameter) and p.AntennaConnected and p.AntennaID <= 4 )\n\t\t\t\t\tself.messageQ.put( ('Impinj', 'Connected antennas: {}'.format(','.join(str(a) for a in self.connectedAntennas)) ) )\n\t\t\t\t\tself.statusCB(\n\t\t\t\t\t\tconnectedAntennas = self.connectedAntennas,\n\t\t\t\t\t\ttimeCorrection = self.timeCorrection,\n\t\t\t\t\t)\n\t\t\t\t\tcontinue\n\t\t\t\t\n\t\t\t\t#------------------------------------------------------------\n\t\t\t\t# Unexpected messages.\n\t\t\t\t#\n\t\t\t\tif not isinstance(response, RO_ACCESS_REPORT_Message):\n\t\t\t\t\tif not isinstance(response, READER_EVENT_NOTIFICATION_Message):\n\t\t\t\t\t\tself.messageQ.put( ('Impinj', 'Skipping: {}'.format(response.__class__.__name__)) )\n\t\t\t\t\tcontinue\n\t\t\t\t\n\t\t\t\t#------------------------------------------------------------\n\t\t\t\t# Tag read.\n\t\t\t\t#\n\t\t\t\ttry:\n\t\t\t\t\tdiscoveryTime = utcfromtimestamp( tag['Timestamp'] / 1000000.0 )\n\t\t\t\t\tif ImpinjDebug and lastDiscoveryTime is not None:\n\t\t\t\t\t\tprint( '{} \\r'.format( (discoveryTime - lastDiscoveryTime).total_seconds() ) )\n\t\t\t\t\tlastDiscoveryTime = discoveryTime\n\t\t\t\texcept Exception:\n\t\t\t\t\tpass\n\t\t\t\t\n\t\t\t\tfor tag in response.getTagData():\n\t\t\t\t\tself.tagCount += 1\n\t\t\t\t\t\n\t\t\t\t\tantennaID = tag['AntennaID']\n\t\t\t\t\t\n\t\t\t\t\ttry:\n\t\t\t\t\t\tself.antennaReadCount[antennaID] += 1\n\t\t\t\t\texcept Exception as e:\n\t\t\t\t\t\tself.messageQ.put( ('Impinj', 'Received {}. 
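The liveness rule in the read loop above: if no KEEPALIVE message arrives within twice the requested interval, the connection is declared lost and a reconnect begins. Reduced to a predicate:

import datetime

KEEPALIVE_SECONDS = 2

def connection_lost(last_keepalive, now):
    return (now - last_keepalive).total_seconds() > KEEPALIVE_SECONDS * 2

t0 = datetime.datetime.now()
print(connection_lost(t0, t0 + datetime.timedelta(seconds=1)))  # False
print(connection_lost(t0, t0 + datetime.timedelta(seconds=5)))  # True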
Missing AntennaID.'.format(self.tagCount)) )\n\t\t\t\t\t\n\t\t\t\t\ttry:\n\t\t\t\t\t\ttagID = tag['EPC']\n\t\t\t\t\texcept Exception as e:\n\t\t\t\t\t\tself.messageQ.put( ('Impinj', 'Received {}. Skipping: missing tagID.'.format(self.tagCount)) )\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t\n\t\t\t\t\ttry:\n\t\t\t\t\t\ttagID = HexFormatToStr( tagID )\n\t\t\t\t\texcept Exception as e:\n\t\t\t\t\t\tself.messageQ.put( ('Impinj', 'Received {}. Skipping: HexFormatToStr fails. Error={}'.format(self.tagCount, e)) )\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t\n\t\t\t\t\ttry:\n\t\t\t\t\t\tdiscoveryTime = tag['Timestamp']\t\t# In microseconds since Jan 1, 1970\n\t\t\t\t\texcept Exception as e:\n\t\t\t\t\t\tself.messageQ.put( ('Impinj', 'Received {}. Skipping: Missing Timestamp'.format(self.tagCount)) )\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t\n\t\t\t\t\tpeakRSSI = tag.get('PeakRSSI', None)\t\t# -127..127 in dB.\n\t\t\t\t\t\n\t\t\t\t\t# Convert discoveryTime to Python format and correct for reader time difference.\n\t\t\t\t\tdiscoveryTime = utcfromtimestamp( discoveryTime / 1000000.0 ) + self.timeCorrection\n\t\t\t\t\t\n\t\t\t\t\tif peakRSSI is not None:\n\t\t\t\t\t\tself.tagGroup.add( antennaID, tagID, discoveryTime, peakRSSI )\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.reportTag( tagID, discoveryTime, antennaID=antennaID )\n\t\t\n\t\t# Cleanup.\n\t\tif self.readerSocket:\n\t\t\ttry:\n\t\t\t\tresponse = self.sendCommand( CLOSE_CONNECTION_Message() )\n\t\t\texcept socket.timeout:\n\t\t\t\tpass\n\t\t\tself.readerSocket.close()\n\t\t\tself.readerSocket = None\n\t\t\n\t\tself.logQ.put( ('shutdown',) )\n\t\tself.logFileThread.join()\n\n\t\tif self.tagGroupTimer:\n\t\t\tself.tagGroupTimer.cancel()\n\t\t\n\t\treturn True\n\t\t\n\tdef purgeDataQ( self ):\n\t\t# Drain any messages still waiting in the data queue.\n\t\twhile True:\n\t\t\ttry:\n\t\t\t\tself.dataQ.get( False )\n\t\t\texcept Empty:\n\t\t\t\tbreak\n\nimpinj = None\ndef ImpinjServer( dataQ, messageQ, strayQ, shutdownQ, impinjHost, impinjPort, antennaStr, statusCB=None ):\n\tglobal impinj\n\timpinj = Impinj(dataQ, messageQ, strayQ, shutdownQ, impinjHost, impinjPort, antennaStr, statusCB)\n\timpinj.runServer()\n","repo_name":"esitarski/CrossMgr","sub_path":"CrossMgrImpinj/Impinj.py","file_name":"Impinj.py","file_ext":"py","file_size_in_byte":23159,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"85"}
+{"seq_id":"1203306561","text":"import sys\nsys.stdin = open('problem1.txt')\n\n\ndef count_time(selected):\n    global min_time  # tracks the minimum time\n    state = []  # chosen exit, remaining time, state (1 moving, 2 waiting to exit, 3 escaped)\n    for i in range(len(selected)):\n        if selected[i] == 0:\n            state.append([0, mt[0][i], 1])\n        if selected[i] == 1:\n            state.append([1, mt[1][i], 1])\n    # print(state)\n\n    # tracks the passage of time\n    time = 0\n\n    # records whether each person has escaped\n    escape = [0] * len(M)\n\n    # state of the two emergency exits over time (people waiting)\n    e0 = []\n    e1 = []\n\n    while sum(escape) < len(M):  # keep going until everyone has escaped\n        # time advances.\n        time += 1\n        # each exit's waiting queue shrinks by one.\n        if len(e0) > 0:\n            e0.pop(0)\n        if len(e1) > 0:\n            e1.pop(0)\n        # update each person's state.\n        a0 = a1 = False\n        for j in range(len(state)):\n            # if this person has not escaped yet, decrease the remaining time\n            if not escape[j]:\n                if state[j][1] > 0:\n                    state[j][1] -= 1\n\n                # if the person was moving and the remaining time hits 0, they reached the exit, so check the queue\n                if state[j][2] == 1 and state[j][1] == 0:\n                    # arrived at exit 0\n                    if state[j][0] == 0:\n                        state[j][1] += len(e0)\n                        e0.append('w')\n                        state[j][2] = 2\n                    # arrived at exit 1\n                    elif state[j][0] == 1:\n                        state[j][1] += len(e1)\n                        e1.append('w')\n                        state[j][2] = 2\n\n                # while waiting, escape once the remaining time hits 0\n                # only one person may pass through an exit at a time -- could two people end up exiting simultaneously?
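                # (editor's note: the a0/a1 flags below enforce that rule -- at most one person leaves each exit per one-second tick)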
                elif state[j][2] == 2 and state[j][1] == 0:\n                    if state[j][0] == 0 and a0 == False:\n                        state[j][2] = 3\n                        escape[j] = 1\n                        a0 = True\n                    elif state[j][0] == 1 and a1 == False:\n                        state[j][2] = 3\n                        escape[j] = 1\n                        a1 = True\n\n    # measure the time once everyone has escaped; update if it is a new minimum\n    if min_time > time:\n        min_time = time\n\n\ndef comb(selected, idx):\n    if idx == len(M):\n        # print(selected)\n        count_time(selected)\n        return\n\n    selected[idx] = 0\n    comb(selected, idx + 1)\n    selected[idx] = 1\n    comb(selected, idx + 1)\n\n\nT = int(input())\nfor tc in range(1, T + 1):\n    N = int(input())\n    arr = [list(map(int, input().split())) for _ in range(N)]\n    # print(N)\n    # print(arr)\n\n\n    # record the positions of the people and the exits\n    M = []\n    E = []\n    for r in range(N):\n        for c in range(N):\n            if arr[r][c] == 1:\n                M.append((r, c))\n            elif arr[r][c] == 2:\n                E.append((r, c))\n    # print(M, E)\n\n    # precompute the travel time from each person to each exit\n    mt = [[0] * len(M) for _ in range(len(E))]\n    for i in range(len(E)):\n        for j in range(len(M)):\n            mt[i][j] = abs(M[j][0] - E[i][0]) + abs(M[j][1] - E[i][1])\n    # print(mt)\n\n    # stores the result\n    min_time = 987654321\n\n    # decide which of the two exits (0 or 1) each person takes (combinations)\n    selected = [-1] * len(M)\n    comb(selected, 0)\n\n    # minimum time\n\n\n    print('#{} {}'.format(tc, min_time))\n\n\n","repo_name":"Anseik/algorithm","sub_path":"A형 모의 역량평가/problem1.py","file_name":"problem1.py","file_ext":"py","file_size_in_byte":3648,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"}
+{"seq_id":"12665824100","text":"# list entries in paginated blocks\n# delete a diary entry\n# search for an entry by date\n# option to edit an entry\n# record a diary entry\n# test of a change made via the web\nfrom peewee import *\nfrom collections import OrderedDict\nimport datetime\nimport sys\ndb = SqliteDatabase(\"diario.db\")\n\n# Database\nclass Entry(Model):\n    # pass the callable (not its result) so each row gets its own timestamp\n    timestamp = TimestampField(default=datetime.datetime.now)\n    content = TextField()\n    class Meta:\n        database = db\n# end Database\n\n\ndef add_entry():\n    \"\"\"Add New Entry\"\"\"\n    print(\"Enter your thought\")\n    print(\"Press Ctrl + D to finish\")\n    data = sys.stdin.read().strip()
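    # sys.stdin.read() blocks until EOF -- Ctrl+D on Unix/macOS, Ctrl+Z then Enter on Windows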
[Y/N]\").lower().strip() != 'n':\n Entry.create(content=data)\n print(\"Your entry save successfull!\")\n\ndef edit_entry():\n \"\"\"\"Edit Entry\"\"\"\n\ndef search_entry():\n \"\"\"Search Entry\"\"\"\n search_query =input(\"ingrese dato de busqueda: \").strip()\n list_entry(search_query)\n\ndef list_entry(search_query=None):\n \"\"\"List Entry\"\"\"\n entries = Entry.select().order_by(Entry.timestamp.desc())\n \n if search_query:\n entries = entries.where(Entry.content.contains(search_query))\n\n for entry in entries:\n timestamp= entry.timestamp.strftime('%A %B %d, %Y %I:%M%p')\n print(timestamp)\n print('-'*len(timestamp))\n print(entry.content)\n print('+'*len(entry.content))\n print('\\n')\n\ndef delete_entry():\n \"\"\"Delete Entry\"\"\"\n\ndef create_and_connect():\n db.connect()\n db.create_tables([Entry],safe=True)\n\n\n#menu to the diary\n\ndef menu_loop():\n \"\"\"show Menu\"\"\"\n choice = None\n while choice != 'q':\n print(\"-\"*8)\n print(\"Press 'q' to quit\")\n for key,value in menu.items():\n print(\"{}) {}\".format(key,value.__doc__))\n choice = input(\"Action: \").lower().strip()\n if choice in menu:\n menu[choice]()\n\nmenu = OrderedDict([\n (\"a\",add_entry),\n (\"l\",list_entry),\n (\"d\",delete_entry),\n (\"s\",search_entry)\n ])\n#Fin menu to diary\ncreate_and_connect()\nmenu_loop()\n","repo_name":"ppinac/Practica_Python","sub_path":"Diary/proyect.py","file_name":"proyect.py","file_ext":"py","file_size_in_byte":2161,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"9600754882","text":"from pathlib import Path\n\nfrom setuptools import setup, find_packages\n\nthis_directory = Path(__file__).parent\nlong_description = (this_directory / \"README.md\").read_text()\n\nsetup(\n name=\"aigen\",\n version=\"0.1.0\",\n license='MIT',\n author=\"Aigen Protocol\",\n author_email='kailash@ravenprotocol.com',\n packages=find_packages(),\n long_description=long_description,\n long_description_content_type='text/markdown',\n description=\"Aigen's open-source tools to create AINFTs effortlessly\",\n url='https://github.com/aigenprotocol/aigen',\n keywords=\"Aigen, open-source, AINFT, effortlessly\",\n install_requires=[\n \"python-dotenv\",\n \"tensorflow==2.13.0\",\n \"pandas\"\n ]\n)\n","repo_name":"aigenprotocol/aigen","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"85"} +{"seq_id":"31964389986","text":"print('Practising everything')\r\nprint('this is newline and\\n \\t this is new tab ')\r\n\r\npoem = '''\r\nwhat am i to you he asks\r\nyou are every hope i've ever had\r\nin a human form i replied..\r\n'''\r\nprint('------------------------------')\r\nprint(poem)\r\nprint('------------------------------')\r\n\r\nfive = 10-2+3-6\r\nprint(f'this should be five : {five}')\r\n\r\ndef secret_formula(started):\r\n\tbeans = started*500\r\n\tjars = beans/1000\r\n\tcrates = jars/100\r\n\treturn beans,jars,crates\r\n\r\nstart_point = 1000\r\nbeans,jars,crates = secret_formula(start_point)\r\n\r\nprint('with start point of {}'.format(start_point))\r\nprint(f'We would have {beans} jelly beans {jars} jars and {crates} crates..')\r\n\r\nstart_point/=10\r\nformula = secret_formula(start_point)\r\n\r\nprint('we will now have {} beans {} jars and {} 
print('we will now have {} beans {} jars and {} crates..'.format(*formula))\r\n","repo_name":"yeshwindbz9/pp","sub_path":"lp3thw/ex24.py","file_name":"ex24.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"}
+{"seq_id":"8782209400","text":"\"\"\"\nRenames the friendly_name from entity_name and writes it into the entity_registry.\n\nVersion V.0.0.1\n\nPackage:\n    sg_renamer.py\n\nconfiguration.yaml:\n    # entity_name must be --> <<entity_name>> --> without spaces and \n    sg_renamer:\n      - platform_name: hue --> platform_name to rename all entities\n        domain: \n          light --> domains to rename from the platform\n\nFor more details about this Class, please refer to the documentation at\nhttps://github.com/swissglider/homeassistant_custome_components\n\"\"\"\nimport logging\nimport yaml\n\nimport homeassistant.helpers.entity as entity_helper\nfrom homeassistant.helpers.event import track_state_change\nfrom homeassistant.components.group import ENTITY_ID_FORMAT as GROUP_ID_FORMAT\n\n# Initialize the logger\n_LOGGER = logging.getLogger(__name__)\n\n# The domain of your component. Equal to the filename of your component.\nDOMAIN = \"sg_renamer\"\n\ndef setup(hass, config):\n    \n    conf = config.get(DOMAIN, {})\n    to_rename_comp = {}\n\n    for panel in conf:\n        if 'platform_name' not in panel:\n            _LOGGER.critical('Error in config: following parameter not set - platform_name')\n            return False\n        platform_name = panel['platform_name']\n        \n        domain_filter = []\n        if 'domain' in panel:\n            filters = panel['domain'].split( )\n            for filter in filters:\n                domain_filter.append(filter)\n\n        to_rename_comp[platform_name] = {\n            'platform_name': platform_name,\n            'domain': domain_filter\n        }\n\n    #entity_id = entity_helper.generate_entity_id('{}','renamer', hass=hass)\n    name_changer = NameChanger(to_rename_comp, hass, DOMAIN)\n    hass.services.register(DOMAIN, 'rename', name_changer.rename)\n\n    # Return boolean to indicate that initialization was successful.\n    return True\n\nclass NameChanger:\n    def __init__(self, to_rename_comp, hass, domain):\n        self._to_rename_comp = to_rename_comp\n        self._hass = hass\n        self._domain = domain\n\n    def rename(self, call):\n        for comp_name in self._to_rename_comp:\n            comp = self._to_rename_comp[comp_name]\n            entities = self._get_all_entities(comp['domain'])\n            entities = self._get_filtered_by_platform(entities, comp['platform_name'])\n            self._rename_all_friendly_names(entities)\n\n    def _get_all_entities(self, domain_filters):\n        entities = []\n        if domain_filters and len(domain_filters) != 0:\n            for domain_filter in domain_filters:\n                entities = entities + self._hass.states.entity_ids(domain_filter)\n        else:\n            entities = self._hass.states.entity_ids()\n        return entities\n\n    def _get_filtered_by_platform(self, entities, platform_name):\n        return_entities = []\n\n        PATH_REGISTRY = 'entity_registry.yaml'\n        path = self._hass.config.path(PATH_REGISTRY)\n        data = None\n        with open(path) as fp:\n            data = yaml.load(fp)  # note: newer PyYAML needs an explicit Loader, e.g. yaml.safe_load(fp)\n        for entity_id, info in data.items():\n            if (entity_id in entities) and (str(info['platform']) == str(platform_name)):\n                return_entities.append(entity_id)\n        \n        return return_entities\n\n    def _get_friendly_name(self, name):\n        ''' return a friendly name from name. 
'''\n        if name:\n            name = name.replace(\".\", \" \")\n            name = name.replace(\"_\", \" \")\n            name = name.title()\n        return name\n    \n    # ========================================================\n    # ========================== Change Registry Name\n    # ========================================================\n\n    def _changeFriendlyName(self, entity_name, friendly_name):\n        ''' Change the Registry Name of the entity. '''\n        import requests\n        import json\n        url = 'http://localhost:8123/api/config/entity_registry/' + str(entity_name)\n        payload = {'name': str(friendly_name)}\n        headers = {'content-type': 'application/json'}\n        r = requests.post(url, data=json.dumps(payload), headers=headers)\n        if r.status_code != 200:\n            _LOGGER.warning('Friendly Name(' + friendly_name + ') change was not successful for Entity: ' + str(entity_name) + \" Status-Code: \" + str(r.status_code))\n\n    def _rename_all_friendly_names(self, entities):\n        ''' Change all entities Registry Names to friendly name. '''\n        for entity in entities:\n            object_name = entity.partition('.')[2]\n            friendly_name = object_name.partition('_')[2]\n            friendly_name = self._get_friendly_name(friendly_name)\n            self._changeFriendlyName(entity, friendly_name)\n","repo_name":"swissglider/homeassistant_custome_components","sub_path":"custom_components/sg_renamer.py","file_name":"sg_renamer.py","file_ext":"py","file_size_in_byte":4664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"}
+{"seq_id":"35125718242","text":"# Config file: options for signal fitting\n\n_year = '2018'\n\nsignalScriptCfg = {\n    \n  # Setup\n  #'inputWSDir':'/home/hep/mdk16/PhD/ggtt/CMSSW_10_2_0/src/HHToGGTT/output_trees/ws/signal_%s'%_year,\n  #'inputWSDir':'/home/hep/mdk16/PhD/ggtt/ParamNN/outputTrees/ws/signal_%s'%_year,\n  'inputWSDir':'/home/hep/mdk16/PhD/ggtt/ResonantGGTT/tagging_output/radionM500_HHggTauTau/outputTrees/2018/500/ws/signal_2018',\n  'procs':'auto', # if auto: inferred automatically from filenames\n  'cats':'auto', # if auto: inferred automatically from (0) workspace\n  'ext':'ggtt_resonant_500',\n  'analysis':'ggtt_resonant_500', # To specify which replacement dataset mapping (defined in ./python/replacementMap.py)\n  'year':'%s'%_year, # Use 'combined' if merging all years: not recommended\n  'massPoints':'125',\n\n  #Photon shape systematics  \n  'scales':'',\n  'scalesCorr':'',\n  'scalesGlobal':'',\n  'smears':'',\n\n  # Job submission options\n  'batch':'local', # ['condor','SGE','IC','local']\n  'queue':'hep.q'\n  #'batch':'condor', # ['condor','SGE','IC','local']\n  #'queue':'espresso',\n\n}\n","repo_name":"YonsiG/flashggFinalFit_XYH_ggbb","sub_path":"Signal/config_ggtt_500.py","file_name":"config_ggtt_500.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"}
+{"seq_id":"31713146587","text":"import os, re\n\ndef compare_and_replace(afile, bfile, tfile):\n\twith open(afile, 'rt') as file1:\n\t\twith open(bfile, 'rt') as file2:\n\t\t\twith open(tfile, 'wt') as file3:\n\t\t\t\tfor line1 in file1:\n\t\t\t\t\tresult2 = None\n\t\t\t\t\t# the regex captures a .strings entry of the form \"KEY\" = \"VALUE\"; (group 1 = key, group 2 = value)\n\t\t\t\t\tresult1 = re.match(r'(\".*\") *?= *?(\".*\");', line1)\n\t\t\t\t\tif result1:\n\t\t\t\t\t\tkey1 = result1.group(1)\n\t\t\t\t\t\tisExit = False\n\t\t\t\t\t\tfile2.seek(0)\n\t\t\t\t\t\tfor line2 in file2:\n\t\t\t\t\t\t\tresult2 = re.match(r'(\".*\") *?= *?(\".*\");', line2)\n\t\t\t\t\t\t\tif result2:\n\t\t\t\t\t\t\t\tkey2 = result2.group(1)\n\t\t\t\t\t\t\t\tvalue2 = result2.group(2)\n
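\t\t\t\t\t\t\t\t# keys are compared verbatim, quotes included, so \"Key\" and \"key\" are different entries\n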
\t\t\t\t\t\t\t\tif key1 == key2:\n\t\t\t\t\t\t\t\t\tnew_value_line = f\"{key1} = {value2};\\n\"\n\t\t\t\t\t\t\t\t\tfile3.write(new_value_line)\n\t\t\t\t\t\t\t\t\tisExit = True\n\t\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\tif isExit == False:\n\t\t\t\t\t\t\tfile3.write(line1)\n\t\t\t\t\telse:\n\t\t\t\t\t\tfile3.write(line1)\n\t\t\t\t\t\tcontinue\n\ndef recursionAllFiles(base_dir):\n\tfiles = []\n\tfor root, ds, fs in os.walk(base_dir):\n\t\tfor f in fs:\n\t\t\tif re.match(r'\\w+[.](strings)$', f):\n\t\t\t\tfullname = os.path.join(root, f)\n\t\t\t\tfiles.append(fullname)\n\treturn files\n\ndef create_dir_on_result_dir(a_dir, result_dir):\n\t# indentation normalized to tabs throughout; the original mixed tabs and spaces, which Python 3 rejects\n\twith os.scandir(a_dir) as entries:\n\t\tfor entry in entries:\n\t\t\tdir_path = os.path.join(result_dir, entry.name)\n\t\t\tif not os.path.exists(dir_path):\n\t\t\t\tos.mkdir(dir_path)\n\n\ndef main():\n\tprint(f'preparing the Result directories')\n\tcreate_dir_on_result_dir(\"A\", \"Result\")\n\ta_dir_files = recursionAllFiles(\"A\")\n\tb_dir_files = recursionAllFiles(\"B\")\n\tprint(f'starting to process files')\n\tfor a_f in a_dir_files:\n\t\ta_file_name = os.path.basename(a_f)\n\t\ta_file_dir = os.path.basename(os.path.dirname(a_f))\n\t\tis_found = False\n\t\tfor b_f in b_dir_files:\n\t\t\tb_file_dir = os.path.basename(os.path.dirname(b_f))\n\t\t\tif a_file_dir == b_file_dir:\n\t\t\t\tresult_f = os.path.join(\"Result\", b_file_dir, a_file_name)\n\t\t\t\tprint(f'processing file: from {a_f} to {result_f}')\n\t\t\t\tis_found = True\n\t\t\t\tcompare_and_replace(a_f, b_f, result_f)\n\t\tif is_found == False:\n\t\t\tprint(f\"file not found: {a_f}\")\n\nif __name__ == '__main__':\n\tmain()\n","repo_name":"stoull/Notebook","sub_path":"MobileDevelopment/Xcode/Xcode翻译文件处理/按行比较两个文件/按行对比抽取并替换工程翻译/compare_and_replace.py","file_name":"compare_and_replace.py","file_ext":"py","file_size_in_byte":1964,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"}
+{"seq_id":"19581284167","text":"n = int(input())\nk = int(input())\ndata = [[0] * (n+1) for _ in range(n+1)]\ninfo = [] # direction info\n\n# for apple\nfor _ in range(k):\n    a, b = map(int, input().split())\n    data[a][b] = 1\n\n# set direction input\nl = int(input())\nfor _ in range(l):\n    sec, c = input().split()\n    info.append((int(sec), c))\n\n# direction vectors (east, south, west, north) for coordinate moves -- the most convenient way to handle movement!\ndx = [0,1,0,-1]\ndy = [1,0,-1,0]\n\ndef turn(direction, c):\n    if c == \"L\":\n        direction = (direction - 1) % 4\n    else:\n        direction = (direction + 1) % 4\n    return direction\n\ndef simulate():\n    x,y = 1,1 # snake's head\n    data[x][y] = 2 # cells occupied by the snake are marked 2\n
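    # board encoding: 0 = empty, 1 = apple, 2 = snake (the queue q holds the snake's cells, head last)\n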
    direction = 0 # initially facing east\n    time = 0 # second!\n    index = 0 # rotation flag\n    q = [(x,y)] # snake\n\n    while True:\n        nx = x + dx[direction]\n        ny = y + dy[direction]\n        # snake is in map size, and the head does not hit the body\n        if 1 <= nx and nx <= n and 1 <= ny and ny <= n and data[nx][ny] != 2:\n            # no apple: move, then remove the tail\n            if data[nx][ny] == 0:\n                data[nx][ny] = 2\n                # record the next position\n                q.append((nx,ny))\n                # previous (tail) position\n                px, py = q.pop(0)\n                data[px][py] = 0\n            # apple present: move and keep the tail as it is!\n            if data[nx][ny] == 1:\n                data[nx][ny] = 2\n                q.append((nx,ny))\n        else:\n            time += 1\n            break\n        \n        x,y = nx,ny # update the actual head position\n        time += 1\n        if index < l and time == info[index][0]: # when it is time to turn\n            direction = turn(direction, info[index][1])\n            index += 1\n    return time\n\nprint(simulate())\n","repo_name":"daunjeong824/Practice_Algorithm","sub_path":"Implementation/snake_correct.py","file_name":"snake_correct.py","file_ext":"py","file_size_in_byte":1843,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"}
+{"seq_id":"29798517565","text":"#Open file and compress lines into an array\nwith open(\"input.txt\") as f:\n    content = f.readlines()\ncontent = str(content[0])\n\ndef search(index):\n    count = 0\n    j = index\n    while True:\n        if content[j] == \"!\":\n            j += 2  # '!' cancels the following character entirely, so skip both\n        elif content[j] == \">\":\n            return [j,count]\n        else:\n            count += 1\n            j += 1\n\ndef clean(content):\n    count = 0\n    new_content = \"\"\n    end = -1\n    for i in range(len(content)):\n        if not(i <= end):\n            if content[i] == \"<\":\n                search_return = search(i+1)\n                end = search_return[0]\n                count += search_return[1]\n            else:\n                new_content += content[i]\n    return [new_content,count]\n\n\nclean_return = clean(content)\nclean_content = clean_return[0]\nbracket_store = []\nscore = 0\nfor bracket in clean_content:\n    if bracket == \"{\":\n        bracket_store.append(bracket)\n    elif bracket == \"}\":\n        score += len(bracket_store)\n        bracket_store.pop(len(bracket_store)-1)\n\nanswer_one = str(score)\nanswer_two = str(clean_return[1])\nprint(\"p1: \" + answer_one)\nprint(\"p2: \" + answer_two)\n","repo_name":"bunceandbean/advent-of-code","sub_path":"2017/day09/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"}
+{"seq_id":"33909692913","text":"import cv2\r\nimport numpy as np\r\nfrom .check_Image_quality_score import ImageQualityValue,BlurOrNot,BrightnessAndContrastScore,CheckResolutionOfImage\r\nfrom .adjust_brightness_and_contrast_and_enhance_image import BrightnessAndContrastEnhancement\r\nfrom .blur_removal import RemoveBlur\r\nfrom PIL import Image\r\nfrom io import BytesIO\r\nimport uuid\r\nimport os\r\nimport io\r\nimport requests\r\nimport base64\r\nfrom app.utils import upload_to_s3,download_from_s3\r\nfrom PIL import Image, ImageEnhance\r\nfrom .resolution_main import ResolutionEnhancement\r\n\r\nclass ImageEnhancementCommon(object):\r\n\r\n    def image_blur_removal(self,image,is_human):\r\n        check_blur_image = True\r\n        is_human = True\r\n
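        # NOTE: is_human is forced to True here, overriding the caller's argument -- every image therefore takes the OpenCV deblur path below\r\n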
\r\n        ### @updated: 16 June 2022 - the blur-score > 80 condition is commented out; to re-enable it, uncomment the line below\r\n        #check_blur_image = BlurOrNot.check_blur(image)\r\n\r\n        if check_blur_image:\r\n            if is_human:\r\n                image = np.array(image)\r\n                image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\r\n                image = RemoveBlur.remove_blur(image)\r\n                image = np.array(image)\r\n                image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\r\n            else:\r\n                image = ImageEnhance.Sharpness(image).enhance(1.2)\r\n        \r\n        return check_blur_image,image\r\n\r\n    def image_resolution_enhancement(self,image,is_human):\r\n        ResolutionEnhancements = ResolutionEnhancement()\r\n        is_resolution_enhancement_required = CheckResolutionOfImage.check_resolution(image)\r\n        if is_resolution_enhancement_required:\r\n\r\n            if is_human:\r\n                image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\r\n                image = ResolutionEnhancements.main(image)\r\n                image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\r\n            else:\r\n                image = ResolutionEnhancements.main(image)\r\n                image = Image.fromarray(image)\r\n        return image\r\n\r\n    def image_brightness_contrast_and_color_enhancement(self,image,is_human):\r\n\r\n        brightness_score = BrightnessAndContrastScore.brightness_score(image)\r\n        contrast_score = BrightnessAndContrastScore.contrast_quality(image)\r\n        image_quality_score = ImageQualityValue.test_measure_BRISQUE(image)\r\n\r\n        if is_human:\r\n\r\n            if image_quality_score >= 80 or brightness_score <= 120 or contrast_score <= 2:\r\n            \r\n                image = BrightnessAndContrastEnhancement.adjust_gamma(image) \r\n                image = BrightnessAndContrastEnhancement.automatic_brightness_and_contrast(image)\r\n        else:\r\n\r\n            if image_quality_score >= 45 or brightness_score <= 120 or contrast_score <= 2:\r\n                image = ImageEnhance.Color(image).enhance(1.2)\r\n                image = ImageEnhance.Brightness(image).enhance(1.2)\r\n                image = ImageEnhance.Contrast(image).enhance(1.2)\r\n\r\n        return image\r\n\r\n    def send_file_path(self,request_data,input_image,output_image,is_human): \r\n\r\n        if is_human:\r\n            output_image_byte_array = cv2.imencode('.'+request_data[\"image_file_format\"], output_image)[1].tobytes()  # .tostring() was removed from NumPy; .tobytes() is the equivalent\r\n        else:\r\n            try:\r\n                output_image_byte_array = io.BytesIO()\r\n                output_image.save(output_image_byte_array, format=request_data[\"image_file_format\"]) \r\n                output_image_byte_array = output_image_byte_array.getvalue()\r\n            except Exception:\r\n                output_image_byte_array = cv2.imencode('.'+request_data[\"image_file_format\"], output_image)[1].tobytes()\r\n        input_image_byte_array = io.BytesIO() \r\n        # write the input image into the buffer before uploading; it was previously left empty, so empty bytes were sent to S3\r\n        try:\r\n            input_image.save(input_image_byte_array, format=request_data[\"image_file_format\"])  # PIL input\r\n        except Exception:\r\n            input_image_byte_array.write(cv2.imencode('.'+request_data[\"image_file_format\"], input_image)[1].tobytes())  # cv2/ndarray input\r\n        input_image_byte_array = input_image_byte_array.getvalue()\r\n        file_name = str(uuid.uuid4()) + '.' 
+ request_data[\"image_file_format\"]\r\n input_file_path = upload_to_s3(input_image_byte_array,request_data, 'input/'+file_name)\r\n output_file_path = upload_to_s3(output_image_byte_array,request_data, 'output/'+file_name)\r\n return output_file_path,base64.b64encode(output_image_byte_array)\r\n\r\n\r\nEnhancedImage=ImageEnhancementCommon()\r\n\r\n \r\n\r\n\r\n\r\n \r\n","repo_name":"siva3io/Eunimart_ai_image_optimizer","sub_path":"app/services/image_optimizer/image_enhancement.py","file_name":"image_enhancement.py","file_ext":"py","file_size_in_byte":4135,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"11991500451","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n# Acceleration, brake, steering\ncolumn_to_values = {}\n# Acceleration\ncolumn_to_values[0] = ([0, 1], 0.4)\n# Brake\ncolumn_to_values[1] = ([0, 1], 0.4)\n# Steering\ncolumn_to_values[2] = ([-1, -3/4, -1/3, 0, 1/3, 3/4, 1], 0.1)\n\nin_file_name = \"all_sensors_all_controls.csv\"\nout_file_name = \"blackboard_quantized.csv\"\ninput_file = open(in_file_name)\noutput_file = open(out_file_name, \"w\")\n\n# Write the header (and add the missing column label)\noutput_file.write(input_file.readline()[:-1] + \",TRACK_EDGE_18\\n\")\n\n# Process the rest of the lines\nfor line in input_file:\n line_data = line.strip().split(\",\")\n \n # Process columns that need to be quantized\n for col in column_to_values:\n value = float(line_data[col])\n\n # Find the corresponding discrete value\n discrete_values = np.array(column_to_values[col][0])\n closest_value_idx = np.argmin(np.abs(discrete_values - value))\n line_data[col] = str(closest_value_idx)\n\n # Regroup the data and write to new file\n output_file.write(\",\".join(line_data) + \"\\n\")\n\ninput_file.close()\noutput_file.close()\n\n# Check\noriginal = np.loadtxt(in_file_name, delimiter=\",\", skiprows=1)\nprocessed = np.loadtxt(out_file_name, delimiter=\",\", skiprows=1)\ntitles = [\"Acceleration\", \"Brake\", \"Steering\"]\nfor i in range(3): \n plt.subplot(3, 1, i+1)\n plt.plot(original[:,i], lw=0.5)\n plt.plot(processed[:, i], \"r.\", markersize=1)\n plt.title(titles[i])\nplt.show()\n\nfor i in range(3):\n plt.subplot(1, 3, i+1)\n plt.hist(processed[:, i])\n plt.title(titles[i])\nplt.show()\n","repo_name":"dfdazac/ci-uva-2017","sub_path":"data/control_quantization.py","file_name":"control_quantization.py","file_ext":"py","file_size_in_byte":1612,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"2607142071","text":"# Utilizzo della libreria SQLAlchemy: strumento Object Relational Mapper (ORM) per la comunicazione tra programmi Python e database.\r\n# traduce le classi Python in tabelle su database relazionali e converte automaticamente le chiamate di funzione in istruzioni SQL\r\n\r\n\r\nfrom datetime import *\r\nfrom unicodedata import *\r\n\r\n# metodo costruttore create_engine crea Connessione al database\r\n# Per usare DB-API per la connessione, la stringa ha la seguente forma:\r\n# engine = create_engine(\"dialect[+driver]://user:password@host/dbname\",echo = True)\r\n# driver : mysql-connector-python\r\n\r\n# create_engine() è il metodo costruttore crea una classe engine ovvero la connessione al server MySQL\r\n# ---- metodi classe Engine\r\n# connect() Ritorna oggetto di connessione\r\n# execute() Esegue un’istruzione SQL\r\n\r\nfrom sqlalchemy import create_engine\r\nengine = 
engine = create_engine('mysql+mysqlconnector://frida:basedati_2022@localhost/centro_sportivo')\r\nconn = engine.connect()\r\n\r\n# print out information about the database connection\r\nprint('')\r\nprint('CONNECTION CREATED TO DATABASE centro_sportivo: ' ,conn )\r\nprint(\"Connection DRIVER for the DBMS: \", engine.driver)\r\nprint(\"CONNECTION to the database : \", engine.url)\r\nprint('')\r\nprint(\"LIST OF TABLES IN DATABASE centro_sportivo\")\r\nprint('---------------------------------------')\r\nfrom sqlalchemy import inspect\r\ninsp = inspect(engine)\r\nresult = insp.get_table_names()\r\nfor row in result: print(row)\r\n\r\n# MetaData is a CATALOG of Table objects and related constructs: it holds table definitions plus associated objects such as indexes, views, triggers, etc.\r\n# ------------- the generic types available in SQLAlchemy are: --------------------------\r\n# BigInteger  SmallInteger  Integer  Numeric  Float  Boolean  String  Text  Date  DateTime  Time\r\n\r\n# import the objects used to define tables and indexes inside the metadata catalog\r\nfrom sqlalchemy import MetaData, Table, Column, Integer, String, Date, Time, ForeignKey, Index, PrimaryKeyConstraint, ForeignKeyConstraint\r\n\r\n# instantiate a MetaData object\r\nmeta = MetaData()\r\n\r\n# ----------- MAPPING of the centro_sportivo database TABLES onto Python objects -----------\r\ntipo_manutenzione = Table('tipo_manutenzione', meta,\r\n                          Column('CodIntervento', String, nullable=False, primary_key=True),\r\n                          Column('Descrizione', String),\r\n                          Column('Note', String),\r\n                          )\r\n\r\ntipo_mansioni = Table('tipo_mansioni', meta,\r\n                      Column('CodTipoMansioni', String, nullable=False, primary_key=True),\r\n                      Column('Descrizione', String),\r\n                      )\r\n\r\nmansioni = Table('mansioni', meta,\r\n                 Column('CodMansione', String, nullable=False, primary_key=True),\r\n                 Column('Descrizione', String),\r\n                 Column('CodTipoMansioni', String, ForeignKey('tipo_mansioni.CodTipoMansioni')),\r\n                 )\r\n\r\ntesserini = Table('tesserini', meta,\r\n                  Column('CodiceTesserino', String, primary_key=True, nullable=False),\r\n                  Column('Stato', String),\r\n                  Index('CodiceTesserino')\r\n                  )\r\n\r\ntransiti = Table('transiti', meta,\r\n                 Column('CodTesserino', String, ForeignKey('tesserini.CodiceTesserino'), nullable=False),\r\n                 Column('Data', Date, nullable=False),\r\n                 Column('Ora', Time, nullable=False),\r\n                 Column('Verso', String, nullable=False),\r\n                 PrimaryKeyConstraint('CodTesserino', 'Data', 'Ora', 'Verso'),  # composite primary key\r\n                 Index('CodTesserino', 'Data', 'Ora'),\r\n                 Index('Data')\r\n                 )\r\n\r\nanagrafica = Table('anagrafica', meta,\r\n                   Column('CodiceFiscale', String(16), nullable=False, primary_key=True),\r\n                   Column('Nominativo', String),\r\n                   Column('DataNascita', Date),\r\n                   Column('ComuneNascita', String),\r\n                   Column('IndirizzoResidenza', String),\r\n                   Column('Email', String, nullable=False),\r\n                   Column('Telefono', String, nullable=False),\r\n                   Index('CodiceFiscale')\r\n                   )\r\n\r\nclienti = Table('clienti', meta,\r\n                Column('CodiceFiscale', String(16), ForeignKey('anagrafica.CodiceFiscale'), nullable=False,\r\n                       primary_key=True)\r\n                )\r\n\r\nassegnazione_tesserini = Table('assegnazione_tesserini', meta,\r\n                               Column('CodTesserino', String, ForeignKey('tesserini.CodiceTesserino'), nullable=False),\r\n                               Column('CodFiscale', String(16), ForeignKey('clienti.CodiceFiscale'), nullable=False),\r\n                               Column('DataAssegnazione', Date),\r\n                               PrimaryKeyConstraint('CodTesserino', 'CodFiscale'),  # composite primary key\r\n                               
ForeignKeyConstraint(['CodTesserino'], ['tesserini.CodiceTesserino'])\r\n                               )\r\n\r\nfasce_orarie = Table('fasce_orarie', meta,\r\n                     Column('idFasciaOraria', String, nullable=False, primary_key=True),\r\n                     Column('Descrizione', String, )\r\n                     )\r\n\r\nscaglioni_orari = Table('scaglioni_orari', meta,\r\n                        Column('IdScaglioneOrario', String, nullable=False, primary_key=True),\r\n                        Column('FasciaOrariaAppartenenza', String, ForeignKey('fasce_orarie.idFasciaOraria')),\r\n                        Column('Descrizione', String, )\r\n                        )\r\n\r\norario_lavoro = Table('orario_lavoro', meta,\r\n                      Column('CodOrarioLavoro', String, nullable=False, primary_key=True),\r\n                      Column('OraInizio',Time, nullable=False),\r\n                      Column('OraFine', Time, nullable=False)\r\n                      )\r\n\r\npersonale = Table('personale', meta,\r\n                  Column('CodiceFiscale', String, ForeignKey('anagrafica.CodiceFiscale'), nullable=False,\r\n                         primary_key=True),\r\n                  Column('CodOrarioLavoro', String, ForeignKey('orario_lavoro.CodOrarioLavoro')),\r\n                  Column('CodMansione', String, ForeignKey('mansioni.CodMansione'), nullable=False),\r\n                  Column('CodTesserino', String, ForeignKey('tesserini.CodiceTesserino'), nullable=False)\r\n                  )\r\n\r\ntipologia_corsi = Table('tipologia_corsi', meta,\r\n                        # the stray ForeignKey to personale on IdCorso (a copy-paste slip) was removed: a course id is its own key\r\n                        Column('IdCorso', String, nullable=False,\r\n                               primary_key=True),\r\n                        Column('Descrizione', String),\r\n                        Column('Titolare', String, ForeignKey('personale.CodiceFiscale'), nullable=False),\r\n                        Column('IdSalaImpianto', String, ForeignKey('sale_impianti.IdSalaImpianto')),\r\n                        Column('NumMin', Integer),\r\n                        Column('NumMax', Integer)\r\n                        )\r\n\r\norario_corsi_settimanali = Table('orario_corsi_settimanali', meta,\r\n                                 Column('IdOrario', String, nullable=False, primary_key=True),\r\n                                 Column('CodCorso', String, ForeignKey('tipologia_corsi.IdCorso'), nullable=False),\r\n                                 Column('CodScaglioneOrario', String, ForeignKey('scaglioni_orari.IdScaglioneOrario')),\r\n                                 Column('GiornoSettimana', String, nullable=False)\r\n                                 )\r\n\r\nprenotazioni_corsi = Table('prenotazioni_corsi', meta,\r\n                           Column('IdOrario', String, ForeignKey('orario_corsi_settimanali.IdOrario'), nullable=False),\r\n                           Column('Data', Date, nullable=False),\r\n                           Column('CodFiscaleCliente', String, ForeignKey('clienti.CodiceFiscale'), nullable=False, ),\r\n                           Column('Stato', String),\r\n                           PrimaryKeyConstraint('IdOrario', 'Data', 'CodFiscaleCliente'),  # composite primary key\r\n                           Index('IdOrario', 'Data', 'CodFiscaleCliente')\r\n                           )\r\n\r\nservizi_estetico_medici = Table('servizi_estetico_medici', meta,\r\n                                Column('CodPrestazione', String, nullable=False, primary_key=True),\r\n                                Column('Tariffa', Date, nullable=False),\r\n                                Column('NumMin', Integer),\r\n                                Column('NumMax', Integer)\r\n                                )\r\n\r\norario_servizi = Table('orario_servizi', meta,\r\n                       Column('IdOrario', String, nullable=False, primary_key=True),\r\n                       Column('CodPrestazione', String, ForeignKey('servizi_estetico_medici.CodPrestazione'), nullable=False),  # type changed from Date to String to match the referenced column\r\n                       Column('IdScaglioneOrario', String, ForeignKey('scaglioni_orari.IdScaglioneOrario')),\r\n                       Column('GiornoSettimana', String, nullable=False)\r\n                       )\r\n\r\nprenotazioni_servizi = Table('prenotazioni_servizi', meta,\r\n                             Column('IdOrario', String, ForeignKey('orario_servizi.IdOrario'), nullable=False),\r\n                             Column('Data', Date, nullable=False),\r\n                             Column('CodFiscaleCliente', String(16), ForeignKey('clienti.CodiceFiscale'), nullable=False),\r\n                             Column('Stato', String),\r\n                             PrimaryKeyConstraint('IdOrario', 'Data', 'CodFiscaleCliente'),\r\n                             Index('IdOrario', 'Data', 'CodFiscaleCliente')\r\n                             )\r\n\r\nsale_impianti = 
Table('sale_impianti', meta,\r\n                       Column('IdSalaImpianto', String, primary_key=True),\r\n                       Column('Descrizione', String),\r\n                       Column('Ubicazione', String),\r\n                       Column('CapienzaMax', Integer),\r\n                       )\r\n\r\norario_sale_impianti = Table('orario_sale_impianti', meta,\r\n                             Column('IdOrario', String, primary_key=True),\r\n                             Column('IdSalaImpianto', String, ForeignKey('sale_impianti.IdSalaImpianto')),\r\n                             Column('ScaglioneOrario', String, ForeignKey('scaglioni_orari.IdScaglioneOrario')),\r\n                             Column('GiornoSettimana', String)\r\n                             )\r\n\r\nprenotazioni_sale_impianti = Table('prenotazioni_sale_impianti', meta,\r\n                                   Column('IdOrario', String, ForeignKey('orario_sale_impianti.IdOrario'), nullable=False),\r\n                                   Column('Data', Date, nullable=False),\r\n                                   Column('CodFiscaleCliente', String(16), ForeignKey('clienti.CodiceFiscale'),\r\n                                          nullable=False),\r\n                                   PrimaryKeyConstraint('IdOrario', 'Data', 'CodFiscaleCliente'),\r\n                                   Index('IdOrario', 'Data', 'CodFiscaleCliente')\r\n                                   )\r\n\r\ntipo_abbonamenti = Table('tipo_abbonamenti', meta,\r\n                         Column('IdAbbonamento', String, primary_key=True, nullable=False),\r\n                         Column('Descrizione', String),\r\n                         Column('Tariffa', Integer),\r\n                         Column('Durata', Integer))\r\n\r\nsconti = Table('sconti', meta,\r\n               Column('CodSconto', String(10), nullable=False, primary_key=True),\r\n               Column('Percentuale', Integer),\r\n               Column('Descrizione', String))\r\n\r\nabbonamenti = Table('abbonamenti', meta,\r\n                    Column('ProgAbbonamento', Integer, primary_key=True, nullable=False, autoincrement=True),\r\n                    Column('IdAbbonamento', String, ForeignKey('tipo_abbonamenti.IdAbbonamento')),\r\n                    Column('CodFiscaleCliente', String(16), ForeignKey('clienti.CodiceFiscale')),\r\n                    Column('CodSconto', String, ForeignKey('sconti.CodSconto')),\r\n                    Column('DataInizio', Date)\r\n                    )\r\n\r\nmanutenzione_impianti = Table('manutenzione_impianti', meta,\r\n                              Column('CodIntervento', String, ForeignKey('tipo_manutenzione.CodIntervento'),nullable=False),\r\n                              Column('CFManutentore', String(16), ForeignKey('personale.CodiceFiscale'), nullable=False),\r\n                              Column('IdSalaImpianto', String, ForeignKey('sale_impianti.IdSalaImpianto'), nullable=False),\r\n                              Column('Data', Date, nullable=False),\r\n                              Column('OraInizio', Time, nullable=False),\r\n                              Column('Durata', Integer),\r\n                              PrimaryKeyConstraint('CodIntervento', 'CFManutentore', 'IdSalaImpianto', 'Data','OraInizio'),  # composite primary key\r\n                              Index('Data', 'IdSalaImpianto'),\r\n                              Index('CodIntervento')\r\n                              )\r\n\r\n# the engine object is passed to create_all() on the meta object (MetaData class) to create the tables and store all their definitions in the metadata\r\nmeta.create_all(engine)\r\nfrom sqlalchemy.sql.functions import func\r\n\r\n# -------------------------------------------------------------------------------\r\n# ----------------- SELECT with the sqlalchemy.sql constructs -------------------\r\n# -------------------------------------------------------------------------------\r\n\r\nfrom sqlalchemy.sql import select, insert, delete, update, union\r\nfrom sqlalchemy import and_, or_\r\nfrom datetime import *\r\n\r\nprint('')\r\nprint('--------------------------------------------- SUBSCRIPTION CHECK --------------------------------------- ')\r\nprint('1.\tSelect the details of the subscriptions taken out that have an annual duration or a discount above 15%')\r\nprint('')\r\n\r\n# query 1\r\n
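# The commented-out SQL below is the hand-written equivalent of the SQLAlchemy select() construct that follows it.\r\n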
# SELECT DATE_FORMAT( utc_date() , \"%e-%c-%Y\" ) as data_verifica_scadenze,\r\n#        DATE_FORMAT( abbonamenti.DataInizio , \"%e-%c-%Y\" ) as data_inizio,\r\n#        tipo_abbonamenti.Durata as durata_abbonamento,\r\n#        tipo_abbonamenti.Descrizione,\r\n#        sconti.Descrizione, anagrafica.Nominativo,\r\n#        anagrafica.Email, anagrafica.Telefono\r\n# FROM abbonamenti, anagrafica, tipo_abbonamenti, sconti\r\n# WHERE abbonamenti.CodFiscaleCliente = anagrafica.CodiceFiscale\r\n# AND abbonamenti.IdAbbonamento = tipo_abbonamenti.IdAbbonamento\r\n# AND abbonamenti.CodSconto = sconti.CodSconto\r\n# AND (tipo_abbonamenti.Durata = 360 OR sconti.Percentuale >=15)\r\n\r\nquery1 = select( abbonamenti.c.DataInizio, tipo_abbonamenti.c.Durata, tipo_abbonamenti.c.Descrizione, sconti.c.Descrizione,\r\n                 anagrafica.c.Nominativo, anagrafica.c.Email, anagrafica.c.Telefono).where(abbonamenti.c.CodFiscaleCliente==anagrafica.c.CodiceFiscale,\r\n                 abbonamenti.c.IdAbbonamento==tipo_abbonamenti.c.IdAbbonamento,\r\n                 abbonamenti.c.CodSconto == sconti.c.CodSconto,\r\n                 or_(tipo_abbonamenti.c.Durata == 360, sconti.c.Percentuale >=15)\r\n\r\n                 )\r\nresult = conn.execute(query1)\r\nfor row in result:\r\n    print(row)\r\n\r\n\r\n# QUERY 2\r\n# SELECT anagrafica.Nominativo, transiti.CodTesserino,\r\n#        DATE_FORMAT(transiti.Data, \"%e-%c-%Y\" ) as Data_Timbratura, transiti.Ora,\r\n#        REPLACE( REPLACE(transiti.Verso, 'I' ,'ENTRATA ') ,'U','USCITA ') As Verso_timbratura,\r\n#        orario_lavoro.*\r\n# FROM personale, orario_lavoro, transiti , anagrafica\r\n# WHERE transiti.CodTesserino = personale.CodTesserino\r\n# AND orario_lavoro.CodOrarioLavoro= personale.CodOrarioLavoro\r\n# AND anagrafica.CodiCeFiscale = personale.CodiCeFiscale\r\n# ORDER BY personale.CodiCeFiscale, transiti.Data, transiti.Ora, transiti.Verso\r\n\r\nprint('')\r\nprint('---------------------------------------- ENTRY/EXIT LOG CHECK ----------------------------------------- ')\r\nprint('2.\tDisplay the list of employee clock-ins/outs sorted by name, date, time and direction.')\r\nprint('')\r\nquery2 = select( anagrafica.c.Nominativo, transiti.c.CodTesserino, transiti.c.Data, transiti.c.Ora, transiti.c.Verso,\r\n                 orario_lavoro.c.CodOrarioLavoro, orario_lavoro.c.OraInizio, orario_lavoro.c.OraFine\r\n                 ).where( transiti.c.CodTesserino == personale.c.CodTesserino,\r\n                          anagrafica.c.CodiceFiscale == personale.c.CodiceFiscale,\r\n                          orario_lavoro.c.CodOrarioLavoro== personale.c.CodOrarioLavoro,\r\n                          transiti.c.Data>= date(2022, 2, 2), transiti.c.Data<= date(2022, 5, 5),\r\n                          ).order_by( anagrafica.c.CodiceFiscale, transiti.c.Data, transiti.c.Ora, transiti.c.Verso)\r\n\r\nresult = conn.execute(query2)\r\nfor row in result: print(row)\r\n\r\n\r\n# QUERY 3\r\n# SELECT REPLACE(transiti.Verso, 'I' ,'ENTRATA ') As Verso_timbratura, anagrafica.Nominativo, orario_lavoro.CodOrarioLavoro, orario_lavoro.c.OraInizio,\r\n#        DATE_FORMAT(transiti.Data, \"%e-%c-%Y\" ) as Data_Timbratura, MAX(transiti.Ora) AS CONTROLLO,\r\n# FROM personale, orario_lavoro, transiti , anagrafica\r\n# WHERE transiti.CodTesserino = personale.CodTesserino\r\n# AND anagrafica.CodiCeFiscale = personale.CodiCeFiscale\r\n# AND transiti.Verso ='I'\r\n# UNION\r\n# SELECT REPLACE(transiti.Verso, 'U' ,'USCITA ') As Verso_timbratura, anagrafica.Nominativo, orario_lavoro.CodOrarioLavoro, orario_lavoro.c.OraFine,\r\n#        DATE_FORMAT(transiti.Data, \"%e-%c-%Y\" ) as Data_Timbratura, MIN(transiti.Ora) AS CONTROLLO,\r\n# FROM personale, orario_lavoro, transiti , anagrafica\r\n# WHERE transiti.CodTesserino = personale.CodTesserino\r\n# AND anagrafica.CodiCeFiscale = personale.CodiCeFiscale\r\n# AND 
orario_lavoro.CodOrarioLavoro= personale.CodOrarioLavoro\r\n# AND transiti.Verso ='U'\r\n\r\nprint('')\r\nprint('---------------------------------------- CHECK of clock-in/out time deviations ---------------------------------------- ')\r\nprint('3.\tFor each employee, select the largest late arrival and the date it occurred, and, for each employee,')\r\nprint(' the earliest exit date and time, always relative to their own working hours')\r\nprint('')\r\n\r\nquery3 = select(transiti.c.Verso, anagrafica.c.Nominativo, orario_lavoro.c.CodOrarioLavoro, orario_lavoro.c.OraInizio, transiti.c.Data, func.max(transiti.c.Ora).label(\"controllo\")\r\n                ).where( anagrafica.c.CodiceFiscale == personale.c.CodiceFiscale,\r\n                         transiti.c.CodTesserino == personale.c.CodTesserino,\r\n                         transiti.c.Verso=='I',\r\n                         orario_lavoro.c.CodOrarioLavoro== personale.c.CodOrarioLavoro\r\n                         ).group_by(anagrafica.c.Nominativo\r\n                                    ).order_by(anagrafica.c.CodiceFiscale, transiti.c.Data).union\\\r\n    (\r\n        select(transiti.c.Verso, anagrafica.c.Nominativo, orario_lavoro.c.CodOrarioLavoro, orario_lavoro.c.OraFine, transiti.c.Data, func.min(transiti.c.Ora).label(\"controllo\")\r\n               ).where(anagrafica.c.CodiceFiscale == personale.c.CodiceFiscale,\r\n                       transiti.c.CodTesserino == personale.c.CodTesserino,\r\n                       transiti.c.Verso == 'U',\r\n                       orario_lavoro.c.CodOrarioLavoro == personale.c.CodOrarioLavoro\r\n                       ).group_by(anagrafica.c.Nominativo\r\n                                  ).order_by(anagrafica.c.CodiceFiscale, transiti.c.Data)\r\n    )\r\n\r\nresult = conn.execute(query3)\r\nfor row in result: print(row)\r\n\r\n# QUERY 4\r\n# SELECT MAX(massimali.ore_lavorate) AS TotaleOre, massimali.codice as CodLavoro,\r\n#        massimali.lavoro_eseguito as LavoroEseguito FROM\r\n# ( SELECT SUM(Durata) AS ore_lavorate,\r\n#          manutenzione_impianti.CodIntervento AS codice,\r\n#          tipo_manutenzione.Descrizione as lavoro_eseguito\r\n#   FROM manutenzione_impianti, tipo_manutenzione\r\n#   WHERE tipo_manutenzione.CodIntervento = manutenzione_impianti.CodIntervento\r\n#   AND manutenzione_impianti.Data > \"2020:04:18\"\r\n#   GROUP BY manutenzione_impianti.CodIntervento\r\n#   ORDER BY manutenzione_impianti.CodIntervento ) AS massimali\r\n# 4.\tSelect the code and description of the type of maintenance on rooms and facilities that\r\n#    required the most work starting from 19 April 2022, also showing the total of hours reached\r\n
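# (pattern: an aggregated inner subquery feeds an outer MAX() -- in SQLAlchemy this becomes .subquery() plus func.max over the labelled column)\r\n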
\r\nprint('')\r\nprint('------------------------------- MAXIMUM HOURS WORKED broken down by type ---------------------------------------- ')\r\nprint('4. Computing the maximum work-hour totals:')\r\nprint('Select the code and description of the maintenance types on rooms and facilities, grouped by amount of work starting from 19 April 2022,')\r\nprint('also showing the highest total of hours reached.')\r\nprint('')\r\nsubq = select( func.sum(manutenzione_impianti.c.Durata).label(\"massimale\"), (manutenzione_impianti.c.CodIntervento).label(\"codice\"),\r\n               (tipo_manutenzione.c.Descrizione).label(\"lavoro\")\r\n               ).where(manutenzione_impianti.c.CodIntervento == tipo_manutenzione.c.CodIntervento,\r\n                       manutenzione_impianti.c.Data > date(2020, 4, 18)\r\n                       ).group_by(manutenzione_impianti.c.CodIntervento\r\n                                  ).subquery()\r\nresult = conn.execute(subq)\r\nfor row in result: print(row)\r\n\r\nquery4 = select(func.max(subq.c.massimale))\r\nprint('')\r\nprint(' Highest total -----------------------------------')\r\nresult = conn.execute(query4)\r\nfor row in result: print(row)\r\n\r\n# query 5\r\n# SELECT anagrafica.Nominativo, sale_impianti.Descrizione AS Impianto ,\r\n#        tipo_manutenzione.Descrizione, manutenzione_impianti.OraInizio,\r\n#        DATE_FORMAT(MAX(manutenzione_impianti.Data),\"%e-%c-%Y\" ) AS Ultima_data_manutenzione\r\n# FROM manutenzione_impianti, sale_impianti, tipo_manutenzione, anagrafica\r\n# WHERE manutenzione_impianti.CodIntervento = 'VASCA'\r\n# AND manutenzione_impianti.CodIntervento=tipo_manutenzione.CodIntervento\r\n# AND manutenzione_impianti.CFManutentore = anagrafica.CodiCeFiscale\r\n# AND manutenzione_impianti.IdSalaImpianto = sale_impianti.IdSalaImpianto\r\n# GROUP BY sale_impianti.IdSalaImpianto\r\nprint('')\r\nprint('----------------------------- MAINTENANCE CHECK ----------------------------------------------------------------')\r\nprint('5.\tCheck the date of the last maintenance of the water tanks, showing the time and the name of the maintenance worker')\r\nprint('')\r\nquery5 = select( anagrafica.c.Nominativo, sale_impianti.c.Descrizione,tipo_manutenzione.c.Descrizione,\r\n                 manutenzione_impianti.c.OraInizio, func.max(manutenzione_impianti.c.Data)\r\n                 ).where( manutenzione_impianti.c.CodIntervento == 'VASCA',\r\n                          manutenzione_impianti.c.CodIntervento == tipo_manutenzione.c.CodIntervento,\r\n                          manutenzione_impianti.c.CFManutentore == anagrafica.c.CodiceFiscale,\r\n                          manutenzione_impianti.c.IdSalaImpianto == sale_impianti.c.IdSalaImpianto\r\n                          ).group_by(sale_impianti.c.IdSalaImpianto)\r\n\r\nresult = conn.execute(query5)\r\nfor row in result: print(row)\r\n\r\n# Query 6\r\n# SELECT prenotazioni_corsi.Data AS DATA_PRENOTAZIONE, scaglioni_orari.Descrizione AS ORARIO_PRENOTAZIONE,\r\n#        tipologia_corsi.Descrizione AS CORSO_PRENOTATO, prenotazioni_corsi.CodFiscaleCliente,\r\n#        anagrafica.Nominativo AS CLIENTE, orario_corsi_settimanali.CodCorso\r\n# FROM prenotazioni_corsi, anagrafica, orario_corsi_settimanali, tipologia_corsi, scaglioni_orari\r\n# WHERE prenotazioni_corsi.CodFiscaleCliente = anagrafica.CodiCeFiscale\r\n# AND prenotazioni_corsi.IdOrario = orario_corsi_settimanali.IdOrario\r\n# AND orario_corsi_settimanali.CodCorso = tipologia_corsi.IdCorso\r\n# AND orario_corsi_settimanali.CodScaglioneOrario = scaglioni_orari.IdScaglioneOrario\r\n# AND prenotazioni_corsi.Data BETWEEN \"2022-04-15\" AND \"2022-04-24\"\r\n# AND prenotazioni_corsi.Stato ='C'\r\n# ORDER BY prenotazioni_corsi.Data DESC , scaglioni_orari.Descrizione DESC , anagrafica.Nominativo\r\nprint('')\r\nprint('---------------------------------- ACTIVE BOOKINGS CHECK ----------------------------------------------------- 
')\r\nprint('6.\tCheck the bookings that are active and not cancelled from 16 April 2022 to 22 April 2022')\r\nprint('')\r\nquery6 = select( prenotazioni_corsi.c.Data, scaglioni_orari.c.Descrizione,\r\n                 tipologia_corsi.c.Descrizione, prenotazioni_corsi.c.CodFiscaleCliente,\r\n                 anagrafica.c.Nominativo, orario_corsi_settimanali.c.CodCorso\r\n                 ).where( prenotazioni_corsi.c.CodFiscaleCliente == anagrafica.c.CodiceFiscale,\r\n                          prenotazioni_corsi.c.IdOrario == orario_corsi_settimanali.c.IdOrario,\r\n                          orario_corsi_settimanali.c.CodCorso == tipologia_corsi.c.IdCorso,\r\n                          orario_corsi_settimanali.c.CodScaglioneOrario == scaglioni_orari.c.IdScaglioneOrario,\r\n                          and_(prenotazioni_corsi.c.Data >= date(2022, 4, 16), prenotazioni_corsi.c.Data <= date(2022, 4, 24)),\r\n                          prenotazioni_corsi.c.Stato =='C'\r\n                          ).order_by( prenotazioni_corsi.c.Data, scaglioni_orari.c.Descrizione, anagrafica.c.Nominativo)\r\n\r\nresult = conn.execute(query6)\r\nfor row in result: print(row)\r\n\r\n\r\n# QUERY 7\r\n# SELECT prenotazioni_corsi.IdOrario, count(prenotazioni_corsi.CodFiscaleCliente) AS NUMERO_PRENOTAZIONI,\r\n#        orario_corsi_settimanali.GiornoSettimana, orario_corsi_settimanali.CodScaglioneOrario AS SCAGLIONE,\r\n#        tipologia_corsi.Descrizione\r\n# FROM prenotazioni_corsi, orario_corsi_settimanali, tipologia_corsi\r\n# WHERE prenotazioni_corsi.IdOrario = orario_corsi_settimanali.IdOrario\r\n# AND orario_corsi_settimanali.CodCorso = tipologia_corsi.IdCorso\r\n# AND prenotazioni_corsi.Stato ='C'\r\n# GROUP BY (prenotazioni_corsi.IdOrario)\r\n# HAVING orario_corsi_settimanali.CodScaglioneOrario IN\r\n# ( SELECT scaglioni_orari.IdScaglioneOrario FROM scaglioni_orari WHERE scaglioni_orari.FasciaOrariaAppartenenza = \"FASCIA1\")\r\n# the SQL subquery was replaced with a join .....\r\nprint('')\r\nprint('---------------------------------- MORNING-SLOT BOOKINGS ------------------------------------------')\r\nprint(' 7. Number of active bookings, broken down by course type, in the morning time slot')\r\nprint('')\r\nquery7 = select( prenotazioni_corsi.c.IdOrario, func.count(prenotazioni_corsi.c.CodFiscaleCliente),\r\n                 orario_corsi_settimanali.c.GiornoSettimana, orario_corsi_settimanali.c.CodScaglioneOrario,tipologia_corsi.c.Descrizione\r\n                 ).where( prenotazioni_corsi.c.IdOrario == orario_corsi_settimanali.c.IdOrario,\r\n                          orario_corsi_settimanali.c.CodCorso == tipologia_corsi.c.IdCorso,\r\n                          prenotazioni_corsi.c.Stato =='C',\r\n                          orario_corsi_settimanali.c.CodScaglioneOrario ==scaglioni_orari.c.IdScaglioneOrario,\r\n                          scaglioni_orari.c.FasciaOrariaAppartenenza == \"FASCIA1\"\r\n                          ).group_by( prenotazioni_corsi.c.IdOrario)\r\n\r\nresult = conn.execute(query7)\r\nfor row in result: print(row)\r\n\r\n\r\n# QUERY8\r\n
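# NOTE: comparing the two subqueries with != below cross-joins them rather than performing a true anti-join; a NOT IN over the second name set would express \"never booked\" more safely.\r\n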
# 8. customers who book courses on Mondays but have never booked rooms or facilities on the same day.\r\n# SELECT anagrafica.Nominativo\r\n# FROM prenotazioni_corsi, anagrafica, orario_corsi_settimanali\r\n# WHERE prenotazioni_corsi.CodFiscaleCliente = anagrafica.CodiCeFiscale\r\n# AND prenotazioni_corsi.IdOrario = orario_corsi_settimanali.IdOrario\r\n# AND orario_corsi_settimanali.GiornoSettimana = \"Lunedì\"\r\n# GROUP BY anagrafica.Nominativo\r\n# AND anagrafica.Nominativo <> ANY\r\n# (SELECT anagrafica.Nominativo\r\n#  FROM prenotazioni_sale_impianti, anagrafica, orario_sale_impianti\r\n#  WHERE prenotazioni_sale_impianti.CodFiscaleCliente= anagrafica.CodiCeFiscale\r\n#  AND prenotazioni_sale_impianti.IdOrario = orario_sale_impianti.IdOrario\r\n#  AND orario_sale_impianti.GiornoSettimana = \"Lunedì\"\r\n#  Group BY anagrafica.Nominativo\r\n\r\nprint('')\r\nprint('---------------------------- BOOKING-TYPE CHECK ---------------------------------------------------------------------')\r\nprint('8.\tSelect the names of the customers who book courses on Mondays but have never booked rooms or facilities on the same day')\r\nprint('')\r\nprint( 'Customers who book courses on Mondays ---------------------------- ')\r\nprint('')\r\nsubquery1 = select( (anagrafica.c.Nominativo).label(\"nominativo\")\r\n                    ).where( prenotazioni_corsi.c.CodFiscaleCliente == anagrafica.c.CodiceFiscale,\r\n                             prenotazioni_corsi.c.IdOrario == orario_corsi_settimanali.c.IdOrario,\r\n                             orario_corsi_settimanali.c.GiornoSettimana == \"Lunedì\"\r\n                             ).distinct(anagrafica.c.Nominativo).subquery()\r\nresult = conn.execute(subquery1)\r\nfor row in result: print(row)\r\n\r\nprint('')\r\nprint( 'Customers who book rooms and facilities on Mondays --------------------------------- ')\r\nprint('')\r\nsubquery2 = select( (anagrafica.c.Nominativo).label(\"nominativo\")\r\n                    ).where( prenotazioni_sale_impianti.c.CodFiscaleCliente == anagrafica.c.CodiceFiscale,\r\n                             prenotazioni_sale_impianti.c.IdOrario == orario_sale_impianti.c.IdOrario,\r\n                             orario_sale_impianti.c.GiornoSettimana == \"Lunedì\"\r\n                             ).distinct(anagrafica.c.Nominativo).subquery()\r\n\r\nresult = conn.execute(subquery2)\r\nfor row in result: print(row)\r\n\r\nprint('')\r\nprint('Customers who book courses on Mondays but never book rooms or facilities --------------------------------- ')\r\nprint('IMPLEMENTATION WITH SUBQUERIES :')\r\nprint('')\r\nquery8 = select(subquery1.c.nominativo).where(subquery1.c.nominativo != subquery2.c.nominativo)\r\nresult = conn.execute(query8)\r\nfor row in result: print(row)\r\n\r\nprint('')\r\nprint('Customers who book courses on Mondays but never book rooms or facilities --------------------------------- ')\r\nprint('IMPLEMENTATION BY PASSING A RAW SQL TEXT STATEMENT TO THE COMPILER :')\r\nprint('')\r\nfrom sqlalchemy.sql import text\r\nquery8= text( \"SELECT DISTINCT anagrafica.Nominativo FROM prenotazioni_corsi, anagrafica, orario_corsi_settimanali WHERE prenotazioni_corsi.CodFiscaleCliente = anagrafica.CodiCeFiscale AND prenotazioni_corsi.IdOrario = orario_corsi_settimanali.IdOrario AND orario_corsi_settimanali.GiornoSettimana = 'Lunedì' AND anagrafica.Nominativo <> ANY (SELECT DISTINCT anagrafica.Nominativo FROM prenotazioni_sale_impianti, anagrafica, orario_sale_impianti WHERE prenotazioni_sale_impianti.CodFiscaleCliente= anagrafica.CodiCeFiscale AND prenotazioni_sale_impianti.IdOrario = orario_sale_impianti.IdOrario AND orario_sale_impianti.GiornoSettimana = 'Lunedì') 
\")\r\nresult= conn.execute(query8)\r\nfor row in result: print(row)\r\n\r\n\r\n# Query 9\r\n# 9.\tLa segreteria necessita di mandare notifiche di scadenza al fine di invitare al rinnovo dell’abbonamento. Selezionare i nominativi,\r\n# email e numero telefonico dei clienti del centro sportivo che hanno abbonamenti in scadenza (termine abbonamento a meno di 15 giorni)\r\nprint('---------------------------------------- ELENCO ABBONAMENTI IN SCADENZA--------------------------------------- ')\r\nprint('9. selezionare gli abbonamenti che scadono a meno di 15 giorni')\r\nprint('IMPLEMENTAZIONE PASSANDO AL COMPILATORE STRINGA DI TESTO CON STATMENT SQL :')\r\nprint('')\r\nfrom sqlalchemy.sql import text\r\n\r\nquery9 = text('SELECT DATE_FORMAT( utc_date() , \"%e-%c-%Y\" ) , DATE_FORMAT( abbonamenti.DataInizio , \"%e-%c-%Y\" ) , tipo_abbonamenti.Durata , DATE_FORMAT( (abbonamenti.DataInizio + INTERVAL tipo_abbonamenti.Durata DAY) , \"%e-%c-%Y\" ) , DATEDIFF( abbonamenti.DataInizio + INTERVAL tipo_abbonamenti.Durata DAY, CURDATE()) , tipo_abbonamenti.Descrizione, anagrafica.Nominativo, anagrafica.Email, anagrafica.Telefono FROM abbonamenti, anagrafica, tipo_abbonamenti WHERE abbonamenti.CodFiscaleCliente = anagrafica.CodiceFiscale AND tipo_abbonamenti.IdAbbonamento=abbonamenti.IdAbbonamento AND DATEDIFF( abbonamenti.DataInizio + INTERVAL tipo_abbonamenti.Durata DAY, CURDATE()) < 25')\r\n\r\nresult = conn.execute(query9)\r\nfor row in result:\r\n print(row)\r\n\r\n\r\n\r\n\r\n","repo_name":"fschiavoni70/ETIVITY4_BDD_2022","sub_path":"centro_sportivo_SQLalchemy_esame_frida_schiavoni.py","file_name":"centro_sportivo_SQLalchemy_esame_frida_schiavoni.py","file_ext":"py","file_size_in_byte":32741,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"38595909633","text":"\"\"\"\nThis module divides a documents into individuals words or sequence of words by splitting on the blank spaces.\n\"\"\"\nfrom nltk import ngrams, word_tokenize\nfrom fiesta.transformers.document_transformer import document_transformer\n\ndef tokenization (document_collection, index_of_document = None):\n \"\"\"This method divides a documents into individual words (strings) by splitting on the blank spaces.\n Args:\n document_collection (str, list or file directory): document collection to be tokenized \n Returns: \n list: list of divided documents into individual words\n \"\"\"\n full_document = document_transformer(document_collection)\n tokenized_document = []\n for document_part in full_document:\n tokenized_document.append(word_tokenize(document_part))\n if index_of_document!= None:\n return tokenized_document[index_of_document]\n return tokenized_document\n\ndef n_grams_tokenization (document_collection, n, index_of_document = None):\n \"\"\"Divides a documents into sequence of n words (strings) by splitting on the blank spaces.\n Args:\n document_collection (str, list or file directory): document collection to be tokenized\n \t\t\tn (int): length of the sequence of words\n Returns: \n list: list of divided documents into sequences of words\n \"\"\"\n all_n_grams = []\n full_document = document_transformer(document_collection)\n\n for document_part in full_document:\n ngrams_document = []\n n_grams = ngrams(document_part.split(), n)\n for grams in n_grams:\n ngrams_document.append(grams)\n all_n_grams.append(ngrams_document)\n if index_of_document!= None:\n return all_n_grams[index_of_document]\n return 
all_n_grams","repo_name":"kammillam/fiesta","sub_path":"fiesta/preprocessing/tokenization.py","file_name":"tokenization.py","file_ext":"py","file_size_in_byte":1766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"27506731711","text":"from django.db.models import Q, Max\nfrom django.db.models import Count\nfrom django.db import transaction\nfrom django.template.loader import render_to_string\nfrom django.core.mail import EmailMessage\nimport datetime, time, os, re, json\nimport dateutil.parser\nfrom . import models as M\nfrom . import auth\nfrom . import constants as CST\nfrom . import utils_response as UR #, utils_format as UF\nfrom django.template import Template\nfrom django.core.exceptions import ValidationError\nfrom django.conf import settings\nimport shutil\nimport logging\n\n#SINGLETONS:\n\n__MARK_TRIGGERS = {\n 3:5, 5:3, 7:9, 9:7\n}\n#typical fields we request for objects\n__NAMES = {\n \"location\": {\n \"id_ensemble\": None,\n \"top\": \"y\",\n \"left\": \"x\",\n \"page\": None,\n \"id_source\": None,\n \"ID\": \"id\",\n \"w\": None,\n \"h\": None,\n},\n \"location2\": {\n \"id_ensemble\": \"ensemble_id\",\n \"top\": \"y\",\n \"left\": \"x\",\n \"page\": None,\n \"duration\": None,\n \"pause\": None,\n \"id_source\": \"source_id\",\n \"ID\": \"id\",\n \"w\": None,\n \"h\": None,\n\t\"section_id\": None\n},\n \"location_v_comment\": {\n \"id_ensemble\": None,\n \"top\": \"y\",\n \"left\": \"x\",\n \"page\": None,\n \"id_source\": None,\n \"ID\": \"id_location\",\n \"w\": None,\n \"h\": None,\n \"body\": \"substring(body, 0, 95) || replace(substring(body, 95, 5), '&', '') as body\",\n \"big\": \"cast(length(body)>100 as integer) as big\"\n},\n \"location_v_comment2\":{\n \"ID\": \"location_id\",\n \"id_ensemble\": \"location.ensemble_id\",\n \"top\": \"location.y\",\n \"left\": \"location.x\",\n \"page\": \"location.page\",\n \"duration\": \"location.duration\",\n \"pause\": \"location.pause\",\n \"id_source\": \"location.source_id\",\n \"w\": \"location.w\",\n \"h\": \"location.h\",\n \"body\": None,\n \"is_title\": \"location.is_title\",\n\t\"duration\": \"location.duration\",\n \"section_id\": \"location.section_id\"\n},\n \"tag\": {\n \"ID\": \"id\",\n \"user_id\": \"individual_id\",\n \"comment_id\": \"comment_id\"\n},\n \"html5location\":{\n \"ID\": \"id\",\n \"id_location\": \"location_id\",\n \"path1\": \"path1\",\n \"path2\": \"path2\",\n \"offset1\": \"offset1\",\n \"offset2\": \"offset2\"\n },\n \"comment\": {\n \"ID\": \"id\",\n \"ID_location\": \"id_location\",\n \"email\": None, #replaced in __post_process_comments\n \"id_parent\": None,\n \"id_author\": None, #deleted in __post_process_comments\n \"ctime\": \"cast(ctime as text) as ctime\", #so that psycopg is not tempted to convert it to a native python format\n \"body\": None,\n \"signed\": None,\n \"id_type\": None,\n \"n_answerplease\": None,\n \"n_approve\": None,\n \"n_reject\": None,\n \"n_favorite\": None,\n \"n_hide\": None,\n\n \"p\": \"preselected\",\n \"admin\": None,\n \"id_ensemble\": None, #this one is redundant but deleted in __post_process_comments (after use)\n},\n \"comment2\": {\n \"ID\": \"id\",\n \"ID_location\": \"location_id\",\n \"id_parent\": \"parent_id\",\n \"id_author\": \"author_id\",\n \"ctime\": None,\n \"created\": None,\n #\"created\": Template(\"\"\"{{ V.ctime|notthisweek|date:\"D d M Y\" }}\"\"\"),\n \"body\": None,\n \"signed\": None,\n \"type\": None,\n \"fullname\": Template(\"{{V.author.firstname}} 
{{V.author.lastname}}\"),\n \"admin\": None\n},\n \"location_stats\": {\n \"id_location\": None,\n \"answerplease\": None,\n \"approve\": None,\n \"reject\": None,\n \"favorite\": None,\n \"hide\": None\n},\n \"files\": {\n \"ID\": \"id_source\",\n \"id\": \"id_source\",\n \"ID_ensemble\": \"id_ensemble\",\n \"id_ensemble\": None,\n \"id_folder\": None,\n \"title\": None,\n \"numpages\": None,\n \"w\": \"ncols\",\n \"h\": \"nrows\",\n \"rotation\": None,\n},\n \"ensembles\": {\n \"ID\": \"id_ensemble\",\n \"id\": \"id_ensemble\",\n \"name\": None,\n \"admin\": None,\n \"description\": None,\n \"public\": None\n },\n\"files2\": {\n \"ID\": \"source_id\",\n \"id\": \"source_id\",\n \"ID_ensemble\": \"ensemble_id\",\n \"id_ensemble\": \"ensemble_id\",\n \"id_folder\": \"folder_id\",\n \"title\": \"source.title\",\n \"numpages\": \"source.numpages\",\n \"w\": \"source.w\",\n \"h\": \"source.h\",\n \"rotation\": \"source.rotation\",\n \"assignment\": None,\n \"due\": None,\n \"filetype\": \"source.type\",\n \"date_published\": \"published\"\n},\n \"ensembles2\": {\n \"ID\": \"ensemble_id\",\n \"id\": \"ensemble_id\",\n \"name\": \"ensemble.name\",\n \"admin\": UR.Expression(False),\n \"description\": \"ensemble.description\",\n \"allow_guest\": \"ensemble.allow_guest\",\n \"allow_anonymous\": \"ensemble.allow_anonymous\",\n \"allow_staffonly\": \"ensemble.allow_staffonly\",\n \"allow_tag_private\": \"ensemble.allow_tag_private\",\n \"use_invitekey\": \"ensemble.use_invitekey\",\n \"default_pause\": \"ensemble.default_pause\",\n } ,\n \"folders2\":{\n \"ID\": \"folder_id\",\n \"id\": \"folder_id\",\n \"id_parent\": \"folder.parent_id\",\n \"id_ensemble\": \"ensemble_id\",\n \"name\": \"folder.name\"},\n \"members\":{\n \"ID\": \"user_id\",\n \"id\": \"user_id\",\n \"section_id\": None,\n \"email\": \"user.email\",\n \"firstname\": \"user.firstname\",\n \"lastname\": \"user.lastname\",\n \"guest\": \"user.guest\",\n \"admin\": None},\n\n \"assignment_grade\": {\n \"id\": None,\n \"grade\": None,\n \"id_user\": \"user_id\",\n \"id_source\": \"source_id\"},\n \"threadmark\":{\n \"id\": None,\n \"location_id\": None,\n \"user_id\": None,\n \"active\": None,\n \"type\": None,\n \"comment_id\": None\n }\n}\n\ndef get_ensembles(uid, payload):\n id = None\n if \"id_ensemble\" in payload:\n id = payload[\"id_ensemble\"]\n elif \"id\" in payload:\n id = payload[\"id\"]\n names = {\n \"ID\": \"ensemble_id\",\n \"name\": \"ensemble.name\",\n \"admin\": None,\n \"description\": \"ensemble.description\",\n \"allow_staffonly\": \"ensemble.allow_staffonly\",\n \"allow_anonymous\": \"ensemble.allow_anonymous\",\n \"allow_guest\": \"ensemble.allow_guest\",\n \"allow_download\": \"ensemble.allow_download\",\n \"allow_ondemand\": \"ensemble.allow_ondemand\",\n \"invitekey\": \"ensemble.invitekey\",\n }\n my_memberships = M.Membership.objects.select_related(\"ensemble\").filter(user__id=uid, deleted=False)\n if id is not None:\n my_memberships = my_memberships.filter(ensemble__id=id)\n return UR.qs2dict(my_memberships, names, \"ID\")\n\ndef get_folders(uid, payload):\n id = payload[\"id\"] if \"id\" in payload else None\n names = {\n \"ID\": \"id\",\n \"id_parent\": \"parent_id\",\n \"id_ensemble\": \"ensemble_id\",\n \"name\": None}\n my_memberships = M.Membership.objects.filter(user__id=uid, deleted=False)\n my_ensembles = M.Ensemble.objects.filter(membership__in=my_memberships)\n my_folders = M.Folder.objects.filter(ensemble__in=my_ensembles)\n if id is not None:\n my_folders = my_folders.filter(id=id)\n return 
UR.qs2dict(my_folders, names, \"ID\")\n\ndef get_sections(uid, payload):\n id = None\n if \"section_id\" in payload:\n id = payload[\"section_id\"]\n elif \"id\" in payload:\n id = payload[\"id\"]\n names = {\n \"ID\": \"id\",\n \"id_ensemble\": \"ensemble_id\",\n \"name\": None}\n my_memberships = M.Membership.objects.filter(user__id=uid, deleted=False)\n my_ensembles = M.Ensemble.objects.filter(membership__in=my_memberships)\n my_sections = M.Section.objects.filter(ensemble__in=my_ensembles)\n if id is not None:\n my_sections = my_sections.filter(id=id)\n if \"id_ensemble\" in payload:\n my_sections = my_sections.filter(ensemble__id=payload[\"id_ensemble\"])\n return UR.qs2dict(my_sections, names, \"ID\")\n\ndef get_file_stats(uid, payload):\n from . import db\n id_ensemble = payload[\"id_ensemble\"]\n names = {\n \"id\": \"source_id\",\n \"seen\":None,\n \"total\": None,\n \"mine\": None\n }\n from_clause = \"\"\"(select vc.source_id as source_id, count(vc.id) as total , sum(cast(s.comment_id is not null as integer)) as seen, sum(cast(vc.author_id=? as integer)) as mine\nfrom base_v_comment vc left join (select distinct comment_id from base_commentseen where user_id = ?) as s on s.comment_id=vc.id , base_membership m\nwhere\nm.user_id = ? and m.ensemble_id = vc.ensemble_id and ((vc.type>2 or ( vc.type>1 and m.admin=true)) or vc.author_id=?)\nand (m.section_id is null or vc.section_id = m.section_id or vc.section_id is null)\nand vc.ensemble_id=?\ngroup by source_id) as v1\"\"\"\n return db.Db().getIndexedObjects(names, \"id\", from_clause, \"true\" , (uid,uid, uid, uid, id_ensemble))\n\n# Get members of an ensemble\ndef get_members(eid):\n memberships = M.Membership.objects.select_related(\"user\").filter(ensemble__id=eid, deleted=False)\n users = {}\n for membership in memberships:\n user_entry = UR.model2dict(membership.user)\n\n # Remove unnecessary fields\n del user_entry[\"guest\"]\n del user_entry[\"confkey\"]\n del user_entry[\"valid\"]\n del user_entry[\"saltedhash\"]\n del user_entry[\"salt\"]\n del user_entry[\"password\"]\n\n # Add section\n if membership.section == None:\n user_entry[\"section\"] = None\n else:\n user_entry[\"section\"] = membership.section.id\n\n # Add user dictionary to users\n users[membership.user.id] = user_entry\n return users\n\n\ndef get_all_members(uid, payload):\n \"\"\"\n Get all members of an ensemble i.e. registered participants, pending invitations, pending email confirmation,\n and deleted members. 
Although uid is not required, it's been added to ensure that getObjects() in views.py\n can call this method.\n \"\"\"\n eid = payload[\"id_ensemble\"]\n ensemble = M.Ensemble.objects.get(pk=eid)\n memberships = M.Membership.objects.select_related(\"user\").filter(ensemble=ensemble)\n pendingconfirmations = memberships.filter(user__in=M.User.objects.filter(valid=False), deleted=False)\n real_memberships = memberships.filter(user__in=M.User.objects.filter(valid=True), deleted=False)\n deleted_memberships = memberships.filter(user__in=M.User.objects.filter(valid=True), deleted=True)\n pendinginvites = M.Invite.objects.select_related(\"user\").filter(ensemble=ensemble).exclude(user__id__in=real_memberships.values(\"user_id\"))\n\n pendingconfirmations_list = __memberships_to_users_list(pendingconfirmations)\n real_memberships_list = __memberships_to_users_list(real_memberships)\n deleted_memberships_list = __memberships_to_users_list(deleted_memberships)\n pendinginvites_list = __memberships_to_users_list(pendinginvites)\n\n users = {\n \"registered\": real_memberships_list,\n \"pending_invitation\": pendinginvites_list,\n \"pending_email_confirmation\": pendingconfirmations_list,\n \"deleted\": deleted_memberships_list\n }\n return users\n\ndef __memberships_to_users_list(memberships):\n # Optimization: pass to this methods memberships that have a select_related(\"user\"), so that django doesn't have to issue one DB query per user when fetching the user details. \n result = []\n \n for membership in memberships:\n user_entry = UR.model2dict(membership.user)\n\n # Remove unnecessary fields\n del user_entry[\"guest\"]\n del user_entry[\"confkey\"]\n del user_entry[\"valid\"]\n del user_entry[\"saltedhash\"]\n del user_entry[\"salt\"]\n del user_entry[\"password\"]\n\n # Add section\n if membership.section == None:\n user_entry[\"section\"] = None\n else:\n user_entry[\"section\"] = membership.section.id\n\n # Add admin status\n if membership.admin:\n user_entry[\"admin\"] = True\n else:\n user_entry[\"admin\"] = False\n\n user_entry[\"user_id\"] = user_entry.pop(\"id\") # Rename id key as user_id\n user_entry[\"membership_id\"] = membership.id\n\n # Add user dictionary to result\n result.append(user_entry)\n return result\n\n\ndef get_section_participants(uid, payload):\n eid = payload[\"id_ensemble\"]\n ensemble = M.Ensemble.objects.get(pk=eid)\n sections = M.Section.objects.filter(ensemble=ensemble)\n all_students = M.Membership.objects.filter(ensemble=ensemble).filter(guest=False)\n students2 = []\n for s in sections:\n session_dict = UR.model2dict(s)\n session_dict[\"participants\"] = __memberships_to_users_list(all_students.filter(section=s))\n students2.append(session_dict)\n\n no_section_dict = {\n \"name\": \"\",\n \"id\": -1,\n \"ensemble_id\": eid,\n \"participants\": __memberships_to_users_list(all_students.filter(section=None))}\n\n return {\n \"class_sections\": students2,\n \"no_section\": no_section_dict\n }\n\n\ndef get_class_settings(uid, payload):\n try:\n if \"id_ensemble\" in payload:\n return UR.model2dict(M.Ensemble.objects.get(pk=payload[\"id_ensemble\"]))\n elif \"invitekey\" in payload:\n return UR.model2dict(M.Ensemble.objects.get(invitekey=payload[\"invitekey\"]))\n except M.Ensemble.DoesNotExist:\n return None\n\n\ndef get_stats_ensemble(payload):\n from . 
import db\n id_ensemble = payload[\"id_ensemble\"]\n enforce_deadline = payload.get(\"enforce_deadline\", False)\n names = {\n \"ID\": \"record_id\",\n \"cnt\": None,\n \"numchars\": None,\n \"numwords\": None\n }\n if enforce_deadline: \n from_clause = \"\"\"(select count(v.id) as cnt, v.author_id || '_' || v.source_id as record_id, sum(length(c.body)) as numchars, sum(array_length(regexp_split_to_array(c.body, E'\\\\\\\\s+'), 1)) as numwords from base_v_comment v, base_comment c, base_ownership own where v.type>1 and v.ensemble_id=? and v.id=c.id and own.source_id=v.source_id and own.ensemble_id=v.ensemble_id and (not(own.assignment) or (v.ctime <= own.due)) group by v.author_id, v.source_id) as v1\"\"\"\n else:\n from_clause = \"\"\"(select count(v.id) as cnt, v.author_id || '_' || v.source_id as record_id, sum(length(c.body)) as numchars, sum(array_length(regexp_split_to_array(c.body, E'\\\\\\\\s+'), 1)) as numwords from base_v_comment v, base_comment c where v.type>1 and v.ensemble_id=? and v.id=c.id group by v.author_id, v.source_id) as v1\"\"\"\n retval={\"stats\": db.Db().getIndexedObjects(names, \"ID\", from_clause, \"true\" , (id_ensemble,))}\n grades = M.AssignmentGrade.objects.filter(source__ownership__ensemble__id=id_ensemble)\n ownerships = M.Ownership.objects.select_related(\"source\", \"ensemble\", \"folder\").filter(ensemble__id=id_ensemble, deleted=False)\n memberships = M.Membership.objects.select_related(\"user\").filter(ensemble__id=id_ensemble, deleted=False)\n sections = M.Section.objects.filter(membership__in=memberships)\n retval[\"users\"] = UR.qs2dict(memberships, __NAMES[\"members\"] , \"ID\")\n retval [\"files\"] = UR.qs2dict(ownerships, __NAMES[\"files2\"] , \"ID\")\n retval[\"ensembles\"] = UR.qs2dict(ownerships, __NAMES[\"ensembles2\"] , \"ID\")\n retval[\"grades\"] = UR.qs2dict(grades, __NAMES[\"assignment_grade\"], \"id\")\n retval[\"sections\"] = UR.qs2dict(sections)\n return retval\n\ndef get_social_interactions(id_ensemble):\n # Generate how many times each student communicated with another student for a given group.\n from . import db\n names = {\n \"cnt\":None,\n \"id\": None\n }\n #for one-way a1 initiates and a2 replies.\n from_clause = \"\"\"\n (select count(id) as cnt, a2||'_'||a1 as id from (select c1.id, c1.author_id as a1, c2.author_id as a2 from base_v_comment c1, base_v_comment c2 where c2.parent_id=c1.id and c1.ensemble_id=?) as v1 group by a1, a2) as v2\"\"\"\n output = {}\n output[\"oneway\"] = db.Db().getIndexedObjects(names, \"id\", from_clause, \"true\" , (id_ensemble,))\n #for two-way, a1 initiates, a2 replies, and a1 re-replies.\n from_clause = \"\"\"\n (select count(id) as cnt, a2||'_'||a1 as id from (select c1.id, c1.author_id as a1, c2.author_id as a2 from base_v_comment c1, base_v_comment c2, base_v_comment c3 where c3.parent_id=c2.id and c3.author_id=c1.author_id and c2.parent_id=c1.id and c1.ensemble_id=?) 
as v1 group by a1, a2) as v2\"\"\"\n    output[\"twoway\"] = db.Db().getIndexedObjects(names, \"id\", from_clause, \"true\" , (id_ensemble,))\n    return output\n\ndef get_social_interactions_clusters(id_ensemble):\n    # Generate how many times each student communicated with another student using the clusters given in that group metadata.\n    from . import db  # package-relative import, matching the module-level imports (json is already imported above)\n    names = {\n        \"cnt\":None,\n        \"id\": None\n    }\n    ensemble = M.Ensemble.objects.get(pk=id_ensemble)\n    clusters = json.loads(ensemble.metadata)[\"groups\"]\n    output = []\n    for cluster in clusters:\n        #for one-way a1 initiates and a2 replies.\n        from_clause = \"\"\"\n    (select count(id) as cnt, a2||'_'||a1 as id from (select c1.id, c1.author_id as a1, c2.author_id as a2 from base_v_comment c1, base_v_comment c2 where c2.parent_id=c1.id and c1.ensemble_id=? and c1.source_id in (%s)) as v1 group by a1, a2) as v2\"\"\" %(\",\".join([str(j) for j in cluster[\"source\"]]),)\n        output.append(db.Db().getIndexedObjects(names, \"id\", from_clause, \"true\" , (id_ensemble,)))\n    return output\n\n\ndef set_grade_assignment(uid, P):\n    id_user = P[\"id_user\"]\n    id_source = P[\"id_source\"]\n    record = None\n    try:\n        record = M.AssignmentGrade.objects.get(user__id=id_user, source__id=id_source)\n        rh = M.AssignmentGradeHistory()\n        rh.user_id = record.user_id\n        rh.grade = record.grade\n        rh.source_id = record.source_id\n        rh.grader = record.grader\n        rh.ctime = record.ctime\n        rh.save()\n        record.ctime = datetime.datetime.now()\n    except M.AssignmentGrade.DoesNotExist:\n        record = M.AssignmentGrade()\n        record.user_id = id_user\n        record.source_id = id_source\n    record.grade = P[\"grade\"]\n    record.grader_id = uid\n    record.save()\n    return UR.model2dict(record, __NAMES[\"assignment_grade\"], \"id\")\n\n\ndef get_guestfileinfo(id_source):\n    ownership = M.Ownership.objects.select_related(\"source\", \"ensemble\", \"folder\").filter(source__id=id_source, deleted=False)\n    o = {\n        \"files\": UR.qs2dict(ownership, __NAMES[\"files2\"] , \"ID\"),\n        \"ensembles\": UR.qs2dict(ownership, __NAMES[\"ensembles2\"] , \"ID\") ,\n        \"folders\": UR.qs2dict(ownership, __NAMES[\"folders2\"] , \"ID\") ,\n    }\n    if len(ownership)==1:\n        if ownership[0].source.type == M.Source.TYPE_YOUTUBE:\n            o[\"youtubeinfos\"]= UR.model2dict(ownership[0].source.youtubeinfo, None, \"id\")\n    return o\n\ndef get_files(uid, payload):\n    id = payload[\"id\"] if \"id\" in payload else None\n    names = __NAMES[\"files2\"]\n    my_memberships = M.Membership.objects.filter(user__id=uid, deleted=False)\n    my_ensembles = M.Ensemble.objects.filter(membership__in=my_memberships)\n    my_ownerships = M.Ownership.objects.select_related(\"source\").filter(ensemble__in=my_ensembles, deleted=False)\n    return UR.qs2dict(my_ownerships, names, \"ID\")\n\ndef save_settings(uid, payload):\n    #print \"save settings w/ payload %s\" % (payload, )\n    for k in [k for k in payload if k not in [\"__PASSWD__\", \"firstname\", \"lastname\"]]:\n        ds = M.DefaultSetting.objects.get(name=k)\n        m = M.UserSetting.objects.filter(user__id=uid, setting__id=ds.id)\n        if len(m)==0:\n            m = M.UserSetting(user_id=uid, setting_id=ds.id)\n        else:\n            m = m[0]\n        m.value = payload[k]\n        m.ctime = datetime.datetime.now()\n        m.save()\n        #DB().doTransaction(\"update nb2_user_settings set valid=0 where id_user=? and name=?\", (uid, k))\n        #DB().doTransaction(\"insert into nb2_user_settings(id_user, name, value) values (?, ?, ?)\", (uid, k, payload[k]))\n\n    u = M.User.objects.get(pk=uid)\n    if \"__PASSWD__\" in payload:\n        password = payload[\"__PASSWD__\"]\n        u.set_password(password)\n    u.firstname = payload[\"firstname\"]\n    u.lastname = payload[\"lastname\"]\n    u.save()\n    return get_settings(uid, {})\n
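\n# Aside (a sketch, not part of the original module): Django's update_or_create()\n# expresses the same read-then-write upsert that save_settings performs by hand;\n# the helper name _upsert_setting is hypothetical and unused elsewhere.\ndef _upsert_setting(uid, name, value):\n    ds = M.DefaultSetting.objects.get(name=name)\n    obj, created = M.UserSetting.objects.update_or_create(\n        user_id=uid, setting_id=ds.id,\n        defaults={\"value\": value, \"ctime\": datetime.datetime.now()})\n    return obj\n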
and name=?\", (uid, k))\n #DB().doTransaction(\"insert into nb2_user_settings(id_user, name, value) values (?, ?, ?)\", (uid, k, payload[k]))\n\n u = M.User.objects.get(pk=uid)\n if \"__PASSWD__\" in payload:\n password = payload[\"__PASSWD__\"]\n u.set_password(password)\n u.firstname = payload[\"firstname\"]\n u.lastname = payload[\"lastname\"]\n u.save()\n return get_settings(uid, {})\n\n\ndef get_settings(uid, payload):\n ds = M.DefaultSetting.objects.all()\n us = M.UserSetting.objects.filter(user__id=uid)\n sl = M.SettingLabel.objects.all()\n user = M.User.objects.get(pk=uid)\n user_entry = UR.model2dict(user)\n # Remove unnecessary fields\n del user_entry[\"guest\"]\n del user_entry[\"confkey\"]\n del user_entry[\"valid\"]\n del user_entry[\"saltedhash\"]\n del user_entry[\"salt\"]\n del user_entry[\"password\"]\n\n return {\"ds\": UR.qs2dict(ds), \"us\":UR.qs2dict(us), \"sl\":UR.qs2dict(sl), \"user\":user_entry}\n\n\ndef getLocation(id):\n \"\"\"Returns an \"enriched\" location\"\"\"\n o = M.Comment.objects.select_related(\"location\").filter(location__id=id, parent__id=None, deleted=False)\n loc_dict = UR.qs2dict(o, __NAMES[\"location_v_comment2\"], \"ID\")\n h5l = None\n try:\n h5l = o[0].location.html5location if len(o) else None\n except M.HTML5Location.DoesNotExist:\n pass\n h5l_dict = UR.model2dict(h5l, __NAMES[\"html5location\"], \"ID\") if h5l else None\n return (loc_dict, h5l_dict)\n\ndef getTopCommentsFromLocations(location_ids):\n comments = {}\n for loc_id in location_ids:\n loc_comments = M.Comment.objects.filter(location_id=loc_id)\n if loc_comments.count() > 0:\n comments[loc_id] = {\"replies\": loc_comments.count(), \"head_body\": loc_comments[0].body}\n # TODO: use .first() for above instead of [0] once we update to 1.6\n\n return comments\n\ndef getAdvancedFilteredLocationsByFile(id_source, n, r, filterType):\n source_locations = M.Location.objects.filter(source__id=id_source).annotate(reply=Count('comment')).filter(reply__gt=0)\n\n if filterType == \"reply\":\n filter_locations=source_locations.order_by('-reply')\n elif filterType == \"students\":\n filter_locations=source_locations.annotate(students=Count('comment__author',distinct=True)).order_by('-students')\n elif filterType == \"longest\":\n return {}\n #TODO: fix\n #filter_locations=source_locations.annotate(body=Q(comment__parent=None)).extra(select={'length':'Length(body)'}).order_by('-length')\n elif filterType == \"random\":\n filter_locations=source_locations.order_by('?')\n\n if r == \"threads\":\n filter_locations = filter_locations[:n]\n else:\n nTotal = len(source_locations)\n filter_locations = filter_locations[:nTotal * n / 100]\n\n retval = {}\n for loc in filter_locations:\n retval[loc.id] = loc.id\n\n return retval\n\ndef getComment(id, uid):\n names = __NAMES[\"comment2\"]\n comment = M.Comment.objects.select_related(\"location\", \"author\").extra(select={\"admin\": 'select cast(admin as integer) from base_membership, base_location where base_membership.user_id=base_comment.author_id and base_membership.ensemble_id = base_location.ensemble_id and base_location.id=base_comment.location_id'}).get(pk=id)\n return UR.model2dict(comment, names, \"ID\")\n\ndef getTagsByComment(comment_id):\n tags = M.Tag.objects.filter(comment__id=comment_id)\n return UR.qs2dict(tags, __NAMES[\"tag\"], \"ID\")\n\n# Returns a QuerySet of Locations in a thread that the user is tagged somewhere in\ndef getLocationsTaggedIn(uid):\n tags = M.Tag.objects.filter(individual__id=uid)\n comments = 
M.Comment.objects.filter(id__in=tags.values_list(\"comment_id\"))\n loc_set = {}\n for comment in comments:\n loc_set[comment.location.id] = None\n return M.Location.objects.filter(id__in=list(loc_set.keys()))\n\ndef getCommentsByFile(id_source, uid, after):\n names_location = __NAMES[\"location_v_comment2\"]\n names_comment = __NAMES[\"comment2\"]\n ensembles_im_admin = M.Ensemble.objects.filter(membership__in=M.Membership.objects.filter(user__id=uid).filter(admin=True))\n locations_im_admin = M.Location.objects.filter(ensemble__in=ensembles_im_admin)\n comments = M.Comment.objects.extra(select={\"admin\": 'select cast(admin as integer) from base_membership where base_membership.user_id=base_comment.author_id and base_membership.ensemble_id = base_location.ensemble_id limit 1'}).select_related(\"location\", \"author\").filter(location__source__id=id_source, deleted=False, moderated=False).filter(Q(location__in=locations_im_admin, type__gt=1) | Q(author__id=uid) | Q(type__gt=2))\n membership = M.Membership.objects.filter(user__id=uid, ensemble__ownership__source__id=id_source, deleted=False)[0]\n if membership.section is not None:\n seen = M.CommentSeen.objects.filter(comment__location__source__id=id_source, user__id=uid)\n #idea: we let you see comments\n # - that are in your current section\n # - that aren't in any section\n # - that you've seen before\n # - that you've authored.\n comments = comments.filter(Q(location__section=membership.section)|\n Q(location__section=None)|\n Q(location__comment__in=seen.values_list(\"comment_id\"))|\n Q(author__id=uid))\n threadmarks = M.ThreadMark.objects.filter(location__in=comments.values_list(\"location_id\"))\n if after is not None:\n comments = comments.filter(ctime__gt=after)\n threadmarks = threadmarks.filter(ctime__gt=after)\n\n # Filter out private tags that aren't yours\n #comments_tagged_in = M.Tag.objects.filter(individual__id=uid).values_list(\"comment_id\")\n locations_tagged_in = getLocationsTaggedIn(uid)\n comments = comments.filter(Q(author__id=uid) | ~Q(type__in=[4]) | Q(location__in=locations_tagged_in))\n\n # Get Tags\n tags = M.Tag.objects.filter(comment__in=comments)\n\n html5locations = M.HTML5Location.objects.filter(location__comment__in=comments)\n locations_dict = UR.qs2dict(comments, names_location, \"ID\")\n comments_dict = UR.qs2dict(comments, names_comment, \"ID\")\n html5locations_dict = UR.qs2dict(html5locations, __NAMES[\"html5location\"], \"ID\")\n threadmarks_dict = UR.qs2dict(threadmarks, __NAMES[\"threadmark\"], \"id\")\n tag_dict = UR.qs2dict(tags, __NAMES[\"tag\"], \"ID\")\n #Anonymous comments\n ensembles_im_admin_ids = [o.id for o in ensembles_im_admin]\n for k,c in comments_dict.items():\n if not c[\"signed\"] and not (locations_dict[c[\"ID_location\"]][\"id_ensemble\"] in ensembles_im_admin_ids or uid==c[\"id_author\"]):\n c[\"fullname\"]=\"Anonymous\"\n c[\"id_author\"]=0\n return locations_dict, html5locations_dict, comments_dict, threadmarks_dict, tag_dict\n\ndef get_comments_collection(uid, P):\n output = {}\n comments_refs = M.Comment.objects.filter(id__in=P[\"comments\"], deleted=False, moderated=False)\n locations= M.Location.objects.filter(comment__in=comments_refs)\n html5locations = M.HTML5Location.objects.filter(location__in=locations)\n locations_im_admin = locations.filter(ensemble__in= M.Ensemble.objects.filter(membership__in=M.Membership.objects.filter(user__id=uid).filter(admin=True)))\n comments = M.Comment.objects.extra(select={\"admin\": 'select cast(admin as integer) from 
base_membership, base_location where base_membership.user_id=base_comment.author_id and base_membership.ensemble_id = base_location.ensemble_id and base_location.id = base_comment.location_id'}).select_related(\"location\", \"author\").filter(deleted=False, moderated=False, location__in=locations).filter(Q(location__in=locations_im_admin, type__gt=1) | Q(author__id=uid) | Q(type__gt=2))\n    ensembles = M.Ensemble.objects.filter(location__in=locations)\n    files = M.Source.objects.filter(location__in=locations)\n    ownerships = M.Ownership.objects.select_related(\"source\", \"ensemble\", \"folder\").filter(source__in=files, ensemble__in=ensembles)\n    seen = M.CommentSeen.objects.select_related(\"comment\").filter(comment__in=comments).filter(user__id=uid)\n    output[\"ensembles\"]=UR.qs2dict(ownerships, __NAMES[\"ensembles2\"] , \"ID\")\n    output[\"files\"]=UR.qs2dict(ownerships, __NAMES[\"files2\"] , \"ID\")\n    output[\"folders\"]=UR.qs2dict(ownerships, __NAMES[\"folders2\"] , \"ID\")\n    output[\"locations\"] = UR.qs2dict( comments, __NAMES[\"location_v_comment2\"], \"ID\")\n    output[\"html5locations\"] = UR.qs2dict( html5locations, __NAMES[\"html5location\"], \"ID\")\n    comments_dict = UR.qs2dict( comments, __NAMES[\"comment2\"] , \"ID\")\n    #Anonymous comments\n    for k,c in comments_dict.items():\n        if c[\"type\"] < 3:\n            c[\"fullname\"]=\"Anonymous\"\n            c[\"id_author\"]=0\n    output[\"comments\"] = comments_dict\n    output[\"seen\"] = UR.qs2dict(seen, {\"id\": None, \"id_location\": \"comment.location_id\"}, \"id\")\n    return output\n\ndef get_comments_auth(uid, P):\n    output = {}\n    id_ensemble = False\n    id_source = False\n    if \"id_ensemble\" in P:\n        id_ensemble = P[\"id_ensemble\"]\n    if \"id_source\" in P:\n        id_source = P[\"id_source\"]\n    comments_authored = M.Comment.objects.filter(author__id=uid, deleted=False, moderated=False)\n    if id_ensemble:\n        comments_authored = comments_authored.filter(location__ensemble__id=id_ensemble)\n    if id_source:\n        comments_authored = comments_authored.filter(location__source__id=id_source)\n    locations= M.Location.objects.filter(comment__in=comments_authored)\n    locations_im_admin = locations.filter(ensemble__in= M.Ensemble.objects.filter(membership__in=M.Membership.objects.filter(user__id=uid).filter(admin=True)))\n    comments = M.Comment.objects.extra(select={\"admin\": 'select cast(admin as integer) from base_membership, base_location where base_membership.user_id=base_comment.author_id and base_membership.ensemble_id = base_location.ensemble_id and base_location.id = base_comment.location_id'}).select_related(\"location\", \"author\").filter(deleted=False, moderated=False, location__in=locations).filter(Q(location__in=locations_im_admin, type__gt=1) | Q(author__id=uid) | Q(type__gt=2))\n    ensembles = M.Ensemble.objects.filter(location__in=locations)\n    files = M.Source.objects.filter(location__in=locations)\n    ownerships = M.Ownership.objects.select_related(\"source\", \"ensemble\", \"folder\").filter(source__in=files, ensemble__in=ensembles)\n    seen = M.CommentSeen.objects.select_related(\"comment\").filter(comment__in=comments).filter(user__id=uid)\n    sequence = comments_authored.values_list('id', flat=True).order_by('-id')\n    output[\"ensembles\"]=UR.qs2dict(ownerships, __NAMES[\"ensembles2\"] , \"ID\")\n    output[\"files\"]=UR.qs2dict(ownerships, __NAMES[\"files2\"] , \"ID\")\n    output[\"folders\"]=UR.qs2dict(ownerships, __NAMES[\"folders2\"] , \"ID\")\n    output[\"locations\"] = UR.qs2dict( comments, __NAMES[\"location_v_comment2\"], \"ID\")\n    comments_dict = UR.qs2dict( comments, 
__NAMES[\"comment2\"] , \"ID\")\n #Anonymous comments\n for k,c in comments_dict.items():\n if c[\"type\"] < 3:\n c[\"fullname\"]=\"Anonymous\"\n c[\"id_author\"]=0\n output[\"comments\"] = comments_dict\n output[\"seen\"] = UR.qs2dict(seen, {\"id\": None, \"id_location\": \"comment.location_id\"}, \"id\")\n description = \"My Notes \"\n if id_ensemble and ownerships.count():\n description += \"on %s \" % (ownerships[0].ensemble.name,)\n if id_source and ownerships.count():\n description += \"on %s \" % (ownerships[0].source.title,)\n description += \"(%s comments)\" % (comments_authored.count(),)\n output[\"sequence\"] = {\"type\": \"comment\", \"data\": list(sequence), \"description\": description}\n return output\n\n\ndef get_comments_auth_admin(uid, P):\n output = {}\n id_ensemble = P.get(\"id_ensemble\", False)\n id_source = P.get(\"id_source\", False)\n id_author = P.get(\"id_author\", False)\n unread = P.get(\"unread\", False)\n\n locations_im_admin = M.Location.objects.filter(ensemble__in= M.Ensemble.objects.filter(membership__in=M.Membership.objects.filter(user__id=uid).filter(admin=True)))\n if id_author:\n locations_im_admin = locations_im_admin.filter(id__in=M.Location.objects.filter(comment__in=M.Comment.objects.filter(author__id=id_author, deleted=False, moderated=False)))\n if id_ensemble:\n locations_im_admin=locations_im_admin.filter(ensemble__id=id_ensemble)\n if id_source:\n locations_im_admin=locations_im_admin.filter(source__id=id_source)\n\n comments = M.Comment.objects.extra(select={\"admin\": 'select max(cast(admin as integer)) from base_membership, base_location where base_membership.user_id=base_comment.author_id and base_membership.ensemble_id = base_location.ensemble_id and base_location.id = base_comment.location_id'}).select_related(\"location\", \"author\").filter(deleted=False, moderated=False, location__in=locations_im_admin).filter(Q(type__gt=1) | Q(author__id=uid))\n seen = M.CommentSeen.objects.select_related(\"comment\").filter(comment__in=comments).filter(user__id=uid)\n if unread:\n comments_unread = comments.exclude(commentseen__in=seen)\n locations_im_admin = locations_im_admin.filter(comment__in=comments_unread)\n comments = M.Comment.objects.extra(select={\"admin\": 'select max(cast(admin as integer)) from base_membership, base_location where base_membership.user_id=base_comment.author_id and base_membership.ensemble_id = base_location.ensemble_id and base_location.id = base_comment.location_id'}).select_related(\"location\", \"author\").filter(deleted=False, moderated=False, location__in=locations_im_admin).filter(Q(type__gt=1) | Q(author__id=uid))\n seen = M.CommentSeen.objects.select_related(\"comment\").filter(comment__in=comments).filter(user__id=uid)\n ensembles = M.Ensemble.objects.filter(location__in=locations_im_admin)\n files = M.Source.objects.filter(location__in=locations_im_admin)\n ownerships = M.Ownership.objects.select_related(\"source\", \"ensemble\", \"folder\").filter(source__in=files, ensemble__in=ensembles)\n sequence = comments.values_list('id', flat=True).order_by('-id')\n output[\"ensembles\"]=UR.qs2dict(ownerships, __NAMES[\"ensembles2\"] , \"ID\")\n output[\"files\"]=UR.qs2dict(ownerships, __NAMES[\"files2\"] , \"ID\")\n output[\"folders\"]=UR.qs2dict(ownerships, __NAMES[\"folders2\"] , \"ID\")\n output[\"locations\"] = UR.qs2dict( comments, __NAMES[\"location_v_comment2\"], \"ID\")\n output[\"comments\"] = UR.qs2dict( comments, __NAMES[\"comment2\"] , \"ID\")\n output[\"seen\"] = UR.qs2dict(seen, {\"id\": None, 
\"id_location\": \"comment.location_id\"}, \"id\")\n description = \"Notes \"\n if id_author:\n author = M.User.objects.get(pk=id_author)\n description += \"from %s %s \" % (author.firstname, author.lastname)\n if id_ensemble and ownerships.count():\n description += \"on %s \" % (ownerships[0].ensemble.name,)\n if id_source and ownerships.count():\n description += \"on %s \" % (ownerships[0].source.title,)\n if unread:\n description = \"Unread \"+description\n description += \"(%s comments)\" % (comments.count(),)\n output[\"sequence\"] = {\"type\": \"comment\", \"data\": list(sequence), \"description\": description}\n return output\n\n\n\ndef getPublicCommentsByFile(id_source):\n names_location = __NAMES[\"location_v_comment2\"]\n names_comment = __NAMES[\"comment2\"]\n comments = M.Comment.objects.extra(select={\"admin\": 'select cast(admin as integer) from base_membership where base_membership.user_id=base_comment.author_id and base_membership.ensemble_id = base_location.ensemble_id'}).select_related(\"location\", \"author\").filter(location__source__id=id_source, deleted=False, moderated=False, type__gt=2)\n locations_dict = UR.qs2dict(comments, names_location, \"ID\")\n comments_dict = UR.qs2dict(comments, names_comment, \"ID\")\n #Anonymous comments\n for k,c in comments_dict.items():\n if c[\"type\"] < 3:\n c[\"fullname\"]=\"Anonymous\"\n c[\"id_author\"]=0\n return locations_dict, comments_dict\n\n\n\ndef getSeenByFile(id_source, uid):\n names = {\"id\": \"comment.id\", \"id_location\": \"comment.location_id\"}\n locations = M.Location.objects.filter(source__id=id_source)\n comments = M.Comment.objects.filter(location__in=locations)\n seen = M.CommentSeen.objects.select_related(\"comment\").filter(comment__in=comments).filter(user__id=uid)\n return UR.qs2dict(seen, names, \"id\")\n\ndef markThread(uid, payload):\n mtype = int(payload[\"type\"])\n lid = int(payload[\"id_location\"])\n #comment_id = None if \"comment_id\" not in payload or payload[\"comment_id\"] is None else int(payload[\"comment_id\"])\n comment_id = int(payload[\"comment_id\"])\n mark = M.ThreadMark.objects.filter(user__id=uid, type=mtype, location__id=lid, active=True)\n if mark.count()>0: # only allow one active threadmark of a given type per thread\n mark = mark[0]\n mh = M.ThreadMarkHistory()\n mh.active = mark.active\n mh.comment_id = mark.comment_id\n mh.ctime = mark.ctime\n mh.location_id = mark.location_id\n mh.user_id = mark.user_id\n mh.type = mark.type\n mh.save()\n mark.ctime = datetime.datetime.now()\n active_default = True\n if comment_id != mark.comment_id:\n #there was a real change of comment_id: don't update active default value\n active_default = mark.active\n else: #then probably just a toggle\n active_default = not mark.active\n mark.comment_id = comment_id\n mark.active = payload[\"active\"] if \"active\" in payload else active_default # if no arg given, toggle\n else:\n mark = M.ThreadMark()\n mark.user_id = uid\n mark.location_id = lid\n mark.comment_id = comment_id\n mark.type = mtype\n mark.active = payload[\"active\"] if \"active\" in payload else True\n mark.save()\n return UR.model2dict(mark)\n\ndef getMark(uid, payload):\n names = {\n \"ID\": \"id\",\n \"type\": None,\n }\n comment = M.Comment.objects.get(pk=int(payload[\"id_comment\"]))\n user = M.User.objects.get(pk=uid)\n o = M.Mark.objects.filter(comment=comment, user=user)\n return UR.qs2dict(o, names, \"ID\")\n #return DB().getIndexedObjects(names, \"ID\", \"nb2_v_mark3\", \"id=? 
and id_user=?\", (int(payload[\"id_comment\"]),uid));\n\ndef instantTagReminder(comment, recipient):\n # Email Data\n subject = \"You were tagged in a new note on NB!\"\n V = {\"reply_to\": settings.SMTP_REPLY_TO, \"protocol\": settings.PROTOCOL, \"hostname\": settings.HOSTNAME}\n\n # Send Email\n default_setting = M.DefaultSetting.objects.get(name=\"email_confirmation_tags\")\n try:\n user_setting = M.UserSetting.objects.get(setting=default_setting, user=recipient)\n except M.UserSetting.DoesNotExist:\n user_setting = default_setting\n if user_setting.value > 0:\n context = {\"V\": V, \"comment\": comment, \"recipient\": recipient}\n msg = render_to_string(\"email/msg_instant_tag_reminder\", context)\n email = EmailMessage(subject, msg, settings.EMAIL_FROM, (recipient.email,), (settings.EMAIL_BCC,))\n email.send(fail_silently=True)\n\ndef addNote(payload):\n id_location = None\n author = M.User.objects.get(pk=payload[\"id_author\"])\n location = None\n h5l = None\n parent = M.Comment.objects.get(pk=payload[\"id_parent\"]) if \"id_parent\" in payload else None\n #putting this in factor for duplicate detection:\n similar_comments = M.Comment.objects.filter(parent=parent, ctime__gt=datetime.datetime.now()-datetime.timedelta(0,10,0), author=author, body=payload[\"body\"]);\n\n #do we need to insert a location ?\n if \"id_location\" in payload:\n location = M.Location.objects.get(pk=payload[\"id_location\"])\n #refuse if similar comment\n similar_comments = similar_comments.filter(location=location)\n if similar_comments.count():\n return []\n else:\n location = M.Location()\n location.source = M.Source.objects.get(pk=payload[\"id_source\"])\n location.ensemble = M.Ensemble.objects.get(pk=payload[\"id_ensemble\"])\n try:\n location.y = payload[\"top\"]\n except KeyError:\n logger = logging.getLogger(\"errorlog\")\n logger.error(\"no top for loc source %s, ensemble %s, author %s\" %(payload[\"id_source\"],payload[\"id_ensemble\"],payload[\"id_author\"]))\n raise\n\n location.x = payload[\"left\"]\n location.w = payload[\"w\"]\n location.h = payload[\"h\"]\n location.page = payload[\"page\"]\n # Duration for YouTube comments\n if \"duration\" in payload:\n location.duration = payload[\"duration\"]\n if \"pause\" in payload and auth.canPauseComment(author.id, location.source.id):\n location.pause = payload[\"pause\"]\n if \"title\" in payload:\n location.is_title = payload[\"title\"] == 1\n location.section = M.Membership.objects.get(user=author, ensemble=location.ensemble, deleted=False).section\n\n #refuse if similar comment\n similar_comments = similar_comments.filter(location__in=M.Location.objects.filter(source=location.source, ensemble=location.ensemble, y=location.y, x=location.x, w=location.w, h=location.h, page=location.page));\n if similar_comments.count():\n return []\n\n location.save()\n #do we need to add an html5 location ?\n if \"html5range\" in payload:\n h5range = payload[\"html5range\"]\n h5l = M.HTML5Location()\n h5l.path1 = h5range[\"path1\"]\n h5l.path2 = str(h5range[\"path2\"])\n h5l.offset1 = h5range[\"offset1\"]\n h5l.offset2 = h5range[\"offset2\"]\n h5l.location = location\n h5l.save()\n\n # Should we import this comment from somewhere?\n body = payload[\"body\"]\n matchObj = re.match( r'@import\\((\\d+), *(.*)\\)', body)\n\n if (matchObj):\n from_loc_id = int(matchObj.group(1))\n import_type = matchObj.group(2)\n\n # Am I allowed to do this? 
Am I an admin in the source?\n src_membership = M.Location.objects.get(pk=from_loc_id).ensemble.membership_set.filter(user=author,admin=True)\n if src_membership.count() > 0:\n return importAnnotation(import_type, from_loc_id, location)\n else:\n return []\n else:\n comment = M.Comment()\n comment.parent = parent\n comment.location = location\n comment.author = author\n comment.body = payload[\"body\"]\n comment.type = payload[\"type\"]\n comment.signed = payload[\"signed\"] == 1\n comment.save()\n\n # Add any tags\n if \"tags\" in payload:\n new_tags = payload[\"tags\"]\n for tag_id in new_tags:\n tagged_user = M.User.objects.get(pk=tag_id)\n\n tag = M.Tag()\n tag.type = 1\n tag.comment = comment\n tag.individual = tagged_user\n tag.save()\n\n instantTagReminder(comment, tagged_user)\n\n # mark seen by author\n session, previous_activity = markActivity(UR.CID)\n if session is not None:\n markCommentSeen(author.id, session.id, {comment.id: time.time()*1000})\n\n return [comment]\n\ndef setLocationSection(id_location, id_section):\n location = M.Location.objects.get(pk = id_location)\n if id_section:\n location.section = M.Section.objects.get(pk = id_section)\n else:\n location.section = None\n location.save()\n return location\n\ndef promoteLocationByCopy(id_location):\n location = M.Location.objects.get(pk = id_location)\n html5locations = M.HTML5Location.objects.filter(location = location)\n html5location = None\n\n if html5locations.exists():\n html5location = html5locations[0]\n\n if location.section == None:\n return { \"status\": \"Already promoted\" }\n\n # Get List of All Sections Except for mine\n sections = M.Section.objects.filter(ensemble=location.ensemble).exclude(pk=location.section.pk)\n top_comment = M.Comment.objects.get(location = location, parent__isnull = True)\n\n # Resulting Lists\n new_locs = []\n new_comments = []\n\n for section in sections:\n location.pk = None # prepare to make a new copy of location\n top_comment.pk = None # prepare to make a new copy of the top comment\n\n location.section = section\n\n location.save()\n\n # Create a Fresh HTML5Location for each location\n if html5location:\n html5location.pk = None\n html5location.location = location\n html5location.save()\n\n top_comment.location = location\n top_comment.signed = False\n top_comment.save()\n\n new_locs.append(location.pk)\n new_comments.append(top_comment.pk)\n return new_locs, new_comments\n\ndef importAnnotation(import_type, from_loc_id, target_location):\n\n # Now, import body text of the comment\n comment = M.Comment.objects.get(location = from_loc_id, parent__isnull = True)\n importPk = comment.pk\n comment.pk = None\n comment.location = target_location\n comment.signed = False\n comment.save()\n\n oldToNew = {}\n\n toReturn = [comment]\n\n # If we need to import the whole thread, do that\n if import_type == \"all\":\n oldToNew[importPk] = comment.pk\n toVisit = [ importPk ]\n\n while len(toVisit) > 0:\n visiting = toVisit.pop()\n comments = M.Comment.objects.filter(location = from_loc_id, parent = visiting)\n for c in comments:\n toVisit.append(c.pk)\n oldPk = c.pk\n c.pk = None\n c.location = target_location\n c.signed = False\n c.parent = M.Comment.objects.get(pk = oldToNew[visiting])\n c.save()\n oldToNew[oldPk] = c.pk\n toReturn.append(c)\n return toReturn\n\ndef bulkImportAnnotations(from_source_id, to_source_id, locs_array, import_type):\n\n from_source = M.Source.objects.get(pk=from_source_id)\n to_source = M.Source.objects.get(pk=to_source_id)\n\n for id_location in 
locs_array:\n        location = M.Location.objects.get(pk=id_location)\n        html5locations = M.HTML5Location.objects.filter(location=location)\n\n        if location.source_id != from_source.pk:\n            return { \"status\": \"Source File Mismatch locsrc: %s, src %s\"%(location.source_id,from_source_id) }\n\n        # Copy Location and update the source\n        location.pk = None\n        location.source_id = to_source_id\n        location.save()\n\n        if html5locations.exists():\n            html5location = html5locations[0]\n            html5location.pk = None\n            html5location.location = location\n            html5location.save()\n\n        # Arguments:     old loc id      target loc\n        importAnnotation( import_type, id_location, location)\n\n    return {\"status\": \"Success\" }\n\ndef editNote(payload):\n    id_type = payload[\"type\"]\n    comment_id = payload[\"id_comment\"]\n    comment = M.Comment.objects.get(pk=comment_id)\n    comment.body = payload[\"body\"]\n    comment.type = id_type\n    comment.signed = payload[\"signed\"]\n    comment.save()\n\n    retval = None\n    # Edit time and duration if they are in payload\n    if (\"page\" in payload) and (\"duration\" in payload):\n        comment.location.page = payload[\"page\"]\n        comment.location.duration = payload[\"duration\"]\n        comment.location.save()\n        retval = comment.location\n    # Edit whether to pause on comment if in payload\n    if (\"pause\" in payload):\n        comment.location.pause = payload[\"pause\"]\n        comment.location.save()\n\n    # Edit Tags if they are in payload\n    if \"tags\" in payload:\n        tagset = payload[\"tags\"]\n        comment_tags = M.Tag.objects.filter(comment__id=comment_id)\n        # If user in tagset and not yet tagged, add tag\n        for user_id in tagset:\n            try:\n                # If no exception, the tag is already there\n                tag = comment_tags.get(individual__id=user_id)\n            except M.Tag.DoesNotExist:\n                # Tag in tagset but not database, add to database\n                tagged_user = M.User.objects.get(pk=user_id)\n                tag = M.Tag()\n                tag.type = 1\n                tag.comment = comment\n                tag.individual = tagged_user\n                tag.save()\n                instantTagReminder(comment, tagged_user)\n        # If tag exists on this comment and not in tagset, delete it\n        deleted_tags = comment_tags.exclude(individual__id__in=tagset)\n        deleted_tags.delete()\n\n    if retval is not None:\n        retval = UR.model2dict(retval, __NAMES[\"location2\"], \"ID\")\n        for loc_id in retval:\n            retval = retval[loc_id]\n            break\n        retval[\"body\"] = M.Comment.objects.get(location__id=loc_id, parent=None).body\n\n    return retval\n\ndef deleteNote(payload):\n    id = int(payload[\"id_comment\"])\n    comment = M.Comment.objects.get(pk=id)\n    comment.deleted = True\n    comment.save()\n\ndef deleteThread(payload):\n    comments = M.Comment.objects.filter(location__id = payload[\"id_location\"])\n    comments.delete()\n    location = M.Location.objects.get(pk=payload[\"id_location\"])\n    location.delete()\n\ndef approveNote(uid, payload):\n    value = int(payload[\"value\"])\n    id_comment = int(payload[\"id_comment\"])\n    # single UPDATE through the ORM; no raw-SQL DB() helper is defined in this module\n    M.Comment.objects.filter(pk=id_comment).update(preselected=value)\n
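\n# Aside (not part of the original file): QuerySet.update() issues a single UPDATE\n# without fetching the row and without calling Model.save() or firing pre/post_save\n# signals. If save() side effects were needed, the load-modify-save form applies:\n#     comment = M.Comment.objects.get(pk=id_comment)\n#     comment.preselected = value\n#     comment.save()\n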
where id=?\", (value, id_comment))\n\ndef delete_file(uid, P):\n id = P[\"id\"]\n if P[\"item_type\"]==\"file\":\n o = M.Ownership.objects.get(source__id=id)\n o.deleted = True\n o.save()\n return id\n else: #folder\n folder = M.Folder.objects.get(pk=id)\n M.Ownership.objects.filter(folder__id=id).update(folder=None)\n folder.delete()\n return id\n\ndef create_ensemble(uid, P): #name, description, uid, allow_staffonly, allow_anonymous, ):\n import random, string\n ensemble = M.Ensemble(name=P[\"name\"], description=P[\"description\"])\n if \"allow_staffonly\" in P:\n ensemble.allow_staffonly = P[\"allow_staffonly\"]\n if \"allow_anonymous\" in P:\n ensemble.allow_anonymous = P[\"allow_anonymous\"]\n if \"allow_guest\" in P:\n ensemble.allow_guest = P[\"allow_guest\"]\n if \"use_invitekey\" in P:\n ensemble.use_invitekey = P[\"use_invitekey\"]\n if \"allow_download\" in P:\n ensemble.allow_download = P[\"allow_download\"]\n if \"allow_ondemand\" in P:\n ensemble.allow_ondemand = P[\"allow_ondemand\"]\n if \"default_pause\" in P:\n ensemble.default_pause = P[\"default_pause\"]\n ensemble.invitekey = \"\".join([ random.choice(string.ascii_letters+string.digits) for i in range(0,50)])\n ensemble.save()\n id = ensemble.pk\n membership = M.Membership(ensemble_id=id, user_id=uid, admin=1)\n membership.save()\n return id\n\ndef createSourceID():\n o = M.Source()\n o.save()\n return o.id\n\ndef create_folder(id_ensemble, id_parent, name):\n folder = M.Folder(parent_id=id_parent, ensemble_id=id_ensemble, name=name)\n folder.save()\n return folder.pk\n\ndef rename_file(uid, P):\n if P[\"item_type\"]==\"file\":\n source = M.Source.objects.get(pk=P['id'])\n source.title = P[\"title\"]\n source.save()\n return get_files(uid, {\"id\": P[\"id\"]})\n else:\n folder = M.Folder.objects.get(pk=P[\"id\"])\n folder.name = P[\"title\"]\n folder.save()\n return get_folders(uid, {\"id\": P[\"id\"]})\n\ndef edit_assignment(uid, P):\n source = M.Source.objects.get(pk=P['id'])\n ownership = M.Ownership.objects.get(source=source)\n ownership.assignment = P[\"assignment\"]\n ownership.due = dateutil.parser.parse(P[\"due\"])\n ownership.save()\n return get_files(uid, {\"id\": P[\"id\"]})\n\n\n\ndef move_file(uid, P):\n id = P[\"id\"]\n if P[\"item_type\"]==\"file\":\n o = M.Ownership.objects.get(source__id=id)\n o.folder_id = P[\"dest\"]\n o.save()\n return get_files(uid, {\"id\": id})\n else:\n o = M.Folder.objects.get(pk=id)\n o.parent_id = P[\"dest\"]\n o.save()\n return get_folders(uid, {\"id\": id})\n\ndef copy_file(uid, P):\n new_source = M.Source.objects.get(pk=P[\"source_id\"])\n new_source.title = P[\"target_name\"]\n new_source.pk = None\n new_source.submittedby_id = uid\n new_source.save()\n\n new_ownership = M.Ownership.objects.get(source__id=P[\"source_id\"])\n new_ownership.pk = None\n new_ownership.source = new_source\n new_ownership.published = datetime.datetime.now()\n new_ownership.deleted = False\n # new_ownership.due = None\n\n if P[\"target_type\"] == \"ensemble\":\n new_ownership.ensemble_id = P[\"target_id\"]\n new_ownership.folder = None\n else: # target type is folder, we know for sure since it was validated before\n folder = M.Folder.objects.get(pk=P[\"target_id\"])\n new_ownership.folder = folder\n new_ownership.ensemble = folder.ensemble\n\n new_ownership.save()\n\n if new_source.type == 1: # ondemand pdf\n try:\n new_ondemand = M.OnDemandInfo.objects.get(source__pk=P[\"source_id\"])\n new_ondemand.pk = None\n new_ondemand.ensemble = new_ownership.ensemble\n new_ondemand.source = new_source\n 
\ndef copy_file(uid, P):\n    new_source = M.Source.objects.get(pk=P[\"source_id\"])\n    new_source.title = P[\"target_name\"]\n    new_source.pk = None\n    new_source.submittedby_id = uid\n    new_source.save()\n\n    new_ownership = M.Ownership.objects.get(source__id=P[\"source_id\"])\n    new_ownership.pk = None\n    new_ownership.source = new_source\n    new_ownership.published = datetime.datetime.now()\n    new_ownership.deleted = False\n    # new_ownership.due = None\n\n    if P[\"target_type\"] == \"ensemble\":\n        new_ownership.ensemble_id = P[\"target_id\"]\n        new_ownership.folder = None\n    else: # target type is folder, we know for sure since it was validated before\n        folder = M.Folder.objects.get(pk=P[\"target_id\"])\n        new_ownership.folder = folder\n        new_ownership.ensemble = folder.ensemble\n\n    new_ownership.save()\n\n    if new_source.type == 1: # ondemand pdf\n        try:\n            new_ondemand = M.OnDemandInfo.objects.get(source__pk=P[\"source_id\"])\n            new_ondemand.pk = None\n            new_ondemand.ensemble = new_ownership.ensemble\n            new_ondemand.source = new_source\n            new_ondemand.save()\n        except M.OnDemandInfo.DoesNotExist:\n            pass\n        pdf_dir = \"%s/%s\" % (settings.HTTPD_MEDIA, settings.REPOSITORY_DIR)\n        old_file = \"%s/%s\" % (pdf_dir, P[\"source_id\"])\n        new_file = \"%s/%s\" % (pdf_dir, new_source.pk)\n        shutil.copyfile(old_file, new_file)\n\n    elif new_source.type == 2: # youtube\n        new_youtube = M.YoutubeInfo.objects.get(source__pk=P[\"source_id\"])\n        new_youtube.pk = None\n        new_youtube.source = new_source\n        new_youtube.save()\n    elif new_source.type == 3: # html5 video\n        pass\n    elif new_source.type == 4: # html5\n        new_html5 = M.HTML5Info.objects.get(source__pk=P[\"source_id\"])\n        new_html5.pk = None\n        new_html5.source = new_source\n        new_html5.save()\n\n    return new_source.pk\n\n\ndef createSource(uid, payload):\n    \"\"\"\n    if id_source in payload, use that id, provided no record already exists, if not use new id\n    returns the id\n    \"\"\"\n    source = None\n    url = payload[\"url\"]\n    page = parsePage(url)\n    if \"id_source\" in payload:\n        id = payload[\"id_source\"]\n        source = M.Source.objects.get(pk=id)\n        if source is not None and source.numpages!=0:\n            assert False, \"already a source w/ id=%s !!!\" % (id,)\n        else:\n            source.submittedby_id = uid\n    else:\n        source = M.Source(submittedby_id=uid)\n    source.title = page[\"path\"][1:]\n    source.save() # Django's default behavior is auto-commit\n    return source.id # Model.save() returns None, so the pk is returned explicitly\n\ndef parsePage(s):\n    s = s.strip()\n    #print \"parsing <%s>\" % (s,)\n    port = \"\"\n    path = \"/\"\n    query = \"\"\n    r = re.compile(r'(\\w*)://([^/]*)[/]?(.*)')  # raw strings avoid invalid-escape warnings\n    m1 = r.match(s)\n    scheme = m1.group(1)\n    dn_and_port = m1.group(2)\n    path_and_query = m1.group(3)\n    #print \"m1 parsed: %s\", (m1.groups(),)\n    #get port if any:\n    r = re.compile(r'([^:]*):([\\d]*)')\n    m2 = r.match(dn_and_port)\n    if m2 is None:\n        dn = dn_and_port\n    else:\n        dn = m2.group(1)\n        port = m2.group(2)\n    if path_and_query != \"\": #need to get path and query from dn\n        r = re.compile(r'([^\\?]*)\\?(.*)')\n        m3 = r.match(path_and_query)\n        if m3 is not None:\n            #print \"m3 parsed: %s\", (m3.groups(),)\n            path += m3.group(1)\n            query = m3.group(2)\n        else:\n            path += path_and_query\n    page = {}\n    page[\"scheme\"] = scheme\n    page[\"dn\"] = dn\n    page[\"port\"] = port\n    page[\"path\"] = path\n    page[\"query\"] = query\n    #print \"here's the page I've parsed: %s\" % (page,)\n    return page\n\ndef addOwnership(id_source, id_ensemble, id_folder=None):\n    ownership = M.Ownership()\n    ownership.source_id = id_source\n    ownership.ensemble_id = id_ensemble\n    if id_folder is not None:\n        ownership.folder_id = id_folder\n    ownership.save()\n    return ownership\n\ndef markIdle(uid, id_session, o):\n    for id in o:\n        t1= datetime.datetime.fromtimestamp(int(id)/1000)\n        t2= datetime.datetime.fromtimestamp(o[id]/1000)\n        x = M.Idle(session_id=id_session, t1=t1, t2=t2)\n        x.save()\n\ndef markCommentSeen(uid, id_session, o):\n    for id in o:\n        try:\n            comment_id = int(id)\n            x = M.CommentSeen(user_id=uid, session_id=id_session, comment_id=comment_id, ctime=datetime.datetime.fromtimestamp((o[id]+0.0)/1000))\n            x.save()\n        except ValueError:\n            pass\n\ndef markPageSeen(uid, id_session, o):\n    for id in o:\n        page, id_source, junk = id.split(\"|\")\n        x = M.PageSeen(user_id=uid, session_id=id_session, source_id=id_source, page=page, ctime=datetime.datetime.fromtimestamp((o[id]+0.0)/1000))\n        x.save()\n\ndef getFilename(id_source):\n    return M.Source.objects.get(pk=id_source)\n\ndef register_session(uid, p):\n    o = M.Session(user_id=uid, ctime=p[\"ctime\"], ip=p[\"ip\"])\n    o.save()\n\ndef register_user(uid, P):\n    import random, string\n    #we need 
to change confkey, so that the access from the confkey that user had gotten as guest can't work anymore.\n new_confkey = \"\".join([ random.choice(string.ascii_letters+string.digits) for i in range(0,20)])\n u = M.User.objects.get(pk=uid)\n u.firstname = P[\"firstname\"]\n u.lastname = P[\"lastname\"]\n u.confkey = new_confkey\n u.email = P[\"email\"]\n u.set_password(P[\"password\"])\n u.guest = False\n u.save()\n try:\n gh = M.GuestHistory.objects.get(user=u)\n gh.t_end = datetime.datetime.now()\n gh.save()\n except M.GuestHistory.DoesNotExist:\n pass\n return new_confkey\n\n\ndef page_served(uid, p):\n o = M.Landing(user_id=uid, ip=p[\"ip\"], referer=p[\"referer\"], path=p[\"path\"], client=p[\"client\"])\n o.save()\n\ndef markActivity(cid):\n if cid==\"0\" or cid==\"1\":\n return None, None #temporary fix\n try:\n session = M.Session.objects.filter(ctime=cid)[0]\n previous_activity = session.lastactivity\n session.lastactivity = datetime.datetime.now()\n session.save()\n return session, previous_activity\n except (M.Session.DoesNotExist, ValidationError, IndexError):\n pass\n return None, None\n\ndef getPending(uid, payload):\n #reply requested threadmarks:\n questions = M.ThreadMark.objects.filter(location__ensemble__membership__user__id=uid, type=1, active=True).exclude(user__id=uid)\n comments = M.Comment.objects.filter(location__threadmark__in=questions, parent__id=None, type=3, deleted=False, moderated=False)\n locations = M.Location.objects.filter(comment__in=comments)\n all_comments = M.Comment.objects.filter(location__in=locations)\n unrated_replies = all_comments.extra(tables=[\"base_threadmark\"], where=[\"base_threadmark.location_id=base_comment.location_id and base_threadmark.ctime 1:\n return\n lst = '.'.join(cur)\n if lst not in self.res:\n self.res.append(lst)\n return\n elif s and (num > 5 or num == 5 and int(s) > 255):\n return\n elif not s and num < 4:\n return\n else:\n for i in range(1, 4):\n if i <= len(s) and int(s[:i]) < 256:\n self.dfs(s[i:], cur + [s[:i]], num + 1)\n","repo_name":"zzh730/LeetCode","sub_path":"Backtracking/Restore IP Addresses.py","file_name":"Restore IP Addresses.py","file_ext":"py","file_size_in_byte":829,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"29153116693","text":"import socket\nimport sys\nimport time\nimport traceback\nimport queue\nimport logging\nimport json\nimport websocket\nimport paho.mqtt.client as mqtt\nfrom threading import Thread\nfrom threading import Timer\nfrom phue import Bridge\n\nlogger = logging.getLogger(__name__)\nlogging.basicConfig(level=logging.INFO)\n\nclass Error(Exception):\n def __init__(self, message):\n self.message = message\n\nclass HueError(Error):\n pass\n\n\nclass HueMqttServer:\n config = None\n bridge = None\n mqtt_connected = False\n mqtt_client = None\n status = { 'groups': {}, 'lights': {}, 'sensors':{}, 'scenes':{}}\n\n bridge_worker = None\n bridge_timer = None\n poll_time=60\n \n ws = None\n ws_worker = None\n \n class BridgeThread(Thread):\n bridge_queue = queue.Queue()\n def run(self):\n self.run=True\n while self.run:\n job = self.bridge_queue.get(block=True)\n logger.info(\"Bridge work: \" + str(job))\n if(job==\"quit\"):\n run=False\n else:\n job.start()\n job.join() \n self.bridge_queue.task_done()\n\n def __init__(self, config):\n self.config = config\n self.bridge_worker = self.BridgeThread()\n self.bridge_worker.setDaemon(True)\n self.bridge_worker.start()\n\n def mqtt_connect(self):\n if self.mqtt_broker_reachable():\n 
logger.info('Connecting to ' + self.config['mqtt_host'] + ':' + self.config['mqtt_port'])\n            self.mqtt_client = mqtt.Client(self.config['mqtt_client_id'])\n            if 'mqtt_user' in self.config and 'mqtt_password' in self.config:\n                self.mqtt_client.username_pw_set(self.config['mqtt_user'], self.config['mqtt_password'])\n\n            self.mqtt_client.on_connect = self.mqtt_on_connect\n            self.mqtt_client.on_disconnect = self.mqtt_on_disconnect\n            self.mqtt_client.on_message = self.mqtt_on_message\n            self.mqtt_client.on_subscribe = self.mqtt_on_subscribe\n            self.mqtt_client.enable_logger()\n            self.mqtt_client.will_set(self.config['mqtt_topic_prefix'] + \"/connected\", \"0\", 1, True)\n\n            try:\n                self.mqtt_client.connect(self.config['mqtt_host'], int(self.config['mqtt_port']), 10)\n            except:\n                logger.error(traceback.format_exc())\n                self.mqtt_client = None\n        else:\n            logger.error(self.config['mqtt_host'] + ':' + self.config['mqtt_port'] + ' not reachable!')\n    \n    def mqtt_on_subscribe(self, client, userdata, mid, granted_qos):\n        logger.info('Subscribed. Message id=' + str(mid))\n    \n    def mqtt_on_connect(self, mqtt_client, userdata, flags, rc):\n        logger.info('...mqtt_connected!')\n        self.mqtt_client.subscribe(self.config['mqtt_topic_prefix'] + '/get/#')\n        self.mqtt_client.message_callback_add(self.config['mqtt_topic_prefix'] + '/get/#', self.mqtt_on_message_get)\n        self.mqtt_client.subscribe(self.config['mqtt_topic_prefix'] + '/set/#')\n        self.mqtt_client.message_callback_add(self.config['mqtt_topic_prefix'] + '/set/lights/#', self.mqtt_on_message_set_light)\n        self.mqtt_client.subscribe(self.config['mqtt_topic_prefix'] + '/command/#')\n        self.mqtt_client.message_callback_add(self.config['mqtt_topic_prefix'] + '/command/#', self.mqtt_on_message_command)\n\n        self.mqtt_client.publish(self.config['mqtt_topic_prefix'] + \"/connected\", \"1\", 1, True)\n        self.bridge_worker.bridge_queue.put(Thread(target=self.bridge_connect))\n\n    def mqtt_on_disconnect(self, mqtt_client, userdata, rc):\n        logger.warn('Disconnected! will reconnect! 
...')\n        if rc == 0:\n            self.mqtt_connect()\n        else:\n            time.sleep(5)\n            while not self.mqtt_broker_reachable():\n                time.sleep(10)\n            self.mqtt_client.reconnect()\n    \n    def mqtt_on_message_get(self, client, userdata, message):\n        topic = message.topic.split(\"/\")\n        if topic[2] in self.status:\n            for dev in self.status[topic[2]]:\n                if self.status[topic[2]][dev]['name'] == topic[3]:\n                    logger.info(\"Get request for {} received\".format(\n                        topic[3]))\n                    topic_prefix = \"{}/{}/{}/{}\".format(\n                        self.config['mqtt_topic_prefix'],\n                        'status', topic[2], topic[3] )\n                    msg = json.dumps(self.status[topic[2]][dev])\n                    self.mqtt_client.publish(topic_prefix, msg, 0 , True)\n                    break\n    \n    def mqtt_on_message_set_light(self, client, userdata, message): \n        topic = message.topic.split(\"/\")\n        payload = json.loads(message.payload.decode(\"utf-8\"))\n        light = self.bridge.get_light(topic[3]) \n        if light:\n            if len(topic) == 4 or (len(topic) == 5 and topic[4] == ''):\n                # hue/set/light/*name* or hue/set/light/*name*/\n                #import pdb;pdb.set_trace()\n                self.BridgeThread.bridge_queue.put(\n                    Thread(\n                        target=self.bridge.set_light, \n                        args=(topic[3], payload)))\n            elif len(topic) == 5:\n                value = None\n                if topic[4] == \"on\": \n                    #import pdb;pdb.set_trace()\n                    if payload.lower() in (\"0\",\"false\",\"off\"):\n                        value = False\n                    elif payload.lower() in (\"1\",\"true\",\"on\"):\n                        value = True\n                elif topic[4] in (\"bri\", \"ct\", \"sat\", \"hue\"):\n                    value = int(message.payload)\n                \n                if(value != None):\n                    logger.info('Set light \"' + topic[3] + '\" ' + topic[4] + ' to ' + str(value))\n                    self.BridgeThread.bridge_queue.put(Thread(target=self.bridge.set_light, args=(light['name'], topic[4], value)))\n                else:\n                    logger.warn('Wrong value for light \"' + topic[3] + \"/\" + topic[4] + '\" : ' + payload)\n    \n    def mqtt_on_message_command(self,client, userdata, message):\n        topic = message.topic.split(\"/\")\n        if topic[2] == \"scan_sensors\":\n            logger.info(\"Scanning for new sensors\")\n            self.BridgeThread.bridge_queue.put(\n                Thread(target=self.bridge.scan_sensors))\n        elif topic[2] == \"scan_lights\":\n            logger.info(\"Scanning for new lights\")\n            self.BridgeThread.bridge_queue.put(\n                Thread(target=self.bridge.scan_sensors))\n    \n    def mqtt_on_message(self, client, userdata, message):\n        topic = message.topic\n        payload = message.payload.decode(\"utf-8\")\n        logger.info(\"MQTT message: \" + topic + \": \" + payload)\n\n\n    def mqtt_broker_reachable(self):\n        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n        s.settimeout(5)\n        try:\n            s.connect((self.config['mqtt_host'], int(self.config['mqtt_port'])))\n            s.close()\n            return True\n        except socket.error:\n            return False\n\n    def bridge_connect(self):\n        logger.info(\"bridge_connect\")\n        self.bridge = Bridge(ip=self.config['bridge_ip'], username=self.config['bridge_username'])\n        response = self.bridge.get_api()\n        if response.__class__ == list and 'error' in response[0]:\n            raise HueError(\"Connection to bridge failed: \" + response[0]['error']['description'])\n        else:\n            logger.info('Bridge connected!')\n            if self.bridge_timer:\n                self.bridge_timer.cancel()\n            \n            if 'websocketport' in response['config']:\n                ws = websocket.WebSocketApp(\"ws://{}:{}/\".format(\n                    self.config['bridge_ip'],\n                    response['config']['websocketport']),\n                    on_message = self.ws_on_message)\n                    #on_error = on_error,\n                    #on_close = on_close)\n                ws_worker = Thread(target = ws.run_forever, \n                    name = \"websocket\", daemon = True)\n                ws_worker.start()\n            \n            self.update_bridge()\n            self.mqtt_client.publish(self.config['mqtt_topic_prefix'] + \"/connected\", \"2\", 1, True)\n    
\n    def ws_on_message(self, message):\n        logger.debug(\"Received WS message: {}\".format(message))\n        # Update device\n        msg = json.loads(message)\n        if msg['e'] == 'changed':\n            dev = self.status[msg['r']][msg['id']]\n            if 'state' in msg:\n                ts = int(time.time()*1000)\n                dev['state'] = msg['state']\n                dev['val'] = self._get_val(msg['r'], dev['type'], \n                       dev['state'])\n                dev['lc'] = dev['ts']\n                dev['ts'] = ts\n                topic_prefix = ( self.config['mqtt_topic_prefix'] \n                    + '/status/' + msg['r'] + '/' + dev['name'] )\n                msg = json.dumps(dev)\n                self.mqtt_client.publish(topic_prefix, msg, 0 , True)\n    \n    def update_bridge(self):\n        logger.debug('Bridge update')\n        if self.bridge_timer:\n            self.bridge_timer.cancel() # \n        self.publish_status()\n        self.bridge_timer = Timer(self.poll_time, self.update_bridge)\n        self.bridge_timer.start()\n    \n    def _get_val(self, res, _type, state):\n        val = -1\n        if res == 'lights':\n            val = state['bri']\n        elif res == 'groups':\n            if state['any_on']:\n                if state['all_on']:\n                    val = \"all_on\"\n                else:\n                    val = \"any_on\"\n            else:\n                val = \"none_on\"\n        elif res == 'sensors':\n            if _type == 'ZHASwitch':\n                val = state['buttonevent']\n            elif _type == 'Daylight':\n                val = state['daylight']\n            else:\n                logger.warn(\"Unknown sensor type\")\n        \n        return val\n\n    def publish_status(self):\n        api=self.bridge.get_api()\n        for res in ('groups', 'lights', 'sensors'):\n            for dev_id in api[res]:\n                dev = api[res][dev_id]\n                state = {}\n                \n                if 'etag' in dev:\n                    if (dev_id in self.status[res]\n                        and 'etag' in self.status[res][dev_id]\n                        and self.status[res][dev_id]['etag'] == dev['etag']):\n                        # device has not changed\n                        continue\n                    else:\n                        state['etag'] = dev['etag']\n                ts = int(time.time()*1000)\n                \n                if 'manufacturername' in dev:\n                    state['manufacturername'] = dev['manufacturername']\n                if 'modelid' in dev:\n                    state['modelid'] = dev['modelid']\n                if 'name' in dev:\n                    state['name'] = dev['name']\n                if 'type' in dev:\n                    state['type'] = dev['type']\n                if 'uniqueid' in dev:\n                    state['uniqueid'] = dev['uniqueid']\n                if 'state' in dev:\n                    state['state'] = dev['state']\n                    state['val'] = self._get_val(res, state['type'], \n                           dev['state'])\n                \n                \n                if dev_id not in self.status[res]:\n                    logger.info('Discovered new ' + state['type'] \n                        + ': ' + str(state))\n                    self.status[res][dev_id] = {}\n                    self.status[res][dev_id]['ts'] = ts\n                    \n                \n                logger.debug('Status of device \"' + dev['name'] + '\" changed')\n                #import pdb;pdb.set_trace()\n                state['lc'] = self.status[res][dev_id]['ts']\n                state['ts'] = ts\n                \n                self.status[res][dev_id] = state\n                topic_prefix = ( self.config['mqtt_topic_prefix'] \n                    + '/status/' + res + '/' + dev['name'] )\n                msg = json.dumps(state)\n                self.mqtt_client.publish(topic_prefix, msg, 0 , True)\n\n\n    def start(self):\n        self.mqtt_connect()\n        try:\n            self.mqtt_client.loop_forever()\n        except (KeyboardInterrupt, SystemExit):\n            logger.info(\"Quitting\")\n            self.bridge_worker.bridge_queue.put(\"quit\")\n            if self.bridge_timer:\n                self.bridge_timer.cancel()\n            sys.exit()\n        except socket.error:\n            logger.error(\"Lost MQTT connection\")\n            sys.exit();\n","repo_name":"stefan-kuepper/hue-mqtt","sub_path":"huemqtt/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":12915,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"23393094851","text":"import imageio\r\nimport time\r\nimport numpy as np\r\nimport tensorflow as tf\r\nimport skimage as skimage\r\nfrom skimage import transform, color\r\nimport csv\r\nimport os\r\nimport random\r\nfrom random import randint\r\nfrom math import sqrt\r\n\r\nimport itertools as 
it\r\n\r\nfrom vizdoom import *\r\n\r\nfrom Network import DFP_Network\r\nfrom utils import *\r\n\r\n\r\nclass ExperienceBuffer():\r\n def __init__(self, buffer_size = 20000):\r\n self.buffer = []\r\n self.buffer_size = buffer_size\r\n \r\n def add(self,experience):\r\n if len(list(self.buffer)) + len(list(experience)) >= self.buffer_size:\r\n self.buffer[0:(len(list(experience))+len(list(self.buffer)))-self.buffer_size] = []\r\n self.buffer.extend(experience)\r\n \r\n def sample(self,size):\r\n return np.reshape(np.array(random.sample(self.buffer,size)),[size,10])\r\n \r\n \r\nclass Worker():\r\n def __init__(self,game,model_path,offsets,exp_buff,num_measurements,gif_path,exploration,xdim,ydim):\r\n self.offsets = offsets\r\n self.exp_buff = exp_buff\r\n self.model_path = model_path\r\n self.gif_path = gif_path\r\n self.episode_kills = []\r\n self.episode_lengths = []\r\n self.loss = []\r\n self.g_norm = []\r\n self.attack_cooldown = False\r\n self.last_hit_n_ago = 10\r\n self.num_measurements = num_measurements\r\n self.summary_writer = tf.summary.FileWriter(\"train_1\")\r\n self.g = [0.4,0,1,0.25,0.25,0.5,0.5]\r\n\r\n self.frame_skip = 4\r\n self.temp = 0.25\r\n self.env = game\r\n self.start_game(init=True)\r\n\r\n self.xdim = xdim\r\n self.ydim = ydim\r\n\r\n self.local_DFP = DFP_Network(self.action_group_lengths,len(offsets),num_measurements,self.xdim,self.ydim)\r\n \r\n self.exploration = exploration\r\n self.just_pressed_attack = False\r\n self.number_of_maps = 1\r\n self.timeout_steps = 150\r\n self.train_stats_every_n = 25\r\n self.test_episode = False\r\n self.test_every_n = 100\r\n self.test_for_n = 10\r\n self.test_counter = 1\r\n\r\n self.total_hits=[]\r\n #if set to 32 or higher over-fitting to recent experiences will likely occur\r\n #not sure how much lower it needs to be\r\n self.mini_batches_per_64exp = 1\r\n \r\n #reshape objective weights \r\n self.set_objective_weights(init=True)\r\n \r\n\r\n\r\n def set_objective_weights(self,init=False):\r\n #set objective weights to test weights or random training weights\r\n \r\n if self.test_episode:\r\n self.measurement_weights = self.g\r\n else:\r\n if np.random.uniform()>.75:\r\n self.measurement_weights = np.random.rand(self.num_measurements[1])\r\n else:\r\n self.measurement_weights = self.g\r\n \r\n self.objective_weights1 = np.tile(self.measurement_weights,len(self.move_actions)*(len(offsets)-2)).reshape(len(self.move_actions),len(offsets)-2,self.num_measurements[1])\r\n self.objective_weights2 = np.tile(self.measurement_weights,len(self.jump_actions)*(len(offsets)-2)).reshape(len(self.jump_actions),len(offsets)-2,self.num_measurements[1])\r\n self.objective_weights3 = np.tile(self.measurement_weights,len(self.use_actions)*(len(offsets)-2)).reshape(len(self.use_actions),len(offsets)-2,self.num_measurements[1])\r\n self.objective_weights4 = np.tile(self.measurement_weights,len(self.attack_actions)*(len(offsets)-2)).reshape(len(self.attack_actions),len(offsets)-2,self.num_measurements[1])\r\n \r\n #put more weight on closer predictions than later in objective function\r\n# if init==True:\r\n# discounts = []\r\n# for i in range(0,self.num_measurements[1]-1):\r\n# weight = 0.99 ** (offsets[i])\r\n# discounts.append(weight)\r\n# print(discounts)\r\n# self.temporal_discounting1 = np.tile(discounts,len(self.move_actions)*(len(offsets)-4)).reshape(len(self.move_actions),len(offsets)-4,self.num_measurements[1])\r\n# self.temporal_discounting2 = 
np.tile(discounts,len(self.jump_actions)*(len(offsets)-4)).reshape(len(self.jump_actions),len(offsets)-4,self.num_measurements[1])\r\n# self.temporal_discounting3 = np.tile(discounts,len(self.use_actions)*(len(offsets)-4)).reshape(len(self.use_actions),len(offsets)-4,self.num_measurements[1])\r\n# self.temporal_discounting4 = np.tile(discounts,len(self.attack_actions)*(len(offsets)-4)).reshape(len(self.attack_actions),len(offsets)-4,self.num_measurements[1])\r\n\r\n \r\n def start_game(self,init=False):\r\n if init:\r\n self.env.load_config(\"doom2.cfg\")\r\n #forward back strafe l,r turn l,r and speed 54 valid combinations\r\n moven = 7\r\n self.move_actions = [list(a) for a in it.product([0, 1], repeat=moven)]\r\n self.move_actions = [a for a in self.move_actions if a[0]+a[1]<2 and a[2]+a[3]<2 \r\n and a[5]+a[6]<2]\r\n #jump 2 valid combinations\r\n jumpn = 1\r\n self.jump_actions = [list(a) for a in it.product([0, 1], repeat=jumpn)]\r\n #use 2 valid combinatoins\r\n usen = 1\r\n self.use_actions = [list(a) for a in it.product([0, 1], repeat=usen)]\r\n\r\n #switch next or prev weapon or attack (only one at a time 4 valid combinations)\r\n attackn = 3\r\n self.attack_actions = [list(a) for a in it.product([0, 1], repeat=attackn)]\r\n self.attack_actions = [a for a in self.attack_actions if a[0]+a[1]+a[2]<2]\r\n \r\n #turn left or right (only one can't do both 3 possibilities) \r\n #turnn = 2\r\n #self.turn_actions = [list(a) for a in it.product([0,1], repeat=turnn)]\r\n #self.turn_actions = [a for a in self.turn_actions if a[0] + a[1] < 2]\r\n\r\n\r\n n_move_actions = len(self.move_actions)\r\n n_jump_actions = len(self.jump_actions)\r\n n_use_actions = len(self.use_actions)\r\n n_attack_actions = len(self.attack_actions)\r\n #n_turn_actions = len(self.turn_actions)\r\n self.action_group_lengths = [n_move_actions,n_jump_actions,n_use_actions,n_attack_actions]\r\n #gives a total of 864 valid combinations (catesian product of all sub action groups -> len=18*2*2*4*3)\r\n #but only a total of 29 outputs from the neural network (sum of outputs of each group -> 18+2+2+4+3=29)\r\n \r\n #self.actions = [list(a) for a in it.product([0, 1], repeat=n)]\r\n #self.actions = [a for a in self.actions if a[0]+a[1]<2 and a[2]+a[3]<2 and \r\n # a[7]+a[8]<2 and a[9]+a[10]+a[11]<2] \r\n \r\n #e.g. 
can't turn left and right at the same time\r\n print(\"Starting worker \")\r\n self.env.set_labels_buffer_enabled(True)\r\n self.env.set_console_enabled(True)\r\n #self.env.set_ticrate(10)\r\n self.env.init()\r\n else:\r\n map_num = randint(1,self.number_of_maps)\r\n #map_num = 1 #override to test \r\n if map_num<10:\r\n str_map_num = \"0\" + str(map_num)\r\n elif map_num<100:\r\n str_map_num = str(map_num)\r\n mapstr = \"map\" + str_map_num\r\n \r\n self.env.set_doom_map(mapstr)\r\n self.env.new_episode()\r\n\r\n altx_spawns = [-537.573,719.645,179.128,390.992,795.696,192.956]\r\n alty_spawns = [697.594,1591.212,1568.438,1129.852,553.442,1775.975]\r\n\r\n numaltspawns = 6\r\n altspawnnum = randint(1,numaltspawns+1)\r\n if altspawnnum < numaltspawns:\r\n xcoord = altx_spawns[altspawnnum-1]\r\n ycoord = alty_spawns[altspawnnum-1]\r\n warp_command = \"warp \" + str(xcoord) + \" \" + str(ycoord)\r\n self.env.send_game_command(warp_command)\r\n \r\n if np.random.uniform()>0.5:\r\n self.env.send_game_command(\"give shotgun\")\r\n \r\n self.timeout_steps = int(min(100 * 2**(self.episode_count/8000),400))\r\n\r\n def update_experience_memory(self,rollout):\r\n #takes a random sample of 1/4th of the experiences from the episode\r\n #and stores them into memory.\r\n \r\n \r\n rollout = np.reshape(rollout,[-1,10])\r\n measurements = np.stack(rollout[:,2],axis=0)\r\n m_present = measurements[:,(-num_predict_measurements):]\r\n targets = get_f(m_present,self.offsets) #Generate targets using measurements and offsets\r\n rollout[:,4] = list(zip(targets))\r\n \r\n #size = len(rollout) // 4\r\n #rollout = np.array(random.sample(rollout,size))\r\n \r\n self.exp_buff.add(list(zip(rollout)))\r\n \r\n\r\n def train(self,sess):\r\n #Get a batch of experiences from the buffer and use them to update the global network\r\n #to filter down to the chosen actions by batch.\r\n if len(self.exp_buff.buffer) > 64:\r\n exp_batch = self.exp_buff.sample(64)\r\n a_idx_array = np.vstack(exp_batch[:,1])\r\n #print(a_idx_array)\r\n a1 = action_indecies_to_tensor(a_idx_array[:,0],self.action_group_lengths[0])\r\n a2 = action_indecies_to_tensor(a_idx_array[:,1],self.action_group_lengths[1])\r\n a3 = action_indecies_to_tensor(a_idx_array[:,2],self.action_group_lengths[2])\r\n a4 = action_indecies_to_tensor(a_idx_array[:,3],self.action_group_lengths[3])\r\n \r\n m_in_prepped = np.stack(exp_batch[:,2],axis=0)\r\n m_in_prepped = self.prep_m(np.copy(m_in_prepped[:,:num_observe_measurements]),levels=True,verbose=False)\r\n \r\n target_m_prepped = np.stack(exp_batch[:,4],axis=0)\r\n target_m_prepped = target_m_prepped[:,0,:,:]\r\n target_m_prepped = self.prep_m(np.copy(target_m_prepped),levels=False,verbose=False)\r\n \r\n ahists = np.stack(exp_batch[:,5],axis=0)\r\n\r\n memstate1 = np.stack(exp_batch[:,6],axis=0)\r\n memstate2 = np.stack(exp_batch[:,7],axis=0)\r\n memstate3 = np.stack(exp_batch[:,8],axis=0)\r\n memstate4 = np.stack(exp_batch[:,9],axis=0)\r\n \r\n observations = np.stack(exp_batch[:,0],axis=0)\r\n \r\n \r\n feed_dict = {self.local_DFP.observation:observations,\r\n self.local_DFP.measurements:m_in_prepped,\r\n self.local_DFP.action_history:ahists,\r\n self.local_DFP.a1_chosen:a1,\r\n self.local_DFP.a2_chosen:a2,\r\n self.local_DFP.a3_chosen:a3,\r\n self.local_DFP.a4_chosen:a4,\r\n self.local_DFP.target:target_m_prepped,\r\n self.local_DFP.goals:np.vstack(exp_batch[:,3]),\r\n self.local_DFP.memcache_l1:memstate1,\r\n self.local_DFP.memcache_l2:memstate2,\r\n self.local_DFP.memcache_l3:memstate3,\r\n 
self.local_DFP.memcache_l4:memstate4,\r\n self.local_DFP.episodes:self.episode_count}\r\n loss,g_n,v_n,_ = sess.run([self.local_DFP.loss,\r\n self.local_DFP.grad_norms,\r\n self.local_DFP.var_norms,\r\n self.local_DFP.apply_grads],feed_dict=feed_dict)\r\n return loss, g_n,v_n\r\n else:\r\n return 0,0,0\r\n \r\n def network_pass_to_actions(self,a1_dist,a2_dist,a3_dist,a4_dist):\r\n #convert forward pass of network into indecies which indicate\r\n #which action from each group is most advantageous according to\r\n #current measurment goal\r\n\r\n a1_pred = a1_dist[0,:,2:,:] * self.objective_weights1 #* self.temporal_discounting1\r\n a1_pred=np.sum(a1_pred,axis=2)\r\n a1_pred=np.sum(a1_pred,axis=1)\r\n a1 = np.argmax(a1_pred)\r\n \r\n a2_pred = a2_dist[0,:,2:,:] * self.objective_weights2 #* self.temporal_discounting2\r\n a2_pred=np.sum(a2_pred,axis=2)\r\n a2_pred=np.sum(a2_pred,axis=1)\r\n a2 = np.argmax(a2_pred)\r\n \r\n a3_pred = a3_dist[0,:,2:,:] * self.objective_weights3 #* self.temporal_discounting3\r\n a3_pred=np.sum(a3_pred,axis=2)\r\n a3_pred=np.sum(a3_pred,axis=1)\r\n a3 = np.argmax(a3_pred)\r\n \r\n a4_pred = a4_dist[0,:,2:,:] * self.objective_weights4 #* self.temporal_disocunting4\r\n a4_pred=np.sum(a4_pred,axis=2)\r\n a4_pred=np.sum(a4_pred,axis=1)\r\n a4 = np.argmax(a4_pred) \r\n\r\n return a1,a2,a3,a4 \r\n\r\n def choose_action(self,s4,m4,ahistory):\r\n if self.exploration == 'bayesian':\r\n \r\n explore = not self.test_episode\r\n \r\n m_prepped = self.prep_m(m4,levels=True)[0,:]\r\n out_tensors = [self.local_DFP.merged_input2,self.local_DFP.mem1_dense1,\r\n self.local_DFP.mem2_dense1,self.local_DFP.mem3_dense1,\r\n self.local_DFP.a1_advantages2,self.local_DFP.a2_advantages2,\r\n self.local_DFP.a3_advantages2,self.local_DFP.a4_advantages2]\r\n\r\n present,mem1,mem2,mem3,a1_dist, a2_dist, a3_dist, a4_dist = sess.run(out_tensors, \r\n feed_dict={\r\n self.local_DFP.observation:[s4],\r\n self.local_DFP.measurements:[m_prepped],\r\n self.local_DFP.goals:[self.measurement_weights],\r\n self.local_DFP.action_history:[ahistory],\r\n self.local_DFP.exploring:explore,\r\n self.local_DFP.memcache_l1:[self.memcache_1],\r\n self.local_DFP.memcache_l2:[self.memcache_2],\r\n self.local_DFP.memcache_l3:[self.memcache_3],\r\n self.local_DFP.memcache_l4:[self.memcache_4],\r\n self.local_DFP.episodes:self.episode_count})\r\n\r\n present = np.reshape(present,[1,512])\r\n self.memcache_1 = np.append(present,self.memcache_1[:4,:],axis=0)\r\n if self.episode_steps % 5 == 0:\r\n mem1 = np.reshape(mem1,[1,256])\r\n self.memcache_2 = np.append(mem1,self.memcache_2[:5,:],axis=0)\r\n if self.episode_steps % 25 == 0:\r\n mem2 = np.reshape(mem2,[1,256])\r\n self.memcache_3 = np.append(mem2,self.memcache_3[:5,:],axis=0)\r\n if self.episode_steps % 125 == 0:\r\n mem3 = np.reshape(mem3,[1,256])\r\n self.memcache_4 = np.append(mem3,self.memcache_4[:5,:],axis=0)\r\n \r\n \r\n a1,a2,a3,a4 = self.network_pass_to_actions(a1_dist,a2_dist,a3_dist,a4_dist)\r\n \r\n else:\r\n raise ValueError('Exploration policy,',exploration,\r\n ', is undefined. 
Please implement policy in Worker.choose_action')\r\n \r\n\r\n if self.attack_cooldown>0:\r\n a4 = self.attack_action_in_progress\r\n self.attack_cooldown -= 1\r\n if (a4==1 or a4==2) and self.attack_cooldown==1:\r\n a4=0 #need to release switch weapon button to be able to switch weapons again on next step!\r\n self.attack_action_in_progress = 0\r\n self.just_pressed_attack = False\r\n\r\n else:\r\n self.attack_action_in_progress = a4\r\n\r\n if a4==1 or a4==2:\r\n self.attack_cooldown = 8 #on the 9th step after pressing switch weapons, the agent will actually fire if fire is pressed\r\n elif a4==3:\r\n self.just_pressed_attack = True\r\n if self.selected_weapon[-1]==2:\r\n self.attack_cooldown = 3\r\n elif self.selected_weapon[-1]==3:\r\n self.attack_cooldown = 7\r\n elif self.selected_weapon[-1]==1:\r\n self.attack_cooldown = 3\r\n elif a4==0:\r\n self.attack_cooldown = 0 #doing nothing has no cooldown\r\n \r\n\r\n #action_array is an action accepted by Vizdoom engine\r\n a = np.asarray([a1,a2,a3,a4])\r\n action_array = np.concatenate((self.move_actions[a1],self.jump_actions[a2],self.use_actions[a3],self.attack_actions[a4])).tolist()\r\n return a,action_array\r\n\r\n def process_m(self,m_raw):\r\n \r\n \r\n #n_weapons = sum(m_raw[8:17]) / 4.5 - 1\r\n \r\n self.selected_weapon = [self.selected_weapon[-1],m_raw[1]]\r\n self.selected_ammo = [self.selected_ammo[-1],m_raw[0]]\r\n self.successfully_fired_shot = (self.selected_weapon[1]==self.selected_weapon[0]) and (self.selected_ammo[1] < self.selected_ammo[0])\r\n \r\n fist_active = 1 if self.selected_weapon[-1]==1 else 0\r\n pistol_active = 1 if self.selected_weapon[-1]==2 else 0\r\n shotgun_active = 1 if self.selected_weapon[-1]==3 else 0\r\n\r\n #weap1 = 1 if fist only and =2 if fist and chainsaw\r\n weapon1 = 1 if m_raw[9]>1 else 0\r\n #weap2 = 1 if pistol\r\n weapon2 = 1 if m_raw[10]>0 else 0\r\n #weap3 = 1 if shotty and =2 if also super shotty\r\n weapon3 = 1 if m_raw[11]>0 else 0\r\n\r\n #ammo2 = pistol bullets\r\n ammo2 = m_raw[20]\r\n #ammo3 = shotgun shells\r\n ammo3 = m_raw[21]\r\n \r\n health = m_raw[2]\r\n armor = m_raw[3]\r\n \r\n #all_kills includes monsters killing other monsters which can be very confusing\r\n #in the early stages of training/exploring as the agent will get 3-6 kills totally randomly\r\n self.all_kills.append(m_raw[5] )\r\n \r\n items = m_raw[4] \r\n \r\n \r\n self.episode_xpos.append(m_raw[6])\r\n self.episode_ypos.append(m_raw[7])\r\n if len(self.episode_xpos) > 1:\r\n\r\n area_explored = compute_circles_visited(self.episode_xpos,self.episode_ypos,verbose=False)\r\n self.episode_explored.append(area_explored)\r\n\r\n #labels has info about visible objects including enemies (used for hit detection)\r\n labels = self.state.labels\r\n agent = [self.episode_xpos[-1],self.episode_ypos[-1],m_raw[29]]\r\n using_melee = True if fist_active else False\r\n hit_scored = detect_hits(labels,agent,melee=using_melee)\r\n\r\n if hit_scored and self.attack_action_in_progress==3:\r\n #if aiming close to visible enemy and attack action in progress we score a \"hit\"\r\n self.hits += 1\r\n self.last_hit_n_ago = 0\r\n \r\n if self.last_hit_n_ago<=3:\r\n #if within 3 steps we scored a \"hit\" and an enemy dies we score a \"kill\"\r\n self.last_hit_n_ago+=1\r\n current_kills = self.all_kills[-1] - self.all_kills[-2]\r\n self.direct_kills = self.direct_kills + current_kills \r\n\r\n else: \r\n area_explored = 0\r\n dist_traveled = 0\r\n \r\n m = [weapon1,weapon2,weapon3,fist_active,pistol_active,\r\n 
shotgun_active,health,armor,self.direct_kills,ammo2,ammo3,\r\n self.hits,area_explored]\r\n #m = [health,kills,pistol_ammo]\r\n\r\n return m \r\n \r\n def prep_m(self,m,levels=False,verbose=False):\r\n #takes numpy array (?,num_measurements) and normalizes for network\r\n #can normalize in levels (i.e. for input to M) or changes (i.e. for output target)\r\n \r\n \r\n if levels:\r\n #measurements represent running totals or current value in case of health\r\n m = np.reshape(m,[-1,num_observe_measurements])\r\n m[:,0] = m[:,0]/2 #weap1\r\n m[:,1] = m[:,1]/1 #weap2\r\n m[:,2] = m[:,2]/1 #weap3\r\n m[:,3] = m[:,3]/1 #fist\r\n m[:,4] = m[:,4]/1 #pistol\r\n m[:,5] = m[:,5]/1 #shotgun\r\n m[:,6] = m[:,6]/50 - 1 #health\r\n m[:,7] = m[:,7]/50 - 1 #armor\r\n m[:,8] = m[:,8]/10 - 1 #kills\r\n m[:,9] = m[:,9]/40 - 1 #ammo2\r\n m[:,10] = m[:,10]/10 - 1 #ammo3\r\n\r\n \r\n if verbose:\r\n print(\"range level Weapon1: \", np.amin(m[:,0]),\" to \",np.amax(m[:,0]))\r\n print(\"range level Weapon2: \",np.amin(m[:,1]), \" to \",np.amax(m[:,1]))\r\n print(\"range level Weapon3: \",np.amin(m[:,2]), \" to \",np.amax(m[:,2]))\r\n print(\"range level Health: \", np.amin(m[:,3]),\" to \",np.amax(m[:,3]))\r\n print(\"range level Armor: \",np.amin(m[:,4,]), \" to \",np.amax(m[:,4]))\r\n print(\"range level Kills: \",np.amin(m[:,5]), \" to \",np.amax(m[:,5]))\r\n print(\"range level Ammo2: \",np.amin(m[:,6]), \" to \",np.amax(m[:,6]))\r\n print(\"range level Ammo3: \",np.amin(m[:,7]), \" to \",np.amax(m[:,7]))\r\n\r\n\r\n else:\r\n m[:,:,0] = m[:,:,0]/75 #health\r\n m[:,:,1] = m[:,:,1]/75 #armor\r\n m[:,:,2] = m[:,:,2]/2 - 1 #kills\r\n m[:,:,3] = m[:,:,3]/15 #ammo2\r\n m[:,:,4] = m[:,:,4]/10 #ammo3\r\n m[:,:,5] = m[:,:,5]/20 - 1 #hits needs minus 1\r\n m[:,:,6] = m[:,:,6]/5 - 1 #explored\r\n\r\n\r\n \r\n \r\n if verbose:\r\n print(\"range delta health: \", np.amin(m[:,:,0]),\" to \",np.amax(m[:,:,0]))\r\n print(\"range delta armor: \",np.amin(m[:,:,1]), \" to \",np.amax(m[:,:,1]))\r\n print(\"range delta kills: \",np.amin(m[:,:,2]), \" to \",np.amax(m[:,:,2]))\r\n print(\"range delta ammo2: \", np.amin(m[:,:,3]),\" to \",np.amax(m[:,:,3]))\r\n print(\"range delta ammo3: \",np.amin(m[:,:,4]), \" to \",np.amax(m[:,:,4]))\r\n print(\"range delta hits: \",np.amin(m[:,:,5]), \" to \",np.amax(m[:,:,5]))\r\n print(\"range delta explore: \",np.amin(m[:,:,6]), \" to \",np.amax(m[:,:,6]))\r\n \r\n return m\r\n \r\n def work(self,sess,saver,train):\r\n self.mini_batch_iterations = 0\r\n self.episode_count = 35099\r\n total_steps = 0\r\n prevsteps=0\r\n start_time = time.time()\r\n self.total_explored = []\r\n reset_stats=False\r\n while True:\r\n self.selected_ammo = [0,0]\r\n self.selected_weapon = [0,0]\r\n self.hits = 0\r\n self.episode_buffer = []\r\n episode_frames = []\r\n episode_finished = False\r\n self.episode_steps = 1\r\n self.episode_xpos = []\r\n self.episode_ypos = []\r\n self.episode_explored = []\r\n self.direct_kills = 0\r\n self.all_kills = [0]\r\n self.episode_count +=1\r\n self.memcache_1 = np.zeros(shape=[5,512])\r\n self.memcache_2 = np.zeros(shape=[6,256])\r\n self.memcache_3 = np.zeros(shape=[6,256])\r\n self.memcache_4 = np.zeros(shape=[6,256])\r\n #every 50 episodes test for 10 episodes (test means 0 epsilon greedy exploration and set objective weights)\r\n if self.test_episode:\r\n self.test_counter += 1\r\n if self.test_counter > self.test_for_n:\r\n self.test_counter = 1\r\n self.test_episode = False\r\n else:\r\n self.test_episode = (self.episode_count % self.test_every_n == 0)\r\n \r\n 
self.set_objective_weights()\r\n self.start_game()\r\n \r\n self.state = self.env.get_state()\r\n m_raw = self.state.game_variables\r\n m = self.process_m(m_raw)\r\n \r\n s = self.state.screen_buffer\r\n s = skimage.transform.resize(s,(self.xdim,self.ydim,3))\r\n s = skimage.color.rgb2lab(s)\r\n s[:,:,0] = s[:,:,0]/50 - 1\r\n s[:,:,1] = s[:,:,1]/128\r\n s[:,:,2] = s[:,:,2]/128\r\n sbuffer = np.stack(([s]*2), axis=2) \r\n sbuffer = np.reshape(sbuffer,[120,160,6])\r\n\r\n abuffer = np.zeros([27,12])\r\n\r\n steps_per_sec = (total_steps-prevsteps)//(time.time()-start_time)\r\n prevsteps = total_steps\r\n start_time = time.time()\r\n\r\n while episode_finished == False:\r\n\r\n #update experience memory to work with new network\r\n #we need to remember the state of the memory cache\r\n #at each experience for traiing\r\n #very important to pass a copy of m4 and not m4, otherwise mbuffer will be permanently modified\r\n #s4 = sbuffer[:,:,[0,8,17,26]]\r\n #m4 = mbuffer[:,[0,7]]\r\n #a4 = abuffer[:,[0,8,17,26]]\r\n a,action_chosen = self.choose_action(sbuffer,np.copy(m[:num_observe_measurements]),abuffer) \r\n if not recording:\r\n self.env.make_action(action_chosen,self.frame_skip) \r\n else:\r\n self.env.set_action(action_chosen)\r\n for _ in range(self.frame_skip):\r\n self.env.advance_action()\r\n \r\n self.episode_buffer.append([sbuffer,a,m,self.measurement_weights,np.zeros(len(self.offsets)),abuffer,\r\n np.copy(self.memcache_1),np.copy(self.memcache_2),np.copy(self.memcache_3),np.copy(self.memcache_4)])\r\n \r\n if self.env.is_episode_finished():\r\n episode_finished=True \r\n else:\r\n \r\n self.state = self.env.get_state()\r\n m_raw = self.state.game_variables\r\n m = self.process_m(m_raw) \r\n\r\n srgb = self.state.screen_buffer\r\n \r\n srgb = skimage.transform.resize(srgb,(self.xdim,self.ydim,3))\r\n s = skimage.color.rgb2lab(srgb)\r\n s[:,:,0] = s[:,:,0]/50 - 1\r\n s[:,:,1] = s[:,:,1]/128\r\n s[:,:,2] = s[:,:,2]/128\r\n \r\n s = np.reshape(s, (self.xdim, self.ydim, 3))\r\n\r\n sbuffer = np.append(s, sbuffer[:,:, :3], axis=2)\r\n\r\n abuffer = np.append(np.reshape(action_chosen,[1,12]),abuffer[:26,:],axis=0)\r\n \r\n if self.test_episode and self.test_counter==self.test_for_n:\r\n srgb = srgb[:,:,::-1]\r\n episode_frames.append(srgb)\r\n \r\n total_steps += 1 \r\n self.episode_steps += 1\r\n \r\n if self.episode_steps > 3000:\r\n #end episode after ~6 minutes\r\n episode_finished = True\r\n elif self.episode_steps>self.timeout_steps:\r\n if self.episode_explored[-1] - self.episode_explored[-self.timeout_steps] == 0 and self.all_kills[-1] - self.all_kills[-self.timeout_steps] == 0:\r\n #end episode if we have not explored anywhere new or got any kills for a period of time\r\n episode_finished = True\r\n \r\n self.episode_kills.append(self.direct_kills)\r\n self.episode_lengths.append(self.episode_steps*4/35)\r\n self.total_explored.append(self.episode_explored[-1])\r\n self.total_hits.append(self.hits)\r\n\r\n\r\n \r\n # Update the network using the experience buffer at the end of the episode.\r\n self.update_experience_memory(self.episode_buffer)\r\n if train == True and total_steps>20000:\r\n losses = []\r\n norms = []\r\n iterations = (self.mini_batches_per_64exp * self.episode_steps) // 64\r\n for i in range(1,int(iterations)):\r\n loss,g_n,v_n = self.train(sess)\r\n losses.append(loss)\r\n norms.append(g_n)\r\n self.loss.append(np.mean(losses))\r\n self.g_norm.append(np.mean(norms))\r\n self.mini_batch_iterations += iterations\r\n print(\"Avg Loss: \", self.loss[-1], \"Average 
g_norm: \", self.g_norm[-1],\r\n \"Total Iterations: \", self.mini_batch_iterations)\r\n \r\n \r\n # Periodically save gifs of episodes, model parameters, and summary statistics.\r\n if self.episode_count % 100 == 0 and train == True:\r\n saver.save(sess,self.model_path+'/model-'+str(self.episode_count)+'.ckpt')\r\n print(\"Saved Model\")\r\n\r\n if self.test_episode and self.test_counter==self.test_for_n:\r\n mean_kills = np.mean(self.episode_kills)\r\n mean_length = np.mean(self.episode_lengths)\r\n mean_explored = np.mean(self.total_explored)\r\n mean_hits = np.mean(self.total_hits)\r\n time_per_step = 1/35*4\r\n self.images = np.array(episode_frames)\r\n imageio.mimwrite(self.gif_path+'/image'+str(self.episode_count)+'.gif',self.images,duration=time_per_step)\r\n savelist = [self.episode_count,total_steps,mean_length,mean_kills,mean_hits,mean_explored,steps_per_sec]\r\n with open('teststats8700.csv', 'a') as myfile:\r\n wr = csv.writer(myfile)\r\n wr.writerow(['{:.2f}'.format(x) for x in savelist])\r\n \r\n reset_stats=True\r\n \r\n if self.episode_count % self.train_stats_every_n==0 and total_steps>30000:\r\n mean_kills = np.mean(self.episode_kills)\r\n mean_length = np.mean(self.episode_lengths)\r\n mean_explored = np.mean(self.total_explored)\r\n mean_hits = np.mean(self.total_hits)\r\n summary = tf.Summary()\r\n summary.value.add(tag='Performance/Kills', simple_value=float(mean_kills))\r\n summary.value.add(tag='Performance/Length', simple_value=float(mean_length))\r\n summary.value.add(tag='Performance/Exploration', simple_value=float(mean_explored))\r\n summary.value.add(tag='Performance/hits', simple_value=float(mean_hits))\r\n if train == True:\r\n summary.value.add(tag='Losses/Loss', simple_value=float(loss))\r\n summary.value.add(tag='Losses/Grad Norm', simple_value=float(g_n))\r\n self.summary_writer.add_summary(summary, self.episode_count) \r\n self.summary_writer.flush()\r\n \r\n reset_stats=True\r\n \r\n print(\"episodes: \",self.episode_count,\", Total Experiences: \",total_steps,\"Steps per second: \",\r\n steps_per_sec, \"Episode Kills: \", self.episode_kills[-1], \"Explored: \", self.total_explored[-1],\r\n \"Episode Length: \", int(self.episode_lengths[-1]), \" seconds\",)\r\n print(\"Total Hits: \", self.hits, \"Episode Goal: \",self.measurement_weights, \"Exploration: \", self.exploration,\r\n \"Testing? 
\" , self.test_episode,\"Timeout Steps\",self.timeout_steps,\"learning rate\",3e-4 * 0.5**(self.episode_count/9000)) \r\n \r\n if reset_stats:\r\n self.episode_kills=[]\r\n self.episode_lengths = []\r\n self.total_explored = []\r\n self.total_hits=[]\r\n reset_stats=False\r\n \r\n \r\n \r\n \r\n\r\nif __name__ == '__main__':\r\n \r\n numactions = 576\r\n num_total_measurements = 13\r\n num_observe_measurements = 11 #Number of observed measurements\r\n num_predict_measurements = 7 #number of predicted measurements\r\n offsets = [2,4,8,16,32,64] # Set of temporal offsets\r\n load_model = True #ther to load a saved model\r\n train = True #Whether to train the network\r\n model_path = 'C:/Users/djdev/Documents/tensorflow models/doom2_entryway_memory' #Path to save the model to\r\n gif_path = './frames_goals' #Path to save gifs of agent performance to\r\n exploration = 'bayesian'\r\n \r\n recording = True #enables smooth playback for agent recording\r\n\r\n #frame dimensions\r\n xdim = 120\r\n ydim = 160\r\n \r\n tf.reset_default_graph()\r\n \r\n exp_buff = ExperienceBuffer()\r\n \r\n if not os.path.exists(model_path):\r\n os.makedirs(model_path)\r\n \r\n if not os.path.exists(gif_path):\r\n os.makedirs(gif_path)\r\n \r\n with open('teststats8700.csv', 'w') as myfile:\r\n wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)\r\n wr.writerow([\"Total Episodes\",\"Total Steps\",\"Length\",\"Kills\",\"Circles Explored\",\"Steps Per Second\"])\r\n \r\n \r\n\r\n # Create worker classes\r\n agent = Worker(DoomGame(),model_path,offsets,\r\n exp_buff,[num_observe_measurements,num_predict_measurements],\r\n gif_path,exploration,xdim,ydim)\r\n saver = tf.train.Saver(max_to_keep=5,keep_checkpoint_every_n_hours=2)\r\n \r\n with tf.Session() as sess:\r\n if load_model == True:\r\n print('Loading Model...')\r\n ckpt = tf.train.get_checkpoint_state(model_path)\r\n ckpt = 'C:/Users/djdev/Documents/tensorflow models/doom2_entryway_memory/model-35100.ckpt'\r\n saver.restore(sess,ckpt)\r\n else:\r\n sess.run(tf.global_variables_initializer())\r\n agent.work(sess,saver,True)\r\n","repo_name":"devinjdangelo/Doom2DFP","sub_path":"Models/Doom DFP/agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":33557,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"5703149203","text":"from flask import render_template, request, redirect, current_app\nfrom os import urandom\nimport os\n\nDEFAULT_CONFIG = {\n \"base\": {\n \"SECRET_KEY\": {\n \"alias\": \"密钥\",\n \"default\": str(urandom(24)),\n \"help\": \"\"\n },\n \"SECRET_KEY_SALT\": {\n \"alias\": \"密钥加盐\",\n \"default\": \"\",\n \"help\": \"\"\n }\n },\n \"db\": {\n \"SQLALCHEMY_DATABASE_URI\": {\n \"alias\": \"数据库\",\n \"default\": \"\",\n \"help\": \"\"\n },\n \"SQLALCHEMY_TRACK_MODIFICATIONS\": {\n \"alias\": \"数据自动更新\",\n \"default\": False,\n \"help\": \"\"\n }\n },\n \"mail\": {\n \"MAIL_SERVER\": {\n \"alias\": \"邮箱服务器\",\n \"default\": \"\",\n \"help\": \"\"\n },\n \"MAIL_PORT\": {\n \"alias\": \"邮箱端口\",\n \"default\": 25,\n \"help\": \"\"\n },\n \"MAIL_USE_TLS\": {\n \"alias\": \"邮箱是否使用TLS\",\n \"default\": [True, False],\n \"help\": \"\"\n },\n \"MAIL_USE_SSL\": {\n \"alias\": \"邮箱是否使用SSL\",\n \"default\": [True, False],\n \"help\": \"\"\n },\n \"MAIL_USERNAME\": {\n \"alias\": \"邮箱名\",\n \"default\": \"\",\n \"help\": \"\"\n },\n \"MAIL_PASSWORD\": {\n \"alias\": \"邮箱密码\",\n \"default\": \"\",\n \"help\": \"\"\n },\n \"MAIL_DEFAULT_SENDER\": {\n \"alias\": \"默认发送邮箱\",\n \"default\": \"\",\n 
\"help\": \"\"\n }\n },\n \"cache\": {\n \"CACHE_TYPE\": {\n \"alias\": \"缓存类型\",\n \"default\": \"\",\n \"help\": \"\"\n },\n \"CACHE_DEFAULT_TIMEOUT\": {\n \"alias\": \"缓存超时\",\n \"default\": 60,\n \"help\": \"\"\n },\n \"CACHE_KEY_PREFIX\": {\n \"alias\": \"缓存prefix\",\n \"default\": \"\",\n \"help\": \"\"\n },\n \"CACHE_REDIS_HOST\": {\n \"alias\": \"redis缓存HOST\",\n \"default\": \"\",\n \"help\": \"\"\n },\n \"CACHE_REDIS_PORT\": {\n \"alias\": \"redis缓存端口\",\n \"default\": 30,\n \"help\": \"\"\n },\n \"CACHE_REDIS_PASSWORD\": {\n \"alias\": \"redis缓存密码\",\n \"default\": \"\",\n \"help\": \"\"\n },\n \"CACHE_REDIS_DB\": {\n \"alias\": \"redis缓存数据库\",\n \"default\": 2,\n \"help\": \"\"\n },\n \"CACHE_NO_NULL_WARNING\": {\n \"alias\": \"缓存为空不警告\",\n \"default\": [True, False],\n \"help\": \"\"\n },\n }\n}\n\n\nclass Install(object):\n def __init__(self, app=None, config=DEFAULT_CONFIG):\n self.app = app\n self.config = config\n if app is not None:\n self.init_app(app)\n\n def init_app(self, app):\n app.add_url_rule(\n '/install', view_func=self.install, methods=[\"GET\", \"POST\"])\n\n def install(self):\n if request.method == \"POST\":\n data = \"\\n\".join([\"{0} = \\\"{1}\\\"\".format(key, value)\n for key, value in request.form.items()])\n data = \"#!/usr/bin/env python\\n# -*- coding: utf-8 -*-\\n\" + data\n cfg_path = os.path.join(current_app.root_path, os.pardir, 'c.py')\n with open(cfg_path, 'w') as f:\n f.write(data)\n return redirect(\"/install\")\n return render_template('maple/install.html', config=self.config)\n","repo_name":"hnynes/ITforums","sub_path":"forum/Lib/site-packages/flask_maple/install.py","file_name":"install.py","file_ext":"py","file_size_in_byte":3589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"17045714522","text":"import sys\n\ndef frequency(word):\n freqs = {}\n for char in word:\n freqs[char] = freqs.get(char, 0) + 1\n return freqs\n\ndef sortFreq(freqs):\n letters = freqs.keys()\n tuples = []\n for l in letters:\n tuples.append((freqs[l], l))\n tuples.sort()\n return tuples\n\ndef get_key(lst): # Used as key in sorted function\n return lst[0]\n\ndef build_trees(tuples):\n while len(tuples) > 1:\n least_two = tuples[0:2]\n rest = tuples[2:]\n freq_sum = least_two[0][0] + least_two[1][0]\n tuples = rest + [(freq_sum, least_two)]\n sorted(tuples, key=get_key)\n return tuples[0]\n\ndef trim_tree (tree) :\n # Trim the freq counters off, leaving just the letters\n p = tree[1] # ignore freq count in [0]\n if type(p) == str :\n return p # if just a leaf, return it\n else :\n return (trim_tree(p[0]), trim_tree(p[1]))# trim left then right and recombine\n\ndef assignCodes (node, pat='') :\n global codes\n if type(node) == type(\"\") :\n codes[node] = pat # A leaf. set its code\n else :\n assignCodes(node[0], pat+\"0\") # Branch point. Do the left branch\n assignCodes(node[1], pat+\"1\") # then do the right branch.\n\ndef encode (word) :\n global codes\n binary_output = \"\"\n for ch in word:\n binary_output += codes[ch]\n return binary_output\n\n\ndef decode (tree, str) :\n output = \"\"\n p = tree\n for bit in str :\n if bit == '0' :\n p = p[0] # Head up the left branch\n else:\n p = p[1] # or up the right branch\n if type(p) == type(\"\") :\n output += p # found a character. 
Add to output\n            p = tree                  # and restart for next character\n    return output\n\ndef huffman_encoding(data):\n    if len(data) < 1:\n        raise Exception('data is empty')\n\n    global codes\n    freqs = frequency(data)\n    tuples = sortFreq(freqs)\n    tree = build_trees(tuples)\n    trim = trim_tree(tree)\n    if type(tree[1]) == str:\n        codes[tree[1]] = '0'\n    else:\n        assignCodes(trim)\n    return encode(data), trim\n\ndef huffman_decoding(data,tree):\n    return decode(tree, data)\n\n\ndef build_test_case(a_great_sentence, test_no=1):\n    print(\"Test Case: \", test_no)\n\n    data_size = sys.getsizeof(a_great_sentence)\n\n    print(\"The size of the data is: {}\".format(data_size))\n    print(\"The content of the data is: {}\".format(a_great_sentence))\n\n    try:\n        encoded_data, tree = huffman_encoding(a_great_sentence)\n    except Exception as e:\n        print(str(e))\n        return\n    encoded_data_size = sys.getsizeof(int(encoded_data, base=2))\n\n    print(\"The size of the encoded data is: {}\".format(encoded_data_size))\n    print(\"The content of the encoded data is: {}\".format(encoded_data))\n\n    decoded_data = huffman_decoding(encoded_data, tree)\n    decoded_data_size = sys.getsizeof(decoded_data)\n\n    print (\"The size of the decoded data is: {}\".format(decoded_data_size))\n    print (\"The content of the decoded data is: {}\".format(decoded_data))\n    if (data_size == decoded_data_size) and \\\n        (a_great_sentence == decoded_data):\n        print('pass\\n')\n    else:\n        print('fail\\n')\n\nif __name__ == '__main__':\n\n    codes = {}\n    build_test_case(\"The bird is the word\", 1)\n    #pass\n    build_test_case(\"a\", 2)\n    #pass \n    build_test_case(\"\", 3)\n    #data is empty\n    build_test_case(\"aaaa\", 4)\n    #pass\n","repo_name":"siam923/Data-Stucture-Algorithm","sub_path":"DataStructure/problem_3.py","file_name":"problem_3.py","file_ext":"py","file_size_in_byte":3514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"31770185250","text":"from odoo import api, fields, models\n\nclass StockBackorderConfirmation(models.TransientModel):\n    _inherit = 'stock.backorder.confirmation'\n\n    @api.model\n    def default_get(self, fields):\n        res = super().default_get(fields)\n        res['log_user_id'] = self.pick_ids.log_user_id.id\n        return res","repo_name":"praw-odoo/task","sub_path":"user_po/models/backorder.py","file_name":"backorder.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"73363908111","text":"import sqlite3\n\nclass Client:\n    \n    def __init__(self):\n        self.TABLE = \"clients\"\n    \n    def open(self):\n        connection=sqlite3.connect(\"./bd_tkinter_tp.db\")\n        return connection\n    \n    def createClientsTable(self):\n        connection=self.open()\n\n        connection.execute(\"\"\"CREATE TABLE if NOT EXISTS clients (\n                    id integer primary key autoincrement,\n                    first_name text,\n                    last_name text,\n                    street_name text,\n                    house_number integer\n                    )\"\"\")\n        connection.close()\n    \n    def add(self, data):\n        connection=self.open()\n        cursor=connection.cursor()\n        sql=f\"INSERT into {self.TABLE}(first_name, last_name, street_name, house_number) values (?,?,?,?)\"\n        cursor.execute(sql, data)\n        connection.commit()\n        connection.close()\n\n    def get_clients(self):\n        connection=self.open()\n        cursor=connection.cursor()\n        # SQL query to retrieve all client last names.\n        sql=\"SELECT last_name FROM clients\"\n        # Executes the query and saves all results in the data variable using 
fetchall.\n        cursor.execute(sql)\n        data = cursor.fetchall()\n        # Closes the connection and returns the data.\n        connection.close()\n        return data\n\n    def get_client_id(self, data):\n        connection=self.open()\n        cursor=connection.cursor()\n        sql=\"SELECT id FROM clients WHERE last_name = ?\"\n        # Executes the query and saves the first matching row in the result variable using fetchone.\n        cursor.execute(sql, (data, ))\n        result = cursor.fetchone()\n        clientId = result[0] if result else None\n        # Closes the connection and returns the data.\n        connection.close()\n        return clientId","repo_name":"matiasdigialleonardo/tp-python-tkinter","sub_path":"src/entities/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1895,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"25408257400","text":"#LABORATORY 4\n#student JULIO ANTHONY ENGELS RUIZ COTO - 1284719 \nimport RPi.GPIO as GPIO #Python library to control the GPIO\nimport time\nGPIO.setmode(GPIO.BCM) # sets the pin numbering mode \nGPIO.setwarnings(False) #disables the warning messages\n\nGPIO.setup(16, GPIO.IN) #This pin is an input.\nGPIO.setup(20, GPIO.OUT) # this pin is an output\nGPIO.setup(21, GPIO.OUT) # this pin is an output\n\n\nwhile True:\n    while True:\n        inputvalor = GPIO.input(16)\n        if(inputvalor == True):\n            GPIO.output(20, True) #turns on LED 1\n            GPIO.output(21, False) #turns off LED 2\n            print(\"Se ha presionado la tecla 1\" , end = \"\\r\")\n            time.sleep(0.4)\n            break\n    while True:\n        inputvalor = GPIO.input(16)\n        if(inputvalor == True):\n            GPIO.output(20, False) #turns off LED 1\n            GPIO.output(21, True) #turns on LED 2\n            print(\"Se ha presionado la tecla 2\", end = \"\\r\")\n            time.sleep(0.4)\n            break\n","repo_name":"engelsruiz09/ARQUI-II","sub_path":"LABORATORIO/LAB4/lab4.py","file_name":"lab4.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"8987469736","text":"import datetime\nimport os\nimport statistics\nimport sys\n\nfrom PyQt5 import QtWidgets\n\nfrom downloader import download\n\n'''\nclass that reads the txt quote files\n\nread_quotes - returns a dict of dates and currency values\n\nlast_value - returns the last value received from the CBR\n\nmean_value_year - returns lists of years and the mean currency values for those years\n\nvalue_month - returns a list of dates stepping 5 days at a time and the mean value for each step, all limited to 30 days\n\n\n'''\n\ndownload_ = download()\n\n\nclass DataBase(): # database class, all work with the currency values happens here\n    def __init__(self):\n        self.date_value = {}\n        self.mean_value = []\n        self.data_list = ['usd.txt', 'euro.txt', 'pound.txt', 'tenge.txt']\n        self.main_files = ['mainwin.ui', 'currency', 'images_currency', 'usd.txt', 'euro.txt', 'pound.txt', 'tenge.txt']\n\n    def read_quotes(self, file_currency): # reads a txt file\n        with open(f'currency\{file_currency}', 'r') as fout:\n            lines = fout.readlines()\n        for i in lines:\n            i = i.split('|')\n            self.date_value[(i[2]).replace('\\n', '')] = float(i[1].replace(',', '.'))\n        return self.date_value\n\n    def last_value(self):\n        return list(self.date_value.items())[-1]\n\n    def mean_value_year(self, file_currency):\n        self.mean_value = []\n        temp_value = []\n        temp_year = []\n        with open(f'currency\{file_currency}', 'r') as fout:\n            lines = fout.readlines()\n        for i in lines:\n            year = int((i.split('|')[2]).split('.')[2].replace('\\n', ''))\n            if year not in temp_year:\n                if temp_value == []:\n                    temp_year.append(year)\n                    i = 
float(i.split('|')[1].replace(',', '.'))\n temp_value.append(i)\n else:\n self.mean_value.append(statistics.mean(temp_value))\n temp_value = []\n else:\n i = float(i.split('|')[1].replace(',', '.'))\n temp_value.append(i)\n if temp_year[-1] == 2023:\n self.mean_value.append(statistics.mean(temp_value))\n return self.mean_value, temp_year\n\n def value_month(self, file_currency):\n dates = []\n values = []\n with open(f'currency\\{file_currency}', 'r') as fout:\n lines = fout.readlines()[:-25: -3]\n for i in lines:\n dates.append('.'.join((i.split('|')[2]).split('.')[0:2]))\n values.append(float(i.split('|')[1].replace(',', '.')))\n dates.reverse()\n values.reverse()\n return dates, values\n\n def check_files(self):\n for i in self.main_files:\n if i in self.data_list:\n if os.path.isfile(f'currency\\{i}') == False:\n download_.download_currency()\n elif os.path.exists(i) == False:\n self.show_Error('OoOpS, someone deleted main folder/files, u need reinstall Beenance' + i)\n sys.exit()\n for file_currency in self.data_list:\n with open(f'currency\\{file_currency}', 'r') as fout:\n lines = fout.readlines()\n if lines == []:\n download_.download_currency()\n print('begin checked')\n try:\n for i in lines:\n i = i.rstrip('\\n')\n name_file, value, date = i.split('|')\n if name_file not in self.data_list:\n download_.download_currency()\n print('type error of txt')\n break\n elif value.replace(',', '', 1).isdigit() == False:\n download_.download_currency()\n print('type error of value')\n break\n datetime.datetime.strptime(date, '%d.%m.%Y')\n except Exception:\n print(1)\n download_.download_currency()\n\n def show_Error(self, error):\n app = QtWidgets.QApplication([])\n error_dialog = QtWidgets.QErrorMessage()\n error_dialog.showMessage(error)\n error_dialog.setWindowTitle(\"directory error\")\n app.exec()\n","repo_name":"11kara11/Beenance-Project","sub_path":"DataBase.py","file_name":"DataBase.py","file_ext":"py","file_size_in_byte":4645,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"83"} +{"seq_id":"34165339806","text":"\r\nimport json\r\nimport logging\r\n\r\nfrom django.contrib.auth.decorators import login_required\r\nfrom django.shortcuts import render\r\nfrom django.http import JsonResponse\r\nfrom django.core.serializers.json import DjangoJSONEncoder\r\nfrom django.views import View\r\nfrom django.utils.decorators import method_decorator\r\nfrom django.views.generic.detail import SingleObjectMixin\r\n\r\nfrom django.db.models.functions import Lower\r\n\r\nfrom main.decorators import user_is_staff\r\n\r\nfrom main.models import experiment_session_days\r\n\r\nclass ExperimentSessionPayoutsView(SingleObjectMixin, View):\r\n '''\r\n Experiment session day paysheet view\r\n '''\r\n\r\n template_name = \"staff/experimentSessionPayoutsView.html\"\r\n model = experiment_session_days\r\n\r\n @method_decorator(login_required)\r\n @method_decorator(user_is_staff)\r\n def get(self, request, *args, **kwargs):\r\n '''\r\n handle get requests\r\n '''\r\n\r\n logger = logging.getLogger(__name__)\r\n\r\n esd = self.get_object()\r\n payGroup = kwargs['payGroup']\r\n\r\n return render(request,\r\n self.template_name,\r\n {\"sessionDay\":esd, \r\n \"id\":esd.id,\r\n \"consent_form\" : json.dumps(esd.experiment_session.consent_form.json(), cls=DjangoJSONEncoder) if esd.experiment_session.consent_form else json.dumps(None,cls=DjangoJSONEncoder),\r\n \"payGroup\":payGroup})\r\n \r\n @method_decorator(login_required)\r\n @method_decorator(user_is_staff)\r\n def 
post(self, request, *args, **kwargs):\r\n '''\r\n handle post requests\r\n '''\r\n\r\n logger = logging.getLogger(__name__) \r\n\r\n data = json.loads(request.body.decode('utf-8'))\r\n\r\n id = esd = self.get_object().id\r\n\r\n if data[\"action\"] == \"getSession\":\r\n return getSession(data, id, request.user)\r\n \r\n return JsonResponse({\"response\" : \"error\"},safe=False)\r\n\r\n#return the session info to the client\r\ndef getSession(data, id, request_user): \r\n logger = logging.getLogger(__name__)\r\n logger.info(\"Get Session Day Payouts\") \r\n logger.info(data)\r\n\r\n payGroup = data[\"payGroup\"]\r\n\r\n esd = experiment_session_days.objects.get(id=id)\r\n\r\n if payGroup == \"bumps\": \r\n\r\n esd.users_who_printed_bumps.add(request_user)\r\n esd.save()\r\n\r\n esdu = esd.ESDU_b.filter(bumped = True)\\\r\n .order_by(Lower('user__last_name'), Lower('user__first_name'))\r\n\r\n elif payGroup == \"payouts\" or payGroup == \"consent\":\r\n esd.users_who_printed_paysheet.add(request_user)\r\n esd.save()\r\n\r\n esdu = esd.ESDU_b.filter(attended = True)\\\r\n .order_by(Lower('user__last_name'), Lower('user__first_name')) \r\n\r\n return JsonResponse({\"sessionDayUsers\" : [i.json_runInfo() for i in esdu], \r\n \"experiment_session_day\" : esd.json_runInfo(request_user)}, safe=False)","repo_name":"jeffreykirchner/ESIRecruiter","sub_path":"main/views/staff/experiment_session_payouts_view.py","file_name":"experiment_session_payouts_view.py","file_ext":"py","file_size_in_byte":3017,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"6995414530","text":"import math\nimport numpy as np\n\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.svm import SVC\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.tree import DecisionTreeClassifier\n\ndef set_configs(n_columns):\n \n decision_tree = {\n 'DT' : [DecisionTreeClassifier,\n {'criterion' : ['gini'],\n 'min_samples_leaf' : list(range(1, 31)),\n 'min_samples_split' : [5], \n 'random_state' : [42]}]\n }\n \n knn = {\n 'KNN' : [KNeighborsClassifier,\n {'n_neighbors' : list(range(3, 62, 2))}]\n }\n \n mlp = {\n 'MLP' : [MLPClassifier,\n {'hidden_layer_sizes' : list(range(5, 35, 1)),\n 'random_state' : [42]}]\n }\n \n random_forest = {\n 'RF' : [RandomForestClassifier,\n {'n_estimators' : list(range(50, 500, 15)),\n 'min_samples_split' : [math.floor(abs(math.sqrt(n_columns - 1)))], \n 'random_state' : [42]}]\n }\n \n svm = {\n 'SVM' : [SVC,\n {'kernel' : ['rbf'], 'C' : [1], 'gamma' : list(np.arange(0.0025, 0.75, 0.025)), \n 'random_state' : [42]}]\n }\n \n \n all_configs = {\n 'dt' : decision_tree,\n 'knn' : knn,\n 'mlp' : mlp,\n 'rf' : random_forest,\n 'svm' : svm\n }\n \n return all_configs","repo_name":"diegominatel/ahp-gaussiano-selecao-modelo","sub_path":"Experimentos/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"33757589689","text":"from Artist import Artist\nfrom Artwork import Artwork\n\n\nif __name__ == \"__main__\":\n print('this program puts together artists and pairs them with their artwork title.')\n print('enter artist name:')\n userArtistName = input()\n print('enter birth year for', userArtistName,':')\n userBirthYear = int(input())\n print('enter death year for', userArtistName,':', '(if alive, type \"alive\")')\n userDeathYear = str(input())\n 
print('enter title of ', userArtistName , 'artistic piece:')\n userTitle = input()\n print('enter year created:')\n userYearCreated = int(input())\n\n userArtist = Artist(userArtistName, userBirthYear, userDeathYear)\n newArtwork = Artwork(userTitle, userYearCreated, userArtist)\n\n newArtwork.printInfo()\n exit()","repo_name":"jeffrey11lewis/ArtistandArtwork","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"25630725782","text":"from behave import *\nfrom chronos.models import *\nfrom chronos.controllers.taskController import TaskController\nimport time\nimport datetime\n\n\n@given('that I am about to work on a task')\ndef step_impl(context):\n context.searchResult = TaskController.filter_by_code(code=1)\n\n\n@when('I use the stopwatch for 5 seconds')\ndef step_impl(context):\n stopwatch = Stopwatch()\n stopwatch.start()\n time.sleep(5)\n stopwatch.stop()\n context.stopwatch = stopwatch\n assert int(stopwatch.recordedTime) == 5\n\n\n@then('it is save in the TimeRecord')\ndef step_impl(context):\n init = time.localtime(context.stopwatch.initialTime)\n finish = time.localtime(context.stopwatch.initialTime + context.stopwatch.recordedTime)\n record = TimeRecord(startTime=datetime.time(init.tm_hour, init.tm_min, init.tm_sec),\n endTime=datetime.time(finish.tm_hour, finish.tm_min, finish.tm_sec),\n date=datetime.date(init.tm_year, init.tm_mon, init.tm_mday))\n record.save()\n assert record.startTime == datetime.time(init.tm_hour, init.tm_min, init.tm_sec) and \\\n record.endTime == datetime.time(finish.tm_hour, finish.tm_min, finish.tm_sec) and \\\n record.date == datetime.date(init.tm_year, init.tm_mon, init.tm_mday)\n","repo_name":"mpordomingo/ing-del-software-3","sub_path":"features/steps/activateStopwatchSteps.py","file_name":"activateStopwatchSteps.py","file_ext":"py","file_size_in_byte":1276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"15477966386","text":"import torch as th\nfrom .model import DynamicModel\n\n\nclass PendulumModel(DynamicModel):\n def __init__(self, mass, length, gravity=9.81):\n self.mass = mass\n self.length = length\n self.g = gravity\n\n def dynamics(self, time, state, control):\n theta, dtheta = state\n return (\n th.vstack([dtheta, -(self.g/self.length)*th.sin(theta)]) +\n th.vstack([th.tensor(0), control/(self.mass * th.pow(th.tensor(self.length), 2))])\n )\n","repo_name":"cisimon7/LearningAndMPC","sub_path":"systems/pendulum.py","file_name":"pendulum.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"2998198276","text":"import PIL\nfrom PIL import Image as PILImage\nfrom io import BytesIO\n\n\nclass Image(object):\n def resize_image(self, body, extension, width):\n \"\"\"\n Resize and optimize image\n :param body: the image\n :param extension: image type ('.png' or '.jpg' or '.jpeg')\n :param width: image width\n :return: bytesIO with image resized\n \"\"\"\n try:\n img = PILImage.open(BytesIO(body))\n wpercent = width / float(img.size[0])\n hsize = int((float(img.size[1]) * float(wpercent)))\n img = img.resize((width, hsize), PIL.Image.ANTIALIAS)\n\n buffer = BytesIO()\n\n if extension in [\".jpeg\", \".jpg\"]:\n format = \"JPEG\"\n img.save(buffer, format, quality=85, optimize=True)\n if extension in [\".png\"]:\n format = \"PNG\"\n img.save(buffer, 
format, optimize=True)\n\n buffer.seek(0)\n\n return buffer\n except Exception as e:\n message = \"resize_image FAIL: {}\".format(e)\n print(message)\n return None\n","repo_name":"softinio/pymagecli","sub_path":"pymagecli/image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"2490269785","text":"def add_time(start, duration, starting_day=\"\"):\n # Split the 'start' input into hours and minutes and store the AM/PM part\n pieces = start.split()\n time = pieces[0].split(\":\")\n end = pieces[1]\n\n # Calculate the 24-hour clock format if the time is PM\n if end == \"PM\":\n hour = int(time[0]) + 12\n time[0] = str(hour)\n \n # Split the 'duration' input into hours and minutes\n dur_time = duration.split(\":\")\n \n # Add hours and minutes to the initial time\n new_hour = int(time[0]) + int(dur_time[0])\n new_minutes = int(time[1]) + int(dur_time[1])\n\n # Handle the case where minutes exceed 60\n if new_minutes >= 60:\n hours_add = new_minutes // 60\n new_minutes -= hours_add * 60\n new_hour += hours_add\n\n # Handle the case where hours exceed 24\n days_add = 0\n if new_hour > 24:\n days_add = new_hour // 24\n new_hour -= days_add * 24\n \n # Determine whether the result should be AM or PM\n # Convert to 12-hour clock format\n if new_hour > 0 and new_hour < 12:\n end = \"AM\"\n elif new_hour == 12:\n end = \"PM\"\n elif new_hour > 12:\n end = \"PM\"\n new_hour -= 12\n else: # new_hour == 0\n end = \"AM\"\n new_hour += 12\n\n # Determine if there are additional days added and format accordingly\n if days_add > 0:\n if days_add == 1:\n days_later = \" (next day)\"\n else:\n days_later = \" (\" + str(days_add) + \" days later)\"\n else:\n days_later = \"\"\n\n # Define the days of the week\n week_days = (\"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\", \"Saturday\", \"Sunday\")\n\n # Handle the starting_day parameter to calculate the day of the week\n if starting_day:\n # Calculate how many weeks have passed and adjust days_add\n weeks = days_add // 7\n i = week_days.index(starting_day.lower().capitalize()) + (days_add - 7 * weeks)\n \n # Ensure the day index stays within the range (0-6)\n if i > 6:\n i -= 7\n day = \", \" + week_days[i]\n else:\n day = \"\"\n \n # Format the new time with hours, minutes, AM/PM, day of the week, and days later\n new_time = str(new_hour) + \":\" + \\\n (str(new_minutes) if new_minutes > 9 else (\"0\" + str(new_minutes))) + \\\n \" \" + end + day + days_later\n \n return new_time\n","repo_name":"Parisaakl/Time-Calculator","sub_path":"time_calculator.py","file_name":"time_calculator.py","file_ext":"py","file_size_in_byte":2378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"2322123100","text":"import os\nimport pandas as pd\nfrom os import path as osp\nimport urllib.request as ur\nimport io\nfrom collections import defaultdict\nfrom tqdm import tqdm\nfrom typing import Union\nfrom rdkit import Chem\nfrom rdkit.Chem.Scaffolds import MurckoScaffold\nimport random\n\n\nclass KfoldDataset:\n def __init__(self, name: str):\n assert name.startswith(\"kfold_s\")\n self.seed = int(name[len(\"kfold_s\")])\n self.name = name.split(\"_\")[-1]\n assert self.name in [\n \"bace\",\n \"bbbp\",\n \"clintox\",\n \"sider\",\n \"tox21\",\n \"toxcast\",\n \"freesolv\",\n \"esol\",\n \"lipo\",\n \"qm7\",\n \"qm8\",\n ]\n self.url = 
\"https://raw.githubusercontent.com/tencent-ailab/grover/main/exampledata/finetune/{}.csv\".format(\n self.name\n )\n self.download()\n\n def download(self):\n fndir = \"/tmp/kfold\"\n os.makedirs(fndir, exist_ok=True)\n fn = self.url.rpartition(\"/\")[2]\n fn = osp.join(fndir, fn)\n self.fn = fn\n if os.path.exists(fn):\n return\n\n with ur.urlopen(self.url) as src:\n content = src.read().decode(\"utf8\")\n with io.open(fn, \"w\", encoding=\"utf8\") as tgt:\n tgt.write(content)\n\n def read_csv(self):\n return pd.read_csv(self.fn)\n\n def scaffold_split(self, data, sizes=(0.8, 0.1, 0.1), balanced: bool = True):\n assert sum(sizes) == 1\n train_size, val_size, test_size = (\n sizes[0] * len(data),\n sizes[1] * len(data),\n sizes[2] * len(data),\n )\n train, val, test = [], [], []\n train_scaffold_count, val_scaffold_count, test_scaffold_count = 0, 0, 0\n\n def scaffold2smiles(mols):\n scaffolds = defaultdict(set)\n for i, mol in tqdm(enumerate(mols), total=len(mols)):\n scaffold = generate_scaffold(mol)\n scaffolds[scaffold].add(i)\n return scaffolds\n\n scaffload2indices = scaffold2smiles(data)\n\n if balanced:\n index_sets = list(scaffload2indices.values())\n big_index_sets = []\n small_index_sets = []\n for index_set in index_sets:\n if len(index_set) > val_size / 2 or len(index_set) > test_size / 2:\n big_index_sets.append(index_set)\n else:\n small_index_sets.append(index_set)\n random.seed(self.seed)\n random.shuffle(big_index_sets)\n random.shuffle(small_index_sets)\n index_sets = big_index_sets + small_index_sets\n else:\n raise NotImplementedError()\n\n for index_set in index_sets:\n if len(train) + len(index_set) <= train_size:\n train += index_set\n train_scaffold_count += 1\n elif len(val) + len(index_set) <= val_size:\n val += index_set\n val_scaffold_count += 1\n else:\n test += index_set\n test_scaffold_count += 1\n\n return train, val, test\n\n def get_dataset(self):\n all_data = self.read_csv()\n if \"smiles\" in list(all_data.columns):\n all_smiles = all_data[\"smiles\"].values.tolist()\n else:\n all_smiles = all_data[\"mol\"].values.tolist()\n train_idx, valid_idx, test_idx = self.scaffold_split(all_smiles)\n all_tasks = list(all_data.columns)[1:]\n labels = all_data[all_tasks].values.tolist()\n\n def ret_pd(idx):\n smiles_list = [all_smiles[i] for i in idx]\n labels_split = [labels[i] for i in idx]\n return pd.DataFrame({\"text\": smiles_list, \"labels\": labels_split})\n\n pds = [ret_pd(train_idx), ret_pd(valid_idx), ret_pd(test_idx)]\n\n return all_tasks, pds, None\n\n\ndef generate_scaffold(mol: Union[str, Chem.Mol], include_chirality: bool = False) -> str:\n mol = Chem.MolFromSmiles(mol) if type(mol) == str else mol\n scaffold = MurckoScaffold.MurckoScaffoldSmiles(mol=mol, includeChirality=include_chirality)\n return scaffold\n","repo_name":"microsoft/DVMP","sub_path":"molecule/kfold.py","file_name":"kfold.py","file_ext":"py","file_size_in_byte":4145,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"83"} +{"seq_id":"25688945453","text":"# gpt_api.py\nimport aiohttp\nimport asyncio\nimport math\nimport os\n\nAPI_URL = \"https://api.openai.com/v1/chat/completions\"\nAPI_KEY = os.getenv(\"API_KEY\") # Get the API key from an environment variable\nMAX_TOKENS = 4096 # Setting to a bit less than actual limit for safety\n\nasync def makeGpt3APICall(file_content, initial_prompt):\n\ttotal_length = len(initial_prompt) + len(file_content)\n\tnum_chunks = math.ceil(total_length / MAX_TOKENS)\n\tchunk_size = len(file_content) // 
num_chunks\n\n\tsummaries = []\n\tasync with aiohttp.ClientSession() as session:\n\t\tfor i in range(num_chunks):\n\t\t\tchunk_start = i * chunk_size\n\t\t\tchunk_end = chunk_start + chunk_size if i != num_chunks - 1 else len(file_content)\n\t\t\tfile_chunk = file_content[chunk_start:chunk_end]\n\n\t\t\tprompt_chunk = f\"{initial_prompt}\\n\\n{file_chunk}\\n\\n\"\n\t\t\theaders = {\n\t\t\t\t\"Content-Type\": \"application/json\",\n\t\t\t\t\"Authorization\": f\"Bearer {API_KEY}\"\n\t\t\t}\n\t\t\tdata = {\n\t\t\t\t\"model\": \"gpt-3.5-turbo\",\n\t\t\t\t\"messages\": [\n\t\t\t\t\t{\"role\": \"system\", \"content\": \"You are a language model that summarizes code.\"},\n\t\t\t\t\t{\"role\": \"user\", \"content\": prompt_chunk}\n\t\t\t\t]\n\t\t\t}\n\t\t\tasync with session.post(API_URL, headers=headers, json=data) as response:\n\t\t\t\tresponse_data = await response.json()\n\n\t\t\t\tif 'choices' in response_data:\n\t\t\t\t\tmessage = response_data[\"choices\"][0][\"message\"][\"content\"]\n\t\t\t\t\tsummaries.append(message)\n\t\t\t\telse:\n\t\t\t\t\terror_message = response_data.get('error', {}).get('message')\n\t\t\t\t\tsummaries.append(\"Error: No response choices\")\n\treturn ' '.join(summaries)\n","repo_name":"HNKunwar/GPT-Code-Directory-Analyzer","sub_path":".idea/Analyzer/gpt_api.py","file_name":"gpt_api.py","file_ext":"py","file_size_in_byte":1520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"34322266259","text":"from __future__ import absolute_import, division, print_function, unicode_literals\n\nimport abc\nimport logging\nfrom abc import ABC\n\nimport numpy as np\n\nfrom wrt.classifiers.classifier import Classifier\nfrom wrt.attacks.attack import RemovalAttack\nfrom wrt.exceptions import ClassifierError\n\nlogger = logging.getLogger(__name__)\n\n\nclass Overwriting(RemovalAttack):\n \"\"\"\n The attack consists of overwriting a watermark by embedding a different one using\n the same scheme\n \"\"\"\n\n def __init__(\n self,\n classifier,\n defense,\n init_kwargs=None,\n embed_args=None,\n embed_kwargs=None,\n other_kwargs=None\n ):\n \"\"\"\n Create a :class:`.Regularization` instance.\n\n :param classifier: Classifier; A trained classifier.\n :param defense: Watermark; the watermark scheme to overwrite\n \"\"\"\n super(Overwriting, self).__init__(classifier)\n\n if not isinstance(classifier, Classifier):\n raise ClassifierError(self.__class__, [Classifier], classifier)\n\n self.defense = defense\n self.init_kwargs = init_kwargs if init_kwargs is not None else {}\n self.embed_args = embed_args if embed_args is not None else ()\n self.embed_kwargs = embed_kwargs if embed_kwargs is not None else {}\n self.other_kwargs = other_kwargs if other_kwargs is not None else {}\n\n @classmethod\n def is_valid_classifier_type(cls, classifier):\n \"\"\"\n Checks whether the classifier provided is a classifier which this class can perform an attack on\n :param classifier:\n :return:\n \"\"\"\n return True if isinstance(classifier, Classifier) else False\n\n def preprocess_data(self, x, y):\n \"\"\"\n Preprocess the training data and labels for embedding\n :param x: np.ndarray; training data\n :param y: np.ndarray; corresponding labels\n :return: (np.ndarray, np.ndarray); the processed data\n \"\"\"\n if 'normalize' in self.other_kwargs:\n mean, std = self.other_kwargs['normalize']\n else:\n mean, std = np.tile(0, x.shape[1]), np.tile(1, x.shape[1])\n mean, std = mean.reshape((1, x.shape[1], 1, 1)), std.reshape((1, x.shape[1], 1, 1))\n 
return ((x - mean) / std).astype(np.float32), y\n\n def remove(self, x, y=None, **kwargs):\n \"\"\"\n Apply the overwriting attack\n\n :param x: An array with the target inputs.\n :type x: `np.ndarray`\n :param y: np.ndarray; Corresponding labels\n :return: None\n \"\"\"\n if y is None:\n raise ValueError(\"Labels must be provided\")\n\n x, y = self.preprocess_data(x, y)\n\n # Drop unused labels.\n print(\"Predicting labels for overwriting!\")\n print(y.shape, type(y), y[0])\n y = self.classifier.predict(x, batch_size=32,\n learning_phase=kwargs.setdefault(\"learning_phase\", False))\n y = np.eye(self.classifier.nb_classes())[np.argmax(y, axis=1)]\n print(y.shape, type(y), y[0])\n\n defense_overwrite_instance = self.defense(self.classifier, **self.init_kwargs)\n embed_kwargs = {**self.embed_kwargs, 'x_train': x, 'y_train': y}\n defense_overwrite_instance.embed(**embed_kwargs)\n\n\n","repo_name":"dnn-security/Watermark-Robustness-Toolbox","sub_path":"wrt/attacks/removal/overwriting.py","file_name":"overwriting.py","file_ext":"py","file_size_in_byte":3319,"program_lang":"python","lang":"en","doc_type":"code","stars":76,"dataset":"github-code","pt":"83"} +{"seq_id":"43071504791","text":"# Juft son bor yoki yo'qligini tekshirish\n# 1-usul:\n\"\"\" sonlar = [1, 3, 5, 7]\njuft_son_bormi = False\nfor son in sonlar:\n if son % 2 == 0:\n juft_son_bormi = True\n\nif juft_son_bormi:\n print(\"Juft son bor ekan!\")\nelse:\n print(\"juft son topilmadi!\") \"\"\"\n\n# 2-usul:\nsonlar = [1, 3, 5, 7]\nfor son in sonlar:\n if son % 2 == 0:\n print(\"Juft son bor ekan!\")\n break\nelse: # for va while tsikllari bn else ishlataolamiz!\n print(\"Juft son topilmadi!\")","repo_name":"shakhgulomjonov/pda_mashqlar","sub_path":"Pythonda ayrim yechimlar/002.py","file_name":"002.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"35339508592","text":"import collections\n\nimport deephyper.skopt\nimport numpy as np\nfrom deephyper.search.nas._regevo import RegularizedEvolution\n\n# Adapt minimization -> maximization with DeepHyper\nMAP_liar_strategy = {\n \"cl_min\": \"cl_max\",\n \"cl_max\": \"cl_min\",\n}\nMAP_acq_func = {\n \"UCB\": \"LCB\",\n}\n\n\nclass AgEBO(RegularizedEvolution):\n \"\"\"`Aging evolution with Bayesian Optimization `_.\n\n This algorithm build on the `Regularized Evolution `_. It cumulates Hyperparameter optimization with Bayesian optimisation and Neural architecture search with regularized evolution.\n\n Args:\n problem (NaProblem): Neural architecture search problem describing the search space to explore.\n evaluator (Evaluator): An ``Evaluator`` instance responsible of distributing the tasks.\n random_state (int, optional): Random seed. Defaults to None.\n log_dir (str, optional): Log directory where search's results are saved. Defaults to \".\".\n verbose (int, optional): Indicate the verbosity level of the search. Defaults to 0.\n population_size (int, optional): the number of individuals to keep in the population. Defaults to ``100``.\n sample_size (int, optional): the number of individuals that should participate in each tournament. Defaults to ``10``.\n n_initial_points (int, optional): Number of collected objectives required before fitting the surrogate-model. Defaults to ``10``.\n initial_points (List[Dict], optional): A list of initial points to evaluate where each point is a dictionnary where keys are names of hyperparameters and values their corresponding choice. 
Defaults to ``None`` for them to be generated randomly from the search space.\n surrogate_model (str, optional): Surrogate model used by the Bayesian optimization. Can be a value in ``[\"RF\", \"ET\", \"GBRT\", \"DUMMY\"]``. Defaults to ``\"RF\"``.\n acq_func (str, optional): Acquisition function used by the Bayesian optimization. Can be a value in ``[\"UCB\", \"EI\", \"PI\", \"gp_hedge\"]``. Defaults to ``\"UCB\"``.\n kappa (float, optional): Manage the exploration/exploitation tradeoff for the \"UCB\" acquisition function. Defaults to ``0.001`` for strong exploitation.\n xi (float, optional): Manage the exploration/exploitation tradeoff of ``\"EI\"`` and ``\"PI\"`` acquisition function. Defaults to ``0.000001`` for strong exploitation.\n n_points (int, optional): The number of configurations sampled from the search space to infer each batch of new evaluated configurations. Defaults to ``10000``.\n liar_strategy (str, optional): Definition of the constant value use for the Liar strategy. Can be a value in ``[\"cl_min\", \"cl_mean\", \"cl_max\"]`` . Defaults to ``\"cl_max\"``.\n n_jobs (int, optional): Number of parallel processes used to fit the surrogate model of the Bayesian optimization. A value of ``-1`` will use all available cores. Defaults to ``1``.\n sync_communcation (bool, optional): Performs the search in a batch-synchronous manner. Defaults to ``False`` for asynchronous updates.\n \"\"\"\n\n def __init__(\n self,\n problem,\n evaluator,\n random_state: int = None,\n log_dir: str = \".\",\n verbose: int = 0,\n # RE\n population_size: int = 100,\n sample_size: int = 10,\n # BO\n n_initial_points: int = 10,\n initial_points=None,\n surrogate_model: str = \"RF\",\n acq_func: str = \"UCB\",\n kappa: float = 0.001,\n xi: float = 0.000001,\n n_points: int = 10000,\n liar_strategy: str = \"cl_max\",\n n_jobs: int = 1,\n sync_communication: bool = False,\n ):\n super().__init__(\n problem,\n evaluator,\n random_state,\n log_dir,\n verbose,\n population_size,\n sample_size,\n )\n\n # Initialize opitmizer of hyperparameter space\n if len(self._problem._hp_space._space) == 0:\n raise ValueError(\n \"No hyperparameter space was defined for this problem use 'RegularizedEvolution' instead!\"\n )\n\n # check input parameters\n surrogate_model_allowed = [\"RF\", \"ET\", \"GBRT\", \"DUMMY\"]\n if not (surrogate_model in surrogate_model_allowed):\n raise ValueError(\n f\"Parameter 'surrogate_model={surrogate_model}' should have a value in {surrogate_model_allowed}!\"\n )\n\n acq_func_allowed = [\"UCB\", \"EI\", \"PI\", \"gp_hedge\"]\n if not (acq_func in acq_func_allowed):\n raise ValueError(\n f\"Parameter 'acq_func={acq_func}' should have a value in {acq_func_allowed}!\"\n )\n\n if not (np.isscalar(kappa)):\n raise ValueError(\"Parameter 'kappa' should be a scalar value!\")\n\n if not (np.isscalar(xi)):\n raise ValueError(\"Parameter 'xi' should be a scalar value!\")\n\n if not (type(n_points) is int):\n raise ValueError(\"Parameter 'n_points' shoud be an integer value!\")\n\n liar_strategy_allowed = [\"cl_min\", \"cl_mean\", \"cl_max\"]\n if not (liar_strategy in liar_strategy_allowed):\n raise ValueError(\n f\"Parameter 'liar_strategy={liar_strategy}' should have a value in {liar_strategy_allowed}!\"\n )\n\n if not (type(n_jobs) is int):\n raise ValueError(\"Parameter 'n_jobs' should be an integer value!\")\n\n self._n_initial_points = n_initial_points\n self._initial_points = []\n if initial_points is not None and len(initial_points) > 0:\n for point in initial_points:\n if 
isinstance(point, list):\n self._initial_points.append(point)\n elif isinstance(point, dict):\n self._initial_points.append(\n [point[hp_name] for hp_name in problem.hyperparameter_names]\n )\n else:\n raise ValueError(\n f\"Initial points should be dict or list but {type(point)} was given!\"\n )\n self._liar_strategy = MAP_liar_strategy.get(liar_strategy, liar_strategy)\n\n base_estimator = self._get_surrogate_model(\n surrogate_model, n_jobs, random_state=self._random_state.randint(0, 2**31)\n )\n self._hp_opt = None\n self._hp_opt_kwargs = dict(\n acq_optimizer=\"sampling\",\n acq_optimizer_kwargs={\n \"n_points\": n_points,\n \"filter_duplicated\": False,\n },\n dimensions=self._problem._hp_space._space,\n base_estimator=base_estimator,\n acq_func=MAP_acq_func.get(acq_func, acq_func),\n acq_func_kwargs={\"xi\": xi, \"kappa\": kappa},\n n_initial_points=self._n_initial_points,\n initial_points=self._initial_points,\n random_state=self._random_state,\n )\n\n self._gather_type = \"ALL\" if sync_communication else \"BATCH\"\n\n def _setup_hp_optimizer(self):\n self._hp_opt = deephyper.skopt.Optimizer(**self._hp_opt_kwargs)\n\n def _saved_keys(self, job):\n res = {\"arch_seq\": str(job.config[\"arch_seq\"])}\n hp_names = self._problem._hp_space._space.get_hyperparameter_names()\n\n for hp_name in hp_names:\n if hp_name == \"loss\":\n res[\"loss\"] = job.config[\"loss\"]\n else:\n res[hp_name] = job.config[\"hyperparameters\"][hp_name]\n\n return res\n\n def _search(self, max_evals, timeout):\n if self._hp_opt is None:\n self._setup_hp_optimizer()\n\n num_evals_done = 0\n population = collections.deque(maxlen=self._population_size)\n\n # Filling available nodes at start\n batch = self._gen_random_batch(size=self._evaluator.num_workers)\n self._evaluator.submit(batch)\n\n # Main loop\n while max_evals < 0 or num_evals_done < max_evals:\n # Collecting finished evaluations\n new_results = list(self._evaluator.gather(self._gather_type, size=1))\n\n if len(new_results) > 0:\n population.extend(new_results)\n\n self._evaluator.dump_evals(\n saved_keys=self._saved_keys, log_dir=self._log_dir\n )\n\n num_received = len(new_results)\n num_evals_done += num_received\n\n hp_results_X, hp_results_y = [], []\n\n # If the population is big enough evolve the population\n if len(population) == self._population_size:\n children_batch = []\n\n # For each new parent/result we create a child from it\n for new_i in range(len(new_results)):\n # select_sample\n indexes = self._random_state.choice(\n self._population_size, self._sample_size, replace=False\n )\n sample = [population[i] for i in indexes]\n\n # select_parent\n parent = self._select_parent(sample)\n\n # copy_mutate_parent\n child = self._copy_mutate_arch(parent)\n\n # add child to batch\n children_batch.append(child)\n\n # collect infos for hp optimization\n new_i_hp_values = self._problem.extract_hp_values(\n config=new_results[new_i][0]\n )\n new_i_y = new_results[new_i][1]\n hp_results_X.append(new_i_hp_values)\n hp_results_y.append(-new_i_y)\n\n self._hp_opt.tell(hp_results_X, hp_results_y) # !fit: costly\n new_hps = self._hp_opt.ask(\n n_points=len(new_results), strategy=self._liar_strategy\n )\n\n new_configs = []\n for hp_values, child_arch_seq in zip(new_hps, children_batch):\n new_config = self._problem.gen_config(child_arch_seq, hp_values)\n new_configs.append(new_config)\n\n # submit_childs\n if len(new_results) > 0:\n self._evaluator.submit(new_configs)\n\n else: # If the population is too small keep increasing it\n # For each new 
parent/result we create a child from it\n for new_i in range(len(new_results)):\n new_i_hp_values = self._problem.extract_hp_values(\n config=new_results[new_i][0]\n )\n new_i_y = new_results[new_i][1]\n hp_results_X.append(new_i_hp_values)\n hp_results_y.append(-new_i_y)\n\n self._hp_opt.tell(hp_results_X, hp_results_y) # !fit: costly\n new_hps = self._hp_opt.ask(\n n_points=len(new_results), strategy=self._liar_strategy\n )\n\n new_batch = self._gen_random_batch(\n size=len(new_results), hps=new_hps\n )\n self._evaluator.submit(new_batch)\n\n def _gen_random_batch(self, size: int, hps: list = None) -> list:\n batch = []\n if hps is None:\n points = self._hp_opt.ask(n_points=size)\n for hp_values in points:\n arch_seq = self._random_search_space()\n config = self._problem.gen_config(arch_seq, hp_values)\n batch.append(config)\n else: # passed hps are used\n assert size == len(hps)\n for hp_values in hps:\n arch_seq = self._random_search_space()\n config = self._problem.gen_config(arch_seq, hp_values)\n batch.append(config)\n return batch\n\n def _copy_mutate_arch(self, parent_arch: list) -> list:\n \"\"\"\n # ! Time performance is critical because called sequentialy\n\n Args:\n parent_arch (list(int)): embedding of the parent's architecture.\n\n Returns:\n dict: embedding of the mutated architecture of the child.\n\n \"\"\"\n i = self._random_state.choice(len(parent_arch))\n child_arch = parent_arch[:]\n\n range_upper_bound = self.space_list[i][1]\n elements = [j for j in range(range_upper_bound + 1) if j != child_arch[i]]\n\n # The mutation has to create a different search_space!\n sample = self._random_state.choice(elements, 1)[0]\n\n child_arch[i] = sample\n return child_arch\n\n def _get_surrogate_model(\n self, name: str, n_jobs: int = None, random_state: int = None\n ):\n \"\"\"Get a surrogate model from Scikit-Optimize.\n\n Args:\n name (str): name of the surrogate model.\n n_jobs (int): number of parallel processes to distribute the computation of the surrogate model.\n\n Raises:\n ValueError: when the name of the surrogate model is unknown.\n \"\"\"\n accepted_names = [\"RF\", \"ET\", \"GBRT\", \"DUMMY\"]\n if not (name in accepted_names):\n raise ValueError(\n f\"Unknown surrogate model {name}, please choose among {accepted_names}.\"\n )\n\n if name == \"RF\":\n surrogate = deephyper.skopt.learning.RandomForestRegressor(\n n_jobs=n_jobs, random_state=random_state\n )\n elif name == \"ET\":\n surrogate = deephyper.skopt.learning.ExtraTreesRegressor(\n n_jobs=n_jobs, random_state=random_state\n )\n elif name == \"GBRT\":\n surrogate = deephyper.skopt.learning.GradientBoostingQuantileRegressor(\n n_jobs=n_jobs, random_state=random_state\n )\n else: # for DUMMY and GP\n surrogate = name\n\n return surrogate\n","repo_name":"deephyper/deephyper","sub_path":"deephyper/search/nas/_agebo.py","file_name":"_agebo.py","file_ext":"py","file_size_in_byte":14052,"program_lang":"python","lang":"en","doc_type":"code","stars":248,"dataset":"github-code","pt":"83"} +{"seq_id":"19911444063","text":"## Oefening 9: eenheden van tijd\n# Maak een programma dat een lengte van tijd opvraagt van een gebruiker \n# als een reeks dagen, uren, minuten en seconden (afzonderlijk \n# opgevraagd). 
Bereken en toon het totale aantal seconden dat deze tijd \n# vertegenwoordigt.\n\naantal_dagen = int(input(\"Geef het aantal dagen: \"))\naantal_uren = int(input(\"Geef het aantal uren: \"))\naantal_minuten = int(input(\"Geef het aantal minuten: \"))\naantal_seconden = int(input(\"Geef het aantal seconden: \"))\ntotaal_aantal_seconden = aantal_seconden + (aantal_minuten*60) + (aantal_uren*3600) + (aantal_dagen*24*3600)\nprint(f\"De door jou ingegeven tijd bedraagt {totaal_aantal_seconden}s\")\n\n","repo_name":"JeanBacquaert/PythonOefeningen","sub_path":"oplossingen_labo1/oef9.py","file_name":"oef9.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"nl","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"3169257900","text":"# Дан текстовый файл. Удалить из него последнюю строку. Результат записать в другой файл.\n\nwith open(file=\"file1.txt\", mode=\"r\", encoding=\"utf-8\") as my_file1, open(file=\"file2.txt\", mode=\"w\", encoding=\"utf-8\")\\\n as my_file2:\n lines = my_file1.readlines()\n\n if len(lines) > 0:\n lines.pop()\n\n my_file2.writelines(lines)","repo_name":"Depthecho/Python217_FMI_works","sub_path":"Lesson-20 hw/hw3.py","file_name":"hw3.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"837538999","text":"def migrate_1_to_2(config):\n \"\"\"Create better structure by moving most settings out of dict root\n and into sub categories. Some keys are also renamed to be consistent\n with other UIs.\n \"\"\"\n\n def move_key(source, dest, source_key, dest_key=None):\n if dest_key is None:\n dest_key = source_key\n\n dest[dest_key] = source[source_key]\n del source[source_key]\n\n # These are moved to 'torrentview' sub dict\n for k in [\n 'sort_primary',\n 'sort_secondary',\n 'move_selection',\n 'separate_complete',\n ]:\n move_key(config, config['torrentview'], k)\n\n # These are moved to 'addtorrents' sub dict\n for k in [\n 'show_misc_files',\n 'show_hidden_folders',\n 'sort_column',\n 'reverse_sort',\n 'last_path',\n ]:\n move_key(config, config['addtorrents'], 'addtorrents_%s' % k, dest_key=k)\n\n # These are moved to 'cmdline' sub dict\n for k in [\n 'ignore_duplicate_lines',\n 'torrents_per_tab_press',\n 'third_tab_lists_all',\n ]:\n move_key(config, config['cmdline'], k)\n\n move_key(\n config,\n config['cmdline'],\n 'save_legacy_history',\n dest_key='save_command_history',\n )\n\n # Add key for localization\n config['language'] = ''\n\n # Migrate column settings\n columns = [\n 'queue',\n 'size',\n 'state',\n 'progress',\n 'seeds',\n 'peers',\n 'downspeed',\n 'upspeed',\n 'eta',\n 'ratio',\n 'avail',\n 'added',\n 'tracker',\n 'savepath',\n 'downloaded',\n 'uploaded',\n 'remaining',\n 'owner',\n 'downloading_time',\n 'seeding_time',\n 'completed',\n 'seeds_peers_ratio',\n 'complete_seen',\n 'down_limit',\n 'up_limit',\n 'shared',\n 'name',\n ]\n column_name_mapping = {\n 'downspeed': 'download_speed',\n 'upspeed': 'upload_speed',\n 'added': 'time_added',\n 'savepath': 'download_location',\n 'completed': 'completed_time',\n 'complete_seen': 'last_seen_complete',\n 'down_limit': 'max_download_speed',\n 'up_limit': 'max_upload_speed',\n 'downloading_time': 'active_time',\n }\n\n from deluge.ui.console.modes.torrentlist.torrentview import default_columns\n\n # These are moved to 'torrentview.columns' sub dict\n for k in columns:\n column_name = column_name_mapping.get(k, k)\n config['torrentview']['columns'][column_name] = {}\n if k 
== 'name':\n config['torrentview']['columns'][column_name]['visible'] = True\n else:\n move_key(\n config,\n config['torrentview']['columns'][column_name],\n 'show_%s' % k,\n dest_key='visible',\n )\n move_key(\n config,\n config['torrentview']['columns'][column_name],\n '%s_width' % k,\n dest_key='width',\n )\n config['torrentview']['columns'][column_name]['order'] = default_columns[\n column_name\n ]['order']\n\n return config\n","repo_name":"deluge-torrent/deluge","sub_path":"deluge/ui/console/utils/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":3177,"program_lang":"python","lang":"en","doc_type":"code","stars":1428,"dataset":"github-code","pt":"83"} +{"seq_id":"8206718845","text":"# Error code 123 = login error\n# Error code 10 = Import error / restart\n# Error code 0 = App closed\n#\n#\n#\n#\n#\n#\nimport os, sys\nimport subprocess\nkey = 's94546'\nresult = os.getlogin()\nprint(result)\nif result == \"arjun\":\n print(key, \"found su\")\nelif result == '2019ASharma':\n print(key, \"found su\")\nelif result == '2019asharma':\n print(key, \"found su\")\nelif result == 'admin':\n print('get a life')\nelif result == '2019HLegg':\n print(key, \"found su\")\nelif result == '2019hlegg':\n print(key, \"found su\")\nelif result == '2019Hlegg':\n print(key, \"found su\")\nelif result == '2019JDandison':\n print(key, \"found su\")\nelif result == '2019jdandison':\n print(key, \"found su\")\nelif result == '2019Jdandison':\n print(key, \"found su\")\nelse:\n print(result, \"No key\")\n exit(123)\nreslt = subprocess.check_output(\n f'C:/Users/{result}/AppData/Local/Programs/Python/Python37/python.exe -m pip install --user --upgrade pip --trusted-host pypi.org --trusted-host files.pythonhosted.org --no-warn-script-location',\n stdin=None, stderr=None, shell=False, universal_newlines=True)\nreslt2 = subprocess.check_output(\n f'python.exe -m pip install --user --upgrade pip --trusted-host pypi.org --trusted-host files.pythonhosted.org --no-warn-script-location',\n stdin=None, stderr=None, shell=False, universal_newlines=True)\nreslt3 = subprocess.check_output(\n f'python -m pip install --user --upgrade pip --trusted-host pypi.org --trusted-host files.pythonhosted.org --no-warn-script-location',\n stdin=None, stderr=None, shell=False, universal_newlines=True)\nreslt4 = subprocess.check_output(\n f'pip install --user --upgrade pip --trusted-host pypi.org --trusted-host files.pythonhosted.org --no-warn-script-location',\n stdin=None, stderr=None, shell=False, universal_newlines=True)\nre = str(reslt)\nprint(re)\nre1 = str(reslt2)\nprint(re1)\nre2= str(reslt3)\nprint(re2)\nre3 = str(reslt4)\nprint(re3)\n# 1\nif re == f'Requirement already satisfied: pip in c:\\\\users\\\\{result}\\\\appdata\\\\roaming\\\\python\\\\python37\\\\site-packages (22.1.2)\\n':\n print('found')\n instance = f'C:/Users/{result}/AppData/Local/Programs/Python/Python37/python.exe -m '\nelse:\n print('incorrect method')\n reslt = subprocess.check_output(\n f'python.exe -m pip install --user --upgrade pip --trusted-host pypi.org --trusted-host files.pythonhosted.org --no-warn-script-location',\n stdin=None, stderr=None, shell=False, universal_newlines=True)\n re = str(reslt)\n # 2\n print(re)\n if re == f'Requirement already satisfied: pip in c:\\\\users\\\\{result}\\\\appdata\\\\roaming\\\\python\\\\python37\\\\site-packages (22.1.2)\\n':\n print('found')\n instance = f'python.exe -m '\n else:\n print('incorrect method')\n reslt = subprocess.check_output(\n f'python -m pip install --user --upgrade pip 
--trusted-host pypi.org --trusted-host files.pythonhosted.org --no-warn-script-location',\n stdin=None, stderr=None, shell=False, universal_newlines=True)\n re = str(reslt)\n print(re)\n # 3\n if re == f'Requirement already satisfied: pip in c:\\\\users\\\\{result}\\\\appdata\\\\roaming\\\\python\\\\python37\\\\site-packages (22.1.2)\\n':\n print('found')\n instance = f'python -m '\n else:\n print('incorrect method')\n reslt = subprocess.check_output(\n f'python -m pip install --user --upgrade pip --trusted-host pypi.org --trusted-host files.pythonhosted.org --no-warn-script-location',\n stdin=None, stderr=None, shell=False, universal_newlines=True)\n re = str(reslt)\n # 4\n print(re)\n if re == f'Requirement already satisfied: pip in c:\\\\users\\\\{result}\\\\appdata\\\\roaming\\\\python\\\\python37\\\\site-packages (22.1.2)\\n':\n print('found')\n instance = f''\n else:\n print('incorrect method')\ntry:\n import pyperclip\nexcept ImportError as o:\n print('not installed')\n os.system(f'{instance}pip uninstall pyside6')\n os.system(f'{instance}pip uninstall pillow')\n os.system(f'{instance}pip uninstall pyperclip')\n os.system(f'{instance}pip install --user --upgrade pip --trusted-host pypi.org --trusted-host files.pythonhosted.org --no-warn-script-location')\n os.system(f'{instance}pip install --user pyside6 --trusted-host pypi.org --trusted-host files.pythonhosted.org')\n os.system(f'{instance}pip install --user pillow --trusted-host pypi.org --trusted-host files.pythonhosted.org')\n os.system(f'{instance}pip install --user pyperclip --trusted-host pypi.org --trusted-host files.pythonhosted.org')\ntry:\n # IMPORT PACKAGES AND MODULES\n # ///////////////////////////////////////////////////////////////\n from gui.uis.windows.main_window.functions_main_window import *\n\n # IMPORT QT CORE\n # ///////////////////////////////////////////////////////////////\n from qt_core import *\n import pyperclip\n\n # IMPORT SETTINGS\n # ///////////////////////////////////////////////////////////////\n from gui.core.json_settings import Settings\n\n # IMPORT PY ONE DARK WINDOWS\n # ///////////////////////////////////////////////////////////////\n # MAIN WINDOW\n from gui.uis.windows.main_window import *\n\n # IMPORT PY ONE DARK WIDGETS\n # ///////////////////////////////////////////////////////////////\n from gui.widgets import *\n from gui.uis.pages.files_rc import *\n\n # IMPORT MATERIALS\n # //////////////////////////////////////////////////////////////\n from gui.uis.pages.files_rc import *\n\nexcept ImportError as ie:\n print(f\"Well an error has occured: {ie} \\nPress enter to reinstall if 'n' is present it will stop\")\n a = input()\n if a == 'n':\n print('wow')\n elif a == 'N':\n print('wow')\n else:\n ################################################################################################################################\n try:\n import os\n\n os.system(\n f'{instance}pip uninstall pyside6')\n\n os.system(\n f'{instance}pip uninstall pillow')\n\n os.system(\n f'{instance}pip uninstall pyperclip')\n\n os.system(\n f'{instance}pip install --user --upgrade pip --trusted-host pypi.org --trusted-host files.pythonhosted.org --no-warn-script-location')\n\n os.system(\n f'{instance}pip install --user pyside6 --trusted-host pypi.org --trusted-host files.pythonhosted.org')\n\n os.system(\n f'{instance}pip install --user pillow --trusted-host pypi.org --trusted-host files.pythonhosted.org')\n\n os.system(\n f'{instance}pip install --user pyperclip --trusted-host pypi.org --trusted-host 
files.pythonhosted.org')\n except OSError as osio:\n print(\n f'Os issue: {osio}')\n\n exit(10)\n ####################################################################################################################################################################################\nlogin(True, True)\n# ADJUST QT FONT DPI FOR HIGHT SCALE AN 4K MONITOR\n# ///////////////////////////////////////////////////////////////\nos.environ[\"QT_FONT_DPI\"] = \"96\"\n\n\n# IF IS 4K MONITOR ENABLE 'os.environ[\"QT_SCALE_FACTOR\"] = \"2\"'\n\n# MAIN WINDOW\n# ///////////////////////////////////////////////////////////////\n# noinspection PyTypeChecker\nclass MainWindow(QMainWindow):\n def __init__(self):\n super().__init__()\n\n # SETUP MAIN WINDOw\n # Load widgets from \"gui\\uis\\main_window\\ui_main.py\"\n # ///////////////////////////////////////////////////////////////\n self.ui = UI_MainWindow()\n self.ui.setup_ui(self)\n\n # LOAD SETTINGS\n # ///////////////////////////////////////////////////////////////\n settings = Settings()\n self.settings = settings.items\n\n # SETUP MAIN WINDOW\n # ///////////////////////////////////////////////////////////////\n self.hide_grips = True # Show/Hide resize grips\n SetupMainWindow.setup_gui(self)\n\n # SHOW MAIN WINDOW\n # ///////////////////////////////////////////////////////////////\n self.show()\n\n # LEFT MENU BTN IS CLICKED\n # Run function when btn is clicked\n # Check funtion by object name / btn_id\n # ///////////////////////////////////////////////////////////////\n def btn_clicked(self):\n # GET BT CLICKED\n btn = SetupMainWindow.setup_btns(self)\n\n # Get Title Bar Btn And Reset Active\n top_settings = MainFunctions.get_title_bar_btn(self, \"btn_top_settings\")\n top_settings.set_active(False)\n if btn.objectName() == \"btn_home\":\n self.ui.left_menu.select_only_one(btn.objectName())\n MainFunctions.set_page(self, self.ui.load_pages.page_1)\n if btn.objectName() == \"btn_page_2\":\n self.ui.left_menu.select_only_one(btn.objectName())\n MainFunctions.set_page(self, self.ui.load_pages.page_2)\n if btn.objectName() == \"btn_page_3\":\n self.ui.left_menu.select_only_one(btn.objectName())\n MainFunctions.set_page(self, self.ui.load_pages.page_3)\n if btn.objectName() == \"btn_settings\" or btn.objectName() == \"btn_close_left_column\":\n if not MainFunctions.left_column_is_visible(self):\n MainFunctions.toggle_left_column(self)\n self.ui.left_menu.select_only_one(btn.objectName())\n else:\n if btn.objectName() == \"btn_close_left_column\":\n MainFunctions.toggle_left_column(self)\n self.ui.left_menu.select_only_one(btn.objectName())\n\n # TITLE BAR MENU\n # ///////////////////////////////////////////////////////////////\n\n # SETTINGS TITLE BAR\n if btn.objectName() == \"btn_settings\" or btn.objectName() == \"btn_close_left_column\":\n # CHECK IF LEFT COLUMN IS VISIBLE\n if not MainFunctions.left_column_is_visible(self):\n # Show / Hide\n MainFunctions.toggle_left_column(self)\n self.ui.left_menu.select_only_one_tab(btn.objectName())\n else:\n if btn.objectName() == \"btn_close_left_column\":\n self.ui.left_menu.deselect_all_tab()\n # Show / Hide\n MainFunctions.toggle_left_column(self)\n self.ui.left_menu.select_only_one_tab(btn.objectName())\n\n # Change Left Column Menu\n if btn.objectName() != \"btn_close_left_column\":\n MainFunctions.set_left_column_menu(\n self,\n menu=self.ui.left_column.menus.menu_1,\n title=\"Settings Left Column\",\n icon_path=Functions.set_svg_icon(\"icon_settings.svg\")\n )\n\n # DEBUG\n print(f\"Button {btn.objectName()}, 
clicked!\")\n\n # LEFT MENU BTN IS RELEASED\n # Run function when btn is released\n # Check funtion by object name / btn_id\n # ///////////////////////////////////////////////////////////////\n def btn_released(self):\n # GET BT CLICKED\n btn = SetupMainWindow.setup_btns(self)\n\n # DEBUG\n print(f\"Button {btn.objectName()}, released!\")\n\n # RESIZE EVENT\n # ///////////////////////////////////////////////////////////////\n def resizeEvent(self, event):\n SetupMainWindow.resize_grips(self)\n\n def resizeFunction(self):\n print('Height: ' + str(self.height()) + ' | Width: ' + str(self.width()))\n\n # MOUSE CLICK EVENTS\n # ///////////////////////////////////////////////////////////////\n def mousePressEvent(self, event):\n p = event.globalPosition()\n globalPos = p.toPoint()\n self.dragPos = globalPos\n print(p)\n\n\n# SETTINGS WHEN TO START\n# Set the initial class and also additional parameters of the \"QApplication\" class\n# ///////////////////////////////////////////////////////////////\nif __name__ == \"__main__\":\n # APPLICATION\n # ///////////////////////////////////////////////////////////////\n app = QApplication()\n app.setWindowIcon(QIcon(\"icon.ico\"))\n window = MainWindow()\n # EXEC APP\n # ///////////////////////////////////////////////////////////////\n sys.exit(app.exec())\n","repo_name":"Aksboomer1m/python2","sub_path":"blooket/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":12377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"12429367703","text":"from pathlib import Path\nfrom typing import Sequence\n\nimport fluidsynth\nimport numpy as np\n\nfrom robopianist import SF2_PATH\nfrom robopianist.music import constants as consts\nfrom robopianist.music import midi_message\nfrom robopianist.music.constants import SAMPLING_RATE\n\n_PROGRAM = 0 # Acoustic Grand Piano\n_CHANNEL = 0\n_BANK = 0\n\n\ndef _validate_note(note: int) -> None:\n assert consts.MIN_MIDI_PITCH <= note <= consts.MAX_MIDI_PITCH\n\n\ndef _validate_velocity(velocity: int) -> None:\n assert consts.MIN_VELOCITY <= velocity <= consts.MAX_VELOCITY\n\n\nclass Synthesizer:\n \"\"\"FluidSynth-based synthesizer.\"\"\"\n\n def __init__(\n self,\n soundfont_path: Path = SF2_PATH,\n sample_rate: int = SAMPLING_RATE,\n ) -> None:\n self._soundfont_path = soundfont_path\n self._sample_rate = sample_rate\n self._muted: bool = False\n self._sustained: bool = False\n\n # Initialize FluidSynth.\n self._synth = fluidsynth.Synth(samplerate=float(sample_rate))\n soundfont_id = self._synth.sfload(str(soundfont_path))\n self._synth.program_select(_CHANNEL, soundfont_id, _BANK, _PROGRAM)\n\n def start(self) -> None:\n self._synth.start()\n\n def stop(self) -> None:\n self._synth.delete()\n\n def mute(self, value: bool) -> None:\n self._muted = value\n if value:\n self.all_sounds_off()\n\n def all_sounds_off(self) -> None:\n self._synth.all_sounds_off(_CHANNEL)\n\n def all_notes_off(self) -> None:\n self._synth.all_notes_off(_CHANNEL)\n\n def note_on(self, note: int, velocity: int) -> None:\n if not self._muted:\n _validate_note(note)\n _validate_velocity(velocity)\n self._synth.noteon(_CHANNEL, note, velocity)\n\n def note_off(self, note: int) -> None:\n if not self._muted:\n _validate_note(note)\n self._synth.noteoff(_CHANNEL, note)\n\n def sustain_on(self) -> None:\n if not self._muted:\n self._synth.cc(\n _CHANNEL, consts.SUSTAIN_PEDAL_CC_NUMBER, consts.MAX_CC_VALUE\n )\n self._sustained = True\n\n def sustain_off(self) -> None:\n if not 
self._muted:\n            self._synth.cc(\n                _CHANNEL, consts.SUSTAIN_PEDAL_CC_NUMBER, consts.MIN_CC_VALUE\n            )\n            self._sustained = False\n\n    @property\n    def muted(self) -> bool:\n        return self._muted\n\n    @property\n    def sustained(self) -> bool:\n        return self._sustained\n\n    def get_samples(\n        self,\n        event_list: Sequence[midi_message.MidiMessage],\n    ) -> np.ndarray:\n        \"\"\"Synthesize a list of MIDI events into a waveform.\"\"\"\n        current_time = event_list[0].time\n\n        # Convert absolute seconds to relative seconds.\n        next_event_times = [e.time for e in event_list[1:]]\n        for event, end in zip(event_list[:-1], next_event_times):\n            event.time = end - event.time\n\n        # Include 1 second of silence at the end.\n        event_list[-1].time = 1.0\n\n        total_time = current_time + np.sum([e.time for e in event_list])\n        synthesized = np.zeros(int(np.ceil(self._sample_rate * total_time)))\n        for event in event_list:\n            if isinstance(event, midi_message.NoteOn):\n                self.note_on(event.note, event.velocity)\n            elif isinstance(event, midi_message.NoteOff):\n                self.note_off(event.note)\n            elif isinstance(event, midi_message.SustainOn):\n                self.sustain_on()\n            elif isinstance(event, midi_message.SustainOff):\n                self.sustain_off()\n            else:\n                raise ValueError(f\"Unknown event type: {event}\")\n            current_sample = int(self._sample_rate * current_time)\n            end = int(self._sample_rate * (current_time + event.time))\n            samples = self._synth.get_samples(end - current_sample)[::2]\n            synthesized[current_sample:end] += samples\n            current_time += event.time\n        waveform_float = synthesized / np.abs(synthesized).max()\n\n        # Convert to 16-bit ints.\n        normalizer = float(np.iinfo(np.int16).max)\n        return np.array(np.asarray(waveform_float) * normalizer, dtype=np.int16)\n","repo_name":"google-research/robopianist","sub_path":"robopianist/music/synthesizer.py","file_name":"synthesizer.py","file_ext":"py","file_size_in_byte":4245,"program_lang":"python","lang":"en","doc_type":"code","stars":486,"dataset":"github-code","pt":"83"} +{"seq_id":"14511062026","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Sep 18 16:13:28 2018\n\n@author: 李弘一萌\n\"\"\"\n\nimport pandas as pd\nfrom .HC_Base import HC_Base\nfrom . import HC_SpotPrice, HC_Macro, HC_Upstream, HC_Downstream\nfrom . import HC_Inventory, HC_Spread, HC_SupplyDemandBalance, HC_FuturesPrice\n#from sqlalchemy.orm import sessionmaker\n#from sqlalchemy import and_\n\n\n\nclass HC_Factory(object):\n    @staticmethod\n    def getobj(col):\n        class_name, table_module_name = HC_Base.get_col_class_table(col)\n        tmp_obj = eval(table_module_name + \".\" + class_name)()\n        return tmp_obj\n\n    @staticmethod\n    def getBaseObj():\n        return HC_Base()\n    \nif __name__ == \"__main__\":\n    col_list = [u\"PP拉丝价格_华东\"]\n    col = u\"PP拉丝价格_华东\"\n    a = HC_Factory().getobj(col)\n    print(a.col_name)\n#    bbb = a.get_ts()","repo_name":"HongYiMengLi/CmtDB_UI","sub_path":"MyPackages/Commodity_Data/Commodities/HC/HC_Factory.py","file_name":"HC_Factory.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"13353971888","text":"from pyocp.common import *\nfrom pyocp.arc_c import arc\nfrom pyocp.problem import problem\nfrom sympy import sin, cos, sqrt\n\n# States\nx, v, w = time_variables('x, v, w', t)\nx_d, v_d, w_d = [i.diff(t) for i in [x, v, w]]\n# Controls\nu = time_variables('u', t)\nT = Function('T')\n# Params\nl = symbols('l')\n# ODEs\nodes = [x_d - v,\n        v_d - u,#T(x, v, w) * u,\n        w_d - u**2/2]#T(x, v, w) * u**2 / 2]\n\nDI = problem('Double Integrator', 3, t)\nfor i in range(3):\n    DI[i].odes(odes)\n    DI[i].path_constraints(g=Matrix([l-x]), g_l=Matrix([0]), g_u=Matrix([oo]))\n    DI[i].continuous_bounds()\n    DI[i].parameter_bounds(arguments=[l])\n\nDI[2].objective(term=w)\n\nDI.link_arcs(left=None, right=0, right_cons=[x, v - 1, w], fixed_time=0)\nDI.link_arcs(left=0, right=1, left_cons=[x, v, w], right_cons=[x, v, w], free_time=0.3)\nDI.link_arcs(left=1, right=2, left_cons=[x, v, w], right_cons=[x, v, w], free_time=0.7)\nDI.link_arcs(left=2, right=None, left_cons=[x, v + 1], fixed_time=1)\nDI.complete_links()\n\ns_g = {x : [0, 1./9, 1./9, 0],\n       v : [1, 0, 0, -1],\n       w : [0, 2, 2, 4]}\n\nc_g = {u : [-6, 0, 0, -6]}\ntg = [0, .3, .7, 1]\n\n\nDI.numerical_defs(m=[10, 10, 10], states_guess=s_g, controls_guess=c_g,\n                  time_guess_grid=tg, parameters_guess={}, arguments={l : 1. 
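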
/ 9})\n\nDI.write_c_files()\nDI.run_and_read()\n","repo_name":"gilbertgede/PyOCP","sub_path":"pyocp/tests/bryson_3_11_2/bryson_3_11_2.py","file_name":"bryson_3_11_2.py","file_ext":"py","file_size_in_byte":1326,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"83"} +{"seq_id":"41899017260","text":"import os\n\nimport jinja2\nfrom fusesoc.capi2.generator import Generator\n\nfrom htfft import helper, unrolled_fft_gen, stage_gen\n\nbasedir = os.path.abspath(os.path.dirname(__file__))\n\n\ndef random_pipeline(rnd, spcc):\n pipelines = {\n 'barrel_shifter': ''.join(rnd.choice(('0', '1')) for i in range(helper.logceil(spcc)+1)),\n 'butterfly': {\n 'mult_latency': rnd.randint(1, 4),\n 'reg_i_p': rnd.choice([True, False]),\n 'reg_q_r': rnd.choice([True, False]),\n 'reg_r_s': rnd.choice([True, False]),\n 'reg_s_o': rnd.choice([True, False]),\n },\n 'stage': {\n 'reg_fromread_buffered': rnd.choice([True, False]),\n 'reg_buffered_tobutterfly': rnd.choice([True, False]),\n },\n 'reg_s_': rnd.choice([True, False]),\n }\n return pipelines\n\n\ndef make_pipeline_pkg(suffix, pipelines):\n pipeline_template = os.path.join(basedir, 'htfft_pipeline.vhd')\n with open(pipeline_template, 'r') as f:\n template_text = f.read()\n template = jinja2.Template(template_text)\n formatted_text = template.render(suffix=suffix, pipelines=pipelines)\n pipeline_filename = 'htfft{}_pipeline.vhd'.format(suffix)\n with open(pipeline_filename, 'w') as g:\n g.write(formatted_text)\n return pipeline_filename\n\n\ndef generate_htfft(n, spcc, input_width, suffix, pipelines):\n assert spcc == pow(2, helper.logceil(spcc))\n assert n == pow(2, helper.logceil(n))\n\n output_width = 2*helper.logceil(n) + input_width\n\n unrolled_filenames = unrolled_fft_gen.generate_unrolled_fft_inner(\n spcc, input_width, suffix)\n\n n_stages = helper.logceil(n//spcc)\n stage_filenames = []\n stage_ns = []\n for stage_index in range(n_stages):\n stage_n = spcc * pow(2, stage_index+1)\n stage_ns.append(stage_n)\n width = input_width + (helper.logceil(stage_n)-1)*2\n stage_filenames += stage_gen.generate_stage(stage_n, spcc, width, suffix)\n\n params = {\n 'n': n,\n 'spcc': spcc,\n 'input_width': input_width,\n 'output_width': output_width,\n 'suffix': suffix,\n 'stage_ns': stage_ns,\n 'n_stages': len(stage_ns),\n 'pipelines': pipelines,\n }\n\n template_filename = os.path.join(basedir, 'htfft.vhd')\n with open(template_filename, 'r') as f:\n template_text = f.read()\n template = jinja2.Template(template_text)\n formatted_text = template.render(**params)\n top_filename = 'htfft{}.vhd'.format(suffix)\n with open(top_filename, 'w') as g:\n g.write(formatted_text)\n\n template_filename = os.path.join(basedir, 'htfft_params.vhd')\n with open(template_filename, 'r') as f:\n template_text = f.read()\n template = jinja2.Template(template_text)\n formatted_text = template.render(**params)\n params_filename = 'htfft{}_params.vhd'.format(suffix)\n with open(params_filename, 'w') as g:\n g.write(formatted_text)\n\n pipeline_filename = make_pipeline_pkg(suffix, pipelines)\n\n return [params_filename, pipeline_filename] + unrolled_filenames + stage_filenames + [top_filename]\n\n\nclass HTFFTGenerator(Generator):\n\n def run(self):\n output_filenames = generate_htfft(\n n=self.config['n'],\n spcc=self.config['spcc'],\n input_width=self.config['input_width'],\n suffix=self.config['suffix'],\n pipelines=self.config['pipelines'],\n )\n self.add_files(output_filenames, file_type='vhdlSource')\n\n\ndef make_htfft_core(directory, 
suffix, n, spcc, input_width, pipelines):\n \"\"\"\n Utility function for generating a core file from python.\n \"\"\"\n params = {\n 'suffix': suffix,\n 'n': n,\n 'spcc': spcc,\n 'input_width': input_width,\n 'pipelines': pipelines,\n }\n template_filename = os.path.join(basedir, 'htfft.core.j2')\n with open(template_filename, 'r') as f:\n template_text = f.read()\n template = jinja2.Template(template_text)\n formatted_text = template.render(**params)\n top_filename = os.path.join(directory, 'htfft{}.core'.format(suffix))\n with open(top_filename, 'w') as g:\n g.write(formatted_text)\n\n\nif __name__ == '__main__':\n g = HTFFTGenerator()\n g.run()\n g.write()\n","repo_name":"benreynwar/htfft","sub_path":"htfft/htfft_gen.py","file_name":"htfft_gen.py","file_ext":"py","file_size_in_byte":4308,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"83"} +{"seq_id":"27100953351","text":"from sortedcontainers import SortedDict\n\nclass MyCalendarThree:\n\n def __init__(self):\n self.times = SortedDict()\n \n def book(self, start: int, end: int) -> int:\n self.times[start] = self.times.get(start, 0) + 1\n self.times[end] = self.times.get(end, 0) - 1\n \n ans = _sum = 0\n for target in self.times.values():\n _sum += target\n ans = max(ans, _sum)\n \n return ans\n \n\n# Your MyCalendarThree object will be instantiated and called as such:\n# obj = MyCalendarThree()\n# param_1 = obj.book(start,end)","repo_name":"ChengTsungPao/LeetCode","sub_path":"0732_My_Calendar_III/code2.py","file_name":"code2.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"83"} +{"seq_id":"27169475719","text":"from flask import Flask\nfrom celery_app import celery\nfrom pymongo import MongoClient\n\n#app\ndef create_app(config_name):\n app = Flask(__name__)\n app.config.from_object(app.config)\n\n # 添加蓝本\n from celery_app.domainviews import domain_blueprint\n from celery_app.ipviews import ipscan_blueprint\n from celery_app.pluginviews import pluginscan_blueprint\n app.register_blueprint(domain_blueprint)\n app.register_blueprint(ipscan_blueprint)\n app.register_blueprint(pluginscan_blueprint)\n\n return app\n\n#celery\ndef make_celery(app):\n class ContextTask(celery.Task):\n def __call__(self, *args, **kwargs):\n with app.app_context():\n return self.run(*args, **kwargs)\n\n celery.Task = ContextTask\n return celery\n\n\nclient = MongoClient(\"127.0.0.1\", 27017,connect=False)\n# 指定mongodb数据库\npapapa = client.papapa\npa_domain=papapa.pa_domain\npa_sub_domain=papapa.pa_sub_domain\npa_ip=papapa.pa_ip\npa_plugin=papapa.pa_plugin\npa_vuln=papapa.pa_vuln\npa_taskid=papapa.pa_taskid\n\n\n","repo_name":"qq431169079/papapa","sub_path":"app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"83"} +{"seq_id":"37978282996","text":"# Name: PSA1\n# Author(s): Garrett and Tyler\n# Date: 2-20-23\n# Description: This program takes in hospital and student prefrences for the Gale Shapley Algorithim\n# and outputs stable matchings for hospital-student connections.\n\n\n#Create a Queue class which will be used later for determining what hospital proposes next\nclass Queue:\n class Node:\n def __init__(self, value,next=None):\n self.value = value\n self.next = next\n\n def __init__(self):\n self.front = None\n self.rear = None\n\n def enqueue(self, value):\n new_node = self.Node(value)\n if self.rear 
is None:\n self.front = new_node\n else:\n self.rear.next = new_node\n self.rear = new_node\n\n def dequeue(self):\n if self.front is None:\n return None\n else:\n value = self.front.value\n self.front = self.front.next\n if self.front is None:\n self.rear = None\n return value\n \n def isEmpty(self):\n if self.front is None:\n return True\n else:\n return False\n def getNumPositions(self):\n return self.num_positions\n\ndef gale_shapley(filename):\n \"\"\"\n Runs Gale-Shapley algorithm on input\n from file filename. Format of input file\n given in problem statement.\n Returns a list containing hospitals assigned to each \n student, or None if a student is not assigned to a hospital.\n \"\"\"\n #PART 1\n #Get the number of hospitals and number of students\n file = open(filename, 'r')\n line1 = file.readline()\n num_hospital_and_students = line1.split()\n num_hospitals = int(num_hospital_and_students[0]) \n num_students = int(num_hospital_and_students[1])\n\n #PART 2\n #grab the number of positions for each hospital and throw them in a dict \n line2 = file.readline().split()\n \n hospital_positions = {}\n for i in range(num_hospitals):\n hospital_positions[i] = int(line2[i])\n\n #PART 3\n #creates a dictionary for hostpital preferences \n #Ex: {\"hospital_0\" : [0,2,1], \"hospital_1\" : [2,1,0]}\n\n hospital_prefs = {}\n\n result = []\n for i in range(num_hospitals):\n next_line = file.readline().split()\n nums = [int(num) for num in next_line[::-1]]\n result.append(nums)\n\n for i in range(num_hospitals):\n hospital_prefs[i] = result[i]\n\n #Part 4\n #Load the student preferences\n student_prefs_2 = loadStudentPreferences(file,num_students,num_hospitals)\n \n #Part 5\n #Get proposal order which is a Queue of hospital numbers\n propose_order = loadProposalOrder(num_hospitals,hospital_positions)\n\n #Part 6\n #Get matches\n matches = getMatches(hospital_prefs,student_prefs_2,propose_order)\n\n #Part 7\n #Convert Dict to List\n return_list = convertDictToList(matches,num_students)\n return return_list\n\ndef getMatches(hospital_prefs, student_prefs_2, propose_order):\n \"\"\"\n propose order is queue.\n hostpital_prefs is dictonary of lists.\n student_prefs is dictonary of dictonaries.\n This function is the implementation of the Gale Shapley Algorithim.\n \"\"\"\n matches = {}\n\n #determines the current hospital proposing\n #idea use a queue to keep track of hospitals who are proposing\n while (not propose_order.isEmpty()):\n hospital_num = propose_order.dequeue()\n\n #Check if hospital has proposed to every student in their preference list\n if(len(hospital_prefs[hospital_num])==0):\n continue\n else:\n #grab the hospitals next preference\n student = hospital_prefs[hospital_num].pop()\n\n #checks if s is unmatched\n if student not in matches:\n matches[student] = hospital_num\n\n #checks if s prefers h to current partner h'\n elif student_prefs_2[student][hospital_num] < student_prefs_2[student][matches[student]]:\n booted_hosp = matches[student]\n propose_order.enqueue(booted_hosp)\n del matches[student]\n matches[student] = hospital_num\n\n else:\n #student rejects hospital h\n propose_order.enqueue(hospital_num)\n return matches\n\ndef loadProposalOrder(num_hospitals,hospital_positions):\n \"\"\"\n This function takes in int num_hospitals and the hospital_position dictonary \n and returns a queue where the queue is the order of hospital proposals.\n Each hospital is in the Queue for as many positions they have.\n \"\"\"\n proposal_order = Queue()\n for hosp_num in 
range(num_hospitals):\n        for i in range(hospital_positions[hosp_num]):\n            proposal_order.enqueue(hosp_num)\n    return proposal_order\n\ndef loadStudentPreferences(file,num_students, num_hospitals):\n    \"\"\"\n    This function takes the file, num_students and num_hospitals in as parameters\n    and then loads each student's preferences into\n    a dictionary of dictionaries. The keys are\n    student_num and the vals are a dictionary in which\n    the keys are the hospital's num and its ranking\n    {student_x : {hospital_x : 1, hospital_z : 2}}\n    \"\"\"\n    #Load student pref dict setting all student rankings of every hospital to -1\n    student_preferences = {}\n    for stu in range(num_students):\n        student_preferences[stu] = {}\n        for hosp in range(num_hospitals):\n            student_preferences[stu][hosp]=-1\n\n    #now load the real preferences in\n    for student_num in range(num_students):\n        next_line = file.readline().split() \n        #start ranking at 1 and add one per iteration in below loop\n        ranking=1\n        for hospital_num in next_line:\n            hospital_num = int(hospital_num)\n            student_preferences[student_num][hospital_num] = ranking\n            ranking+=1\n    #Now any student that deems a hospital unacceptable has -1 assigned as its ranking for that hospital\n    return student_preferences\n\n\ndef convertDictToList(matches,num_students):\n    \"\"\"\n    This function takes the matches and puts them into the format that is to be compared by test_pa1.py\n    \"\"\"\n    return_list = []\n    for i in range(num_students):\n        if(i not in matches):\n            return_list.append(None)\n        else:\n            return_list.append(matches[i])\n    return return_list\n","repo_name":"kreidert6/Gale-Shapley-Implementation","sub_path":"pa1.py","file_name":"pa1.py","file_ext":"py","file_size_in_byte":6400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"9675703630","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\ndef main(argv):\n    if len(argv) < 2:\n        print(\"Input file name isn't specified. 
Using test input line.\")\n input_line = 'AGCTTTTCATTCTGACTGCAACGGGCAATATGTCT' \\\n 'CTGTGTGGATTAAAAAAAGAGTGTCTGATAGCAGC'\n else:\n with open(argv[1]) as f:\n input_line = f.readline()\n\n from collections import Counter\n\n cnt = Counter()\n for base in input_line:\n cnt[base] += 1\n\n print('{0} {1} {2} {3}'.format(cnt['A'], cnt['C'], cnt['G'], cnt['T']))\n\n\nif __name__ == \"__main__\":\n import sys\n main(sys.argv)\n","repo_name":"ivanyu/rosalind","sub_path":"bioinformatics_stronghold/dna/dna.py","file_name":"dna.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"28002536286","text":"import enum\nimport instructor\n\nfrom typing import List\nfrom openai import OpenAI\nfrom pydantic import BaseModel\n\nclient = instructor.patch(OpenAI())\n\n\n# Define new Enum class for multiple labels\nclass MultiLabels(str, enum.Enum):\n BILLING = \"billing\"\n GENERAL_QUERY = \"general_query\"\n HARDWARE = \"hardware\"\n\n\n# Adjust the prediction model to accommodate a list of labels\nclass MultiClassPrediction(BaseModel):\n predicted_labels: List[MultiLabels]\n\n\n# Modify the classify function\ndef multi_classify(data: str) -> MultiClassPrediction:\n return client.chat.completions.create(\n model=\"gpt-3.5-turbo-0613\",\n response_model=MultiClassPrediction,\n messages=[\n {\n \"role\": \"user\",\n \"content\": f\"Classify the following support ticket: {data}\",\n },\n ],\n ) # type: ignore\n\n\n# Example using a support ticket\nticket = (\n \"My account is locked and I can't access my billing info. Phone is also broken.\"\n)\nprediction = multi_classify(ticket)\nprint(prediction)\n","repo_name":"jxnl/instructor","sub_path":"examples/classification/multi_prediction.py","file_name":"multi_prediction.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","stars":2161,"dataset":"github-code","pt":"83"} +{"seq_id":"26946458495","text":"\"\"\"src URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/4.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.urls import path, include, re_path\nfrom rest_framework import permissions\nfrom drf_yasg.views import get_schema_view\nfrom drf_yasg import openapi\nfrom graphene_django.views import GraphQLView\n\nfrom custom_jwt.view import MyTokenObtainPairView\n\n\nschema_view = get_schema_view(\n openapi.Info(\n title=\"WS chat API\",\n default_version='v1',\n description=\"Qwe qwe\",\n contact=openapi.Contact(email=\"qwe@qwe.qwe\"),\n license=openapi.License(name=\"BSD License\"),\n ),\n public=True,\n permission_classes=[permissions.AllowAny],\n)\n\nversion_prefix = 'v1'\nrest_urls = [\n path('/chat_users/', include('chat.urls')),\n]\n\nurlpatterns = [\n re_path(r'^swagger(?P\\.json|\\.yaml)$', schema_view.without_ui(cache_timeout=0), name='schema-json'), # noqa\n re_path(r'^swagger/$', schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'), # noqa\n re_path(r'^redoc/$', schema_view.with_ui('redoc', cache_timeout=0), name='schema-redoc'), # noqa\n\n # rest drf\n re_path(r'^auth/login$', MyTokenObtainPairView.as_view(), name='rest_login'),\n re_path(r'^api/{}'.format(version_prefix), include(rest_urls)),\n\n # graphql\n re_path(r'^graphql$', GraphQLView.as_view(graphiql=True)),\n]\n","repo_name":"nxstranger/thou-server","sub_path":"src/core/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1936,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"41743868663","text":"\"\"\"\nDjango settings for littlespaceship project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.6/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.6/ref/settings/\n\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nfrom __future__ import absolute_import\nimport os\n\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = 'XXX'\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\nTEMPLATE_DEBUG = True\n\nALLOWED_HOSTS = []\n\n\n# Application definition\n\nINSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'rest_framework',\n 'spaceAPI',\n 'djcelery',\n 'south',\n 'flashpolicies',\n 'corsheaders',\n)\n\nMIDDLEWARE_CLASSES = (\n\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'corsheaders.middleware.CorsMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\nREST_FRAMEWORK = {\n 'DEFAULT_PERMISSION_CLASSES': ('rest_framework.permissions.IsAdminUser',),\n 'PAGINATE_BY': 10\n}\n\nROOT_URLCONF = 'littlespaceship.urls'\n\nWSGI_APPLICATION = 'littlespaceship.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/1.6/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.mysql',\n 'NAME': 'spaceshipdjango',\n\t'USER': 'spaceship',\n\t'PASSWORD': 
'julma72Ban**ni',\n\t'HOST': '127.0.0.1',\n\t'PORT': '3306',\n }\n}\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.6/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'Europe/Helsinki'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.6/howto/static-files/\n\n#STATIC_ROOT = '/home/tuomas/django/littlespaceship/static/'\n#STATIC_ROOT = '/vagrant/static/'\nSTATIC_ROOT = os.path.join(BASE_DIR, \"static/\")\nSTATIC_URL = '/static/'\n\n#Celery\n\nCELERY_RESULT_BACKEND='djcelery.backends.database:DatabaseBackend'\nCELERY_BEAT_SCHEDULER = 'djcelery.schedulers.DatabaseScheduler'\nCELERY_ACCEPT_CONTENT = ['json']\nCELERY_TASK_SERIALIZER = 'json'\nCELERY_RESULT_SERIALIZER = 'json'\nCELERY_TIMEZONE = 'Europe/Helsinki'\n\n#CORS\nCORS_ORIGIN_ALLOW_ALL = True\n#CORS_ORIGIN_WHITELIST = ()\n\nCORS_ALLOW_METHODS = (\n 'GET',\n 'POST',\n 'PUT',\n 'PATCH',\n 'DELETE',\n 'OPTIONS'\n)\n\nCORS_ALLOW_HEADERS = (\n 'x-requested-with',\n 'content-type',\n 'accept',\n 'origin',\n 'authorization',\n 'x-csrftoken',\n 'Access-Control-Allow-Origin',\n )","repo_name":"Tumetsu/little-spaceship-django-haxe","sub_path":"serverside/littlespaceship/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":3163,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"41139372026","text":"from matrix import Matrix\nfrom solve import solve\nimport sys\n\n\ndef create_filled_matrix(size: int, get_line) -> Matrix:\n A = []\n b = []\n for line in range(0, size):\n equation = list(map(float, get_line().split()))\n b.append(equation.pop())\n A.append(equation)\n return Matrix(size, A, b)\n\n\ndef validate_size(size: int):\n if not 0 < size <= 20:\n print('Matrix size must be in the range [1, 20].')\n exit(-1)\n return size\n\n\nif len(sys.argv) < 2:\n precision = input('Precision: ')\n size = validate_size(int(input('Matrix size: ')))\n print('Matrix coefficients: ')\n matrix = create_filled_matrix(size, input)\nelse:\n try:\n f = open(sys.argv[1], \"r\")\n precision = f.readline()\n size = validate_size(int(f.readline()))\n matrix = create_filled_matrix(size, f.readline)\n f.close()\n except FileNotFoundError:\n print('File not found.')\n exit(-1)\n\nprint('Source matrix:')\nprint(matrix)\n\nsolution = solve(matrix, precision)\n\nif solution:\n print('\\nSolution:')\n print(solution)\n\n","repo_name":"Vsev0l0d/se4-compMath-lab1","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"32700182812","text":"from shapes import Game\n\n\ndef brute_force(game):\n \"\"\"expects Game game\"\"\"\n # solves in original order\n valid_placements = Brute_Valid_Placements(game)\n iters = 0\n for next_placement in valid_placements:\n iters += 1\n for shape in next_placement:\n game.remove_shape(shape)\n game.place_shape(shape, next_placement[shape], check=False)\n if game.solved():\n break\n print('Took {} iterations'.format(iters))\n\n\nclass Brute_Valid_Placements(object):\n \"\"\"docstring for Brute_Valid_Placements\"\"\"\n def __init__(self, game):\n super(Brute_Valid_Placements, self).__init__()\n self.game = game\n self.current_inds = {shape: 0 for shape in game.shapes}\n self.updated_shapes = self.game.shapes\n self.stop_condition = {shape: len(shape.valid_positions)-1 for shape in self.game.shapes}\n\n def 
__iter__(self):\n while True:\n yield {shape: shape.valid_positions[self.current_inds[shape]] for shape in self.updated_shapes}\n if self.current_inds == self.stop_condition:\n break\n self.updated_shapes = []\n for shape in self.game.shapes:\n self.updated_shapes.append(shape)\n if self.current_inds[shape] >= self.stop_condition[shape]:\n self.current_inds[shape] = 0\n else:\n self.current_inds[shape] += 1\n break\n\n\ndef main(filename):\n game = Game(filename)\n print('number of possibilities: {}'.format(game.num_possibilities))\n brute_force(game)\n\n\nif __name__ == '__main__':\n from argparse import ArgumentParser\n parser = ArgumentParser()\n parser.add_argument('filename', help='The filename to use as input.')\n\n args = parser.parse_args()\n\n main(args.filename)\n","repo_name":"sidequestboy/shapeshifter","sub_path":"brute_force.py","file_name":"brute_force.py","file_ext":"py","file_size_in_byte":1835,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"83"} +{"seq_id":"73316971152","text":"# coding:utf-8\r\n\r\nimport datetime\r\nimport json\r\nimport sys\r\n# import traceback\r\nimport typing\r\n\r\n# import PyQt5\r\nimport serial\r\n# from PyQt5 import QtCore\r\nfrom PyQt5.QtCore import QCoreApplication, Qt\r\nfrom PyQt5.QtWidgets import (\r\n qApp, QApplication,\r\n QHBoxLayout,\r\n QWidget, QDockWidget,\r\n QFileDialog, QMainWindow, QMessageBox,\r\n QMenu, QAction,\r\n)\r\n\r\nfrom window.dockbar import (\r\n livedata, extpanel,\r\n)\r\n\r\nimport config\r\nfrom config import CFG\r\nimport protocol\r\nimport timer\r\nfrom protocol.base import (BaseProtocol, Btn_Base, Btn_Func, Btn_Func_Dir,\r\n Btn_Input)\r\nfrom window import SerialSettingWindow, WebChart\r\n\r\n\r\ndef round(a):\r\n if a % 1 > 0.4:\r\n return int(a)+1\r\n else:\r\n return int(a)\r\n\r\n\r\nclass MainWidget(QWidget):\r\n def __init__(self, parent=None):\r\n super().__init__()\r\n self.hbox = QHBoxLayout()\r\n\r\n self.web = WebChart(self)\r\n self.hbox.addWidget(self.web)\r\n self.setLayout(self.hbox)\r\n\r\n self.f1 = livedata.LiveData(self, \"实时数据\")\r\n self.ext_panels = []\r\n # self.f3 = Sub1_Window(self, \"f3\")\r\n\r\n self.parent = parent\r\n\r\n parent.addDockWidget(Qt.LeftDockWidgetArea, self.f1)\r\n # parent.addDockWidget(Qt.RightDockWidgetArea, self.f2)\r\n # parent.addDockWidget(Qt.RightDockWidgetArea, self.f3)\r\n\r\n if not CFG.gCfg.view.showLiveData:\r\n self.f1.hide()\r\n # self.f2.hide()\r\n # self.f3.hide()\r\n self.initUI()\r\n\r\n def initUI(self):\r\n pass\r\n\r\n def add_Ext_Panel(self, panel):\r\n self.ext_panels.append(panel)\r\n self.parent.addDockWidget(Qt.RightDockWidgetArea, panel)\r\n\r\n def closeAll_Ext_Panel(self):\r\n for p in self.ext_panels:\r\n p.hide()\r\n p.close()\r\n\r\n\r\nclass MyMainWindow(QMainWindow):\r\n\r\n def __init__(self, parent=None):\r\n super().__init__()\r\n self.setWindowTitle(\"USB temperature monitor\")\r\n while(config.CFG.data is None):\r\n try:\r\n config.loadConfig()\r\n except FileNotFoundError as e:\r\n msgBox = QMessageBox()\r\n msgBox.setIcon(QMessageBox.Critical)\r\n s = \"{0}\\r\\n\".format(e)\r\n s += \"点击确定开始配置\"\r\n msgBox.setText(s)\r\n msgBox.setWindowTitle(\"找不到配置文件\")\r\n msgBox.setStandardButtons(QMessageBox.Ok)\r\n msgBox.exec()\r\n subW = SerialSettingWindow()\r\n subW.exec()\r\n print(\"config init OK\")\r\n self.w = MainWidget(self)\r\n self.initUI()\r\n self.cp = []\r\n\r\n self.ttt_data = []\r\n self.ttt_live_data = []\r\n self.ttt = timer.Timer(\r\n 
delay=CFG.gCfg.chart.chartRefreshInterval,\r\n            acc=0.01,\r\n            fn=self.onClock)\r\n        self.ttt_liveData = timer.Timer(\r\n            delay=1, acc=0.01, fn=self.onClock_LiveData)\r\n\r\n        self.resize(900, 600)\r\n\r\n        self.data = []\r\n        self.onceU = 0\r\n        self.startTime = None\r\n        self.A_time = None\r\n        self.nn = 0\r\n        self.nnn = 0\r\n\r\n        self.sss = []\r\n\r\n        self.att = None\r\n\r\n        self.viewmode = WebChart.ViewMode_All\r\n\r\n    def onClock_LiveData(self):\r\n        dur = None\r\n        n = datetime.datetime.now()\r\n\r\n        if self.startTime is not None:\r\n            dur = n - self.startTime\r\n            dur = datetime.timedelta(\r\n                milliseconds=round(dur.total_seconds())*1000)\r\n        self.w.f1.push_meta_data([dur, len(self.data)])\r\n        self.w.f1.push_data(self.ttt_live_data)\r\n\r\n    def onClock(self):\r\n        n = datetime.datetime.now()\r\n        dur = None\r\n\r\n        if self.onceU == 0:\r\n            is_empty = False\r\n            for v in self.ttt_data:\r\n                if v in [None, \"\"]:\r\n                    is_empty = True\r\n                    break\r\n\r\n            if is_empty:\r\n                return\r\n            else:\r\n                self.onceU += 1\r\n                self.startTime = n\r\n                dur = datetime.timedelta(seconds=0)\r\n                self.A_time = n\r\n        dur = n-self.startTime\r\n        self.A_time = n\r\n        dur = datetime.timedelta(\r\n            milliseconds=round(dur.total_seconds()*100)*10)\r\n        ss = \"{0}\".format(dur)\r\n\r\n        # qb.web.push_data(self.ttt_data)\r\n        self.w.web.push_data({\r\n            \"timestamp\": ss,\r\n            \"data\": self.ttt_data,\r\n        })\r\n\r\n        self.data.append({\r\n            \"timestamp\": dur.total_seconds(),\r\n            \"data\": self.ttt_data,\r\n        })\r\n        self.ttt_live_data = self.ttt_data\r\n\r\n        self.ttt_data = []\r\n        for _ in config.CFG.getAllCurChannel_Comp():\r\n            self.ttt_data.append(\"\")\r\n\r\n    def initUI(self):\r\n\r\n        menubar = self.menuBar()\r\n\r\n        fileMenu = menubar.addMenu(\"文件\")\r\n        settingMenu = menubar.addMenu(\"设置\")\r\n        ProtocolMenu = menubar.addMenu(\"通讯协议\")\r\n        ChartMenu = menubar.addMenu(\"图表显示\")\r\n        helpMenu = menubar.addMenu(\"帮助\")\r\n\r\n        saveAction = QAction(self)\r\n        saveAction.setText(\"保存数据\")\r\n        saveAction.triggered.connect(self.on_save)\r\n        fileMenu.addAction(saveAction)\r\n\r\n        loadAction = QAction(self)\r\n        loadAction.setText(\"加载数据\")\r\n        loadAction.triggered.connect(self.on_import)\r\n        fileMenu.addAction(loadAction)\r\n\r\n        exitAction = QAction(self)\r\n        exitAction.setText(\"退出\")\r\n        exitAction.triggered.connect(qApp.quit)\r\n        fileMenu.addAction(exitAction)\r\n\r\n        self.btnsMenu = settingMenu.addMenu(\"扩展\")\r\n        self.btnsMenu.setDisabled(True)\r\n\r\n        self.serialAction = QAction(self)\r\n        self.serialAction.setText(\"串口参数\")\r\n        self.serialAction.triggered.connect(self.on_click_serial_setting)\r\n        settingMenu.addAction(self.serialAction)\r\n\r\n        self.startAction = QAction(self)\r\n        self.startAction.setText(\"启动采样\")\r\n        self.startAction.triggered.connect(self.on_start)\r\n        settingMenu.addAction(self.startAction)\r\n\r\n        self.pauseAction = QAction(self)\r\n        self.pauseAction.setText(\"暂停采样\")\r\n        self.pauseAction.setDisabled(True)\r\n        self.pauseAction.triggered.connect(self.on_pause)\r\n        settingMenu.addAction(self.pauseAction)\r\n\r\n        for a in protocol.M:\r\n\r\n            aMenu = ProtocolMenu.addMenu(a.Name)\r\n            aMenu.addMenu(\"名称:{}\".format(a.Name))\r\n            aMenu.addMenu(\"设备号:{}\".format(a.Device))\r\n            aMenu.addMenu(\"简介:{}\".format(a.Description))\r\n            channels = aMenu.addMenu(\"通道数目:{}\".format(len(a.xAxis)))\r\n\r\n            for ii in a.xAxis:\r\n                nn = channels.addMenu(ii[\"Name\"])\r\n\r\n                for k in ii.keys():\r\n                    nn.addMenu(\"{0}: {1}\".format(k, ii[k]))\r\n\r\n        def aaa(checked):\r\n            if checked:\r\n                self.w.f1.show()\r\n                CFG.gCfg.view.showLiveData = True\r\n            
else:\r\n self.w.f1.hide()\r\n CFG.gCfg.view.showLiveData = False\r\n\r\n self.dockBarMenu = ChartMenu.addMenu(\"侧边栏\")\r\n\r\n self.dockAction_LiveData = QAction(\"实时数据\", self)\r\n self.dockAction_LiveData.setCheckable(True)\r\n if CFG.gCfg.view.showLiveData:\r\n self.dockAction_LiveData.setChecked(True)\r\n self.dockAction_LiveData.triggered.connect(aaa)\r\n self.dockBarMenu.addAction(self.dockAction_LiveData)\r\n\r\n self.EtxDockMenu = self.dockBarMenu.addMenu(\"扩展面板\")\r\n self.EtxDockMenu.setDisabled(True)\r\n\r\n self.viewLastAction_list = []\r\n\r\n def on_set_view_last(num, viewLastAction_list):\r\n def aaa(checked):\r\n if checked:\r\n for k, v in enumerate(viewLastAction_list):\r\n if v.text() == \"x={0}\".format(num):\r\n v.setChecked(True)\r\n else:\r\n v.setChecked(False)\r\n CFG.gCfg.view.showLatestView = num\r\n self.w.web.viewmode = WebChart.ViewMode_Last\r\n self.w.web.viewModeLast = num\r\n return aaa\r\n\r\n def on_set_view_all(viewLastAction_list):\r\n def aaa(checked):\r\n for k, v in enumerate(viewLastAction_list):\r\n if k == 0:\r\n v.setChecked(True)\r\n else:\r\n v.setChecked(False)\r\n self.w.web.viewmode = WebChart.ViewMode_All\r\n return aaa\r\n\r\n viewLastMenu = ChartMenu.addMenu(\"显示最后x个数据\")\r\n self.viewAllAction = QAction(\"显示全部\", self)\r\n self.viewAllAction.setCheckable(True)\r\n self.viewAllAction.triggered.connect(\r\n on_set_view_all(self.viewLastAction_list))\r\n self.viewLastAction_list.append(self.viewAllAction)\r\n viewLastMenu.addAction(self.viewAllAction)\r\n for i in [20, 50, 100, 200, 500, 1000, 5000, 10000]:\r\n viewLastAction = QAction(\"x={}\".format(i), self)\r\n viewLastAction.setCheckable(True)\r\n viewLastAction.triggered.connect(\r\n on_set_view_last(i, self.viewLastAction_list))\r\n viewLastMenu.addAction(viewLastAction)\r\n self.viewLastAction_list.append(viewLastAction)\r\n\r\n if CFG.gCfg.view.showLatestView == 0:\r\n self.viewAllAction.setChecked(True)\r\n else:\r\n on_set_view_last(CFG.gCfg.view.showLatestView,\r\n self.viewLastAction_list)(True)\r\n\r\n refreshAction = QAction(\"刷新视图\", self)\r\n refreshAction.triggered.connect(self.on_refresh)\r\n ChartMenu.addAction(refreshAction)\r\n\r\n aboutAction = QAction(self)\r\n aboutAction.setText(\"关于这个软件\")\r\n helpMenu.addAction(aboutAction)\r\n\r\n self.setCentralWidget(self.w)\r\n\r\n def closeEvent(self, event):\r\n config.saveConfig()\r\n event.accept()\r\n\r\n def on_refresh(self):\r\n self.w.web.set_data(self.data)\r\n\r\n def on_update_btn_list(self):\r\n pass\r\n\r\n def on_click_serial_setting(self):\r\n subW = SerialSettingWindow(self)\r\n subW.exec()\r\n\r\n def on_start(self):\r\n all_chan = config.CFG.getAllCurChannel_Comp()\r\n all_chan_a = config.CFG.getAllCurChannel()\r\n self.ttt_data = []\r\n self.cp = []\r\n self.w.closeAll_Ext_Panel()\r\n\r\n for _ in all_chan:\r\n self.ttt_data.append(\"\")\r\n try:\r\n # self.w.web.reload()\r\n self.w.web.set_data([])\r\n self.w.web.set_channels(all_chan)\r\n\r\n self.w.f1.set_channels(all_chan_a)\r\n\r\n self.sss = self.openSerial()\r\n self.serialAction.setDisabled(True)\r\n self.startAction.setDisabled(True)\r\n self.pauseAction.setDisabled(False)\r\n self.btnsMenu.setDisabled(False)\r\n self.statusBar().showMessage(\"启动采样\")\r\n self.data = []\r\n self.ttt.Run()\r\n self.ttt_liveData.Run()\r\n self.setWindowTitle(\"USB temperature monitor | 数据采样启动\")\r\n\r\n except serial.serialutil.SerialException as e:\r\n # subW = SerialSettingWindow(self)\r\n # returnValue = subW.exec()\r\n print(\"e: {0}\".format(e))\r\n pass\r\n # 
raise e\r\n\r\n    def on_pause(self):\r\n        self.ttt.Stop()\r\n        self.ttt_liveData.Stop()\r\n        self.onceU = 0\r\n        self.btnsMenu.clear()\r\n        for k, v in enumerate(self.cp):\r\n            try:\r\n                v.onClose()\r\n            except serial.serialutil.SerialException as e:\r\n                msgBox = QMessageBox()\r\n                msgBox.setIcon(QMessageBox.Critical)\r\n                msgBox.setText(\r\n                    \"错误:{0}\".format(e))\r\n                msgBox.setWindowTitle(\"关闭串口发生错误\")\r\n                msgBox.setStandardButtons(QMessageBox.Ok)\r\n                _ = msgBox.exec()\r\n        self.cp = None\r\n        self.ttt_data = None\r\n        for k, v in enumerate(self.sss):\r\n            if v is not None:\r\n                v.close()\r\n        # closeSerial()\r\n        self.serialAction.setDisabled(False)\r\n        self.startAction.setDisabled(False)\r\n        self.pauseAction.setDisabled(True)\r\n        self.btnsMenu.setDisabled(True)\r\n        self.statusBar().showMessage(\"暂停采样\")\r\n        self.setWindowTitle(\"USB temperature monitor | 数据采样暂停\")\r\n\r\n    def on_import(self):\r\n        fpath, flit = QFileDialog.getOpenFileName(\r\n            parent=self,\r\n            caption=\"导入数据,格式:{0}\".format(\r\n                format),\r\n            filter='csv文件(*.csv)',\r\n        )\r\n        import_data = []\r\n        all_chan = []\r\n\r\n        lines = []\r\n\r\n        if fpath == \"\":\r\n            return\r\n\r\n        with open(fpath, 'r') as f:\r\n            lines = f.readlines()\r\n\r\n        for k, line in enumerate(lines):\r\n            lines[k] = line[:-1]\r\n\r\n        for item in lines[0].split(',')[1:]:\r\n            all_chan.append({\r\n                \"Name\": item,\r\n            },)\r\n\r\n        # t_start = datetime.datetime.strptime(\r\n        #     '2021-01-06 20:00:00',\r\n        #     \"%Y-%m-%d %H:%M:%S\") - datetime.timedelta(seconds=109395)\r\n\r\n        for item in lines[1:]:\r\n            item_data = []\r\n            items = item.split(',')\r\n            for k, i in enumerate(items[1:]):\r\n                if k >= len(all_chan):\r\n                    break\r\n                if i == \"\":\r\n                    item_data.append(\"\")\r\n                else:\r\n                    item_data.append(float(i))\r\n\r\n            t = float(items[0])\r\n\r\n            # t_a = datetime.timedelta(milliseconds=round(t*100)*10)\r\n            # t_a = t_start + datetime.timedelta(milliseconds=round(t*100)*10)\r\n\r\n            import_data.append({\r\n                'timestamp': \"{0:02d}:{1:02d}:{2:02d}.{3:02d}\".format(\r\n                    int(t / 60 / 60),\r\n                    int(t / 60 % 60),\r\n                    int(t % 60),\r\n                    int(t*100 % 100),\r\n                ),\r\n                'data': item_data,\r\n            })\r\n\r\n        msgBox = QMessageBox()\r\n        msgBox.setIcon(QMessageBox.Information)\r\n        s = \"\"\r\n        s += \"文件路径:{0}\\n\".format(fpath)\r\n        s += \"通道数:{0}\\n\".format(len(all_chan))\r\n        s += \"数据量:{0}\\n\".format(len(import_data))\r\n        msgBox.setText(s)\r\n        msgBox.setWindowTitle(\"导入文件信息\")\r\n        msgBox.setStandardButtons(QMessageBox.Ok | QMessageBox.Cancel)\r\n        returnValue = msgBox.exec()\r\n\r\n        if returnValue == QMessageBox.Ok:\r\n            print(\"数据导入 len:{0} path:{1}\".format(len(import_data), fpath))\r\n            self.w.web.reload()\r\n            self.w.web.set_channels(all_chan)\r\n            self.w.web.set_data(import_data)\r\n            self.setWindowTitle(\r\n                \"USB temperature monitor | 导入文件 | {0}\".format(fpath))\r\n            self.data = import_data\r\n        elif returnValue == QMessageBox.Cancel:\r\n            print(\"数据导入 取消\")\r\n\r\n    def on_save(self):\r\n        fpath, flit = QFileDialog.getSaveFileName(\r\n            parent=self, caption=\"保存数据\", filter='csv文件(*.csv);;json文件(*.json)')\r\n        # print(\"Path: {0} {1}\".format(fpath, flit))\r\n        if fpath == \"\":\r\n            return\r\n        if flit == \"csv文件(*.csv)\":\r\n            with open(fpath, 'w+') as f:\r\n\r\n                tit = \"time\"\r\n\r\n                for i in config.CFG.getAllCurChannel_Comp():\r\n                    tit += \",{0}\".format(i[\"Name\"])\r\n\r\n                # print(\"通道:{0}\".format(tit))\r\n\r\n                f.write(tit+\"\\n\")\r\n                for a in self.data:\r\n                    f.write(\"{0},\".format(a[\"timestamp\"]))\r\n                    for ii in a[\"data\"]:\r\n                        f.write(\"{0},\".format(ii))\r\n                    f.write(\"\\n\")\r\n        elif flit == 
\"json文件(*.json)\":\r\n with open(fpath, 'w+') as f:\r\n f.write(json.dumps({\r\n \"channels\": config.CFG.getAllCurChannel_Comp(),\r\n \"data\": self.data,\r\n }))\r\n self.statusBar().showMessage(\"存储采样数据,格式: {}\".format(flit))\r\n\r\n msgBox = QMessageBox()\r\n msgBox.setIcon(QMessageBox.Information)\r\n s = \"\"\r\n s += \"文件路径:{0}\\n\".format(fpath)\r\n s += \"数据量:{0}\".format(len(self.data))\r\n msgBox.setText(s)\r\n msgBox.setWindowTitle(\"保存成功\")\r\n msgBox.setStandardButtons(QMessageBox.Ok)\r\n _ = msgBox.exec()\r\n return None\r\n\r\n def gen_ser_recv(self, index):\r\n padding = 0\r\n for k1, v1 in enumerate(config.CFG.data[\"serial\"]):\r\n if k1 == index:\r\n break\r\n padding += len(v1[\"chann\"])\r\n chann = config.CFG.data[\"serial\"][index][\"chann\"]\r\n\r\n def fn(body):\r\n dd = self.cp[index].parsePkg(body)\r\n if len(dd) == 0:\r\n return\r\n\r\n for k, v in enumerate(chann):\r\n if k >= len(dd):\r\n self.ttt_data[padding+k] = 0\r\n else:\r\n self.ttt_data[padding+k] = dd[v]\r\n return fn\r\n\r\n def _addExtBtnMenu_Intput(self, menu: QMenu, Btn_Input: Btn_Input):\r\n btn_act = QAction(Btn_Input.name, self)\r\n menu.addAction(btn_act)\r\n # w = Btn_Input_Window(Btn_Input)\r\n\r\n w = extpanel.ExtPanel(\r\n Btn_Input,\r\n # name=\"{0}-{1}\".format(Btn_Input.name),\r\n parent=self)\r\n self.w.add_Ext_Panel(w)\r\n\r\n def aaa():\r\n w.close()\r\n w.show()\r\n # if w.exec() != 0:\r\n # print(\"{0} {1} args {2}\".format(\r\n # menu.title(), Btn_Input.name, w.args))\r\n btn_act.triggered.connect(aaa)\r\n\r\n def _addExtBtnMenu(self, menu: QMenu, btn_func_dir: typing.List[Btn_Base]):\r\n for i in btn_func_dir:\r\n if not isinstance(i, Btn_Base):\r\n continue\r\n if type(i) is Btn_Func:\r\n btn_act = QAction(self)\r\n menu.addAction(btn_act)\r\n if i.func is not None:\r\n btn_act.setText(i.name)\r\n btn_act.triggered.connect(i.func)\r\n else:\r\n btn_act.setText(\"{0} NoFunc\".format(i.name))\r\n elif type(i) is Btn_Func_Dir:\r\n self._addExtBtnMenu(menu.addMenu(i.name), i.btn_items)\r\n elif type(i) is Btn_Input:\r\n self._addExtBtnMenu_Intput(menu, i)\r\n else:\r\n btn_act = QAction(\r\n \"??? 
{0} {1}\".format(i.name, type(i)), self)\r\n btn_act.setDisabled(True)\r\n menu.addAction(btn_act)\r\n\r\n def addExtBtnMenu(self):\r\n for k, v in enumerate(config.CFG.data[\"serial\"]):\r\n c = self.cp[k]\r\n btn_menu = self.btnsMenu.addMenu(\"{0}\".format(v[\"name\"]))\r\n if c.M_Btn is not None:\r\n self._addExtBtnMenu(btn_menu, c.M_Btn)\r\n\r\n def openSerial(self):\r\n try:\r\n sss = []\r\n chann_index = 0\r\n for k, v in enumerate(config.CFG.data[\"serial\"]):\r\n\r\n if v[\"port\"] == \"NaN\":\r\n sss.append(None)\r\n else:\r\n s = serial.Serial(\r\n port=v[\"port\"],\r\n baudrate=v[\"baud\"],\r\n )\r\n sss.append(s)\r\n\r\n c = protocol.find(v[\"deviceType\"])(s)\r\n c.onOpen()\r\n self.cp.append(c)\r\n\r\n def on_serialPackage(body):\r\n dd = c.parsePkg(body)\r\n print(\"k {0} aa {1} {2}\".format(k, v[\"chann\"], dd))\r\n # for k1, v1 in enumerate(v[\"chann\"]):\r\n # self.ttt_data[chann_index+k1] = dd[v1]\r\n # self.uData(dd)\r\n chann_index += len(v[\"chann\"])\r\n\r\n if v[\"deviceType\"] == \"Tool8775C1\":\r\n self.aat = protocol.SerialThread(s, self.gen_ser_recv(\r\n k), BaseProtocol.ProtocolType_T87_RS232)\r\n else:\r\n self.aat = protocol.SerialThread(s, self.gen_ser_recv(k))\r\n self.aat.setDaemon(True)\r\n self.aat.start()\r\n self.addExtBtnMenu()\r\n\r\n # def s_fn():\r\n # return b\"\\x01\"\r\n\r\n # aat = protocol.SerialThreadSend(sss, fn=s_fn)\r\n # aat.setDaemon(True)\r\n # aat.start()\r\n return sss\r\n\r\n except serial.serialutil.SerialException as e:\r\n\r\n for k, v in enumerate(sss):\r\n if v != \"None\":\r\n v.close()\r\n\r\n msgBox = QMessageBox()\r\n msgBox.setIcon(QMessageBox.Critical)\r\n msgBox.setText(\r\n \"错误:{0}\".format(e))\r\n msgBox.setWindowTitle(\"打开串口发生错误\")\r\n msgBox.setStandardButtons(QMessageBox.Ok)\r\n msgBox.exec()\r\n raise e\r\n\r\n\r\ndef app_panic_hook(exctype, value, ttraceback):\r\n print('My Error Information')\r\n print('Type:', exctype)\r\n print('Value:', value)\r\n\r\n log_file = \"panic.log\"\r\n\r\n msg_txt = \"\"\r\n msg_txt += \"崩溃日志文件:{0}\\r\\n\".format(log_file)\r\n tb_txt = \"\"\r\n tb_txt += \"=\"*79+\"\\n\"\r\n tb_txt += \"exception:\\n {0}\\n {1}\\n\".format(exctype, value)\r\n tb_txt += \"=\"*79+\"\\n\"\r\n tb_txt += \"trace:\\n\"\r\n while ttraceback:\r\n tb_txt += \" {0}\\n\".format(ttraceback.tb_frame)\r\n ttraceback = ttraceback.tb_next\r\n msg_txt += tb_txt\r\n\r\n with open(log_file, 'w+') as f:\r\n f.write(tb_txt)\r\n\r\n print(msg_txt)\r\n msgBox = QMessageBox()\r\n # msgBox.setIcon(QMessageBox.Critical)\r\n msgBox.setText(msg_txt)\r\n msgBox.setWindowTitle(\"程序崩溃\")\r\n msgBox.setStandardButtons(QMessageBox.Ok)\r\n msgBox.exec()\r\n sys.exit(1)\r\n\r\n\r\nif __name__ == '__main__':\r\n sys.excepthook = app_panic_hook\r\n QCoreApplication.setAttribute(Qt.AA_EnableHighDpiScaling)\r\n app = QApplication(sys.argv)\r\n qb = MyMainWindow()\r\n print(\"Start Done\")\r\n\r\n qb.show()\r\n sys.exit(app.exec())\r\n","repo_name":"xhyangxianjun/UTM","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":22462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"9690439291","text":"import dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output, State\nimport logging\nimport plotly.graph_objs as go\nimport numpy as np\nimport os\nimport configparser \nimport datetime as dt\nfrom datetime import timedelta\nimport utils\nimport random\nimport pickle\nimport dateutil.parser as 
dt_parser\nfrom pprint import pprint as pp\nimport base64\nimport time \nimport uuid\nimport pandas as pd\n\nlogging.basicConfig(\n filename='monitor.log',\n level=logging.INFO,\n format='%(asctime)s %(message)s')\nlogger = logging.getLogger(__name__)\n\nconfig = configparser.ConfigParser()\nconfig.read('../configs/monitor.conf')\n\nexternal_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\napp = dash.Dash(__name__, external_stylesheets=external_stylesheets)\n\napp.title = 'AstroWatchdog'\nRANGE = [0, 1]\n\nimg_width = 1400\nimg_height = 1200\nscale_factor = 0.5\n\"\"\"\nhtml.H2('Object name', className='my-class', id='obj_name'),\n html.H2('Image time', className='my-class alert', id='im_time'),\n html.H2('Exptime', className='my-class', id='im_exptime'),\n html.H2('Filter', className='my-class', id='im_filter')\n\"\"\"\n\n\nMAIN_GRAPH_LAYOUT = go.Layout(title='',\n paper_bgcolor='rgba(0, 0, 0, 0)',\n plot_bgcolor='rgba(0, 0, 0, 0)',\n margin={'l': 15, 'r': 5, 't': 25},\n template='plotly_dark',\n height=200,\n )\n\nGRAPHS_POINTS_NUMBER = 20\nALARM_TIME = 5 # min\nREFRESH_FREQ = 5 # sec\n\napp.layout = html.Div([\n html.Div([\n html.Div([\n dcc.Graph(id='image', figure=[]),\n ], className='figure'),\n html.Div([\n html.Div([\n html.H3('Object name'),\n html.H3('---', id='object_name_val'),\n ],),\n html.Div([\n html.H3('Image time [UT]'),\n html.H3('---', id='image_time_val'),\n ], className='it'),\n html.Div([\n html.H3('Exptime [s]'), \n html.H3('---', id='image_exptime_val'),\n ], className='et'),\n html.Div([\n html.H3('Filter'), \n html.H3('---', id='image_filter_val', \n ),\n ],),\n html.Div([\n html.H3('Last data [min]',), \n html.H3('---', id='time_from_last_val'),\n ]),\n ], className='data')\n ]),\n html.Div([\n dcc.Graph(id='snr_graph',\n figure={\n 'data': [],\n 'layout': MAIN_GRAPH_LAYOUT},\n className='snr_graph'),\n dcc.Graph(id='flux_max_graph',\n figure={\n 'data': [],\n 'layout': MAIN_GRAPH_LAYOUT},\n className='flux_max_graph'),\n dcc.Graph(id='bkg_graph', \n figure={\n 'data': [],\n 'layout': MAIN_GRAPH_LAYOUT},\n className='bkg_graph'),\n dcc.Graph(id='fwhm_graph', \n figure={\n 'data': [],\n 'layout': MAIN_GRAPH_LAYOUT},\n className='fwhm_graph'),\n\n html.Button('Refresh', id='refresh_button',\n className='refresh_button',\n n_clicks_timestamp=time.time()*1000),\n # n_clicks_timestamp=0),\n ], className='graph_1_box'),\n dcc.Interval(\n id='interval',\n interval=REFRESH_FREQ*1000),\n html.Div(id='data_div', children=0, style={'display': 'none'}),\n html.Div(id='test_data_div', children=0, style={'display': 'none'}),\n], style={'backgroundColor': 'black'}, className='main', id='main_div',)\n\n\n@app.callback([Output('data_div', 'children'),\n Output('data_div', 'data-last'),\n Output('data_div', 'data-main')],\n [Input('refresh_button', 'n_clicks_timestamp'),\n Input('interval', 'n_intervals')],\n [State('refresh_button', 'n_clicks_timestamp')])\n@utils.dump_func_name\ndef update_data(_, __, refresh_timestamp):\n last_point, data = utils.get_influxdb_data(influxdb_client,\n influxdb_df_client,\n refresh_timestamp)\n print(data)\n return time.time(), last_point, data\n\n@app.callback([Output('object_name_val', 'children'),\n Output('image_time_val', 'children'),\n Output('image_exptime_val', 'children'),\n Output('image_filter_val', 'children'),\n Output('time_from_last_val', 'children'),\n Output('main_div', 'style')],\n [Input('data_div', 'data-last')])\n@utils.dump_func_name\ndef update_image_info(data):\n image_datetime = 
dt_parser.parse(data['image_time'])\n image_time_str = image_datetime.time().strftime(\"%H:%M:%S\")\n minutes_from_last = (\n image_datetime + dt.timedelta(seconds=float(data['EXPTIME']))- dt.datetime.utcnow()\n ).total_seconds() / 60.\n\n if minutes_from_last < -ALARM_TIME:\n website_bkg_color = {'backgroundColor': 'red'}\n else:\n website_bkg_color = {'backgroundColor': 'black'}\n\n return (data['OBJECT'], image_time_str, data['EXPTIME'], data['FILTER'],\n int(minutes_from_last), website_bkg_color)\n\n\n@app.callback(Output('image', 'figure'),\n [Input('data_div', 'children')])\n@utils.dump_func_name\ndef update_image(_):\n\n encoded_image = base64.b64encode(\n open('./assets/main_plot.png', 'rb').read())\n\n layout = go.Layout(\n xaxis = go.layout.XAxis(\n visible = False,\n range = [10, img_width*scale_factor]),\n yaxis = go.layout.YAxis(\n visible=False,\n range = [10, img_height*scale_factor],\n scaleanchor = 'x'),\n width = img_width*scale_factor,\n height = img_height*scale_factor,\n margin = {'l': 0, 'r': 0, 't': 0, 'b': 0},\n images = [go.layout.Image(\n x=0,\n sizex=img_width*scale_factor,\n y=img_height*scale_factor,\n sizey=img_height*scale_factor,\n xref=\"x\",\n yref=\"y\",\n opacity=1.0,\n layer=\"below\",\n sizing=\"stretch\",\n source='data:image/png;base64,{}'.format(encoded_image.decode()))\n ],\n paper_bgcolor='rgba(0,0,0,0)',\n plot_bgcolor='rgba(0,0,0,0)'\n )\n\n figure={'data': [],\n 'layout': layout}\n\n return figure\n\n\n@utils.dump_func_name\ndef create_base_graph(data, data_last, data_key, title_prefix, **kwargs):\n\n fig_data = []\n title_value = \"\"\n\n if data:\n title_value = f\"{data_last[data_key]}\"\n\n for name, value in data.items():\n value = pd.read_json(value)\n value = value.sort_values(by='image_time')\n value = value.tail(GRAPHS_POINTS_NUMBER)\n x = value['image_time']\n y = value[data_key]\n trace = go.Scatter(\n x=x,\n y=y,\n name=name,\n mode = 'lines+markers')\n fig_data.append(trace)\n\n figure = {\n 'data': fig_data,\n 'layout': go.Layout(title=f'{title_prefix}: {title_value}',\n paper_bgcolor='rgba(0, 0, 0, 0)',\n plot_bgcolor='rgba(0, 0, 0, 0)',\n margin={\n 'l': kwargs.get('margin_l', 15),\n 'r': kwargs.get('margin_r', 5),\n 't': kwargs.get('margin_t', 25),\n },\n template='plotly_dark',\n height=kwargs.get('height', 200),\n width=kwargs.get('width', 600),\n )\n }\n\n return figure\n\n\n@app.callback(Output('snr_graph', 'figure'),\n [Input('data_div', 'data-main')],\n [State('data_div', 'data-last'),\n State('snr_graph', 'figure')])\n@utils.dump_func_name\ndef create_snr_graph(data, data_last, figure):\n\n figure = create_base_graph(data, data_last, 'SNR_WIN', 'SNR', margin_t=30)\n return figure\n\n@app.callback(Output('flux_max_graph', 'figure'),\n [Input('data_div', 'data-main')],\n [State('data_div', 'data-last'),\n State('flux_max_graph', 'figure')])\n@utils.dump_func_name\ndef create_fluxmax_graph(data, data_last, figure): \n\n figure = create_base_graph(data, data_last, 'FLUX_MAX', 'FLUX MAX')\n return figure\n\n@app.callback(Output('bkg_graph', 'figure'),\n [Input('data_div', 'data-main')],\n [State('data_div', 'data-last'),\n State('bkg_graph', 'figure')])\n@utils.dump_func_name\ndef create_bgk_value_graph(data, data_last, figure):\n \n figure = create_base_graph(data, data_last, 'BACKGROUND', 'BKG')\n return figure\n\n@app.callback(Output('fwhm_graph', 'figure'),\n [Input('data_div', 'data-main')],\n [State('data_div', 'data-last'),\n State('fwhm_graph', 'figure')])\n@utils.dump_func_name\ndef create_fwhm_graph(data, data_last, 
figure):\n\n figure = create_base_graph(data, data_last, 'FWHM_IMAGE', 'FWHM')\n return figure\n\n\n\n\napp.css.append_css({\n \"external_url\": \"/static/main.css\"})\n\nif __name__ == '__main__':\n\n influxdb_client, influxdb_df_client = utils.get_influxdb_clients()\n app.run_server(host=\"0.0.0.0\", port=8050, debug=False, threaded=False)\n","repo_name":"MichalZG/AstroWatchdog","sub_path":"monitor/monitor.py","file_name":"monitor.py","file_ext":"py","file_size_in_byte":9639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"8819398620","text":"import tkinter as tk\nfrom pathlib import Path\nfrom PIL import Image\n\nwindow = tk.Tk()\n\n#code here\nwindow.title('Display an Image')\nwindow.configure(background='grey')\n\npath = Path(\"C:/Python/voham.png\")\n\nimg = tk.PhotoImage(file=path)\nimage1 = Image.open(path)\n\n#Size of the image\nwidth, height = image1.size\nprint(width)\nprint()\nprint(height)\n\n#window.size(width, height)\nlabel = tk.Label(image=img)\nlabel.pack()\n# Code ends\nwindow.mainloop()\n\n\n\n\n\n","repo_name":"go2vn-dev/FakeFaceAI","sub_path":"scratch.py","file_name":"scratch.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"27288952069","text":"import numpy as np\nimport cv2\nfrom util import BAR_SPEED\n\nSYNTHESIA_BG_COLOR = 65\nKEY_HEIGHT_FRAC = 6\nKEY_WIDTH_FRAC = 52\nKEY_WIDTH_FRAC_SHARP = 160\nBAR_COLOR_FULL = (60, 175, 30)\nBAR_COLOR_SHARP = (50, 130, 30)\nSHARP_OFFSET = 3\nSHEET_BG_COLOR = 185\nSHEET_VERT_GAP = 40\nSHEET_SPACE_BETWEEN = 20\nSHEET_PADDING_X = 50\n\ndef get_view_height(height):\n return height - (height // KEY_HEIGHT_FRAC)\n\ndef calculate_key_positions(width):\n sharp_distances = [1, 2, 1, 2, 1]\n last_sharp = 0\n sharp_index = 0\n key_index = 0\n key_thickness = width / KEY_WIDTH_FRAC\n keys = []\n sharps = []\n for i in range(88):\n x = int(key_index * key_thickness) - 1\n if last_sharp == sharp_distances[sharp_index]:\n sharp_index += 1\n curr_offset = 0\n if sharp_index == 5:\n curr_offset = 0\n sharp_index = 0\n elif last_sharp == 2:\n curr_offset = -SHARP_OFFSET\n else:\n curr_offset = SHARP_OFFSET\n last_sharp = 0\n sharps.append((x + curr_offset, i, True))\n else:\n key_index += 1\n last_sharp += 1\n keys.append((x, i, False))\n return keys + sharps\n\ndef draw_rounded_rect(img, p1, p2, color, thickness, radius):\n x_1 = p1[0] + radius\n x_2 = p2[0] - radius\n y_1 = p1[1] + radius\n y_2 = p2[1] - radius\n\n view_height = get_view_height(img.shape[0])\n\n black = (0, 0, 0)\n\n cv2.line(img, (x_1, p1[1]), (x_2, p1[1]), black, thickness)\n cv2.line(img, (x_1, p2[1]), (x_2, p2[1]), black, thickness)\n cv2.line(img, (p1[0], y_1), (p1[0], y_2), black, thickness)\n cv2.line(img, (p2[0], y_1), (p2[0], y_2), black, thickness)\n\n # Corners.\n cv2.line(img, (p1[0], y_1), (x_1, p1[1]), black, thickness)\n cv2.line(img, (p1[0], y_2), (x_1, p2[1]), black, thickness)\n cv2.line(img, (x_2, p1[1]), (p2[0], y_1), black, thickness)\n cv2.line(img, (x_2, p2[1]), (p2[0], y_2), black, thickness)\n\n if p2[1] > radius + 3 and p1[1] < view_height - radius - 3:\n cv2.floodFill(img, None, (x_1 + 5, y_2 - 1), color)\n\ndef draw_bar(img, x_1, x_2, is_sharp, status):\n height = img.shape[0]\n any_pressed = False\n view_height = get_view_height(height)\n for key_status, key_start, key_end in status:\n if key_status in (\"onscreen\", \"pressed\"):\n if key_status == \"pressed\":\n any_pressed = True\n 
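# Map the note's timing onto pixels: key_start/key_end appear to be the times\n                # until the note's edges reach the keys, so dividing by BAR_SPEED\n                # normalises them to [0, 1] of the falling-bar view height below.\n                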
bar_bot = view_height - int(view_height * (key_start / BAR_SPEED)) - 1\n bar_top = (view_height - int(view_height * (key_end / BAR_SPEED)) + 1\n if key_end > -1 else 0)\n if bar_top < view_height:\n if bar_top < 0:\n bar_top = 0\n if bar_bot > view_height:\n bar_bot = view_height\n color = BAR_COLOR_FULL\n if is_sharp:\n color = BAR_COLOR_SHARP\n\n draw_rounded_rect(img, (x_1, bar_top), (x_2, bar_bot), color, 2, 4)\n return any_pressed\n\ndef draw_key(img, x, x_1, x_2, any_pressed, is_sharp, press_color=(205, 120, 70)):\n height = img.shape[0]\n view_height = get_view_height(height)\n y_unp = height - 10\n y_sharp = height - int(KEY_HEIGHT_FRAC * 0.4)\n y_2 = y_unp if not is_sharp else y_sharp\n key_color = (255, 255, 255) if not is_sharp else (0, 0, 0)\n color = press_color if any_pressed else key_color\n if not is_sharp:\n y_2 = height if any_pressed else y_2\n cv2.rectangle(img, (x_1, view_height), (x_2, y_2), color, -1)\n if not is_sharp:\n cv2.line(img, (x, view_height), (x, y_unp), (0, 0, 0))\n\ndef draw_progress(img, progress):\n length = int(img.shape[1] * progress)\n y = 5\n thickness = y * 2\n cv2.line(img, (0, y), (length, y), (0, 0, 255), thickness)\n cv2.line(img, (0, thickness), (length, thickness), (0, 0, 0), 1)\n\ndef draw_piano(statuses, key_pos, size, progress, draw_presses=True):\n img = np.full((size[1], size[0], 3), SYNTHESIA_BG_COLOR, dtype=\"uint8\")\n h, w = img.shape[:2]\n key_thickness = w // KEY_WIDTH_FRAC\n sharp_thickness = w // KEY_WIDTH_FRAC_SHARP\n view_height = get_view_height(h)\n\n for (x, key, is_sharp) in key_pos:\n any_pressed = False\n x_1 = x if not is_sharp else x - 5\n x_2 = x + key_thickness if not is_sharp else x + sharp_thickness\n\n any_pressed = draw_bar(img, x_1, x_2, is_sharp, statuses[key])\n\n draw_key(img, x, x_1, x_2, any_pressed and draw_presses, is_sharp)\n\n cv2.line(img, (0, view_height-2), (w, view_height-2), (0, 0, 0), 2)\n draw_progress(img, progress)\n return img\n\ndef find_key(key, key_pos):\n for tupl in key_pos:\n if tupl[1] == key:\n return tupl\n return None\n\ndef draw_altered_note(img, note_id, key_pos, color):\n h, w = img.shape[:2]\n curr_key = find_key(note_id, key_pos)\n keys_to_draw = [curr_key]\n prev_key = find_key(note_id-1, key_pos)\n if prev_key is not None and prev_key[2]:\n keys_to_draw.append(prev_key)\n next_key = find_key(note_id+1, key_pos)\n if next_key is not None and next_key[2]:\n keys_to_draw.append(next_key)\n for i, (x, _, is_sharp) in enumerate(keys_to_draw):\n key_thickness = w // KEY_WIDTH_FRAC\n sharp_thickness = w // KEY_WIDTH_FRAC_SHARP\n x_1 = x if not is_sharp else x - 5\n x_2 = x + key_thickness if not is_sharp else x + sharp_thickness\n draw_key(img, x, x_1, x_2, i == 0, is_sharp, press_color=color)\n if i == 0:\n view_height = get_view_height(h)\n cv2.line(img, (x_2, view_height), (x_2, h), (0, 0, 0), 1)\n\ndef draw_correct_note(img, note_id, key_pos):\n draw_altered_note(img, note_id, key_pos, (30, 200, 30))\n\ndef draw_wrong_note(img, note_id, key_pos):\n draw_altered_note(img, note_id, key_pos, (0, 0, 225))\n\ndef end_of_data(timestamp, key_data):\n return timestamp > key_data[-1][\"timestamp\"]\n\ndef draw_str(img, x, y, text, color=(255, 255, 255), size=1.5):\n cv2.putText(img, text, (x, y), cv2.FONT_HERSHEY_COMPLEX, size, (0, 0, 0), 8)\n cv2.putText(img, text, (x, y), cv2.FONT_HERSHEY_COMPLEX, size, color, 2)\n\ndef draw_points(img, points):\n w = img.shape[1]\n pt_str = \"Score: \" + str(points)\n x = w - len(pt_str) * 27 - 20\n draw_str(img, x, 50, pt_str)\n\ndef draw_streak(img, 
streak):\n as_str = \"Streak: \" + str(streak)\n draw_str(img, 20, 50, as_str)\n\ndef draw_accuracy(img, accuracy):\n as_str = \"Acc: \" + str(accuracy) + \"%\"\n draw_str(img, 20, 100, as_str, size=1.2)\n\ndef draw_hits(img, hits, total):\n str_1 = str(hits)\n str_2 = \"/\"\n str_3 = str(total)\n w = img.shape[1]\n space_per_char = 25\n x_1 = w - len(str_1 + str_2 + str_3) * space_per_char - 20\n x_2 = w - len(str_1 + str_2) * space_per_char - 20\n x_3 = w - len(str_1) * space_per_char - 20\n draw_str(img, x_1, 100, str_1, color=(0, 190, 0), size=1.2)\n draw_str(img, x_2, 100, str_2, size=1.2)\n draw_str(img, x_3, 100, str_3, size=1.2)\n if total > 0:\n draw_accuracy(img, int((hits / total) * 100))\n\ndef draw_sharp(img, x, y):\n hori_offset_x = 12\n hori_offset_y = 12\n vert_offset_x = 6\n vert_offset_y = hori_offset_x * 2\n hori_offset = 6\n vert_offset = 12\n cv2.line(img, (x - hori_offset_x, y - hori_offset_y + hori_offset),\n (x + hori_offset_x, y - hori_offset_y - hori_offset), (0, 0, 0), 6)\n cv2.line(img, (x - hori_offset_x, y + hori_offset_y + hori_offset),\n (x + hori_offset_x, y + hori_offset_y - hori_offset), (0, 0, 0), 6)\n cv2.line(img, (x - vert_offset_x, y - vert_offset_y),\n (x - vert_offset_x, y + vert_offset_y + vert_offset), (0, 0, 0), 2)\n cv2.line(img, (x + vert_offset_x, y - vert_offset_y - vert_offset),\n (x + vert_offset_x, y + vert_offset_y), (0, 0, 0), 2)\n\ndef draw_semitone(img, x, y, treble, index, color):\n note_color = (0, 0, 0) if color is None else color\n y_radius = SHEET_SPACE_BETWEEN // 2\n cv2.ellipse(img, (x, y), (int(y_radius * 1.4), y_radius), 325, 0, 360, note_color, -1)\n offset_x, offset_y = 1, 11\n if treble:\n threshold = 31\n offset_y = -1 if index > threshold else 1\n offset_x = -11 if index > threshold else 10\n else:\n threshold = 16\n offset_y = 1 if index < threshold else -1\n offset_x = 11 if index < threshold else -10\n cv2.line(img, (x+offset_x, y-(SHEET_SPACE_BETWEEN * 3 * offset_y)), (x+offset_x, y), color, 3)\n\ndef draw_note(img, offset, key_index, treble, sharp, color=None):\n x = 200 + 70 * offset\n offset = -SHEET_VERT_GAP // 2 if treble else SHEET_VERT_GAP // 2\n start_y = img.shape[0] // 2 + offset\n y = int(start_y + (23 - key_index) * (SHEET_SPACE_BETWEEN / 2))\n if ((treble and (key_index < 25 or key_index > 34))\n or (not treble and (key_index < 10 or key_index > 22))):\n line_y = y + 10 if key_index % 2 == 0 else y\n cv2.line(img, (x-15, line_y), (x+15, line_y), (0, 0, 0), 2)\n draw_semitone(img, x, y, treble, key_index, color)\n if sharp:\n draw_sharp(img, x-30, y)\n\ndef overlay_img(img_1, img_2, x, y):\n y1, y2 = y, y + img_2.shape[0]\n x1, x2 = x, x + img_2.shape[1]\n\n alpha_s = img_2[:, :, 3] / 255.0\n alpha_l = 1.0 - alpha_s\n\n for c in range(0, 3):\n img_1[y1:y2, x1:x2, c] = (alpha_s * img_2[:, :, c] +\n alpha_l * img_1[y1:y2, x1:x2, c])\n return img_1\n\ndef create_sheet_image(size):\n img = np.full((size[1], size[0], 3), SHEET_BG_COLOR, dtype=\"uint8\")\n h, w = img.shape[:2]\n\n for sign in range(-1, 2, 2):\n for y_offset in range(5):\n y = int(h / 2 + (y_offset * SHEET_SPACE_BETWEEN + SHEET_VERT_GAP) * sign)\n cv2.line(img, (SHEET_PADDING_X, y), (w-SHEET_PADDING_X, y), (60, 60, 60), 2)\n\n split_lines = 6\n left = int((w - SHEET_PADDING_X * 2) / (split_lines-1))\n for x_offset in range(split_lines):\n x = SHEET_PADDING_X + int(left * x_offset)\n cv2.line(img, (x, int(h / 2 - (4 * SHEET_SPACE_BETWEEN + SHEET_VERT_GAP))),\n (x, int(h / 2 + (4 * SHEET_SPACE_BETWEEN + SHEET_VERT_GAP))), (0, 0, 0), 2)\n\n 
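# Clef glyphs: cv2.imread with flag -1 (IMREAD_UNCHANGED) keeps the PNG's\n    # alpha channel, which overlay_img() then uses to alpha-blend onto the staff.\n    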
treb_clef_img = cv2.imread(\"../resources/img/treble-clef.png\", -1)\n treb_clef_img = cv2.resize(treb_clef_img, (62, int(SHEET_SPACE_BETWEEN*6.8)))\n bass_clef_img = cv2.imread(\"../resources/img/bass-clef.png\", -1)\n bass_clef_img = cv2.resize(bass_clef_img, (62, int(SHEET_SPACE_BETWEEN*3.1)))\n img = overlay_img(img, treb_clef_img, SHEET_PADDING_X + 10, int(h / 2 - SHEET_SPACE_BETWEEN * 7.25))\n img = overlay_img(img, bass_clef_img, SHEET_PADDING_X + 10, int(h / 2 + SHEET_SPACE_BETWEEN * 2))\n\n return img\n","repo_name":"mhso/PianoMagic","sub_path":"src/draw.py","file_name":"draw.py","file_ext":"py","file_size_in_byte":10700,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"37922863614","text":"from infrastructure.base_repository import BaseRepository\nfrom infrastructure.data_mappers import build_details_entities_to_model\nfrom infrastructure.models import BuildDetailsModels\n\n\nclass DeploymentRepository(BaseRepository):\n def save_build_metrics(self, build_metrics):\n build_metrics_db = build_details_entities_to_model(build_metrics)\n self.add_item(build_metrics_db)\n\n def get_build_details(self, build_id):\n build_details = self.session.query(BuildDetailsModels).filter(\n BuildDetailsModels.build_id == build_id).all()\n\n return build_details\n","repo_name":"keshrisohit/devops_metrics","sub_path":"infrastructure/deployment_repository.py","file_name":"deployment_repository.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"83"} +{"seq_id":"9015693137","text":"from django.shortcuts import render,redirect\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.views.generic import View,TemplateView,ListView,UpdateView,CreateView,DeleteView\nfrom django.urls import reverse_lazy\n\nnameWeb = 'DI2D'\n\nclass home(TemplateView):\n template_name = \"sistemaInteligente/index.html\"\n\n def get_context_data(self,**kwargs):\n contexto = super().get_context_data(**kwargs)\n contexto['title'] = 'Sistemas Inteligentes'\n contexto['abstract'] = '''\n Proyecto desarrollado por el DI2D de la División de Mantemiento del Ejército de Chile. Utilizando técnicas de Machine Learning para apoyar la toma de desiciones en el presupuesto.\n\n '''\n return contexto\n\n\nclass combustible(TemplateView):\n template_name = \"combustible/index.html\"\n\n def get_context_data(self,**kwargs):\n contexto = super().get_context_data(**kwargs)\n contexto['title'] = 'Combustibles'\n contexto['abstract'] = '''\n Proyecto desarrollado por el DI2D de la División de Mantemiento del Ejército de Chile. Permite estimar el gasto de combustibles a nivel nacional.\n '''\n return contexto\n\n\nclass mapa(TemplateView):\n template_name = \"mapa/index.html\"\n\n def get_context_data(self,**kwargs):\n contexto = super().get_context_data(**kwargs)\n contexto['title'] = 'Mapas'\n contexto['abstract'] = '''\n Proyecto desarrollado por el DI2D de la División de Mantemiento del Ejército de Chile. 
Permite estimar el gasto de combustibles a nivel nacional.\n '''\n return contexto\n","repo_name":"fbarriosr/machineLearning","sub_path":"web/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1608,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"38089288622","text":"#!/usr/bin/env python3\n\nimport languageprocess.stopword\nimport re\nimport json\n\n'''\nSQLIZE the Normal query data to sql syntax\n'''\n\n###################################################\nsqldict = {}\n\nsqldict.setdefault('SELECT',['choose','take','find', 'select', 'elect', 'pick', 'get'])\nsqldict.setdefault('CREATE',['make','build','create', 'design', 'form', 'setup', 'invent', 'establish', 'produce', 'initiate', 'generate', 'devise', 'concieve', 'new'])\nsqldict.setdefault('INSERT',['insert','put','enter', 'include', 'set', 'fill', 'inject', 'in'])\nsqldict.setdefault('VALUES', ['value', 'data', 'values'])\nsqldict.setdefault('*', ['all', 'each'])\nsqldict.setdefault('where', ['whose', 'where\"s'])\nsqldict.setdefault('FROM', ['from'])\nsqldict.setdefault('DROP', ['delete', 'remove', 'drop'])\nsqldict.setdefault('MORE THAN', ['more', 'greater', 'higher'])\nsqldict.setdefault('LESS THAN', ['below', 'less', 'lower'])\n\ncreate_template = ['CREATE', 'TABLE', '(', ')']\n####################################################\n\n\ndef sqlize(qinput, sample = None):\n # log queries\n fobj = open('logs', 'a');\n fobj.write('Query: ' + qinput + '\\n')\n data = languageprocess.stopword.stopwordremover(qinput)\n for i in data: # sql word synonyms corrections\n for key, value in sqldict.items():\n if i.lower() in value:\n data[data.index(i)] = key\n \n #for single word databse internal queries\n length = len(data) \n #data = ' '.join(data)\n if length > 2 or ('SELECT' in data or 'CREATE' in data):\n data = sqltokenize(data, sample)\n else:\n data = singlequery(data)\n data = ' '.join(data) \n fobj.write('SQLized: ' + data + '\\n\\n') #logging\n fobj.close()\n return data\n\ndatatype = ['text', 'integer', 'varchar']\n\n\ndef sqltokenize(qinput, sample):\n '''rightnow doing it for create only\n '''\n if 'CREATE' in qinput:\n qinput.remove('CREATE')\n newinput = ['CREATE', 'TABLE', '(', ')']\n for ind,inp in enumerate(create_template):\n if inp == 'TABLE': \n newinput.insert(ind+1, qinput[0])\n if inp == '(':\n for i in range(1,len(qinput)):\n newinput.insert(-1,qinput[i]+',')\n newinput[-2] = newinput[-2].rstrip(',')\n\n # For CREATE query data type interpretation. user only need to eneter the sample input :)\n try:\n sample = sample.replace(',', '').split()\n except Exception as e:\n return ['ERROR: You need to input sample input... 
Please try again with sample input.']\n\n b = []\n # to determine the datatype of attributes\n for i in sample: \n if i.replace('.','').isdigit(): # for the float cases\n b.append('INTEGER')\n elif i.isalpha():\n b.append('TEXT')\n else:\n b.append('TEXT')\n attrs = newinput[newinput.index('(')+1:-1]#get attributes from newdata\n for ind, attr in enumerate(attrs):\n# attr = attr.replace(',', '')\n newinput[newinput.index(attr)] = attr.replace(',','') + ' ' + b[ind] + ',' \n newinput[-2] = newinput[-2].rstrip(',') # remove last ','\n inpu = newinput\n return inpu\n\n elif 'SELECT' in qinput:\n try:\n qinput.remove('SELECT')\n qinput.remove('*')\n except:\n pass\n newinput = []\n newinput.insert(0, 'SELECT')\n newinput.insert(1, '*')\n newinput.insert(2, 'FROM')\n with open('languageprocess/words.json', 'r') as fobj:\n js = json.load(fobj)\n # to get the table name and values associated with table name key\n for ind, dat in enumerate(qinput):\n if js.get(dat, 0):\n newinput.insert(3, dat)\n values = js[dat]\n qinput.remove(dat)\n newinput.append('WHERE') # appen where in all cases in absence of condition use 1\n # to get where conditions\n flag = 0\n for ind, val in enumerate(qinput[:]): # even revresed(qinput) could be used\n try:\n if val in values:\n flag = 1\n if 'MORE THAN' in qinput:\n sign = ' >= '\n qinput.remove('MORE THAN')\n elif 'LESS THAN' in qinput:\n sign = ' <= '\n qinput.remove('LESS THAN')\n else:\n sign = ' = '\n newinput.append(val + sign)\n newinput.append('sql')\n qinput.remove(val)\n except Exception as e:\n print(e)\n return ['ERROR: Maybe you need to submit new table info to Word.json']\n\n if not flag:\n newinput.append('1')\n return newinput\n # to get values\n try:\n lenth = len(qinput)\n for l in range(lenth):\n # to put quotes for alpha\n if qinput[l].isalpha():\n newinput[newinput.index('sql')] = '\"' + qinput[l] + '\"' + ','\n else:\n newinput[newinput.index('sql')] = qinput[l] + ','\n except:\n return ['ERROR: This table schema doesnt exists in \"words.json\" file']\n newinput[-1] = newinput[-1].rstrip(',')\n return newinput\n\n else:\n '''\n Rightnow considering else case as insert case\n '''\n data = qinput\n# data = qinput.split()\n data = insert_token(data)\n data.insert(0, 'INSERT INTO')\n# data = ''.join(data)\n return data\n return qinput\n\ndef insert_token(qinput):\n '''\n for proper formatting of the insert queries i.e. quotes and commas\n '''\n attr = []\n val = []\n newdata = []\n data = qinput\n length = len(data) - 1 #index starts with 0\n for index,word in enumerate(data):\n if word == '=': #and index+1 != length: # last check to correct last ','\n attr.append(data[index-1])\n# val.append(data[index+1])\n\n i, lnth = index+2, len(data)\n stri = ''\n # for multiple word insert statements\n while i < lnth and (data[i]) != '=':\n stri += data[i - 1] + ' '\n i += 1\n #this if for one word value of last column\n if i == lnth:\n stri += data[i-1]\n val.append(stri)\n # to remove spaces\n attr = tuple([i.strip() for i in attr])\n val = tuple([j.strip() for j in val])\n newdata = [data[0]]\n newdata.append(str(attr))\n newdata.append('VALUES')\n newdata.append(str(val))\n return newdata\n\ndef sqltemplate(data):\n\n '''sql query template: Correct the order of sql words. 
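For example (illustrative input, not from the original author): ['employees', 'SELECT', 'from', 'hr'] is reordered to ['SELECT', 'employees', 'from', 'hr']. 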
Last step of normalization\n try: Check if \"SELECT\" is not in string except: \"SELECT\" is in string but not at the index 0\n '''\n\n if data[0] != 'SELECT' and ('from' in data or 'where' in data):\n try:\n del(data[data.index('SELECT')])\n data.insert(0, 'SELECT')\n return data\n except:\n data.insert(0, 'SELECT')\n return data\n return data\n\ndef singlequery(data):\n '''For processing smqll length queries that doesn't need much processing\n *Some conditions may be of sql specific.*\n '''\n if len(data) < 1 or 'tables' in data[0]: #case when user chack tables with 'table' ***improve***\n data = \"SELECT name FROM sqlite_master WHERE type='table'\".split()\n return data\n elif 'schema' in data:\n return ['schema']\n elif 'DROP' in data:\n data.insert(data.index('DROP') + 1, 'TABLE' )\n return data\n return data\n\nif __name__ == '__main__':\n data = 'choOSe or select o.r : find or elect'\n print('input data: ', data)\n data = sqlize(data)\n print(data)\n","repo_name":"amitt001/nql","sub_path":"languageprocess/sqlizer.py","file_name":"sqlizer.py","file_ext":"py","file_size_in_byte":7905,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"83"} +{"seq_id":"12373943015","text":"from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('feed',views.feed, name=\"home\"),\n path('profile', views.profile, name=\"profile\"),\n path('edit', views.edit, name=\"edit\"),\n path('connections', views.connections, name=\"connections\"),\n path('add_post', views.add_post, name=\"add_post\"),\n path('profile_edit', views.profile_edit, name=\"profile_edit\"),\n path('', views.show, name=\"show\"),\n]\n","repo_name":"FatimahAbdullah/nuces_circle","sub_path":"student/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"30706744663","text":"from collections import Counter\r\nfrom os import listdir\r\nimport re\r\nfrom nltk.corpus import stopwords\r\nfrom string import punctuation\r\nfrom nltk.stem.porter import PorterStemmer\r\ndef load_newdoc(fileload):\r\n\tfile = open(fileload, 'r')\r\n\ttext = file.read()\r\n\tfile.close()\r\n\treturn text\r\n\r\n\r\ndef clean_newdoc(doc):\r\n token=doc.split()\r\n dic=str.maketrans('','',punctuation)\r\n token=[u.translate(dic) for u in token]\r\n for word in token:\r\n word=word.lower()\r\n for word in token:\r\n word=re.sub('[^A-Za-z]', ' ', word)\r\n #print(word)\r\n #print(\"arka\")\r\n token=[word for word in token if word.isalpha()]\r\n stopword=set(stopwords.words('english'))\r\n token=[v for v in token if not v in stopword]\r\n stemmer = PorterStemmer()\r\n for word in token:\r\n word=stemmer.stem(word)\r\n return token\r\n\r\n\r\ndef process_newdocs(file, vocab):\r\n\tdocument = load_newdoc(file)\r\n\ttokens = clean_newdoc(document)\r\n\tvocab.update(tokens)\r\n\r\ndef save_file(phrase, file):\r\n\tdata = '\\n'.join(phrase)\r\n\tfilenew = open(file, 'w')\r\n\tfilenew.write(data)\r\n\tfilenew.close()\r\n\r\nvocab = Counter()\r\nprocess_newdocs('pos.csv', vocab)\r\nprocess_newdocs('neg.csv', vocab)\r\nminno_occuranence = 1\r\ntokens = [k for k,c in vocab.items() if c >= minno_occuranence]\r\nsave_file(tokens, 'vocab.txt')","repo_name":"arkaiima/sentiment-analysis-using-bag-of-words","sub_path":"vocab building.py","file_name":"vocab 
building.py","file_ext":"py","file_size_in_byte":1315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"14734772822","text":"# -*- coding: utf-8 -*-\n# @Time : 2020/6/15 17:14\n# @Author : THU\nimport torch\n\n\ndef save_checkpoint(checkpoint_path, model, _optimizers, epoch, logger):\n state = {'state_dict': model.state_dict(),\n 'optimizer': [_.state_dict() for _ in _optimizers],\n 'epoch': epoch}\n torch.save(state, checkpoint_path)\n logger.info('models saved to %s' % checkpoint_path)\n","repo_name":"ShenDezhou/CAIL","sub_path":"CAIL2020/cocr/torchocr/utils/save.py","file_name":"save.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","stars":87,"dataset":"github-code","pt":"73"} +{"seq_id":"34247223064","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport click\nimport zmq\n\nfrom zmq.eventloop import ioloop\nfrom zmq.eventloop.zmqstream import ZMQStream\n\n@click.command()\n@click.option('--port', '-p', 'port', required=True)\ndef rec(port):\n\n zmq_ctx = zmq.Context()\n\n s = zmq_ctx.socket(zmq.SUB)\n s.bind('tcp://*:{port}'.format(port=port))\n s.setsockopt(zmq.SUBSCRIBE, b\"\")\n\n\n stream = ZMQStream(s)\n\n stream.on_recv_stream(rec_frame)\n\n ioloop.IOLoop.instance().start()\n\n while True:\n pass\n\n\ndef rec_frame(stream, msg, *args, **kwargs):\n print(msg[0])\n\n\nif __name__ == '__main__':\n rec()\n","repo_name":"fossabot/odr-stream-router","sub_path":"dev-tools/zmq_rec.py","file_name":"zmq_rec.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"1797371080","text":"import pygame\nimport os\nfrom environment import *\n\n\nos.environ['SDL_VIDEO_CENTERED'] = '1' # centers window, must be before pygame.init()!\npygame.init() # initialize pygame\n\nPARTICLES_PER_SOURCE = 40\nREFLECTION_ORDER = 5\nPARTICLE_SPEED = 5\nPARTICLE_SIZE = 4 # reflection_order + 1\nBACKGROUND_COLOR = Color(\"Black\")\nSTART_COLOR = Color(\"Violet\")\nEND_COLOR = Color(\"Green\")\n\n# pygame stuff\ndisplay_info = pygame.display.Info() # create a video display information object\nscreen_size = int(display_info.current_w * 0.9), int(display_info.current_h * 0.9)\nscreen = pygame.display.set_mode(screen_size)\npygame.display.set_caption('Simple Acoustics')\n\n# initialize font\n# must be called after 'pygame.init()' to avoid 'Font not Initialized' error\nfont = pygame.font.SysFont(\"consolas\", 20)\nlabel = font.render(\"LMC - add source / CMC - clean screen / RMC - start animation\", 1, Color(\"White\"))\n\n\ndef main():\n # create environment\n env = Environment(screen_size,\n BACKGROUND_COLOR,\n START_COLOR,\n END_COLOR,\n REFLECTION_ORDER)\n\n env.add_source(200, 200)\n\n clock = pygame.time.Clock()\n running = True\n animation = False\n while running:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n elif event.type == pygame.MOUSEBUTTONDOWN and event.button == 1: # LEFT=1\n env.add_source(event.pos[0], event.pos[1])\n elif event.type == pygame.MOUSEBUTTONDOWN and event.button == 2: # CENTER=2\n env.clean()\n animation = False\n elif event.type == pygame.MOUSEBUTTONDOWN and event.button == 3: # RIGHT=3\n # from each source create sound molecules\n env.generate_particles(PARTICLE_SIZE, PARTICLE_SPEED, PARTICLES_PER_SOURCE)\n animation = True\n\n if animation:\n # moves, reflects, cleaning dead particles\n env.update()\n\n # draw elements on screen\n 
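The save_checkpoint() helper in the save.py record above writes model and optimizer state dicts plus the epoch. A matching loader, as a sketch (not in the original repo; the map_location default and the return value are my choices):

import torch

def load_checkpoint(checkpoint_path, model, _optimizers, map_location='cpu'):
    # Inverse of save_checkpoint(): restore states onto existing objects.
    state = torch.load(checkpoint_path, map_location=map_location)
    model.load_state_dict(state['state_dict'])
    for opt, opt_state in zip(_optimizers, state['optimizer']):
        opt.load_state_dict(opt_state)
    return state['epoch']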
screen.fill(env.background_color)\n\n # render text\n screen.blit(label, (10, 10))\n for particle in env.particles:\n pygame.draw.circle(screen, particle.color, (int(particle.x), int(particle.y)), particle.size)\n for source in env.sources:\n pygame.draw.circle(screen, source.color, (int(source.x), int(source.y)), source.size)\n\n pygame.display.flip()\n clock.tick(100)\n\n pygame.quit()\n\nif __name__ == '__main__':\n main()\n","repo_name":"michkowalczuk/pygame-acoustics","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"39550719615","text":"import numpy as np\nimport csv\nimport matplotlib.pyplot as plt\nimport random\n\n# read in data to np array\n\npath = open('/Users/nolandowdle/Desktop/CSC533/HW2/HW2/IL_employee_salary.csv')\narray = np.loadtxt(path, delimiter=',', dtype=str, skiprows=1)\n\nnum_rows, num_cols = array.shape\ni = 0\nwhile i < num_rows:\n i += 1\n\n# count number of employees in each salary range\nlessThanFiftyCounter = 0\nfiftyCounter = 0 # count number of employees from 50-60k\nsixtyCounter = 0 # count number of employees from 60-70k\nseventyCounter = 0 # count number of employees from 70-80k\neightyCounter = 0 # count number of employees from 80-90k\nnintyCounter = 0 # count number of employees above 90k\ngreaterThanOneHundredCounter = 0\n\nnewArray = [None] * num_rows\ni = 0\nwhile i < num_rows:\n if (array[i][3].astype(float) < 50000):\n lessThanFiftyCounter += 1\n if (array[i][3].astype(float) > 50000 and array[i][3].astype(float) < 60000):\n fiftyCounter += 1\n if (array[i][3].astype(float) > 60000 and array[i][3].astype(float) < 70000):\n sixtyCounter += 1\n if (array[i][3].astype(float) > 70000 and array[i][3].astype(float) < 80000):\n seventyCounter += 1\n if (array[i][3].astype(float) > 80000 and array[i][3].astype(float) < 90000):\n eightyCounter += 1\n if (array[i][3].astype(float) > 90000 and array[i][3].astype(float) < 100000):\n nintyCounter += 1\n if (array[i][3].astype(float) > 100000):\n greaterThanOneHundredCounter += 1\n newArray[i] = array[i][3].astype(float)\n i += 1\n\n# set width for bars in histogram\nbarWidth = 0.2\n# create histogram without the epsilon-differential privacy\ndata = {'50k':fiftyCounter, '60k':sixtyCounter,\n '70k':seventyCounter, '80k':eightyCounter, '90k':nintyCounter}\n\nranges = list(data.keys())\nvalues = list(data.values())\n\nbar1 = plt.bar(np.arange(len(ranges)) - 0.4, values, color ='r', width = barWidth, label='unperturbed')\n\n# histogram with epsilon = 0.05\nloc = 0\nscale = 1 / 0.05 # known as lambda\nnoise, noise1, noise2, noise3, noise4 = np.random.laplace(loc, scale, 5)\n\nfiftyCounter0 = fiftyCounter\nsixtyCounter0 = sixtyCounter\nseventyCounter0 = seventyCounter\neightyCounter0 = eightyCounter\nnintyCounter0 = nintyCounter\n\nfiftyCounter0 += noise\nsixtyCounter0 += noise1\nseventyCounter0 += noise2\neightyCounter0 += noise3\nnintyCounter0 += noise4\n\n\n# create histogram without the epsilon-differential privacy\ndata = {'50k':fiftyCounter0, '60k':sixtyCounter0,\n '70k':seventyCounter0, '80k':eightyCounter0, '90k':nintyCounter0}\n\nranges = list(data.keys())\nvalues = list(data.values())\n\nbar2 = plt.bar(np.arange(len(ranges)) - 0.2, values, width=barWidth, label='0.05')\n\n\n\n# create histogram for epsilon = 0.1\nloc = 0\nscale = 1 / 0.1 # known as lambda\nnoise, noise1, noise2, noise3, noise4 = np.random.laplace(loc, scale, 5)\n\nfiftyCounter1 = 
fiftyCounter\nsixtyCounter1 = sixtyCounter\nseventyCounter1 = seventyCounter\neightyCounter1 = eightyCounter\nnintyCounter1 = nintyCounter\n\nfiftyCounter1 += noise\nsixtyCounter1 += noise1\nseventyCounter1 += noise2\neightyCounter1 += noise3\nnintyCounter1 += noise4\n\n\n# create histogram without the epsilon-differential privacy\ndata = {'50k':fiftyCounter1, '60k':sixtyCounter1,\n '70k':seventyCounter1, '80k':eightyCounter1, '90k':nintyCounter1}\n\nranges = list(data.keys())\nvalues = list(data.values())\n\nbar3 = plt.bar(np.arange(len(ranges)), values, width=barWidth, label='0.1')\n\n# create histogram for epsilon = 5.0\nloc = 0\nscale = 1 / 5.0 # known as lambda\nnoise, noise1, noise2, noise3, noise4 = np.random.laplace(loc, scale, 5)\n\nfiftyCounter2 = fiftyCounter\nsixtyCounter2 = sixtyCounter\nseventyCounter2 = seventyCounter\neightyCounter2 = eightyCounter\nnintyCounter2 = nintyCounter\n\nfiftyCounter2 += noise\nsixtyCounter2 += noise1\nseventyCounter2 += noise2\neightyCounter2 += noise3\nnintyCounter2 += noise4\n\n\n# create histogram without the epsilon-differential privacy\ndata = {'50k':fiftyCounter2, '60k':sixtyCounter2,\n '70k':seventyCounter2, '80k':eightyCounter2, '90k':nintyCounter2}\n\nranges = list(data.keys())\nvalues = list(data.values())\n\nbar4 = plt.bar(np.arange(len(ranges)) + 0.2, values, width=barWidth, label='5.0')\n\n\nplt.xlabel(\"Salary Ranges\")\nplt.ylabel(\"Number of Employees\")\nplt.title(\"Salary Brackets\")\nplt.legend()\nplt.show()","repo_name":"njdowdle/CSS533HW2","sub_path":"njdowdle_HW2_Q2.py","file_name":"njdowdle_HW2_Q2.py","file_ext":"py","file_size_in_byte":4300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"43851114934","text":"from django.test import TestCase\n\nfrom films.models import Film\nfrom films.serializers import FilmSerializer\n\n\nclass FilmSerializerTestCase(TestCase):\n def test_ok(self):\n film_1 = Film.objects.create(name='film_1', director=\"director_1\", price=100)\n film_2 = Film.objects.create(name='film_2', director=\"director_2\", price=450.11)\n film_3 = Film.objects.create(name='film_3', director=\"director_3\", price=123.42)\n data = FilmSerializer([film_1, film_2, film_3], many=True).data\n excepted_data = [\n {\n \"name\": 'film_1',\n \"director\": \"director_1\",\n \"price\": '100.000'\n },\n {\n \"name\": 'film_2',\n \"director\": \"director_2\",\n \"price\": '450.110'\n },\n {\n \"name\": 'film_3',\n \"director\": \"director_3\",\n \"price\": '123.420'\n },\n ]\n self.assertEqual(excepted_data, data)\n","repo_name":"jamoliddinovabubakr/FilmProject","sub_path":"films/tests/test_serializers.py","file_name":"test_serializers.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"26541201660","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Comment',\n fields=[\n ('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Book',\n fields=[\n ('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),\n ('title', models.CharField(db_index=True, max_length=100)),\n ('comments', 
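The salary-histogram record above draws Laplace noise with scale = 1/epsilon, which is exactly the Laplace mechanism for a counting query (sensitivity 1): smaller epsilon means a larger noise scale and stronger privacy. A compact restatement (function name hypothetical):

import numpy as np

def laplace_count(true_count, epsilon, sensitivity=1.0):
    # Laplace mechanism: noise scale b = sensitivity / epsilon.
    return true_count + np.random.laplace(loc=0.0, scale=sensitivity / epsilon)

print(laplace_count(120, 0.05))  # scale 20: counts typically shift by tens
print(laplace_count(120, 5.0))   # scale 0.2: counts barely move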
models.ManyToManyField(to='commands_sql_migrations.Comment')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n ]\n","repo_name":"mollstam/UnrealPy","sub_path":"UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/django-1.8.2/tests/commands_sql_migrations/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"73"} +{"seq_id":"39795410009","text":"'''\nCreated on Apr 29, 2014\n\n@author: javi\n'''\n\nimport random\n\ndef toTail():\n num = 0\n count = 0\n while num != 1:\n num = random.randint(0,400)\n count += 1\n return count\n\ndef pledge(num):\n count = 0. \n for x in range(num):\n if random.randint(0,400) == 1:\n count += 1\n return count/num\n\nif __name__ == '__main__':\n stuff = []\n reps = 100000\n for x in range(1,reps):\n stuff.append(toTail())\n print(stuff[0], stuff[len(stuff)-1])\n print(\"median:\" + str(sorted(stuff)[len(stuff)/2]))\n \n average = 0. \n reps = 100000\n for x in range(1, reps):\n average = (average * (x-1) + toTail()) / x\n print(average)","repo_name":"xescape/PopNet-Backup","sub_path":"RandomScripts/coinsim.py","file_name":"coinsim.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"29182983407","text":"from selenium import webdriver\nfrom time import sleep\n\n\n# selenium bot\nclass TopScorrerBot():\n\tdef __init__(self):\n\t\toptions = webdriver.ChromeOptions()\n\t\toptions.add_argument('--ignore-certificate-errors')\n\t\toptions.add_argument('--incognito')\n\t\t# options.add_argument('--headless') # without opening a browser window\n\t\tself.driver = webdriver.Chrome(options=options)\n\t\tself.end = False\n\n\n\tdef go_to_site(self):\n\t\tself.driver.get('https://www.transfermarkt.com/statistik/topscorer')\n\t\tsleep(10)\n\t\t\n\tdef mv_across_page(self):\n\t\ttry:\n\t\t\tnext_page = self.driver.find_element_by_xpath('//*[@id=\"yw2\"]/li[13]')\n\t\t\tnext_page.click()\n\t\t\tsleep(5)\n\t\t\t\n\t\texcept :\n\t\t\tself.end = True\n\n\tdef scroll_down(self):\n\t\tnext_page = self.driver.find_element_by_xpath('//*[@id=\"yw2\"]/li[13]')\n\t\tnext_page.click()\n\n\n\n\n# # test\n# bot = TopScorrerBot()\n# bot.go_to_site()\n# bot.scroll_down()\n# for page in range(12):\n# \tbot.mv_across_page()\n\n\n\n\n\n\n","repo_name":"axelearning/CS_portfolio","sub_path":"old_project/soccer_analysis/250_best_scorer/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"20072448289","text":"import math\nimport sys\nimport tkinter\nfrom fractions import Fraction\nfrom tkinter import filedialog, scrolledtext, messagebox\n\nclass LocError(Exception):\n def __str__(self):\n (startLine, startCol), (endLine, endCol) = self.loc\n return \"%s:%s: %s\" % (startLine + 1, startCol, self.args[0])\n\nclass ParseError(LocError):\n def __init__(self, loc, msg):\n Exception.__init__(self, msg)\n self.loc = loc\n\ndef is_alpha(c):\n return 'A' <= c <= 'Z' or 'a' <= c <= 'z' or c == '_'\n\ndef is_digit(c):\n return '0' <= c <= '9'\n\noperators = {'!=', '==', '<=', '<', '+', '-', '#', '(', ')', ',', '==>', ':', '[', ']', '*'}\noperatorPrefixes = set()\nfor operator in operators:\n for i in range(1,len(operator) + 1):\n operatorPrefixes.add(operator[:i])\n\nkeywords = ['assert', 'and', 'True', 
'Herschrijven', 'met', 'in', 'Z', 'op', 'Wet', 'not', 'en', 'if', 'else', 'of']\n\nbinaryOperators = {'==>', 'and', '==', '<=', '<', '+', '-', '!=', '*'}\nsymmetricBinaryOperators = {'and', '==', '+', '*'}\n\nunaryOperators = {'not'}\nnullaryOperators = {'True'}\n\nclass Lexer:\n def __init__(self, text):\n self.text = text\n self.pos = -1\n self.line = 0\n self.startOfLine = 0\n self.eat()\n\n def get_token_value(self):\n return self.text[self.tokenStart:self.pos]\n\n def eat(self):\n if 0 <= self.pos and self.text[self.pos] == '\\n':\n self.line += 1\n self.startOfLine = self.pos + 1\n self.pos += 1\n if self.pos == len(self.text):\n self.c = '\\0'\n else:\n self.c = self.text[self.pos]\n\n def tokenLoc(self):\n return ((self.line, self.tokenStart - self.startOfLine), (self.line, self.pos - self.startOfLine))\n\n def error(self, msg):\n raise ParseError(((self.line, self.pos - self.startOfLine), (self.line, self.pos - self.startOfLine + 1)), msg)\n\n def next_token(self):\n while self.c == ' ':\n if self.pos == self.startOfLine:\n self.error(\"Indentation is not supported\")\n self.eat()\n self.tokenStart = self.pos\n if self.c == '\\0':\n return 'EOF'\n if self.c == '\\n':\n self.eat()\n self.startOfLine = self.pos\n return 'EOL'\n if is_alpha(self.c):\n self.eat()\n while is_alpha(self.c) or is_digit(self.c):\n self.eat()\n if self.get_token_value() in keywords:\n return self.get_token_value()\n return 'identifier'\n if is_digit(self.c):\n self.eat()\n while is_digit(self.c):\n self.eat()\n return 'number'\n operatorLength = 0\n operatorPrefixLength = 1\n while True:\n operatorPrefix = self.text[self.tokenStart:self.tokenStart + operatorPrefixLength]\n if not operatorPrefix in operatorPrefixes:\n break\n if operatorPrefix in operators:\n operatorLength = operatorPrefixLength\n operatorPrefixLength += 1\n if operatorLength == 0:\n self.error(\"Bad token\")\n for i in range(operatorLength):\n self.eat()\n return self.get_token_value()\n\nclass Parser:\n def __init__(self, text):\n self.lexer = Lexer(text)\n self.tokenType = self.lexer.next_token()\n self.tokenLoc = self.lexer.tokenLoc()\n\n def error(self, msg):\n raise ParseError(self.tokenLoc, msg)\n\n def eat(self):\n value = self.lexer.get_token_value()\n self.tokenType = self.lexer.next_token()\n self.tokenLoc = self.lexer.tokenLoc()\n return value\n\n def parsePrimaryExpression(self):\n if self.tokenType == 'identifier':\n x = self.eat()\n if self.tokenType == '(':\n self.eat()\n args = []\n if self.tokenType != ')':\n args.append(self.parseExpression())\n while self.tokenType == ',':\n self.eat()\n args.append(self.parseExpression())\n self.expect(')')\n return 'call', x, tuple(args)\n return 'var', x\n elif self.tokenType == 'number':\n v = int(self.eat())\n return 'int', v\n elif self.tokenType == 'True':\n self.eat()\n return 'True',\n elif self.tokenType == '(':\n self.eat()\n e = self.parseExpression()\n self.expect(')')\n return e\n elif self.tokenType == 'not':\n self.eat()\n e = self.parseComparison()\n return ('not', e)\n else:\n self.error(\"Expression expected\")\n\n def parseSuffixExpression(self):\n e = self.parsePrimaryExpression()\n while self.tokenType == '[':\n self.eat()\n if self.tokenType == ':':\n self.eat()\n if self.tokenType == ']':\n end = ('call', 'len', (e,))\n else:\n end = self.parseExpression()\n self.expect(']')\n e = ('call', '#slice', (e, ('int', 0), end))\n else:\n index = self.parseExpression()\n if self.tokenType == ':':\n self.eat()\n if self.tokenType == ']':\n end = ('call', 'len', (e,))\n 
else:\n end = self.parseExpression()\n self.expect(']')\n e = ('call', '#slice', (e, index, end))\n else:\n self.expect(']')\n e = ('call', '#subscript', (e, index))\n return e\n\n def parseMultiplication(self):\n e = self.parseSuffixExpression()\n while True:\n if self.tokenType == '*':\n self.eat()\n e2 = self.parseSuffixExpression()\n e = ('*', e, e2)\n else:\n return e\n\n def parseAddition(self):\n e = self.parseMultiplication()\n while True:\n if self.tokenType == '+':\n self.eat()\n e2 = self.parseMultiplication()\n e = ('+', e, e2)\n elif self.tokenType == '-':\n self.eat()\n e2 = self.parseMultiplication()\n e = ('-', e, e2)\n else:\n return e\n\n def parseComparison(self):\n e = self.parseAddition()\n if self.tokenType in ['==', '<=', '<', '!=']:\n operator = self.tokenType\n self.eat()\n e2 = self.parseAddition()\n result = (operator, e, e2)\n e = e2\n while self.tokenType in ['==', '<=', '<', '!=']:\n operator = self.tokenType\n self.eat()\n e2 = self.parseAddition()\n result = ('and', result, (operator, e, e2))\n e = e2\n return result\n else:\n return e\n\n def expect(self, tokenType):\n if self.tokenType != tokenType:\n self.error('%s expected, found %s' % (tokenType, self.tokenType))\n return self.eat()\n\n def parseConjunction(self):\n e = self.parseComparison()\n while self.tokenType == 'and':\n self.eat()\n e2 = self.parseConjunction()\n e = ('and', e, e2)\n return e\n\n def parseIfThenElse(self):\n e = self.parseConjunction()\n if self.tokenType == 'if':\n self.eat()\n cond = self.parseExpression()\n self.expect('else')\n elseBranch = self.parseIfThenElse()\n return ('call', '#ifthenelse', (cond, e, elseBranch))\n else:\n return e\n\n def parseImplication(self):\n e = self.parseIfThenElse()\n if self.tokenType == '==>':\n self.eat()\n return ('==>', e, self.parseIfThenElse())\n return e\n\n def parseExpression(self):\n return self.parseImplication()\n\n def parseFactSpec(self):\n if self.tokenType == '(':\n self.eat()\n result = self.parseFactSpec()\n self.expect(')')\n return result\n elif self.tokenType == 'number':\n return ('antecedent', int(self.eat()))\n lawName = self.expect('identifier')\n arguments = []\n if self.tokenType == 'op':\n self.eat()\n arguments.append(self.parseFactSpec())\n while self.tokenType == ',' or self.tokenType == 'en':\n self.eat()\n arguments.append(self.parseFactSpec())\n return ('law', lawName, tuple(arguments))\n\n def parsePrimaryJustification(self):\n if self.tokenType == 'Herschrijven':\n self.eat()\n self.expect('met')\n i = self.parseFactSpec()\n self.expect('in')\n j = int(self.expect('number'))\n return ('Herschrijven', i, j)\n elif self.tokenType == 'Z':\n self.eat()\n i = None\n if self.tokenType == 'op':\n self.eat()\n i = self.parseFactSpec()\n return ('Z', i)\n elif self.tokenType == 'identifier':\n return self.parseFactSpec()\n else:\n self.error('Justification keyword not supported')\n\n def parseJustification(self):\n justification = self.parsePrimaryJustification()\n if self.tokenType == 'of':\n self.eat()\n return ('of', justification, self.parseJustification())\n return justification\n\n def parseProofLine(self):\n line, _ = self.tokenLoc\n self.expect('assert')\n e = self.parseExpression()\n if self.tokenType == '#':\n self.eat()\n justification = self.parseJustification()\n else:\n justification = None\n self.expect('EOL')\n return (line, e, justification)\n\n def parseProof(self):\n while self.tokenType == 'EOL':\n self.eat()\n lines = []\n while self.tokenType == 'assert':\n lines.append(self.parseProofLine())\n 
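The recursive-descent parser above encodes precedence in its call chain (primary/suffix -> multiplication -> addition -> comparison -> conjunction -> if/else -> implication) and builds plain nested tuples as the AST. Hand-worked from the methods above:

# Parser('a + b * c == 6').parseExpression() yields
# ('==',
#  ('+', ('var', 'a'), ('*', ('var', 'b'), ('var', 'c'))),
#  ('int', 6))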
return lines\n\n def parseLaw(self):\n self.expect('#')\n self.expect('Wet')\n name = self.expect('identifier')\n self.expect(':')\n rule = self.parseImplication()\n self.expect('EOL')\n return (name, rule)\n\ndef get_rewrites_for_tuple(es, bindings, lhs, rhs):\n if es == ():\n return [es]\n else:\n rewrites = []\n for e in get_rewrites(es[0], bindings, lhs, rhs):\n for es0 in get_rewrites_for_tuple(es[1:], bindings, lhs, rhs):\n rewrites.append((e,) + es0)\n return rewrites\n\ndef get_rewrites(e, bindings, lhs, rhs):\n rewrites = [e] # e itself is a rewrite of itself\n try:\n bindings1 = dict(bindings)\n match(bindings1, lhs, e)\n rewrites.append(subst(rhs, bindings1))\n except MatchFailure:\n pass\n try:\n bindings1 = dict(bindings)\n match(bindings1, rhs, e)\n rewrites.append(subst(lhs, bindings1))\n except MatchFailure:\n pass\n if e[0] in binaryOperators:\n for e1 in get_rewrites(e[1], bindings, lhs, rhs):\n for e2 in get_rewrites(e[2], bindings, lhs, rhs):\n rewrites.append((e[0], e1, e2))\n elif e[0] in unaryOperators:\n for e1 in get_rewrites(e[1], bindings, lhs, rhs):\n rewrites.append((e[0], e1))\n elif e[0] == 'call':\n for args in get_rewrites_for_tuple(e[2], bindings, lhs, rhs):\n rewrites.append(('call', e[1], args))\n return rewrites\n\nclass ProofError(LocError):\n pass\n\ndef get_conjuncts(e):\n if e[0] == 'and':\n return get_conjuncts(e[1]) + get_conjuncts(e[2])\n else:\n return [e]\n\ndef add_polys(poly1, poly2):\n poly1Keys = set(poly1.keys())\n poly2Keys = set(poly2.keys())\n result = {}\n for key in poly1Keys - poly2Keys:\n result[key] = poly1[key]\n for key in poly2Keys - poly1Keys:\n result[key] = poly2[key]\n for key in poly1Keys & poly2Keys:\n value = poly1[key] + poly2[key]\n if value == 0:\n pass\n else:\n result[key] = value\n return result\n\ndef scale_poly(coef, poly):\n if coef == 0:\n return {}\n else:\n result = {}\n for key in poly.keys():\n result[key] = coef * poly[key]\n return result\n\ndef get_poly(e):\n if e[0] == 'var':\n return {(e,): 1}\n elif e[0] == 'int':\n if e[1] == 0:\n return {}\n else:\n return {(): e[1]}\n elif e[0] == '+':\n poly1 = get_poly(e[1])\n poly2 = get_poly(e[2])\n return add_polys(poly1, poly2)\n elif e[0] == '-':\n poly1 = get_poly(e[1])\n poly2 = get_poly(e[2])\n return add_polys(poly1, scale_poly(-1, poly2))\n else:\n return {(e,): 1}\n\ndef is_tautology(e):\n if e[0] not in ['==', '<=', '!=']:\n return False\n poly = get_poly(('-', e[2], e[1]))\n if e[0] == '==':\n return poly == {}\n elif e[0] == '!=':\n return set(poly.keys()) == {()} and 0 != poly[()]\n elif e[0] == '<=':\n return set(poly.keys()) == set() or set(poly.keys()) == {()} and 0 <= poly[()]\n\ndef normalize_eq(e):\n \"\"\"Rewrites an equation to a form not involving not or <, i.e. 
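get_poly() above flattens a linear expression into a map from key tuples (each wrapping a variable or opaque subterm) to integer coefficients, dropping cancelled terms; is_tautology() then checks whether e2 - e1 reduces to a constant of the right sign. Hand-worked from those definitions:

# get_poly(('-', ('+', ('var', 'x'), ('int', 2)), ('var', 'x')))  ->  {(): 2}
#   (the x terms cancel, leaving only the constant 2)
# is_tautology(('<=', ('var', 'x'), ('+', ('var', 'x'), ('int', 2))))  ->  True
#   (the poly of e2 - e1 is {(): 2}, and 0 <= 2)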
involving only ==, <=, or !=\"\"\"\n if e[0] == 'not':\n e1 = normalize_eq(e[1])\n if e1[0] == 'not':\n e = e1[1]\n elif e1[0] == '==':\n e = ('!=', e1[1], e1[2])\n elif e1[0] == '!=':\n e = ('==', e1[1], e1[2])\n elif e1[0] == '<=':\n e = ('<=', e1[2], ('+', ('int', -1), e1[1]))\n else:\n return e\n if e[0] == '<':\n return ('<=', e[1], ('+', ('int', -1), e[2]))\n return e\n\ndef get_polyc(eq):\n poly = get_poly(('-', eq[2], eq[1]))\n c = Fraction(0)\n if () in poly:\n c = Fraction(poly[()])\n del poly[()]\n op = eq[0]\n if poly != {}:\n gcd = math.gcd(*poly.values())\n if op == '==' or op == '!=' and poly[min(poly.keys())] < 0:\n gcd *= -1\n for key in list(poly.keys()):\n poly[key] /= gcd\n c /= gcd\n return op, c, poly\n\ndef follows_in_Z_from(consequent, antecedent):\n antecedent = normalize_eq(antecedent)\n consequent = normalize_eq(consequent)\n if not {consequent[0], antecedent[0]} <= {'==', '<=', '!='}:\n return False\n if consequent[0] == '==' and antecedent[0] != '==':\n return False\n if consequent[0] == '<=' and antecedent[0] == '!=':\n return False\n op1, c1, poly1 = get_polyc(antecedent)\n op2, c2, poly2 = get_polyc(consequent)\n # print('Checking entailment in Z: %s ==> %s' % ((op1, c1, poly1), (op2, c2, poly2)))\n if op2 == '==':\n return (c2, poly2) == (c1, poly1)\n elif op2 == '!=':\n if op1 == '!=':\n return (c2, poly2) == (c1, poly1)\n elif op1 == '==':\n return poly2 == poly1 and c1 != c2\n else:\n assert op1 == '<='\n return poly2 == poly1 and c1 < c2\n assert op2 == '<='\n if op1 == '<=':\n return poly2 == poly1 and c1 <= c2\n else:\n assert op1 == '=='\n return poly2 == poly1 and c1 <= c2 or poly2 == scale_poly(-1, poly1) and -c1 <= c2\n\nclass MatchFailure(ProofError):\n pass\n\ndef match(bindings, e1, e2):\n \"\"\"\n Extends the bindings so that subst(bindings, e1) == e2, or raises a ProofError.\n \"\"\"\n if e1[0] == 'var':\n x = e1[1]\n if x in bindings:\n if e2 != bindings[x]:\n raise MatchFailure(\"Match failure: expected: %s; found: %s\" % (bindings[x], e2))\n else:\n bindings[x] = e2\n else:\n if e1[0] != e2[0]:\n raise MatchFailure(\"Match failure: %s is not of the form %s\" % (e2, e1))\n if e1[0] in symmetricBinaryOperators:\n # Take symmetry/commutativity into account\n bindings1 = dict(bindings)\n try:\n match(bindings1, e1[1], e2[1])\n match(bindings1, e1[2], e2[2])\n bindings.update(bindings1)\n except MatchFailure:\n match(bindings, e1[1], e2[2])\n match(bindings, e1[2], e2[1])\n elif e1[0] in binaryOperators:\n match(bindings, e1[1], e2[1])\n match(bindings, e1[2], e2[2])\n elif e1[0] in unaryOperators:\n match(bindings, e1[1], e2[1])\n elif e1[0] in nullaryOperators:\n pass\n elif e1[0] == 'call':\n if e1[1] != e2[1]:\n raise MatchFailure(\"Match failure: %s is not of the form %s\" % (e2, e1))\n for arg1, arg2 in zip(e1[2], e2[2]):\n match(bindings, arg1, arg2)\n elif e1[0] == 'int':\n if e1 != e2:\n raise MatchFailure(\"Match failure: expected: %s; found: %s\" % (e1, e2))\n else:\n raise ProofError(\"match: construct not supported: %s\" % e1)\n\ndef matches(e1, e2, bindings):\n \"\"\"Returns whether bindings can be extended so that subst(e2, bindings) == e1\"\"\"\n try:\n match(dict(bindings), e2, e1)\n return True\n except MatchFailure:\n return False\n\nfreshVarCounter = 0\n\ndef get_fresh_var_name():\n global freshVarCounter\n\n result = \"#x%d\" % freshVarCounter\n freshVarCounter += 1\n return result\n\ndef subst(e, bindings):\n if e[0] == 'var':\n if e[1] not in bindings:\n x = get_fresh_var_name()\n bindings[e[1]] = ('var', x)\n return 
bindings[e[1]]\n elif e[0] in binaryOperators:\n return (e[0], subst(e[1], bindings), subst(e[2], bindings))\n elif e[0] in unaryOperators:\n return (e[0], subst(e[1], bindings))\n elif e[0] in nullaryOperators:\n return e\n elif e[0] == 'call':\n return ('call', e[1], tuple(map(lambda arg: subst(arg, bindings), e[2])))\n elif e[0] == 'int':\n return e\n else:\n raise ProofError(\"subst: construct not supported: %s\" % (e,))\n\ndef get_free_vars(e):\n if e[0] == 'var':\n return {e[1]}\n elif e[0] in binaryOperators:\n return get_free_vars(e[1]).union(get_free_vars(e[2]))\n elif e[0] in unaryOperators:\n return get_free_vars(e[1])\n elif e[0] in nullaryOperators:\n return set()\n elif e[0] == 'call':\n return set().union(*map(get_free_vars, e[2]))\n elif e[0] == 'int':\n return set()\n else:\n raise AssertionError(\"Unsupported construct: %s\" % (e,))\n\ndef normalize(eq):\n if eq[0] == '==' and eq[2] < eq[1]:\n return ('==', eq[2], eq[1])\n return eq\n\nlaws = {}\n\ndef check_entailment(line, antecedent, consequent, justification):\n try:\n def get_conjunct(i):\n if i < 1 or len(antecedent) < i:\n raise ProofError(\"Antecedent conjunct index out of range\")\n return antecedent[i - 1]\n\n def get_fact(factSpec):\n if factSpec[0] == 'antecedent':\n conjunct = get_conjunct(factSpec[1])\n return dict(map(lambda x: (x, ('var', x)), get_free_vars(conjunct))), conjunct\n elif factSpec[0] == 'law':\n _, lawName, arguments = factSpec\n if lawName not in laws:\n raise ProofError(\"No such law: %s\" % lawName)\n premisses, conclusion = laws[lawName]\n if len(arguments) != len(premisses):\n raise ProofError(\"De wet %s verwacht %d argumenten; %d gegeven\" % (lawName, len(premisses), len(arguments)))\n variableBindings = {}\n for premiss, argument in zip(premisses, arguments):\n argBindings, argTerm = get_fact(argument)\n if set(get_free_vars(argTerm)) != set(argBindings.keys()):\n raise ProofError(\"Law application requires fully instantiated arguments. Argument %s with bindings %s has uninstantiated pattern variables\" % (argTerm, argBindings))\n argTerm = subst(argTerm, argBindings)\n match(variableBindings, premiss, argTerm)\n return variableBindings, conclusion\n else:\n raise ProofError(\"Unsupported fact specification form %s\" % (factSpec,))\n\n # print(\"Checking entailment %s ==> %s\" % (antecedent, consequent))\n\n def get_entailment_checker(justification):\n if justification[0] == 'Herschrijven':\n _, i, j = justification\n bindings, equation = get_fact(i)\n target = get_conjunct(j)\n if equation[0] != '==':\n raise ProofError(\"Kan niet herschrijven met \" + str(equation) + \" want is geen gelijkheid\")\n rewrites = get_rewrites(target, bindings, equation[1], equation[2])\n targetBindings = dict((x, ('var', x)) for x in get_free_vars(target))\n def checker(conjunct):\n if any(matches(conjunct, rewrite, targetBindings) for rewrite in rewrites):\n return None\n else:\n return \"rewrites = \" + str(rewrites) + \")\"\n return checker\n elif justification[0] == 'Z':\n if justification[1] == None:\n def checker(conjunct):\n return None if is_tautology(normalize_eq(conjunct)) else \"\"\n return checker\n else:\n bindings, fact = get_fact(justification[1])\n if set(get_free_vars(fact)) != set(bindings.keys()):\n raise ProofError(\"Z justification requires fully instantiated fact. 
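match() above is one-way pattern matching that also tries the swapped argument order for symmetric operators, and subst() replays the accumulated bindings (inventing fresh variables for anything unbound). A small usage sketch, assuming the definitions above are in scope:

bindings = {}
match(bindings, ('+', ('var', 'a'), ('var', 'b')),   # pattern
      ('+', ('int', 1), ('var', 'x')))               # concrete term
# bindings == {'a': ('int', 1), 'b': ('var', 'x')}
subst(('+', ('var', 'b'), ('var', 'a')), bindings)
# -> ('+', ('var', 'x'), ('int', 1))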
Fact %s under bindings %s has uninstantiated pattern variables\" % (fact, bindings))\n fact = subst(fact, bindings)\n antecedent_conjunct = normalize_eq(fact)\n def checker(conjunct):\n if follows_in_Z_from(normalize_eq(conjunct), antecedent_conjunct):\n return None\n else:\n return \"\"\n return checker\n elif justification[0] == 'law':\n variableBindings, conclusion = get_fact(justification)\n def checker(conjunct):\n if matches(conjunct, conclusion, variableBindings):\n return None\n else:\n return \"\"\n return checker\n elif justification[0] == 'of':\n checker1 = get_entailment_checker(justification[1])\n checker2 = get_entailment_checker(justification[2])\n def checker(conjunct):\n failureInfo1 = checker1(conjunct)\n if failureInfo1 == None:\n return None\n else:\n failureInfo2 = checker2(conjunct)\n if failureInfo2 == None:\n return None\n else:\n failureInfos = []\n if failureInfo1 != \"\":\n failureInfos.append(failureInfo1)\n if failureInfo2 != \"\":\n failureInfos.append(failureInfo2)\n return \"; \".join(failureInfos)\n return checker\n else:\n raise ProofError(\"Verantwoording niet ondersteund: \" + str(justification))\n\n checker = get_entailment_checker(justification)\n for conjunct in consequent:\n if conjunct not in antecedent:\n checkerFailureInfo = checker(conjunct)\n if checkerFailureInfo != None:\n raise ProofError(\"Conjunct niet bewezen: \" + str(conjunct) + (\"\" if checkerFailureInfo == \"\" else \" (\" + checkerFailureInfo + \")\"))\n\n except ProofError as e:\n e.loc = (line, (line[0],-1))\n raise e\n\ndef add_law(name, rule):\n conclusion = rule\n premisses = []\n while conclusion[0] == '==>':\n premisses.extend(get_conjuncts(conclusion[1]))\n conclusion = conclusion[2]\n laws[name] = (premisses, conclusion)\n\ndef checkProof(proof):\n if proof == []:\n raise ProofError(\"Need at least one assert\")\n antecedent = get_conjuncts(proof[0][1])\n i = 1\n while i < len(proof):\n line, consequent, justification = proof[i]\n consequent = get_conjuncts(consequent)\n\n check_entailment(line, antecedent, consequent, justification)\n\n antecedent = consequent\n i += 1\n\ndef check_text(text):\n # lexer = Lexer(text)\n # while True:\n # token = lexer.next_token()\n # print(\"'%s': '%s'\" % (token, lexer.get_token_value()))\n # if token == 'EOF':\n # break\n\n parser = Parser(text)\n while parser.tokenType != 'EOF':\n if parser.tokenType == 'EOL':\n parser.eat()\n elif parser.tokenType == '#':\n name, rule = parser.parseLaw()\n # print(\"Adding law\", name, rule)\n add_law(name, rule)\n else:\n proof = parser.parseProof()\n # print(proof)\n checkProof(proof)\n\ndef check_file(path):\n with open(path) as f:\n text = f.read()\n check_text(text)\n\nif len(sys.argv) > 1:\n try:\n check_file(sys.argv[1])\n print(\"%s was checked successfully; the proof outline is valid!\" % (sys.argv[1],))\n except LocError as e:\n print(e)\nelse:\n window = tkinter.Tk()\n window.title(\"Proof Checker\")\n panedWindow = tkinter.PanedWindow(window, orient=tkinter.VERTICAL)\n\n def check_proof():\n text_box.tag_remove('error', '1.0', tkinter.END)\n error_msg_box.delete('1.0', tkinter.END)\n window.update()\n try:\n check_text(text_box.get('1.0', tkinter.END))\n messagebox.showinfo(message=\"Geen fouten gevonden; het bewijs is geldig!\")\n except LocError as e:\n (startLine, startCol), (endLine, endCol) = e.loc\n print(e.loc)\n text_box.tag_add('error', '%d.%s' % (startLine + 1, 'end' if startCol == -1 else startCol), '%d.%s' % (endLine + 1, 'end' if endCol == -1 else endCol))\n 
error_msg_box.insert('1.0', e.args[0])\n menubar = tkinter.Menu(window)\n proof = tkinter.Menu(menubar)\n proof.add_command(label=\"Check proof\", command=check_proof)\n menubar.add_cascade(label=\"Proof\", menu=proof)\n\n # display the menu\n window.config(menu=menubar)\n\n text_box = scrolledtext.ScrolledText(panedWindow)\n text_box.pack(fill=tkinter.BOTH, expand=True)\n with open('gevolgtrekkingen_uit_voorbeeldsilhouetten.py') as f:\n text = f.read()\n text_box.insert('1.0', text)\n\n text_box.tag_config('error', foreground='red', underline=1)\n panedWindow.add(text_box)\n\n error_msg_box = scrolledtext.ScrolledText(panedWindow)\n error_msg_box.pack(fill=tkinter.BOTH, expand=True)\n panedWindow.add(error_msg_box)\n\n panedWindow.pack(fill=tkinter.BOTH, expand=True)\n\n tkinter.mainloop()\n","repo_name":"btj/proofchecker","sub_path":"proofchecker.py","file_name":"proofchecker.py","file_ext":"py","file_size_in_byte":27202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"40703255414","text":"from flask import Flask, render_template,url_for, request,redirect,send_file\nfrom werkzeug.utils import secure_filename\nimport os\nfrom os import path\nimport cv2\n\n\n\napp = Flask(__name__)\n\n@app.route(\"/\")\n@app.route(\"/home\")\ndef home():\n return render_template(\"home.html\")\n\n\n\n@app.route(\"/convert\", methods=[\"GET\", \"POST\"] )\ndef convert():\n\n if request.method == \"POST\":\n\n if request.files:\n video = request.files[\"video\"]\n video.save(secure_filename(video.filename))\n print(video.filename)\n\n cap= cv2.VideoCapture(video.filename)\n fourcc = cv2.VideoWriter_fourcc(*'XVID')\n fps=cap.get(cv2.CAP_PROP_FPS)\n ret, img=cap.read()\n height,width,layers=img.shape\n size=(width,height)\n out_name=\"video_output.mp4\"\n video.save('video_output.mp4')\n out= cv2.VideoWriter('video_output.mp4',fourcc,fps,size,0)\n while(cap.isOpened()):\n ret, frame = cap.read();\n if ret == True:\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n out.write(gray)\n else:\n break\n cap.release()\n out.release()\n os.remove(video.filename)\n return send_file(out_name,as_attachment=True)\n\n return render_template(\"convert.html\")\n\n\n\n\n\n\n\n\n\n\n@app.route(\"/about\")\ndef about():\n\treturn render_template('about.html')\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"suryanshbhar/RGB2GREYSCALE","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"18855678057","text":"import shutil\nimport os, os.path\nimport sys\nimport argparse\nimport random\n\n\ndef CreateArgParser():\n # Training settings\n parser = argparse.ArgumentParser(description='Image Mover')\n\n parser.add_argument('--source', required= True)\n\n parser.add_argument('--dest_train', required= True)\n parser.add_argument('--dest_test', required= True)\n\n return parser\n\ndef MoveRandomImages():\n args = CreateArgParser().parse_args()\n\n path, dirs, files = os.walk(args.source).__next__()\n print(len(files))\n file_num = len(files)\n\n valid_files = {x: x for x in range(file_num)}\n train = {}\n\n print(valid_files)\n\n while True:\n rand_num = random.randrange(file_num)\n\n if str(rand_num) in train == False:\n train[str(rand_num)] = rand_num\n del valid_files[str(rand_num)]\n\n if len(valid_files) == 2200:\n break\n\n print(len(train))\n print(len(valid_files))\n print(train)\n\ndef MoveImages():\n args = 
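In the Flask /convert route above, video.save(out_name) re-saves the upload over the output path just before cv2.VideoWriter truncates the same file, and the frame read to probe the size is consumed before the copy loop, dropping frame 0. A tidier sketch of the transcode core (same OpenCV calls; the CAP_PROP_POS_FRAMES rewind is my addition and is backend-dependent):

import cv2

def to_grayscale(src_path, dst_path='video_output.mp4'):
    cap = cv2.VideoCapture(src_path)
    fps = cap.get(cv2.CAP_PROP_FPS)
    ok, frame = cap.read()                    # probe one frame for the size
    h, w = frame.shape[:2]
    cap.set(cv2.CAP_PROP_POS_FRAMES, 0)       # rewind so frame 0 is not lost
    out = cv2.VideoWriter(dst_path, cv2.VideoWriter_fourcc(*'XVID'),
                          fps, (w, h), 0)     # isColor=0 for grayscale frames
    while True:
        ok, frame = cap.read()
        if not ok:
            break
        out.write(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY))
    cap.release()
    out.release()
    return dst_path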
CreateArgParser().parse_args()\n\n path, dirs, files = os.walk(args.source).__next__()\n print(len(files))\n num_files = len(files)\n\n fs = args.source\n fdtrain = args.dest_train\n fdtest = args.dest_test\n\n for i in range(5400):\n file_name = str(i) + '.png'\n file_source = fs + file_name\n\n if i < 3200:\n file_train = fdtrain + file_name\n shutil.move(file_source, file_train)\n else:\n file_test = fdtest + file_name\n shutil.move(file_source, file_test) \n\nif __name__ == '__main__':\n MoveImages()\n\n","repo_name":"brod4910/LenslessCameraML","sub_path":"Scripts/ImageScript.py","file_name":"ImageScript.py","file_ext":"py","file_size_in_byte":1568,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"15286228130","text":"from scipy import ndimage\r\nfrom collections import Counter\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport hickle\r\nimport pickle\r\nimport os\r\nimport json\r\n\r\nfrom PIL import Image\r\nimport time\r\nimport math\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.init as weight_init\r\nimport torch.utils.model_zoo as model_zoo\r\nfrom torch.nn.parameter import Parameter\r\nimport torch.nn.functional as F\r\nfrom torchvision import models\r\nimport torchvision.transforms as transforms\r\nimport torch.optim\r\nimport torch.utils.data\r\nimport torchvision.datasets as datasets\r\nfrom tqdm import tqdm\r\n\r\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\r\nprint(device)\r\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1\"\r\n\r\n\r\n\"\"\"Reference Link: https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py \"\"\"\r\n\r\n__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',\r\n 'resnet152']\r\nmodel_urls = {\r\n 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',\r\n 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',\r\n 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',\r\n 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',\r\n 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',\r\n}\r\n\r\ndef conv3x3(in_planes, out_planes, stride=1):\r\n \"\"\"3x3 convolution with padding\"\"\"\r\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\r\n padding=1, bias=False)\r\n\r\nclass BasicBlock(nn.Module):\r\n expansion = 1\r\n\r\n def __init__(self, inplanes, planes, stride=1, downsample=None):\r\n super(BasicBlock, self).__init__()\r\n self.conv1 = conv3x3(inplanes, planes, stride)\r\n self.bn1 = nn.BatchNorm2d(planes)\r\n self.relu = nn.ReLU(inplace=True)\r\n self.conv2 = conv3x3(planes, planes)\r\n self.bn2 = nn.BatchNorm2d(planes)\r\n self.downsample = downsample\r\n self.stride = stride\r\n\r\n def forward(self, x):\r\n residual = x\r\n\r\n out = self.conv1(x)\r\n out = self.bn1(out)\r\n out = self.relu(out)\r\n\r\n out = self.conv2(out)\r\n out = self.bn2(out)\r\n\r\n if self.downsample is not None:\r\n residual = self.downsample(x)\r\n\r\n out += residual\r\n out = self.relu(out)\r\n\r\n return out\r\n\r\nclass Bottleneck(nn.Module):\r\n expansion = 4\r\n\r\n def __init__(self, inplanes, planes, stride=1, downsample=None):\r\n super(Bottleneck, self).__init__()\r\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\r\n self.bn1 = nn.BatchNorm2d(planes)\r\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,\r\n padding=1, bias=False)\r\n self.bn2 = 
nn.BatchNorm2d(planes)\r\n self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)\r\n self.bn3 = nn.BatchNorm2d(planes * 4)\r\n self.relu = nn.ReLU(inplace=True)\r\n self.downsample = downsample\r\n self.stride = stride\r\n\r\n def forward(self, x):\r\n residual = x\r\n\r\n out = self.conv1(x)\r\n out = self.bn1(out)\r\n out = self.relu(out)\r\n\r\n out = self.conv2(out)\r\n out = self.bn2(out)\r\n out = self.relu(out)\r\n\r\n out = self.conv3(out)\r\n out = self.bn3(out)\r\n\r\n if self.downsample is not None:\r\n residual = self.downsample(x)\r\n\r\n out += residual\r\n out = self.relu(out)\r\n\r\n return out\r\n\r\n\r\nclass ResNet(nn.Module):\r\n\r\n def __init__(self, block, layers, num_classes=1000):\r\n self.inplanes = 64\r\n super(ResNet, self).__init__()\r\n self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,\r\n bias=False)\r\n # Batch normalization reduces the amount by what the hidden unit values shift around (covariance shift)\r\n self.bn1 = nn.BatchNorm2d(64)\r\n self.relu = nn.ReLU(inplace=True)\r\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\r\n self.layer1 = self._make_layer(block, 64, layers[0])\r\n self.layer2 = self._make_layer(block, 128, layers[1], stride=2)\r\n self.layer3 = self._make_layer(block, 256, layers[2], stride=2)\r\n self.layer4 = self._make_layer(block, 512, layers[3], stride=2)\r\n self.avgpool = nn.AvgPool2d(7, stride=1)\r\n self.fc = nn.Linear(512 * block.expansion, num_classes)\r\n\r\n for m in self.modules():\r\n if isinstance(m, nn.Conv2d):\r\n weight_init.kaiming_normal_(m.weight, mode='fan_out')\r\n elif isinstance(m, nn.BatchNorm2d):\r\n weight_init.constant_(m.weight, 1)\r\n weight_init.constant_(m.bias, 0)\r\n\r\n def _make_layer(self, block, planes, blocks, stride=1):\r\n downsample = None\r\n if stride != 1 or self.inplanes != planes * block.expansion:\r\n downsample = nn.Sequential(\r\n nn.Conv2d(self.inplanes, planes * block.expansion,\r\n kernel_size=1, stride=stride, bias=False),\r\n nn.BatchNorm2d(planes * block.expansion),\r\n )\r\n\r\n layers = []\r\n layers.append(block(self.inplanes, planes, stride, downsample))\r\n self.inplanes = planes * block.expansion\r\n for i in range(1, blocks):\r\n layers.append(block(self.inplanes, planes))\r\n\r\n return nn.Sequential(*layers)\r\n\r\n def forward(self, x):\r\n x = self.conv1(x)\r\n x = self.bn1(x)\r\n x = self.relu(x)\r\n x = self.maxpool(x)\r\n\r\n x = self.layer1(x)\r\n x = self.layer2(x)\r\n x = self.layer3(x)\r\n x = self.layer4(x)\r\n\r\n # x = self.avgpool(x)\r\n # x = x.view(x.size(0), -1)\r\n # x = self.fc(x)\r\n\r\n return x\r\n\r\ndef resnet34(pretrained=False, **kwargs):\r\n \"\"\"Constructs a ResNet-34 model.\r\n Args:\r\n pretrained (bool): If True, returns a model pre-trained on ImageNet\r\n \"\"\"\r\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\r\n return model\r\n\r\ndef resnet50(pretrained=False, **kwargs):\r\n \"\"\"Constructs a ResNet-50 model.\r\n Args:\r\n pretrained (bool): If True, returns a model pre-trained on ImageNet\r\n \"\"\"\r\n model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\r\n return model\r\n\r\n\r\ndef load_pickle(path):\r\n with open(path, 'rb') as f:\r\n file = pickle.load(f, encoding='latin1')\r\n print ('Loaded %s..' %path)\r\n return file \r\n\r\ndef save_pickle(data, path):\r\n with open(path, 'wb') as f:\r\n pickle.dump(data, f, 2)\r\n print ('Saved %s..' 
%path)\r\n\r\ndef prepend(list, str): \r\n # Using format() \r\n str += '{0}'\r\n list = [str.format(i) for i in list] \r\n return(list)\r\n\r\n\r\ndef main():\r\n\r\n batch_size = 100\r\n max_length = 15\r\n word_count_threshold = 1\r\n \r\n model=resnet34()\r\n orthogonal_model=torch.load('model_best.pth.tar', map_location='cpu')\r\n from collections import OrderedDict\r\n new_state_dict = OrderedDict()\r\n for keys, v in orthogonal_model['state_dict'].items():\r\n name = keys[7:] # remove `module.`\r\n new_state_dict[name] = v\r\n model.load_state_dict(new_state_dict)\r\n model.eval()\r\n \r\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225])\r\n preprocess_image = transforms.Compose([\r\n transforms.CenterCrop(size=224),\r\n transforms.ToTensor(),\r\n normalize,\r\n ])\r\n\r\n image_dir = '/Data/santosh_1821cs03/Santosh/val_feature_extract/image/%2014_resized/'\r\n \r\n for split in ['train_hindi', 'val_hindi', 'test_hindi']:\r\n annotations = load_pickle('/Data/santosh_1821cs03/Santosh/val_feature_extract/data_hindi/%s/%s.annotations.pkl' % (split, split))\r\n save_path = './%s.features_orthogonal_cnn_49X512.hkl' % (split)\r\n image_path = list(annotations['file_name'].unique())\r\n n_examples = len(image_path)\r\n image_path=prepend(image_path, '/Data/santosh_1821cs03/Santosh/val_feature_extract/')\r\n all_feats = np.ndarray([n_examples, 49, 512], dtype=np.float32) #change here according to layer chosen\r\n\r\n for start, end in zip(range(0, n_examples, batch_size),\r\n range(batch_size, n_examples + batch_size, batch_size)):\r\n image_batch_file = image_path[start:end]\r\n image_batch = torch.stack(list(map(lambda x: preprocess_image(Image.open(x).convert('RGB')), image_batch_file)))\r\n\r\n feats=model(image_batch)\r\n feats=feats.permute(0, 3, 2, 1)\r\n feats=feats.detach().numpy()\r\n all_feats[start:end, :] = feats.reshape(-1,49,512)\r\n print (\"Processed %d %s features..\" % (end, split))\r\n\r\n hickle.dump(all_feats, save_path)\r\n print (\"Saved %s..\" % (save_path))\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()","repo_name":"sahilee26/Image_Captioning_Hindi_Language","sub_path":"final_feature_extract_orthogonal_resnet.py","file_name":"final_feature_extract_orthogonal_resnet.py","file_ext":"py","file_size_in_byte":8941,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"13222212654","text":"import tensorflow as tf\n\n\nclass LstmModel(tf.keras.Model):\n def __init__(self, num_classes, vocab_size, embeddings) -> None:\n super().__init__()\n self.num_classes = num_classes\n if embeddings is not None:\n embedding_layer = tf.keras.layers.Embedding(\n vocab_size, 50,\n tf.keras.initializers.Constant(embeddings),\n embeddings_regularizer=tf.keras.regularizers.L1(l1=0.001)\n )\n # embedding_layer.trainable = False\n else:\n embedding_layer = tf.keras.layers.Embedding(vocab_size, 50, embeddings_regularizer=tf.keras.regularizers.L1(l1=0.001))\n self.model = tf.keras.models.Sequential([\n embedding_layer,\n tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(256, return_sequences=True)),\n tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(256, return_sequences=False)),\n tf.keras.layers.Dropout(0.5),\n tf.keras.layers.Dense(self.num_classes, activation='softmax')\n ])\n\n def call(self, x):\n return 
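Because avgpool and fc are commented out in forward(), the ResNet above returns the last convolutional feature map; for a 224x224 input to resnet34 that is (N, 512, 7, 7), which the extraction loop then permutes and flattens to (N, 49, 512). A quick shape-check sketch:

import torch

model = resnet34()            # definition from the record above
model.eval()
with torch.no_grad():
    feats = model(torch.randn(1, 3, 224, 224))     # torch.Size([1, 512, 7, 7])
    feats = feats.permute(0, 3, 2, 1).reshape(-1, 49, 512)
print(feats.shape)                                  # torch.Size([1, 49, 512])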
self.model(x)\n","repo_name":"MarekPokropinski/TextClassificationReuters","sub_path":"models/lstm.py","file_name":"lstm.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"12125402888","text":"import networkx as nx\nfrom node2vec import Node2Vec\n\nfrom app import db\nfrom app.models import Link\n\n\nclass NodeEmbed():\n \"Class to do node embedings\"\n\n def __init__(self):\n self.links = None\n\n def create_graph(self):\n directed_graph = nx.DiGraph()\n\n self.links = db.query(Link).with_entities(Link.from_id, Link.to_id)\n\n for link in links:\n from_id, to_id = link\n directed_graph.add_edge(from_id, to_id)\n\n # Precompute Probabilities and generate walks\n node_vec = Node2Vec(directed_graph)\n\n # Embed Nodes\n model = node_vec.fit()\n\n return model\n","repo_name":"schedutron/visualnet","sub_path":"app/spider/spider_network.py","file_name":"spider_network.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"73"} +{"seq_id":"73965492395","text":"from django import forms\nfrom .models import ContactModel\n\n\nclass ContactForm(forms.ModelForm): \n \n class Meta:\n model = ContactModel\n fields = \"__all__\"\n widgets = {\n 'email' : forms.TextInput(attrs={'class' : 'form-control'})\n }\n\n def clean_mobile(self):\n mobile = self.cleaned_data['mobile']\n if mobile:\n if not mobile.isnumeric():\n raise forms.ValidationError('mobile must be numeric')\n else:\n return mobile\n","repo_name":"miladimos/djshop","sub_path":"src/apps/pages/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"73"} +{"seq_id":"20750909482","text":"import statistics\n\nimport matplotlib.image as image\n\n\nclass Processor:\n\n def __init__(self, img):\n self.img = image.imread(img).copy()\n self.width, self.height, self.depth = self.img.shape\n\n def to_black_and_white(self, threshold=150):\n white = [255, 255, 255]\n black = [0, 0, 0]\n for x in range(self.width):\n for y in range(self.height):\n mean = statistics.fmean(self.img[x][y])\n self.img[x][y] = black if mean < threshold else white\n\n def to_greyscale(self):\n for x in range(self.width):\n for y in range(self.height):\n mean = statistics.fmean(self.img[x][y])\n self.img[x][y] = [mean] * self.depth\n\n def keep_one_colour(self, colour, threshold):\n mean_colour = statistics.fmean(colour)\n for x in range(self.width):\n for y in range(self.height):\n mean_img = statistics.fmean(self.img[x][y])\n if abs(mean_img - mean_colour) > threshold:\n self.img[x][y] = [255] * self.depth\n\n # def __surrounding_average(self, x, y):\n # average = [0] * self.depth\n # divisor = 0\n # for i in range(-1, 2):\n # for j in range(-1, 2):\n # if (0 <= x + i < self.width) and (0 <= y + j < self.height):\n # for k in range(self.depth):\n # average[k] += self.img[x + i][y + j][k]\n # divisor += 1\n # for k in range(self.depth):\n # average[k] /= divisor\n # return average\n\n def __surrounding_mean(self, x, y):\n surroundings = []\n for i in range(-1, 2):\n for j in range(-1, 2):\n try:\n surroundings.append(statistics.fmean(self.img[x + i][y + j]))\n except IndexError:\n pass\n return statistics.fmean(surroundings)\n\n def blur(self):\n copy = self.img.copy()\n for x in range(0, self.width):\n for y in range(0, self.height):\n copy[x][y] = self.__surrounding_mean(x, y)\n 
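NodeEmbed.create_graph() above iterates over an undefined name: the query result is stored on self.links, but the loop reads links, raising NameError. A corrected fragment (same networkx/node2vec calls; db.query(...) kept exactly as written in the source):

# Inside create_graph(), after the query:
self.links = db.query(Link).with_entities(Link.from_id, Link.to_id)
for from_id, to_id in self.links:        # was: `for link in links:`
    directed_graph.add_edge(from_id, to_id)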
self.img = copy.copy()\n","repo_name":"draescherl/image-processing","sub_path":"processor.py","file_name":"processor.py","file_ext":"py","file_size_in_byte":2154,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"44287879474","text":"#from api import forex\nfrom pathlib import Path\nimport re, csv\n\nfile_path = Path.cwd()/'project_group'/'overall_report.txt'\nfile_path.touch()\n\ndef profit_and_loss():\n \"\"\"\n - This function returns the data in the profit and loss csv\n - It reads the values in the profit and loss csv and appends it to an empty list\n \"\"\"\n # empty list made to append the net profit values\n pnl_list = []\n path = Path.cwd()/'project_group'/'csv_reports'/'profit-and-loss-usd.csv'\n # path is opened in read mode as file\n with path.open(mode = \"r\",encoding=\"UTF-8-sig\",newline=\"\") as file:\n reader = csv.reader(file)\n # skips headers in the profit and loss csv\n next(reader)\n \n #for loop used to append values without the headers in the profit and loss csv to pnl_list\n for line in reader:\n pnl_list.append(line)\n return pnl_list\n\n## Check values in profit and loss\n#print(profit_and_loss())\n\ndef pnl_write(forex): \n \"\"\"\n - This function returns the days that have deficit and the amount of deficit of the net profit\n - If there are no deficits, the function will return a surplus\n - The net profit values and the days are added to separate empty lists\n \"\"\"\n #empty lists made to append the net profit values and days respectively\n all_pnl= []\n day_pnl= []\n\n #for loop to append the net profit values in index position 4\n #values are converted to float\n for value in profit_and_loss():\n all_pnl.append(int(value[4]*int(forex)))\n # to check the values in all_pnl\n #print(all_pnl)\n\n #for loop to append the days in index position 0\n for days in profit_and_loss():\n day_pnl.append(days[0])\n # to check the values in all_pnl\n #print(day_pnl)\n \n count = 0 \n #for loop to\n for amount in range(len(all_pnl)-1):\n diff = all_pnl[amount] - all_pnl[amount + 1]\n if diff >0:\n with file_path.open(mode = 'a', encoding = 'UTF-8') as file:\n text = file.write(f\"[PROFIT DEFICIT] DAY: {day_pnl[amount+1]}, AMOUNT: SGD{diff:.2f}\\n\")\n# write cash deficit with corresponding day and difference in overall_report.txt\n count += 1\n\n if count == 0:\n with file_path.open(mode = 'a', encoding = 'UTF-8') as file:\n text = file.write(f\"[PROFIT SURPLUS] Profit on each period is higher than the previous period\\n\")\n# write cash surplus in overall_report.txt","repo_name":"vallimwr/TA01","sub_path":"project_group/profit_and_loss.py","file_name":"profit_and_loss.py","file_ext":"py","file_size_in_byte":2451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"20965514920","text":"import turtle\r\nt = turtle.Turtle()\r\nt.hideturtle\r\ncolor = input(\"Nhap ma mau: \")\r\nw = float(input(\"Nhập w:\"))\r\nh = float(input(\"Nhập h:\"))\r\n\r\nt.color(color)\r\nt.forward(h)\r\nt.right(90)\r\nt.forward(w)\r\nt.right(90)\r\nt.forward(h)\r\nt.right(90)\r\nt.forward(w)\r\nt.right(90)\r\n\r\nc = (w+h)*2\r\nv = w*h\r\n\r\nprint(\"Chu vi dài = {w} rộng = {h} là {c} \".format(w=w, h=h,c=c))\r\nprint(\"Diện tích dài = {w} rộng = {h} là {v} \".format(w=w, 
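In pnl_write() above, value[4] is still the string that csv.reader produced, so int(value[4]*int(forex)) repeats the string forex times and then parses the concatenation — e.g. "100" with forex=2 becomes 100100 — even though the surrounding comment says the values should become floats. A hedged one-line fix:

# Convert the CSV field to a number before scaling by the exchange rate:
all_pnl.append(float(value[4]) * float(forex))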
h=h,v=v))\r\n\r\nturtle.done()","repo_name":"sadangel1/my-new-project","sub_path":"nhap/ve_hinh_chu_nhat.py","file_name":"ve_hinh_chu_nhat.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"73539019115","text":"import requests\nfrom os import environ\n\nurl = f\"https://discord.com/api/v8/applications/{environ['JWBOT_CLIENT_ID']}/guilds/{environ['JWBOT_TARGET_GUILD']}/commands\"\n\njson = {\n \"name\": \"watch\",\n \"description\": \"Look up a movie or show on JustWatch to see what service you would need for it\",\n \"options\": [\n {\n \"name\": \"name\",\n \"description\": \"The name of the movie or show you want to watch\",\n \"type\": 3,\n \"required\": True\n },\n {\n \"name\": \"_type\",\n \"description\": \"The type of thing you want to watch (movie, show)\",\n \"type\": 3,\n \"required\": False,\n \"choices\": [\n {\n \"name\": \"Movie\",\n \"value\": \"movie\"\n },\n {\n \"name\": \"Show\",\n \"value\": \"show\"\n }\n ]\n },\n {\n \"name\": \"result_count\",\n \"description\": \"How many movie/show results you want to see (default is 3)\",\n \"type\": 4,\n \"required\": False\n },\n {\n \"name\": \"dontskipnonfree\",\n \"description\": \"Do you want to include results that aren't free or fixed rate (e.g Apple)\",\n \"type\": 5,\n \"required\": False\n }\n ]\n}\n\nwith open(\".bottoken\") as f:\n headers = {\"Authorization\": \"Bot \" + f.read().strip()}\n\nprint(requests.post(url, headers=headers, json=json).json())\n","repo_name":"alyssadev/discord-justwatch","sub_path":"register.py","file_name":"register.py","file_ext":"py","file_size_in_byte":1510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"12809846490","text":"import logging\nfrom pathlib import Path\n\nfrom octopus_energy_scraper.config import Settings\nfrom octopus_energy_scraper.data_cache.file import FileCache\nfrom octopus_energy_scraper.scraper import Scraper\nfrom octopus_energy_scraper.types.usage import EnergyType\n\n\ndef configure_logs() -> None:\n logging.basicConfig(level=logging.DEBUG)\n # logging.basicConfig(level=logging.INFO)\n\n\ndef main() -> None:\n configure_logs()\n log = logging.getLogger(__name__)\n settings = Settings() # Parsed from environment or .env file etc.\n\n data_path = Path(\"./consumption_data.json\").resolve()\n log.info(f\"Syncing octopus data to file: {data_path.relative_to(Path('.').resolve())}.\")\n\n file_cache = FileCache(cache_path=data_path)\n file_cache.load()\n scraper = Scraper(settings, file_cache)\n\n records_added = scraper.sync_data()\n\n log.info(\n f\"Added {records_added[EnergyType.ELECTRICITY]} electricity records and \"\n f\"{records_added[EnergyType.GAS]} gas records.\",\n )\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"martsa1/octopus-scraper","sub_path":"python/octopus_energy_scraper/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"7902105498","text":"from pandora.preprocessing import StatisticsMeasure\nfrom pandora.util import seed_everything\n\nimport numpy as np\n\nseed_everything()\n\n\ndef test_statistical_preprocessor():\n test_array = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 10], [9, 11, 12]])\n\n preprocessor_obj = StatisticsMeasure()\n output = preprocessor_obj.fit_transform(test_array)\n\n real_output = np.array(\n [\n [6, 15, 25, 32],\n [2, 5, 
8.333, 10.666],\n [2, 5, 8, 11],\n [0.82, 0.816, 1.247, 1.247],\n [0, 0, 0.381, -0.381],\n [-1.5, -1.5, -1.5, -1.5]\n ]\n )\n np.testing.assert_array_equal(real_output.round(2).T, output.round(2))\n","repo_name":"akshkr/pandora-python","sub_path":"tests/test_preprocessor.py","file_name":"test_preprocessor.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"3981877042","text":"import logging\nimport socket\n\nimport player\nimport game\n\nIP = socket.gethostbyname(socket.gethostname())\nPORT = 9850\n\nserversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nserversocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n\nserversocket.bind((IP, PORT))\nserversocket.listen(2)\n\n\ndef main():\n clientsockets = []\n\n while len(clientsockets) < 2:\n clientsocket, addr = serversocket.accept()\n logging.info(f\"Accepted socket with address {addr}\")\n clientsockets.append(clientsocket)\n\n players = [player.Player(clientsocket) for clientsocket in clientsockets]\n\n while True:\n g = game.Game(players)\n g.send_start_messages()\n\n while True:\n g.get_ships()\n g.main_game_loop()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"AlexanderJCS/multiplayer-battleship-2","sub_path":"server/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"28029610859","text":"load(\"@prelude//:paths.bzl\", \"paths\")\nload(\":apple_bundle_destination.bzl\", \"AppleBundleDestination\", \"bundle_relative_path_for_destination\")\nload(\":apple_bundle_types.bzl\", \"AppleBundleInfo\")\nload(\":apple_package_config.bzl\", \"IpaCompressionLevel\")\nload(\":apple_sdk.bzl\", \"get_apple_sdk_name\")\nload(\":apple_toolchain_types.bzl\", \"AppleToolchainInfo\")\n\n_SKIP_COPYING_SWIFT_STDLIB_EXTENSIONS = [\n \".framework\",\n \".appex\",\n]\n\ndef apple_package_impl(ctx: \"context\") -> [\"provider\"]:\n ipa_contents = _get_ipa_contents(ctx)\n compression_level = _compression_level_arg(IpaCompressionLevel(ctx.attrs._ipa_compression_level))\n\n package = ctx.actions.declare_output(\"{}.ipa\".format(ctx.attrs.bundle.label.name))\n\n # TODO(T110378117): Pull this into a shared zip utility function\n zip = cmd_args([\"(cd \\\"\", cmd_args(ipa_contents), \"\\\" && zip -X -r {} - .) 
> \".format(compression_level), package.as_output()], delimiter = \"\")\n ctx.actions.run([\"sh\", \"-c\", zip], category = \"apple_package_zip\")\n\n return [DefaultInfo(default_output = package)]\n\ndef _get_ipa_contents(ctx) -> \"artifact\":\n bundle = ctx.attrs.bundle\n app = bundle[DefaultInfo].default_outputs[0]\n\n contents = {\n paths.join(\"Payload\", app.basename): app,\n }\n\n apple_bundle_info = bundle[AppleBundleInfo]\n should_copy_swift_stdlib = not (apple_bundle_info.skip_copying_swift_stdlib or app.extension in _SKIP_COPYING_SWIFT_STDLIB_EXTENSIONS)\n if should_copy_swift_stdlib:\n contents[\"SwiftSupport\"] = _get_swift_support_dir(ctx, app, apple_bundle_info)\n\n if apple_bundle_info.contains_watchapp:\n contents[\"Symbols\"] = _build_symbols_dir(ctx)\n\n return ctx.actions.copied_dir(\n \"__unzipped_ipa_contents__\",\n contents,\n )\n\ndef _build_symbols_dir(ctx) -> \"artifact\":\n symbols_dir = ctx.actions.declare_output(\"__symbols__\", dir = True)\n ctx.actions.run(\n cmd_args([\"mkdir\", \"-p\", symbols_dir.as_output()]),\n category = \"watchos_symbols_dir\",\n )\n\n return symbols_dir\n\ndef _get_swift_support_dir(ctx, bundle_output: \"artifact\", bundle_info: AppleBundleInfo.type) -> \"artifact\":\n stdlib_tool = ctx.attrs._apple_toolchain[AppleToolchainInfo].swift_toolchain_info.swift_stdlib_tool\n sdk_name = get_apple_sdk_name(ctx)\n\n # .app -> app\n # This is the way the input is expected.\n extension = bundle_output.extension[1:]\n swift_support_dir = ctx.actions.declare_output(\"__swift_dylibs__\", dir = True)\n script, _ = ctx.actions.write(\n \"build_swift_support.sh\",\n [\n cmd_args(swift_support_dir, format = \"mkdir -p {}\"),\n cmd_args(\n [\n stdlib_tool,\n # If you're debugging, you can pass the '--verbose' flag here.\n \"--copy\",\n \"--scan-executable\",\n cmd_args(\n [\n bundle_output,\n bundle_relative_path_for_destination(AppleBundleDestination(\"executables\"), sdk_name, extension),\n bundle_info.binary_name,\n ],\n delimiter = \"/\",\n ),\n _get_scan_folder_args(AppleBundleDestination(\"plugins\"), bundle_output, sdk_name, extension),\n _get_scan_folder_args(AppleBundleDestination(\"frameworks\"), bundle_output, sdk_name, extension),\n \"--destination\",\n swift_support_dir,\n ],\n delimiter = \" \",\n quote = \"shell\",\n ),\n ],\n allow_args = True,\n )\n ctx.actions.run(\n cmd_args([\"/bin/sh\", script]).hidden([stdlib_tool, bundle_output, swift_support_dir.as_output()]),\n category = \"copy_swift_stdlibs\",\n )\n\n return swift_support_dir\n\ndef _get_scan_folder_args(dest: AppleBundleDestination.type, bundle_output: \"artifact\", sdk_name, extension) -> \"_arglike\":\n return cmd_args(\n [\n \"--scan-folder\",\n cmd_args(\n [\n bundle_output,\n bundle_relative_path_for_destination(dest, sdk_name, extension),\n ],\n delimiter = \"/\",\n ),\n ],\n )\n\ndef _compression_level_arg(compression_level: IpaCompressionLevel.type) -> str.type:\n if compression_level.value == \"none\":\n return \"-0\"\n elif compression_level.value == \"default\":\n return \"-6\"\n elif compression_level.value == \"min\":\n return \"-1\"\n elif compression_level.value == \"max\":\n return \"-9\"\n else:\n fail(\"Unknown .ipa compression level: \" + str(compression_level))\n","repo_name":"joeleba/dummy-benchmark","sub_path":"genrule-project/prelude/apple/apple_package.bzl","file_name":"apple_package.bzl","file_ext":"bzl","file_size_in_byte":4722,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} 
+{"seq_id":"74201669035","text":"import psycopg2\n\n# most popular articles of all time\n# which articles have been accessed the most\n# format:\n\n\ndef pop_articles(db_name):\n db = psycopg2.connect(dbname=db_name)\n c = db.cursor()\n # we want pages that don't result in errors, hence ='200 OK'\n # since all paths start w/ '/article/', can use like to compare to slug\n query = '''select articles.title, count(log.path) as views\n from articles, log\n where status='200 OK'\n and log.path like '/article/'||articles.slug\n group by articles.title\n order by views desc;'''\n c.execute(query)\n rows = c.fetchall()\n db.close()\n for row in rows:\n result = '{} - {}'.format(row[0], row[1])\n print(result)\n return rows\n\n\n# Who are the most popoular article authors of all time?\n# which authors have the highest total aggregate views\n# format: \"Author-# views\"\n\ndef pop_authors(db_name):\n db = psycopg2.connect(dbname=db_name)\n c = db.cursor()\n query = '''select authors.name, count(log.path) as views\n from authors, articles, log\n where status='200 OK'\n and log.path like '/article/'||articles.slug\n and authors.id=articles.author\n group by authors.name\n order by views desc;'''\n c.execute(query)\n rows = c.fetchall()\n db.close()\n for row in rows:\n result = '{} - {} views'.format(row[0], row[1])\n print(result)\n return rows\n\n# On which days did more than 1% of requests lead to errors?\n# Log tables that include error codes grouped by days\n# format: \"date-#% errors\"\n\n\ndef most_errors(db_name):\n db = psycopg2.connect(dbname=db_name)\n c = db.cursor()\n # set up views that pulled total errors (code!=200) and total views per day\n # set upa subq to extract a percentage in way that where clause can be used\n query = '''select days, percentage\n from (select date(errors.days) as days,\n round(((errors.errors / daily_views.daily_views::float)*100)\n ::numeric,2) as percentage\n from errors, daily_views\n where errors.days=daily_views.days) as subq\n where percentage>1;'''\n c.execute(query)\n rows = c.fetchall()\n db.close()\n for row in rows:\n result = '{} - {}% errors'.format(row[0], row[1])\n print(result)\n return rows\n\n# Function to print out all 3 of the other function's outputs\n\ndef news_print(db_name, file_name):\n articles = pop_articles(db_name)\n authors = pop_authors(db_name)\n errors = most_errors(db_name)\n\n # I probably should have just made this its own function\n with open(file_name, mode='x') as f:\n f.write('Most Popular Articles\\n')\n for article in articles:\n f.write('{} - {} views\\n'.format(article[0], article[1]))\n f.write('\\nMost Popular Authors\\n')\n for author in authors:\n f.write('{} - {} views\\n'.format(author[0], author[1]))\n f.write('\\nDays with >1% of Views as Errors\\n')\n for error in errors:\n f.write('{} - {} views\\n'.format(error[0], error[1]))\n","repo_name":"cwoodward10/newsdata_udacity","sub_path":"newsdata.py","file_name":"newsdata.py","file_ext":"py","file_size_in_byte":3248,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"848080839","text":"from uuid import UUID\n\nfrom src.core.entity.billing import InvoiceItem\nfrom src.core.usecase.driven.billing.billing_charge import BillingCharge\nfrom src.core.usecase.driven.billing.billing_fetch_user import BillingFetchUser\nfrom src.core.usecase.driven.creating_transaction import CreatingTransaction\nfrom src.core.usecase.driven.reading_account import ReadingAccount\n\n\nclass ReceiveCredit:\n def __init__(\n self,\n 
reading_account: ReadingAccount,\n billing_fetch_user: BillingFetchUser,\n billing_charge: BillingCharge,\n creating_transaction: CreatingTransaction,\n ):\n self.reading_account = reading_account\n self.billing_fetch_user = billing_fetch_user\n self.billing_charge = billing_charge\n self.creating_transaction = creating_transaction\n\n def receive_credit(self, external_id: UUID, amount: float):\n account = self.reading_account.by_external_id(external_id)\n credit = InvoiceItem(\"CREDIT\", amount, \"BRL\", 1.0)\n item_list = [credit]\n self.billing_fetch_user.fetch_by_account_id(external_id)\n self.billing_charge.charge(external_id, item_list)\n transaction = self.creating_transaction.create_transaction(\n account.id, amount, \"credit receive\", None\n )\n return transaction\n","repo_name":"auth-plus/auth-plus-monetization","sub_path":"src/core/usecase/receive_credit.py","file_name":"receive_credit.py","file_ext":"py","file_size_in_byte":1313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"39009730868","text":"import fileinput\n\n\ntravel_map = [line.rstrip() for line in fileinput.input()]\nrows = len(travel_map)\nwidth_of_row = len(travel_map[0])\ntree_count = 0\nx = 0\ny = 0\n\n# need to find something that deals with when x is greater than the size of a row\n# from input and how to get that to repeat...\n\nwhile y < rows:\n if x >= width_of_row:\n x -= width_of_row\n if travel_map[y][x] == '#':\n tree_count += 1\n\n x += 3\n y += 1\n\nprint(tree_count)\n\n","repo_name":"lukasindre/aoc_2020","sub_path":"03/puzzle_01.py","file_name":"puzzle_01.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"73256500076","text":"# pylint: disable=c0301\n\"\"\"Extract FIP region reports from Eclipse PRT file\"\"\"\n\nimport argparse\nimport datetime\nimport logging\nimport re\nfrom typing import List, Optional, Union\n\nimport numpy as np\nimport pandas as pd\n\nfrom ecl2df import EclFiles, getLogger_ecl2csv\nfrom ecl2df.common import parse_ecl_month, write_dframe_stdout_file\n\nlogger = logging.getLogger(__name__)\n\nREGION_REPORT_COLUMNS: List[str] = [\n \"DATE\",\n \"FIPNAME\",\n \"REGION\",\n \"DATATYPE\",\n \"TO_REGION\",\n \"STOIIP_OIL\",\n \"ASSOCIATEDOIL_GAS\",\n \"STOIIP_TOTAL\",\n \"WIIP_TOTAL\",\n \"GIIP_GAS\",\n \"ASSOCIATEDGAS_OIL\",\n \"GIIP_TOTAL\",\n]\n\n\ndef report_block_lineparser(line: str) -> tuple:\n \"\"\"\n Parses single lines within region reports, splits data into a tuple.\n\n Does not support many different phase configurations yet.\n \"\"\"\n\n def float_or_nan(string: str) -> float:\n try:\n return float(string)\n except ValueError:\n return np.nan\n\n allowed_line_starts = [\":CURRENTLY\", \":OUTFLOW\", \":MATERIAL\", \":ORIGINALLY\"]\n if not any(line.strip().upper().startswith(x) for x in allowed_line_starts):\n return tuple()\n\n colonsections = line.split(\":\")\n to_index: Optional[int]\n if \"OUTFLOW TO REGION\" in line:\n to_index = int(colonsections[1].split()[3])\n row_name = \"OUTFLOW TO REGION\"\n else:\n to_index = None\n row_name = \" \".join(colonsections[1].strip().upper().split())\n\n # Oil section:\n liquid_oil: Optional[float] = None\n vapour_oil: Optional[float] = None\n total_oil: Optional[float] = None\n if len(colonsections[2].split()) == 3:\n (liquid_oil, vapour_oil, total_oil) = map(\n float_or_nan, colonsections[2].split()\n )\n elif len(colonsections[2].split()) == 1:\n total_oil = 
float_or_nan(colonsections[2])\n else:\n (liquid_oil, total_oil) = map(float_or_nan, colonsections[2].split())\n\n total_water = float_or_nan(colonsections[3])\n\n # Gas section:\n free_gas = None\n dissolved_gas = None\n total_gas = None\n if len(colonsections[4].split()) == 1:\n total_gas = float_or_nan(colonsections[4])\n elif len(colonsections[4].split()) == 2:\n (free_gas, total_gas) = map(float_or_nan, colonsections[4].split())\n else:\n (free_gas, dissolved_gas, total_gas) = map(\n float_or_nan, colonsections[4].split()\n )\n return (\n row_name,\n to_index,\n liquid_oil,\n vapour_oil,\n total_oil,\n total_water,\n free_gas,\n dissolved_gas,\n total_gas,\n )\n\n\ndef df(prtfile: Union[str, EclFiles], fipname: str = \"FIPNUM\") -> pd.DataFrame:\n \"\"\"\n Parses a PRT file from Eclipse and finds FIPXXXX REGION REPORT blocks and\n organizes those numbers into a dataframe\n\n Each row in the dataframe represents one parsed line in the PRT file, with\n DATE and region index added.\n\n Args:\n prtfile: filename (PRT) or an EclFiles object\n fipname: The name of the regport regions, FIPNUM, FIPZON or whatever\n Max length of the string is 8, the first three characters must be FIP,\n and the next 3 characters must be unique for a given Eclipse deck.\n \"\"\"\n if isinstance(prtfile, EclFiles):\n prtfile = prtfile.get_prtfilename()\n if not fipname.startswith(\"FIP\"):\n raise ValueError(\"fipname must start with FIP\")\n if len(fipname) > 8:\n raise ValueError(\"fipname can be at most 8 characters\")\n\n # List of rows in final dataframe\n records = []\n\n # State variables while parsing line by line:\n in_report_block = False\n region_index = None\n date = None\n\n ecl_datematcher = re.compile(r\"\\s\\sREPORT\\s+\\d+\\s+(\\d+)\\s+(\\w+)\\s+(\\d+)\")\n opm_datematcher = re.compile(r\"Starting time step.*? date = (\\d+)-(\\w+)-(\\d+)\\s*\")\n\n # When case insensitive, this one works with both Eclipse100 and OPM:\n reportblockmatcher = re.compile(\n \".+\" + fipname + r\"\\s+REPORT\\s+REGION\\s+(\\d+)\", re.IGNORECASE\n )\n\n # Flag for whether we are supposedly parsing a PRT file made by OPM flow:\n opm = False\n\n with open(prtfile, encoding=\"utf-8\") as prt_fh:\n logger.info(\n \"Parsing file %s for blocks starting with %s REPORT REGION\",\n prtfile,\n fipname,\n )\n for line in prt_fh:\n matcheddate = re.match(ecl_datematcher, line)\n if matcheddate is None:\n matcheddate = re.match(opm_datematcher, line)\n if matcheddate is not None:\n opm = True\n if matcheddate is not None:\n newdate = datetime.date(\n year=int(matcheddate.group(3)),\n month=parse_ecl_month(matcheddate.group(2).upper()),\n day=int(matcheddate.group(1)),\n )\n if newdate != date:\n date = newdate\n logger.debug(\"Found date: %s\", str(date))\n continue\n matchedreportblock = re.match(reportblockmatcher, line)\n if matchedreportblock:\n in_report_block = True\n region_index = int(matchedreportblock.group(1))\n logger.debug(\" Region report for region %s\", str(region_index))\n continue\n if line.startswith(\" ============================\"):\n in_report_block = False\n continue\n\n if in_report_block:\n interesting_strings = [\"IN PLACE\", \"OUTFLOW\", \"MATERIAL\"]\n if not sum([string in line.upper() for string in interesting_strings]):\n # Skip if we are not on an interesting line.\n continue\n\n if opm is False:\n # The colons in the report block are not reliably included\n # (differs by Eclipse version), even in the same PRT file. 
We\n # insert them in fixed positions and hope for the best (if the\n # ASCII table is actually dynamic with respect to content, this\n # will fail)\n linechars = list(line)\n linechars[1] = \":\"\n linechars[27] = \":\"\n line = \"\".join(linechars)\n\n records.append(\n [date, fipname, region_index] + list(report_block_lineparser(line))\n )\n return pd.DataFrame(data=records, columns=REGION_REPORT_COLUMNS)\n\n\ndef fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:\n \"\"\"Fill parser with command line arguments\"\"\"\n parser.add_argument(\"PRTFILE\", type=str, help=\"Eclipse PRT file (or DATA file)\")\n parser.add_argument(\n \"--fipname\",\n type=str,\n help=\"Region parameter name of interest\",\n default=\"FIPNUM\",\n )\n parser.add_argument(\n \"-o\", \"--output\", type=str, help=\"Output CSV filename\", default=\"outflow.csv\"\n )\n parser.add_argument(\"-v\", \"--verbose\", action=\"store_true\", help=\"Be verbose\")\n parser.add_argument(\"--debug\", action=\"store_true\", help=\"Debug mode for logging\")\n return parser\n\n\ndef fipreports_main(args) -> None:\n \"\"\"Command line API\"\"\"\n logger = getLogger_ecl2csv( # pylint: disable=redefined-outer-name\n __name__, vars(args)\n )\n if args.PRTFILE.endswith(\".PRT\"):\n prtfile = args.PRTFILE\n else:\n prtfile = EclFiles(args.PRTFILE).get_prtfilename()\n dframe = df(prtfile, args.fipname)\n write_dframe_stdout_file(dframe, args.output, index=False, caller_logger=logger)\n","repo_name":"equinor/ecl2df","sub_path":"ecl2df/fipreports.py","file_name":"fipreports.py","file_ext":"py","file_size_in_byte":7617,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"73"} +{"seq_id":"37744158502","text":"# Create a sarcastic program that asks a user for their honest opinion,\n# then prints the same sentence back to them in aLtErNaTiNg CaPs.\nst_1 = input(\"Please input a string here : \")\nresult = \"\"\n\nodd=True\nfor c in st_1:\n if odd:\n result = result + c.upper()\n else:\n result = result + c.lower()\n odd = not odd\nprint(result)","repo_name":"Daniel-W-official-public/CodingNomads","sub_path":"python-101/12_user-input-string-formatting/12_10_sarcastic_prompt.py","file_name":"12_10_sarcastic_prompt.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"14219003063","text":"#!/usr/bin/python3\n\nimport calendar\nfrom datetime import datetime\n\ndef dates(startYear, endYear, startMonth, endMonth):\n year = startYear\n month = startMonth\n\n while(year < endYear or (year == endYear and month < endMonth)):\n days = calendar.monthrange(year, month)[1]\n for day in range(1, days + 1):\n weekDay = datetime(year, month, day).weekday() + 1\n a = str(year)\n b = str(month)\n if len(b) == 1:\n b = '0' + b\n c = str(day)\n if len(c) == 1:\n c = '0' + c\n yield (a, b, c, str(weekDay))\n\n month += 1\n if month > 12:\n month = 1\n year += 1\n\ndef beforeDate(year, month, day):\n year = int(year)\n month = int(month)\n day = int(day)\n if day > 1:\n return (year, month, day - 1)\n if month > 1:\n day = calendar.monthrange(year, month - 1)[1]\n month -= 1\n return (year, month, day)\n return (year - 1, 12, 31)\n","repo_name":"zpltys/MovieMsgSpider","sub_path":"listDate.py","file_name":"listDate.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"4601731210","text":"import typing as 
t\n\nfrom threatexchange.signal_type.index import (\n IndexMatchUntyped,\n SignalSimilarityInfoWithIntDistance,\n SignalTypeIndex,\n T as IndexT,\n)\nfrom threatexchange.signal_type.pdq.pdq_faiss_matcher import (\n PDQMultiHashIndex,\n PDQFlatHashIndex,\n PDQHashIndex,\n)\n\nPDQIndexMatch = IndexMatchUntyped[SignalSimilarityInfoWithIntDistance, IndexT]\n\n\nclass PDQIndex(SignalTypeIndex[IndexT]):\n \"\"\"\n Wrapper around the pdq faiss index lib using PDQMultiHashIndex\n \"\"\"\n\n @classmethod\n def get_match_threshold(cls):\n return 31 # PDQ_CONFIDENT_MATCH_THRESHOLD\n\n @classmethod\n def _get_empty_index(cls) -> PDQHashIndex:\n return PDQMultiHashIndex()\n\n def __init__(self, entries: t.Iterable[t.Tuple[str, IndexT]] = ()) -> None:\n super().__init__()\n self.local_id_to_entry: t.List[t.Tuple[str, IndexT]] = []\n self.index: PDQHashIndex = self._get_empty_index()\n self.add_all(entries=entries)\n\n def __len__(self) -> int:\n return len(self.local_id_to_entry)\n\n def query(self, hash: str) -> t.Sequence[PDQIndexMatch[IndexT]]:\n \"\"\"\n Look up entries against the index, up to the max supported distance.\n \"\"\"\n\n # query takes a signal hash but index supports batch queries hence [hash]\n results = self.index.search_with_distance_in_result(\n [hash], self.get_match_threshold()\n )\n\n matches = []\n for id, _, distance in results[hash]:\n matches.append(\n IndexMatchUntyped(\n SignalSimilarityInfoWithIntDistance(int(distance)),\n self.local_id_to_entry[id][1],\n )\n )\n return matches\n\n def add(self, signal_str: str, entry: IndexT) -> None:\n self.add_all(((signal_str, entry),))\n\n def add_all(self, entries: t.Iterable[t.Tuple[str, IndexT]]) -> None:\n start = len(self.local_id_to_entry)\n self.local_id_to_entry.extend(entries)\n if start != len(self.local_id_to_entry):\n # This function signature is very silly\n self.index.add(\n (e[0] for e in self.local_id_to_entry[start:]),\n range(start, len(self.local_id_to_entry)),\n )\n\n\nclass PDQFlatIndex(PDQIndex):\n \"\"\"\n Wrapper around the pdq faiss index lib\n that uses PDQFlatHashIndex instead of PDQMultiHashIndex\n It also uses a high match threshold to increase recall\n possibly as the cost of precision.\n \"\"\"\n\n @classmethod\n def get_match_threshold(cls):\n return 52 # larger PDQ_MATCH_THRESHOLD for flatindexes\n\n @classmethod\n def _get_empty_index(cls) -> PDQHashIndex:\n return PDQFlatHashIndex()\n","repo_name":"facebook/ThreatExchange","sub_path":"python-threatexchange/threatexchange/signal_type/pdq/pdq_index.py","file_name":"pdq_index.py","file_ext":"py","file_size_in_byte":2735,"program_lang":"python","lang":"en","doc_type":"code","stars":1075,"dataset":"github-code","pt":"73"} +{"seq_id":"12271405326","text":"import os\nimport rospkg\nfrom python_qt_binding.QtCore import Signal, QSize\nfrom python_qt_binding.QtWidgets import QLabel\nfrom .util import IconHelper\n\nclass BatteryDashWidget(QLabel):\n \"\"\"\n A Widget which displays incremental battery state, including a status tip.\n To use this widget simply call :func:`update_perc` and :func:`update_time`\n to change the displayed charge percentage and time remaining, respectively.\n\n :param name: The name of this widget\n :type name: str\n \"\"\"\n state_changed = Signal(int)\n\n def __init__(self, name='Battery', icons=None, charge_icons=None,\n icon_paths=None, suppress_overlays=False, stale_icon=None):\n super(BatteryDashWidget, self).__init__()\n if not icons:\n icons = []\n charge_icons = []\n for x in range(6):\n icons.append(['ic-battery-%s.svg' % 
(x * 20)])\n charge_icons.append(['ic-battery-charge-%s.svg' % (x * 20)])\n if not stale_icon:\n stale_icon = ['ic-battery-0.svg', 'ol-stale-battery.svg']\n icon_paths = (icon_paths if icon_paths else []) + [['rqt_robot_dashboard', 'images']]\n paths = []\n rp = rospkg.RosPack()\n for path in icon_paths:\n paths.append(os.path.join(rp.get_path(path[0]), path[1]))\n self._icon_helper = IconHelper(paths, name)\n # Add stale icon at end of icons so that it gets composited\n icons.append(stale_icon)\n charge_icons.append(stale_icon) # Need icons and charge_icons length to be same\n converted_icons = self._icon_helper.set_icon_lists(icons, charge_icons, suppress_overlays)\n self._icons = converted_icons[0]\n self._charge_icons = converted_icons[1]\n self._name = name\n self._charging = False\n self._stale = True\n self.__state = 0\n self.setMargin(5)\n self.state_changed.connect(self._update_state)\n self.update_perc(0)\n self.update_time(0)\n\n def _update_state(self, state):\n if self._stale:\n self.setPixmap(self._icons[-1].pixmap(QSize(60, 100)))\n elif self._charging:\n self.setPixmap(self._charge_icons[state].pixmap(QSize(60, 100)))\n else:\n self.setPixmap(self._icons[state].pixmap(QSize(60, 100)))\n\n @property\n def state(self):\n \"\"\"\n Read-only accessor for the widgets current state.\n \"\"\"\n return self.__state\n\n def set_charging(self, value):\n self._charging = value\n\n def update_perc(self, val):\n \"\"\"\n Update the displayed battery percentage.\n The default implementation of this method displays in 20% increments\n\n :param val: The new value to be displayed.\n :type val: int\n \"\"\"\n self.update_state(round(val / 20.0))\n\n def update_state(self, state):\n \"\"\"\n Set the state of this button.\n This will also update the icon for the button based on the ``self._icons`` list\n\n :raises IndexError: If state is not a proper index to ``self._icons``\n\n :param state: The state to set.\n :type state: int\n \"\"\"\n if 0 <= state and state < len(self._icons):\n self.__state = state\n self.state_changed.emit(self.__state)\n else:\n raise IndexError(\"%s update_state received invalid state: %s\" % (self._name, state))\n\n def update_time(self, value):\n try:\n fval = float(value)\n self.setToolTip(\"%s: %.2f%% remaining\" % (self._name, fval))\n except ValueError:\n self.setToolTip(\"%s: %s%% remaining\" % (self._name, value))\n\n def set_stale(self):\n \"\"\"Set button to stale.\n\n Not used by base dashboard implementation.\n \"\"\"\n self._charging = False\n self._stale = True\n self.setToolTip(\"%s: Stale\" % self._name)\n # This triggers self.update_state which in turn will trigger _update_state\n self.update_perc(0)\n\n def unset_stale(self):\n self._stale = False\n","repo_name":"ros-visualization/rqt_robot_dashboard","sub_path":"src/rqt_robot_dashboard/battery_dash_widget.py","file_name":"battery_dash_widget.py","file_ext":"py","file_size_in_byte":4054,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"73"} +{"seq_id":"42808799115","text":"class Solution:\n def minFlipsMonoIncr(self, S: str) -> int:\n ones=0\n for item in S:\n if item==\"1\":\n ones+=1\n total=len(S)\n zeros=total-ones\n ret=zeros\n acc0=0\n acc1=0\n for i in range(len(S)):\n if S[i]==\"1\":\n acc1+=1\n else:\n acc0+=1\n ret=min(ret,acc1+zeros-acc0)\n return 
ret","repo_name":"lkwq007/leetcode-py","sub_path":"926-Flip-String-to-Monotone-Increasing.py","file_name":"926-Flip-String-to-Monotone-Increasing.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"73"} +{"seq_id":"33665066787","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr 25 14:14:15 2023\n@author: xavier.pessoles2\n\"\"\"\nfrom collections import deque\nimport matplotlib.pyplot as plt\nimport random\n\n## Question 1 ##\ndef creer_graphe(p:int, n:int) -> dict:\n # n : lignes\n # p : colonnes\n G = {}\n sommets = []\n for i in range(n):\n for j in range(p):\n sommets.append((j,i))\n \n for sommet in sommets : \n (i,j) = sommet\n voisins = [(i+1,j),(i,j+1),(i-1,j),(i,j-1)]\n # On vérifie que les voisins sont dans les sommets\n vv = []\n for v in voisins : \n if v in sommets : \n vv.append(v)\n G[sommet]=vv\n return G\n\n## Question 2 ##\ndef get_sommets(G:{}) -> ([],[]):\n # On trace les sommets\n les_x,les_y = [],[]\n for sommet in G.keys() : \n les_x.append(sommet[0])\n les_y.append(sommet[1])\n return les_x,les_y\n \n## Question 3 ##\ndef trace_sommets(G:{}) :\n # On trace les sommets\n les_x,les_y = [],[]\n for sommet in G.keys() : \n les_x.append(sommet[0])\n les_y.append(sommet[1])\n plt.plot(les_x,les_y,\".\",color=\"royalblue\")\n \n plt.grid()\n plt.axis(\"equal\")\n plt.show()\n \n\n## Question 4 ##\ndef get_aretes(G):\n edges = []\n for sommet,voisins in G.items():\n for v in voisins : \n edge = (sommet,v)\n if (sommet,v) not in edges : \n if (v,sommet) not in edges : \n edges.append(edge)\n return edges\n\n## Question 5 ##\ndef tracer_aretes(G) :\n # On trace les arrêtes \n edges = get_aretes(G)\n for edge in edges : \n x = [edge[0][0],edge[1][0]]\n y = [edge[0][1],edge[1][1]]\n plt.plot(x,y,'lightcoral')\n \n plt.grid()\n plt.axis(\"equal\")\n plt.show()\n\n## Question 6 ##\ndef tracer_graphe(G) :\n # On trace les arrêtes \n edges = get_aretes(G)\n for edge in edges : \n x = [edge[0][0],edge[1][0]]\n y = [edge[0][1],edge[1][1]]\n plt.plot(x,y,'lightcoral')\n \n # On trace les sommets\n les_x,les_y = [],[]\n for sommet in G.keys() : \n les_x.append(sommet[0])\n les_y.append(sommet[1])\n plt.plot(les_x,les_y,\".\",color=\"royalblue\")\n \n plt.grid()\n plt.axis(\"equal\")\n plt.show()\n\n## TEST ##\nG = creer_graphe(10,8)\ntracer_graphe(G)\n\n\n### PARCOURS ###\ndef parcours_largeur(G,depart) :\n visited = {} \n for sommet in G.keys():\n visited[sommet] = False\n file = deque([depart])\n while len(file) > 0:\n s = file.pop()\n if visited[s] == False:\n visited[s] = True\n voisins = G[s]\n for v in voisins:\n file.appendleft(v)\n\n\ndef parcours_profondeur(G,depart) :\n visited = {} \n for sommet in G.keys():\n visited[sommet] = False\n pile = deque([depart])\n \n while len(pile) > 0:\n s = pile.pop()\n if visited[s] == False:\n visited[s] = True\n voisins = G[s]\n for v in voisins:\n pile.append(v)\n","repo_name":"mpsilamartin/mpsilamartin.github.io","sub_path":"info/TP/14_Labyrinthe_eleve.py","file_name":"14_Labyrinthe_eleve.py","file_ext":"py","file_size_in_byte":3058,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"73"} +{"seq_id":"10609536859","text":"#responsible: xiaoyu.chen@thalesgroup.com\n#location: Dalian\n#TC0091713.001\n\nimport unicorn\nfrom core.basetest import BaseTest\nfrom dstl.network_service import register_to_network\nfrom dstl.auxiliary import init\nfrom dstl.auxiliary import restart_module\nfrom 
dstl.call.setup_voice_call import dstl_is_data_call_supported\n\nimport random\n\nclass Test(BaseTest):\n '''\n TC0091713.001 - TpAtS0Basic\n Intention: This procedure provides basic tests for the command of AtS0\n Subscriber: 2\n '''\n\n def setup(test):\n test.dut.dstl_detect()\n test.dut.dstl_restart()\n test.r1.dstl_detect()\n test.expect(test.r1.dstl_register_to_network())\n\n\n def run(test):\n dut_phone_num = test.dut.sim.nat_voice_nr\n test.log.step(\"1.Read command and write command without PIN\")\n test.expect(test.dut.at1.send_and_verify(\"AT+CPIN?\", \"SIM PIN\"))\n test.expect(test.dut.at1.send_and_verify(\"ATS0=2\", \"ERROR\"))\n test.expect(test.dut.at1.send_and_verify(\"ATS0?\", \"OK\"))\n\n test.log.step(\"2. Read command and write command with PIN authentication\")\n test.expect(test.dut.dstl_register_to_network())\n test.expect(test.dut.at1.send_and_verify(\"ATS0=2\", \"OK\"))\n test.expect(test.dut.at1.send_and_verify(\"ATS0?\", \"002\"))\n\n test.log.step(\"3. Check write command with invalid parameters\")\n\n test.expect(test.dut.at1.send_and_verify(\"ATS0=-1\", \"ERROR\"))\n test.expect(test.dut.at1.send_and_verify(\"ATS0=256\", \"ERROR\"))\n test.expect(test.dut.at1.send_and_verify(\"ATS0=\", \"ERROR\"))\n\n if test.dut.dstl_is_data_call_supported():\n test.log.info('Not implemented temporarily ...')\n test.log.step(\"4.Make an incomming data call with ats0 set to '5', release data call\")\n test.log.step(\"5.Set DTR to off (at&d2) and make an incomming data call with sts0 set to '2', disconnect\")\n\n test.log.step(\"6. Test voice call\")\n test.expect(test.dut.at1.send_and_verify(\"ATS0=5\", \"OK\"))\n test.expect(test.dut.at1.send_and_verify(\"AT^SLCC=1\", \"OK\"))\n\n test.r1.at1.send_and_verify(f\"ATD{dut_phone_num};\")\n test.expect(test.dut.at1.wait_for(\"(RING\\s+){5}\", timeout=60))\n test.expect(test.dut.at1.wait_for(\"\\^SLCC: 1,1,0,0,0.*\"))\n test.expect(test.r1.at1.send_and_verify(\"AT+CLCC\", \"\\+CLCC: 1,0,0,0,0.*\"))\n test.expect(test.dut.at1.send_and_verify(\"AT+CHUP\", \"OK\"))\n test.expect(test.r1.at1.wait_for(\"NO CARRIER\"))\n\n\n\n def cleanup(test):\n test.log.step(\"7.Restore default value with ats0=0\")\n test.expect(test.dut.at1.send_and_verify(\"ATS0=0\", \"OK\"))\n test.expect(test.dut.at1.send_and_verify(\"at&v\", \"S0:000\"))\n\n\n\nif \"__main__\" == __name__:\n unicorn.main()\n\n","repo_name":"yinhwgh/remoterepo01","sub_path":"tests/cellular_networks/circuit_switched/ats0_basic.py","file_name":"ats0_basic.py","file_ext":"py","file_size_in_byte":2761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"22603402111","text":"# A partir d'une grille donnée, écrivez la fonction qui indique si un sudoku est gagnant ou pas\n# (ie, tous les nombres de 1 à 9 apparaissent une fois sur la ligne et sur la colonne)\n\n# La résolution ci dessous se fait de manière naive en testant à successivement les lignes, les colonnes et les blocs\n\nsudokuValide = [\n [7, 1, 3, 8, 2, 4, 6, 5, 9],\n [2, 6, 9, 1, 7, 5, 3, 8, 4],\n [5, 4, 8, 6, 9, 3, 2, 1, 7],\n [6, 9, 1, 2, 4, 8, 5, 7, 3],\n [4, 7, 2, 3, 5, 1, 9, 6, 8],\n [3, 8, 5, 9, 6, 7, 4, 2, 1],\n [1, 3, 4, 5, 8, 2, 7, 9, 6],\n [9, 5, 7, 4, 1, 6, 8, 3, 2],\n [8, 2, 6, 7, 3, 9, 1, 4, 5]]\n\nsudokuFaux = [\n [1, 1, 3, 8, 2, 4, 6, 5, 9],\n [2, 6, 9, 1, 7, 5, 3, 8, 4],\n [5, 4, 8, 6, 9, 3, 2, 1, 7],\n [6, 9, 1, 2, 4, 8, 5, 7, 3],\n [4, 7, 2, 3, 5, 1, 9, 6, 8],\n [3, 8, 5, 9, 6, 7, 4, 2, 1],\n [1, 3, 4, 5, 8, 2, 7, 9, 6],\n [9, 5, 7, 4, 1, 6, 8, 3, 
2],\n    [8, 2, 6, 7, 3, 9, 1, 4, 5]]\n\n\ndef rechercher_valeur(tableau, valeur):\n    double = False\n    occurence = 0\n    i = 0\n\n    while(i < 9 and occurence < 2):\n        if(tableau[i] == valeur):\n            occurence += 1\n        i += 1\n    if(occurence > 1):\n        double = True\n\n    return double\n\n\ndef tester_tableau(tableau):\n    estValide = True\n    i = 1\n    while(estValide and i < 10):\n        if(rechercher_valeur(tableau, i)):\n            estValide = False\n        i += 1\n    return estValide\n\n\ndef colonne_to_tableau(grille, index):\n    tableau = []\n    for i in range(0, len(grille)):\n        tableau.append(grille[i][index])\n    return tableau\n\n\ndef bloc_to_tableau(grille, x, y):\n    tableau = []\n    for i in range(x, x+3):\n        j = y\n        for j in range(y, y+3):\n            tableau.append(grille[i][j])\n    return tableau\n\n\ndef tester_ligne(grille):\n    estValide = True\n    i = 0\n    while(estValide and i < 9):\n        if(not tester_tableau(grille[i])):\n            estValide = False\n        i += 1\n    return estValide\n\n\ndef tester_colonne(grille):\n    estValide = True\n    i = 0\n    while(estValide and i < 9):\n        tableau = colonne_to_tableau(grille, i)\n        if(not tester_tableau(tableau)):\n            estValide = False\n        i += 1\n    return estValide\n\n\ndef tester_bloc(grille):\n    estValide = True\n    i = 0\n    while(estValide and i < 9):\n        j = 0\n        while(estValide and j < 9):\n            tableau = bloc_to_tableau(grille, i, j)\n            if(not tester_tableau(tableau)):\n                estValide = False\n            j += 3\n        i += 3\n    return estValide\n\n\ndef tester_sudoku(sudoku):\n    estValide = True\n    if(not tester_ligne(sudoku)):\n        estValide = False\n    elif(not tester_colonne(sudoku)):\n        estValide = False\n    elif(not tester_bloc(sudoku)):\n        estValide = False\n\n    return estValide\n\n\nprint(tester_sudoku(sudokuValide))\n","repo_name":"SvenLC/CESI-Algorithmique","sub_path":"tableaux/exercice9.py","file_name":"exercice9.py","file_ext":"py","file_size_in_byte":2816,"program_lang":"python","lang":"fr","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"10614529499","text":"#responsible: fang.liu@thalesgroup.com\n#location: Berlin\n#TC0092490.001\n\nimport unicorn\nimport re\nfrom core.basetest import BaseTest\nfrom dstl.auxiliary import restart_module\nfrom dstl.network_service import register_to_network\nfrom dstl.auxiliary.init import dstl_detect\nfrom dstl.security.lock_unlock_sim import *\n\nclass Test(BaseTest):\n\n    def setup(test):\n\n        test.dut.dstl_detect()\n        test.dut.dstl_register_to_network()\n\n    def run(test):\n\n        test.log.step(\"1. Check the syntax of the AT command \\\"at^sgapn\\\".\")\n        test.log.info(\"***********************************************************************************************\")\n        test.dut.at1.send_and_verify(\"at+cgdcont?\", \".*OK.*\")\n        res = test.dut.at1.last_response\n        pattern1 = r'\\+CGDCONT: (\\d+),(\"IP\"|\"IPV4V6\"|\"IPV6\"),\".*\",.*'\n\n        it = re.finditer(pattern1, res)\n        num = 0\n        for item in it:\n            test.log.info(f\"{item.group()}\")\n            num = num+1\n\n        test.dut.at1.send_and_verify(\"at^sgapn?\", \".*,.*,.*,.*,.*,.*,.*\")\n        res2 = test.dut.at1.last_response\n        pattern2 = r'\\^SGAPN: (\\d+),(\\d+),(\"IP\"|\"IPV4V6\"|\"IPV6\"),\".*\",(\"GSM\"|\"WCDMA\"|\"LTE\"|\"ANY\"),(\"Enabled\"|\"Disabled\"),(\\d+)'\n\n        it2 = re.finditer(pattern2, res2)\n        num2 = 0\n        for item in it2:\n            test.log.info(f\"{item.group()}\")\n            num2 = num2+1\n\n        test.expect(num == num2, critical=True)\n\n        test.log.step(\"2. 
Check the command is not pin protected.\")\n test.log.info(\"***********************************************************************************************\")\n\n test.dut.dstl_lock_sim()\n test.expect(test.dut.at1.send_and_verify(\"at^sgapn?\", \".*OK.*\"))\n test.expect(test.dut.at1.send_and_verify(\"at^sgapn=?\", \".*OK.*\"))\n\n test.log.step(\"3. Check the test command and the rang of parameter values.\")\n test.log.info(\"***********************************************************************************************\")\n\n test.dut.at1.send_and_verify(\"at^sgapn=?\", \".*OK.*\")\n res3 = test.dut.at1.last_response\n\n test.log.info(\"The range of PDP context ID is 1-16.\")\n test.expect(\"^SGAPN: (1-16)\" in res3)\n\n test.log.info(\"The range of APN class is 0-16.\")\n test.expect(\"(0-16)\" in res3)\n\n test.log.info(\"The type of PDP context should be in the range of (\\\"IP\\\",\\\"PPP\\\",\\\"IPV6\\\",\\\"IPV4V6\\\").\")\n test.expect(\"(\\\"IP\\\",\\\"PPP\\\",\\\"IPV6\\\",\\\"IPV4V6\\\")\" in res3)\n\n test.log.info(\"The range of APN bearer is (\\\"GSM\\\",\\\"WCDMA\\\",\\\"LTE\\\",\\\"ANY\\\").\")\n test.expect(\"(\\\"GSM\\\",\\\"WCDMA\\\",\\\"LTE\\\",\\\"ANY\\\")\" in res3)\n\n test.log.info(\"The range of inactivity timeout value.\")\n test.expect(\"(0-122820)\" in res3)\n\n\n test.log.step(\"4. Check the rules for the classed.\")\n\n pattern3 = r'\\^SGAPN: \\d+,1,.*'\n res4 = re.search(pattern3, res3, re.I | re.U)\n\n if not res4:\n test.log.info(\"APN class range is 0-16, only 1 is not supported.\")\n test.expect(True)\n\n test.log.step(\"5. Check some illegal values.\")\n test.dut.at1.send_and_verify(\"at^sgapn=1,17\", \"ERROR\")\n test.dut.at1.send_and_verify(\"at^sgapn=9,0\", \"ERROR\")\n test.dut.at1.send_and_verify(\"at^sgapn=1,2,\\\"wsx\\\"\", \"ERROR\")\n test.dut.at1.send_and_verify(\"at^sgapn=3,0,\\\"IPV4V6\\\",\"\",\"\"\", \"ERROR\")\n #The two command will cause module crash.\n #test.dut.at1.send_and_verify(\"at^sgapn=3,0,\\\"IPV4V6\\\",\"\",\\\"123\\\"\", \"ERROR\")\n #test.dut.at1.send_and_verify(\"at^sgapn=3,0,\\\"IPV4V6\\\",\"\",\\\"abc\\\"\", \"ERROR\")\n test.dut.at1.send_and_verify(\"at^sgapn=3,0,\\\"IPV4V6\\\",\"\",\\\"ANY\\\",\" \"\", \"ERROR\")\n #test.dut.at1.send_and_verify(\"at^sgapn=3,0,\\\"IPV4V6\\\",\"\",\\\"ANY\\\",\\\"Disabled\\\"\", \"ERROR\")\n\n pattern4 = r'\\^SGAPN: 3,.*,\"Enabled\",0'\n it3 = re.search(pattern4, res2)\n if it3:\n test.log.info(\"0 Inactivity timer disabled.\\n\"\n \"If enabled, then for connections without data transfer the connection will be terminated after the timer expires.\")\n\n test.dut.at1.send_and_verify(\"at^sgapn=3,0,\\\"IPV4V6\\\",\"\",\\\"ANY\\\",,123\", \"ERROR\")\n\n\n def cleanup(test):\n \"\"\"\n < Test postconditions >\n \"\"\"\n pass\n\n\nif \"__main__\" == __name__:\n unicorn.main()\n","repo_name":"yinhwgh/remoterepo01","sub_path":"tests/packet_services/sgapn_APN_class_handling.py","file_name":"sgapn_APN_class_handling.py","file_ext":"py","file_size_in_byte":4331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"3363779754","text":"import time\nclass Jogo:\n def __init__(self):\n try:\n self.menu()\n except ValueError:\n print(\"Escolha uma opção válida!\")\n self.menu() \n\n def menu(self):\n print(\"Bem vindo ao Perguntados da programação!\")\n while True:\n print(\"\"\"\\n1 - Iniciar jogo\n2 - Como jogar / Objetivo do jogo\n3 - Sair\"\"\")\n comando = int(input(\"Escolha sua opção digitando o número correspondente: \")) \n if comando == 
1:\n                self.iniciar()\n                break\n            elif comando == 2:\n                self.mostrarInstrucoes()\n            elif comando == 3:\n                self.sair()\n            else:\n                print(\"Escolha uma opção válida!\")\n                self.menu()\n        \n\n    def iniciar(self): \n        try:\n            print(\"\"\"\\n1 - Único Jogador\n2 - Multiplayer Local\"\"\")\n            comando = int(input('Escolha seu modo de jogo: '))\n            if comando == 1:\n                self.jogador1 = Jogador()\n                self.tipoDeObjeto = Pergunta #aqui é decidido o tipo de objeto que será criado\n            elif comando == 2:\n                self.jogador1 = Jogador()\n                self.jogador2 = Jogador()\n                self.tipoDeObjeto = PerguntaMultiplayer #aqui é decidido o tipo de objeto que será criado\n            else:\n                print(\"Escolha uma opção válida!\")\n                self.iniciar()\n        except ValueError:\n            print(\"Escolha uma opção válida!!\")\n            self.iniciar() \n\n    def mostrarInstrucoes(self):\n        print(\"\"\"\\nInstruções do jogo:\n1 - O jogo consiste em acertar as perguntas sobre conteúdos da programação.\n2 - Assim que iniciar o jogo, aparecerá uma pergunta e 4 alternativas, sendo apenas uma delas a correta.\n3 - Pode ser jogado no modo 'Um Jogador' ou 'Dois Jogadores'.\n4 - No modo 'Um Jogador', o jogador possui 3 vidas, onde cada vez que ele errar um questão, ele perde uma vida. Caso chegue a 0, o jogo acaba.\n5 - O jogador deve OBRIGATORIAMENTE responder a pergunta com letra minúscula.\n6 - Cada pergunta respondida corretamente garante ao jogador 100 pontos, no fim, quem tiver mais pontos vence.\"\"\")\n\n    def sair(self):\n        print(\"Encerrando o jogo...\")\n        quit()\n\n    def finalizar(self):\n        if self.tipoDeObjeto == Pergunta: \n            self.finalUmJogadorGanha()\n        \n        elif self.tipoDeObjeto == PerguntaMultiplayer: \n            self.finalMultiplayer()\n\n    def finalUmJogadorGanha(self): #final jogo sozinho quando ele n perde nehuma vida\n        if self.jogador1.getVidasJ1() == 3:\n            print(\"\\nParabéns, você zerou o jogo!\\nAcertou todas as perguntas.\") \n            print(f\"\\nResultado final\\n{self.jogador1}\")\n\n        elif self.jogador1.getVidasJ1() < 3:\n            print(\"\\nParabéns, você ganhou!\") \n            print(f\"\\nResultado final\\n{self.jogador1}\\nVidas: {self.jogador1.getVidasJ1()}\")\n        \n\n    def finalMultiplayer(self):\n        print(\"Fim de jogo!!\")\n        if jogo.jogador1.getPontosJ1() > jogo.jogador2.getPontosJ2(): #jogador 1 ganhou\n            print(\"\\nO vencedor é...\")\n            self.tempo()\n            print(f\"===== {jogo.jogador1.nome} =====\")\n            self.tempo()\n            print(\"\\nResultado final\")\n            print(jogo.jogador1)\n            print(jogo.jogador2)\n        \n        elif jogo.jogador2.getPontosJ2() > jogo.jogador1.getPontosJ1(): #jogador 2 ganhou\n            print(\"\\nO vencedor é...\")\n            self.tempo()\n            print(f\"===== {jogo.jogador2.nome} =====\")\n            self.tempo()\n            print(\"\\nResultado final\")\n            print(jogo.jogador1)\n            print(jogo.jogador2)\n        \n        elif jogo.jogador1.getPontosJ1() == jogo.jogador2.getPontosJ2(): \n            if jogo.jogador1.getPontosJ1() == 0 and jogo.jogador2.getPontosJ2() == 0:\n                print(\"\\nO vencedor é...\")\n                self.tempo()\n                print(\"Todos perderam!! 
Ninguém pontuou.\")\n self.tempo()\n print(\"\\nResultado final\")\n print(jogo.jogador1)\n print(jogo.jogador2)\n else:\n print(\"\\nO vencedor é...\")\n self.tempo()\n print(f\"===== {jogo.jogador1.nome} e {jogo.jogador2.nome} ===== empate!!\")\n self.tempo()\n print(\"\\nResultado final\")\n print(jogo.jogador1)\n print(jogo.jogador2)\n \n def tempo(self):\n time.sleep(3) \n \nclass Jogador:\n i = 1\n def __init__(self, p=0, v=3):\n self.nome = input(f\"{Jogador.i}�� jogador digite o seu nome: \")\n Jogador.i += 1\n self.__pontos = p #encapsulamento\n self.__vidas = v #encapsulamento\n\n def __str__(self):\n return (f\"Nome: {self.nome}\\nPontos: {self.__pontos}\")\n\n \n def getVidasJ1(self):\n return jogo.jogador1.__vidas\n def getPontosJ1(self):\n return jogo.jogador1.__pontos\n def getPontosJ2(self):\n return jogo.jogador2.__pontos\n \n\n def ganharPontos(self):\n self.__pontos += 100\n def perderVidas(self):\n self.__vidas -= 1 \n if self.__vidas == 0: \n print(f\"Fim de Jogo!\\nResultado Final\\n{jogo.jogador1}\")\n quit()\n\nclass Pergunta: #classe mae\n numeroQuestao = 1 #variavel de classe\n def __init__(self, questao, respostaCorreta):\n self.questao = questao\n self.respostaCorreta = respostaCorreta\n if jogo.tipoDeObjeto == Pergunta: #verificação: se for modo 1 jogador roda as perguntas, caso contrário n faz nd.\n self.rodarPergunta()\n \n def rodarPergunta(self):\n print(f\"\\n{Pergunta.numeroQuestao}ª Pergunta\")\n print(f\"{self.questao}\")\n resposta = str(input(\"Escolha a alternativa correta: \"))\n if resposta == self.respostaCorreta:\n Pergunta.numeroQuestao += 1\n jogo.jogador1.ganharPontos() #agregação: chama o metodo de outra classe\n print(\"\\nResposta correta\")\n \n else:\n jogo.jogador1.perderVidas() #agregação\n Pergunta.numeroQuestao += 1\n print(\"\\nResposta errada\")\n \n\n#multiplayer local\nclass PerguntaMultiplayer(Pergunta): #classe filha\n numeroQuestao = 1 #variavel de classe\n def __init__(self, questao, respostaCorreta):\n Pergunta.__init__(self, questao, respostaCorreta)\n self.rodarPergunta()\n \n def rodarPergunta(self): #polimorfismo: o método tem o mesmo nome da classe mãe, mas faz coisas diferentes\n print(f\"\\n{PerguntaMultiplayer.numeroQuestao}ª Pergunta\")\n print(f\"{self.questao}\")\n respJ1 = str(input(f\"{jogo.jogador1.nome}, escolha a alternativa correta: \"))\n respJ2 = str(input(f\"{jogo.jogador2.nome}, escolha a alternativa correta: \"))\n if respJ1 == self.respostaCorreta:\n if respJ2 == self.respostaCorreta:\n #os dois acertaram\n jogo.jogador1.ganharPontos() #agregaçao\n jogo.jogador2.ganharPontos() #agregaçao\n PerguntaMultiplayer.numeroQuestao += 1\n print(f\"\\n{jogo.jogador1.nome} e {jogo.jogador2.nome} acertaram a resposta!!\")\n \n \n else:\n #j1 acertou e o j2 errou\n jogo.jogador1.ganharPontos() #agregaçao\n PerguntaMultiplayer.numeroQuestao += 1\n print(f\"\\n{jogo.jogador1.nome} acertou a resposta!!\")\n print(f\"{jogo.jogador2.nome} errou a resposta!!\")\n \n \n \n if respJ2 == self.respostaCorreta:\n if respJ1 != self.respostaCorreta:\n #j2 acertou e o j1 errou\n jogo.jogador2.ganharPontos() #agregaçao\n PerguntaMultiplayer.numeroQuestao += 1\n print(f\"\\n{jogo.jogador1.nome} errou a resposta!!\")\n print(f\"{jogo.jogador2.nome} acertou a resposta!!\")\n \n \n if respJ1 != self.respostaCorreta:\n if respJ2 != self.respostaCorreta:\n #os dois erraram\n PerguntaMultiplayer.numeroQuestao += 1\n print(f\"\\n{jogo.jogador1.nome} e {jogo.jogador2.nome} erraram a resposta!!\")\n \n \n \n \n\n\njogo = Jogo()\nx = 
jogo.tipoDeObjeto #Determina se o modo de jogo é 1 jogador ou se é multiplayer local\n\npergunta1 = x(\"\"\"Qual é o significado de \"código\" no contexto da programação?\\na) Uma forma de encriptar dados\nb) Uma senha que usamos para ativar o Python\nc) Um conjunto de regras de estilo para programas em Python\nd) Uma sequência de instruções em uma linguagem de programação\"\"\", \"d\")\n\npergunta2 = x(\"\"\"Em Python, quando mais de um operador aparece em uma expressão, a ordem de avaliação depende das regras de precedência de cada linguagem. Assim, ao programar em Python, além de observar essas regras, é preciso considerar, ainda, a forma como a linguagem representa seus operadores, conforme demonstrado nos comandos a seguir.\nx = 7*3**2%4\\nAssinale o resultado impresso:\\na) 1\nb) 3\nc) 7\nd) 15.75\"\"\", \"b\")\n\npergunta3 = x(\"\"\"Qual a função que transforma algo em string no python?\\na) str()\nb) int()\nc) float()\nd) string()\"\"\", \"a\")\n\npergunta4 = x(\"\"\"Sobre o Paradigma Orientado a Objetos, responda a alternativa que melhor o define:\na) Organiza o código em procedimentos conhecidos como rotinas, subrotinas, funções ou métodos\nb) Organiza o código em blocos lógicos usando estruturas de controle de fluxo\nc) Orgabiza o código em grupos de objetos, chamados de métodos, que contém variáveis e classes próprias\nd) Organiza o código em grupos de objetos, chamados de classes, que contém variáveis e funções próprías\"\"\",\"d\" or \"D\")\n\npergunta5 = x(\"\"\"Ao criar um sistema ou resolver um problema com Programação Orientada a Objetos, é comum a análise da descrição do sistema ou problema. Nesse contexto, para definir os potenciais métodos de uma classe em qual classe de palavras é importante estar atento?\na) Substantivos\nb) Verbos\nc) Adjetivos\nd) Artigos\"\"\", \"b\")\n\npergunta6 = x(\"\"\"Substitua os asteriscos com o comando de método especial que mais se adequa a situação a seguir para obter o resultado proposto:\nclass Fruta:\n def __init__(self, nome, quantidade):\n self.nome = nome\n self.quantidade = quantidade\n def *******(self):\n return (f'Nome da Fruta: {self.nome}\\\\nQuantidade: {self.quantidade}')\nbanana = Fruta('banana', 10)\nprint(banana)\n\nResultado:\nNome da Fruta: banana\nQuantidade: 10\n\na) __init__\nb) print()\nc) __str__\nd) __eq__\"\"\", \"c\")\n\npergunta7 = x(\"\"\"Considere o programa em Python abaxo:\n\\nnum1 = int(input('Informe o número de Processos:'))\nnum2 = int(input('Informe o número de Juízes:'))\n\n..I..\n resultado = numero1 / numero2\n print(\"Há \",resultado, \" processos a serem julgados por cada Juiz\")\n..II..\n print(\"Não é possível divisão por zero\")\nPara tratar a exceção que será lançada se o valor contido na variável num2 for zero, as lacunas I e II deverão ser corretamente preenchidas por:\n\na) if: e except ZeroDivisionError:\nb) try e except ZeroDivisionError:\nc) if: e except ValueError:\nd) try: e except ZeroDivisionError:\"\"\", \"d\")\n\npergunta8 = x(\"\"\"frutas = [\"banana\" , \"laranja\" , \"manga\" , \"uva\"]\n\nfor k in range( -1, -4, -2 ):\n print frutas [ k ]\n\nO conjunto de palavras exibidas pela execução desse código, na ordem, é:\na) uva, laranja\nb) banana, laranja, manga\nc) laranja, manga\nd) uva, banana, manga\"\"\", \"a\")\n\npergunta9 = x(\"\"\"class Cliente:\n I\n self.nome = nome\n self.renda = renda\n\np1 = Cliente(\"Adriano\", 5700.98)\n\nprint(p1.nome)\nprint(p1.renda)\n\nPara que o código seja compilado e executado corretamente, a lacuna I deverá ser preenchida com:\na) 
_init_(self, nome, renda):\nb) def _init_(self, nome, renda):\nc) def _str_(self):\nd) def _eq_(self, Cliente):\"\"\", \"b\")\n\npergunta10 = x(\"\"\"Sobre as linguagens de programação orientada a obejtos, marque V para as afirmativas verdadeiras e F para as falsas.\n( ) Os 4 pilares fundamentais da programação orientada a objetos são: Encapsulamento, Abstração, Polimorfismo e Herança.\n( ) Mesmo quando um método possui o parâmetro 'self', ele não receberá automaticamente como argumento uma referência para o objeto que está tentando usá-lo.\n( ) O método _eq_ compara dois objetos e retorna True se seus valores forem os mesmos.\n( ) Atributo é um elemento dentro do sistema que é a execução de uma classe.\n( ) Os métodos também podem ser chamados de 'comportamento dos objetos de uma classe', pois são ações que esses objetos podem executar.\n\na) V - V - V - F - F\nb) V - F - V - F - V\nc) F - F - F - V - V\nd) V - V - F - V - F\"\"\", \"b\")\n\njogo.finalizar()\n","repo_name":"joanaMartinsS/Jogo-Perguntas-E-Respostas","sub_path":"JogoPerguntasERespostas.py","file_name":"JogoPerguntasERespostas.py","file_ext":"py","file_size_in_byte":12973,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"16374534881","text":"#! /usr/bin/env python3\n\nfrom .base_miner import BasePostGradientMiner\nfrom ..utils import loss_and_miner_utils as lmu\nimport torch\n\n\nclass MultiSimilarityMiner(BasePostGradientMiner):\n def __init__(self, epsilon, **kwargs):\n super().__init__(**kwargs)\n self.epsilon = epsilon\n\n def mine(self, embeddings, labels):\n self.n = embeddings.size(0)\n self.index_list = torch.arange(self.n).to(embeddings.device)\n self.sim_mat = lmu.sim_mat(embeddings)\n return self.compute_indices(labels)\n\n def compute_indices(self, labels):\n empty_tensor = torch.tensor([]).long().to(labels.device)\n a1_idx, p_idx, a2_idx, n_idx = [empty_tensor], [empty_tensor], [empty_tensor], [empty_tensor]\n for i in range(self.n):\n pos_indices = (\n ((labels == labels[i]) & (self.index_list != i)).nonzero().flatten()\n )\n neg_indices = (labels != labels[i]).nonzero().flatten()\n\n if pos_indices.size(0) == 0 or neg_indices.size(0) == 0:\n continue\n\n pos_sorted, pos_sorted_idx = torch.sort(self.sim_mat[i][pos_indices])\n neg_sorted, neg_sorted_idx = torch.sort(self.sim_mat[i][neg_indices])\n neg_sorted_filtered_idx = (\n (neg_sorted + self.epsilon > pos_sorted[0]).nonzero().flatten()\n )\n pos_sorted_filtered_idx = (\n (pos_sorted - self.epsilon < neg_sorted[-1]).nonzero().flatten()\n )\n\n pos_indices = pos_indices[pos_sorted_idx][pos_sorted_filtered_idx]\n neg_indices = neg_indices[neg_sorted_idx][neg_sorted_filtered_idx]\n\n if len(pos_indices) > 0:\n a1_idx.append(torch.ones_like(pos_indices) * i)\n p_idx.append(pos_indices)\n if len(neg_indices) > 0:\n a2_idx.append(torch.ones_like(neg_indices) * i)\n n_idx.append(neg_indices)\n\n return [torch.cat(idx) for idx in [a1_idx, p_idx, a2_idx, n_idx]]\n","repo_name":"dlmuyy/IJCAI-PRICAI-2D-based-3D-shape-retrieval","sub_path":"models/pytorch_metric_learning/pytorch_metric_learning/miners/multi_similarity_miner.py","file_name":"multi_similarity_miner.py","file_ext":"py","file_size_in_byte":2015,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"73"} +{"seq_id":"39005717542","text":"from PyQt6.QtWidgets import QMessageBox, QDialogButtonBox\nfrom UI.Widgets.custom_qimage import CustomQImage\n\n\nclass Error(QMessageBox):\n def __init__(self, parent=None, 
title='title', message='message'):\n        super().__init__(parent)\n        # Variables\n        self.title = title\n        self.message = message\n        # Button\n        self.ok = QDialogButtonBox(QDialogButtonBox.StandardButton.Ok)\n        self.ok.accepted.connect(self.accept)\n        self.init_ui()\n        # Show message\n        self.exec()\n\n    \"\"\" UI \"\"\"\n    def init_ui(self):\n        self.setWindowTitle(self.title)\n        self.setText(self.message)\n        self.setIconPixmap(CustomQImage(0))\n","repo_name":"WasaProduction/Ixchel","sub_path":"UI/Widgets/Alerts/error.py","file_name":"error.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"14061772566","text":"from fastapi import HTTPException, status\nfrom sqlalchemy.orm import Session, joinedload\nfrom models import area_model\nfrom uuid import UUID\nfrom schemas.area import AreaCreate, AreaUpdate, AreaDelete\n\n\n#################################################\n# Create Area\n\ndef create_area(db:Session, area: AreaCreate):\n    # Check that the area is not already in the database: if\n    # area_in_db is truthy, an area with that name already\n    # exists, so it must not be created again and an error\n    # must be raised instead.\n    area_in_db = db.query(area_model.Area).filter(\n        area_model.Area.area_name == area.area_name).first()\n\n    if area_in_db:\n        raise HTTPException(\n            status_code=status.HTTP_409_CONFLICT, detail= \"Area already exists\"\n        )\n    db_area = area_model.Area(area_name=area.area_name)\n\n    db.add(db_area)\n    db.commit()\n    db.refresh(db_area)\n\n    return db_area\n\ndef get_area_by_id(db:Session,area_id:UUID):\n\n    areas_in_db= db.query(area_model.Area).filter(area_model.Area.area_id == area_id).options(\n        joinedload(area_model.Area.cargos),\n        joinedload(area_model.Area.catalog)\n    ).first()\n\n    if not areas_in_db:\n        raise HTTPException(\n            status_code=status.HTTP_404_NOT_FOUND, detail= \"Areas not found\"\n        )\n    return areas_in_db\n\n###################################################\n# GET AREA BY NAME\ndef get_area_by_name(db:Session,area_name:str):\n\n    areas_in_db= db.query(area_model.Area).options(\n        joinedload(area_model.Area.catalog),\n        joinedload(area_model.Area.cargos)\n    ).filter(area_model.Area.area_name == area_name).first()\n\n    if not areas_in_db:\n        raise HTTPException(\n            status_code=status.HTTP_404_NOT_FOUND, detail= \"Areas not found\"\n        )\n    return areas_in_db\n\n###########################################################\n# Get all areas\n\ndef get_all_areas(db: Session):\n    areas_in_db = db.query(area_model.Area).options(\n        joinedload(area_model.Area.catalog),\n        joinedload(area_model.Area.cargos)\n    ).all()\n\n    if not areas_in_db:\n        raise HTTPException(\n            status_code=status.HTTP_404_NOT_FOUND, detail=\"Areas not found\")\n\n    return areas_in_db\n\n###########################################################\n# Update Area\ndef update_area(db: Session, area: AreaUpdate):\n    area_in_db = db.query(area_model.Area).filter(area_model.Area.area_id == area.area_id).first()\n\n    if not area_in_db:\n        raise HTTPException(\n            status_code=status.HTTP_404_NOT_FOUND, detail=\"Area not found\")\n\n    area_in_db.area_name = area.area_name\n    db.commit()\n    db.refresh(area_in_db)\n\n    return area_in_db\n\n###########################################################\n# Delete Area\n\ndef delete_area(db: Session, area: AreaDelete):\n    area_in_db = db.query(area_model.Area).filter(area_model.Area.area_id == area.area_id).first()\n\n    if not area_in_db:\n        raise HTTPException(\n            
status_code=status.HTTP_404_NOT_FOUND, detail=\"Area not found\")\n\n db.delete(area_in_db)\n db.commit()\n\n return area_in_db","repo_name":"R00rss/PQR_white_back","sub_path":"crud/area_crud.py","file_name":"area_crud.py","file_ext":"py","file_size_in_byte":3142,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"73675287275","text":"from typing import List, Dict, Tuple\n\nimport networkx as nx\n\nfrom .opinion import Opinion\n\n\nclass ParsedSentence:\n \"\"\"\n Attributes\n ----------\n graph : nx.DiGraph\n dependency tree of sentence\n id2word : dict\n dictionary of all tokens\n value = token\n key = it's index\n id2lemma : dict\n dictionary of several tokens\n value = node of dependency tree in format: `lemma_pos`\n key = it's index\n id2dep : dict\n dictionary of dependency relations\n value = relation type\n key = node index\n \"\"\"\n def __init__(self,\n graph: nx.DiGraph,\n text: str,\n id2word: Dict[int, Tuple[int, int]],\n id2lemma: Dict[int, str],\n id2dep: Dict[int, str],\n opinions: List[Opinion] = None):\n if opinions is None:\n opinions = []\n self.opinions = opinions\n\n self.graph = graph\n self.text = text\n self.id2word = id2word\n self.id2lemma = id2lemma\n self.id2dep = id2dep\n\n def __len__(self):\n \"\"\"Number of parsed tokens\n\n According to number of nodes in dependency tree\"\"\"\n return len(self.graph)\n\n def reset_opinions(self):\n self.opinions = []\n\n def is_known(self, word_index: int) -> bool:\n \"\"\"Is word with that index in known words\"\"\"\n return word_index in self.id2lemma\n\n def get_text(self) -> str:\n return self.text\n\n def get_nodes(self, start_index: int, stop_index: int) -> List[int]:\n nodes_index = []\n for word_index, (word_start, word_stop) in self.id2word.items():\n if (start_index <= word_start) and (word_stop <= stop_index):\n nodes_index.append(word_index)\n return nodes_index\n\n def nodes_sentence_order(self) -> List[int]:\n \"\"\"Nodes indexes in sentence order\n \n Returns\n -------\n nodes : list\n list of node's id\n \"\"\"\n\n return [\n node_id for node_id, _ in sorted(self.id2lemma.items(), key=lambda item: item[0])\n ]\n\n def reset_opinions_polarities(self):\n \"\"\"Reset only polarities of targets\"\"\"\n for opinion in self.opinions:\n opinion.reset_polarity()\n\n def is_opinions_contain_unknown(self) -> bool:\n for opinion in self.opinions:\n for node in opinion.nodes:\n if not self.is_known(node):\n return False\n return True\n\n # todo\n # def to_sentence(self) -> Sentence:\n # \"\"\"Convert to instance of Sentence class\n #\n # Returns\n # -------\n # sentence : Sentence\n # \"\"\"\n #\n # text = []\n # for parsed_node_id, _ in sorted(self.id2init_id.items(), key=lambda item: item[1]):\n # if parsed_node_id in self.id2word:\n # text.append(self.id2word[parsed_node_id])\n #\n # opinions = []\n # for opinion in self.opinions:\n # opinions.append(\n # Opinion(nodes=[\n # self.id2init_id[parsed_node_id] for parsed_node_id in opinion.nodes\n # ],\n # category=opinion.category,\n # polarity=opinion.polarity))\n # return Sentence(text=text, opinions=opinions)\n\n # todo\n # def to_specified_sentence(self, text: List[str]) -> Sentence:\n # \"\"\"Convert to instance of Sentence class with specified text\n #\n # Returns\n # -------\n # sentence : Sentence\n # \"\"\"\n #\n # opinions = []\n # for opinion in self.opinions:\n # nodes = []\n # for node in opinion.nodes:\n # node = self.id2init_id[node]\n # if node not in nodes:\n # nodes.append(node)\n # 
opinions.append(\n    #                 Opinion(nodes=nodes, category=opinion.category, polarity=opinion.polarity))\n    #\n    #     return Sentence(text=text, opinions=opinions)\n","repo_name":"DavydovDmitry/absa","sub_path":"absa/text/parsed/sentence.py","file_name":"sentence.py","file_ext":"py","file_size_in_byte":4044,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"73"} +{"seq_id":"23632451239","text":"import unittest\nfrom copy import deepcopy\nfrom random import randrange, randint\nfrom math import isclose\nimport numpy as np\nimport matrixlib as ml\n\n\nclass NumpyAndMatrixlib(unittest.TestCase):\n\n    def test_negation(self):\n        m, n = randint(1, 50), randint(1, 50)\n        x = ml.Matrix(m, n).rand()\n        a, b = np.negative(deepcopy(x)._Matrix__matrix), (-x)._Matrix__matrix\n        self.assertTrue(all([a[i][j] == b[i][j] for i in range(m) for j in range(n)]))\n\n    def test_addition(self):\n        m, n = randint(1, 50), randint(1, 50)\n        x, y = ml.Matrix(m, n).rand(), ml.Matrix(m, n).rand()\n        a, b = np.add(deepcopy(x)._Matrix__matrix, deepcopy(y)._Matrix__matrix), (x+y)._Matrix__matrix\n        self.assertTrue(all([a[i][j] == b[i][j] for i in range(m) for j in range(n)]))\n\n    def test_subtraction(self):\n        m, n = randint(1, 50), randint(1, 50)\n        x, y = ml.Matrix(m, n).rand(), ml.Matrix(m, n).rand()\n        a, b = np.subtract(deepcopy(x)._Matrix__matrix, deepcopy(y)._Matrix__matrix), (x-y)._Matrix__matrix\n        self.assertTrue(all([a[i][j] == b[i][j] for i in range(m) for j in range(n)]))\n\n    def test_transposition(self):\n        m, n = randint(1, 50), randint(1, 50)\n        x = ml.Matrix(m, n).rand()\n        a, b = np.transpose(deepcopy(x)._Matrix__matrix), (x**\"t\")._Matrix__matrix\n        self.assertTrue(all([a[i][j] == b[i][j] for i in range(n) for j in range(m)]))\n\n    def test_multiplication(self):\n        m, n, p = randint(1, 20), randint(1, 20), randint(1, 20)\n        x, y = ml.Matrix(m, n).rand(), ml.Matrix(n, p).rand()\n        a, b = np.matmul(deepcopy(x)._Matrix__matrix, deepcopy(y)._Matrix__matrix), (x@y)._Matrix__matrix\n        self.assertTrue(all([a[i][j] == b[i][j] for i in range(m) for j in range(p)]))\n\n    def test_linearsolve(self):\n        n = randint(2, 10)\n        x, y = ml.Matrix(n, n).rand(), ml.Matrix(n, 1).rand()\n        while x.det() == 0:\n            x.rand()\n        a, b = np.linalg.solve(np.array(deepcopy(x)._Matrix__matrix, dtype='float64'), np.array([deepcopy(y)._Matrix__matrix[i][0] for i in range(n)],dtype='float64')), x.lin_sol(y)._Matrix__matrix\n        self.assertTrue(all([isclose(a[i], b[i][0]) for i in range(n)]))\n\n    def test_determinant(self):\n        n = randint(1, 50)\n        x = ml.Matrix(n, n).rand()\n        a, b = np.linalg.det(np.array(deepcopy(x)._Matrix__matrix,dtype='float64')), x.det()\n        self.assertTrue(isclose(a, b))\n\n    def test_inverse(self):\n        n = randint(1, 10)\n        x = ml.Matrix(n, n).rand()\n        while x.det() == 0:\n            x.rand()\n        a, b = np.linalg.inv(np.array(deepcopy(x)._Matrix__matrix,dtype='float64')), (x**(-1))._Matrix__matrix\n        self.assertTrue(all([isclose(a[i][j], b[i][j]) for i in range(n) for j in range(n)]))\n\n    def test_scalarmultiplication(self):\n        m, n, y = randint(1,50), randint(1,50), randrange(-100,100)\n        x = ml.Matrix(m, n).rand()\n        a, b = np.array(deepcopy(x)._Matrix__matrix)*y, (y*x)._Matrix__matrix\n        self.assertTrue(all([isclose(a[i][j], b[i][j]) for i in range(x._Matrix__rows) for j in range(x._Matrix__cols)]))\n\n    def test_dotproduct(self):\n        n = randint(1, 20)\n        x, y = ml.Matrix(n,1).rand(), ml.Matrix(n,1).rand()\n        a, b = np.dot([deepcopy(x)._Matrix__matrix[i][0] for i in range(n)], [deepcopy(y)._Matrix__matrix[i][0] for i in range(n)]), x*y\n        self.assertTrue(isclose(a, b))\n\n    def test_leastsquares(self):\n        m, n = randint(1, 10), 
randint(1, 10)\n        x, y = ml.Matrix(m, n).rand(), ml.Matrix(m, 1).rand()\n        if ((x**\"t\")@x).det() == 0:\n            self.assertTrue(True)\n        else:\n            a, b = np.linalg.lstsq(np.array(deepcopy(x)._Matrix__matrix,dtype = 'float64'), np.array([deepcopy(y)._Matrix__matrix[i][0] for i in range(m)],dtype = 'float64'),rcond=1)[0], x.least_squares(y)._Matrix__matrix\n            self.assertTrue(all([isclose(a[i],b[i][0]) for i in range(n)]))\n\n\nif __name__ == '__main__':\n    unittest.main()","repo_name":"marekplesnik/matrix-library","sub_path":"numpy_validation/test_numpy.py","file_name":"test_numpy.py","file_ext":"py","file_size_in_byte":3907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"7204610383","text":"from typing import List, Callable\nfrom hummingbot.client.config.config_helpers import get_connector_class\nfrom hummingbot.connector.exchange.paper_trade.market_config import MarketConfig\nfrom hummingbot.connector.exchange.paper_trade.paper_trade_exchange import PaperTradeExchange\nfrom hummingbot.client.settings import AllConnectorSettings\n\n\ndef get_order_book_tracker_class(connector_name: str) -> Callable:\n    conn_setting = AllConnectorSettings.get_connector_settings()[connector_name]\n    module_name = f\"{conn_setting.base_name()}_order_book_tracker\"\n    class_name = \"\".join([o.capitalize() for o in module_name.split(\"_\")])\n    try:\n        mod = __import__(f'hummingbot.connector.{conn_setting.type.name.lower()}.{conn_setting.base_name()}.'\n                         f'{module_name}',\n                         fromlist=[class_name])\n        return getattr(mod, class_name)\n    except Exception:\n        pass\n    raise Exception(f\"Connector {connector_name} OrderBookTracker class not found\")\n\n\ndef create_paper_trade_market(exchange_name: str, trading_pairs: List[str]):\n    obt_class = get_order_book_tracker_class(exchange_name)\n    conn_setting = AllConnectorSettings.get_connector_settings()[exchange_name]\n    obt_params = {\"trading_pairs\": trading_pairs}\n    obt_kwargs = conn_setting.add_domain_parameter(obt_params)\n    obt_obj = obt_class(**obt_kwargs)\n    return PaperTradeExchange(obt_obj,\n                              MarketConfig.default_config(),\n                              get_connector_class(exchange_name))\n","repo_name":"HappyDream0317/hummingbot","sub_path":"hummingbot/connector/exchange/paper_trade/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1538,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"85"} +{"seq_id":"21924774590","text":"\nimport datetime\nimport cv2\nfrom PyQt5.QtCore import *\nimport threading, os, time\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5.QtGui import QPixmap\nfrom PyQt5.QtSql import QSqlDatabase\nfrom PyQt5.QtWidgets import QGraphicsDropShadowEffect, QFileDialog, QApplication, QListWidget\nfrom main import start_camera, stop, re_start, start_video1, start_video2, start_video3, start_video4, start_video5, start_video6, start_video7,find_one\nfrom image import start_image, people_num1\nfrom PyQt5.QtCore import QThread, pyqtSignal\nimport shutil\nimport sqlite3\nglobal is_open, index\nis_open = 1\nindex = 1\n\nclass WorkThread(QThread):\n    # Custom signal object; the str parameter means this signal carries a string\n    signal = pyqtSignal(str)\n\n    def __init__(self):\n        # Initializer\n        super(WorkThread, self).__init__()\n\n    def run(self):\n        time.sleep(10)\n        for i in range(1, 298):\n            # Pass the string to be displayed to the slot via the custom signal\n            self.signal.emit(str(i))\n            time.sleep(0.4)\n\nclass Work2Thread(QThread):\n    def __init__(self):\n        # Initializer\n        super(Work2Thread, self).__init__()\n\n    def run(self):\n        a = Ui_self()\n        a.start_video()\n\nclass 
Work3Thread(QThread):\n    def __init__(self):\n        # Initializer\n        super(Work3Thread, self).__init__()\n\n    def run(self):\n        a = Ui_self()\n        a.start_camera()\n\nclass Work4Thread(QThread):\n    def __init__(self):\n        # Initializer\n        super(Work4Thread, self).__init__()\n\n    def run(self):\n        a = Ui_self()\n        a.start_image()\n\n\n#----------------------------------\n\nclass Ui_self(QtWidgets.QMainWindow):\n\n    def __init__(self):\n        super().__init__()\n        self.setting_flag = 1\n        self.init_ui()\n\n        self.timer1 = QtCore.QTimer()\n        self.timer2 = QtCore.QTimer()\n        self.timer3 = QtCore.QTimer()\n        self.timer4 = QtCore.QTimer()\n        self.cap = cv2.VideoCapture()\n        self.work = WorkThread() # displays the processed frames\n        self.work.signal.connect(self.show_image) # slot activated when the worker thread emits its signal\n        self.work2 = Work2Thread() # video detection\n        self.work3 = Work3Thread() # camera detection\n        self.work4 = Work4Thread() # image detection\n        self.listFile = QListWidget()\n\n        self.videoName=[] # list of selected file paths\n\n    def init_ui(self):\n        self.setWindowFlags(Qt.FramelessWindowHint)\n        self.setAttribute(Qt.WA_TranslucentBackground)\n\n        self.setObjectName(\"self\")\n        self.resize(1223, 640)\n        self.label_p1 = QtWidgets.QLabel(self)\n        self.label_p1.setGeometry(QtCore.QRect(-20, -10, 401, 651))\n        self.label_p1.setText(\"\")\n        self.label_p1.setPixmap(QtGui.QPixmap(\"./icons/bg.png\"))\n        self.label_p1.setScaledContents(True)\n        self.label_p1.setObjectName(\"label_p1\")\n        self.label_pp = QtWidgets.QLabel(self)\n        self.label_pp.setGeometry(QtCore.QRect(80, 110, 201, 61))\n        self.label_pp.setText(\"\")\n        self.label_pp.setPixmap(QtGui.QPixmap(\"./icons/下载.png\"))\n        self.label_pp.setScaledContents(True)\n        self.label_pp.setObjectName(\"label_pp\")\n        self.label_name = QtWidgets.QLabel(self)\n        self.label_name.setGeometry(QtCore.QRect(50, 180, 261, 41))\n        self.label_name.setStyleSheet(\"color: rgb(255, 255, 255);\\n\"\n\"font: 12pt \\\"黑体\\\";\")\n        self.label_name.setAlignment(QtCore.Qt.AlignCenter)\n        self.label_name.setObjectName(\"label_name\")\n\n        self.label_team = QtWidgets.QLabel(self)\n        self.label_team.setGeometry(QtCore.QRect(50, 205, 261, 41))\n        self.label_team.setStyleSheet(\"color: rgb(255, 255, 255);\\n\"\n\"font: 11pt \\\"黑体\\\";\")\n        self.label_team.setAlignment(QtCore.Qt.AlignCenter)\n        self.label_team.setObjectName(\"label_team\")\n\n        self.btn_close = QtWidgets.QPushButton(self)\n        self.btn_close.setGeometry(QtCore.QRect(305, 32, 35, 35))\n        self.btn_close.setStyleSheet(\"QPushButton{\\n\"\n\"    border:none;\\n\"\n\"    border-radius:6px;\\n\"\n\"    background-color: transparent;\\n\"\n\"}\\n\"\n\"QPushButton:hover{\\n\"\n\"    background-color: rgb(52, 59, 72);\\n\"\n\"}\\n\"\n\"QPushButton:pressed{\\n\"\n\"    background-color: rgb(25, 40, 50);\\n\"\n\"}\")\n        self.btn_close.setText(\"\")\n        icon = QtGui.QIcon()\n        icon.addPixmap(QtGui.QPixmap(\"./icons/cil-x.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n        self.btn_close.setIcon(icon)\n        self.btn_close.setFlat(True)\n        self.btn_close.setObjectName(\"btn_close\")\n        self.btn_min = QtWidgets.QPushButton(self)\n        self.btn_min.setGeometry(QtCore.QRect(272, 32, 35, 35))\n        self.btn_min.setStyleSheet(\"QPushButton{\\n\"\n\"    border:none;\\n\"\n\"    border-radius:6px;\\n\"\n\"    background-color: transparent;\\n\"\n\"}\\n\"\n\"QPushButton:hover{\\n\"\n\"    background-color: rgb(52, 59, 72);\\n\"\n\"}\\n\"\n\"QPushButton:pressed{\\n\"\n\"    background-color: rgb(25, 40, 50);\\n\"\n\"}\")\n        self.btn_min.setText(\"\")\n        icon1 = QtGui.QIcon()\n        icon1.addPixmap(QtGui.QPixmap(\"./icons/cil-window-minimize.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n        self.btn_min.setIcon(icon1)\n        
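# Flat buttons drop the native bevel so the stylesheet above fully controls their look.\n        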
self.btn_min.setFlat(True)\n self.btn_min.setObjectName(\"btn_min\")\n self.btn_show = QtWidgets.QPushButton(self)\n self.btn_show.setGeometry(QtCore.QRect(290, 550, 35, 35))\n self.btn_show.setStyleSheet(\"QPushButton{\\n\"\n \" border: 2px solid rgb(52, 59, 72);\\n\"\n \" border-radius: 5px;\\n\"\n \" background-color: rgb(52, 59, 72);\\n\"\n \"}\\n\"\n \"QPushButton:hover{\\n\"\n \" background-color: rgb(57, 65, 80);\\n\"\n \" border: 2px solid rgb(61, 70, 86);\\n\"\n \"}\\n\"\n \"QPushButton:pressed{\\n\"\n \" background-color: rgb(35, 40, 49);\\n\"\n \" border: 2px solid rgb(43, 50, 61);\\n\"\n \"}\")\n self.btn_show.setText(\"\")\n icon2 = QtGui.QIcon()\n icon2.addPixmap(QtGui.QPixmap(\"./icons/right.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.btn_show.setIcon(icon2)\n self.btn_show.setFlat(True)\n self.btn_show.setObjectName(\"btn_show\")\n self.label_upload = QtWidgets.QLabel(self)\n self.label_upload.setGeometry(QtCore.QRect(48, 305, 290, 101))\n self.label_upload.setText(\"\")\n self.label_upload.setPixmap(QtGui.QPixmap(\"./icons/label_gray.png\"))\n self.label_upload.setScaledContents(True)\n self.label_upload.setObjectName(\"label_upload\")\n self.btn_camera = QtWidgets.QPushButton(self)\n self.btn_camera.setGeometry(QtCore.QRect(55, 310, 81, 81))\n self.btn_camera.setStyleSheet(\"QPushButton{\\n\"\n \" border: 2px solid rgb(52, 59, 72);\\n\"\n \" border-radius: 5px;\\n\"\n \" background-color: rgb(52, 59, 72);\\n\"\n \"}\\n\"\n \"QPushButton:hover{\\n\"\n \" background-color: rgb(57, 65, 80);\\n\"\n \" border: 2px solid rgb(61, 70, 86);\\n\"\n \"}\\n\"\n \"QPushButton:pressed{\\n\"\n \" background-color: rgb(35, 40, 49);\\n\"\n \" border: 2px solid rgb(43, 50, 61);\\n\"\n \"}\")\n self.btn_camera.setText(\"\")\n icon3 = QtGui.QIcon()\n icon3.addPixmap(QtGui.QPixmap(\"./icons/camera.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.btn_camera.setIcon(icon3)\n self.btn_camera.setIconSize(QtCore.QSize(50, 50))\n self.btn_camera.setObjectName(\"btn_camera\")\n self.btn_image = QtWidgets.QPushButton(self)\n self.btn_image.setGeometry(QtCore.QRect(141, 310, 81, 81))\n self.btn_image.setStyleSheet(\"QPushButton{\\n\"\n \" border: 2px solid rgb(52, 59, 72);\\n\"\n \" border-radius: 5px;\\n\"\n \" background-color: rgb(52, 59, 72);\\n\"\n \"}\\n\"\n \"QPushButton:hover{\\n\"\n \" background-color: rgb(57, 65, 80);\\n\"\n \" border: 2px solid rgb(61, 70, 86);\\n\"\n \"}\\n\"\n \"QPushButton:pressed{\\n\"\n \" background-color: rgb(35, 40, 49);\\n\"\n \" border: 2px solid rgb(43, 50, 61);\\n\"\n \"}\")\n self.btn_image.setText(\"\")\n icon4 = QtGui.QIcon()\n icon4.addPixmap(QtGui.QPixmap(\"./icons/image.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.btn_image.setIcon(icon4)\n self.btn_image.setIconSize(QtCore.QSize(50, 50))\n self.btn_image.setObjectName(\"btn_image\")\n self.btn_video = QtWidgets.QPushButton(self)\n self.btn_video.setGeometry(QtCore.QRect(226, 310, 81, 81))\n self.btn_video.setStyleSheet(\"QPushButton{\\n\"\n \" border: 2px solid rgb(52, 59, 72);\\n\"\n \" border-radius: 5px;\\n\"\n \" background-color: rgb(52, 59, 72);\\n\"\n \"}\\n\"\n \"QPushButton:hover{\\n\"\n \" background-color: rgb(57, 65, 80);\\n\"\n \" border: 2px solid rgb(61, 70, 86);\\n\"\n \"}\\n\"\n \"QPushButton:pressed{\\n\"\n \" background-color: rgb(35, 40, 49);\\n\"\n \" border: 2px solid rgb(43, 50, 61);\\n\"\n \"}\")\n self.btn_video.setText(\"\")\n icon5 = QtGui.QIcon()\n icon5.addPixmap(QtGui.QPixmap(\"./icons/video.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n 
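# btn_video lets the user queue one or more video files for detection (see getVideoInfo below).\n        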
self.btn_video.setIcon(icon5)\n self.btn_video.setIconSize(QtCore.QSize(50, 50))\n self.btn_video.setObjectName(\"btn_video\")\n self.label_11 = QtWidgets.QLabel(self)\n self.label_11.setGeometry(QtCore.QRect(50, 420, 290, 101))\n self.label_11.setText(\"\")\n self.label_11.setPixmap(QtGui.QPixmap(\"./icons/label_gray.png\"))\n self.label_11.setScaledContents(True)\n self.label_11.setObjectName(\"label_11\")\n self.frame = QtWidgets.QFrame(self)\n self.frame.setGeometry(QtCore.QRect(339, 14, 891, 611))\n self.frame.setFrameShape(QtWidgets.QFrame.StyledPanel)\n self.frame.setFrameShadow(QtWidgets.QFrame.Raised)\n self.frame.setObjectName(\"frame\")\n self.label_p2 = QtWidgets.QLabel(self.frame)\n self.label_p2.setGeometry(QtCore.QRect(0, 0, 931, 561))\n self.label_p2.setText(\"\")\n self.label_p2.setPixmap(QtGui.QPixmap(\"./icons/label_gray.png\"))\n self.label_p2.setScaledContents(True)\n self.label_p2.setObjectName(\"label_p2\")\n self.label_p3 = QtWidgets.QLabel(self.frame)\n self.label_p3.setGeometry(QtCore.QRect(0, 500, 878, 111))\n self.label_p3.setText(\"\")\n self.label_p3.setPixmap(QtGui.QPixmap(\"./icons/label.png\"))\n self.label_p3.setScaledContents(True)\n self.label_p3.setObjectName(\"label_p3\")\n self.label_people = QtWidgets.QLabel(self.frame)\n self.label_people.setGeometry(QtCore.QRect(80, 530, 81, 41))\n self.label_people.setStyleSheet(\"color: rgb(255, 255, 255);\\n\"\n \"font: 12pt \\\"黑体\\\";\")\n self.label_people.setAlignment(QtCore.Qt.AlignCenter)\n self.label_people.setObjectName(\"label_people\")\n self.label_user = QtWidgets.QLabel(self.frame)\n self.label_user.setGeometry(QtCore.QRect(40, 530, 41, 41))\n self.label_user.setText(\"\")\n self.label_user.setPixmap(QtGui.QPixmap(\"./icons/user (1).png\"))\n self.label_user.setScaledContents(True)\n self.label_user.setObjectName(\"label_user\")\n self.label_cv = QtWidgets.QLabel(self.frame)\n self.label_cv.setGeometry(QtCore.QRect(36, 37, 791, 441))\n self.label_cv.setStyleSheet(\"border-radius:10px;\\n\"\n \"border: 2px solid rgb(52, 59, 72);\")\n self.label_cv.setText(\"\")\n self.label_cv.setObjectName(\"label_cv\")\n self.btn_stop = QtWidgets.QPushButton(self.frame)\n self.btn_stop.setGeometry(QtCore.QRect(646, 518, 61, 61))\n self.btn_stop.setStyleSheet(\"QPushButton{\\n\"\n \" border: 2px solid rgb(52, 59, 72);\\n\"\n \" border-radius: 5px;\\n\"\n \" background-color: rgb(52, 59, 72);\\n\"\n \"}\\n\"\n \"QPushButton:hover{\\n\"\n \" background-color: rgb(57, 65, 80);\\n\"\n \" border: 2px solid rgb(61, 70, 86);\\n\"\n \"}\\n\"\n \"QPushButton:pressed{\\n\"\n \" background-color: rgb(35, 40, 49);\\n\"\n \" border: 2px solid rgb(43, 50, 61);\\n\"\n \"}\")\n self.btn_stop.setText(\"\")\n icon6 = QtGui.QIcon()\n icon6.addPixmap(QtGui.QPixmap(\"./icons/stop.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.btn_stop.setIcon(icon6)\n self.btn_stop.setIconSize(QtCore.QSize(41, 41))\n self.btn_stop.setObjectName(\"btn_stop\")\n self.btn_play = QtWidgets.QPushButton(self.frame)\n self.btn_play.setGeometry(QtCore.QRect(711, 518, 61, 61))\n self.btn_play.setStyleSheet(\"QPushButton{\\n\"\n \" border: 2px solid rgb(52, 59, 72);\\n\"\n \" border-radius: 5px;\\n\"\n \" background-color: rgb(52, 59, 72);\\n\"\n \"}\\n\"\n \"QPushButton:hover{\\n\"\n \" background-color: rgb(57, 65, 80);\\n\"\n \" border: 2px solid rgb(61, 70, 86);\\n\"\n \"}\\n\"\n \"QPushButton:pressed{\\n\"\n \" background-color: rgb(35, 40, 49);\\n\"\n \" border: 2px solid rgb(43, 50, 61);\\n\"\n \"}\")\n self.btn_play.setText(\"\")\n icon7 = 
QtGui.QIcon()\n icon7.addPixmap(QtGui.QPixmap(\"./icons/play.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.btn_play.setIcon(icon7)\n self.btn_play.setIconSize(QtCore.QSize(41, 41))\n self.btn_play.setObjectName(\"btn_play\")\n self.btn_pause = QtWidgets.QPushButton(self.frame)\n self.btn_pause.setGeometry(QtCore.QRect(777, 518, 61, 61))\n self.btn_pause.setStyleSheet(\"QPushButton{\\n\"\n \" border: 2px solid rgb(52, 59, 72);\\n\"\n \" border-radius: 5px;\\n\"\n \" background-color: rgb(52, 59, 72);\\n\"\n \"}\\n\"\n \"QPushButton:hover{\\n\"\n \" background-color: rgb(57, 65, 80);\\n\"\n \" border: 2px solid rgb(61, 70, 86);\\n\"\n \"}\\n\"\n \"QPushButton:pressed{\\n\"\n \" background-color: rgb(35, 40, 49);\\n\"\n \" border: 2px solid rgb(43, 50, 61);\\n\"\n \"}\")\n self.btn_pause.setText(\"\")\n icon8 = QtGui.QIcon()\n icon8.addPixmap(QtGui.QPixmap(\"./icons/pause.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.btn_pause.setIcon(icon8)\n self.btn_pause.setIconSize(QtCore.QSize(41, 41))\n self.btn_pause.setObjectName(\"btn_pause\")\n self.spinbox_num = QtWidgets.QSpinBox(self.frame)\n self.spinbox_num.setGeometry(QtCore.QRect(380, 540, 71, 21))\n self.spinbox_num.setStyleSheet(\"QSpinBox{\\n\"\n \" border: 2px solid rgb(52, 59, 72);\\n\"\n \" border-radius: 5px;\\n\"\n \" background-color: rgb(52, 59, 72);\\n\"\n \" color: rgb(255, 255, 255);\\n\"\n \"font: 13pt \\\"黑体\\\";}\\n\"\n \"QSpinBox:up-button{\\n\"\n \"image: url(./icons/add.png);\\n\"\n \" background-color: rgb(44, 49, 60);\\n\"\n \" subcontrol-position:right;\\n\"\n \" width:20px;\\n\"\n \" height:20px;\\n\"\n \"\\n\"\n \"}\\n\"\n \"QSpinBox:down-button{\\n\"\n \" \\n\"\n \" image: url(./icons/line.png);\\n\"\n \" background-color: rgb(44, 49, 60);\\n\"\n \" subcontrol-position:left;\\n\"\n \" width:20px;\\n\"\n \" height:20px;\\n\"\n \"}\\n\"\n \"QSPinBox:up-button:hover{\\n\"\n \" background-color: rgb(57, 65, 80);\\n\"\n \" border: 2px solid rgb(61, 70, 86);\\n\"\n \"}\\n\"\n \"QSpinBox:up-button:pressed{\\n\"\n \" background-color: rgb(35, 40, 49);\\n\"\n \" border: 2px solid rgb(43, 50, 61);\\n\"\n \"}\\n\"\n \"QSPinBox:down-button:hover{\\n\"\n \" background-color: rgb(57, 65, 80);\\n\"\n \" border: 2px solid rgb(61, 70, 86);\\n\"\n \"}\\n\"\n \"QSpinBox:down-button:pressed{\\n\"\n \" background-color: rgb(35, 40, 49);\\n\"\n \" border: 2px solid rgb(43, 50, 61);\\n\"\n \"}\")\n self.spinbox_num.setAlignment(QtCore.Qt.AlignCenter)\n self.spinbox_num.setObjectName(\"spinbox_num\")\n\n self.label_single = QtWidgets.QLabel(self.frame)\n self.label_single.setGeometry(QtCore.QRect(301, 534, 81, 31))\n self.label_single.setStyleSheet(\"color: rgb(255, 255, 255);\\n\"\n \"font: 12pt \\\"黑体\\\";\")\n self.label_single.setObjectName(\"label_single\")\n self.btn_num = QtWidgets.QPushButton(self.frame)\n self.btn_num.setGeometry(QtCore.QRect(460, 535, 51, 31))\n self.btn_num.setStyleSheet(\"QPushButton{\\n\"\n \" border: 2px solid rgb(52, 59, 72);\\n\"\n \" border-radius: 5px;\\n\"\n \" background-color: rgb(52, 59, 72);\\n\"\n \"color: rgb(255, 255, 255);\\n\"\n \"font: 12pt \\\"黑体\\\";\\n\"\n \"}\\n\"\n \"QPushButton:hover{\\n\"\n \" background-color: rgb(57, 65, 80);\\n\"\n \" border: 2px solid rgb(61, 70, 86);\\n\"\n \"}\\n\"\n \"QPushButton:pressed{\\n\"\n \" background-color: rgb(35, 40, 49);\\n\"\n \" border: 2px solid rgb(43, 50, 61);\\n\"\n \"}\")\n self.btn_num.setObjectName(\"btn_num\")\n self.btn_hide = QtWidgets.QPushButton(self)\n self.btn_hide.setGeometry(QtCore.QRect(290, 550, 35, 35))\n 
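# btn_hide collapses the right-hand detection panel again (handled in hide_frame below).\n        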
self.btn_hide.setStyleSheet(\"QPushButton{\\n\"\n \" border: 2px solid rgb(52, 59, 72);\\n\"\n \" border-radius: 5px;\\n\"\n \" background-color: rgb(52, 59, 72);\\n\"\n \"}\\n\"\n \"QPushButton:hover{\\n\"\n \" background-color: rgb(57, 65, 80);\\n\"\n \" border: 2px solid rgb(61, 70, 86);\\n\"\n \"}\\n\"\n \"QPushButton:pressed{\\n\"\n \" background-color: rgb(35, 40, 49);\\n\"\n \" border: 2px solid rgb(43, 50, 61);\\n\"\n \"}\")\n self.btn_hide.setText(\"\")\n icon9 = QtGui.QIcon()\n icon9.addPixmap(QtGui.QPixmap(\"./icons/return.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.btn_hide.setIcon(icon9)\n self.btn_hide.setFlat(True)\n self.btn_hide.setObjectName(\"btn_hide\")\n self.label_degree = QtWidgets.QLabel(self)\n self.label_degree.setGeometry(QtCore.QRect(80, 440, 50, 50))\n self.label_degree.setText(\"\")\n self.label_degree.setPixmap(QtGui.QPixmap(\"./icons/filter.png\"))\n self.label_degree.setScaledContents(True)\n self.label_degree.setObjectName(\"label_degree\")\n self.spinbox = QtWidgets.QDoubleSpinBox(self)\n self.spinbox.setEnabled(True)\n self.spinbox.setGeometry(QtCore.QRect(150, 450, 141, 31))\n self.spinbox.setLayoutDirection(QtCore.Qt.LeftToRight)\n self.spinbox.setAutoFillBackground(False)\n self.spinbox.setStyleSheet(\"QDoubleSpinBox{\\n\"\n \" border: 2px solid rgb(52, 59, 72);\\n\"\n \" border-radius: 5px;\\n\"\n \" background-color: rgb(52, 59, 72);\\n\"\n \" color: rgb(255, 255, 255);\\n\"\n \"font: 13pt \\\"黑体\\\";}\\n\"\n \"QDoubleSpinBox:up-button{\\n\"\n \" image: url(./icons/add.png);\\n\"\n \" background-color: rgb(44, 49, 60);\\n\"\n \" subcontrol-position:right;\\n\"\n \" width:30px;\\n\"\n \" height:30px;\\n\"\n \"\\n\"\n \"}\\n\"\n \"QDoubleSpinBox:down-button{\\n\"\n \" \\n\"\n \" image: url(./icons/line.png);\\n\"\n \" background-color: rgb(44, 49, 60);\\n\"\n \" subcontrol-position:left;\\n\"\n \" width:30px;\\n\"\n \" height:30px;\\n\"\n \"}\\n\"\n \"QDoubleSPinBox:up-button:hover{\\n\"\n \" background-color: rgb(57, 65, 80);\\n\"\n \" border: 2px solid rgb(61, 70, 86);\\n\"\n \"}\\n\"\n \"QDoubleSpinBox:up-button:pressed{\\n\"\n \" background-color: rgb(35, 40, 49);\\n\"\n \" border: 2px solid rgb(43, 50, 61);\\n\"\n \"}\\n\"\n \"QDoubleSPinBox:down-button:hover{\\n\"\n \" background-color: rgb(57, 65, 80);\\n\"\n \" border: 2px solid rgb(61, 70, 86);\\n\"\n \"}\\n\"\n \"QDoubleSpinBox:down-button:pressed{\\n\"\n \" background-color: rgb(35, 40, 49);\\n\"\n \" border: 2px solid rgb(43, 50, 61);\\n\"\n \"}\")\n self.spinbox.setAlignment(QtCore.Qt.AlignCenter)\n self.spinbox.setReadOnly(False)\n self.spinbox.setMaximum(1.0)\n self.spinbox.setSingleStep(0.01)\n self.spinbox.setProperty(\"value\", 0.5)\n self.spinbox.setObjectName(\"spinbox\")\n\n self.btn_setting = QtWidgets.QPushButton(self)\n self.btn_setting.setGeometry(QtCore.QRect(40, 550, 35, 35))\n self.btn_setting.setStyleSheet(\"QPushButton{\\n\"\n \" border: 2px solid rgb(52, 59, 72);\\n\"\n \" border-radius: 5px;\\n\"\n \" background-color: rgb(52, 59, 72);\\n\"\n \"}\\n\"\n \"QPushButton:hover{\\n\"\n \" background-color: rgb(57, 65, 80);\\n\"\n \" border: 2px solid rgb(61, 70, 86);\\n\"\n \"}\\n\"\n \"QPushButton:pressed{\\n\"\n \" background-color: rgb(35, 40, 49);\\n\"\n \" border: 2px solid rgb(43, 50, 61);\\n\"\n \"}\")\n self.btn_setting.setText(\"\")\n icon10 = QtGui.QIcon()\n icon10.addPixmap(QtGui.QPixmap(\"./icons/setting.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.btn_setting.setIconSize(QtCore.QSize(30, 30))\n self.btn_setting.setIcon(icon10)\n 
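# btn_setting opens the settings panel assembled from frame_setting below (see show_setting).\n        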
self.btn_setting.setFlat(True)\n self.btn_setting.setObjectName(\"btn_setting\")\n self.frame_setting = QtWidgets.QFrame(self)\n self.frame_setting.setGeometry(QtCore.QRect(350, 150, 271, 341))\n self.frame_setting.setFrameShape(QtWidgets.QFrame.StyledPanel)\n self.frame_setting.setFrameShadow(QtWidgets.QFrame.Raised)\n self.frame_setting.setObjectName(\"frame_setting\")\n\n self.label_2 = QtWidgets.QLabel(self.frame_setting)\n self.label_2.setGeometry(QtCore.QRect(-20, -10, 311, 361))\n self.label_2.setText(\"\")\n self.label_2.setPixmap(QtGui.QPixmap(\"./icons/bg.png\"))\n self.label_2.setScaledContents(True)\n self.label_2.setObjectName(\"label_2\")\n\n self.btn_GPUoff = QtWidgets.QPushButton(self.frame_setting)\n self.btn_GPUoff.setGeometry(QtCore.QRect(150, 76, 51, 31))\n self.btn_GPUoff.setStyleSheet(\"QPushButton{\\n\"\n \" border: 2px solid rgb(52, 59, 72);\\n\"\n \" border-radius: 5px;\\n\"\n \" background-color: rgb(52, 59, 72);\\n\"\n \"color: rgb(255, 255, 255);\\n\"\n \"font: 12pt \\\"黑体\\\";\\n\"\n \"}\\n\"\n \"QPushButton:hover{\\n\"\n \" background-color: rgb(57, 65, 80);\\n\"\n \" border: 2px solid rgb(61, 70, 86);\\n\"\n \"}\\n\"\n \"QPushButton:pressed{\\n\"\n \" background-color: rgb(35, 40, 49);\\n\"\n \" border: 2px solid rgb(43, 50, 61);\\n\"\n \"}\")\n self.btn_GPUoff.setText(\"\")\n icon10 = QtGui.QIcon()\n icon10.addPixmap(QtGui.QPixmap(\"./icons/switch-OFF (1).png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.btn_GPUoff.setIcon(icon10)\n self.btn_GPUoff.setIconSize(QtCore.QSize(61, 41))\n self.btn_GPUoff.setObjectName(\"btn_GPUoff\")\n self.label_setting_1 = QtWidgets.QLabel(self.frame_setting)\n self.label_setting_1.setGeometry(QtCore.QRect(70, 80, 71, 21))\n self.label_setting_1.setStyleSheet(\"color: rgb(255, 255, 255);\\n\"\n \"font: 12pt \\\"黑体\\\";\")\n self.label_setting_1.setObjectName(\"label_setting_1\")\n\n self.label_setting_2 = QtWidgets.QLabel(self.frame_setting)\n self.label_setting_2.setGeometry(QtCore.QRect(40, 229, 111, 31))\n self.label_setting_2.setStyleSheet(\"color: rgb(255, 255, 255);\\n\"\n \"font: 12pt \\\"黑体\\\";\")\n self.label_setting_2.setObjectName(\"label_setting_2\")\n\n self.btn_setting_close = QtWidgets.QPushButton(self.frame_setting)\n self.btn_setting_close.setGeometry(QtCore.QRect(222, 14, 35, 35))\n self.btn_setting_close.setStyleSheet(\"QPushButton{\\n\"\n \" border:none;\\n\"\n \" border-radius:6px;\\n\"\n \" background-color: transparent;\\n\"\n \"}\\n\"\n \"QPushButton:hover{\\n\"\n \" background-color: rgb(52, 59, 72);\\n\"\n \"}\\n\"\n \"QPushButton:pressed{\\n\"\n \" background-color: rgb(25, 40, 50);\\n\"\n \"}\")\n self.btn_setting_close.setText(\"\")\n self.btn_setting_close.setIcon(icon)\n self.btn_setting_close.setFlat(True)\n self.btn_setting_close.setObjectName(\"btn_setting_close\")\n self.btn_history = QtWidgets.QPushButton(self.frame_setting)\n self.btn_history.setGeometry(QtCore.QRect(60, 130, 151, 31))\n self.btn_history.setStyleSheet(\"QPushButton{\\n\"\n \" border: 2px solid rgb(52, 59, 72);\\n\"\n \" border-radius: 5px;\\n\"\n \" background-color: rgb(52, 59, 72);\\n\"\n \"color: rgb(255, 255, 255);\\n\"\n \"font: 12pt \\\"黑体\\\";\\n\"\n \"}\\n\"\n \"QPushButton:hover{\\n\"\n \" background-color: rgb(57, 65, 80);\\n\"\n \" border: 2px solid rgb(61, 70, 86);\\n\"\n \"}\\n\"\n \"QPushButton:pressed{\\n\"\n \" background-color: rgb(35, 40, 49);\\n\"\n \" border: 2px solid rgb(43, 50, 61);\\n\"\n \"}\")\n self.btn_history.setObjectName(\"btn_history\")\n\n self.btn_history1 = 
QtWidgets.QPushButton(self.frame_setting)\n self.btn_history1.setGeometry(QtCore.QRect(60, 170, 151, 31))\n self.btn_history1.setStyleSheet(\"QPushButton{\\n\"\n \" border: 2px solid rgb(52, 59, 72);\\n\"\n \" border-radius: 5px;\\n\"\n \" background-color: rgb(52, 59, 72);\\n\"\n \"color: rgb(255, 255, 255);\\n\"\n \"font: 12pt \\\"黑体\\\";\\n\"\n \"}\\n\"\n \"QPushButton:hover{\\n\"\n \" background-color: rgb(57, 65, 80);\\n\"\n \" border: 2px solid rgb(61, 70, 86);\\n\"\n \"}\\n\"\n \"QPushButton:pressed{\\n\"\n \" background-color: rgb(35, 40, 49);\\n\"\n \" border: 2px solid rgb(43, 50, 61);\\n\"\n \"}\")\n self.btn_history1.setObjectName(\"btn_history1\")\n\n self.btn_GPUon = QtWidgets.QPushButton(self.frame_setting)\n self.btn_GPUon.setGeometry(QtCore.QRect(150, 76, 51, 31))\n self.btn_GPUon.setStyleSheet(\"QPushButton{\\n\"\n \" border: 2px solid rgb(52, 59, 72);\\n\"\n \" border-radius: 5px;\\n\"\n \" background-color: rgb(52, 59, 72);\\n\"\n \"color: rgb(255, 255, 255);\\n\"\n \"font: 12pt \\\"黑体\\\";\\n\"\n \"}\\n\"\n \"QPushButton:hover{\\n\"\n \" background-color: rgb(57, 65, 80);\\n\"\n \" border: 2px solid rgb(61, 70, 86);\\n\"\n \"}\\n\"\n \"QPushButton:pressed{\\n\"\n \" background-color: rgb(35, 40, 49);\\n\"\n \" border: 2px solid rgb(43, 50, 61);\\n\"\n \"}\")\n self.btn_GPUon.setText(\"\")\n icon11 = QtGui.QIcon()\n icon11.addPixmap(QtGui.QPixmap(\"./icons/switch-ON (1).png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.btn_GPUon.setIcon(icon11)\n self.btn_GPUon.setIconSize(QtCore.QSize(61, 41))\n self.btn_GPUon.setObjectName(\"btn_GPUon\")\n\n self.spinbox_num_2 = QtWidgets.QSpinBox(self.frame_setting)\n self.spinbox_num_2.setGeometry(QtCore.QRect(160, 230, 61, 31))\n self.spinbox_num_2.setStyleSheet(\"QSpinBox{\\n\"\n \" border: 2px solid rgb(52, 59, 72);\\n\"\n \" border-radius: 5px;\\n\"\n \" background-color: rgb(52, 59, 72);\\n\"\n \" color: rgb(255, 255, 255);\\n\"\n \"font: 13pt \\\"黑体\\\";}\\n\"\n \"QSpinBox:up-button{\\n\"\n \"image: url(./icons/add.png);\\n\"\n \" background-color: rgb(44, 49, 60);\\n\"\n \" subcontrol-position:right;\\n\"\n \" width:20px;\\n\"\n \" height:20px;\\n\"\n \"\\n\"\n \"}\\n\"\n \"QSpinBox:down-button{\\n\"\n \" \\n\"\n \" image: url(./icons/line.png);\\n\"\n \" background-color: rgb(44, 49, 60);\\n\"\n \" subcontrol-position:left;\\n\"\n \" width:20px;\\n\"\n \" height:20px;\\n\"\n \"}\\n\"\n \"QSPinBox:up-button:hover{\\n\"\n \" background-color: rgb(57, 65, 80);\\n\"\n \" border: 2px solid rgb(61, 70, 86);\\n\"\n \"}\\n\"\n \"QSpinBox:up-button:pressed{\\n\"\n \" background-color: rgb(35, 40, 49);\\n\"\n \" border: 2px solid rgb(43, 50, 61);\\n\"\n \"}\\n\"\n \"QSPinBox:down-button:hover{\\n\"\n \" background-color: rgb(57, 65, 80);\\n\"\n \" border: 2px solid rgb(61, 70, 86);\\n\"\n \"}\\n\"\n \"QSpinBox:down-button:pressed{\\n\"\n \" background-color: rgb(35, 40, 49);\\n\"\n \" border: 2px solid rgb(43, 50, 61);\\n\"\n \"}\")\n self.spinbox_num_2.setAlignment(QtCore.Qt.AlignCenter)\n self.spinbox_num_2.setMinimum(1)\n self.spinbox_num_2.setMaximum(7)\n self.spinbox_num_2.setObjectName(\"spinbox_num_2\")\n\n self.retranslateUi(self)\n QtCore.QMetaObject.connectSlotsByName(self)\n\n self.label_cv.setScaledContents(True)\n\n self.btn_hide.hide()\n self.btn_show.show()\n self.label_cv.hide()\n self.label_p2.hide()\n self.label_p3.hide()\n self.label_user.hide()\n self.label_people.hide()\n self.btn_play.hide()\n self.btn_pause.hide()\n self.btn_stop.hide()\n self.label_single.hide()\n self.spinbox_num.hide()\n 
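# These detection controls start hidden; show_frame() reveals them once a source is chosen.\n        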
self.btn_num.hide()\n        self.spinbox_num_2.hide()\n        self.label_setting_2.hide()\n\n        self.label_2.hide()\n        self.btn_GPUoff.hide()\n        self.btn_GPUon.hide()\n        self.btn_setting_close.hide()\n        self.btn_history.hide()\n        self.btn_history1.hide()\n        self.label_setting_1.hide()\n\n        self.btn_show.show()\n        self.btn_hide.hide()\n\n        self.btn_GPUon.clicked.connect(self.GPUoff)\n        self.btn_GPUoff.clicked.connect(self.GPUon)\n        self.btn_setting.clicked.connect(self.show_setting)\n        self.btn_setting_close.clicked.connect(self.hide_setting)\n        self.btn_close.clicked.connect(self.exit)\n        self.btn_min.clicked.connect(self.mini)\n        self.btn_hide.clicked.connect(self.hide_frame)\n        self.btn_show.clicked.connect(self.show_frame)\n        self.btn_video.clicked.connect(self.getVideoInfo)\n        self.btn_image.clicked.connect(self.getImageInfo)\n        self.btn_camera.clicked.connect(self.getCamera)\n        self.btn_history.clicked.connect(self.history)\n        self.btn_history1.clicked.connect(self.historyClean)\n        self.btn_num.clicked.connect(self.singleDetect)\n        self.btn_pause.clicked.connect(self.pause)\n        self.btn_play.clicked.connect(self.play)\n        self.btn_stop.clicked.connect(self.stop)\n\n        self.threshold = self.spinbox.value() # confidence threshold\n        self.detectnum = self.spinbox_num_2.value() # number of videos to detect\n\n    def retranslateUi(self, Form):\n        _translate = QtCore.QCoreApplication.translate\n        self.setWindowTitle(_translate(\"Form\", \"self\"))\n        self.label_name.setText(_translate(\"Form\", \"基于百度飞桨的单/多镜头行人追踪\"))\n        self.label_team.setText(_translate(\"Form\", \"——随便什么都队\"))\n        self.label_single.setText(_translate(\"Form\", \"单独追踪\"))\n        self.btn_num.setText(_translate(\"Form\", \"选中\"))\n        self.label_setting_1.setText(_translate(\"Form\", \"开启GPU\"))\n        self.label_setting_2.setText(_translate(\"Form\", \"检测视频个数\"))\n        self.btn_history.setText(_translate(\"Form\", \"历史回放\"))\n        self.btn_history1.setText(_translate(\"Form\", \"清除历史记录\"))\n\n    def mousePressEvent(self, e):\n        if e.button() == Qt.LeftButton:\n            self.m_drag = True\n            self.m_DragPosition = e.globalPos() - self.pos()\n            e.accept()\n\n    def mouseReleaseEvent(self, e):\n        if e.button() == Qt.LeftButton:\n            self.m_drag = False\n\n    def mouseMoveEvent(self, e):\n        try:\n            if Qt.LeftButton and self.m_drag:\n                self.move(e.globalPos() - self.m_DragPosition)\n                e.accept()\n        except:\n            pass\n\n    def exit(self): # quit the application\n        def Thread():\n            for i in reversed(range(0, 11)):\n                self.setWindowOpacity(i / 10)\n                time.sleep(0.02)\n            os._exit(-1)\n\n        Thread = threading.Thread(target=Thread)\n        Thread.start()\n\n    def mini(self): # minimize the window\n        def Thread():\n            for i in reversed(range(0, 11)):\n                self.setWindowOpacity(i / 10)\n                time.sleep(0.02)\n            self.showMinimized()\n            self.setWindowOpacity(1)\n\n        Thread = threading.Thread(target=Thread)\n        Thread.start()\n\n    def hide_frame(self):\n        self.anim = QPropertyAnimation(self.frame, b\"geometry\")\n        self.anim.setDuration(200)\n        self.anim.setStartValue(QRect(339, 14, 891, 611))\n        self.anim.setEndValue(QRect(10, 14, 1, 611))\n        self.anim.start()\n        self.btn_hide.hide()\n        self.btn_show.show()\n        self.label_cv.hide()\n        self.label_p2.hide()\n        self.label_p3.hide()\n        self.label_user.hide()\n        self.label_people.hide()\n        self.btn_play.hide()\n\n        self.btn_pause.hide()\n        self.btn_stop.hide()\n        self.label_single.hide()\n        self.spinbox_num.hide()\n        self.btn_num.hide()\n\n    def show_frame(self):\n        self.anim = QPropertyAnimation(self.frame, b\"geometry\")\n        self.anim.setDuration(200)\n        self.anim.setStartValue(QRect(10, 14, 1, 611))\n        self.anim.setEndValue(QRect(339, 14, 891, 611))\n        self.anim.start()\n        self.btn_hide.show()\n        
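# Reveal the preview widgets now that the panel has expanded.\n        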
self.btn_show.hide()\n        self.label_cv.show()\n\n        self.label_p2.show()\n        self.label_p3.show()\n        self.label_user.show()\n        self.label_people.show()\n        self.btn_play.show()\n        self.btn_pause.show()\n        self.btn_stop.show()\n        self.label_single.show()\n        self.spinbox_num.show()\n        self.btn_num.show()\n\n    def show_setting(self):\n        # self.anim2 = QPropertyAnimation(self.frame, b\"geometry\")\n        # self.anim2.setDuration(200)\n        # self.anim2.setStartValue(QRect(40, 550, 35, 35))\n        # self.anim2.setEndValue(QRect(50, 150, 271, 341))\n        # self.anim2.start()\n        self.label_2.show()\n        if self.setting_flag == 1:\n            self.btn_GPUon.show()\n        else:\n            self.btn_GPUoff.show()\n        self.btn_setting_close.show()\n        self.btn_history.show()\n        self.label_setting_2.show()\n        self.label_setting_1.show()\n        self.spinbox_num_2.show()\n        self.btn_history1.show()\n        self.btn_show.setEnabled(False)\n        self.btn_hide.setEnabled(False)\n        self.btn_camera.setEnabled(False)\n        self.btn_image.setEnabled(False)\n        self.btn_video.setEnabled(False)\n        self.btn_num.setEnabled(False)\n        self.btn_play.setEnabled(False)\n        self.btn_pause.setEnabled(False)\n        self.btn_stop.setEnabled(False)\n        self.spinbox.setEnabled(False)\n        self.spinbox_num.setEnabled(False)\n        self.btn_setting.setEnabled(False)\n\n    def hide_setting(self):\n        global index\n        # self.anim1 = QPropertyAnimation(self.frame, b\"geometry\")\n        # self.anim1.setDuration(200)\n        # self.anim1.setStartValue(QRect(50, 150, 271, 341))\n        # self.anim1.setEndValue(QRect(40, 550, 35, 35))\n        # self.anim1.start()\n        self.detectnum = self.spinbox_num_2.value() # number of videos to detect\n        index = self.detectnum\n        self.label_2.hide()\n        self.btn_GPUoff.hide()\n        self.btn_GPUon.hide()\n        self.btn_setting_close.hide()\n        self.btn_history.hide()\n        self.btn_history1.hide()\n        self.spinbox_num_2.hide()\n        self.label_setting_2.hide()\n        self.label_setting_1.hide()\n        self.btn_show.setEnabled(True)\n        self.btn_hide.setEnabled(True)\n        self.btn_camera.setEnabled(True)\n        self.btn_image.setEnabled(True)\n        self.btn_video.setEnabled(True)\n        self.btn_num.setEnabled(True)\n        self.btn_play.setEnabled(True)\n        self.btn_pause.setEnabled(True)\n        self.btn_stop.setEnabled(True)\n        self.spinbox.setEnabled(True)\n        self.spinbox_num.setEnabled(True)\n        self.btn_setting.setEnabled(True)\n        self.videoName = []\n        global video_list\n        video_list =[]\n\n    def GPUon(self): # enable the GPU\n        global is_open\n        is_open = 1\n        self.btn_GPUoff.hide()\n        self.btn_GPUon.show()\n        self.setting_flag = True\n        # is_open == 1 means the GPU is enabled (the default); 0 means it is disabled\n\n    def GPUoff(self): # disable the GPU\n        global is_open\n        is_open = 0\n        self.btn_GPUon.hide()\n        self.btn_GPUoff.show()\n        self.setting_flag = False\n\n    def getImageInfo(self):\n        global imgPath\n        _translate = QtCore.QCoreApplication.translate\n        self.show_frame()\n        imgPath, imgType = QFileDialog.getOpenFileName(self, \"打开图片\", \"\", \"*.jpg;;*.png;;All Files(*)\")\n        if imgPath!='':\n            self.work4.start()\n            time.sleep(8)\n            self.label_cv.setPixmap(QPixmap('./frame/00000.jpg'))\n            self.label_people.setText(_translate(\"Form\", \"人数:\"+ people_num1()))\n\n    def getVideoInfo(self):\n        global video_list\n        self.show_frame()\n        if self.detectnum !=1 :\n            self.detectnum -= 1\n        else:\n            self.btn_video.setEnabled(False)\n        imgName, imgType = QFileDialog.getOpenFileName(self, \"打开视频\", \"\", \"*.mp4;;*.AVI;;*.rmvb;;All Files(*)\")\n        self.videoName.append(imgName)\n        video_list = self.videoName\n        #------------------------------------------------\n\n    def getCamera(self):\n        self.show_frame()\n        self.work3.start()\n        self.work.start()\n        self.autoSubmitCloseOrder()\n\n    def pause(self):\n        pass\n\n    def 
start_camera(self):\n        re_start()\n        os.system('python delet.py')\n        start_camera()\n\n    def start_video(self):\n        global video_list, index\n        if index == 1:\n            start_video1(self.threshold, video_list[0])\n        if index == 2:\n            start_video2(self.threshold, video_list[0], video_list[1])\n        if index == 3:\n            start_video3(self.threshold, video_list[0], video_list[1], video_list[2])\n        if index == 4:\n            start_video4(self.threshold, video_list[0], video_list[1], video_list[2], video_list[3])\n        if index == 5:\n            start_video5(self.threshold, video_list[0], video_list[1], video_list[2], video_list[3], video_list[4])\n        if index == 6:\n            start_video6(self.threshold, video_list[0], video_list[1], video_list[2], video_list[3], video_list[4], video_list[5])\n        if index == 7:\n            start_video7(self.threshold, video_list[0], video_list[1], video_list[2], video_list[3], video_list[4], video_list[5],video_list[6])\n\n\n    def start_image(self):\n        global imgPath\n        start_image(imgPath, self.threshold)\n\n\n\n    def show_image(self,i):\n        _translate = QtCore.QCoreApplication.translate\n        global num\n        self.label_cv.setPixmap(QPixmap('./frame/' + str(i).zfill(5) + '.jpg'))\n        conn = sqlite3.connect(\"people_num.db\")\n        cur = conn.cursor()\n        cursor = conn.execute(\"SELECT * from num where rowid==%d \"%int(i))\n        for row in cursor:\n            num = row[0]\n        conn.commit()\n        self.label_people.setText(_translate(\"Form\", \"人数:\"+str(num)))\n        cur.close()\n        conn.close()\n\n\n    def autoSubmitCloseOrder(self):\n        second = 0\n        while True:\n            QApplication.processEvents()\n            time.sleep(0.1)\n            second += 1\n            if second % 10 == 0:\n                print(datetime.datetime.now())\n\n    def history(self): # history playback\n        self.show_frame()\n        self.work.start()\n        self.autoSubmitCloseOrder()\n\n    def historyClean(self): # clear the history\n        shutil.rmtree('frame')\n        os.mkdir('frame')\n        conn = sqlite3.connect(\"people_num.db\")\n        cur = conn.cursor()\n        sql = '''\n        delete from \"num\";\n        '''\n        cur.execute(sql)\n        conn.commit()\n\n        cur.close()\n        conn.close()\n\n    def singleDetect(self): # track a single pedestrian\n        one_num = self.spinbox_num.value() # one_num is the ID entered by the user\n        find_one(one_num)\n\n\n    def play(self):\n        re_start()\n        os.system('python delet.py')\n        self.work2.start()\n        self.work.start()\n        self.autoSubmitCloseOrder()\n\n    def stop(self):\n        stop()\n        self.work.terminate()\n\nimport sys\n\ndef is_true():\n    global is_open\n    if is_open == 1:\n        return True\n    else:\n        return False\n\ndef main():\n    app = QtWidgets.QApplication(sys.argv)\n    gui = Ui_self()\n    gui.show()\n    sys.exit(app.exec_())\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"Rinnshi7/Single-multi-lens-pedestrian-tracking-system-based-on-PaddlePaddle","sub_path":"ui.py","file_name":"ui.py","file_ext":"py","file_size_in_byte":50486,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"72455394199","text":"#!/usr/bin/env python3\n\nimport re\n#2018-06-04,Italy,Netherlands,1,1,Friendly,Turin,Italy,FALSE\npattern= re.compile(r'^(\\d{4,4})-(.*?),(.+?)(\\d+),(\\d+).*$')\n\ni=0\nj=0\n\nf= open(\"resultados.csv\", \"r\", encoding='utf-8')\n#print(type(f))\nfor line in f:\n    res = re.match(pattern, line)\n\n    if res:\n        t=int(res.group(4))+int(res.group(5))\n        if t>20:\n            j=1+j\n            #print(t)\n            print (line)\n\n        #print (res.group(4))\n        i=1+i\n\nprint(i)\nprint(j)\nf.close()\n","repo_name":"jolarteu/curso_expresiones","sub_path":"python_regex.py","file_name":"python_regex.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"}
+{"seq_id":"73476541076","text":"# encoding=utf-8\nimport os\n\nimport numpy as np\nfrom tqdm import tqdm\nfrom torchvision import transforms\nfrom torchvision.datasets import MNIST\nfrom torch.utils.data import DataLoader\nfrom torch import nn, optim, device, cuda, save, load\n\nimport valid\nfrom model import MNISTModel\n\n# Select the device\ndevice = device('cuda' if cuda.is_available() else 'cpu')\n# Instantiate the model\nmodel = MNISTModel()\n# Move the model to the device\nmodel = model.to(device)\n# Define the loss function\ncriterion = nn.CrossEntropyLoss()\n\n# Define the optimizer\noptimizer = optim.Adam(model.parameters(), lr=0.001)\n# Load a previously trained model and optimizer to resume training\nbest_model_path = './models/mnist_best_model.pkl'\nlast_model_path = './models/mnist_last_model.pkl'\nbest_optimizer_path = './models/mnist_best_optimizer.pkl'\nlast_optimizer_path = './models/mnist_last_optimizer.pkl'\nif os.path.exists(last_model_path):\n    model.load_state_dict(load(last_model_path))\n    optimizer.load_state_dict(load(last_optimizer_path))\n# Load the dataset\ntransform = transforms.Compose([\n    transforms.ToTensor(),\n    transforms.Normalize((0.1307,), (0.3081,))\n])\ntrain_set = MNIST(root='../../data', train=True, download=True, transform=transform)\n\n\ndef train(epoch):\n    # Dynamically adjust the learning rate\n    if epoch % 5 == 0:\n        optimizer.param_groups[0]['lr'] *= 0.1\n    # total_loss\n    epoch_loss = []\n    # Wrap the loader in a progress bar\n    train_loader = DataLoader(train_set, batch_size=4, shuffle=True)\n    train_loader = tqdm(train_loader, total=len(train_loader))\n    model.train()\n\n    # Train the model\n    for images, labels in train_loader:\n        # Move the batch to the device\n        images = images.to(device)\n        labels = labels.to(device)\n        # Zero the gradients\n        optimizer.zero_grad()\n        # Predict\n        outputs = model(images)\n        # Compute the loss\n        loss = criterion(outputs, labels)\n        epoch_loss.append(loss.item())\n        # Backpropagate\n        loss.backward()\n        # Update the parameters\n        optimizer.step()\n        # # Print the loss\n        # train_loader.set_description(f'loss: {loss.item():.4f}')\n\n    save(model.state_dict(), last_model_path)\n    save(optimizer.state_dict(), last_optimizer_path)\n    mean_loss = np.mean(epoch_loss)\n    return mean_loss, model, optimizer\n\n\ndef save_model_and_optimizer(model, optimizer, model_path, optimizer_path):\n    save(model.state_dict(), model_path)\n    save(optimizer.state_dict(), optimizer_path)\n\n\ndef main():\n    high_accuracy = 0\n    epochs = 10\n    for epoch in range(1, epochs+1):\n        epoch_loss, model, optimizer = train(epoch)\n        epoch_accuracy = valid.valid_succeed()\n        if epoch_accuracy > high_accuracy:\n            high_accuracy = epoch_accuracy\n            # Save the best model\n            save(model.state_dict(), best_model_path)\n            # Save the best optimizer state\n            save(optimizer.state_dict(), best_optimizer_path)\n        print(f'Epoch {epoch} finished: loss {epoch_loss:.4f}, accuracy {epoch_accuracy:.4f}, best accuracy so far {high_accuracy:.4f}')\n    print(f'{\"-\" * 30} Training complete {\"-\" * 30}')\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"xiangnan0811/torchTest","sub_path":"scripts/MNIST/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3115,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"36854188501","text":"from __future__ import unicode_literals\nfrom django.http import HttpResponse\nfrom django.shortcuts import render\nimport json\nimport requests\nfrom django.views.decorators.csrf import csrf_exempt\nfrom .models import carts\n\n\n### This function is inserting the data into our table.\ndef cart_data_insert(customerid, product_id, quantity, price):\n    cart_data = carts(customerid=customerid, product_id=product_id, quantity=quantity, price=price)\n    cart_data.save()\n    return 1\n\n\n### This function will get the data from the front end.\n@csrf_exempt\ndef cart_regis(request):\n    
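# Illustrative request payload (field names match the POST.get calls below; the values are made up):\n    #   {\"Customer Id\": \"1\", \"Product Id\": \"101\", \"Quantity\": \"2\"}\n    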
# val1 = json.loads(request.body)\n ### This is for reading the inputs from JSON.\n customerid = request.POST.get(\"Customer Id\")\n product_id = request.POST.get(\"Product Id\")\n quantity = request.POST.get(\"Quantity\")\n resp = {}\n url = 'http://127.0.0.1:8000/getcustomerinfo/'+customerid+\"/\"\n headers = {'Content-Type': 'application/json'}\n response = requests.get(url, headers=headers)\n val1 = json.loads(response.content.decode('utf-8'))\n if val1 == \"Customer exist\":\n url_book = 'http://127.0.0.1:8004/book_info/'+product_id+\"/\"\n url_clothe = 'http://127.0.0.1:8006/clothe_info/'+product_id+\"/\"\n url_shoe = 'http://127.0.0.1:8007/shoe_info/'+product_id+\"/\"\n headers = {'Content-Type': 'application/json'}\n response = requests.get(url_book, headers=headers)\n val1 = json.loads(response.content.decode('utf-8'))\n response = requests.get(url_clothe, headers=headers)\n val2 = json.loads(response.content.decode('utf-8'))\n response = requests.get(url_shoe, headers=headers)\n val3 = json.loads(response.content.decode('utf-8'))\n status1 = val1['status']\n status2 = val2['status']\n status3 = val3['status']\n if status1==\"Success\" :\n ### After all validation, it will call the data_insertfunction.\n price1= int(val1['data']['Price'])*int(quantity)\n respdata = cart_data_insert(customerid, product_id, quantity, price1)\n ### If it returns value then will show success.\n if respdata:\n resp['status'] = 'Success'\n resp['status_code'] = '200'\n resp['message'] = 'Cart is added Successfully.'\n elif status2==\"Success\" :\n price2= int(val2['data']['Price'])*int(quantity)\n ### After all validation, it will call the data_insertfunction.\n respdata = cart_data_insert(customerid, product_id, quantity, price2)\n ### If it returns value then will show success.\n if respdata:\n resp['status'] = 'Success'\n resp['status_code'] = '200'\n resp['message'] = 'Cart is added Successfully.'\n elif status3==\"Success\" :\n price3= int(val3['price'])*int(quantity)\n ### After all validation, it will call the data_insertfunction.\n respdata = cart_data_insert(customerid, product_id, quantity, price3)\n ### If it returns value then will show success.\n if respdata:\n resp['status'] = 'Success'\n resp['status_code'] = '200'\n resp['message'] = 'Cart is added Successfully.'\n else:\n resp['status'] = 'Failed'\n resp['status_code'] = '400'\n resp['message'] = 'Product does not exist.'\n ### If value is not found then it will give failed in response.\n else:\n resp['status'] = 'Failed'\n resp['status_code'] = '400'\n resp['message'] = 'User does not exist.'\n return HttpResponse(json.dumps(resp), content_type='application/json')\n### This function is used for getting the shipment status\n@csrf_exempt\ndef get_cart_data(request,uid):\n respdata = []\n resp = {}\n data = carts.objects.filter(customerid=uid)\n for val in data.values():\n respdata.append(val)\n ### It will call the shipment_data function.\n\n ### If it returns value then will show success.\n if respdata:\n resp['status'] = 'Success'\n resp['status_code'] = '200'\n resp['message'] = respdata\n ### If it is not returning any value then it will show failed response.\n else:\n resp['status'] = 'Failed'\n resp['status_code'] = '400'\n resp['message'] = 'Cart data is not available.'\n return HttpResponse(json.dumps(resp), content_type='application/json')\ndef data_update(uid, pid, quantity, price):\n try:\n cart = carts.objects.get(customerid=uid,product_id=pid)\n except carts.DoesNotExist:\n return 0\n\n if quantity is not None:\n 
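# Only overwrite the row when a new quantity was supplied; the caller recomputes the price.\n        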
cart.quantity = quantity\n        cart.price = price\n\n    cart.save()\n    return 1\n\n@csrf_exempt\ndef update_cart(request, uid, pid):\n    resp = {}\n    try:\n        cart = carts.objects.get(customerid=uid,product_id=pid)\n        old_quantity=int(cart.quantity)\n        old_price=cart.price\n    except carts.DoesNotExist:\n        resp['status'] = 'Failed'\n        resp['status_code'] = '400'\n        resp['message'] = 'Cart product does not exist.'\n        return HttpResponse(json.dumps(resp), content_type = 'application/json')\n\n    if request.method == 'PATCH':\n        data = json.loads(request.body.decode('utf-8'))\n        # Update the cart instance with the data\n        a=data.get(\"quantity\")\n        quantity = int(a)\n        price = old_price/old_quantity*quantity\n        respdata = data_update(uid, pid, a, price)\n        if respdata:\n            resp['status'] = 'Success'\n            resp['status_code'] = '200'\n            resp['message'] = 'Cart product is updated Successfully.'\n        else:\n            resp['status'] = 'Failed'\n            resp['status_code'] = '400'\n            resp['message'] = 'Unable to update Cart product, Please try again.'\n        return HttpResponse(json.dumps(resp), content_type = 'application/json')\ndef product_delete(uid,pid):\n    try:\n        cart = carts.objects.get(customerid=uid,product_id=pid)\n    except carts.DoesNotExist:\n        return 0\n\n    cart.delete()\n    return 1\n\n@csrf_exempt\ndef cart_delete_product(request, uid, pid):\n    resp = {}\n\n    if uid and pid:\n        respdata = product_delete(uid, pid)\n        if respdata:\n            resp['status'] = 'Success'\n            resp['status_code'] = '200'\n            resp['message'] = 'Cart Product is deleted Successfully.'\n        else:\n            resp['status'] = 'Failed'\n            resp['status_code'] = '400'\n            resp['message'] = 'Unable to delete Product from Cart, Please try again.'\n    else:\n        resp['status'] = 'Failed'\n        resp['status_code'] = '400'\n        resp['message'] = 'Customer ID or Product ID is missing.'\n\n    return HttpResponse(json.dumps(resp), content_type = 'application/json')\ndef product_delete_all(uid):\n\n    cart = carts.objects.filter(customerid=uid)\n    if not cart.exists():\n        return 0\n    cart.delete()\n    return 1\n\n@csrf_exempt\ndef cart_delete(request,uid):\n    resp = {}\n\n    if uid:\n        respdata = product_delete_all(uid)\n        if respdata:\n            resp['status'] = 'Success'\n            resp['status_code'] = '200'\n            resp['message'] = 'Cart is deleted Successfully.'\n        else:\n            resp['status'] = 'Failed'\n            resp['status_code'] = '400'\n            resp['message'] = 'Unable to delete Cart, Please try again.'\n    else:\n        resp['status'] = 'Failed'\n        resp['status_code'] = '400'\n        resp['message'] = 'Customer ID is missing.'\n\n    return HttpResponse(json.dumps(resp), content_type = 'application/json')","repo_name":"krystalizes/btl_ktpm","sub_path":"cart_service/cart_model/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"19566018419","text":"import pandas as pd\n\n\ndef clean_price(price_column: pd.Series) -> pd.Series:\n    \"\"\"Function taking a pandas series containing prices to remove all characters that are not digits or '.' 
and convert values to float.\n\n Args:\n price_column (pd.Series): pandas series of price data in string format.\n\n Returns:\n pd.Series: pandas series of clean price data in float format.\n \"\"\"\n \n clean_column = price_column.replace(to_replace='[^0-9.]', value='', regex=True)\n\n float_column = pd.to_numeric(clean_column)\n\n return float_column\n\n\ndef clean_long_names(column: pd.Series, splitter: str) -> pd.Series:\n \"\"\"Function takes in the product name column and retrives the test before the first | for clarity.\n\n Args:\n product_column (pd.Series): pandas series containing the product names retrieved from facebook marketplace.\n\n Returns:\n pd.Series: pandas series with cleaned product names.\n \"\"\"\n\n product_name_series = column.str.split(splitter).str.get(0)\n\n clean_product_series = product_name_series.str.strip()\n\n return clean_product_series\n\n\ndef convert_category(category_column: pd.Series) -> pd.Series:\n \"\"\"Function taking in a pandas series and converts it to type 'category'.\n\n Args:\n category_column (pd.Series): pandas series of data to be changed to category type.\n\n Returns:\n pd.Series: pandas series of type category.\n \"\"\"\n\n column_cat_type = category_column.astype('category')\n\n return column_cat_type\n\n\ndef convert_date(date_column: pd.Series) -> pd.Series:\n \"\"\"Function to convert a column of objects with date information to datetime type.\n\n Args:\n time_column (pd.Series): pandas series of strings containing date information.\n\n Returns:\n pd.Series: pandas series containing type date time in format %d/%m/%Y.\n \"\"\"\n\n date_column = pd.to_datetime(date_column)\n formatted_time_column = date_column.dt.strftime('%d/%m/%Y')\n\n return formatted_time_column\n\n\ndef convert_integer(integer_column: pd.Series) -> pd.Series:\n \"\"\"Function to convert a column of string values containing integers to integers.\n\n Args:\n integer_column (pd.Series): pandas series of strings containing integer characters.\n\n Returns:\n pd.Series: pandas series containing type integer.\n \"\"\"\n\n integer_column = integer_column.astype('int64')\n\n return integer_column\n\n\ndef get_tabular_data(filepath: str, lineterminator: str = \",\") -> pd.DataFrame:\n \"\"\"Function to import data from a csv file and save to a pandas dataframe, dropping all rows with missing data.\n\n Args:\n filepath (str): string of the path of the file to be imported.\n lineterminator (str, optional): string to state the line terminator used in the csv file. Defaults to \",\".\n\n Returns:\n pd.DataFrame: dataframe of the csv contents.\n \n \"\"\"\n df = pd.read_csv(filepath, lineterminator=lineterminator)\n\n df.rename(columns={'create_time\\r':'create_time'}, inplace=True)\n\n return df\n\n\ndef get_and_normalise_data(file_path, lineterminator):\n \"\"\"Combines defined functions to get the data from the file and reformat it as required.\n\n Args:\n filepath (str): string of the path of the file to be imported.\n lineterminator (str, optional): string to state the line terminator used in the csv file. 
Defaults to \",\".\n\n Returns:\n pd.DataFrame: reformatted dataframe of imported data.\n \"\"\"\n tab_data = get_tabular_data(file_path, lineterminator)\n\n tab_data['price'] = clean_price(tab_data['price'])\n\n tab_data['category'] = clean_long_names(tab_data['category'], '/')\n tab_data['category'] = convert_category(tab_data['category'])\n tab_data['location'] = convert_category(tab_data['location'])\n\n tab_data['product_name'] = clean_long_names(tab_data['product_name'], '|')\n\n tab_data['page_id'] = convert_integer(tab_data['page_id'])\n\n tab_data['create_time'] = convert_date(tab_data['create_time'])\n\n tab_data.rename(columns = {'id':'product_id'}, inplace = True)\n\n return tab_data\n\n\nif __name__ == \"__main__\":\n file_path = \"Products.csv\"\n lineterminator = \"\\n\"\n\n get_and_normalise_data(file_path, lineterminator)","repo_name":"shrutiturner/facebook-marketplace-search","sub_path":"clean_tabular_data.py","file_name":"clean_tabular_data.py","file_ext":"py","file_size_in_byte":4273,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"73702638036","text":"import numpy as np\n\nfrom mdlib import (\n Bead,\n BeadType,\n ForceField,\n HarmonicBond,\n LangevinIntegrator,\n LinearChain,\n LJWCA,\n Simulation,\n System,\n Topology,\n WallLJWCA\n)\n\n# create block copolymer\nbead_type_a = BeadType(\"A\")\nbead_type_b = BeadType(\"B\")\nbeads = [Bead(\"A\", bead_type_a)]*5 + [Bead(\"B\", bead_type_b)]*5\nblock_copolymer = LinearChain(\"block_copolymer\", beads)\nblock_copolymer.packmol_instructions.append(\"center\")\nblock_copolymer.packmol_instructions.append(\"fixed 200. 50. 50. 0. 0. 0.\")\n\n# create topology\ntopology = Topology()\ntopology.add_chain(block_copolymer, n=1)\ntopology.box_lengths = np.array([40., 10., 10.])\ntopology.periodicity[0] = False\n\n# create LJ potential\nlj = LJWCA()\nlj.add_interaction(\"A\", \"A\")\nlj.add_interaction(\"A\", \"B\")\nlj.add_interaction(\"B\", \"B\")\n\n# create harmonic potentials\nharmonic_bond = HarmonicBond()\nharmonic_bond.add_interaction(\"A\", \"A\")\nharmonic_bond.add_interaction(\"A\", \"B\")\nharmonic_bond.add_interaction(\"B\", \"B\")\n\n# create wall potentials\nwall_ljwca = WallLJWCA()\nwall_ljwca.add_interaction(\"A\", eps=1.0, upper_bound=topology.box_lengths[0], cut=7.5, lambda_lj=1.0, lambda_wca=0.0)\nwall_ljwca.add_interaction(\"B\", eps=1.0, upper_bound=topology.box_lengths[0], lambda_lj=0.0, lambda_wca=1.0)\n\n# create force field and system\nforce_field = ForceField()\nforce_field.add_potential(lj)\nforce_field.add_potential(harmonic_bond)\nforce_field.add_potential(wall_ljwca)\nsystem = System(topology, force_field)\n\n# create integrator\nintegrator = LangevinIntegrator(step_size=0.002)\n\n# create simulation and initialize\nsimulation = Simulation(system, integrator)\nsimulation.initialize()\nsimulation.minimize_energy()\nsimulation.system.topology.to_pdb(\"block_copolymer_single_chain_wall_initial.pdb\",\n positions=simulation.state.positions)\n\n# set up reporting\nsimulation.thermo_file = \"block_copolymer_single_chain_wall_thermo.csv\"\nsimulation.thermo_frequency = 10000\nsimulation.thermo_verbose = True\nsimulation.traj_file = \"block_copolymer_single_chain_wall_traj.pdb\"\nsimulation.traj_frequency = 10000\nsimulation.traj_min_image = True\n\n# 
run\nsimulation.step(10000000)\n","repo_name":"charles9li/che210dproject","sub_path":"examples/block_copolymer_single_chain_wall.py","file_name":"block_copolymer_single_chain_wall.py","file_ext":"py","file_size_in_byte":2179,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"3607085007","text":"import os\nimport datetime\nimport json\nimport sys\nimport traceback\nfrom tbe.common.rl_bank.bank_manager import set_current_op_name\nfrom te.platform.cce_conf import te_set_version\nfrom te_fusion.fusion_util import fusion_op, dump_fusion_json\nfrom te_fusion.parallel_compilation import init_multi_process_env, get_finished_compilation_task, \\\n deinit_multi_process_env, start_ga_multi_process\nfrom te_fusion.compile_task_manager import dispatch_autotune_task, import_py_module\nimport auto_tune\nfrom schedule_search.rl_online_tune import rl_tune_init, dispatch_fusion_tune_task, dispatch_single_tune_task, \\\n rl_tune_deinit\nfrom mindspore import log\nfrom .compiler import build_op\nfrom .re_construct_json import single_to_fusion, fusion_to_fusion\n\nTE_LOG_LEVEL = [\"DEBUG\", \"INFO\", \"WARNING\", \"ERROR\"]\nRL_COMPILE = \"RL_COMPILE\"\nRL_OFFLINE = \"RL_OFFLINE\"\nRL_ONLINE = \"RL_ONLINE\"\nOP_BUILD = \"compile\"\n\nPLATFORM_FLAG = [\"ascend310\", \"ascend910\", \"Hi3796CV300ES\", \"ascend710\", \"ascend610\", \"Hi3796CV300CS\", \"SD3403\"]\n\n\nclass TbeTuner:\n \"\"\"tbe tuner for ga tune or rl tune\"\"\"\n\n def __init__(self, offline_tune, tune_mode):\n self.offline_tune = offline_tune\n self.tune_init = False\n self.rl_init = False\n self.multi_init = False\n self.offline_dump_path = \"./tune_dump\"\n if os.environ.get(\"TUNE_DUMP_PATH\") is not None:\n self.offline_dump_path = os.getenv(\"TUNE_DUMP_PATH\", \"\")\n self._creating_custom_path(tune_mode)\n self.fusion_need_sync = 0\n self.module_list = {}\n\n def init_tune_interface(self, json_str, process_num):\n \"\"\"\n Initialize tuner interface\n :param json_str: ori json\n :param process_num : process num for tuner\n :return: bool True or False\n \"\"\"\n json_info = json.loads(json_str)\n soc_info = self.get_soc_info(json_info)\n cur_cce_product_params = te_set_version(*soc_info)\n if cur_cce_product_params is None:\n log.warning(\"Set Soc Info failed.\")\n tune_mode = self.get_tune_mode(json_info)\n ret = self.parallel_compilation_init(soc_info, tune_mode, process_num)\n if not ret:\n log.error(\"Init parallel compilation env failed\")\n return False\n\n return True\n\n def deinit(self):\n \"\"\"\n DeInitialize tuner interface\n \"\"\"\n if self.multi_init:\n deinit_multi_process_env()\n if self.rl_init:\n rl_tune_deinit()\n\n def get_tune_mode(self, json_info):\n \"\"\"\n Get the corresponding tune mode from op json and env info\n :param json_info: ori json\n :return: NO_TUNE RL_TUNE GA_TUNE or RL,GA\n \"\"\"\n tune_mode = json_info[\"SocInfo\"][\"autoTilingMode\"]\n if self.offline_tune:\n tune_mode = \"RL\"\n return tune_mode\n\n def __directory_creation(self, path, concat_path):\n \"\"\"\n Create directory\n \"\"\"\n path = os.path.join(path, concat_path)\n if not os.path.isdir(path):\n os.makedirs(path, 0o750)\n return path\n\n def __creating_default_custom_path(self, tune_mode, base_custom_path):\n \"\"\"\n Create default custom path\n \"\"\"\n base_custom_path = self.__directory_creation(base_custom_path, \"data\")\n tune_flag = []\n if \"RL\" in tune_mode:\n tune_flag.append(\"rl\")\n if \"GA\" in tune_mode:\n tune_flag.append(\"tiling\")\n\n for tune_path in 
tune_flag:\n real_path = self.__directory_creation(base_custom_path, tune_path)\n for soc_version in PLATFORM_FLAG:\n final_path = self.__directory_creation(real_path, soc_version)\n final_path = self.__directory_creation(final_path, \"custom\")\n\n def _creating_custom_path(self, tune_mode):\n \"\"\"\n Create custom path\n \"\"\"\n if \"NO_TUNE\" in tune_mode:\n return\n\n base_custom_path = os.getenv(\"TUNE_BANK_PATH\", None)\n tune_bank_flag = True\n if not base_custom_path:\n base_custom_path = os.path.dirname(os.path.realpath(auto_tune.__file__))\n base_custom_path = os.path.realpath(os.path.join(base_custom_path, \"../../../\"))\n tune_bank_flag = False\n\n if not os.path.isdir(base_custom_path):\n log.error(\"Check whether the tuning path [{}] exists.\".format(base_custom_path))\n return\n if not os.access(base_custom_path, os.R_OK | os.W_OK | os.X_OK):\n log.error(\"Check whether the permission on the tuning path [{}] is correct.\".format(base_custom_path))\n return\n\n if not tune_bank_flag:\n self.__creating_default_custom_path(tune_mode, base_custom_path)\n\n def get_soc_info(self, json_info):\n \"\"\"\n Get soc info\n :param json_info: ori json\n :return: soc info\n \"\"\"\n soc_param = {}\n soc_param[\"op_impl_mode\"] = json_info[\"SocInfo\"][\"op_impl_mode\"]\n soc_param[\"op_debug_level\"] = json_info[\"SocInfo\"][\"op_debug_level\"]\n soc_param[\"op_impl_mode_list\"] = json_info[\"SocInfo\"][\"op_impl_mode_list\"]\n soc_param[\"op_debug_dir\"] = ''\n soc_param[\"vector_fp_ceiling\"] = ''\n soc_param['mdl_bank_path'] = ''\n soc_param['op_bank_path'] = ''\n\n soc_info = []\n soc_info.append(json_info[\"SocInfo\"][\"socVersion\"])\n soc_info.append(json_info[\"SocInfo\"][\"coreType\"])\n soc_info.append(json_info[\"SocInfo\"][\"coreNum\"])\n soc_info.append(json_info[\"SocInfo\"][\"l1Fusion\"])\n soc_info.append(json_info[\"SocInfo\"][\"l2Mode\"])\n soc_info.append(json_info[\"SocInfo\"][\"l2Fusion\"])\n soc_info.append(soc_param)\n\n return soc_info\n\n def parallel_compilation_init(self, soc_info, tune_mode, process_num):\n \"\"\"\n Initialize parallel compilation framework for tuner\n :param soc_info: soc info\n :param tune_mode: tuner mode\n :param process_num : process num for tuner\n :return: bool True or False\n \"\"\"\n env_count = process_num\n if \"TE_PARALLEL_COMPILER\" in os.environ:\n env_count = os.getenv(\"TE_PARALLEL_COMPILER\")\n log.info(\"TE_PARALLEL_COMPILER is set to {}\".format(env_count))\n if int(env_count) > process_num:\n env_count = process_num\n log.info(\"change process count to {}\".format(process_num))\n os.environ[\"TE_PARALLEL_COMPILER\"] = str(int(env_count))\n pid_str = os.getpid()\n time_str = datetime.datetime.now().strftime('%Y%m%d_%H%M%S%f')[:-3]\n pid_ts = \"{}_pid{}\".format(time_str, pid_str)\n\n embedding = False\n enable_event = False\n te_log_level = os.environ.get(\"TE_LOGLEVEL\")\n glog_level = os.environ.get(\"GLOG_v\")\n if glog_level is not None and te_log_level is None:\n os.environ[\"TE_LOGLEVEL\"] = TE_LOG_LEVEL[int(glog_level)]\n global_loglevel = int(glog_level)\n elif glog_level is None and te_log_level is None:\n os.environ[\"TE_LOGLEVEL\"] = TE_LOG_LEVEL[2]\n global_loglevel = 3\n else:\n # pylint: disable=no-else-return\n if te_log_level.isdigit() and int(te_log_level) >= len(TE_LOG_LEVEL):\n log.error(f\"Invalid environment TE_LOGLEVEL, the value should be in [0, 4) if it is a digit, but got : \"\n f\"{te_log_level}\")\n return False\n elif te_log_level.upper() not in TE_LOG_LEVEL:\n log.error(f\"Invalid environment 
TE_LOGLEVEL, the value should be one of [DEBUG, INFO, WARNING, ERROR] \"\n f\"if it is a string, but got :{te_log_level}\")\n return False\n global_loglevel = int(te_log_level) if te_log_level.isdigit() else TE_LOG_LEVEL.index(te_log_level.upper())\n ret = init_multi_process_env(embedding, soc_info, tune_mode, global_loglevel, enable_event, pid_ts)\n if ret is None:\n log.error(\"Init multiprocess env failed\")\n return False\n self.multi_init = True\n process_count = ret[0]\n log.info(\"Init multiprocess env success with {} process\".format(process_count))\n if \"RL\" in tune_mode:\n res_queue = ret[1]\n live_checker = ret[2]\n termin_event = ret[3]\n ret = rl_tune_init(soc_info, res_queue, live_checker, termin_event, global_loglevel, pid_ts)\n if not ret:\n log.error(\"RL env init failed!\")\n return False\n self.rl_init = True\n log.info(\"RL Tune init success.\")\n if \"GA\" in tune_mode:\n start_ga_multi_process(tune_mode)\n log.info(\"GA Tune init success.\")\n return True\n\n def sync_fusion_env(self):\n \"\"\"\n Sync fusion env\n :return: None\n \"\"\"\n if self.fusion_need_sync == 0:\n return\n\n module_using = []\n for key, value in self.module_list.items():\n if value > 0:\n module_using.append(str(key))\n self.module_list[key] = 0\n\n module_str = \",\".join(module_using)\n import_py_module(module_str)\n self.fusion_need_sync = 0\n\n def rl_tune(self, task_id, op_json):\n \"\"\"\n RL tune for single op and fusion op\n :param task_id: task id for this op to tune\n :param op_json: op's info\n :return: tune result\n \"\"\"\n json_info = json.loads(op_json)\n if \"fusion_op\" in json_info:\n self.sync_fusion_env()\n ret = self.fusion_rl_tune(task_id, json_info)\n else:\n ret = self.single_rl_tune(task_id, json_info)\n return ret\n\n def ga_tune(self, task_id, op_json):\n \"\"\"\n GA tune for single op and fusion op\n :param task_id: task id for this op to tune\n :param op_json: op's info\n \"\"\"\n json_info = json.loads(op_json)\n if \"fusion_op\" in json_info:\n self.sync_fusion_env()\n self.fusion_ga_tune(task_id, json_info)\n else:\n self.single_ga_tune(task_id, json_info)\n\n def single_rl_tune(self, task_id, json_info):\n \"\"\"\n RL tune for single op\n :param task_id: task id for this op to tune\n :param json_info: op's info\n :return: tune result\n \"\"\"\n if self.offline_tune:\n converted_json = single_to_fusion(json.dumps(json_info), tune_mode=\"RL\")\n op_type = json_info['op_info']['name']\n kernel_name = json_info['op_info']['kernel_name']\n full_name = json_info['op_info']['full_name']\n tune_mode = \"RL\"\n set_current_op_name(kernel_name)\n # todo build with build_single_op_from_c\n base_kernel = './kernel_meta/' + kernel_name + '.o'\n job_type = RL_COMPILE\n compile_info = None\n try:\n compile_info, op_args, op_module_name = build_op(OP_BUILD, json.dumps(json_info), tune_mode)\n # pylint: disable=broad-except\n except Exception:\n exc_type, exc_value, _ = sys.exc_info()\n log.error(\n \"exc_type:{}, exc_value:{}, exc_traceback:{}\".format(exc_type, exc_value, traceback.format_exc()))\n return False, job_type, compile_info\n if self.offline_tune:\n job_type = RL_OFFLINE\n dump_fusion_json(converted_json, self.offline_dump_path)\n else:\n job_type = RL_ONLINE\n graph_id = 0\n l1size = 0 # todo need to verify\n ret = dispatch_single_tune_task(graph_id, task_id, l1size, base_kernel, kernel_name, full_name,\n op_module_name + \"@\" + op_module_name, op_type, op_type, op_args)\n\n self.module_list[op_module_name] = 1\n self.fusion_need_sync += 1\n return ret, 
job_type, compile_info\n\n def fusion_rl_tune(self, task_id, json_info):\n \"\"\"\n RL tune for fusion op\n :param task_id: task id for this op to tune\n :param json_info: op's info\n :return: tune result\n \"\"\"\n if 'fusion_op' not in json_info or not json_info['fusion_op']:\n raise ValueError(\"Json string Errors, key:fusion_op not found.\")\n kernel_name = json_info[\"fusion_op\"][\"fusion_op_name\"]\n full_name = json_info[\"fusion_op\"][\"full_name\"]\n reset_op_info = json_info[\"reset_op_info\"]\n set_current_op_name(kernel_name)\n converted_json = fusion_to_fusion(json.dumps(json_info), tune_mode=\"RL\")\n job_type = RL_COMPILE\n base_kernel = './kernel_meta/' + kernel_name + '.o'\n compile_info = None\n try:\n fusion_op(converted_json, reset_op_info=reset_op_info)\n # pylint: disable=broad-except\n except Exception:\n exc_type, exc_value, _ = sys.exc_info()\n log.error(\n \"exc_type:{}, exc_value:{}, exc_traceback:{}\".format(exc_type, exc_value, traceback.format_exc()))\n return False, job_type, compile_info\n if self.offline_tune:\n job_type = RL_OFFLINE\n dump_fusion_json(converted_json, self.offline_dump_path)\n else:\n job_type = RL_ONLINE\n graph_id = 0\n l1size = 0\n ret = dispatch_fusion_tune_task(graph_id, task_id, l1size, base_kernel, kernel_name, full_name,\n converted_json)\n return ret, job_type, compile_info\n\n def fusion_ga_tune(self, task_id, json_info):\n \"\"\"\n GA tune for fusion op\n :param task_id: task id for this op to tune\n :param json_info: op's info\n \"\"\"\n if 'fusion_op' not in json_info or not json_info['fusion_op']:\n raise ValueError(\"Json string Errors, key:fusion_op not found.\")\n kernel_name = json_info[\"fusion_op\"][\"fusion_op_name\"]\n converted_json = fusion_to_fusion(json.dumps(json_info), tune_mode=\"GA\")\n graph_id = 0\n l1size = 0\n dispatch_autotune_task(graph_id, task_id, l1size, converted_json, [], kernel_name)\n\n def single_ga_tune(self, task_id, json_info):\n \"\"\"\n GA tune for single op\n :param task_id: task id for this op to tune\n :param json_info: op's info\n \"\"\"\n converted_json = single_to_fusion(json.dumps(json_info), tune_mode=\"GA\")\n graph_id = 0\n l1size = 0\n kernel_name = json.loads(converted_json)[\"fusion_op_name\"]\n dispatch_autotune_task(graph_id, task_id, l1size, converted_json, [], kernel_name)\n\n def get_finish_tasks(self):\n \"\"\"\n Get finish task from parallel compilation framework\n :return task info list\n \"\"\"\n ret = get_finished_compilation_task(0)\n return ret\n","repo_name":"lixiao-yang/LZU-MindSpore","sub_path":"mindspore/_extends/parallel_compile/tbe_compiler/tuner.py","file_name":"tuner.py","file_ext":"py","file_size_in_byte":14780,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"85"} +{"seq_id":"71718172437","text":"QUESTIONS = [\n {\n \"text\": \"What's your name?\",\n },\n {\n \"text\": \"How old are you?\",\n \"choices\": [\"-18\", \"18-25\", \"25-30\", \"30+\"]\n },\n {\n \"text\": \"How do you rate our app?\",\n \"choices\": [1, 2, 3, 4, 5],\n },\n {\n \"text\": \"How can we make it better?\"\n },\n {\n \"text\": \"Where did you hear about us?\",\n \"choices\": [\n \"Telegram\",\n \"Facebook\",\n \"Instagram\",\n \"Other\",\n ]\n },\n]\n","repo_name":"azimjohn/survey-bot","sub_path":"survey/questions.py","file_name":"questions.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"85"} +{"seq_id":"27050636756","text":"#coding:utf-8\n\nimport 
xlrd\nimport csv\n\n# xls转csv\ndef fileConvertFunction(sourceFile, targetFile):\n # 读取源文件\n mapdataList = readFile(sourceFile)\n\n # 将货位点和非货位点写入CSV文件\n csvPath = targetFile + '\\mapData.csv'\n csvFile = open(csvPath, 'w', newline='')\n try:\n # 创建并打开mapData.csv文件\n writer = csv.writer(csvFile)\n writer.writerows(mapdataList)\n finally:\n csvFile.close()\n\n # print('xls转csv文件成功')\n\ndef readFile(sourceFile):\n fileType = sourceFile[-3:]\n mapDataList = []\n\n if fileType == 'xls' or fileType == 'lsx':\n mapdatalist = xlrd.open_workbook(sourceFile)\n sheet = mapdatalist.sheet_by_index(0)\n nrow = sheet.nrows\n\n # 找到颜色列\n for i in range(sheet.ncols):\n if sheet.cell(0, i).value in ('颜色', 'Color', 'color'):\n ncol = i\n break\n\n # 将货位点和非货位点写入CSV文件\n count = 0\n for j in range(nrow):\n count += 1\n if count == 1:\n continue\n\n # 遍历xls文件取出货位点和非货位点,颜色为洋红:货架取放货点\n if sheet.cell(j, ncol).value != '':\n if sheet.cell(j, ncol).value in ('洋红', 'Magenta', 'magenta'):\n x = sheet.cell(j, ncol + 1).value\n y = sheet.cell(j, ncol + 2).value\n isMagenta = 1\n else:\n x = sheet.cell(j, ncol + 1).value\n y = sheet.cell(j, ncol + 2).value\n isMagenta = 0\n\n maplist = [x, y, isMagenta]\n mapDataList.append(maplist)\n elif fileType == 'csv':\n with open(sourceFile, 'r') as f:\n reader = csv.reader(f)\n\n count = 0\n for row in reader:\n for k in range(0, len(row)):\n if row[k] in ('颜色', 'Color', 'color'):\n ncol = k\n break\n\n count += 1\n if count == 1:\n continue\n\n if row[ncol] != '':\n x = row[ncol + 1]\n y = row[ncol + 2]\n if row[ncol] in ('洋红', 'Magenta', 'magenta'):\n isMagenta = 1\n else:\n isMagenta = 0\n maplist = [x, y, isMagenta]\n mapDataList.append(maplist)\n\n return mapDataList\n\nif __name__ == '__main__':\n sourceFile = 'D:\\DingDing\\DingDingTalk\\优衣库点位(模拟版).xls'\n # sourceFile = 'E:\\业务测试\\项目需求\\武汉新宁\\地图坐标\\SMT区域V6.1 for SIMU.csv'\n targetFile = 'E:\\地图数据\\python地图工具'\n\n fileConvertFunction(sourceFile, targetFile)","repo_name":"MeggieRong/simulateTool","sub_path":"SimulateTool/util/fileConvertFunction.py","file_name":"fileConvertFunction.py","file_ext":"py","file_size_in_byte":2869,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"70514805397","text":"from PyPDF2 import PdfWriter, PdfReader\nfrom csv import reader\nfrom operator import itemgetter\n\n# The number of rows and seats [included] (ex P16-25, Q17-Q21)\nseat_row = [\"P\", \"Q\"]\nseat_range = [(16, 24), (17, 21)]\n\n# Expand seat range into its individual cell\nseat_range_all = [[i for i in range(start, finish + 1)] for (start, finish) in seat_range]\nseat_taken = [[False for _ in range(start, finish + 1)] for (start, finish) in seat_range]\n\n# Open the pdf file of the collective tickets\ninputpdf = PdfReader(open(\"mamma-mia.pdf\", \"rb\"))\n\n# Open the csv to read in each user\ncustomer_info_file = open(\"Purchase_Product_Mamma_Mia.csv\") # append \",delimiter=',')\" if needed\ncustomer_info_reader = reader(customer_info_file)\n\n# get the header of the csv file\ncustomer_info_header = next(customer_info_reader)\nnum_customer_header = len(customer_info_header)\n\n# Find first name and last name column\ni_name = -1\ni_surname = -1\ni_quantity = -1\ni = 0\n\n# Get needed column and throw error if non-existent\nwhile i < num_customer_header:\n if (customer_info_header[i] == \"First Name\"):\n i_name = i\n if (customer_info_header[i] == \"Surname\"):\n i_surname = i\n if (customer_info_header[i] == \"Quantity\"):\n i_quantity = i\n i += 1\n\nif i_name == -1:\n 
raise Exception(\"csv does not contain \\\"First Name\\\" column\")\nif i_quantity == -1:\n raise Exception(\"csv does not contain \\\"Quantity\\\" column\")\n\n# Sort the table by number of tickets ordered\ncustomer_info_reader = sorted(customer_info_reader, key=itemgetter(i_quantity), reverse=True)\n\n# Assign pdf and attach the name at the end\n# (DOESN'T ACCOUNT THAT TWO TICKETS NEXT TO EACH OTHER (yet))\n\ncur_row = 0\nfor customer_info in customer_info_reader:\n print(customer_info)\n init_row = cur_row\n first_occurance = seat_taken[cur_row].index(False)\n quantity = int(customer_info[i_quantity])\n \n can_allocate = True\n found = False\n if int(customer_info[i_quantity]) > 1:\n # check that the row doesnt have 3 at then end,\n # if it does, try allocating a block in next row and do the check again\n # if we got to the last row and cannot do so, go back to the first row and give the continous seats\n # if there is only one seat, go to next row\n while not found:\n \n # (first_occurance >= len(seat_taken[cur_row]) - 3 and can_allocate) or (first_occurance >= len(seat_taken[cur_row]) - quantity and not can_allocate):\n \n first_occurance = seat_taken[cur_row].index(False)\n \n if (can_allocate and first_occurance < len(seat_taken[cur_row]) - 3) or (not can_allocate and first_occurance < len(seat_taken[cur_row]) - quantity):\n found = True\n \n if (first_occurance == -1 or (can_allocate and first_occurance >= len(seat_taken[cur_row]) - 3)):\n if (cur_row == len(seat_row)):\n cur_row = 0\n can_allocate = False\n else:\n cur_row += 1\n \n \n # TODO: Bug where it will be index error if you canot find 3 in row\n \n if (cur_row == init_row):\n # allocate any as long as there is two next to each other\n if int(customer_info[i_quantity]) >= len(seat_taken[cur_row]) - first_occurance:\n raise Exception(\"There is no more seats!\")\n \n page_num = sum([seat_range[i][1] - seat_range[i][0] + 1 for i in range(cur_row)]) + first_occurance\n \n for i in range(int(customer_info[i_quantity])):\n output = PdfWriter()\n output.add_page(inputpdf.pages[page_num])\n with open(\"document-%s%s-%s.pdf\" % (seat_row[cur_row], seat_range_all[cur_row][first_occurance], customer_info[i_name]), \"wb\") as outputStream:\n output.write(outputStream)\n \n seat_taken[cur_row][first_occurance] = True\n first_occurance += 1\n page_num += 1\n \n else:\n # just allocate first free\n # Throw exception if cannot find a free seat\n # TODO: return to the first one if needed\n init_row = seat_row[0]\n while (first_occurance == -1):\n cur_row += 1\n first_occurance = seat_taken[cur_row].index(False)\n if (cur_row == len(seat_row)): \n cur_row = 0\n if (cur_row == init_row):\n raise Exception(\"There is no more seats\")\n \n \n page_num = sum([seat_range[i][1] - seat_range[i][0] + 1 for i in range(cur_row)]) + first_occurance\n \n output = PdfWriter()\n output.add_page(inputpdf.pages[page_num])\n with open(\"document-%s%s-%s.pdf\" % (seat_row[cur_row], seat_range_all[cur_row][first_occurance], customer_info[i_name]), \"wb\") as outputStream:\n output.write(outputStream)\n \n seat_taken[cur_row][first_occurance] = True\n# i = 0\n\n# cur_row_i = 0\n# cur_seat_num = seat_range[cur_row_i][0]\n# for customer_info in customer_info_reader:\n# print(i)\n# for n in range(int(customer_info[i_quantity])):\n# output = PdfWriter()\n# output.add_page(inputpdf.pages[i])\n# with open(\"document-%s%s-%s.pdf\" % (seat_row[cur_row_i], cur_seat_num, customer_info[i_name]), \"wb\") as outputStream:\n# output.write(outputStream)\n# i += 1\n \n# # 
Get next seat\n# cur_seat_num += 1\n# if (cur_seat_num > seat_range[cur_row_i][1]):\n# cur_row_i += 1\n# cur_seat_num = seat_row[cur_row_i][0]","repo_name":"Tuna521/seat-distributor","sub_path":"distributor-old.py","file_name":"distributor-old.py","file_ext":"py","file_size_in_byte":5633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"6074242720","text":"import time\nimport pygame\nfrom colors import *\n\nclass Interface():\n\n break_keys = [pygame.K_ESCAPE, pygame.K_RETURN, pygame.K_SPACE]\n\n def __init__(self, grid_size, dot_size, actions, fps=0):\n \"\"\" Provides methods to get user input and draw the game to screen.\n\n Args:\n - grid_size: (tuple) x, y dimensions of play area\n - dot_size: (int) diameter of points when drawing frame, must be\n greater than 1, and probably should be even\n - actions: (dict) map of {key: action} pairs\n - fps: (int) pause for `1 / fps` seconds before returning from\n `draw_frame`, limiting max frames per second to this value\n \"\"\"\n self.grid_size = grid_size\n self.dot_size = dot_size\n self.fps = fps\n self.action_map = {}\n for label, action in actions.items():\n key = getattr(pygame, 'K_{}'.format(label))\n self.action_map[key] = action\n pygame.init()\n self.window_size = (\n self.grid_size[0] * self.dot_size + self.dot_size,\n self.grid_size[1] * self.dot_size + self.dot_size,\n )\n self.surface = pygame.display.set_mode(self.window_size)\n\n def close(self, done=False):\n \"\"\" Shutdown the game, optionally holding open the game window.\n\n Args:\n - done: (bool) if True, close the game window and exit immediately,\n otherwise blocks and waits for one of `break_keys`.\n \"\"\"\n while done == False:\n for event in pygame.event.get():\n if event.type == pygame.QUIT: # ctrl+C or closed window\n done = True\n break\n if event.type == pygame.KEYDOWN:\n ctrl_key = pygame.key.get_mods() & pygame.KMOD_CTRL\n if ctrl_key and event.key == pygame.K_c:\n done = True\n break\n if event.key in self.break_keys:\n done = True\n break\n pygame.display.quit()\n\n def draw_frame(self, apple, snake, score):\n \"\"\" Draw the current game state.\n\n Args:\n - apple: (tuple) x, y coordinates of apple\n - snake: (list) sequence of coordinates representing snake\n - score: (int) displayed in window title\n \"\"\"\n self.surface.fill(BLACK)\n self._draw_walls()\n self._draw_apple(apple)\n self._draw_snake(snake)\n pygame.display.set_caption('Score: {}'.format(score))\n pygame.display.update()\n if self.fps > 0:\n time.sleep(1 / self.fps)\n\n def get_user_input(self, excluded=[]):\n \"\"\" Get keystrokes from user. 
Returns the `action` mapped to the `key` pressed, or `None` if no valid input is taken.\n\n Args:\n - excluded: (list) sequence of values in `self.action_map` that\n will be ignored\n \"\"\"\n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN:\n if event.key not in self.action_map.keys():\n continue # not a mapped key, ignore it\n new_action = self.action_map[event.key]\n if new_action in excluded:\n continue # ignore repeated or backwards moves\n action = new_action\n return action # done taking user input\n\n def _draw_apple(self, apple):\n pygame.draw.circle(\n self.surface,\n RED,\n self._scale_coords(apple),\n self.dot_size // 2, # radius\n )\n\n def _draw_snake(self, snake):\n for i, unit in enumerate(snake):\n alpha = 1 - (i / len(snake))\n if unit == snake[0]: # check for collision with head\n alpha = 1.0 # redraw the head at this coordinate\n color = blend_colors(WHITE, GREEN, alpha)\n pygame.draw.circle(\n self.surface,\n color,\n self._scale_coords(unit),\n self.dot_size // 2, # radius\n )\n\n def _draw_walls(self):\n width, height = self.window_size\n offset = self.dot_size // 2\n pygame.draw.line(\n self.surface,\n RED,\n (offset, offset),\n (width - offset, offset),\n )\n pygame.draw.line(\n self.surface,\n RED,\n (width - offset, offset),\n (width - offset, height - offset),\n )\n pygame.draw.line(\n self.surface,\n RED,\n (width - offset, height - offset),\n (offset, height - offset),\n )\n pygame.draw.line(\n self.surface,\n RED,\n (offset, height - offset),\n (offset, offset),\n )\n\n def _scale_coords(self, coords):\n \"\"\" Scale up points by `dot_size`, and offset objects by half that \n value from the edges.\n \"\"\"\n return (\n coords[0] * self.dot_size + self.dot_size,\n coords[1] * self.dot_size + self.dot_size,\n )\n\n","repo_name":"shadetree01010100/snake-python","sub_path":"human_interface.py","file_name":"human_interface.py","file_ext":"py","file_size_in_byte":5157,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"15012488645","text":"from Crypto.Util.number import long_to_bytes\r\nimport gmpy\r\n\r\ndef common_mode_attack(n, e1, e2, c1, c2):\r\n print(\"[*] Try common mode attack\")\r\n _, s1, s2 = gmpy.gcdext(e1, e2)\r\n if s1 < 0:\r\n s1 = - s1\r\n c1 = gmpy.invert(c1, n)\r\n elif s2 < 0:\r\n s2 = - s2\r\n c2 = gmpy.invert(c2, n)\r\n\r\n m = (pow(c1,s1,n)*pow(c2,s2,n)) % n\r\n print(\"[*] Maybe success\")\r\n print(\"[!] 
flag:\",long_to_bytes(m))\r\n return 1\r\n\r\ndef main(n,e1,e2,c1,c2):\r\n if common_mode_attack(n, e1, e2, c1, c2):\r\n print(\"[-] Thanks, bye~\")\r\n return\r\n\r\n print(\"[-] Sorry, it's not easy~\")\r\n return\r\n\r\nif __name__ == \"__main__\":\r\n exec(open(\"nec.txt\").read())\r\n main(n, e1, e2, c1, c2)","repo_name":"cjcslhp/wheels","sub_path":"rsa/common_mode_attack/common_mode_attack.py","file_name":"common_mode_attack.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"85"} +{"seq_id":"4091166729","text":"# 정렬은 다양한 알고리즘에서 기본적으로 쓰이는 알고리즘으로\n# 단순히 정렬만으로 모든 문제들을 풀 수 있다는 생각은 저버리자\n# 이분 탐색이 나오면 먼저 파라메트릭 서치 알고리즘(Parametric Search)을 먼저 생각해보자...any()\n\nimport sys\ninput = sys.stdin.readline\n\nn, k = map(int,input().split())\nlevels = [int(input().rstrip()) for _ in range(n)]\nlevels.sort()\ntemp = 0\nleft = levels[0]\nright = levels[0] + k\nresult = 0\n\nwhile left <= right:\n mid = (left+right)//2\n possible = True\n for i in range(n):\n if levels[i] < mid:\n if temp + mid - levels[i] > k:\n possible = False\n break\n else :\n temp += (mid-levels[i])\n else:\n break\n \n if possible:\n left = mid+1\n result = mid\n else:\n right = mid - 1\n temp = 0\n \nprint(result)","repo_name":"nkw011/algorithm_training","sub_path":"baekjoon/sorting/16564_heros_of_storm_progammer.py","file_name":"16564_heros_of_storm_progammer.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"3604216","text":"# def ds_laplai(danhsach, n):\n# ds_moi = []\n\n# so_lan_lap = n // len(danhsach)\n# so_phan_tu_du = n % len(danhsach)\n\n# ds_moi = so_lan_lap * danhsach\n# for i in range(so_phan_tu_du):\n# ds_moi.append(danhsach[i])\n# return ds_moi\n\n\n# try:\n# danhsach = list(input(\"Nhap danh sach: \").split())\n# n = int(input(\"Nhap N: \"))\n# if n < 0:\n# print(\"N phai lon hon 0!\")\n# else:\n# ds_laplai = ds_laplai(danhsach, n)\n# print(*ds_laplai)\n# except:\n# print(\"Dau vao chua hop le!\")\n############################################\ndef nhan_ban_danh_sach(danhSach, n):\n # Su dung ham len() de lay so luong phan tu cua danh sach\n soPhanTu = len(danhSach)\n # Tinh toan so lan toi thieu can lap lai danh sach\n soLanNhanBan = n // soPhanTu + 1\n # Su dung toan tu * de lap danh sach voi so lan mong muon\n dsNhanBan = danhSach * soLanNhanBan\n # Cat danh sach cho dung n phan tu\n dsNPhanTu = dsNhanBan[:n]\n return dsNPhanTu\n\n\n# Nhap danh sach tu ban phim\ndanhSach = input().split()\n# Kiem tra xem danh sach co rong hay khong\nif len(danhSach) == 0:\n print(\"Danh sach rong\")\nelse:\n # Khoi lenh co the phat sinh loi\n try:\n # Nhap gia tri n tu ban phim\n # Ep kieu du lieu sang so nguyen\n n = int(input())\n # Goi thuc thi ham va truyen tham so cho ham\n dsKetQua = nhan_ban_danh_sach(danhSach, n)\n # Unpacking arguments\n print(*dsKetQua)\n\n # Khoi lenh duoc thuc thi khi loi xay ra\n except:\n print(\"Dinh dang dau vao khong hop le!\")\n","repo_name":"chanh1311/Python_Basic_200","sub_path":"Kteam/ds_laplai.py","file_name":"ds_laplai.py","file_ext":"py","file_size_in_byte":1572,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"74641727318","text":"# coding:utf-8\n# author: Articuly\n# datetime: 2020/6/18 22:44\n# software: PyCharm\n\nimport pymongo\n\nclient = pymongo.MongoClient('127.0.0.1', 27017)\n\n# 数据库名为blog\ndb = client.blog\n\n# 插入数据\n# 
两条user信息结构相同,但是一条网站信息结构不一样\n# 对于mongodb来讲,每一条之间都是完全独立的文档\nuser = [{\"username\": \"articuly\", \"sex\": \"man\", \"age\": 16},\n {\"username\": \"luxp\", \"sex\": \"man\", \"age\": 28},\n {\"web_url\": \"python-xp.com\", \"type\": \"技术类\", \"company\": \"平哥编程\"}]\n\n# 使用insert插入一条记录\n# users自动创建\n# insert_one\ndb.users.insert_one(user[0])\n# 一次性插入多个记录\n# insert_many\nres = db.users.insert_many(user)\n# 查找数据\n# 找到用户名为artic的用户\n# mongodb使用json格式来表示查询条件\n# 在python客户端中以字典提供查询条件\n# {\"username\":\"artic\"}\n# {\"username\":\"lxp\", \"age\":28}\n# 正则匹配 {\"username\":\"/xp/\"}\nu1 = db.users.find_one({'username': 'articuly'})\nu2 = db.users.find({'username': 'artic'})\n# 根据 _id 查q\nfrom bson import ObjectId\n\nid = '5eeb8875cab6435a8a58a4ba'\nid = ObjectId(id)\nu3 = db.users.find_one({'_id': id})\n# 修改\n# update_one\n# update_many\nu4 = db.users.find_one({'username': 'articuly'})\nu4['age'] = 30\nu4['sex'] = 'man'\nres = db.users.update({'_id': u4['_id']}, u4)\n# 删除\n# 删除web_url这项\n# delete_many - 删除多条\nres = db.users.delete_one({'web_url': 'python-xp.com'})\nres.delete_count\nres.raw_result\n# 删除大于20岁的\nres = db.users.delete_many({'age': {'$gt': 20}})\n","repo_name":"articuly/operation_practice","sub_path":"mongodb/use_mongodb.py","file_name":"use_mongodb.py","file_ext":"py","file_size_in_byte":1560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"10428752852","text":"# coding: utf-8\r\n# RDS delete\r\nimport boto3\r\n\r\n# nodelete,trueを変数化\r\nND = 'nodelete'\r\nTR = 'true'\r\n\r\ndef lambda_handler(event, context):\r\n#if __name__ == '__main__':\r\n client = boto3.client('rds', \"ap-northeast-1\")\r\n resp = client.describe_db_snapshots()\r\n #print resp\r\n all_list = []\r\n del_list = []\r\n\r\n for rds in resp['DBSnapshots']:\r\n all_list.append(rds['DBSnapshotIdentifier'])\r\n #print(all_list)\r\n\r\n resp2 = client.list_tags_for_resource(\r\n ResourceName=\"arn:aws:rds:ap-northeast-1:xxxxxxxxxxxx:snapshot:\" + rds['DBSnapshotIdentifier']\r\n )\r\n #print resp2\r\n\r\n for tag in resp2['TagList']:\r\n #print tag\r\n if tag['Key'] == ND and tag['Value'] == TR:\r\n del_list.append(rds['DBSnapshotIdentifier'])\r\n #print(del_list)\r\n\r\n diffset = set(all_list) - set(del_list)\r\n #print(diffset)\r\n targetlist = list(diffset)\r\n #print(targetlist)\r\n\r\n for target in targetlist:\r\n #print target\r\n response = client.delete_db_snapshot(\r\n DBSnapshotIdentifier=target,\r\n SkipFinalSnapshot=True\r\n )\r\n","repo_name":"borkit/scriptdump","sub_path":"AWS/lambda_python/delete-rds-snapshot.py","file_name":"delete-rds-snapshot.py","file_ext":"py","file_size_in_byte":1159,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"85"} +{"seq_id":"2030086132","text":"# -*- coding: utf-8 -*-\n\n# by Dr. 
Ming Yan (11/2018)\n# yan.meen@gmail.com\n# https://github.com/yanmeen/rlaf\n#\n# =============================================================================\nfrom env import CameraEnv\nfrom RL_sarsa import SarsaLambdaTable\n\nMAX_EPISODES = 3000\nMAX_EP_STEPS = 200\nON_TRAIN = True\n\n# set env\nenv = CameraEnv()\n\n# set RL method\nRL = SarsaLambdaTable(actions=list(range(env.n_actions)))\n\nsteps = []\n\n\ndef train():\n # start training\n for i in range(MAX_EPISODES):\n s = env.reset()\n\n # RL choose action based on observation\n a = RL.choose_action(str(s))\n\n # initial all zero eligibility trace\n RL.eligibility_trace *= 0\n\n ep_r = 0.\n for j in range(MAX_EP_STEPS):\n # env.render()\n\n # RL take action and get next observation and reward\n s_, reward, done = env.step(a)\n\n # RL choose action based on next observation\n a_ = RL.choose_action(str(s_))\n\n # RL learn from this transition (s, a, r, s, a) ==> Sarsa\n RL.learn(str(s), a, reward, str(s_), a_)\n\n # swap observation and action\n s = s_\n a = a_\n\n ep_r += reward\n\n if done or j == MAX_EP_STEPS - 1:\n print(\n \"Ep: %i | %s | ep_r: %.1f | step: %i\"\n % (i, \"---\" if not done else \"done\", ep_r, j)\n )\n break\n\n\ndef eval():\n # env.render()\n while True:\n s = env.reset()\n done = 0\n print(\"start from focal position: \", env.focal_new)\n while not done:\n # env.render()\n a = RL.choose_action(s)\n s, r, done = env.step(a)\n print(\"goes to new focal position: \", env.focal_new)\n if done:\n print(\"========>>>>>>>>>>>>> Find focus point at: \", env.focal_new)\n\n\nif ON_TRAIN:\n train()\nelse:\n eval()\n","repo_name":"yanmeen/RLAF","sub_path":"SARSA/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1935,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"85"} +{"seq_id":"19813003458","text":"from enum import Enum, auto\nfrom typing import Dict, Tuple\n\nimport numpy as np\nimport pandas as pd\nfrom numpy.linalg.linalg import LinAlgError\nfrom statsmodels.tsa.arima.model import ARIMA, ARIMAResults\n\nORDER = (1, 1, 0)\nMIN_NUM_DATA_POINTS = 4\n\n\nclass PredictionDirectionType(Enum):\n FORWARD = auto()\n BACKWARD = auto()\n\n\nclass PredictedAdmissions:\n \"\"\"Predict the new admissions based on the historical trend\"\"\"\n\n def __init__(\n self,\n historical_data: pd.DataFrame,\n constant_admissions: bool,\n ):\n \"\"\"\n historical_data is a DataFrame with columns for each time step and rows for each admission_to type (jail,\n prison).\n Columns need to be numeric.\n Data must be continuous for each admission, i.e. only NaN values on either end.\n\n The input data will not necessarily be sorted in temporal order, so that step is done here. 
Additionally, an\n ARIMA model will fail if all data is 0, so any rows with no data will be dropped as well.\n \"\"\"\n historical_data, constant_admissions = self._infer_missing_data(\n historical_data, constant_admissions\n )\n self.historical_data = historical_data\n self.trained_model_dict: Dict[\n Tuple[str, PredictionDirectionType], ARIMAResults\n ] = {}\n self.predictions_df = pd.DataFrame(\n columns=[\"admission_to\", \"time_step\"]\n ).set_index([\"admission_to\", \"time_step\"])\n\n # if historical data has more than specified number of years, train an ARIMA model\n if (\n len(self.historical_data.columns) >= MIN_NUM_DATA_POINTS\n and not constant_admissions\n ):\n self._train_arima_models()\n self.predict_constant_value = False\n else:\n self.predict_constant_value = True\n\n # add warnings attribute that prints at the end of shell_compartment initialization\n self.warnings: list = []\n\n def get_time_step_estimate(self, time_step: int) -> Dict[str, float]:\n \"\"\"\n Return the estimated admissions for the time_step provided as a dict of compartment -> predicted value.\n\n If the time period is one for which we have actual data, return that. If it is for a year for which\n predictions have already been made, take that prediction from the dataframe. Else, generate predictions\n for the requested time period + an additional 10 steps.\n \"\"\"\n default_steps_forward = 10\n if time_step in self.historical_data.columns:\n return self.historical_data[time_step].to_dict()\n\n if (\n time_step\n in self.predictions_df.index.get_level_values(\"time_step\").unique()\n ):\n return (\n self.predictions_df.unstack(0).loc[time_step, \"predictions\"].to_dict()\n )\n\n last_time_step_to_process = int(\n max(self.historical_data.columns.max(), time_step) + default_steps_forward\n )\n self._gen_predicted_data(time_step, last_time_step_to_process)\n return self.predictions_df.unstack(0).loc[time_step, \"predictions\"].to_dict()\n\n def gen_arima_output_df(self) -> pd.DataFrame:\n \"\"\"Return the prediction DataFrame\"\"\"\n historical_data = pd.Series(\n self.historical_data.stack(), name=\"actuals\"\n ).to_frame()\n full_arima_output = pd.concat(\n [self.predictions_df, historical_data]\n ).sort_index()\n return full_arima_output\n\n @staticmethod\n def _infer_missing_data(\n historical_data: pd.DataFrame, constant_admissions: bool\n ) -> Tuple[pd.DataFrame, bool]:\n \"\"\"Fill in historical data so all admission_to cover the same time steps of data\"\"\"\n\n # Convert different forms of NA into \"None\" to make processing the missing values easier\n historical_data.replace({np.nan: None}, inplace=True)\n historical_data = historical_data.astype(float).sort_index(axis=1)\n\n for admission, row in historical_data.iterrows():\n missing_data = historical_data.columns[row.isnull()]\n\n min_data_time_step = row.dropna().index.min()\n max_data_time_step = row.dropna().index.max()\n\n missing_data_backward = missing_data[missing_data < min_data_time_step]\n missing_data_forward = missing_data[missing_data > max_data_time_step]\n\n if not missing_data_backward.empty:\n if len(row.dropna()) < MIN_NUM_DATA_POINTS:\n constant_admissions = True\n historical_data.loc[\n admission, missing_data_backward\n ] = historical_data.loc[admission, min_data_time_step]\n else:\n model_backcast = (\n ARIMA(\n row.iloc[::-1].dropna().values.astype(float),\n order=ORDER,\n trend=\"t\",\n )\n .fit()\n .forecast(steps=len(missing_data_backward))\n )\n\n # flip the predictions back around so they're ordered correctly for 
the historical data indexing\n historical_data.loc[\n admission, missing_data_backward\n ] = model_backcast[::-1]\n\n if not missing_data_forward.empty:\n if len(row.dropna()) < MIN_NUM_DATA_POINTS:\n constant_admissions = True\n historical_data.loc[\n admission, missing_data_forward\n ] = historical_data.loc[admission, max_data_time_step]\n else:\n model_forecast = (\n ARIMA(row.dropna().values.astype(float), order=ORDER, trend=\"t\")\n .fit()\n .forecast(steps=len(missing_data_forward))\n )\n\n historical_data.loc[\n admission, missing_data_forward\n ] = model_forecast\n return historical_data, constant_admissions\n\n def _train_arima_models(self) -> None:\n \"\"\"\n Create a dictionary to store the forecasted and backcasted trained ARIMA model objects\n A dictionary is created for each admission type with both a forecasting model and a backcasting model\n \"\"\"\n trained_model_dict = {}\n for admission_compartment, row in self.historical_data.iterrows():\n model_forecast = ARIMA(row.values, order=ORDER, trend=\"t\")\n model_backcast = ARIMA(row.iloc[::-1].values, order=ORDER, trend=\"t\")\n try:\n trained_model_dict[\n (admission_compartment, PredictionDirectionType.FORWARD)\n ] = model_forecast.fit()\n trained_model_dict[\n (admission_compartment, PredictionDirectionType.BACKWARD)\n ] = model_backcast.fit()\n\n except LinAlgError:\n # Add warnings\n warn_text = \"Singular matrix encountered fitting ARIMA model.\"\n if warn_text not in self.warnings:\n self.warnings.append(warn_text)\n\n # adjust forecast and backcast\n model_forecast = ARIMA(\n row.values + np.random.normal(0, 0.001, len(row.values)),\n order=ORDER,\n trend=\"t\",\n )\n model_backcast = ARIMA(\n row.iloc[::-1].values + np.random.normal(0, 0.001, len(row.values)),\n order=ORDER,\n trend=\"t\",\n )\n trained_model_dict[\n (admission_compartment, PredictionDirectionType.FORWARD)\n ] = model_forecast.fit()\n trained_model_dict[\n (admission_compartment, PredictionDirectionType.BACKWARD)\n ] = model_backcast.fit()\n\n self.trained_model_dict = trained_model_dict\n\n def _gen_predicted_data(self, start_period: int, end_period: int) -> None:\n \"\"\"Generate the predictions between the start and end periods\"\"\"\n\n # calculate the range of time steps to forecast forward and backward\n pred_periods_forward = range(\n int(self.historical_data.columns.max()) + 1, int(end_period) + 1\n )\n pred_periods_backward = range(\n int(start_period), int(self.historical_data.columns.min())\n )[::-1]\n\n predictions_df = pd.DataFrame()\n\n for admission_compartment, row in self.historical_data.iterrows():\n # If not specified to use the constant rate assumption...\n if not self.predict_constant_value:\n # Create dataframes to store forecasted and backcasted model outputs\n predictions_df_sub = pd.DataFrame()\n if len(pred_periods_backward) > 0:\n backward_df = self._get_arima_predictions_df(\n admission_compartment=admission_compartment,\n cast_type=PredictionDirectionType.BACKWARD,\n prediction_indexes=pred_periods_backward,\n )\n predictions_df_sub = pd.concat([predictions_df_sub, backward_df])\n\n if len(pred_periods_forward) > 0:\n forward_df = self._get_arima_predictions_df(\n admission_compartment=admission_compartment,\n cast_type=PredictionDirectionType.FORWARD,\n prediction_indexes=pred_periods_forward,\n )\n predictions_df_sub = pd.concat([predictions_df_sub, forward_df])\n\n # Combine forecast and backcast data\n predictions_df_sub = predictions_df_sub.sort_index().loc[\n start_period:end_period\n ]\n\n # If using the constant 
rate assumption, just take the average of the last 12 values\n # TODO(#10033): update constant admissions logic to be more accurate\n else:\n predictions_df_sub = pd.DataFrame(\n index=range(start_period, end_period + 1),\n columns=[\"predictions\"],\n )\n predictions_df_sub.sort_index(inplace=True)\n # Take the average of all rows if there are less than 12\n number_of_rows = min(len(row), 12)\n avg_forward_value = np.mean(row.iloc[:number_of_rows])\n predictions_df_sub.loc[\n predictions_df_sub.index < int(self.historical_data.columns.min()),\n \"predictions\",\n ] = avg_forward_value\n avg_backwards_value = np.mean(row.iloc[-number_of_rows:])\n predictions_df_sub.loc[\n predictions_df_sub.index > int(self.historical_data.columns.max()),\n \"predictions\",\n ] = avg_backwards_value\n predictions_df_sub = predictions_df_sub[\n ~predictions_df_sub.index.isin(self.historical_data.columns)\n ]\n\n # Label the dataframe indices\n predictions_df_sub.index.name = \"time_step\"\n predictions_df_sub = pd.concat(\n {admission_compartment: predictions_df_sub}, names=[\"admission_to\"]\n )\n\n # Throw warning if the lower bound has been hit\n warn_text = \"Warning: lower bound hit when predicting admissions.\"\n if any(predictions_df_sub.predictions < 0) and (\n warn_text not in self.warnings\n ):\n self.warnings.append(warn_text)\n # Clip negative values at 0\n predictions_df_sub[\"predictions\"] = predictions_df_sub[\"predictions\"].clip(\n lower=0\n )\n\n # append df_sub to df\n max_allowable_pred = predictions_df_sub[\"predictions\"].max()\n predictions_df_sub[\"predictions\"] = predictions_df_sub[\"predictions\"].clip(\n 0, max_allowable_pred\n )\n\n predictions_df = pd.concat([predictions_df, predictions_df_sub])\n\n # If predictions are made more than once on an overlapping set of periods we will get duplicates. 
Drop those.\n predictions_df = pd.concat([predictions_df, self.predictions_df])\n predictions_df = predictions_df[\n ~predictions_df.index.duplicated(keep=\"first\")\n ].sort_index()\n\n # Store the predictions\n self.predictions_df = predictions_df\n\n def _get_arima_predictions_df(\n self,\n admission_compartment: str,\n cast_type: PredictionDirectionType,\n prediction_indexes: range,\n ) -> pd.DataFrame:\n \"\"\"Helper function to generate the ARIMA forecast DataFrame for the provided prediction period\n\n Args:\n admission_compartment: The compartment to generate the predicted admissions for\n cast_type: the type of forecast to use (forecast or backcast) from within the trained model\n prediction_indexes: the index labels for the generated prediction DataFrame\n\n Returns:\n pd.DataFrame with columns for the prediction, high/low conf interval, and standard error\n \"\"\"\n admission_model = self.trained_model_dict[admission_compartment, cast_type]\n predictions_array = admission_model.forecast(steps=len(prediction_indexes))\n prediction_data = {\n \"predictions\": predictions_array,\n }\n predictions_df = pd.DataFrame(index=prediction_indexes, data=prediction_data)\n return predictions_df\n\n def __eq__(self, other: object) -> bool:\n \"\"\"Check if two PredictedAdmissions are equal (does not require projection_df to be equal)\"\"\"\n if not isinstance(other, PredictedAdmissions):\n return False\n\n try:\n if (self.historical_data != other.historical_data).any().any():\n return False\n except ValueError:\n return False\n\n if self.trained_model_dict != other.trained_model_dict:\n return False\n\n return True\n","repo_name":"Recidiviz/pulse-data","sub_path":"recidiviz/calculator/modeling/population_projection/predicted_admissions.py","file_name":"predicted_admissions.py","file_ext":"py","file_size_in_byte":14299,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"85"} +{"seq_id":"41502963044","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Dec 29 13:21:29 2021\n\n@author: charlestobin\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nfrom numpy import absolute\n\nfrom sklearn.metrics import r2_score\nfrom sklearn.metrics import mean_squared_error as mse\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.model_selection import RepeatedKFold\nfrom sklearn.preprocessing import StandardScaler\n\nfrom matplotlib import pyplot\nimport matplotlib.pyplot as plt\n\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras import layers\nfrom tensorflow.keras.layers import Dense\n\nimport seaborn as sns\n\npd.set_option('max_columns', 25)\n\n############################################################################################################################################\n\nplayers_df = pd.read_csv(\"/Users/charlestobin/Desktop/CLEAN_FILES/nfl-big-data-bowl-2022/players.csv\")\n\npff_data_df = pd.read_csv(\"/Users/charlestobin/Desktop/CLEAN_FILES/nfl-big-data-bowl-2022/PFFScoutingData.csv\")\n\ngames_df = pd.read_csv(\"/Users/charlestobin/Desktop/CLEAN_FILES/nfl-big-data-bowl-2022/games.csv\")\n\nplays_df = pd.read_csv(\"/Users/charlestobin/Desktop/CLEAN_FILES/nfl-big-data-bowl-2022/plays.csv\")\n\nseasons = [\"2018\", \"2019\"] #\"2020\",\ntracking_df = pd.DataFrame()\n\nfor s in seasons:\n trackingTemp_df = 
pd.read_csv(\"/Users/charlestobin/Desktop/CLEAN_FILES/nfl-big-data-bowl-2022/tracking\"+s+\".csv\")\n tracking_df = tracking_df.append(trackingTemp_df)\n\ntracking_df.loc[tracking_df['playDirection'] == \"left\", 'x'] = 120-tracking_df.loc[tracking_df['playDirection'] == \"left\", 'x']\ntracking_df.loc[tracking_df['playDirection'] == \"left\", 'y'] = 160/3-tracking_df.loc[tracking_df['playDirection'] == \"left\", 'y']\n\ncoord_df = pd.read_csv(\"/Users/charlestobin/Desktop/CLEAN_FILES/nfl-big-data-bowl-2022/stadium_coordinates.csv\")\nweather_df = pd.read_csv(\"/Users/charlestobin/Desktop/CLEAN_FILES/nfl-big-data-bowl-2022/games_weather.csv\")\nweatherGames_df = pd.read_csv(\"/Users/charlestobin/Desktop/CLEAN_FILES/nfl-big-data-bowl-2022/weather.csv\")\n\n############################################################################################################################################\n\npuntPlays_df = plays_df.copy()\n\npuntPlays_df = pd.merge(puntPlays_df, pff_data_df, on = ['gameId', 'playId'])\n\npuntPlays_df = puntPlays_df[(puntPlays_df['specialTeamsPlayType'] == 'Punt')]\n\npuntPlays_df['returnerId'] = pd.to_numeric(puntPlays_df['returnerId'], errors='coerce')\n\npuntPlays_df = puntPlays_df[['gameId', 'playId', 'playDescription',\n 'possessionTeam', 'specialTeamsPlayType', 'specialTeamsResult',\n 'kickerId', 'returnerId', 'yardlineSide',\n 'yardlineNumber',\n 'kickLength', 'kickReturnYardage', 'playResult',\n 'absoluteYardlineNumber', 'snapDetail', 'snapTime', 'operationTime',\n 'hangTime', 'kickType', 'kickDirectionIntended', 'kickDirectionActual',\n 'returnDirectionIntended', 'returnDirectionActual', 'missedTackler',\n 'assistTackler', 'tackler', 'gunners',\n 'puntRushers', 'specialTeamsSafeties', 'vises', 'kickContactType']]\n\npuntPlays_df.columns\n\npuntPlays_df.head()\n\n############################################################################################################################################\n\nweatherEffects_df = pd.merge(weather_df, weatherGames_df, on = ['game_id'])\n\nweatherEffects_df = pd.merge(weatherEffects_df, coord_df, on = ['StadiumName'])\n\nweatherGroupBy = weatherEffects_df.copy()\n\nweatherGroupBy = weatherEffects_df.groupby(['game_id']).mean()\n\nweatherGroupBy = weatherGroupBy.reset_index()\n\nweatherGroupBy = weatherGroupBy.rename({'game_id': 'gameId'}, axis=1)\n\nweatherGroupBy\n\nweatherGroupBy = weatherGroupBy[['gameId', 'Temperature', 'Humidity', 'Precipitation', 'WindSpeed', 'WindDirection',\n 'Pressure', 'Season', 'StadiumAzimuthAngle', 'elevationFt']]\n\n############################################################################################################################################\n\npressAlt = weatherGroupBy.copy()\n\npressAlt['basePA'] = 29.92\n\npressAlt['pressAltit'] = ((pressAlt['basePA'] - pressAlt['Pressure'])*1000) + pressAlt['elevationFt']\n\ndensAlt = pressAlt.copy()\n\n############################################################################################################################################\n\npuntWx_df = pd.merge(pressAlt, puntPlays_df, on = ['gameId'])\n\n############################################################################################################################################\n\nexpectedReturnYards = puntWx_df.copy()\n\nexpectedReturnYards = expectedReturnYards[['gameId', 'playId', 'Temperature', 'Humidity', 'Precipitation',\n 'WindSpeed', 'WindDirection', 'StadiumAzimuthAngle',\n 'pressAltit',\n 'possessionTeam', 'specialTeamsPlayType', 'specialTeamsResult',\n 
'kickerId', 'returnerId', 'yardlineSide',\n 'yardlineNumber', 'snapDetail',\n 'kickLength', 'kickReturnYardage',\n 'operationTime',\n 'hangTime', 'kickType', 'kickDirectionIntended', 'kickDirectionActual',\n 'returnDirectionIntended', 'gunners',\n 'vises', 'kickContactType']]\n\nexpectedReturnYards = pd.merge(expectedReturnYards, tracking_df, on = ['gameId', 'playId'])\n\nexpectedReturnYards = expectedReturnYards[(expectedReturnYards['event'] == 'ball_snap')]\n\nexpectedReturnYards = expectedReturnYards[(expectedReturnYards['displayName'] == 'football')]\n\ndummies = pd.get_dummies(expectedReturnYards.snapDetail)\n\nexpectedReturnYards = pd.concat([expectedReturnYards, dummies], axis=1)\n\nexpectedReturnYards = expectedReturnYards[['gameId', 'playId', 'kickerId', 'yardlineNumber', 'kickLength',\n 'hangTime', 'operationTime', 'x', 'y', 'H', 'L',\n 'OK', '<', '>', 'Precipitation',\n 'WindDirection', 'StadiumAzimuthAngle',\n 'pressAltit']]\n\nexpectedReturnYards = expectedReturnYards.dropna()\n\nexpectedReturnYards = expectedReturnYards[expectedReturnYards['kickLength'] < 63]\nexpectedReturnYards = expectedReturnYards[expectedReturnYards['kickLength'] > 30]\n\n############################################################################################################################################\n\nX = expectedReturnYards[['x', 'y', 'kickerId', 'Precipitation',\n 'StadiumAzimuthAngle',\n 'pressAltit', 'WindDirection']]\ny = expectedReturnYards[['kickLength']]\n\n\nPredictorScaler=StandardScaler()\nTargetVarScaler=StandardScaler()\n\nPredictorScalerFit=PredictorScaler.fit(X)\nTargetVarScalerFit=TargetVarScaler.fit(y) \n\nX=PredictorScalerFit.transform(X)\ny=TargetVarScalerFit.transform(y) \n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=101)\n\nprint(X_train.shape)\nprint(y_train.shape)\nprint(X_test.shape)\nprint(y_test.shape) \n\n############################################################################################################################################\n\nmodel = Sequential()\n \nmodel.add(Dense(units=5, input_dim=7, kernel_initializer='normal', activation='relu'))\n \nmodel.add(Dense(units=5, kernel_initializer='normal', activation='tanh'))\n \n#model.add(Dense(units=5, kernel_initializer='normal', activation='relu'))\n\n#model.add(Dense(units=5, kernel_initializer='normal', activation='relu'))\n\nmodel.add(Dense(1, kernel_initializer='normal'))\n \nmodel.compile(loss='mean_squared_error', optimizer='adam', metrics=['mse', 'mae'])\n\nmodel.fit(X_train, y_train ,batch_size = 10, epochs = 100, verbose=10) \n\n############################################################################################################################################\n\ndef FunctionFindBestParams(X_train, y_train, X_test, y_test):\n \n batch_size_list=[5, 10, 15, 20]\n epoch_list = [5, 10, 50, 100]\n \n import pandas as pd\n SearchResultsData=pd.DataFrame(columns=['TrialNumber', 'Parameters', 'Accuracy'])\n \n TrialNumber=0\n for batch_size_trial in batch_size_list:\n for epochs_trial in epoch_list:\n TrialNumber+=1\n \n model = Sequential()\n \n model.add(Dense(units=5, input_dim=X_train.shape[1], kernel_initializer='normal', activation='relu'))\n \n model.add(Dense(units=5, kernel_initializer='normal', activation='relu'))\n \n model.add(Dense(units=5, kernel_initializer='normal', activation='relu'))\n \n model.add(Dense(1, kernel_initializer='normal'))\n\n model.compile(loss='mean_squared_error', optimizer='adam', metrics=['mse', 'mae'])\n \n 
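# train quietly for this (batch_size, epochs) pair; the printed\n            # accuracy below is 100 - MAPE on the held-out test set\n            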
model.fit(X_train, y_train ,batch_size = batch_size_trial, epochs = epochs_trial, verbose=0)\n \n MAPE = np.mean(100 * (np.abs(y_test-model.predict(X_test))/y_test))\n\n print(TrialNumber, 'Parameters:','batch_size:', batch_size_trial,'-', 'epochs:',epochs_trial, 'Accuracy:', 100-MAPE)\n \n SearchResultsData=SearchResultsData.append(pd.DataFrame(data=[[TrialNumber, str(batch_size_trial)+'-'+str(epochs_trial), 100-MAPE]],\n columns=['TrialNumber', 'Parameters', 'Accuracy'] ))\n return(SearchResultsData)\n \n\n \n############################################################################################################################################\n\nResultsData=FunctionFindBestParams(X_train, y_train, X_test, y_test)\n\nResultsData.plot(x='Parameters', y='Accuracy', figsize=(15,4), kind='line')\n \nmodel.fit(X_train, y_train, batch_size = 10, epochs = 100, verbose=0)\n \nPredictions=model.predict(X_test)\n\nPredictions=TargetVarScalerFit.inverse_transform(Predictions)\n \ny_test_orig=TargetVarScalerFit.inverse_transform(y_test)\n\nTest_Data=PredictorScalerFit.inverse_transform(X_test)\n \nTestingData=pd.DataFrame(data=Test_Data, columns=['x', 'y', 'kickerId', 'Precipitation',\n 'StadiumAzimuthAngle',\n 'pressAltit', 'WindDirection'])\nTestingData['kickLength']=y_test_orig\nTestingData['PredictedLength']=Predictions\nTestingData.head()\n\nAPE=100*(abs(TestingData['kickLength']-TestingData['PredictedLength'])/TestingData['kickLength'])\nTestingData['APE']=APE\n\nprint('The Accuracy of ANN model is:', 100-np.mean(APE))\nTestingData.tail(25)\n\nTestingData.describe()\n\nTestingData.to_csv(\"/Users/charlestobin/Desktop/CLEAN_FILES/nfl-big-data-bowl-2022/annExpectedPuntYards.csv\")\n\n","repo_name":"Charlestobi/NFL-Big-Data-Bowl","sub_path":"SUBMISSION_CODE/EPYOE_ANN.py","file_name":"EPYOE_ANN.py","file_ext":"py","file_size_in_byte":11305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"12093168999","text":"from django.shortcuts import render,redirect,get_object_or_404\nfrom django.http import HttpResponse\nfrom django.contrib.auth import authenticate,login\nfrom django.contrib.auth.models import User\nfrom .forms import LoginForm,UserRegistrationForm,UserEditForm,ProfileEditForm\nfrom django.contrib.auth.decorators import login_required\nfrom django.views.decorators.http import require_POST\nfrom django.http import JsonResponse\nfrom django.contrib import messages\nfrom .models import Contact, Profile\nfrom actions.models import Action\nfrom actions.utils import create_action \nfrom django.core.paginator import EmptyPage, PageNotAnInteger, Paginator\n# Create your views here.\n\ndef user_login(request):\n\n if request.method=='POST':\n form = LoginForm(request.POST)\n if form.is_valid():\n cd = form.cleaned_data\n user = authenticate(request,username=cd['username'],password=cd['password'])\n if user is not None:\n if user.is_active:\n login(request,user)\n return HttpResponse('Authenticated successfully')\n \n return HttpResponse('Disabled account')\n\n return HttpResponse('invalid login')\n else:\n form = LoginForm()\n return render(request, 'account/login.html', {'form':form})\n\n@login_required\ndef dashboard(request):\n actions = Action.objects.exclude(user=request.user)\n following_ids = request.user.following.values_list('id',flat=True)\n\n if following_ids:\n actions = actions.filter(user__id__in = following_ids)\n # joining table's objects that relate to each other.\n actions = 
actions.select_related('user','user__profile').prefetch_related('target')[:10]\n    # pagination\n    paginator = Paginator(actions, 6)\n    page = request.GET.get('page')\n    try:\n        actions = paginator.page(page)\n    # if page wasn't integer\n    except PageNotAnInteger:\n        actions = paginator.page(1)\n    except EmptyPage:\n        if request.is_ajax():\n            return HttpResponse('')\n        actions = paginator.page(paginator.num_pages)\n    if request.is_ajax():\n        return render(request, 'account/dashboard_ajax.html', {'section':'dashboard','actions':actions})\n    return render(request, 'account/dashboard.html', {'section':'dashboard','actions':actions})\n\n\ndef register(request):\n    if request.method == 'POST':\n        form = UserRegistrationForm(request.POST)\n        if form.is_valid():\n            user = form.save(commit=False)\n            user.set_password(\n                form.cleaned_data['password']\n            )\n            user.save()\n            create_action(user, 'has created an account')\n            return render(request,'account/register_done.html',{'user':user})\n    else:\n        form = UserRegistrationForm()\n    return render(request,'account/register.html',{'form':form})\n\n\n@login_required\ndef edit_profile(request):\n    if request.method == 'POST':\n        u_form = UserEditForm(instance=request.user,data=request.POST)\n        p_form = ProfileEditForm(request.POST,instance=request.user.profile,files=request.FILES)\n        if u_form.is_valid() and p_form.is_valid():\n            u_form.save()\n            p_form.save()\n            messages.success(request,'Your account has been updated successfully')\n            return redirect('account:edit')\n        else:\n            u_form = UserEditForm(instance=request.user)\n            p_form = ProfileEditForm(instance=request.user.profile)\n            messages.error(request,'There was an error in your field data!')\n            return redirect('account:edit')\n    u_form = UserEditForm(instance=request.user)\n    p_form = ProfileEditForm(instance=request.user.profile)\n    return render(request,'account/edit.html',{'u_form':u_form, 'p_form':p_form})\n\n@login_required\ndef user_list(request):\n    # users = User.objects.filter(is_active= True)\n    profiles = Profile.objects.all()\n    return render(request,'account/user/list.html',{'section': 'people','profiles':profiles})\n\n@login_required\ndef user_detail(request,username):\n    user = get_object_or_404(User,username=username, is_active=True)\n    return render(request,'account/user/detail.html',{'section': 'people','user':user})\n\n\n@login_required\n@require_POST\ndef user_follow(request):\n    user_id = request.POST.get('id')\n    action = request.POST.get('action')\n    if user_id and action:\n        try:\n            user = User.objects.get(id=user_id) \n            if action == 'follow':\n                # create relation from intermediary table\n                Contact.objects.get_or_create(user_from = request.user , user_to=user)\n                create_action(request.user, 'is following', user)\n            else:\n                Contact.objects.filter(user_from = request.user,\n                                    user_to = user).delete()\n            return JsonResponse({'status':'ok'})\n        except User.DoesNotExist:\n            return JsonResponse({'status': 'error'})\n    return JsonResponse({'status':'error'})","repo_name":"poyap/socailapp","sub_path":"account/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5046,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"}
+{"seq_id":"41459589484","text":"# Import required libraries\nimport copy\nimport dash\nimport datetime\nimport pandas as pd\nimport numpy as np\nfrom dash.dependencies import Input, Output\nimport dash_core_components as dcc\nimport dash_html_components as html\n\nfrom visualisation_utils import model_search, prepare_data\n\n# 
Multi-dropdown options\nfrom controls import window_options\n\napp = dash.Dash(\n __name__, meta_tags=[{\"name\": \"viewport\", \"content\": \"width=device-width\"}]\n)\nserver = app.server\n\n# Create global chart template\nmapbox_access_token = \"pk.eyJ1IjoicGxvdGx5bWFwYm94IiwiYSI6ImNrOWJqb2F4djBnMjEzbG50amg0dnJieG4ifQ.Zme1-Uzoi75IaFbieBDl3A\"\n\nlayout = dict(\n autosize=True,\n automargin=True,\n margin=dict(l=30, r=30, b=20, t=40),\n hovermode=\"closest\",\n plot_bgcolor=\"#F9F9F9\",\n paper_bgcolor=\"#F9F9F9\",\n legend=dict(font=dict(size=10), orientation=\"h\"),\n title=\"Satellite Overview\",\n mapbox=dict(\n accesstoken=mapbox_access_token,\n style=\"light\",\n center=dict(lon=-78.05, lat=42.54),\n zoom=7,\n ),\n)\n\n# Create app layout\napp.layout = html.Div(\n [\n # Hidden div inside the app that stores the intermediate value\n html.Div(id='simulation_data', style={'display': 'none'}),\n\n # empty Div to trigger javascript file for graph resizing\n html.Div(id=\"output-clientside\"),\n html.Div(\n [\n html.Div(\n [\n html.Img(\n src=app.get_asset_url(\"au_logo.png\"),\n id=\"au-image\",\n style={\n \"height\": \"60px\",\n \"width\": \"auto\",\n \"margin-bottom\": \"25px\",\n },\n )\n ],\n className=\"one-third column\",\n ),\n html.Div(\n [\n html.Div(\n [\n html.H3(\n \"Smart Industry\",\n style={\"margin-bottom\": \"0px\"},\n ),\n html.H5(\n \"Simulation Analysis\", style={\"margin-top\": \"0px\"}\n ),\n ]\n )\n ],\n className=\"one-third column\",\n id=\"title\",\n ),\n html.Div(\n [\n html.A(\n html.Button(\"GitLab repo\", id=\"learn-more-button\"),\n href=\"https://gitlab.au.dk/smart-industry/anomaly_simulation\",\n )\n ],\n className=\"one-third column\",\n id=\"button\",\n ),\n ],\n id=\"header\",\n className=\"row flex-display\",\n style={\"margin-bottom\": \"25px\"},\n ),\n html.Div(\n [\n html.Div([\n html.P(\"Select model window size, dimensionality and other parameters:\",\n className=\"control_label\"),\n dcc.Input(id=\"model_window\",\n type=\"number\",\n placeholder=\"Model window size\",\n min=0,\n value=500,\n className=\"dcc_control\"),\n # html.P(\"Select model dimensionality:\", className=\"control_label\"),\n dcc.Input(id=\"model_dimensionality\",\n type=\"number\",\n placeholder=\"Model dimensionality\",\n max=125,\n min=0,\n value=60,\n className=\"dcc_control\"),\n dcc.Checklist(\n id='model_checklist',\n options=[\n {'label': 'Screwdriver data only', 'value': 'screwdriver_only'},\n {'label': 'Binary data', 'value': 'binarize'}\n ],\n value=['binarize']\n ),\n html.P(\"Select system response window type:\", className=\"control_label\"),\n dcc.Dropdown(\n id='window_type',\n options=window_options,\n value='hamming',\n className=\"dcc_control\",\n ),\n html.P(\"Select system response threshold:\", className=\"control_label\"),\n dcc.Input(id=\"response_threshold\",\n type=\"number\",\n placeholder=\"System response threshold\",\n min=0,\n value=0.5,\n className=\"dcc_control\"),\n ],\n className=\"pretty_container four columns\",\n id=\"cross-filter-options\",\n ),\n html.Div(\n [\n html.Div(\n [\n html.Div(\n [html.H6(id=\"chosen_model_text\"),\n html.P(\"Chosen model\")],\n id=\"chosen_model\",\n className=\"mini_container\",\n ),\n html.Div(\n [html.H6(id=\"avg_acc_text\"),\n html.P(\"Model accuracy\")],\n id=\"avg_f1\",\n className=\"mini_container\",\n ),\n html.Div(\n [html.H6(id=\"simulation_avg_acc_text\"),\n html.P(\"Simulation accuracy\")],\n id=\"simulation_avg_acc\",\n className=\"mini_container\",\n ),\n html.Div(\n 
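# summary card: total length of the simulated period\n                            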
[html.H6(id=\"simulation_length_text\"),\n html.P(\"Simulation length\")],\n id=\"simulation_length\",\n className=\"mini_container\",\n ),\n ],\n id=\"info-container\",\n className=\"row container-display\",\n ),\n html.Div(\n [html.P(\n \"Select simulation window size:\",\n className=\"control_label\",\n ),\n dcc.Slider(\n id='window_slider',\n min=0,\n max=2001,\n value=500,\n marks={str(x): str(x) for x in range(0, 2001, 50)},\n step=None,\n className=\"dcc_control\"\n ), ],\n id=\"windowSizeContainer\",\n className=\"pretty_container\",\n ),\n ],\n id=\"right-column\",\n className=\"eight columns\",\n ),\n ],\n className=\"row flex-display\",\n ),\n html.Div(\n [\n html.Div(\n [dcc.Graph(id=\"results_graph\")],\n className=\"pretty_container seven columns\",\n ),\n html.Div(\n [dcc.Graph(id=\"avg_accuracy_graph\")],\n className=\"pretty_container five columns\",\n ),\n ],\n className=\"row flex-display\",\n ),\n html.Div(\n [\n html.Div(\n [dcc.Graph(id=\"pie_graph\")],\n className=\"pretty_container seven columns\",\n ),\n html.Div(\n [dcc.Graph(id=\"run_times_graph\")],\n className=\"pretty_container five columns\",\n ),\n ],\n className=\"row flex-display\",\n ),\n ],\n id=\"mainContainer\",\n style={\"display\": \"flex\", \"flex-direction\": \"column\"},\n)\n\n\n# Helper functions\ndef produce_statistics(augmented_data):\n simulation_length = str(datetime.timedelta(minutes=augmented_data.shape[0] / 60000))\n simulation_avg_acc = np.round(augmented_data['value'].loc[augmented_data['variable'] == 'Response_accuracy'].mean(),\n decimals=3)\n\n return simulation_avg_acc, simulation_length\n\n\ndef create_plots(augmented_data):\n \"\"\"\n Create the plots for visualisation\n\n :param augmented_data: df,\n the data augmented with statistics\n :return: plotly figures\n \"\"\"\n # Line plot of true and predicted labels\n data_0 = augmented_data[augmented_data['variable'].isin(['True_labels', 'Response'])]\n layout_labels = copy.deepcopy(layout)\n\n data = [\n dict(\n type=\"scatter\",\n mode=\"lines\",\n name=\"True labels\",\n x=data_0['Cycle'],\n y=data_0['value'].loc[data_0['variable'] == 'True_labels'],\n line=dict(shape=\"spline\", smoothing=\"2\", color=\"#F9ADA0\"),\n ),\n dict(\n type=\"scatter\",\n mode=\"lines\",\n name=\"System response\",\n x=data_0['Cycle'],\n y=data_0['value'].loc[data_0['variable'] == 'Response'],\n line=dict(shape=\"spline\", smoothing=\"2\", color=\"#849E68\"),\n ),\n ]\n layout_labels[\"title\"] = \"System response vs true labels\"\n\n fig_0 = dict(data=data, layout=layout_labels)\n\n # Line plot of run times\n data_1 = augmented_data[augmented_data['variable'].isin(['Run_times'])]\n layout_runtimes = copy.deepcopy(layout)\n\n data = [\n dict(\n type=\"scatter\",\n mode=\"lines\",\n name=\"Prediction CMA\",\n x=data_1['Cycle'],\n y=data_1['value'].loc[data_1['variable'] == 'Run_times'],\n line=dict(shape=\"spline\", smoothing=\"2\", color=\"#849E68\"),\n ),\n ]\n layout_runtimes[\"title\"] = \"Simulation run times\"\n\n fig_1 = dict(data=data, layout=layout_runtimes)\n\n # Line plot of accuracy\n data_2 = augmented_data[augmented_data['variable'].isin(['Predicted_CMA', 'Response_CMA'])]\n layout_accuracy = copy.deepcopy(layout)\n\n data = [\n dict(\n type=\"scatter\",\n mode=\"lines\",\n name=\"Prediction CMA\",\n x=data_2['Cycle'],\n y=data_2['value'].loc[data_2['variable'] == 'Predicted_CMA'],\n line=dict(shape=\"spline\", smoothing=\"2\", color=\"#F9ADA0\"),\n ),\n dict(\n type=\"scatter\",\n mode=\"lines\",\n name=\"System CMA\",\n 
x=data_2['Cycle'],\n            y=data_2['value'].loc[data_2['variable'] == 'Response_CMA'],\n            line=dict(shape=\"spline\", smoothing=\"2\", color=\"#849E68\"),\n        ),\n    ]\n    layout_accuracy[\"title\"] = \"Prediction vs system Cumulative Moving Average accuracy\"\n\n    fig_2 = dict(data=data, layout=layout_accuracy)\n\n    # Pie chart of result types\n    data_3 = augmented_data[augmented_data['variable'].isin(['Prediction_result'])]\n    data_3 = data_3['value'].value_counts()\n    # Check if any value is missing\n    if not (data_3.index == 'False positive').any():\n        missing_entry = pd.Series({'False positive': 0}, index=['False positive'])\n        data_3 = pd.concat([data_3, missing_entry])\n\n    if not (data_3.index == 'False negative').any():\n        missing_entry = pd.Series({'False negative': 0}, index=['False negative'])\n        data_3 = pd.concat([data_3, missing_entry])\n\n    if not (data_3.index == 'Correct').any():\n        missing_entry = pd.Series({'Correct': 0}, index=['Correct'])\n        data_3 = pd.concat([data_3, missing_entry])\n\n    data_4 = augmented_data[augmented_data['variable'].isin(['Response_result'])]\n    data_4 = data_4['value'].value_counts()\n    # Check if any value is missing\n    if not (data_4.index == 'False positive').any():\n        missing_entry = pd.Series({'False positive': 0}, index=['False positive'])\n        data_4 = pd.concat([data_4, missing_entry])\n\n    if not (data_4.index == 'False negative').any():\n        missing_entry = pd.Series({'False negative': 0}, index=['False negative'])\n        data_4 = pd.concat([data_4, missing_entry])\n\n    if not (data_4.index == 'Correct').any():\n        missing_entry = pd.Series({'Correct': 0}, index=['Correct'])\n        data_4 = pd.concat([data_4, missing_entry])\n\n    layout_pie = copy.deepcopy(layout)\n\n    data = [\n        dict(\n            type=\"pie\",\n            labels=[\"False positive\", \"False negative\", \"Correct\"],\n            values=[data_3['False positive'],\n                    data_3['False negative'],\n                    data_3['Correct']],\n            name=\"Simulation response\",\n            text=[\n                \"False positives\",\n                \"False negatives\",\n                \"Correct\",\n            ],\n            hoverinfo=\"text+value+percent\",\n            textinfo=\"label+percent+name\",\n            hole=0.5,\n            marker=dict(colors=[\"#92d8d8\", \"#fac1b7\", \"#a9bb95\"]),\n            domain={\"x\": [0, 0.45], \"y\": [0.2, 0.8]},\n        ),\n        dict(\n            type=\"pie\",\n            labels=[\"False positive\", \"False negative\", \"Correct\"],\n            values=[data_4['False positive'],\n                    data_4['False negative'],\n                    data_4['Correct']],\n            name=\"System response\",\n            text=[\n                \"False positives\",\n                \"False negatives\",\n                \"Correct\",\n            ],\n            hoverinfo=\"text+value+percent\",\n            textinfo=\"label+percent+name\",\n            hole=0.5,\n            marker=dict(colors=[\"#92d8d8\", \"#fac1b7\", \"#a9bb95\"]),\n            domain={\"x\": [0.55, 1], \"y\": [0.2, 0.8]},\n        ),\n    ]\n    layout_pie[\"title\"] = \"Results breakdown: simulation vs system response\"\n    layout_pie[\"font\"] = dict(color=\"#777777\")\n    layout_pie[\"legend\"] = dict(\n        font=dict(color=\"#CCCCCC\", size=\"10\"), orientation=\"h\", bgcolor=\"rgba(0,0,0,0)\"\n    )\n\n    fig_3 = dict(data=data, layout=layout_pie)\n\n    return fig_0, fig_1, fig_2, fig_3\n\n\n@app.callback([Output('simulation_data', 'children'),\n               Output('chosen_model_text', 'children'),\n               Output('avg_acc_text', 'children')],\n              [Input('model_window', 'value'),\n               Input('model_dimensionality', 'value'),\n               Input('model_checklist', 'value')])\ndef load_data(model_window, model_dimensionality, model_checklist):\n    if model_window == \"\":  # Do nothing if button is clicked and input num is blank.\n        return \"\", \"No input\", 0\n\n    df, chosen_model, max_acc = model_search(model_window, model_dimensionality, 
model_checkbox=model_checklist)\n\n    if isinstance(df, pd.DataFrame):\n        return df.to_json(orient='split'), chosen_model, max_acc\n    else:\n        return dash.no_update, chosen_model, max_acc\n\n\n@app.callback(\n    [Output('results_graph', 'figure'),\n     Output('run_times_graph', 'figure'),\n     Output('avg_accuracy_graph', 'figure'),\n     Output('pie_graph', 'figure'),\n     Output(\"simulation_avg_acc_text\", \"children\"),\n     Output(\"simulation_length_text\", \"children\"),\n     ],\n    [Input('simulation_data', 'children'),\n     Input('window_slider', 'value'),\n     Input('window_type', 'value'),\n     Input('response_threshold', 'value')])\ndef update_figure(simulation_data, window_size, window_type, response_threshold):\n    if simulation_data == '':\n        return dash.no_update, dash.no_update, dash.no_update, dash.no_update, 0, 0\n\n    data = pd.read_json(simulation_data, orient='split')\n\n    if window_size == 0:\n        window_size = 1\n\n    augmented_df = prepare_data(data, window_size, window_type, response_threshold)\n\n    fig_0, fig_1, fig_2, fig_3 = create_plots(augmented_df)\n\n    simulation_avg_acc_text, simulation_length_text = produce_statistics(augmented_df)\n\n    return fig_0, fig_1, fig_2, fig_3, simulation_avg_acc_text, simulation_length_text\n\n\n# Main\nif __name__ == \"__main__\":\n    app.run_server(debug=True)\n","repo_name":"CptPirx/detaviz","sub_path":"Source/visualisation/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":19384,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"85"}
+{"seq_id":"19344569718","text":"import json\n\nimport pygame\nfrom utils import root_path\n\n\nclass ImageLoader(object):\n    def __init__(self):\n        with open(root_path + '/assets/metadata_sprites.json') as data_file:\n            self.image = pygame.image.load(root_path + '/assets/sprites.png').convert_alpha()\n            self.data = json.load(data_file)[\"frames\"]\n            self.metadata = {}\n            for item in self.data:\n                self.metadata[item[\"filename\"]] = item\n\n    def get_image(self, name):\n        image = self.metadata[name]\n        frame_sizes = image[\"frame\"]\n        cropped = pygame.Surface((frame_sizes[\"w\"], frame_sizes[\"h\"]), pygame.SRCALPHA, 32)\n        cropped.blit(self.image, (0, 0), (frame_sizes[\"x\"], frame_sizes[\"y\"], frame_sizes[\"w\"], frame_sizes[\"h\"]))\n        return cropped\n","repo_name":"OscarGarciaPeinado/flappy_bird","sub_path":"repository/image_loader.py","file_name":"image_loader.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"85"}
+{"seq_id":"5521388706","text":"\"\"\"Derivation of variable `amoc`.\"\"\"\nimport iris\nimport numpy as np\n\nfrom ._baseclass import DerivedVariableBase\n\n\nclass DerivedVariable(DerivedVariableBase):\n    \"\"\"Derivation of variable `amoc`.\"\"\"\n\n    # Required variables\n    required = [{'short_name': 'msftmyz', 'mip': 'Omon'}]\n\n    @staticmethod\n    def calculate(cubes):\n        \"\"\"Compute Atlantic meridional overturning circulation.\n\n        Arguments\n        ---------\n        cubes: iris.cube.CubeList\n            input cubes.\n\n        Returns\n        ---------\n        iris.cube.Cube\n            Output AMOC cube.\n        \"\"\"\n        # 0. 
Load the msftmyz cube.\n cube = cubes.extract_strict(\n iris.Constraint(\n name='ocean_meridional_overturning_mass_streamfunction'))\n\n # 1: find the relevant region\n atlantic_region = 'atlantic_arctic_ocean'\n atl_constraint = iris.Constraint(region=atlantic_region)\n cube = cube.extract(constraint=atl_constraint)\n\n # 2: Remove the shallowest 500m to avoid wind driven mixed layer.\n depth_constraint = iris.Constraint(depth=lambda d: d >= 500.)\n cube = cube.extract(constraint=depth_constraint)\n\n # 3: Find the latitude closest to 26N\n rapid_location = 26.5\n lats = cube.coord('latitude').points\n rapid_index = np.argmin(np.abs(lats - rapid_location))\n rapid_constraint = iris.Constraint(latitude=lats[rapid_index])\n cube = cube.extract(constraint=rapid_constraint)\n\n # 4: find the maximum in the water column along the time axis.\n cube = cube.collapsed(\n ['depth', 'region'],\n iris.analysis.MAX,\n )\n return cube\n","repo_name":"aperezpredictia/ESMValTool_Cordex","sub_path":"esmvalcore/preprocessor/_derive/amoc.py","file_name":"amoc.py","file_ext":"py","file_size_in_byte":1681,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"31225048","text":"import socket\nimport json\nimport CSV_WRITER as csv_writer\nimport Server_UDP as srvUdp\nimport argparse\nimport sys\nimport CSV_Collector as collector\n\ndef check_arg(args=None):\n parser = argparse.ArgumentParser(description='Basic Functions')\n parser.add_argument('-m', '--mode',\n help='Get act',\n required='True',\n default='server')\n\n results = parser.parse_args(args)\n return (results.mode\n )\n\n\n\n\ndef main():\n m = check_arg(sys.argv[1:])\n print('MMM', m)\n if m == 'server':\n srvUdp.run(5000, '0.0.0.0').run()\n elif m == 'report':\n collector.run()\nmain()","repo_name":"JustIdeas/GetWifiInfo","sub_path":"Socket_Server/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"75086186517","text":"from tensorflow import keras\r\n\r\nfrom models import cnn, resnet\r\nfrom dataset import testing_set, training_set\r\n\r\nfrom submit import submit\r\n\r\n# model = resnet.ResNet34()\r\nmodel = cnn.Model()\r\n\r\ninitial_learning_rate = 0.001 # Adam's\r\n# Adam is a replacement optimization algorithm for\r\n# stochastic gradient descent for training deep\r\n# learning models. 
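One update step, as a hedged\r\n# textbook-style sketch (g = gradient, t = step count; not the exact\r\n# Keras internals):\r\n#   m = beta1*m + (1-beta1)*g\r\n#   v = beta2*v + (1-beta2)*g**2\r\n#   w -= lr * (m / (1 - beta1**t)) / (sqrt(v / (1 - beta2**t)) + eps)\r\n# 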
Adam combines the best properties\r\n# of the AdaGrad and RMSProp algorithms to provide\r\n# an optimization algorithm that can handle sparse\r\n# gradients on noisy problems.\r\nmodel.compile(\r\n optimizer=keras.optimizers.Adam(learning_rate=initial_learning_rate),\r\n # The conventional way to store a matrix is to store\r\n # all np of the values, even if most of those values\r\n # are zeros.\r\n # The sparse matrix exploits the structure of the labels\r\n # to store fewer values.\r\n # So not bothering translating heartbeat subtype\r\n # representations to one-hot vector here.\r\n loss=\"sparse_categorical_crossentropy\",\r\n metrics=[\"sparse_categorical_accuracy\"],\r\n)\r\n\r\n# Using a smaller batch size is like using some\r\n# regularization to avoid converging to sharp minimizers.\r\n# The gradients calculated with a small batch size are\r\n# much more noisy than gradients calculated with large\r\n# batch size, so it's easier for the model to escape\r\n# from sharp minimizers, and thus leads to a better\r\n# generalization.\r\n\r\n# As with our case where the model tends to overfit,\r\n# the gradient noise added due to smaller mini-batches\r\n# act as a good regularizer. Decreasing the batch size\r\n# while decreasing the learning rate might lead to a\r\n# better result.\r\n# edit: SIKE\r\n\r\n# https://axon.cs.byu.edu/papers/Wilson.nn03.batch.pdf\r\n# The general inefficiency of batch training for gradient descent learning\r\nBATCH_SIZE = 64\r\nEPOCHS = 30\r\n\r\nfrom tensorflow import math\r\nfrom tensorflow.keras.callbacks import LearningRateScheduler\r\n\r\n# https://stackoverflow.com/questions/39517431/should-we-do-learning-rate-decay-for-adam-optimizer\r\ndrop_rate = 0.5\r\nepochs_drop = 5\r\ndef lr_step_decay(epoch, lr):\r\n return initial_learning_rate * math.pow(drop_rate, math.floor(epoch / epochs_drop))\r\n\r\n# keras.optimizers.schedules.ExponentialDecay(\r\n# initial_learning_rate=initial_learning_rate, \r\n# decay_steps=100_000 / BATCH_SIZE, \r\n# decay_rate=.7, staircase=True\r\n# )\r\n\r\ndecay_rate = 0.4\r\ndef lr_time_based_decay(epoch, lr):\r\n lr = initial_learning_rate * (1.0 / (1.0 + decay_rate * epoch))\r\n return lr\r\n\r\nk = 0.1\r\ndef lr_exp_decay(epoch, lr):\r\n return initial_learning_rate * math.exp(-k * epoch)\r\n\r\ndecay_rate_exp = 0.9\r\ndef lr_exp_decay_2(epoch, lr):\r\n return initial_learning_rate * (decay_rate_exp ** epoch)\r\n\r\nlr_scheduler = LearningRateScheduler(lr_exp_decay_2, verbose=1)\r\n\r\nhistory = model.fit(\r\n training_set.k_x_train,\r\n training_set.k_y_train,\r\n epochs=EPOCHS,\r\n batch_size=BATCH_SIZE,\r\n verbose=0,\r\n callbacks=[lr_scheduler],\r\n)\r\n\r\nprint(history.history)\r\n\r\npredictions = model.predict(testing_set.x_test)\r\n\r\nprint(\"Prediction result is saved to '{}'\".format(submit(predictions)))\r\n","repo_name":"edfus/ECG-Heartbeat-Classification","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3096,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"27203525765","text":"import numpy as np\nimport pandas as pd\nfrom sklearn.preprocessing import LabelEncoder,OneHotEncoder\n\ndataset = pd.read_csv(\"HR_comma_sep.csv\")\nx = dataset.iloc[:,:-1].values\ny = dataset.iloc[:,9].values\ny.ravel(-1)\n\n\nle_x1 = LabelEncoder()\nx[:,7] = le_x1.fit_transform(x[:,7])\nle_x2 = LabelEncoder()\nx[:,8] = le_x1.fit_transform(x[:,8])\nohe = OneHotEncoder(categorical_features = [7,8])\nx = ohe.fit_transform(x).toarray()\n\n\n\n\nfrom 
sklearn.cross_validation import train_test_split\ny = pd.factorize(dataset['left'].values)[0].reshape(-1, 1)\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.20, random_state = 0)\n\nfrom sklearn.preprocessing import StandardScaler\nsc_x = StandardScaler()\nx_train = sc_x.fit_transform(x_train)\nx_test = sc_x.transform(x_test)\nsc_y = StandardScaler()\ny_train = sc_y.fit_transform(y_train)\ny_test = sc_y.fit_transform(y_test)\n\n# Fitting Random Forest Regression to the dataset\nfrom sklearn.ensemble import RandomForestRegressor\nregressor = RandomForestRegressor(n_estimators = 10, random_state = 0)\nregressor.fit(x_train, y_train)\n\n# Predicting a new result\ny_pred = regressor.predict(x_test)\n\n\n\nfrom sklearn.metrics import r2_score\nr2_score(y_test , y_pred)","repo_name":"anantv675/AI-py-test","sub_path":"random_forest.py","file_name":"random_forest.py","file_ext":"py","file_size_in_byte":1207,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"21050586546","text":"shopping_list = [\"milk\", \"pasta\", \"egga\", \"spam\", \"bread\", 'rice']\n\nprint(\"using continue\")\n\nfor item in shopping_list:\n if item == \"spam\":\n break\n print(\"Buy \" + item)\n\nitem_to_find = \"spam3\"\nfound_at = None # None is a cnstant that accounts to nothing\n\nfor index in range(len(shopping_list)):\n if shopping_list[index] == item_to_find:\n found_at = index\n\nif found_at is not None:\n print(f\"{item_to_find} was found at position {found_at}\")\nelse:\n print(f\"{item_to_find} was not found\")\n","repo_name":"ecmpractitioner/python","sub_path":"flow-control/break-statement.py","file_name":"break-statement.py","file_ext":"py","file_size_in_byte":516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"22220292531","text":"'''\n- File Name: process_function.py\n- Writer: Geunyoung kim\n- Update Information: [2022, 04, 23] File Version 1.0\n'''\n\nimport os, re, codecs\n\n# Print Menu\ndef menu():\n print(\"---------------------Menu---------------------\\n\")\n print(\"1. Collect particular character's dialogue\")\n print(\"2. Make a list of characters\")\n print(\"3. Collect stage directions\")\n print(\"4. Collect dialogue included particular word\")\n print(\"5. 
Exit\")\n print(\"----------------------------------------------\")\n\n# Collect particular character's dialogue\ndef collectPartiCharDia():\n print(\"----------------------------------------------\\n\")\n print(\" [Collect particular character's dialogue]\\n\")\n charname = input('Please enter character\\'s name: ')\n\n f = codecs.open('friends101.txt', 'r', encoding = 'utf-8')\n script101 = f.readlines()\n f.close()\n\n Line = []\n for item in script101:\n if re.match(charname + ':.+', item):\n Line += re.match(charname + ':.+', item).group()\n\n dialogue = ''\n for item in Line:\n dialogue += item\n\n if len(dialogue) == 0:\n print('\\nError: Dialogue or Character is not exist...')\n else:\n f = codecs.open(charname + '.txt', 'w', encoding = 'utf-8')\n f.write(dialogue)\n f.close()\n print('\\nText file is created!')\n \n print(\"----------------------------------------------\\n\")\n\n# Make a list of characters\ndef makeListOfChar():\n print(\"----------------------------------------------\\n\")\n print(\" [Make a list of characters]\\n\")\n f = codecs.open('friends101.txt', 'r', encoding = 'utf-8')\n script101 = f.read()\n f.close()\n\n char = list(set(re.findall(r'[A-Z][a-z]+:', script101)))\n\n if len(char) == 0:\n print('\\nError: Dialogue or Character is not exist...')\n else:\n f = codecs.open('character.txt', 'w', encoding = 'utf-8')\n f.write('*** Warning: Please exclude particular word(Ex. All, note, ..., etc) ***\\n\\n')\n for item in char:\n f.write(item[:-1])\n f.write('\\n')\n f.close()\n print('Text file is created!')\n \n print(\"----------------------------------------------\\n\")\n\n# Collect stage directions\ndef collectStageDia():\n print(\"----------------------------------------------\\n\")\n print(\" [Collect stage directions]\\n\")\n\n f = codecs.open('friends101.txt', 'r', encoding = 'utf-8')\n script101 = f.read()\n f.close()\n\n stage_direction = re.findall(r'\\([A-Za-z].+?\\)', script101)\n\n if len(stage_direction) == 0:\n print('\\nError: Dialogue or stage_direction is not exist...')\n else:\n f = codecs.open('stage_direction.txt', 'w', encoding = 'utf-8')\n for item in stage_direction:\n f.write(item)\n f.write('\\n')\n f.close()\n print('Text file is created!')\n \n print(\"----------------------------------------------\\n\")\n\n# Collect dialogue included particular word\ndef collectDiaPartiWord():\n print(\"----------------------------------------------\\n\")\n print(\" [Collect dialogue included particular word]\\n\")\n word = input('Please enter a word: ')\n\n f = codecs.open('friends101.txt', 'r', encoding = 'utf-8')\n script101 = f.readlines()\n f.close()\n\n Lines = []\n\n for item in script101:\n if re.match(r'[A-Z][a-z]+:', item):\n Lines += [item]\n\n dialogue = ''\n for item in Lines:\n if re.search(word, item):\n dialogue += item\n\n if len(dialogue) == 0:\n print('\\nError: Dialogue or word is not exist...')\n else:\n f = codecs.open(word + '.txt', 'w', encoding = 'utf-8')\n f.write(dialogue)\n f.close()\n print('\\nText file is created!')\n \n print(\"----------------------------------------------\\n\")","repo_name":"longroot0702/Python-Project03","sub_path":"process_function.py","file_name":"process_function.py","file_ext":"py","file_size_in_byte":3809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"70875602199","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Feb 23 22:51:08 2019\r\n\r\n@author: akhil\r\n\"\"\"\r\n\r\nimport tensorflow as tf\r\nfrom pyDOE import 
lhs\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport time\r\n\r\n\r\nnp.random.seed(1234)\r\ntf.set_random_seed(1234)\r\ntfpi = tf.constant(np.pi)\r\n\r\niter = 1000\r\ndata = 100 #n\r\nl1 = -1\r\nl2 = 1\r\nl_1=[1,50,50,1]\r\nx1=np.array([[-1],[1]])\r\nu1=np.array([[0],[0]])\r\n\r\n\r\nx2=2*lhs(1,data)-1\r\nweights = []\r\nbiases = []\r\nnum_l_1 = len(l_1)\r\n\r\ndef xavier_init(size):\r\n in_dim = size[0]\r\n out_dim = size[1]\r\n xavier_stddev = np.sqrt(2 / (in_dim + out_dim))\r\n return tf.Variable(tf.truncated_normal([in_dim, out_dim], stddev=xavier_stddev), dtype=tf.float32)\r\n\r\ndef neural_net( X, weights, biases):\r\n num_l_1 = len(weights) + 1\r\n\r\n H = 2.0 * (X - l1) / (l2 - l1) - 1.0\r\n for l in range(0, num_l_1 - 2):\r\n W = weights[l]\r\n b = biases[l]\r\n H = tf.tanh(tf.add(tf.matmul(H, W), b))\r\n W = weights[-1]\r\n b = biases[-1]\r\n Y = tf.add(tf.matmul(H, W), b)\r\n return Y\r\n\r\ndef net_f(x):\r\n u = net_u(x)\r\n u_x = tf.gradients(u, x)[0]\r\n u_xx = tf.gradients(u_x, x)[0]\r\n f = u_xx - u + (tfpi**2 +1)*tf.sin(tfpi*x)\r\n return f\r\n\r\ndef net_u( x):\r\n u = neural_net(x, weights, biases)\r\n return u\r\n\r\nfor l in range(0, num_l_1 - 1):\r\n W = xavier_init(size=[l_1[l], l_1[l + 1]])\r\n b = tf.Variable(tf.zeros([1, l_1[l + 1]], dtype=tf.float32), dtype=tf.float32)\r\n weights.append(W)\r\n biases.append(b)\r\n\r\nsess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True,log_device_placement=True))\r\n\r\nx1_tf = tf.placeholder(tf.float32, shape=[None, x1.shape[1]])\r\nx2_tf = tf.placeholder(tf.float32, shape=[None, x2.shape[1]])\r\nu1_tf = tf.placeholder(tf.float32, shape=[None, u1.shape[1]])\r\n\r\n\r\n\r\nu1_pred = net_u(x1_tf)\r\nf_pred = net_f(x2_tf)\r\n\r\n\r\nloss = tf.reduce_mean(tf.square(u1_tf - u1_pred)) + tf.reduce_mean(tf.square(f_pred))\r\n\r\noptimizer_Adam = tf.train.AdamOptimizer()\r\n\r\ntrain_op_Adam = optimizer_Adam.minimize(loss)\r\n\r\n\r\ninit = tf.global_variables_initializer()\r\nsess.run(init)\r\n\r\ndef predict(X_star):\r\n tf_dict = {x1_tf: X_star}\r\n u_star = sess.run(u1_pred, tf_dict)\r\n # f_star = self.sess.run(self.f_pred, tf_dict)\r\n return u_star\r\n\r\ndef train(nIter):\r\n tf_dict = {x1_tf: x1, u1_tf: u1, x2_tf : x2}\r\n\r\n start_time = time.time()\r\n for it in range(nIter):\r\n sess.run(train_op_Adam, tf_dict)\r\n\r\n # Print\r\n if it % 10 == 0:\r\n elapsed = time.time() - start_time\r\n loss_value = sess.run(loss, tf_dict)\r\n print('It: %d, Loss: %.3e, Time: %.2f' %\r\n (it, loss_value, elapsed))\r\n start_time = time.time()\r\n\r\n\r\n\r\n\r\ntrain(iter)\r\nx_star = np.atleast_2d(np.linspace(-1, 1, 100)).T\r\nu = np.sin(np.pi*x_star)\r\nu_hat = predict(x_star)\r\nerror_u = np.linalg.norm(u - u_hat, 2) / np.linalg.norm(u_hat, 2)\r\nprint('Error u: %e' % (error_u))\r\nplt.figure()\r\nplt.scatter(u, u_hat)\r\nplt.show()","repo_name":"akhibhat/ENM531","sub_path":"Assignment4/Q3Neil.py","file_name":"Q3Neil.py","file_ext":"py","file_size_in_byte":2993,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"28161230162","text":"import sys\n\ndef lcs(a, b):\n width = len(a) + 1\n height = len(b) + 1\n dp = [['' for col in range(width)] for row in range(height)]\n\n for blen in range(1, height):\n for alen in range(1, width):\n if a[alen-1] == b[blen-1]:\n dp[blen][alen] = dp[blen -1][alen - 1] + a[alen - 1]\n continue\n a_lcs, b_lcs = dp[blen - 1][alen], dp[blen][alen - 1]\n if len(a_lcs) > len(b_lcs):\n dp[blen][alen] = a_lcs\n else:\n 
dp[blen][alen] = b_lcs\n\n return len(dp[blen][alen])\n\nif __name__ == \"__main__\":\n n = int(sys.stdin.readline())\n for _ in range (n):\n a, b = map(str, sys.stdin.readline().split())\n print(lcs(a, b))","repo_name":"scv74502/algorithm_study","sub_path":"on_class/algolab_lcs.py","file_name":"algolab_lcs.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"75019560278","text":"from copy import copy\nfrom copy import deepcopy\nfrom functools import lru_cache\nfrom collections import defaultdict\nfrom collections import deque\n\nlines = open('input.txt', 'r').read().splitlines()\n\ninput_hallway = ('.', '.', 'X', '.', 'X', '.', 'X', '.', 'X', '.', '.')\n\ninput_rooms = (\n ('B', 'D', 'D', 'D',),\n ('A', 'C', 'B', 'C',),\n ('A', 'B', 'A', 'B',),\n ('D', 'A', 'C', 'C',),\n)\n\n# test values\n# input_rooms = (\n# ('B', 'D', 'D', 'A',),\n# ('C', 'C', 'B', 'D',),\n# ('B', 'B', 'A', 'C',),\n# ('D', 'A', 'C', 'A',),\n# )\n\ndef print_building(hallway, rooms):\n hallway_str = list(map(lambda h:h[0], hallway))\n print(\"\".join(hallway_str))\n for i in range(4):\n print(f\" {rooms[0][i][0]} {rooms[1][i][0]} {rooms[2][i][0]} {rooms[3][i][0]}\")\n print()\n\n\nCOST_MULTIPLIER = {'A':1, 'B':10, 'C':100, 'D':1000}\nROOM_NR_TO_EXIT_DOOR = [2,4,6,8]\nROOM_NR_TO_MATCHING_AMPH = ['A', 'B', 'C', 'D']\nAMPH_TO_MARCHING_ROOM_NR = {'A':0, 'B':1, 'C':2, 'D':3}\n\n\ndef any_unmatching_amph_bellow(room, amph_i, matching_amph):\n for i in range(amph_i + 1, 4):\n if room[i] != matching_amph:\n return True\n return False\n\n\ndef get_room_empty_index_if_possible_to_enter(room, ampt_type):\n deepest_empty_spot = None\n for amph_i in range(0, 4):\n if room[amph_i] == '.':\n deepest_empty_spot = amph_i\n if deepest_empty_spot != None:\n if not any_unmatching_amph_bellow(room, deepest_empty_spot, ampt_type):\n return deepest_empty_spot\n return None\n\n\ndef hallway_empty_spots_indices(hallway):\n indices = []\n for i, s in enumerate(hallway):\n if s == '.':\n indices.append(i)\n return indices\n\n\ndef return_cost_if_path_possible(hallway, start_i, stop_i):\n assert hallway[stop_i] in ['.', 'X']\n\n step = 1 if start_i <= stop_i else -1\n range_start = start_i+1 if start_i <= stop_i else start_i-1\n range_end = stop_i+1 if start_i <= stop_i else stop_i-1\n cost = 0\n for i in range(range_start, range_end, step):\n if hallway[i] not in ['.', 'X']:\n return None\n cost += 1\n return cost\n\n\ndef move(hallway, rooms, hallway_i, hallway_v, room_i, amph_i, amph_v):\n new_hallway = list(copy(hallway))\n new_hallway[hallway_i] = hallway_v\n new_rooms = []\n for i, r in enumerate(rooms):\n new_room = list(copy(r))\n if i == room_i:\n new_room[amph_i] = amph_v\n new_rooms.append(tuple(new_room))\n return (tuple(new_hallway), tuple(new_rooms))\n\n\ndef moves_from_rooms_to_hallway(hallway, rooms):\n moves = []\n for room_i, room in enumerate(rooms):\n for amph_i, amph in enumerate(room):\n matching_amph = ROOM_NR_TO_MATCHING_AMPH[room_i]\n # empty space\n if amph == '.':\n continue\n # 1. can move it if in wrong room\n # 2. 
can move if in right room but unmatching ampth bellow\n if amph != matching_amph or (amph == matching_amph and any_unmatching_amph_bellow(room, amph_i, matching_amph)):\n exit_door = ROOM_NR_TO_EXIT_DOOR[room_i]\n for hallway_empty_spot_i in hallway_empty_spots_indices(hallway):\n hallway_cost = return_cost_if_path_possible(hallway, exit_door, hallway_empty_spot_i)\n if hallway_cost == None:\n continue\n new_hallway, new_rooms = move(hallway, rooms, hallway_empty_spot_i, amph, room_i, amph_i, '.')\n cost = (amph_i + 1 + hallway_cost) * COST_MULTIPLIER[amph]\n moves.append((new_hallway, new_rooms, cost))\n break\n # sanity check\n if amph == matching_amph and not any_unmatching_amph_bellow(room, amph_i, matching_amph):\n continue\n assert False\n return moves\n\n\ndef moves_from_hallway_to_rooms(hallway, rooms):\n moves = []\n for spot_i, spot in enumerate(hallway):\n if spot not in ['.', 'X']:\n amph = spot\n matching_room_nr = AMPH_TO_MARCHING_ROOM_NR[amph]\n exit_door_nr = ROOM_NR_TO_EXIT_DOOR[matching_room_nr]\n hallway_cost = return_cost_if_path_possible(hallway, spot_i, exit_door_nr)\n if hallway_cost == None:\n continue\n room = rooms[matching_room_nr]\n room_empty_i = get_room_empty_index_if_possible_to_enter(room, amph)\n if room_empty_i == None:\n continue\n new_hallway, new_rooms = move(hallway, rooms, spot_i, '.', matching_room_nr, room_empty_i, amph)\n cost = (hallway_cost + 1 + room_empty_i) * COST_MULTIPLIER[amph]\n moves.append((new_hallway, new_rooms, cost))\n return moves\n\n\n@lru_cache(maxsize=None)\ndef all_possible_moves(hallway, rooms):\n moves = []\n moves_h2r = moves_from_hallway_to_rooms(hallway, rooms)\n moves_r2h = moves_from_rooms_to_hallway(hallway, rooms)\n moves.extend(moves_h2r)\n moves.extend(moves_r2h)\n return moves\n\n\ndef winnig_state(rooms):\n for room_i, room_v in enumerate(rooms):\n expected_amph_type = ROOM_NR_TO_MATCHING_AMPH[room_i]\n amphs_with_matching_type = list(filter(lambda a: a==expected_amph_type, room_v))\n if len(amphs_with_matching_type) != 4:\n return False\n return True\n\n\nprint_building(input_hallway, input_rooms)\n\n\n# moves = all_possible_moves(input_hallway, input_rooms)\n# for move in moves:\n# hallway, rooms, cost = move\n# print(f\"cost: {cost}\")\n# print_building(hallway, rooms)\n# print()\n# print(f\"avilable moves: {len(moves)}\")\n\n\ndef iterative_solution(hallway, rooms):\n moves = all_possible_moves(hallway, rooms)\n moves_to_check = deque()\n moves_to_check.extend(list(map(lambda m: (0, m), moves)))\n\n results_unique = set()\n min_result = 9999999999999\n for i in range(1_000_000):\n current_cost, (new_hallway, new_rooms, additional_cost) = moves_to_check.pop()\n new_cost = current_cost + additional_cost\n if winnig_state(new_rooms):\n min_result = min(min_result, new_cost)\n results_unique.add(new_cost)\n else:\n moves_to_add = all_possible_moves(new_hallway, new_rooms)\n moves_to_check.extend(list(map(lambda m: (new_cost, m), moves_to_add)))\n\n if (i % 10_000 == 0):\n print(f\"{i} {len(moves_to_check)} {min_result}\")\n return (min_result, results_unique)\n\n\n# stats = defaultdict(lambda:0)\n\n# @lru_cache(maxsize=None)\n# def recursive_solution(hallway, rooms):\n# if winnig_state(rooms):\n# stats['winning'] += 1\n# return [0]\n \n# stats['tick'] += 1\n# if stats['tick'] % 10_000 == 0:\n# print(f\"{stats['tick']} {stats['winning'] }\")\n\n# all_possible_costs = []\n\n# moves = all_possible_moves(hallway, rooms)\n# for move in moves:\n# new_hallway, new_rooms, additional_cost = move\n# new_costs = 
recursive_solution(new_hallway, new_rooms)\n#         all_possible_costs.extend(list(map(lambda c: c + additional_cost, new_costs)))\n\n#     return all_possible_costs\n\nmin_result, results_unique = iterative_solution(input_hallway, input_rooms)\nprint(min_result)\nprint(f\"Unique results = {results_unique}\")\n\n# recursive_solution is commented out above, so these stale calls stay\n# disabled as well (they would raise NameError if executed):\n# min_result = recursive_solution(input_hallway, input_rooms)\n# print(len(all_possible_costs))\n# print(min(all_possible_costs))\n","repo_name":"lenrok258/advent-of-code-2021","sub_path":"023/script2.py","file_name":"script2.py","file_ext":"py","file_size_in_byte":7367,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"}
+{"seq_id":"30885016731","text":"def solution(operations):\n    deq = []\n    for operation in operations:\n        op, num = operation.split(\" \")\n        if op == \"I\":\n            deq.append(int(num))\n            deq.sort()\n        else:\n            if num == \"-1\":\n                deq = deq[1:]\n            else:\n                deq = deq[:-1]\n\n    return [max(deq),min(deq)] if deq else [0,0] ","repo_name":"hissue/Program-Solution","sub_path":"프로그래머스/lv3/42628. 이중우선순위큐/이중우선순위큐.py","file_name":"이중우선순위큐.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"}
+{"seq_id":"37428871961","text":"from typing import List\nfrom math import sqrt\n\n\ndef show_menu():\n    print(\"1.Read the data\")\n    print(\"2.Find the longest subsequence in which all numbers are perfect squares.\")\n    print(\"3.Find the longest subsequence in which all numbers are prime\")\n    print(\"4.Find the longest subsequence in which all numbers are palindromes\")\n    print(\"5.Exit.\")\n\n\ndef get_perfect_squares(number):\n    \"\"\"\n    Checks whether a number is a perfect square\n    :param number: An input number\n    :return: Returns True or False, depending on whether the number is a perfect square or not\n    \"\"\"\n    # Returns True if the number is a perfect square, False otherwise\n    if sqrt(number) == int(sqrt(number)):\n        return True\n    return False\n\n\ndef convert_list_str_to_int(lst):\n    \"\"\"\n    Converts the str elements into int elements\n    :param lst: The list to convert\n    :return: Returns the list with its elements converted to int\n    \"\"\"\n    list_int = []\n    for i in lst:\n        list_int.append(int(i))\n\n    return list_int\n\n\ndef read_list():\n    lst = []\n    lst_str = input(\"Enter the numbers separated by a space: \")\n    lst_str_split = lst_str.split(' ')\n    for number in lst_str_split:\n        lst.append(number)\n\n    return lst\n\n\ndef get_longest_all_perfect_squares(lst):\n    \"\"\"\n    Finds the longest subsequence in which all numbers are perfect squares\n    :param lst: The list of numbers entered from the keyboard (natural numbers)\n    :return: Returns the longest subsequence in which all numbers are perfect squares (a list)\n    \"\"\"\n    length = 0\n    list_copy = []\n    rezult_list = []\n    mx_length = 0\n    for i in range(len(lst)):\n        if get_perfect_squares(lst[i]) == True:\n            list_copy.append(lst[i])\n            length = length + 1\n        elif length > mx_length:\n            mx_length = length\n            rezult_list = list_copy[:]\n            length = 0\n            list_copy.clear()\n        else:\n            length = 0\n            list_copy.clear()\n    if length > mx_length:\n        rezult_list = list_copy[:]\n    return rezult_list\n\n\ndef test_longest_perfect_squares():\n    assert get_longest_all_perfect_squares([5, 4, 9, 16, 3, 232, 36, 49, 81, 9]) == [36, 49, 81, 9]\n    assert get_longest_all_perfect_squares([5, 7, 8, 4, 4, 9, 100, 121]) == [4, 4, 9, 100, 121]\n    assert get_longest_all_perfect_squares([7, 8, 9, 9, 81, 121, 36, 7, 5, 5, 4, 4, 4, 9, 81, 36, 9]) == [4, 4, 4, 9,\n                                                                                                          81, 36, 9]\n\n\ndef is_prime(n):\n    # This function returns whether a number is prime\n    # parameter n is the given number to check for primality\n    if n < 2:\n        return False\n    if n == 2:\n        return True\n\n    for i in range(2, n):\n        if n % i == 0:\n            return False\n\n    return True\n\n\ndef get_longest_all_primes(lst: List[int]):\n    \"\"\"\n    Finds the longest subsequence in which all numbers are prime\n    :param lst: The list of numbers entered from the keyboard (natural numbers)\n    :return: Returns the longest subsequence in which all numbers are prime (a list)\n    \"\"\"\n\n    length = 0\n    list_copy = []\n    rezult_list = []\n    mx_length = 0\n    for i in range(len(lst)):\n        if is_prime(lst[i]) == True:\n            list_copy.append(lst[i])\n            length = length + 1\n        elif length > mx_length:\n            mx_length = length\n            rezult_list = list_copy[:]\n            length = 0\n            list_copy.clear()\n        else:\n            length = 0\n            list_copy.clear()\n    if length > mx_length:\n        rezult_list = list_copy[:]\n    return rezult_list\n\n\ndef test_primes_subsec():\n    assert get_longest_all_primes([9, 5, 3, 7, 4, 7, 5]) == [5, 3, 7]\n    assert get_longest_all_primes([10, 5, 3, 7, 2, 9, 9, 4]) == [5, 3, 7, 2]\n    assert get_longest_all_primes([11, 5, 3, 7, 2, 9, 4, 4]) == [11, 5, 3, 7, 2]\n\n\ndef is_palindrome(n):\n    # Determines whether a number is a palindrome or not\n    copie_n = n\n    oglindit = 0\n    while n:\n        oglindit = oglindit * 10 + n % 10\n        n = n // 10\n    if( copie_n == oglindit ):\n        return True\n    return False\n\n\ndef get_longest_all_palindromes(lst: List[int]):\n    \"\"\"\n    Finds the longest subsequence in which all numbers are palindromes\n    :param lst: The list of numbers entered from the keyboard (natural numbers)\n    :return: Returns the longest subsequence in which all numbers are palindromes (a list)\n    \"\"\"\n\n    length = 0\n    list_copy = []\n    rezult_list = []\n    mx_length = 0\n    for i in range(len(lst)):\n        if is_palindrome(lst[i]) == True:\n            list_copy.append(lst[i])\n            length = length + 1\n        elif length > mx_length:\n            mx_length = length\n            rezult_list = list_copy[:]\n            length = 0\n            list_copy.clear()\n        else:\n            length = 0\n            list_copy.clear()\n    if length > mx_length:\n        rezult_list = list_copy[:]\n    return rezult_list\n\n\ndef test_get_longest_all_palindromes():\n    assert get_longest_all_palindromes([11,232,122,5,7,9,121]) == [5,7,9,121]\n    assert get_longest_all_palindromes([1333,44,55,345,1,11,121,232,131]) == [1,11,121,232,131]\n    assert get_longest_all_palindromes([34,56,999,121,131,12334,1221]) == [999,121,131]\n\n\ndef main():\n    lst = []\n    while True:\n        show_menu()\n        optiune = (input(\"Option: \"))\n        if optiune == '1':\n            lst = read_list()\n        if optiune == '2':\n            int_list = convert_list_str_to_int(lst)\n            squares = get_longest_all_perfect_squares(int_list)\n            print(f\"The longest subsequence of perfect squares is: {squares}\")\n        if optiune == '3':\n            int2_list = convert_list_str_to_int(lst)\n            print(f\"The longest subsequence of prime numbers is: {get_longest_all_primes(int2_list)}\")\n        if optiune == '4':\n            int3_list = convert_list_str_to_int(lst)\n            print(f\"The longest subsequence of palindromes is: {get_longest_all_palindromes(int3_list)}\")\n        if optiune == '5':\n            break\n\n\nif __name__ == '__main__':\n    test_primes_subsec()\n    test_longest_perfect_squares()\n    test_get_longest_all_palindromes()\n    main()\n","repo_name":"AP-MI-2021/lab-3-Sebic29","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"}
+{"seq_id":"43578723227","text":"def ask_ok(prompt,retries=4,reminder='Please try again!'):\n while True:\n ok = input(prompt)\n if ok in ('ye',\"y\",\"yes\"):\n return True\n if ok in ('n','no','nop'):\n return False\n retries = retries -1\n if retries<0:\n raise ValueError('invaild use response')\n print(reminder)\n \n ask_ok('Do you really want to quit?')","repo_name":"imlty/python-learn","sub_path":"learn/def2.py","file_name":"def2.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"40490516982","text":"import names\nimport random\n\nfrom django import template\n\nregister = template.Library()\n\n@register.simple_tag\ndef get_random_name():\n return names.get_first_name(gender='female')\n\n@register.simple_tag\ndef get_random_extension():\n return random.randint(1000,9999)\n\n@register.simple_tag\ndef is_ad():\n return \"ad\" if random.uniform(0.0, 1.0) <= 0.01 else \"\"\n\n@register.simple_tag\ndef random_icon():\n icons = [\n 'aperture',\n 'basket',\n 'bar-chart',\n 'bar-chart',\n 'book',\n 'bug',\n 'briefcase',\n 'bullhorn',\n 'camera-slr',\n 'cart',\n 'cart',\n 'chat',\n 'clipboard',\n 'clock',\n 'cloud',\n 'cog',\n 'command',\n 'dollar',\n 'droplet',\n 'envelope-closed',\n 'eyedropper',\n 'euro',\n 'eye',\n 'fork',\n 'globe',\n 'graph',\n 'headphones',\n 'heart',\n 'home',\n 'image',\n 'infinity',\n 'info',\n 'key',\n 'location',\n 'map',\n 'map-marker',\n 'microphone',\n 'monitor',\n 'moon',\n 'musical-note',\n 'paperclip',\n 'pencil',\n 'people',\n 'person',\n 'phone',\n 'pie-chart',\n 'pin',\n 'print',\n 'pulse',\n 'puzzle-piece',\n 'rain',\n 'script',\n 'shield',\n 'signal',\n 'signpost',\n 'star',\n 'sun',\n 'tablet',\n 'tag',\n 'tags',\n 'target',\n 'terminal',\n 'video',\n 'wrench',\n 'yen',\n ]\n return random.choice(icons)","repo_name":"eventphone/guru3","sub_path":"core/templatetags/gelbeseiten.py","file_name":"gelbeseiten.py","file_ext":"py","file_size_in_byte":1642,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"85"} +{"seq_id":"4975740948","text":"from datetime import date, timedelta\n\nimport Library_Item\n\n# creating list of all book information\nbooks_catalog = [Library_Item.Books('Wool', 'on shelf', 1, 'null', 'Hugh Howey'),\n Library_Item.Books('Wicked', 'on shelf', 10, 'null', 'Gregory Maguire'),\n Library_Item.Books('Moon Palace', 'on shelf', 3, 'null', 'Paul Auster'),\n Library_Item.Books('Cats', 'on shelf', 10, 'null', 'Lisa Covey')]\n\n# creating list of all movie information\nmovies_catalog = [Library_Item.Movies('Footloose', 'on shelf', 10, 'null', 'Herbert Ross', '90min'),\n Library_Item.Movies('Braveheart', 'on shelf', 10, 'null', 'Mel Gibson', '178min'),\n Library_Item.Movies('Starman', 'on shelf', 7, 'null', 'John Carpenter', '135min'),\n Library_Item.Movies('Suicide Squad', 'on shelf', 4, 'null', 'David Ayer', '120min')]\n\n# creating list of all media information\nmedia_catalog = [Library_Item.Media('Load', 'on shelf', 10, 'null', 'Metallica'),\n Library_Item.Media('Aqualung', 'on shelf', 8, 'null', 'Jethro Tull'),\n Library_Item.Media('Cocky', 'on shelf', 2, 'null', 'Kid Rock'),\n Library_Item.Media('Dark Side of the Moon', 'on shelf', 6, 'null', 'Pink Floyd')]\n\ninventory = [books_catalog, movies_catalog, media_catalog]\n\n\n# viewing inventory function\n\ndef view_inventory(prompt):\n if prompt not in (1, 2, 3, 4):\n print(\"Please enter a valid number (1-4)\")\n elif prompt == 1:\n for book in 
books_catalog:\n print(str(book))\n elif prompt == 2:\n for movie in movies_catalog:\n print(str(movie))\n elif prompt == 3:\n for media in media_catalog:\n print(str(media))\n elif prompt == 4:\n for item in inventory:\n for i in item:\n print(str(i))\n\n\n# searching author funct\ndef search_author():\n \"\"\"\n Allows user to search for book by name of author. Needs entire name, first and last\n \"\"\"\n author = input(\"Enter the name of the author: \")\n books_by_author = []\n for book in books_catalog:\n if book.author.lower() == author.lower():\n books_by_author.append(book)\n for book in books_by_author:\n print()\n print(str(book))\n if len(books_by_author) < 1:\n print(\"No books by that author were found.\")\n\n\n# searching title function ## currently printing string instead of f string\ndef search_title():\n search_key = input(\"Enter keyword: \")\n works_by_keyword = []\n\n for item in inventory:\n for i in item:\n if search_key.lower() in i.title.lower():\n works_by_keyword.append(str(i))\n print(str(works_by_keyword))\n return works_by_keyword\n\n\n# check status function\ndef check_status(item):\n return item.status\n\n\n# adding new inventory function\ndef add_inventory():\n \"\"\"Will prompt the user for the title and creator for a new item, then create that item\n with default status of 'on shelf' and condition of 10\"\"\"\n while True:\n add_item = int(input(\"1. Book, 2. Movie, or 3. Media\\n>>>\"))\n if add_item == 1:\n new_title = input(\"Enter the title of the book: \")\n new_author = input(\"Enter the Author of the book: \")\n books_catalog.append(Library_Item.Books(new_title, \"on shelf\", 10, \"null\", new_author))\n break\n elif add_item == 2:\n new_title = input(\"Enter the title of the movie: \")\n new_director = input(\"Enter the director of the movie: \")\n new_runtime = int(input(\"Enter the runtime of the movie: \"))\n movies_catalog.append(Library_Item.Movies(new_title, \"on shelf\", 10, \"null\", new_director, new_runtime))\n break\n elif add_item == 3:\n new_title = input(\"Enter the title of the media: \")\n new_artist = input(\"Enter the artist's name: \")\n media_catalog.append(Library_Item.Media(new_title, \"on shelf\", 10, \"null\", new_artist))\n break\n else:\n print(\"Enter a valid number 1, 2, or 3: \")\n\n\n# search director function (add into search author)\ndef search_director():\n director = input(\"Enter the name of the director: \")\n movies_by_director = []\n for movie in movies_catalog:\n if movie.director.lower() == director.lower():\n movies_by_director.append(movie)\n for movie in movies_by_director:\n print()\n print(str(movie))\n if len(movies_by_director) < 1:\n print(\"No movies by that director were found.\")\n\n\n# search artist function (add into search author)\ndef search_artist():\n artist = input(\"Enter the name of the artist: \")\n media_by_artist = []\n for media in media_catalog:\n if media.artist.lower() == artist.lower():\n media_by_artist.append(media)\n for media in media_by_artist:\n print()\n print(str(media))\n if len(media_by_artist) < 1:\n print(\"No media by that artist was found.\")\n\n\n# check condition of items in catalog/recycle and replace if condition is poor\ndef check_condition(returned_item):\n if returned_item.condition <= 1:\n print(f\"{returned_item.title} was recycled due to poor condition. 
Consider replacing.\\n\")\n for item in inventory:\n for i in item:\n if i.title == returned_item.title:\n item.remove(returned_item)\n else:\n returned_item.status = \"on shelf\"\n returned_item.due_date = \"null\"\n print(f\"{returned_item.title} returned successfully.\\n\")\n\n\n# return item to inventory\ndef return_item(item):\n \"\"\"\n This function returns items to inventory list\n :param item: item: name of item\n \"\"\"\n if item.status.lower() == \"out\":\n check_condition(item)\n else:\n print(\"\\nItem is already returned to library.\\n\")\n\n\n# final checkout set return date/loop to ask if more than 1 item\ndef checkout(item):\n \"\"\"\n this function checks out an item\n :param item: name of item\n :return: none, name instead prints\n \"\"\"\n if item.status.lower() == \"on shelf\":\n item.due_date = date.today() + timedelta(days=14)\n item.status = \"out\"\n item.condition -= 1\n print(\"\\nYour item was checked out.\")\n print(str(item))\n\n else:\n print(\"This item is already checked out.\")\n\n\n# silly shush function\ndef quiet_down():\n print(\"\\nBryce says: Quiet down!\\n\")\n\n\n# printing menu options\ndef main():\n while True:\n prompt = int(input(\"Welcome to The Final Four Library! What would you like to do? \\n1. View Books \\n2. View \"\n \"Movies \\n3. View Media \\n4. View All \\n5. Search by Keyword \\n6. Search by \"\n \"Author/Director/Artist\\n7. Checkout Item \\n8. Return Item \\n9. \"\n \"Quit \\n10.Add Inventory(Employees Only!)\\n>>> \"))\n # view books\n if prompt == 1:\n view_inventory(1)\n # view movies\n elif prompt == 2:\n view_inventory(2)\n # view media\n elif prompt == 3:\n view_inventory(3)\n # view all items\n elif prompt == 4:\n view_inventory(4)\n # search by keyword\n elif prompt == 5:\n search_title()\n # secret hidden function\n elif prompt == 0:\n quiet_down()\n # search by author/director/artist functions\n elif prompt == 6:\n while True:\n user_ask = input(\"Are you looking for an Author, Director, or Artist? \")\n if user_ask.lower() == \"artist\":\n search_artist()\n break\n elif user_ask.lower() == \"director\":\n search_director()\n break\n elif user_ask.lower() == \"author\":\n search_author()\n break\n else:\n print(\"Improper input. Please choose between: Artist, Director, or Author\")\n # checkout item function - lower condition\n elif prompt == 7:\n while True:\n checkout_title = input('Enter the title of the item you would like to check out: ')\n checkout_item = None\n for item in inventory:\n for i in item:\n if i.title.lower() == checkout_title.lower():\n checkout_item = i\n\n if checkout_item != None:\n checkout(checkout_item)\n break\n else:\n print(\"Please make sure you entered a valid title\")\n # return item function\n elif prompt == 8:\n while True:\n return_title = input('Enter the title of the item you are returning: ')\n item_to_return = \"\"\n for item in inventory:\n for i in item:\n if i.title.lower() == return_title.lower():\n item_to_return = i\n\n if item_to_return != \"\":\n return_item(item_to_return)\n break\n else:\n print(\"Please make sure you entered a valid title \\n\")\n elif prompt == 10:\n add_inventory()\n # quit\n elif prompt == 9:\n print(\"Goodbye!\")\n break\n else:\n print(\"Improper input. 
Please choose from the list (1,2,3,4,5,6,7,8,9)\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"RAlexanderKennedy/Python-Library-Project","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"15365368393","text":"from django.contrib.auth.decorators import login_required\nfrom django.http import Http404\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django.contrib.auth import authenticate, login, logout\nfrom .models import Movies, Categories\nfrom .forms import MoviesForm, LoginForm\n\n\n# Create your views here.\n\n\ndef log_in(request):\n # Diccionario que almacena los datos que enviamos a la vista\n form = LoginForm(\n request.POST or None\n )\n context = {'message': None, 'form': form}\n if request.POST and form.is_valid():\n # Verificar credenciales\n # Devuelve un objeto User si las credenciales son válidas.\n # Si las credenciales no son válidas, devuelve None\n user = authenticate(**form.cleaned_data)\n if user is not None:\n # Verificar si el usuario esta activo\n if user.is_active:\n # Adjuntar usuario autenticado a la sesión actual\n login(request, user)\n # Redireccionar a una vista utilizando el nombre de la url\n return redirect('movies:home')\n else:\n context['message'] = 'El usuario ha sido desactivado'\n else:\n context['message'] = 'Usuario o contraseña incorrecta'\n return render(request, 'movies/login.html', context)\n\n\n# decorador para restringir el acceso a solo usuarios autenticados\n@login_required\ndef log_out(request):\n logout(request)\n return redirect('movies:log-in')\n\n\n@login_required\ndef movie_list(request):\n movies = Movies.objects.all()\n return render(request, 'movies/index.html', {'movies': movies})\n\n\n@login_required\ndef movie_detail(request, pk):\n try:\n # recuperamos el objeto mediante la\n # API de abstracción de base de datos\n # que ofrece Django\n m = Movies.objects.get(pk=pk)\n except Movies.DoesNotExist:\n raise Http404(\"Esta pelicuala no existe\")\n\n # version con shortcuts de django, equivalente al codigo anterior\n # m = get_object_or_404(Movies, pk=pk)\n return render(request, 'movies/detail.html', {'movie': m})\n\n\n@login_required\ndef movie_create(request, **kwargs):\n # Intanciamos la clase form\n # si el diccionario request.POST no esta vacio\n # la instancia se creara con dichos datos, sino estara vacia\n form = MoviesForm(\n request.POST or None,\n request.FILES or None\n )\n # Comprobamos que la peticion es del motodo POST\n # y que el formulario es valido\n if request.POST and form.is_valid():\n # Guardamos el objeto\n form.save()\n # redirigir a una nueva URL\n return redirect('movies:home')\n return render(request, 'movies/form.html', {'form': form})\n\n\n@login_required\ndef movie_update(request, **kwargs):\n # recuperamos el objeto a actualizar\n movie = Movies.objects.get(pk=kwargs.get('pk'))\n # inicializamos el formulario con el objeto recuperado\n form = MoviesForm(\n request.POST or None,\n instance=movie\n )\n if request.POST and form.is_valid():\n form.save()\n return redirect('movies:home')\n return render(request, 'movies/form.html', {'form': form})\n\n\n@login_required\ndef movie_delete(request, **kwargs):\n movie = Movies.objects.get(pk=kwargs.get('pk'))\n movie.delete()\n return redirect('movies:home')\n\n\n@login_required\ndef category_list(request):\n categories = Categories.objects.all()\n return render(request, 
'movies/category/category_list.html', {'categories': categories})","repo_name":"Agckygo99/simple-crud-django","sub_path":"apps/movies/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3515,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"18200084839","text":"import urllib.request\nimport urllib.parse\n\nx = urllib.request.urlopen('https://www.google.com')\n# print(x.read())\n\nurl = 'http://www.pythonprogramming.net'\nvalues = {'s' : 'basic',\n 'submit' : 'search'}\n\ndata = urllib.parse.urlencode(values) # encodes the values as it should be in the url\ndata = data.encode('utf-8') # encodes the data in the utf-8 form\n\nreq = urllib.request.Request(url, data) # it passes the encoded data to the url\nresp = urllib.request.urlopen(req) # visiting the url\nrespData = resp.read()\n\n# print(respData)\n\ntry:\n x = urllib.request.urlopen('https://www.google.com/search?q=test')\n print(x.read())\n\nexcept Exception as e:\n print(str(e))\n\ntry:\n url1 = urllib.request.urlopen('https://www.google.com/search?q=test')\n headers = {}\n headers['User-Agent'] = 'Mozilla/4.0'\n #'Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17'\n # from the User-Agent, the website will not be able to know that we are using a program on their website\n # ATTEMPT TO FOOL GOOGLE\n req1 = urllib.request.Request(url, headers=headers)\n resp1 = urllib.request.urlopen(req1)\n respData1 = resp1.read()\n saveFile = open('headers.txt', 'w')\n saveFile.write(str(respData))\n saveFile.close()\n\nexcept Exception as e:\n print(str(e))","repo_name":"gagan1510/PythonBasics","sub_path":"PythonBasics/UrllibModule.py","file_name":"UrllibModule.py","file_ext":"py","file_size_in_byte":1330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"70576246352","text":"import os\nfrom os import path\nimport subprocess\n\n\ndef describe(directory=None):\n if directory:\n cwd = os.getcwd()\n os.chdir(directory)\n\n ret = subprocess.run(\n ['git', 'show', '--no-patch', '--date=short', '--format=%cd.%h'],\n stdin=subprocess.PIPE, stdout=subprocess.PIPE, universal_newlines=True)\n\n if directory:\n os.chdir(cwd)\n\n ret.check_returncode()\n return ret.stdout.strip().replace('-', '')\n\n\ndef clone(url, directory):\n return_code = subprocess.call(['git', 'clone', url, directory])\n if return_code != 0:\n raise Exception('Failed to clone {}'.format(url))\n\n\ndef sync(cache_dir, repo_url, message=None):\n cwd = os.getcwd()\n devnull = open(os.devnull, 'wb')\n\n # Ensure git-sync tool is available.\n git_sync_dir = path.join(cache_dir, 'git-sync')\n git_sync_exec = path.join(git_sync_dir, 'git-sync')\n if not path.exists(git_sync_dir):\n os.makedirs(git_sync_dir)\n clone('https://github.com/simonthum/git-sync.git', git_sync_dir)\n else:\n os.chdir(git_sync_dir)\n subprocess.call(['git', 'pull', 'origin', 'master'], stdout=devnull, stderr=devnull)\n\n repo_name = path.basename(path.normpath(repo_url))\n repo_dir = path.join(cache_dir, repo_name)\n if not path.exists(repo_dir):\n os.makedirs(repo_dir)\n clone(repo_url, repo_dir)\n\n os.chdir(repo_dir)\n subprocess.call(['git', 'config', '--bool', 'branch.master.sync', 'true'])\n subprocess.call(['git', 'config', '--bool', 'branch.master.syncNewFiles', 'true'])\n if message:\n subprocess.call(['git', 'config', 'branch.master.syncCommitMsg', message])\n\n os.chdir(repo_dir)\n return_code = 
subprocess.call([git_sync_exec])\n if return_code != 0:\n raise Exception('failed to sync {}'.format(repo_name))\n\n os.chdir(cwd)\n\n return repo_dir\n","repo_name":"openSUSE/openSUSE-release-tools","sub_path":"osclib/git.py","file_name":"git.py","file_ext":"py","file_size_in_byte":1877,"program_lang":"python","lang":"en","doc_type":"code","stars":53,"dataset":"github-code","pt":"83"} +{"seq_id":"7739433398","text":"import pickle\n\n# pickle files must be opened in binary mode under Python 3\nmodel = pickle.load(open(\"../data/model.dat\", \"rb\"))\nvectorizer = pickle.load(open(\"../data/vectorizer.pickle\", \"rb\"))\nlabel_encoder = pickle.load(open(\"../data/label_encoder.pickle\", \"rb\"))\n\nlines = [line.replace(\"\\n\", \"\") for line in open('../data/un_labelled.csv')]\nwith open(\"../data/new_labelled.csv\", 'w') as wr:\n wr.write(\"Sentences,label\\n\")\n\n for line in lines:\n vector = vectorizer.transform([line])\n label = label_encoder.inverse_transform(model.predict(vector[0]))[0]\n wr.write(line + \",\" + str(label) + \"\\n\")\n","repo_name":"mudasirmohd/emotion-minner","sub_path":"svm_trainer/infer.py","file_name":"infer.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"70291691791","text":"# coding: utf-8\n\nfrom __future__ import absolute_import\nfrom datetime import date, datetime # noqa: F401\n\nfrom typing import List, Dict # noqa: F401\n\nfrom xcube_hub.models.base_model_ import Model\nfrom xcube_hub import util\n\n\nclass DataStoreStoreParams(Model):\n \"\"\"NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).\n\n Do not edit the class manually.\n \"\"\"\n\n def __init__(self, api_url=None): # noqa: E501\n \"\"\"DataStoreStoreParams - a model defined in OpenAPI\n\n :param api_url: The api_url of this DataStoreStoreParams. # noqa: E501\n :type api_url: str\n \"\"\"\n self.openapi_types = {\n 'api_url': str\n }\n\n self.attribute_map = {\n 'api_url': 'api_url'\n }\n\n self._api_url = api_url\n\n @classmethod\n def from_dict(cls, dikt) -> 'DataStoreStoreParams':\n \"\"\"Returns the dict as a model\n\n :param dikt: A dict.\n :type: dict\n :return: The DataStore_store_params of this DataStoreStoreParams. 
# noqa: E501\n :rtype: DataStoreStoreParams\n \"\"\"\n return util.deserialize_model(dikt, cls)\n\n @property\n def api_url(self):\n \"\"\"Gets the api_url of this DataStoreStoreParams.\n\n\n :return: The api_url of this DataStoreStoreParams.\n :rtype: str\n \"\"\"\n return self._api_url\n\n @api_url.setter\n def api_url(self, api_url):\n \"\"\"Sets the api_url of this DataStoreStoreParams.\n\n\n :param api_url: The api_url of this DataStoreStoreParams.\n :type api_url: str\n \"\"\"\n\n self._api_url = api_url\n","repo_name":"bcdev/xcube-hub","sub_path":"xcube_hub/models/data_store_store_params.py","file_name":"data_store_store_params.py","file_ext":"py","file_size_in_byte":1633,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"83"} +{"seq_id":"42426004245","text":"from util import *\n\n\n@apply\ndef apply(self, offset):\n from axiom.algebra.sum.limits.subs.offset import limits_subs\n return Equal(self, limits_subs(Lamda, self, offset, simplify=False), evaluate=False)\n\n\n@prove\ndef prove(Eq):\n from axiom import algebra\n\n a, b, i, d = Symbol(integer=True)\n f = Function(integer=True)\n Eq << apply(Lamda[i:a:b](f(i)), d)\n\n i = Symbol(domain=Range(b - a))\n Eq << algebra.eq.given.eq.getitem.apply(Eq[0], i)\n\n \n\n\nif __name__ == '__main__':\n run()\n# created on 2021-12-29\n","repo_name":"cosmosZhou/sympy","sub_path":"axiom/algebra/lamda/limits/subs/offset.py","file_name":"offset.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"83"} +{"seq_id":"72435155472","text":"from api import ma\nfrom api.items.models import Items,Stock\n\n\nclass ItemsSchema(ma.ModelSchema):\n class Meta:\n model = Items\n\n\nitem_schema = ItemsSchema(many=False)\nitems_schema = ItemsSchema(many=True)\n\n\nclass StockSchema(ma.ModelSchema):\n class Meta:\n model = Stock\n\n item_name = ma.Function(lambda obj: obj.item.name)\n\n\nstock_schema = StockSchema(many=False)\nstocks_schema = StockSchema(many=True)\n","repo_name":"jukemal/backend","sub_path":"api/items/Schema.py","file_name":"Schema.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"27632064264","text":"import unittest\nfrom mppsolar.protocols.protocol_helpers import Hex2Int, Hex2Str\nfrom mppsolar.protocols.protocol_helpers import BigHex2Float\n\n\nclass test_protocol_helpers(unittest.TestCase):\n def test_Hex2Int(self):\n \"\"\" test the Hex2Int\"\"\"\n result = Hex2Int(bytes.fromhex(\"64\"))\n expected = 100\n # print(result)\n self.assertEqual(result, expected)\n\n def test_Hex2Str(self):\n \"\"\" test the Hex2Str\"\"\"\n result = Hex2Str(bytes.fromhex(\"AE0212\"))\n expected = \"ae0212\"\n # print(result)\n self.assertEqual(result, expected)\n\n def test_BigHex2Float(self):\n \"\"\" test BigHex2Float \"\"\"\n hexString = b\"\\x00\\x03\\xcb@\"\n result = BigHex2Float(hexString)\n expected = 248640\n # print(result)\n self.assertEqual(result, expected)\n","repo_name":"jblance/mpp-solar","sub_path":"tests/mppsolar_tests/test_protocol_helpers.py","file_name":"test_protocol_helpers.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","stars":290,"dataset":"github-code","pt":"83"} +{"seq_id":"40431097716","text":"\nimport streamlit as st\nimport pandas as pd\nimport numpy as np\nimport pickle \nimport json\nfrom PIL import Image\n\n# load all files\n\nwith open('ab_model.pkl', 
'rb') as file_1:\n ab_model = pickle.load(file_1)\n\n# Pre-processing\nwith open('scale_feat.pkl', 'rb') as file_2:\n scale_feat = pickle.load(file_2)\n\nwith open('winsoriser.pkl', 'rb') as file_3:\n winsoriser = pickle.load(file_3)\n \n# List Numeric & Category\nwith open('num_cols_sc.txt', 'r') as file_4:\n num_cols_sc = json.load(file_4)\n \nwith open('num_cols_nsc.txt', 'r') as file_5:\n num_cols_nsc = json.load(file_5)\n\n\ndef run():\n with st.form(key='from_diabetes'):\n \n st.title('Prediction Page')\n\n # sub header\n st.subheader('We calculate your metrics to calculate diabetes')\n\n # add pic\n image = Image.open('diabetes2.png')\n st.image(image)\n st.write('Columns below are parameter we would like to use to predict if a patient have a diabetes or not.')\n st.write('*`Please fill columns below to predict`*')\n\n gender = st.selectbox('Gender', [0,1], help='0 = Female, 1 = Male')\n\n age = st.number_input('Age', min_value=25, max_value=80,\n value=45, step=1, help='Usia Pasien')\n \n hypertension = st.number_input('Hypertension', min_value=0, max_value=1 , value=0,\n step=1, help='have hypertension?')\n\n heart_disease = st.number_input('Heart Disease', min_value=0, max_value=1 , value=0,\n step=1, help='have heart disease?')\n\n bmi = st.number_input('Body Mass Index', min_value=5, max_value=80, \n value=30, step=5, help='Amount of BMI')\n \n HbA1c_level = st.number_input('Hemogloblin Level', min_value= 3, max_value= 10, \n value= 6, help='Level of Hemogloblin 3-10')\n\n blood_glucose_level = st.slider('Glucose Level', 0, 400, 150, step=10, \n help='Glucose amount in blood stream')\n \n\n st.markdown('---')\n submitted = st.form_submit_button('Predict')\n\n data_inf = {\n 'age': age,\n 'bmi': bmi,\n 'hemoglobin_level': HbA1c_level,\n 'blood_glucose_level': blood_glucose_level,\n 'gender': gender,\n 'hypertension': hypertension,\n 'heart_disease': heart_disease,\n }\n \n data_inf = pd.DataFrame([data_inf])\n st.dataframe(data_inf)\n\n if submitted:\n data_inf_sc = data_inf[num_cols_sc]\n data_inf_nsc = data_inf[num_cols_nsc] \n\n # scalling\n data_inf_sc = scale_feat.transform(data_inf_sc)\n data_inf_sc = pd.DataFrame(data_inf_sc, columns=num_cols_sc)\n\n # Reset Index\n data_inf_sc.reset_index(drop= True, inplace= True)\n data_inf_nsc.reset_index(drop = True, inplace = True)\n data_final = pd.concat([data_inf_sc, data_inf_nsc], axis= 1)\n # modeling\n y_pred_inf = ab_model.predict(data_final)\n \n if y_pred_inf[0] == 1:\n st.write('# **`Prediction: You Have Diabetes`**')\n else:\n st.write('# **`Prediction: You do not Have Diabetes`**')\n\n if __name__ == '__main__':\n run()","repo_name":"beemabee/Diabetic_Predictor","sub_path":"deployment/prediction.py","file_name":"prediction.py","file_ext":"py","file_size_in_byte":3465,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"27762903011","text":"from torch import nn\nimport pretrainedmodels\n\n\nclass SENetHead(nn.Module):\n\n def __init__(self, in_features: int, n_classes: int):\n super().__init__()\n\n self.avg_pool = nn.AdaptiveAvgPool2d(1)\n self.dropout = nn.Dropout(0.2)\n self.last_linear = nn.Linear(in_features, n_classes)\n\n def forward(self, x):\n x = self.avg_pool(x)\n x = self.dropout(x)\n x = x.view(x.size(0), -1)\n x = self.last_linear(x)\n return x\n\n\nclass SENetBase(nn.Module):\n\n def __init__(self, name: str, pretrained: bool):\n super().__init__()\n self.pretrained = pretrained\n self.base = pretrainedmodels.__dict__[name](\n num_classes=1000, 
pretrained='imagenet'\n )\n self.out_features = 2048\n\n def forward(self, x):\n base = self.base\n features = base.features(x)\n\n return features\n","repo_name":"tanyapohn/kaggle-bengali-2020","sub_path":"bengali/models/senet.py","file_name":"senet.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"83"} +{"seq_id":"8033348257","text":"\"\"\"Flask application setup.\"\"\"\n\nfrom flask import Flask\nfrom .database import db, SQLAlchemy\nimport os\nfrom dotenv import load_dotenv\nfrom .views import oauth, twitter, site\nfrom flask_compress import Compress\nfrom flask_talisman import Talisman\n\nload_dotenv()\ncompress = Compress()\n\n\ndef setup_app(testing: bool = False) -> Flask:\n \"\"\"Set up Flask application - register views, load secret key, env config, etc.\"\"\"\n app = Flask(\"twitter_virtual\")\n\n app.secret_key = os.environ[\"FLASK_SECRET_KEY\"]\n app.config[\"LIMIT_APP_USE\"] = bool(int(os.environ.get(\"LIMIT_APP_USE\", 0)))\n app.config[\"TWITTER_CONSUMER_KEY\"] = os.environ[\"TWITTER_CONSUMER_KEY\"]\n app.config[\"TWITTER_CONSUMER_SECRET\"] = os.environ[\"TWITTER_CONSUMER_SECRET\"]\n app.config[\"TWITTER_CALLBACK_URL\"] = os.environ[\"TWITTER_CALLBACK_URL\"]\n app.config[\"RECAPTCHA_SECRET\"] = os.environ[\"RECAPTCHA_SECRET\"]\n # app.config[\"TEMPLATES_AUTO_RELOAD\"] = True\n\n app.register_blueprint(oauth.bp)\n app.register_blueprint(twitter.twitter_bp)\n app.register_blueprint(site.site_bp)\n\n # enable compression\n compress.init_app(app)\n\n if app.config[\"ENV\"] == \"production\" and testing is False:\n app.config[\"SESSION_COOKIE_SECURE\"] = True\n app.config[\"SESSION_COOKIE_HTTPONLY\"] = True\n app.config[\"SESSION_COOKIE_SAMESITE\"] = 'Lax'\n\n # talisman content security policy\n csp = {\n \"img-src\": \"*\",\n \"script-src\": [\n \"'self'\",\n \"*.google.com\",\n \"*.gstatic.com\"\n ],\n \"frame-src\": \"*.google.com\"\n }\n Talisman(app, content_security_policy=csp, content_security_policy_nonce_in=[\"script-src\"])\n\n return app\n\n\ndef setup_db(flask_app: Flask, testing: bool = False) -> SQLAlchemy:\n \"\"\"Set up Flask-SQLAlchemy.\"\"\"\n flask_app.config[\"SQLALCHEMY_TRACK_MODIFICATIONS\"] = False\n # app.config[\"SQLALCHEMY_ECHO\"] = True\n if testing:\n flask_app.config[\"SQLALCHEMY_DATABASE_URI\"] = \"sqlite:///:memory:\"\n with flask_app.app_context():\n db.init_app(flask_app)\n db.create_all()\n else:\n flask_app.config[\"SQLALCHEMY_DATABASE_URI\"] = os.environ[\"DATABASE_URL\"]\n db.init_app(flask_app)\n return db\n","repo_name":"juuuuuulian/twitter-virtual","sub_path":"twitter_virtual/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2266,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"31907501755","text":"\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nfrom collections import defaultdict\nmolecule = {}\nf = open(\"/projects/btl/jowong/github/physlr/ground_truth/flchr4.reads.molecule.bed\", \"r\")\nh = open(\"f1chr4_corrected_GT.txt\", \"w\")\nfor i in f:\n columns= i.split(\"\\t\")\n new_mol = columns[3]\n while new_mol in molecule:\n new_mol = new_mol + \"_\"\n molecule[new_mol] = (int(columns[1]), int(columns[2]))\nmolecule_keys = list(molecule.keys())\nfor i in range(len(molecule_keys)):\n molecule_n = molecule_keys[i]\n n = molecule[molecule_n]\n for j in range(i+1, len(molecule_keys)):\n molecule_m = molecule_keys[j]\n m = molecule[molecule_m]\n if (m[0] >= n[0] and 
m[0]<=n[1]) or (m[1] >= n[0] and m[1]<=n[1]) or (m[1] >= n[1] and m[0]<=n[0]) or (m[1] <= n[1] and m[0]>=n[0]):\n h.write(str(molecule_n).rstrip(\"_\")+\"\\t\"+str(molecule_m).rstrip(\"_\")+\"\\n\")\nf.close()\nh.close()\n\n# code by Johnathan - start{\nfrom collections import defaultdict\nmolecule = {}\nf = open(\"/projects/btl/jowong/github/physlr/ground_truth/flchr4.reads.molecule.bed\", \"r\")\n#f = open(\"/projects/btl_scratch/aafshinfard/projects/physlr2/extra/spruce/ws77111.contig2.allreadsbx.molecule.bed\", \"r\")\nfor i in f:\n columns= i.split(\"\\t\")\n molecule[columns[3]] = (int(columns[1]), int(columns[2]))\nedges = defaultdict(int)\nfor i, n in molecule.items():\n for j, m in molecule.items():\n if edges[(j, i)] == 1:\n continue\n if i == j:\n continue\n if (m[0] >= n[0] and m[0]<=n[1]) or (m[1] >= n[0] and m[1]<=n[1]):\n edges[(i,j)] = 1\n#}end\nfout = \"f1chr4_corrected_GT.txt\"\nfo = open(fout, \"w\")\nfor k, v in edges.items():\n if v==1:\n fo.write('\\n'+str(k[0]) +'\\t'+ str(k[1]) +'\\t'+str(v))\n\nfo.close()\n","repo_name":"aafshinfard/temp","sub_path":"physlr_label_edges_and_write.py","file_name":"physlr_label_edges_and_write.py","file_ext":"py","file_size_in_byte":1751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"29608783866","text":"counti = 0\r\ncounth = 0\r\ncountmm20 = 0\r\nwhile True:\r\n idade = int(input('Digite a idade da pessoa a ser cadastrada: '))\r\n sexo = str(input('Digite o sexo da pessoa a ser cadastrada [M/F]: ')).strip().upper()[0]\r\n while sexo not in 'MF':\r\n sexo = str(input('Digite o sexo da pessoa a ser cadastrada [M/F]: ')).strip().upper()[0]\r\n if idade > 18:\r\n counti += 1\r\n if sexo == 'M':\r\n counth += 1\r\n if sexo == 'F' and idade < 20:\r\n countmm20 += 1\r\n escolha = str(input('Quer Continuar ? [S/N]: ')).strip().upper()[0]\r\n while escolha not in 'SN':\r\n escolha = str(input('Quer Continuar ? 
[S/N]: ')).strip().upper()[0]\r\n if escolha == 'N':\r\n print('VAMOS AOS RESULTADOS')\r\n break\r\n\r\nprint(f'A quantidade de pessoas com mais de 18 anos é {counti}')\r\nprint(f' A quantidade de homens que foram cadastrados é {counth}')\r\nprint(f'A quantidade de mulheres com menos de 20 anos é {countmm20}')\r\n","repo_name":"brunotakazono/CursoPython","sub_path":"Desafios/desafio069.py","file_name":"desafio069.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"32230901715","text":"# coding=utf-8\n# from superdebug import debug, mail\nimport torch\ntry:\n from torchvision.utils import save_image\nexcept:\n save_image is None\ntf = None\nif False:\n try:\n import tensorflow as tf\n except:\n pass\nimport numpy as np\nimport sys\nimport os\nimport time\nimport re\nfrom collections import OrderedDict, defaultdict\ntry:\n from PIL import Image\nexcept:\n Image = None\n\n# 开关 #################################\nON_DEBUG = True # debug总开关\nPLAIN = False # 开启则仅普通的打印(至终端或debug.log��\nMAX_LOG = -1 # 0: 不debug, -1: 无限输出 NOTE: 无输出?可能这里设成了0,或者数量不够高、没到需要输出的变量!\nFULL = False # 是否输出完整的tensor、string内容,而不用...进行省略\nTO_FILE = False # 是否写入debug.log\nPRINT = True # 是否打印至终端\nBUGGY = True # 便捷地debug(出现bug则进入自动进入调试模式)\nPEEK_LAYER = 3 # 详细打印至第几层,不详细打印可使用0,详细打印建议用3\nMAX_PEEK_ITEM = 3 # 详细打印几项,标准为2\nMAX_STR_LEN = 540 # 最长打印的字符串长度,推荐: 540 ,无限大: 9999999999999999\nSAVE_IMAGE_NORM = False # 把tensor保存成图片时是否normalize\n# 控制是否打印细节:debug(True/False, xxx, xxx),False则只打印形状\n\n# 教程 #################################\n# 功能1: debug(xxx) : 用黄色字体打印出xxx的形状及具体值,debug(False, xxx)则只打印形状,不打印具体值。更多控制开关见上方。\n# 功能2: mark(xxx) : 标记运行到了某个位置,若有输入,则用黄色字体打印出xxx值,若仅用mark()无输入,则打印mark()所在的位置\n# 功能3: 在出错时跳至ipdb界面,便捷debug\n\n# 实现 #################################\ntry:\n MY_QQ_EMAIL = os.environ[\"MY_QQ_EMAIL\"] # Email address\n MY_QQ_EMAIL_PWD = os.environ[\"MY_QQ_EMAIL_PWD\"] # Password\nexcept:\n print(\"为了使用邮件提醒功能,请设置环境变量MY_QQ_EMAIL(QQ邮箱地址)与MY_QQ_EMAIL_PWD(QQ邮箱授权码)\")\n\ndebug_count = 1\ndebug_file = None\ndebug_path = \"super_debug\"\nif os.path.exists(debug_path):\n os.system(\"rm -r \" + debug_path)\nos.makedirs(debug_path, exist_ok=True)\nlog_path = os.path.join(debug_path, \"debug.log\")\nos.system(\"touch \" + log_path)\nimage_count = {}\nsimple_types = [str, int, float, bool]\n\n\nclass ExceptionHook:\n instance = None\n\n def __call__(self, *args, **kwargs):\n if not BUGGY:\n return\n if self.instance is None:\n from IPython.core import ultratb\n self.instance = ultratb.FormattedTB(mode='Plain',\n color_scheme='Linux', call_pdb=1)\n return self.instance(*args, **kwargs)\n\n\nsys.excepthook = ExceptionHook()\n\n\ndef get_pos(level=1, end=\"\\n\"):\n position = \"\"\"{}:{} {}\"\"\".format(\n # position = \"\"\"\"{}\", line {}, in {}\"\"\".format(\n sys._getframe(level).f_code.co_filename, # 当前文件名\n sys._getframe(level).f_lineno, # 当前行号\n sys._getframe(level).f_code.co_name, # 当前函数/module名\n )\n return position\n\ndef get_time():\n return time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\n\ndef print_yellow(text, end=\"\\n\"):\n print(f\"\\033[1;33m{text}\\033[0m\", end=end)\n\n\ndef normalize(tensor):\n max_value = torch.max(tensor)\n min_value = torch.min(tensor)\n tensor = (tensor - min_value) / (max_value - min_value)\n return tensor\n\ndef print_image(tensor, name, is_np = False):\n if name not in image_count:\n image_count[name] = 0\n file_path = os.path.join(debug_path, 
f\"tensor_{debug_count}_{name}_{image_count[name]}.jpg\")\n normallized_file_path = os.path.join(debug_path, f\"tensor_{debug_count}_{name}_{image_count[name]}_norm.jpg\")\n image_count[name] += 1\n if Image is not None and type(tensor) == Image.Image:\n tensor.save(file_path)\n else:\n if is_np:\n tensor = torch.Tensor(tensor)\n normalized_tensor = normalize(tensor)\n try:\n if save_image is not None:\n if SAVE_IMAGE_NORM:\n save_image(normalized_tensor, normallized_file_path)\n else:\n save_image(tensor, file_path)\n except Exception:\n pass\ndef mark(marker=None):\n print(\"Mark is deprecated. Use debug() instead.\")\n\n\ndef logging(*message, end=\"\\n\"):\n \"\"\"同时输出到终端和debug.log\"\"\"\n message = \" \".join([str(_) for _ in message])\n if PRINT:\n print_yellow(message, end=end)\n if debug_file and not debug_file.closed:\n message = re.sub(\"\\033\\[.*?m\", \"\", message)\n debug_file.write(message + end)\nlogging(\"------------------\\033[0m\\033[1;31m\", get_time(), \"\\033[0m\\033[1;33m------------------\")\n\n\ndef info(var, name=\"?\", detail=True, layer=0):\n \"\"\"递归打印变量\"\"\"\n space = \" \"\n if type(var) == int or type(var) == float:\n logging(space * layer, f\"\\033[0m\\033[1;36m{name}\\033[0m\\033[1;33m\", \"num val:\", var)\n else:\n if var is None:\n logging(space * layer, f\"\\033[0m\\033[1;36m{name}\\033[0m\\033[1;33m\", \"None\")\n elif type(var) == str:\n length = len(var)\n if not FULL and len(var) >= MAX_STR_LEN:\n var = var[:MAX_STR_LEN - 20] + \" ... \" + var[-20:]\n logging(space * layer, f\"\\033[0m\\033[1;36m{name}\\033[0m\\033[1;33m\", \"str len\", str(length)+\":\", var)\n elif type(var) == bool:\n logging(space * layer, f\"\\033[0m\\033[1;36m{name}\\033[0m\\033[1;33m\", \"bool:\", var)\n elif type(var) == list:\n logging(space * layer, f\"\\033[0m\\033[1;36m{name}\\033[0m\\033[1;33m\", \"list size:\", len(var), end=\"\")\n if layer < PEEK_LAYER and len(var) > 0 and type(var[0]) not in simple_types: # a list of complex variables\n logging(f\" [{min(len(var), 3)*'.'}]\")\n for no, item in enumerate(var[:MAX_PEEK_ITEM]):\n info(item, \"item \" + str(no) + \": \", detail, layer + 1)\n if len(var) > MAX_PEEK_ITEM:\n logging(space * (layer + 1), len(var) - MAX_PEEK_ITEM, \"extra items\")\n else:\n var_str = str(var)\n if len(var) > 0 and type(var[0]) in simple_types and all([type(var[i]) == type(var[0]) for i in range(len(var))]): # a list of variables of the same simple type\n if len(var_str) >= MAX_STR_LEN + 3: # too long\n # show_num = len(var_str[:MAX_STR_LEN].split(\",\"))\n logging(\" val:\", f\"{var_str[:MAX_STR_LEN]} ... 
and extra items]\" if detail else \"*\")\n else:\n logging(\" val:\", var_str if detail else \"*\")\n elif layer < PEEK_LAYER: # variables of different simple types\n logging(f\" [{min(len(var), 3)*'.'}]\")\n for no, item in enumerate(var):\n info(item, \"item \" + str(no) + \": \", detail, layer + 1)\n else:\n logging(\" val:\", var_str if detail else \"*\")\n elif type(var) == tuple:\n logging(space * layer, f\"\\033[0m\\033[1;36m{name}\\033[0m\\033[1;33m\", \"tuple size:\", len(var), end=\"\")\n if layer < PEEK_LAYER and len(var) > 0:\n logging(f\" ({min(len(var), 3)*'.'})\")\n for no, item in enumerate(var):\n info(item, \"item \" + str(no) + \": \", detail, layer + 1)\n else:\n logging(\" val:\", var if detail else \"*\")\n elif type(var) == set:\n logging(space * layer, f\"\\033[0m\\033[1;36m{name}\\033[0m\\033[1;33m\", \"set size:\", len(var), end=\"\")\n if layer < PEEK_LAYER and len(var) > 0:\n logging(\" {\" + min(len(var), 3)*'.' + \"}\")\n for no, item in enumerate(var):\n info(item, \"item \" + str(no) + \": \", detail, layer + 1)\n else:\n logging(\" val:\", var if detail else \"*\")\n elif type(var) == dict:\n dict_keys = sorted(list(var.keys()))\n dict_end = \"\"\n if len(dict_keys) >= 100:\n dict_keys = dict_keys[:100]\n dict_end = \"... and extra items]\"\n logging(space * layer, f\"\\033[0m\\033[1;36m{name}\\033[0m\\033[1;33m\", \"dict {\" + min(len(var), 3)*'.' + \"} \" + f\"with {len(dict_keys)} keys\", dict_keys, end=dict_end)\n if layer < PEEK_LAYER and len(var) > 0:\n logging(\"\")\n for key in dict_keys:\n info(var[key], key, detail, layer + 1)\n else:\n logging(\" val:\", var if detail else \"*\")\n elif type(var) == OrderedDict:\n dict_keys = sorted(list(var.keys()))\n dict_end = \"\"\n if len(dict_keys) >= 100:\n dict_keys = dict_keys[:100]\n dict_end = \"... and extra items]\"\n logging(space * layer, f\"\\033[0m\\033[1;36m{name}\\033[0m\\033[1;33m\", \"OrderedDict {\" + min(len(var), 3)*'.' + \"} \" + f\"with {len(dict_keys)} keys\", dict_keys, end=dict_end)\n if layer < PEEK_LAYER and len(var) > 0:\n logging(\"\")\n for key in dict_keys:\n info(var[key], key, detail, layer + 1)\n else:\n logging(\" val:\", var if detail else \"*\")\n elif type(var) == defaultdict:\n tmp_val = 12341231354124\n assert tmp_val not in var\n default_val = var[tmp_val]\n del var[tmp_val]\n\n dict_keys = sorted(list(var.keys()))\n dict_end = \"\"\n if len(dict_keys) >= 100:\n dict_keys = dict_keys[:100]\n dict_end = \"... and extra items]\"\n\n logging(space * layer, f\"\\033[0m\\033[1;36m{name}\\033[0m\\033[1;33m\", \"defaultdict {\" + min(len(var), 3)*'.' 
+ \"} with default\", default_val, f\"{len(dict_keys)} keys\", dict_keys, end=dict_end)\n if layer < PEEK_LAYER and len(var) > 0:\n logging(\"\")\n for key in dict_keys:\n info(var[key], key, detail, layer + 1)\n else:\n logging(\" val:\", var if detail else \"*\")\n elif type(var) == torch.Tensor:\n logging(space * layer, f\"\\033[0m\\033[1;36m{name}\\033[0m\\033[1;33m\", \"Tensor size:\", var.shape, \"val:\", var if detail else \"*\")\n print_image(var, name)\n elif type(var) == np.ndarray:\n logging(space * layer, f\"\\033[0m\\033[1;36m{name}\\033[0m\\033[1;33m\", \"ndarray size:\", var.shape,\n \"val:\", var if detail else \"*\")\n print_image(var, name, True)\n elif tf is not None and type(var) == tf.Tensor:\n logging(space * layer, f\"\\033[0m\\033[1;36m{name}\\033[0m\\033[1;33m\", \"Tensor size:\", var.shape, \"val:\", var if detail else \"*\")\n elif Image is not None and type(var) == Image.Image:\n print_image(var, name)\n else:\n var_type = str(type(var)).split(\"'\")[1]\n try:\n if layer >= PEEK_LAYER:\n raise Exception(\"Too many layers\")\n props = var.__dict__\n logging(space * layer, f\"\\033[0m\\033[1;36m{name}\\033[0m\\033[1;33m\", var_type, \"with props\", list(props.keys()), end=\"\")\n prop_valid = False\n for key in props:\n if not key.startswith(\"_\"):\n if not prop_valid:\n prop_valid = True\n logging(\"\")\n info(props[key], key, detail, layer + 1)\n if not prop_valid:\n logging(\" val:\", var)\n except Exception:\n var_type = str(type(var))[8:-2]\n logging(space * layer, f\"\\033[0m\\033[1;36m{name}\\033[0m\\033[1;33m\", var_type, \"with val: \", var)\n\n\ndef debug(*args, **kwargs):\n \"\"\"debug打印主入口\"\"\"\n global ON_DEBUG\n global debug_count\n global debug_file\n global TO_FILE\n global PLAIN\n if not ON_DEBUG:\n return \n if TO_FILE:\n try:\n debug_file = open(log_path, \"a\", encoding='utf-8')\n except:\n debug_file = None\n logging(\"------------------\\033[0m\\033[1;31m\", get_time(), \"\\033[0m\\033[1;33m------------------\")\n if PLAIN:\n logging(*args, **kwargs, end=\"\\n\")\n if TO_FILE and debug_file is not None:\n debug_file.close()\n return\n global FULL\n if FULL:\n torch.set_printoptions(profile=\"full\")\n np.set_printoptions(threshold=sys.maxsize)\n count = 0\n if MAX_LOG != -1 and debug_count >= MAX_LOG:\n if debug_file:\n debug_file.close()\n return\n detail = True\n if args and type(args[0]) is bool:\n detail = args[0]\n args = args[1:]\n keys = list(kwargs.keys())\n if len(args) + len(kwargs) == 0:\n logging(f\"\\033[0m\\033[1;32mMARK:\\033[0m\\033[1;33m at \\033[0m\\033[1;32m{get_pos(level=2)}\\033[0m\\033[1;33m\")\n elif len(args) == 1 and len(kwargs) == 0 and type(args[0]) == str:\n logging(f\"\\033[0m\\033[1;32mDEBUG:\\033[0m\\033[1;33m at \\033[0m\\033[1;32m{get_pos(level=2)}\\033[0m\\033[1;33m\")\n logging(f\"\\033[0m\\033[1;36m{args[0]}\\033[0m\\033[1;33m\")\n else:\n logging(f\"\\033[0m\\033[1;32mDEBUG:\\033[0m\\033[1;33m {len(args) + len(kwargs)} vars: {['?' 
for _ in args] + keys}, at \\033[0m\\033[1;32m{get_pos(level=2)}\\033[0m\\033[1;33m\")\n for var in args:\n logging(f\"{count} / {debug_count}.\", end=\" \")\n info(var, detail=detail)\n debug_count += 1\n count += 1\n for key in keys:\n logging(f\"{count} / {debug_count}.\", end=\" \")\n info(kwargs[key], key, detail=detail)\n count += 1\n debug_count += 1\n logging(\"------------------\\033[0m\\033[1;31m\", get_time(), \"\\033[0m\\033[1;33m------------------\")\n if TO_FILE and debug_file is not None:\n debug_file.close()\n\ndef mail(subject = \"Progress Notification\", message = \"\"):\n from email.mime.text import MIMEText\n subject = f\"[SUPERDEBUG] {subject}\"\n message = f\"{message}\\nThis email is sent at {get_pos(level=2)}\"\n mail = MIMEText(message)\n mail['Subject'] = subject\n mail['From'] = MY_QQ_EMAIL\n mail['To'] = MY_QQ_EMAIL\n\n import smtplib\n smtp=smtplib.SMTP()\n smtp.connect('smtp.qq.com', 25)\n smtp.login(MY_QQ_EMAIL, MY_QQ_EMAIL_PWD)\n\n smtp.sendmail(MY_QQ_EMAIL, MY_QQ_EMAIL, mail.as_string()) # To是接收邮箱\n","repo_name":"Azure-Vision/SuperDebug","sub_path":"superdebug/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":14542,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"83"} +{"seq_id":"71229680590","text":"# ---------------------------------------------------------- #\r\n# --------------------- classifySpec.py -------------------- #\r\n# --------- https://github.com/jhoormann/RMCodeDump -------- #\r\n# ---------------------------------------------------------- #\r\n# This code aims to help make it easier to decide which #\r\n# exposures, if any, are problematic and should be excluded #\r\n# from the analysis. #\r\n# ---------------------------------------------------------- #\r\n\r\nimport numpy as np\r\nfrom astropy.io import fits\r\nimport matplotlib.pyplot as plt\r\nfrom scipy.integrate import fixed_quad\r\nfrom matplotlib.ticker import MaxNLocator\r\nimport OzDES_Calculation as ozcalc\r\nimport pandas as pd\r\nimport sys\r\n\r\nclass SpectrumCoadd(object):\r\n # Spectrum class for latest version of the OzDES pipeline\r\n\r\n def __init__(self, filepath=None):\r\n assert filepath != None, \"No file name is specified.\"\r\n self.filepath = filepath\r\n try:\r\n self.data = fits.open(filepath)\r\n except IOError:\r\n print(\"Error: file {0} could not be found\".format(filepath))\r\n exit()\r\n data = fits.open(filepath)\r\n self.combined = data[0]\r\n self.combinedVariance = data[1]\r\n self._wavelength = None\r\n self._flux = None\r\n self._variance = None\r\n self._fluxCoadd = None\r\n self._varianceCoadd = None\r\n self._dates = None\r\n self._runs = None\r\n self.numEpochs = int((np.size(data) - 3) / 3)\r\n self.redshift = self.combined.header['z']\r\n self.RA = self.combined.header['RA']\r\n self.DEC = self.combined.header['DEC']\r\n self.field = self.combined.header['FIELD']\r\n\r\n @property\r\n def wavelength(self):\r\n \"\"\"Define wavelength solution.\"\"\"\r\n if getattr(self, '_wavelength', None) is None:\r\n crpix = self.combined.header[\r\n 'crpix1'] - 1.0 # Central pixel value. 
The -1.0 is needed as Python is ZERO indexed\r\n crval = self.combined.header['crval1'] # central wavelength value\r\n self.cdelt = self.combined.header['cdelt1'] # Wavelength interval between subsequent pixels\r\n n_pix = self.combined.header[\"NAXIS1\"]\r\n wave = ((np.arange(n_pix) - crpix) * self.cdelt) + crval\r\n self._wavelength = wave\r\n return self._wavelength\r\n\r\n @property\r\n def flux(self):\r\n if getattr(self, '_flux', None) is None:\r\n self._flux = np.zeros((5000, self.numEpochs), dtype=float)\r\n for i in range(self.numEpochs):\r\n self._flux[:, i] = self.data[i * 3 + 3].data / 10 ** -16\r\n return self._flux\r\n\r\n @property\r\n def variance(self):\r\n if getattr(self, '_variance', None) is None:\r\n self._variance = np.zeros((5000, self.numEpochs), dtype=float)\r\n for i in range(self.numEpochs):\r\n self._variance[:, i] = self.data[i * 3 + 4].data / 10 ** -32\r\n return self._variance\r\n\r\n @property\r\n def fluxCoadd(self):\r\n if getattr(self, '_fluxCoadd', None) is None:\r\n self._fluxCoadd = np.zeros(5000, dtype=float)\r\n self._fluxCoadd[:] = self.data[0].data / 10 ** -16\r\n return self._fluxCoadd\r\n\r\n @property\r\n def varianceCoadd(self):\r\n if getattr(self, '_varianceCoadd', None) is None:\r\n self._varianceCoadd = np.zeros(5000, dtype=float)\r\n self._varianceCoadd[:] = self.data[1].data / 10 ** -32\r\n return self._varianceCoadd\r\n\r\n @property\r\n def dates(self):\r\n if getattr(self, '_dates', None) is None:\r\n self._dates = np.zeros(self.numEpochs, dtype=float)\r\n for i in range(self.numEpochs):\r\n self._dates[i] = self.data[i * 3 + 3].header[\r\n 'AVGDATE'] # this give the average Modified Julian Date (UTC) that observation was taken\r\n return self._dates\r\n\r\n @property\r\n def runs(self):\r\n if getattr(self, '_runs', None) is None:\r\n self._runs = np.zeros(self.numEpochs, dtype=float)\r\n for i in range(self.numEpochs):\r\n self._runs[i] = self.data[i * 3 + 3].header['RUN'] # this give the run number of the observation\r\n return self._runs\r\n\r\n\r\nclass Spectrumv14(object):\r\n def __init__(self, filepath=None):\r\n assert filepath is not None\r\n self.filepath = filepath\r\n try:\r\n self.data = fits.open(filepath)\r\n except IOError:\r\n print(\"Error: file {0} could not be found\".format(filepath))\r\n exit()\r\n data = fits.open(filepath)\r\n self.combinedFlux = data[0]\r\n self.combinedVariance = data[1]\r\n self.combinedPixels = data[2]\r\n self.numEpochs = int((np.size(data) - 3) / 3)\r\n self.field = self.data[3].header['SOURCEF'][19:21]\r\n self.cdelt1 = self.combinedFlux.header['cdelt1'] # Wavelength interval between subsequent pixels\r\n self.crpix1 = self.combinedFlux.header['crpix1']\r\n self.crval1 = self.combinedFlux.header['crval1']\r\n self.n_pix = self.combinedFlux.header['NAXIS1']\r\n self.RA = self.combinedFlux.header['RA']\r\n self.DEC = self.combinedFlux.header['DEC']\r\n\r\n self.fluxCoadd = self.combinedFlux.data\r\n self.varianceCoadd = self.combinedVariance.data\r\n self.badpixCoadd = self.combinedPixels.data\r\n\r\n self._wavelength = None\r\n self._flux = None\r\n self._variance = None\r\n self._badpix = None\r\n self._dates = None\r\n self._redzp = None\r\n self._bluezp = None\r\n self._run = None\r\n self._ext = None\r\n self._qc = None\r\n self._exposed = None\r\n\r\n @property\r\n def wavelength(self):\r\n \"\"\"Define wavelength solution.\"\"\"\r\n if getattr(self, '_wavelength', None) is None:\r\n wave = ((np.arange(self.n_pix) - self.crpix1) * self.cdelt1) + self.crval1\r\n self._wavelength = 
wave\r\n return self._wavelength\r\n\r\n @property\r\n def flux(self):\r\n if getattr(self, '_flux', None) is None:\r\n self._flux = np.zeros((5000, self.numEpochs), dtype=float)\r\n for i in range(self.numEpochs):\r\n self._flux[:, i] = self.data[i * 3 + 3].data\r\n return self._flux\r\n\r\n @property\r\n def variance(self):\r\n if getattr(self, '_variance', None) is None:\r\n self._variance = np.zeros((5000, self.numEpochs), dtype=float)\r\n for i in range(self.numEpochs):\r\n self._variance[:, i] = self.data[i * 3 + 4].data\r\n return self._variance\r\n\r\n @property\r\n def badpix(self):\r\n if getattr(self, '_badpix', None) is None:\r\n self._badpix = np.zeros((5000, self.numEpochs), dtype=float)\r\n for i in range(self.numEpochs):\r\n self._badpix[:, i] = self.data[i * 3 + 5].data\r\n return self._badpix\r\n\r\n @property\r\n def dates(self):\r\n if getattr(self, '_dates', None) is None:\r\n self._dates = np.zeros(self.numEpochs, dtype=float)\r\n for i in range(self.numEpochs):\r\n self._dates[i] = round(self.data[i * 3 + 3].header['UTMJD'],3)\r\n # this give Modified Julian Date (UTC) that observation was taken\r\n return self._dates\r\n\r\n @property\r\n def redzp(self):\r\n if getattr(self, '_redzp', None) is None:\r\n self._redzp = []\r\n for i in range(self.numEpochs):\r\n self._redzp.append(self.data[i * 3 + 3].header['REDZP'])\r\n # this gives if the ZP for the red arm\r\n return self._redzp\r\n\r\n @property\r\n def bluezp(self):\r\n if getattr(self, '_bluezp', None) is None:\r\n self._bluezp = []\r\n for i in range(self.numEpochs):\r\n self._bluezp.append(self.data[i * 3 + 3].header['BLUZP'])\r\n # this gives if the ZP for the blue arm\r\n return self._bluezp\r\n\r\n @property\r\n def ext(self):\r\n if getattr(self, '_ext', None) is None:\r\n self._ext = []\r\n for i in range(self.numEpochs):\r\n self._ext.append(i * 3 + 3) # gives the extension in original fits file\r\n return self._ext\r\n\r\n @property\r\n def run(self):\r\n if getattr(self, '_run', None) is None:\r\n self._run = []\r\n for i in range(self.numEpochs):\r\n source = self.data[i * 3 + 3].header['SOURCEF']\r\n self._run.append(int(source[3:6])) # this gives the run number of the observation\r\n return self._run\r\n\r\n @property\r\n def qc(self):\r\n if getattr(self, '_qc', None) is None:\r\n self._qc = []\r\n for i in range(self.numEpochs):\r\n self._qc.append(self.data[i * 3 + 3].header['QC'])\r\n # this tell you if there were any problems with the spectra that need to be masked out\r\n return self._qc\r\n\r\n @property\r\n def exposed(self):\r\n if getattr(self, '_exposed', None) is None:\r\n self._exposed = []\r\n for i in range(self.numEpochs):\r\n self._exposed.append(self.data[i * 3 + 3].header['EXPOSED'])\r\n # this will give you the exposure time of each observation\r\n return self._exposed\r\n\r\ntitle_font = {'size':'22', 'color':'black', 'weight':'normal', 'verticalalignment':'bottom'}\r\naxis_font = {'size':'22'}\r\n\r\ndef makeFigSingle(title, xlabel, ylabel, xlim=[0, 0], ylim=[0, 0]):\r\n fig = plt.gcf()\r\n fig.set_size_inches(10, 10, forward=True)\r\n\r\n ax = fig.add_subplot(111)\r\n for label in (ax.get_xticklabels() + ax.get_yticklabels()):\r\n label.set_fontsize(25)\r\n\r\n ax.set_ylabel(ylabel, **axis_font)\r\n if ylim != [0, 0] and ylim[0] < ylim[1]:\r\n ax.set_ylim(ylim)\r\n\r\n ax.set_xlabel(xlabel, **axis_font)\r\n if xlim != [0, 0] and xlim[0] < xlim[1]:\r\n ax.set_xlim(xlim)\r\n\r\n ax.set_title(title, **title_font)\r\n\r\n return fig, ax\r\ndef plot_fonts(size, color='black', 
weight='normal', align='bottom'):\r\n font = {'size': size, 'color': color, 'weight': weight, 'verticalalignment': align}\r\n return font\r\n\r\n\r\ndef plot_ticks(ax, size):\r\n for label in (ax.get_xticklabels() + ax.get_yticklabels()):\r\n label.set_fontsize(size)\r\n return\r\ndef plot_share_x(number, title, xlabel, xdat, ylabel, ydat, qc, xlim=(0, 0), ylim=(0, 0), lines=(0,0,0), asize=22,\r\n tsize=22, xdim=10, ydim=10, xtick=None, ytick=2):\r\n fig, ax_array = plt.subplots(number, sharex=True)\r\n fig = plt.gcf()\r\n fig.set_size_inches(xdim, ydim, forward=True)\r\n fig.subplots_adjust(hspace=0)\r\n\r\n title_font = plot_fonts(tsize, align='bottom')\r\n x_axis_font = plot_fonts(asize, align='top')\r\n y_axis_font = plot_fonts(asize, align='bottom')\r\n\r\n for [i, ax] in enumerate(ax_array):\r\n plot_ticks(ax, asize)\r\n ax.set_ylabel(ylabel[i], **y_axis_font)\r\n ax.yaxis.set_major_locator(MaxNLocator(prune='upper'))\r\n ax.tick_params(axis='y', pad=15)\r\n if ylim != (0, 0) and ylim[0] < ylim[1]:\r\n ax.set_ylim(ylim)\r\n if i == 0:\r\n ax.set_title(title, **title_font)\r\n ax.tick_params(axis='x', pad=15)\r\n if xlim != (0, 0) and xlim[0] < xlim[1]:\r\n ax.set_xlim(xlim)\r\n if i == number - 1:\r\n ax.set_xlabel(xlabel, **x_axis_font)\r\n if ytick is not None:\r\n ax.yaxis.major.locator.set_params(nbins=ytick)\r\n if xtick is not None:\r\n ax.xaxis.major.locator.set_params(nbins=xtick)\r\n if qc[i] == 'good':\r\n col = 'black'\r\n else:\r\n col = 'red'\r\n ax.plot(xdat, ydat[:,i], color=col)\r\n ax.axvline(x=lines[0], color='blue')\r\n ax.axvline(x=lines[1], color='blue')\r\n ax.axvline(x=lines[2], color='blue')\r\n return\r\n\r\n\r\ndef mark_as_bad(fluxes, variances, wavelength, numEpochs):\r\n for epoch in range(numEpochs):\r\n flux = fluxes[:, epoch]\r\n variance = variances[:, epoch]\r\n\r\n bad = np.zeros(len(flux))\r\n\r\n for i in range(len(flux)):\r\n if i % 100 == 0:\r\n avg = np.nanmean(variance[i:i + 99])\r\n if np.isnan(variance[i]) == False and variance[i] > 3.5 * avg:\r\n bad[i] = 2\r\n flux[i] = np.nan\r\n if i > 2 and i < 4996:\r\n flux[i - 1] = np.nan\r\n flux[i - 2] = np.nan\r\n flux[i - 3] = np.nan\r\n flux[i + 1] = np.nan\r\n flux[i + 2] = np.nan\r\n flux[i + 3] = np.nan\r\n\r\n bad[i - 1] = 1\r\n bad[i - 2] = 1\r\n bad[i - 3] = 1\r\n bad[i + 1] = 1\r\n bad[i + 2] = 1\r\n bad[i + 3] = 1\r\n\r\n return\r\n\r\n\r\ndef filter_bad_pixels(fluxes, variances, number):\r\n\r\n for epoch in range(number):\r\n if (number == 1):\r\n flux = fluxes[:]\r\n variance = variances[:]\r\n else:\r\n flux = fluxes[:, epoch]\r\n variance = variances[:, epoch]\r\n\r\n nBins = len(flux)\r\n flux[0] = 0.0\r\n flux[-1] = 0.0\r\n variance[0] = 0\r\n variance[-1] = 0\r\n\r\n bad_pixels = np.logical_or.reduce((np.isnan(flux), np.isnan(variance), variance < 0))\r\n\r\n bin = 0\r\n binStart = 0\r\n binEnd = 0\r\n\r\n while (bin < nBins):\r\n if (bad_pixels[bin] == True):\r\n binStart = bin\r\n binNext = bin + 1\r\n while (binNext < nBins):\r\n if bad_pixels[binNext] == False:\r\n binEnd = binNext - 1\r\n binNext = nBins\r\n binNext = binNext + 1\r\n\r\n ya = float(flux[binStart - 1])\r\n xa = float(binStart - 1)\r\n sa = variance[binStart - 1]\r\n yb = flux[binEnd + 1]\r\n xb = binEnd + 1\r\n sb = variance[binEnd + 1]\r\n\r\n step = binStart\r\n while (step < binEnd + 1):\r\n flux[step] = ya + (yb - ya) * (step - xa) / (xb - xa)\r\n variance[step] = sa + (sb + sa) * ((step - xa) / (xb - xa)) ** 2\r\n step = step + 1\r\n bin = binEnd\r\n bin = bin + 1\r\n\r\n\r\n# Either a data table with the 
column names below or just ID number, what you are expected to provide depends on the\r\n# flag you choose\r\nsources = pd.read_csv(\"specDetsClass.csv\")\r\n\r\n# I made a table with a bunch of data for each source (makeClassificationStats.py) these are the columns it expects.\r\n# This code focuses on filling out the class, issue columns\r\n# class (good or bad)\r\n# issue (okay (good) or reason why bad)\r\n# colNames = ['ID', 'z', 'ext', 'date', 'mg', 'mgerr', 'Fvarg', 'mr', 'mrerr', 'Fvarr', 'mi', 'mierr', 'Fvari', 'Fc',\r\n# 'SNRc', 'Fm', 'SNRm', 'Fh', 'SNRh', 'F1350', 'SNR1350', 'F3000', 'SNR3000', 'F5100', 'SNR5100', 'Fred', 'SNRred',\r\n# 'Fblue', 'SNRblue', 'badpix', 'class', 'issue']\r\n\r\ndataPath = \"../OzDES_Data/\"\r\n\r\n# If you include output from makeClassificationStats.py there will be multiple entries for each AGN ID, however\r\n# we don't want to repeat ourselves so we just find the unique values and analyse those.\r\nnames = np.unique(sources['ID'].values)\r\nnLines = len(names)\r\n\r\n# There are three flags to choose from 'table', 'plot', and 'combine'. 'plot' will likely be the most useful\r\nflag = 'plot'\r\n\r\n# Helps to determine if there are exposure that are so bad they should be excluded from the coadd, noise dominated,\r\n# issues with splicing, etc. All the exposures for a given run will pop up and you can interactively specify via the\r\n# command line which ones you want to exclude and why. The results are appended to a text file.\r\nif flag == 'plot':\r\n output = open(\"probNames.txt\", \"a\")\r\n # Initialize while loop with index of names array you want to start at\r\n i = 0\r\n\r\n # labels for the y axis corresponding to each exposure of a given run, I think this is enough (yes there has to be\r\n # a better way to do this, no I am not worrying about it now).\r\n ylab = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23]\r\n while i < nLines:\r\n print(i)\r\n agn = names[i]\r\n print(agn)\r\n\r\n # Read in the spectral data\r\n spectra = Spectrumv14(dataPath + \"spectra180413/SVA1_COADD-\" + str(agn) + \".fits\")\r\n ozcalc.mark_as_bad(spectra.flux, spectra.variance)\r\n\r\n # You probably want to read in the file that includes the classifications made with the 'table' flag. If you\r\n # do that spectra that already classified as bad will be plotted using a different color.\r\n quality = []\r\n for e in range(spectra.numEpochs):\r\n iVal = sources.index[(sources['ID'] == agn) & (sources['ext'] == 3 * e + 3)].values[0]\r\n quality.append(sources['class'].iloc[iVal])\r\n z = sources['z'].iloc[iVal]\r\n\r\n lines = [1549*(1+z), 2798*(1+z), 4861*(1+z)]\r\n\r\n\r\n e = 0\r\n while e < spectra.numEpochs:\r\n # Determine which extensions belong to the same observing run\r\n step = 0\r\n numStep = step\r\n while step < spectra.numEpochs - e:\r\n if spectra.run[e+step] == spectra.run[e]:\r\n step += 1\r\n numStep = step\r\n else:\r\n step = spectra.numEpochs\r\n # Make it so all spectra are plotted with the same y-axis limits.\r\n maxV = np.nanmax(spectra.flux[:, e:e+numStep].flatten()) + 0.5\r\n minV = np.nanmin(spectra.flux[:, e:e+numStep].flatten()) - 0.5\r\n if maxV > 15:\r\n maxV = 15\r\n if minV < -5:\r\n minV = -5\r\n\r\n # Plot all epochs on the same graph. 
If there is already a known issue marked, plot the spectra in red,\r\n            # otherwise plot it in black\r\n\r\n            if numStep > 1:\r\n                plot_share_x(numStep, str(agn) + \" run \" + str(spectra.run[e]), \"Wavelength\", spectra.wavelength,\r\n                             ylab[0:numStep], spectra.flux[:, e:e+numStep], quality[e:e+numStep],\r\n                             [spectra.wavelength[0], spectra.wavelength[4999]], [minV, maxV], lines)\r\n            else:\r\n                fig, ax = makeFigSingle(str(agn) + \" run \" + str(spectra.run[e]), \"Wavelength\",\"0\",\r\n                                        [spectra.wavelength[0], spectra.wavelength[4999]])\r\n                if quality[e] == 'good':\r\n                    col = 'black'\r\n                else:\r\n                    col = 'red'\r\n                ax.plot(spectra.wavelength, spectra.flux[:,e], color=col)\r\n                ax.axvline(x=lines[0], color='blue')\r\n                ax.axvline(x=lines[1], color='blue')\r\n                ax.axvline(x=lines[2], color='blue')\r\n\r\n            # While the graph is open you will be prompted to specify if there are any issues with the run. If you say\r\n            # yes then you will go through each extension in the run one by one and you can say if there is a problem\r\n            # with the specific extension. You can specify four options: 'w' = weather/noise, 's' = splice,\r\n            # 'm' = missing (lots of bad pixels; note we call mark_as_bad, so this might mean there was a lot of noise\r\n            # that was also interpolated over), or 'o' = other.\r\n            # If, when asked whether the run is bad, you choose 'q', it will save the info up to this point to the\r\n            # output file and close down.\r\n            plt.pause(0.1)\r\n            b = 0\r\n            run_flag = input(\"Is the run bad (y/n)? \")\r\n            if run_flag == 'y':\r\n                while b < numStep:\r\n                    ext_flag = input(\"Is extension \" + str(b) + \" bad (n/y(w/s/m/o))? \")\r\n                    if ext_flag in ['w', 's', 'm', 'o']:\r\n                        output.write(str(agn) + \" \" + str(spectra.ext[e+b]) + \" \" + ext_flag + \"\\n\")\r\n                    if ext_flag == 'q':\r\n                        b = numStep + 1\r\n                    b = b + 1\r\n            if run_flag == 'q':\r\n                e = spectra.numEpochs\r\n                print(\"I am quitting at index \" + str(i))\r\n                output.close()\r\n                sys.exit()\r\n            plt.close()\r\n            e += numStep\r\n\r\n        i += 1\r\n\r\n    output.close()\r\n\r\n# Classify an exposure as bad based on things like a bad quality flag, not enough photometry to calibrate, bad weather,\r\n# and bad pixels; many of these issues were picked up when trying to perform the spectrophotometric calibration\r\nif flag == 'table':\r\n    for i in range(nLines):\r\n        if i%10 ==0:\r\n            print(\"Analysing AGN # \" + str(i))\r\n        AGN = names[i]\r\n        print(AGN)\r\n\r\n        # Load in data\r\n        spectra = ozcalc.SpectrumCoadd(dataPath + \"processedSpectraY5/\" + str(names[i]) + \"_scaled.fits\")\r\n        spectraO = ozcalc.Spectrumv14(dataPath + \"spectra180413/SVA1_COADD-\" + str(AGN) + \".fits\")\r\n\r\n        photo = pd.read_table(dataPath + \"photometryY5/\" + str(AGN) + \"_lc.dat\", delim_whitespace=True)\r\n\r\n        # bad if the quality flag was determined to be problematic (i.e. anything other than okay or backup),\r\n        # issue = 'qc'\r\n        if spectra.badqc != '':\r\n            badqc = np.array(spectra.badqc.split(\",\"))\r\n            badqc = badqc.astype(int)\r\n\r\n            for q in badqc:\r\n                iVal = sources.index[(sources['ID'] == AGN) & (sources['ext'] == q)].values[0]\r\n                sources['class'].iloc[iVal] = 'bad'\r\n                sources['issue'].iloc[iVal] = 'qc'\r\n\r\n        # bad if there is insufficient photometry to calibrate\r\n        # issue = 'nophoto'\r\n        if spectra.nophoto != '':\r\n            nophoto = np.array(spectra.nophoto.split(\",\"))\r\n            nophoto = nophoto.astype(int)\r\n\r\n            for p in nophoto:\r\n                iVal = sources.index[(sources['ID'] == AGN) & (sources['ext'] == p)].values[0]\r\n                sources['class'].iloc[iVal] = 'bad'\r\n                sources['issue'].iloc[iVal] = 'nophoto'\r\n\r\n        
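# NOTE: chained assignments like sources['class'].iloc[iVal] = 'bad' can trigger pandas'\r\n        # SettingWithCopyWarning and may silently write to a copy; the equivalent\r\n        # sources.loc[iVal, 'class'] = 'bad' is the safer idiom if these updates ever stop sticking.\r\n        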
# bad if there was a weather issue, typically one so noisy that the magnitudes calculated in one of the bands during\r\n        # calibration were NaNs\r\n        # issue = 'noise'\r\n        if spectra.weather != '':\r\n            weather = np.array(spectra.weather.split(\",\"))\r\n            weather = weather.astype(int)\r\n\r\n            for w in weather:\r\n                iVal = sources.index[(sources['ID'] == AGN) & (sources['ext'] == w)].values[0]\r\n                sources['class'].iloc[iVal] = 'bad'\r\n                sources['issue'].iloc[iVal] = 'noise'\r\n\r\n        # bad if the emission line flux is negative\r\n        # issue = 'noise'\r\n        # also bad if more than 10% of the pixels were bad\r\n        # issue = 'missing'\r\n        for e in range(spectraO.numEpochs):\r\n            iVal = sources.index[(sources['ID'] == AGN) & (sources['ext'] == 3*e+3)].values[0]\r\n\r\n            if sources['Fh'].iloc[iVal] < 0:\r\n                sources['class'].iloc[iVal] = 'bad'\r\n                sources['issue'].iloc[iVal] = 'noise'\r\n            elif sources['Fm'].iloc[iVal] < 0:\r\n                sources['class'].iloc[iVal] = 'bad'\r\n                sources['issue'].iloc[iVal] = 'noise'\r\n            elif sources['Fc'].iloc[iVal] < 0:\r\n                sources['class'].iloc[iVal] = 'bad'\r\n                sources['issue'].iloc[iVal] = 'noise'\r\n            elif sources['class'].iloc[iVal] != 'bad':\r\n                if sources['badpix'].iloc[iVal] / 5000 > 0.10:\r\n                    sources['class'].iloc[iVal] = 'bad'\r\n                    sources['issue'].iloc[iVal] = 'missing'\r\n                else:\r\n                    sources['class'].iloc[iVal] = 'good'\r\n                    sources['issue'].iloc[iVal] = 'okay'\r\n    sources.to_csv(\"specDetsClass.csv\", index=False)\r\n\r\n\r\n# This will read in the output table created from 'plot' and add those class/issue values to the larger table created\r\n# with 'table' and makeClassificationStats.py\r\nif flag == 'combine':\r\n    classList = pd.read_table(\"probNames.txt\", delim_whitespace=True)\r\n    for l in range(len(classList)):\r\n        AGN = classList['ID'].iloc[l]\r\n        ext = classList['ext'].iloc[l]\r\n        issue = classList['issue'].iloc[l]\r\n        if issue == 'w':\r\n            prob = 'noise'\r\n        elif issue == 's':\r\n            prob = 'splice'\r\n        elif issue == 'm':\r\n            prob = 'missing'\r\n        else:\r\n            prob = 'other'\r\n\r\n        iVal = sources.index[(sources['ID'] == AGN) & (sources['ext'] == ext)].values[0]\r\n\r\n        if sources['class'].iloc[iVal] == 'good':\r\n            sources['class'].iloc[iVal] = 'bad'\r\n            sources['issue'].iloc[iVal] = prob\r\n\r\n    sources.to_csv(\"specDetsModel.csv\", index=False)\r\n","repo_name":"jhoormann/RMCodeDump","sub_path":"classifySpec.py","file_name":"classifySpec.py","file_ext":"py","file_size_in_byte":24914,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"22313848024","text":"from Crypto.Util.number import *\nfrom output import pubkey, c\nfrom tqdm import tqdm\nimport random\n\nTemperingMaskB = 0x9d2c5680\nTemperingMaskC = 0xefc60000\n\ndef untemper(y):\n    y = undoTemperShiftL(y)\n    y = undoTemperShiftT(y)\n    y = undoTemperShiftS(y)\n    y = undoTemperShiftU(y)\n    return y\n\ndef undoTemperShiftL(y):\n    last14 = y >> 18\n    final = y ^ last14\n    return final\n\ndef undoTemperShiftT(y):\n    first17 = y << 15\n    final = y ^ (first17 & TemperingMaskC)\n    return final\n\ndef undoTemperShiftS(y):\n    a = y << 7\n    b = y ^ (a & TemperingMaskB)\n    c = b << 7\n    d = y ^ (c & TemperingMaskB)\n    e = d << 7\n    f = y ^ (e & TemperingMaskB)\n    g = f << 7\n    h = y ^ (g & TemperingMaskB)\n    i = h << 7\n    final = y ^ (i & TemperingMaskB)\n    return final\n\ndef undoTemperShiftU(y):\n    a = y >> 11\n    b = y ^ a\n    c = b >> 11\n    final = y ^ c\n    return final\n\ns, t, n = pubkey\nfenc = []\ninv = [inverse(i, n) for i in range(256)]\n\ndef xgcd(a, b):\n    x0, x1, y0, y1 = 0, 1, 1, 0\n    while a != 0:\n        (q, a), b 
= divmod(b, a), a\n y0, y1 = y1, y0 - q * y1\n x0, x1 = x1, x0 - q * x1\n return b, x0, y0\n\ndef parse_1024bits(v):\n arr = []\n for i in range(0, 1024, 32):\n arr.append(v & 0xFFFFFFFF)\n v >>= 32\n return arr\n\n_, a, b = xgcd(s, t)\nassert s * a + t * b == 1\n\nv384s = []\nv416s = []\nv608s = []\nv640s = []\n\nfor idx, (c1, c2) in tqdm(enumerate(c), total=len(c)):\n if c1 == 0:\n fenc.append(0)\n continue\n\n for i in range(1, 256):\n rs = inv[i] * c1 % n\n rt = inv[i] * c2 % n\n if pow(rs, t, n) == pow(rt, s, n):\n fenc.append(i)\n if idx == 10:\n to_append = v384s\n elif idx == 11:\n to_append = v416s\n elif idx == 17:\n to_append = v608s\n elif idx == 18:\n to_append = v640s\n else:\n break\n r = pow(rs, a, n) * pow(rt, b, n) % n\n assert pow(r, s, n) == rs and pow(r, t, n) == rt\n while r.bit_length() <= 1024:\n to_append.append(r)\n r += n\n break\n else:\n print(\"??????\")\n exit(0)\n\nfenc = bytes_to_long(bytes(fenc))\n\nfor v384 in v384s:\n for v416 in v416s:\n for v608 in v608s:\n for v640 in v640s:\n state = parse_1024bits(s)\n state += [0] * (384 - 64)\n state += parse_1024bits(v384)\n state += parse_1024bits(v416)\n state += [0] * (608 - 448)\n state += parse_1024bits(v608)\n state += parse_1024bits(v640)[:16]\n assert len(state) == 624\n\n state = [untemper(v) if v else 0 for v in state]\n random.setstate((3, tuple(state + [0]), None))\n assert random.getrandbits(1024) == s\n\n for i in range(33):\n v = state[-1] ^ state[396]\n if v & 0x80000000:\n y = ((v ^ 0x9908b0df) << 1) | 1\n else:\n y = v << 1\n \n if i == 0:\n if (y & 0x7FFFFFFF) != state[0] & 0x7FFFFFFF:\n break\n state = [y & 0x80000000] + state[:-1]\n else:\n assert state[0] & 0x7FFFFFFF == 0\n state[0] |= y & 0x7FFFFFFF\n if i < 32:\n state = [y & 0x80000000] + state[:-1]\n else:\n random.setstate((3, tuple(state + [0]), None))\n a = random.getrandbits(1024)\n phi_mul = (1 - a) * t + a * s\n if phi_mul < 0:\n phi_mul = -phi_mul\n while phi_mul % 0x10001 == 0:\n phi_mul //= 0x10001\n \n if pow(2, phi_mul, n) != 1:\n continue\n\n res = inverse(0x10001, phi_mul)\n\n print(long_to_bytes(pow(fenc, res, n)))","repo_name":"perfectblue/ctf-writeups","sub_path":"2022/n1ctf-2022/brand_new_checkin/solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":4157,"program_lang":"python","lang":"en","doc_type":"code","stars":592,"dataset":"github-code","pt":"83"} +{"seq_id":"71847503311","text":"import threading\nfrom typing import List\n\nimport wx\n\nfrom Tools.SitemapGenerator import SitemapGenerator\n\n\nclass SitemapThread(threading.Thread):\n \"\"\"\n Creates a sitemap.\n \"\"\"\n\n def __init__(self, parent, pages: List[str], work_dir: str, disable: bool):\n \"\"\"\n Sitemap thread constructor.\n :param parent: The gui object that should receive the result.\n :param pages: List of pages to put into the sitemap.\n :param work_dir: Working directory of the editor. The sitemap and robots.txt will be saved there.\n :param disable: Leave editor disabled after thread finishes.\n \"\"\"\n threading.Thread.__init__(self)\n self._parent = parent\n self._pages = pages\n self._work_dir = work_dir\n self._disable = disable\n self._generator = SitemapGenerator(self._pages)\n\n def run(self) -> None:\n \"\"\"\n Overrides Thread.run. 
Don't call this directly its called internally when you call Thread.start().\n :return: None, this method calls the wx.CallAfter to pass results back into GUI.\n \"\"\"\n sitemap = self._generator.create_sitemap()\n wx.CallAfter(self._parent.on_sitemap_done, sitemap, self._disable)\n","repo_name":"Athwale/Python-White-Bear-Editor","sub_path":"Threads/SitemapThread.py","file_name":"SitemapThread.py","file_ext":"py","file_size_in_byte":1226,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"74358358351","text":"import streamlit as st\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom PIL import Image\r\nimport pickle\r\nimport altair as alt\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nimage = Image.open('logo.png')\r\ncola, colb, colc = st.columns([3,6,1])\r\nwith cola:\r\n st.write(\"\")\r\n\r\nwith colb:\r\n st.image(image, width = 300)\r\n\r\nwith colc:\r\n st.write(\"\")\r\nmenu = [\"Home\",\"About\"]\r\nchoice = st.sidebar.selectbox(\"Menu\",menu)\r\nif choice == \"Home\":\r\n st.write(\"\"\"\r\n # Penguin Prediction App\r\n\r\n This app predicts the **Palmer Penguin** species!\r\n\r\n Data obtained from the [palmerpenguins library](https://github.com/allisonhorst/palmerpenguins) in R by Allison Horst.\r\n \"\"\")\r\n cola, colb, colc = st.columns([1,6,1])\r\n irisi = Image.open('all.png')\r\n with cola:\r\n st.write(\"\")\r\n with colb:\r\n st.image(irisi, width = 600)\r\n with colc:\r\n st.write(\"\")\r\n\r\n\r\n st.sidebar.header('User Input Parameters')\r\n\r\n\r\n st.sidebar.header('User Input Features')\r\n\r\n st.sidebar.markdown(\"\"\"\r\n [Example CSV input file](https://raw.githubusercontent.com/dataprofessor/data/master/penguins_example.csv)\r\n \"\"\")\r\n\r\n # Collects user input features into dataframe\r\n uploaded_file = st.sidebar.file_uploader(\"Upload your input CSV file\", type=[\"csv\"])\r\n if uploaded_file is not None:\r\n input_df = pd.read_csv(uploaded_file)\r\n else:\r\n def user_input_features():\r\n island = st.sidebar.selectbox('Island',('Biscoe','Dream','Torgersen'))\r\n sex = st.sidebar.selectbox('Sex',('male','female'))\r\n bill_length_mm = st.sidebar.slider('Bill length (mm)', 32.1,59.6,43.9)\r\n bill_depth_mm = st.sidebar.slider('Bill depth (mm)', 13.1,21.5,17.2)\r\n flipper_length_mm = st.sidebar.slider('Flipper length (mm)', 172.0,231.0,201.0)\r\n body_mass_g = st.sidebar.slider('Body mass (g)', 2700.0,6300.0,4207.0)\r\n data = {'island': island,\r\n 'bill_length_mm': bill_length_mm,\r\n 'bill_depth_mm': bill_depth_mm,\r\n 'flipper_length_mm': flipper_length_mm,\r\n 'body_mass_g': body_mass_g,\r\n 'sex': sex}\r\n features = pd.DataFrame(data, index=[0])\r\n return features\r\n input_df = user_input_features()\r\n\r\n # Combines user input features with entire penguins dataset\r\n # This will be useful for the encoding phase\r\n penguins_raw = pd.read_csv('penguins_cleaned.csv')\r\n penguins = penguins_raw.drop(columns=['species'])\r\n df = pd.concat([input_df,penguins],axis=0)\r\n\r\n # Encoding of ordinal features\r\n # https://www.kaggle.com/pratik1120/penguin-dataset-eda-classification-and-clustering\r\n encode = ['sex','island']\r\n for col in encode:\r\n dummy = pd.get_dummies(df[col], prefix=col)\r\n df = pd.concat([df,dummy], axis=1)\r\n del df[col]\r\n df = df[:1] # Selects only the first row (the user input data)\r\n\r\n # Displays the user input features\r\n st.subheader('User Input features')\r\n\r\n if uploaded_file is not None:\r\n st.write(df)\r\n else:\r\n 
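# nothing uploaded yet: show a hint and fall back to the example parameters from the sidebar widgets\r\n        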
st.write('Awaiting CSV file to be uploaded. Currently using example input parameters (shown below).')\r\n st.write(df)\r\n\r\n # Reads in saved classification model\r\n load_clf = pickle.load(open('penguins_clf.pkl', 'rb'))\r\n\r\n # Apply model to make predictions\r\n prediction = load_clf.predict(df)\r\n prediction_proba = load_clf.predict_proba(df)\r\n\r\n\r\n st.subheader('Prediction')\r\n penguins_species = np.array(['Adelie','Chinstrap','Gentoo'])\r\n # st.write(penguins_species[prediction])\r\n cola, colb, colc = st.columns([4,6,1])\r\n Gentoo = Image.open('gentoo.jpg')\r\n Chinstrap = Image.open('chinstrap.jpg')\r\n Adelie = Image.open('adelie.jpg')\r\n with cola:\r\n st.write(\"\")\r\n with colb:\r\n if (penguins_species[prediction]==\"Adelie\"):\r\n st.image(Adelie, width = 200)\r\n st.write(\"Adelie\")\r\n elif (penguins_species[prediction]==\"Chinstrap\"):\r\n st.image(Chinstrap, width = 200)\r\n st.write(\"Chinstrap\")\r\n elif (penguins_species[prediction]==\"Gentoo\"):\r\n st.image(Gentoo, width = 200)\r\n st.write(\"Gentoo\")\r\n with colc:\r\n st.write(\"\")\r\n\r\n\r\n st.subheader('Prediction Probability')\r\n # st.write(prediction_proba)\r\n\r\n proba_df_clean = prediction_proba.T\r\n proba_df= pd.DataFrame(proba_df_clean, columns=[\"Probabilities\"])\r\n penguins_n= ['Adelie','Chinstrap','Gentoo']\r\n proba_df[\"Penguins\"]= penguins_n\r\n # st.write(type(proba_df))\r\n column_names = [\"Penguins\", \"Probabilities\"]\r\n proba_df = proba_df.reindex(columns=column_names)\r\n st.write(proba_df)\r\n fig = alt.Chart(proba_df).mark_bar().encode(x='Penguins',y='Probabilities',color='Penguins')\r\n st.altair_chart(fig,use_container_width=True)\r\nelse:\r\n st.subheader(\"About\")\r\n st.write(\"With a hybrid profile of data science and computer science, I’m pursuing a career in AI-driven firms. 
I believe in dedication, discipline, and creativity towards my job, which will be helpful in meeting your firm's requirements as well as my personal development.\")\r\n st.write(\"Check out this project's [Github](https://github.com/bashirsadat/penguins)\")\r\n st.write(\" My [Linkedin](https://www.linkedin.com/in/saadaat/)\")\r\n st.write(\"See my other projects [LinkTree](https://linktr.ee/saadaat)\")\r\n","repo_name":"bashirsadat/penguins","sub_path":"penguins-app.py","file_name":"penguins-app.py","file_ext":"py","file_size_in_byte":5459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"33969575055","text":"import click\n\nfrom yag import (\n install as yag_install,\n remove as yag_remove,\n run as yag_run,\n scan as yag_scan,\n search as yag_search,\n)\n\nfrom pathlib import Path\n\nSOURCE_SEP = ','\n\n\n@click.group()\ndef cli():\n pass\n\n\n@cli.command()\n@click.argument(\"title\", required=True)\n@click.option(\"--host\", required=False, default=\"127.0.0.1\")\ndef search(title, host):\n print(yag_search(title, host))\n return 0\n\n\n@cli.command()\n@click.argument(\"title\", required=True)\n@click.option(\"--host\", required=False, default=\"127.0.0.1\")\n@click.option(\"--source\", required=False, default=None)\n@click.option(\"--debug\", required=False, default=False, is_flag=True)\ndef install(title, host, source, debug):\n if source:\n if SOURCE_SEP in source:\n source = [Path(s.strip()) for s in source.split(SOURCE_SEP)]\n else:\n source = Path(source)\n yag_install(title, host, source, debug)\n return 0\n\n\n@cli.command()\n@click.argument(\"title\", required=True)\n@click.option(\"--host\", required=False, default=\"127.0.0.1\")\n@click.option(\"--debug\", required=False, default=False, is_flag=True)\ndef run(title, host, debug):\n yag_run(title, host, debug)\n return 0\n\n\n@cli.command()\n@click.argument(\"title\", required=True)\n@click.option(\"--host\", required=False, default=\"127.0.0.1\")\n@click.option(\"--debug\", required=False, default=False, is_flag=True)\ndef remove(title, host, debug):\n yag_remove(title, host, debug)\n return 0\n\n\n@cli.command()\n@click.argument(\"source\", required=True)\ndef scan(source):\n print(vars(yag_scan(Path(source))))\n return 0\n\n\nif __name__ == '__main__':\n cli()\n","repo_name":"rayrapetyan/yag","sub_path":"yag/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":1637,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"83"} +{"seq_id":"8925255420","text":"import pytest\nimport os\nimport testfilemanager\nfrom os import path, remove\nfrom plaza_preprocessing import __main__, configuration\n\n\n@pytest.fixture\ndef config():\n config_path = 'testconfig.yml'\n yield configuration.load_config(config_path)\n os.remove(config_path)\n\n\ndef test_main(config):\n testfile = testfilemanager.get_testfile_name('helvetiaplatz')\n out_file = 'helvetiaplatz-merged.osm'\n try:\n __main__.preprocess_osm(testfile, out_file, config)\n assert path.exists(out_file)\n finally:\n remove(out_file)\n","repo_name":"PlazaRoute/plazaroute","sub_path":"plaza_preprocessing/tests/test_main.py","file_name":"test_main.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"83"} +{"seq_id":"42622976874","text":"import csv\n\nfrom requests import session\nimport streamlit as st\nimport pandas as pd\nimport numpy as 
np\n\n\nst.set_page_config(layout=\"wide\")\nst.markdown(\"<h1 style='text-align: center;'>STREAMING WARS : INDIA</h1>\", unsafe_allow_html=True)\nst.markdown(\"
\", unsafe_allow_html=True)\nd1,d2,d3 = st.columns((1,4,1))\nd2.image('resources/streaming2.jpg', use_column_width=True, clamp=True)\n\n\ndef read_data():\n content = pd.read_csv('sample/all_data.csv')\n\n # data cleaning\n content = content[['title', 'type','platform','averageRating', 'numVotes','description', 'year', 'release_year',\n 'genre', 'age_certification', 'seasons', 'titleType', 'runtimeMinutes']]\n content.rename(columns={'title':'Title', 'type':'Type'}, inplace=True)\n\n content.release_year.fillna(content.year, inplace=True)\n content.drop(columns={'year'}, inplace = True)\n content.release_year = content.release_year.astype(int)\n content.numVotes = content.numVotes.astype(int)\n content.seasons = content.seasons.astype(str)\n\n content.Type.replace('movie', 'MOVIE', inplace=True)\n content.Type.replace('tv', 'TV SHOW', inplace=True)\n content.Type.replace('SHOW', 'TV SHOW', inplace=True)\n content.averageRating = [(\"%.2f\" % x) for x in content.averageRating] #limiting to 2 decimal places\n #content['release_year'] = [x.replace('.0', '') for x in content['release_year']]\n\n content.genre = [x.lower() for x in content.genre]\n content.genre = [x.replace(\"'\", \"\") for x in content.genre]\n content.genre = [x.replace(\"[\", \"\") for x in content.genre]\n content.genre = [x.replace(\"]\", \"\") for x in content.genre]\n content.genre = [x.replace(\" \", \"\") for x in content.genre]\n content.genre = [x.replace(\"documentation\", \"documentary\") for x in content.genre]\n content.genre = [x.replace(\"scifi\", \"sci-fi\") for x in content.genre]\n content.genre = [x.replace(\"sciencefiction\", \"sci-fi\") for x in content.genre]\n content.genre = [x.replace(\"science\", \"sci-fi\") for x in content.genre]\n content.genre = [x.replace(\"historical\", \"history\") for x in content.genre]\n content.genre = [x.replace(\"musical\", \"music\") for x in content.genre]\n return content\n\ndef sort_popular(content):\n sorted = content.sort_values(by='averageRating', ascending=False)\n sorted.reset_index(inplace=True, drop=True)\n return sorted\n\ndef select_type(content, c):\n \n type = c.radio('Select type of content (Movie/TV Show) :', ('All', 'MOVIE', 'TV SHOW'), key='type-radio')\n \n if (type=='All'):\n sorted_type = content.Type\n else:\n sorted_type = sort_popular(content[content.Type==type])\n \n return sorted_type\n\ndef select_platform(content, c):\n plat = c.radio('Select Platform :', ('All','Netflix', 'amazon prime', 'hotstar'), key='plat_radio')\n if (plat=='All'):\n sorted_plat = sort_popular(content)\n else:\n sorted_plat = sort_popular(content[content.platform==plat])\n return sorted_plat\n\ndef select_genres(content, c):\n \n genres= content.genre.str.split(\",\").explode('genre').value_counts().index.tolist()\n genres.insert(0, 'All')\n selected_genre = c.selectbox('Select genre', (genres), key='genre-radio')\n if (selected_genre=='All'):\n sorted_genre = sort_popular(content)\n else:\n sorted_genre = sort_popular(content[content.genre.str.contains(selected_genre)])\n return sorted_genre\n\ndef st_dataframe(df):\n st.dataframe(df)\n\nif 'flag' not in st.session_state:\n st.session_state.flag = False\n\nif 'watchlist' not in st.session_state:\n st.session_state.watchlist = []\n\ncontent = read_data()\nsorted = sort_popular(content)\n\nif 'df' not in st.session_state:\n st.session_state.df = sorted\n\ndef form_setup():\n form = st.form(key=\"filters\")\n c1, c2, c3 = form.columns((2, 1, 1))\n\n type = c3.radio('Select type of content (Movie/TV Show) :', ('All', 
'MOVIE', 'TV SHOW'), key='type-radio')\n plat = c2.radio('Select Platform :', ('All','Netflix', 'amazon prime', 'hotstar'), key='plat_radio')\n genres= content.genre.str.split(\",\").explode('genre').value_counts().index.tolist()\n genres.insert(0, 'All')\n sel_genre = c1.selectbox('Select genre', (genres), key='genre-radio')\n c4, c5,c6 = form.columns((2,2,1))\n state = c5.form_submit_button(\"Filter\")\n \n search = st.text_input(\"Search Titles\", \"\")\n def watchlist():\n titles = form.multiselect(\"Select Titles for watchlist\", st.session_state.df)\n st.session_state.watchlist.append(titles)\n submit = form.form_submit_button(\"Submit!\")\n if submit:\n st.write(\"Your list is :\", st.session_state.watchlist)\n #st.session_state.flag=0\n fil1 = content\n if state:\n \n if type!='All':\n fil1= fil1[fil1.Type==type]\n\n if plat!='All':\n fil1 = fil1[fil1.platform==plat]\n\n if sel_genre!='All':\n fil1 = fil1[fil1.genre.str.contains(sel_genre, case=False)]\n\n if search:\n fil1 = content[content.Title.str.contains(search, case=False)] \n\n\n pop = sort_popular(fil1)\n st.session_state.df = pop\n form.write(\"Create a watchlist :\")\n \n st.dataframe(st.session_state.df)\n\nform_setup()\n\ndef watchlist_form():\n form2 = st.form(key=\"watch\")\n \n def sub(): \n #st.session_state.listname = \"temp\"\n form2.write(\"Your list is :\")\n df2 = content[content.Title.isin(st.session_state.multiselects)].reset_index()\n form2.table(df2)\n\n \n form2.write(\"Name your watchlist and save it : \")\n name = form2.text_input(\"Name of Watchlist\", key=\"listname\")\n\n def saved():\n print(st.session_state.listname)\n df2 = content[content.Title.isin(st.session_state.multiselects)].reset_index()\n st.session_state.multiselects = []\n\n df2['Name'] = st.session_state.listname\n try:\n csv_file = pd.read_csv('resources/watchlists.csv')\n except pd.errors.EmptyDataError:\n csv_file= df2\n\n else:\n #df2 = df2.apply(pd.to_numeric, errors='coerce')\n df2.averageRating = pd.to_numeric(df2.averageRating, errors='coerce')\n df2.runtimeMinutes = pd.to_numeric(df2.runtimeMinutes, errors='coerce')\n df2.seasons = pd.to_numeric(df2.seasons, errors='coerce')\n csv_file = csv_file.append(df2)\n #form2.write(csv_file)\n csv_file.to_csv('resources/watchlists.csv', index=False)\n #print(st.session_state.name_watchlist)\n \n save = form2.form_submit_button(\"Save\", on_click=saved)\n\n\n col= form2.columns((2,1))\n titles = col[0].multiselect(\"Select Titles for watchlist\", content, key = \"multiselects\")\n\n st.session_state.watchlist.append(titles)\n #st.session_state\n submit = col[1].form_submit_button(\"Submit!\", on_click=sub)\n\nwatchlist_form()\n \n\ndef view_lists():\n check = st.checkbox(\"View saved watchlists\")\n if check:\n try:\n csv_file = pd.read_csv('resources/watchlists.csv')\n except pd.errors.EmptyDataError:\n st.write(\"No saved lists yet\")\n else:\n names = csv_file['Name'].unique()\n for n in names:\n with st.expander(n):\n st.write(csv_file[csv_file.Name==n])\nview_lists()\n#watchlist()\n#AwesomeTable(sorted)\n\n\n \n","repo_name":"anvitag/movie-dashboard","sub_path":"sample/sample.py","file_name":"sample.py","file_ext":"py","file_size_in_byte":7425,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"1764125806","text":"#!/usr/bin/env python\nimport rospy\nfrom sensor_msgs.msg import Joy\nfrom geometry_msgs.msg import Twist\nfrom std_msgs.msg import Int8MultiArray\nfrom std_msgs.msg import MultiArrayDimension\n\nBUTTONS_NUM = 
11\nvel_msg = Twist()\nbutton = Int8MultiArray()\nbutton.data = [0 for _ in range(BUTTONS_NUM)]\n\nrospy.init_node('joy_converter')\n\nx_gain = rospy.get_param('~x', 1)\ny_gain = rospy.get_param('~y', 1)\nz_gain = rospy.get_param('~z', 1)\n\ndef callback(_data):\n    vel_msg.linear.x = _data.axes[1] * x_gain\n    vel_msg.linear.y = _data.axes[0] * y_gain\n    vel_msg.angular.z = _data.axes[3] * z_gain\n    for i in range(BUTTONS_NUM):\n        button.data[i] = _data.buttons[i]\ndef main():\n    pub_cmd_vel = rospy.Publisher('/cmd_vel', Twist, queue_size=10)\n    pub_buttons = rospy.Publisher('/buttons', Int8MultiArray, queue_size=10)\n    r = rospy.Rate(10)\n    button.layout.dim.append(MultiArrayDimension())\n    button.layout.dim[0].size = BUTTONS_NUM\n    rospy.Subscriber(\"joy\", Joy, callback)\n    while not rospy.is_shutdown():\n        pub_cmd_vel.publish(vel_msg)\n        pub_buttons.publish(button)\n        r.sleep()\n\nif __name__ == '__main__':\n    main()\n","repo_name":"TaiyouKomazawa16/ros_ps_rover_base","sub_path":"scripts/joy_converter.py","file_name":"joy_converter.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"35835824429","text":"# Pascal's triangle\n# try solving it with backtracking\nT = int(input())\n\nfor x in range(T):\n    N = int(input())\n    li = [[0] * N for _ in range(N)]\n    li[0][0] = 1\n    for i in range(N):\n        li[i][0] = 1\n        li[i][i] = 1\n    \n    for i in range(N):\n        for j in range(N):\n            if li[i][j] != 1:\n                li[i][j] = li[i-1][j-1] + li[i-1][j]\n    print(f'#{x+1}')\n    for i in range(N):\n        for j in range(i+1):\n            print(li[i][j], end = ' ')\n        print()\n\n# ------------------------------------------------- DP / combinations\nmemo = [[0] * 100 for _ in range(100)]\n\ndef recur(cur, cnt):\n    \n    if memo[cur][cnt]:\n        return memo[cur][cnt]\n\n    if cur == cnt or cnt == 0:\n        memo[cur][cnt] = 1\n        return 1\n\n    memo[cur][cnt] = recur(cur - 1, cnt - 1) + recur(cur - 1, cnt) # second term: the case where the item is not chosen\n    return memo[cur][cnt]\n\nrecur(4, 4)\n","repo_name":"CodeAlpacat/Algorithm_PS","sub_path":"SWEA_algo/2005_파스칼의 삼각형.py","file_name":"2005_파스칼의 삼각형.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"83"} +{"seq_id":"5566370864","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nFind the sum of the numbers from 10 to 100 that are divisible by 3 or 5\n\"\"\"\n\ndef solve():\n    n1 = 10\n    n2 = 100\n    ret = list()\n    for n in range(n1, n2 + 1):\n        if n % 3 == 0 or n % 5 == 0:\n            ret.append(n)\n    print(sum(ret))\n\n\nif __name__ == \"__main__\":\n    solve()","repo_name":"zhaoyanz405/weed","sub_path":"interview/in-2.py","file_name":"in-2.py","file_ext":"py","file_size_in_byte":283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"71811414671","text":"\"\"\"\n@title: 136. Single Number\n@src: https://leetcode.com/problems/single-number/\n@Difficulty: EASY\n\nGiven a non-empty array of integers nums, every element appears twice except for one. 
Find that single one.\nYou must implement a solution with a linear runtime complexity and use only constant extra space.\n\nExample 1:\nInput: nums = [2,2,1]\nOutput: 1\nExample 2:\nInput: nums = [4,1,2,1,2]\nOutput: 4\nExample 3:\nInput: nums = [1]\nOutput: 1\n\n@Score: \nRuntime: 223 ms, faster than 67.41% of Python3 online submissions for Single Number.\nMemory Usage: 16.7 MB, less than 83.57% of Python3 online submissions for Single Number.\n\n\n\"\"\"\n\nimport os\nos.system('clear')\n\n\ndef singleNumber(nums):\n    temp = {}\n    for num in nums:\n        if num in temp:\n            temp.pop(num)\n        else:\n            temp[num] = True\n\n    res = list(temp.keys())[0]\n    return res\n\n\n# def singleNumber(nums):\n#     temp = {}\n#     for num in nums:\n#         if num in temp:\n#             temp.pop(num)\n#         else:\n#             temp[num] = True\n\n#     res = list(temp.keys())[0]\n#     return res\n\n\nsingleNumber([2, 2, 1]) # 1\nsingleNumber([4, 1, 2, 1, 2]) # 4\n","repo_name":"dipenparmar12/dsa","sub_path":"src/_challages/singleNumber.py","file_name":"singleNumber.py","file_ext":"py","file_size_in_byte":1143,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"13821324413","text":"import logging\nimport numpy as np\nfrom . import exceptions as excpt\nfrom .block import Block,Input,NamedInput\nfrom . import data as data\nfrom .data import ModelState, RunResult\n\nlogger=logging.getLogger(__name__)\n\nclass Function_multi_input(Block):\n\n    def __init__(self,name=None):\n        cls=self.__class__.__name__\n        name=name if name else cls\n        super().__init__(n_max=-1,block_class=cls,name=name)\n        self.data_obj=data.STREAM_DATA()\n\n    def run(self,ms:ModelState)->RunResult:\n        r=None\n        if self.data_availible():\n            for i in self.block_sources:\n                if r is None:\n                    r=i.out_data_obj.data\n                else:\n                    r=self.func(ms,r,i.out_data_obj.data)\n\n            self.data_obj.data=r\n            self.out_data_valid=True\n        else:\n            self.out_data_valid=False\n\n        return RunResult(False,self.out_data_valid)\n\n\n\n\nclass Function_one_input(Block):\n    def __init__(self,name=None):\n        cls=self.__class__.__name__\n        name=name if name else cls\n        super().__init__(n_max=1,block_class=cls,name=name)\n        self.data_obj=data.STREAM_DATA()\n\n    def run(self,ms:ModelState)->RunResult:\n        if self.data_availible():\n            data_obj=self.block_sources[0].out_data_obj\n            self.data_obj.data=self.func(ms,data_obj.data) \n            self.out_data_valid=True\n        else:\n            self.out_data_valid=False\n        \n        \n        return RunResult(False,self.out_data_valid)\n\n\nclass Sum(Function_multi_input):\n\n    def func(self,ms:ModelState,r,l):\n        return r+l\n\nclass Sub(Function_multi_input):\n    \n    def func(self,ms:ModelState,r,l):\n        return r-l\n\nclass Multiplier(Function_multi_input):\n    \n    def func(self,ms:ModelState,r,l):\n        return r*l\n\nclass ABS(Function_one_input):\n    \n    def func(self,ms:ModelState,data):\n        return np.abs(data) \n\nclass REAL(Function_one_input):\n    \n    def func(self,ms:ModelState,data):\n        return np.real(data) \n\nclass IMAG(Function_one_input):\n    \n    def func(self,ms:ModelState,data):\n        return np.imag(data) \n\nclass Recipricol(Function_one_input):\n    \n    def func(self,ms:ModelState,data):\n        return (1.0/data) \n\n\nclass Integrate(Function_one_input):\n    \n    def __init__(self,**kwargs):\n        super().__init__(**kwargs)\n        self.integral=0.0\n        self.gain=kwargs.get(\"gain\",1.0)\n        self.sat_max=kwargs.get(\"smax\",np.finfo(float).max)\n        self.sat_min=kwargs.get(\"smin\",-np.finfo(float).max)\n    \n    \n    def func(self,ms:ModelState,data):\n        if self.last_run_time:\n            dt=ms.time-self.last_run_time\n            self.integral+=data*self.gain*dt\n            
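# clamp the running integral into [sat_min, sat_max] (a simple anti-windup guard)\n            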
self.integral=self.integral if self.integral<self.sat_max else self.sat_max\n            self.integral=self.integral if self.integral>self.sat_min else self.sat_min\n\n            self.last_run_time=ms.time\n            return (self.integral) \n\n\nclass Differentiate(Function_one_input):\n    \n    def __init__(self,**kwargs):\n        super().__init__(**kwargs)\n        self.z=0.0\n        self.gain=kwargs.get(\"gain\",1.0)\n        self.sat_max=kwargs.get(\"smax\",np.finfo(float).max)\n        self.sat_min=kwargs.get(\"smin\",-np.finfo(float).max)\n\n    \n    def func(self,ms:ModelState,data):\n        if self.last_run_time:\n            dt=ms.time-self.last_run_time\n            self.last_run_time=ms.time\n            diff=(data-self.z)*dt\n            self.z=data\n        \n        diff=diff if diff<self.sat_max else self.sat_max\n        diff=diff if diff>self.sat_min else self.sat_min\n        return (diff) \n    \n    \nclass Switch(Block):\n\n    def __init__(self,name:str=None,select:int=1):\n        cls=self.__class__.__name__\n        name=name if name else cls\n        super().__init__(n_max=-1,block_class=cls,name=name)\n        self.data_obj=data.STREAM_DATA()\n        self.select_input = NamedInput(\"select\", select)\n\n    def run(self,ms:ModelState)->RunResult:\n        _select = int(self.select_input.get())-1\n        self.out_data_valid=False\n        if _select<len(self.block_sources):\n            # (reconstructed) pass the selected source's data through when it is valid;\n            # the original body of this branch was lost to markup stripping\n            src=self.block_sources[_select]\n            if src.out_data_valid:\n                self.data_obj.data=src.out_data_obj.data\n                self.out_data_valid=True\n        return RunResult(False,self.out_data_valid)\n\n\nclass Saturate(Function_one_input):\n    # (reconstructed) symmetric clamp at +/- full-scale; class name and fsd default are assumed\n\n    def __init__(self,**kwargs):\n        super().__init__(**kwargs)\n        self.fsd=kwargs.get(\"fsd\",1.0)\n\n    def func(self,ms:ModelState,d):\n        d=d if d<self.fsd else self.fsd\n        d=d if d>-self.fsd else -self.fsd\n        return (d) \n","repo_name":"caspar-1/simple_sim","sub_path":"src/simplesimulator/blocks/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":4771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"12067452575","text":"import re\nimport sys\n\nfrom setuptools import setup\n\nwith open('modcli/__init__.py', 'r') as fh:\n    version = re.search(r'^__version__\\s*=\\s*[\\'\"]([^\\'\"]*)[\\'\"]', fh.read(), re.MULTILINE).group(1)\n\nif sys.version_info[0] < 3:\n    raise Exception(\"Must be using Python 3\")\n\nsetup(\n    name='mod-devel-cli',\n    python_requires='>=3',\n    version=version,\n    description='MOD Command Line Interface',\n    author='Alexandre Cunha',\n    author_email='alex@moddevices.com',\n    license='Proprietary',\n    install_requires=[\n        'click==6.7',\n        'crayons==0.1.2',\n        'requests>=2.18.4',\n    ],\n    packages=[\n        'modcli',\n    ],\n    entry_points={\n        'console_scripts': [\n            'modcli = modcli.cli:main',\n        ]\n    },\n    classifiers=[\n        'Intended Audience :: Developers',\n        'Natural Language :: English',\n        'Operating System :: OS Independent',\n        'Programming Language :: Python',\n        'Programming Language :: Python :: 3',\n    ],\n    url='http://moddevices.com/',\n)\n","repo_name":"moddevices/mod-devel-cli","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1026,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"83"} +{"seq_id":"9038022996","text":"\"\"\"\nA menu - you need to add the database and fill in the functions. \n\"\"\"\nfrom peewee import *\ndb = SqliteDatabase('chainsaw_records.sqlite')\n\nclass Juggler(Model):\n    name = CharField()\n    country = CharField()\n    catches = IntegerField()\n    \n    class Meta:\n        database = db\n\n    def __str__(self):\n        return f'{self.name}, {self.country}, {self.catches}'\n    \n\ndef main():\n\n\n    create_table()\n\n    menu_text = \"\"\"\n    1. Display all records\n    2. Search by name\n    3. Add new record\n    4. Edit existing record\n    5. Delete record \n    6. 
Quit\n \"\"\"\n\n while True:\n print(menu_text)\n choice = input('Enter your choice: ')\n if choice == '1':\n display_all_records()\n elif choice == '2':\n search_by_name()\n elif choice == '3':\n add_new_record()\n elif choice == '4':\n edit_existing_record()\n elif choice == '5':\n delete_record()\n elif choice == '6':\n break\n else:\n print('Not a valid selection, please try again')\n\n\ndef create_table():\n db.connect()\n db.create_tables([Juggler])\n\n\ndef display_all_records():\n all_jugglers = Juggler.select()\n for contestant in all_jugglers:\n print(contestant)\n\n\ndef search_by_name():\n name_to_search = input('Enter the name of the contestant you want to look up: ')\n contestant = Juggler.get_or_none(name=name_to_search)\n if contestant:\n print(contestant)\n else:\n print('There were no players with that name in the database.')\n\n\ndef add_new_record():\n contestant_name = input('Enter name: ')\n contestant_country = input('Enter country: ')\n number_of_catches = int(input('Enter the number of catches: '))\n\n new_record = Juggler(name=contestant_name, country=contestant_country, catches=number_of_catches)\n new_record.save()\n print('todo add new record. What if user wants to add a record that already exists?')\n\n\ndef edit_existing_record():\n contestant = input('Enter the name of the contestant you want to update: ')\n new_number_of_catches = int(input('Enter the new number of catches: '))\n rows_modified = Juggler.update(catches=new_number_of_catches).where(Juggler.name == contestant).execute()\n if rows_modified > 0:\n print(f'{contestant}\\'s number of catches was updated to {new_number_of_catches}.')\n else:\n print(f'{contestant} could not be found.')\n\n\ndef delete_record():\n contestant = input('Enter the name of the contestant you want to delete: ')\n rows_modified = Juggler.delete().where(Juggler.name == contestant).execute()\n if rows_modified > 0:\n print(f'{contestant} has been deleted')\n else:\n print(f'{contestant} could not be found.')\n \n\n\nif __name__ == '__main__':\n main()","repo_name":"Thuroma/capstone_week_five_peewee_lab","sub_path":"menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":2802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"73196435790","text":"import pandas as pd\nfrom pandas import DataFrame, Series\nfrom unicodedata import decimal\n\n\n####################################################################\n######################### volabitlity indicators ###################\n####################################################################\n\ndef donchian(df: DataFrame, upper_period: int = 20, lower_period: int = 5\n ) -> DataFrame:\n\n upper = pd.Series(\n df[\"high\"].rolling(center=False, window=upper_period).max(), name=\"donchian_uper\"\n )\n lower = pd.Series(\n df[\"low\"].rolling(center=False, window=lower_period).min(), name=\"donchian_lower\"\n )\n middle = pd.Series((upper + lower) / 2, name=\"donchian_middle\")\n\n return df.join(pd.concat([lower, middle, upper], axis=1), on=\"date\")\n\ndef tr(df: DataFrame) -> Series:\n\n TR1 = pd.Series(df[\"high\"] - df[\"low\"]).abs() # True Range = High less Low\n\n TR2 = pd.Series(\n df[\"high\"] - df[\"close\"].shift()\n ).abs() # True Range = High less Previous Close\n\n TR3 = pd.Series(\n df[\"close\"].shift() - df[\"low\"]\n ).abs() # True Range = Previous Close less Low\n\n _TR = pd.concat([TR1, TR2, TR3], axis=1)\n\n _TR[\"tr\"] = _TR.max(axis=1)\n\n return df.join(pd.Series(_TR[\"tr\"], 
name=\"tr\").round(decimals=8), on=\"date\")","repo_name":"stoneyangzh/madrl-trading","sub_path":"feature_extraction/volatility_indicators.py","file_name":"volatility_indicators.py","file_ext":"py","file_size_in_byte":1351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"23763131252","text":"import streamlit as st\nimport pandas as pd\nimport numpy as np\nimport modules.OpenAIService as OpenAIService\n\n\nst.title('Test models')\n\nopenAIService = OpenAIService.OpenAIService()\nopenAIService.init_connection()\n\nwith st.form(\"my_form\"):\n    textarea_val = st.text_area(\"Enter text below:\", value=\"How to clone a cat\")\n\n    # Every form must have a submit button.\n    submitted = st.form_submit_button(\"Submit\")\n    if submitted:\n        st.header(\"text-davinci-003\")\n        #st.write(openAIService.createCompletion(\"text-davinci-003\", textarea_val, 0.6).choices[0].text)\n\n        for i in range(0, 10):\n            st.header(\"Temp \" + str(i/10.0))\n            for j in range (0, 3):\n                st.write(str(j) + \" - \" + openAIService.createCompletion(\"text-davinci-003\", textarea_val, i / 10.0).choices[0].text)\n        ","repo_name":"MichalJankowskii/OpenAIExperiments","sub_path":"pages/Temperature test.py","file_name":"Temperature test.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"5715138620","text":"from typing import Protocol, Dict, List, Iterator, Tuple, TypeVar, Optional\n\nT = TypeVar('T')\n\nLocation = TypeVar('Location')\n\n\nclass Graph(Protocol):\n    def neighbors(self, id: Location) -> List[Location]: pass\n\n\nclass SimpleGraph:\n    def __init__(self):\n        self.edges: Dict[Location, List[Location]] = {}\n\n    def neighbors(self, id: Location) -> List[Location]:\n        return self.edges[id]\n\n\nimport collections\n\n\nclass Queue:\n    def __init__(self):\n        self.elements = collections.deque()\n\n    def empty(self) -> bool:\n        return len(self.elements) == 0\n\n    def put(self, x: T):\n        self.elements.append(x)\n\n    def get(self) -> T:\n        return self.elements.popleft()\n\n\nclass WeightedGraph(Graph):\n    def cost(self, from_id: Location, to_id: Location) -> float: pass\n\n\nimport heapq\n\n\nclass PriorityQueue:\n    def __init__(self):\n        self.elements: List[T] = []\n\n    def empty(self) -> bool:\n        return len(self.elements) == 0\n\n    def put(self, item: T, priority: float):\n        heapq.heappush(self.elements, (priority, item))\n\n    def get(self) -> T:\n        return heapq.heappop(self.elements)[1]\n\n\ndef dijkstra_search(graph: WeightedGraph, start: Location, goal: Location):\n    frontier = PriorityQueue()\n    frontier.put(start, 0)\n    came_from: Dict[Location, Optional[Location]] = {}\n    cost_so_far: Dict[Location, float] = {}\n    came_from[start] = None\n    cost_so_far[start] = 0\n\n    while not frontier.empty():\n        current: Location = frontier.get()\n\n        if current == goal:\n            break\n\n        for next in graph.neighbors(current):\n            new_cost = cost_so_far[current] + graph.cost(current, next)\n            if next not in cost_so_far or new_cost < cost_so_far[next]:\n                cost_so_far[next] = new_cost\n                priority = new_cost\n                frontier.put(next, priority)\n                came_from[next] = current\n\n    return came_from, cost_so_far\n\n\n# thanks to @m1sp for this simpler version of\n# reconstruct_path that doesn't have duplicate entries\n\ndef reconstruct_path(came_from: Dict[Location, Location],\n                     start: Location, goal: Location) -> List[Location]:\n    current: Location = goal\n    path: List[Location] = []\n    while current != start:\n        path.append(current)\n        
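# step back to the predecessor that the search recorded for this node\n        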
current = came_from[current]\n    path.append(start) # optional\n    path.reverse() # optional\n    return path\n\n\ndef heuristic(a: Location, b: Location) -> float:\n    # Minimal stub (assumed): 'heuristic' is called below but was never defined in this\n    # module. Returning 0 keeps a_star_search runnable and makes it behave exactly like\n    # dijkstra_search; substitute a real lower-bound distance (e.g. Manhattan distance\n    # on a grid) for informed search.\n    return 0\n\n\ndef a_star_search(graph: WeightedGraph, start: Location, goal: Location):\n    frontier = PriorityQueue()\n    frontier.put(start, 0)\n    came_from: Dict[Location, Optional[Location]] = {}\n    cost_so_far: Dict[Location, float] = {}\n    came_from[start] = None\n    cost_so_far[start] = 0\n\n    while not frontier.empty():\n        current: Location = frontier.get()\n\n        if current == goal:\n            break\n\n        for next in graph.neighbors(current):\n            new_cost = cost_so_far[current] + graph.cost(current, next)\n            if next not in cost_so_far or new_cost < cost_so_far[next]:\n                cost_so_far[next] = new_cost\n                priority = new_cost + heuristic(goal, next)\n                frontier.put(next, priority)\n                came_from[next] = current\n\n    return came_from, cost_so_far\n\n\ndef breadth_first_search(graph: Graph, start: Location, goal: Location):\n    frontier = Queue()\n    frontier.put(start)\n    came_from: Dict[Location, Location] = {}\n    came_from[start] = None\n\n    while not frontier.empty():\n        current: Location = frontier.get()\n\n        if current == goal:\n            break\n\n        for next in graph.neighbors(current):\n            if next not in came_from:\n                frontier.put(next)\n                came_from[next] = current\n\n    return came_from\n\n\nif __name__ == \"__main__\":\n    g = {\n        0: [1, 2],\n        1: [3],\n        2: [3],\n    }\n    graph = SimpleGraph()\n    graph.edges = g\n    breadth_first_search(graph, 0, None)\n","repo_name":"intelpen/graph_algos","sub_path":"graphs/graphs_implementation.py","file_name":"graphs_implementation.py","file_ext":"py","file_size_in_byte":3945,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"22121770869","text":"import scrapy\n\nclass CoinSpider(scrapy.Spider):\n    name = \"coins\"\n    allowed_domains = [\"coinmarketcap.com\"] # scrapy expects bare domains here, not full URLs\n    start_urls = [\n        \"https://coinmarketcap.com/\",\n    ]\n\n    def parse(self, response):\n        null = None\n        for line in response.css('tr'):\n            yield {\n                'number' : line.xpath('td[1]/text()').extract_first(),\n                'name' : line.xpath('td[2]/a/text()').extract_first(),\n                'marketCap' : line.xpath('td[3]/text()').extract_first(),\n                'price' : line.xpath('td[4]/a/text()').extract_first(),\n                'circulatingSupply' : line.xpath('td[5]/a/text()').extract_first(),\n                'volume' : line.xpath('td[6]/a/text()').extract_first(),\n                'percentChange' : line.xpath('td[7]/text()').extract_first()\n            
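# NB: these positional XPaths (td[1]..td[7]) track coinmarketcap's current table layout and may break when it changes\n            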
","repo_name":"jnoehren/cryptoCurrencyTracker","sub_path":"top100/coin_spider.py","file_name":"coin_spider.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"63"} +{"seq_id":"29377050489","text":"from django.conf import settings\n\nforeignkey_cache_enabled = False\ntry:\n foreignkey_cache_enabled = settings.FORMFIELD_CACHE_ENABLED\nexcept:\n pass\n\n\nclass DummyQueryset:\n \"\"\"\n Dummy Queryset\n 1. Pretend to be a queryset object, but only provide all() method.\n 2. Cache the query result at the initial stage\n \"\"\"\n\n def __init__(self, queryset):\n self.all_ret = queryset.all()\n\n def all(self):\n return self.all_ret\n\n\ndef foreignkey_cache(*cache_fields):\n \"\"\"\n Cache decorators for the Foreignkey.\n\n Example Usage:\n\n class EntryAdmin(admin.ModelAdmin):\n\n @foreignkey_cache('state', 'postcode')\n class AddressAdmin(admin.StackedInline):\n model = Address\n\n inlines = [AddressAdmin, ]\n list_display = ('first_name', 'last_name', )\n\n admin.site.register(Entry, EntryAdmin)\n \"\"\"\n\n def f(the_admin):\n formfield_for_foreignkey_old = the_admin.formfield_for_foreignkey\n\n def formfield_for_foreignkey(self, db_field, request=None, **kwargs):\n \"\"\"\n Get a form Field for a database Field that has declared choices.\n \"\"\"\n # If the field is named as a radio_field, use a RadioSelect\n if foreignkey_cache_enabled and db_field.name in cache_fields and request and request.method == 'GET':\n if '__fkcache' not in request:\n request.__fkcache = {}\n\n if db_field.name not in request.__fkcache:\n request.__fkcache[db_field.name] = DummyQueryset(db_field.formfield(**kwargs).queryset)\n\n kwargs['queryset'] = request.__fkcache[db_field.name]\n\n return formfield_for_foreignkey_old(self, db_field, request, **kwargs)\n\n the_admin.formfield_for_foreignkey = formfield_for_foreignkey\n\n return the_admin\n return f\n","repo_name":"tly1980/django-formfield-cache","sub_path":"src/formfield_cache/decorators.py","file_name":"decorators.py","file_ext":"py","file_size_in_byte":1864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"20306994570","text":"#!/usr/bin/env python\n\n\ndef fib(n):\n \"\"\"Calculate the n-th Fibonacci number.\n\n Parameters\n ----------\n n : int\n\n Returns\n -------\n int : The n-th Fiboacci number.\n \"\"\"\n if n <= 1:\n return n\n else:\n return fib(n-1) + fib(n-2)\n\nif __name__ == '__main__':\n n = 37\n print(\"The %ith Fibonacci number is %i.\" % (n, fib(n)))\n","repo_name":"MartinThoma/rosetta-code","sub_path":"fibonacci/dumb-implementation/python.py","file_name":"python.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"} +{"seq_id":"71980315082","text":"import torch.nn as nn\nimport torch\nimport torch.nn.functional as F\n\nfrom sentence_encoder.basenet import BaseNet, kl\nfrom torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence\n\n\ndef init_weights(m):\n if type(m) == nn.Linear:\n torch.nn.init.xavier_uniform_(m.weight)\n m.bias.data.fill_(0.01)\n\n\nclass GRU(BaseNet):\n\n def __init__(self, vectors, args):\n super(GRU, self).__init__(vectors, args)\n self.device = args.device\n self.hidden_size = args.hidden_size\n self.window = args.window\n self.max_length = args.max_length\n self.gru = nn.GRU(input_size=self.embedder.hidden_size,\n hidden_size=args.hidden_size,\n num_layers=1,\n 
batch_first=True,\n bidirectional=True)\n self.fc = nn.Sequential(nn.Dropout(),\n nn.Tanh(),\n nn.Linear(args.hidden_size * 2, args.hidden_size),\n nn.Tanh())\n\n self.mutual = nn.Sequential(\n nn.Dropout(), nn.Sigmoid(), nn.Linear(2 * self.hidden_size, self.hidden_size), nn.Tanh()\n )\n\n def init_weight(self):\n self.apply(init_weights)\n\n def initHidden(self, batch_size):\n h0 = torch.zeros(2 * 1, batch_size, self.hidden_size).to(self.device)\n return h0\n\n def select_anchor(self, emb, anchor_index):\n \"\"\"\n\n :param emb: B x L x D\n :param anchor_index: B\n :return:\n \"\"\"\n B, L, D = emb.shape\n u = torch.tensor([x for x in range(L)]).unsqueeze(0).to(self.device)\n v = anchor_index.view(B, 1)\n mask = (u == v).unsqueeze(dim=2).to(self.device)\n x = torch.masked_select(emb, mask).view(-1, D)\n return x\n\n def forward(self, inputs):\n embedding = self.embedder(inputs['indices'], inputs['dist'], inputs['mask'])\n # print('| GRUEncoder: embedding > ', tuple(embedding.shape))\n\n B, T, _ = embedding.shape\n\n # print('| GRUEncoder: anchors > ', tuple(anchors.shape))\n\n h0 = self.initHidden(B)\n hidden_states, _ = self.gru(embedding, h0)\n rnnRep = self.select_anchor(hidden_states, inputs['anchor_index'])\n pool_x1 = F.max_pool1d(hidden_states.transpose(1, 2), T).squeeze(dim=2)\n\n # prune_embedding = self.embedder(inputs['prune_indices'], inputs['dist'], inputs['prune_mask'])\n # hidden_states, _ = self.gru(prune_embedding, h0)\n # pool_x2 = F.max_pool1d(hidden_states.transpose(1, 2), T).squeeze(dim=2)\n\n foot_print = inputs['prune_footprint'].unsqueeze(dim=2)\n # print('| GRUEncoder: foot_print > ', tuple(foot_print.shape))\n # print('| GRUEncoder: hidden_states > ', tuple(hidden_states.shape))\n\n hidden_states = hidden_states * foot_print\n\n pool_x2 = F.max_pool1d(hidden_states.transpose(1, 2), T).squeeze(dim=2)\n\n # print('| GRUEncoder: pool_x1 > ', tuple(pool_x1.shape))\n # print('| GRUEncoder: pool_x2 > ', tuple(pool_x2.shape))\n\n m1 = self.mutual(pool_x1)\n m2 = self.mutual(pool_x2)\n\n mutual_loss = kl(m1, m2)\n\n x = self.fc(rnnRep)\n\n return {'embedding': x,\n 'mutual_loss': mutual_loss\n }\n","repo_name":"laiviet/fsl-proact","sub_path":"sentence_encoder/gru_encoder.py","file_name":"gru_encoder.py","file_ext":"py","file_size_in_byte":3301,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"63"} +{"seq_id":"8889886675","text":"def case_counter(sentence):\n upper_count = 0\n lower_count = 0\n sen_list = list(sentence)\n for character in sen_list:\n if ord(character)>64 and ord(character)<90:\n upper_count +=1\n elif ord(character)>96 and ord(character)<122:\n lower_count +=1\n return (f\"Upper case characters: {upper_count}, lower case characters: {lower_count}\")\n\n","repo_name":"micnem/developers_institute","sub_path":"Week4/Day5/exercises/ex3.py","file_name":"ex3.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"10596013795","text":"# read and preprocess data\nimport numpy as np\nimport random\nimport pickle\nimport os\n\nclass dis_data:\n '''\n Encapsulation of our training data split across multiple nodes\n '''\n def __init__(self, data, label, nodes, shuffle=False, index=None, one_hot=False):\n self.size = len(data)\n self.nodes = nodes\n self.all_data = data\n self.all_label = label\n if index:\n self.index = index\n else:\n self.index = list(range(self.size))\n if shuffle:\n self.shuffle()\n self.dist_data, 
self.dist_label = self.distribute(nodes)\n if one_hot:\n new_label = []\n for node in self.dist_label:\n new_label.append(_one_hot(node))\n self.dist_label = new_label\n \n def shuffle(self):\n '''\n Shuffle the training data and labels, updates the member variables and returns the shuffled data and labels vectors\n '''\n random.shuffle(self.index)\n new_data = []\n new_label = []\n for ind in self.index:\n new_data.append(self.all_data[ind])\n new_label.append(self.all_label[ind])\n new_data = np.asarray(new_data)\n new_label = np.asarray(new_label)\n return new_data, new_label\n \n def distribute(self, nodes):\n '''\n Evenly distribute the training data across the nodes\n '''\n remainder = self.size % nodes\n frac = int(self.size/nodes)\n dist_data = []\n dist_label = []\n for n in range(nodes):\n if n == 0:\n dist_data.append(self.all_data[0 : frac + remainder])\n dist_label.append(self.all_label[0 : frac + remainder])\n else: \n dist_data.append(self.all_data[n * frac : (n + 1) * frac])\n dist_label.append(self.all_label[n * frac : (n + 1) * frac])\n dist_data = np.asarray(dist_data)\n dist_label = np.asarray(dist_label)\n return dist_data, dist_label\n def next_batch(self, node, size):\n '''\n Return next batch of distributed training samples with labels\n '''\n l = len(self.dist_data[node])\n sample = []\n label = []\n for _ in range(size):\n index = random.randint(0, l-1)\n sample.append(self.dist_data[node][index])\n label.append(self.dist_label[node][index])\n return sample, label\n\ndef data_prep(dataset, nodes, size=0, one_hot=False):\n '''\n Distribute training data across nodes and return test data w/ labels\n '''\n if dataset == 'MNIST':\n train_data, train_label, test_data, test_label = mnist_read_pickled()\n if one_hot:\n test_label = _one_hot(test_label)\n elif dataset == 'CIFAR':\n with open('cifar_dataset.pickle', 'rb') as handle:\n (train_data, train_label, test_data, test_label) = pickle.load(handle)\n train_data, test_data = train_data / 255.0, test_data / 255.0\n train_label = _one_hot(train_label)\n test_label = _one_hot(test_label)\n else:\n raise NameError(\"Cannot find %s dataset\") % (dataset)\n \n if size:\n train_data = train_data[:size]\n train_label = train_label[:size]\n \n full_data = dis_data(train_data, train_label, nodes, shuffle = True, one_hot=one_hot)\n return full_data, test_data, test_label\n\ndef _one_hot(label):\n l_oh = []\n for i in label:\n new_l = [0] * 10\n new_l[int(i)] = 1\n l_oh.append(new_l)\n return l_oh\n\ndef mnist_read_pickled(path = './data/MNIST/pickled'):\n '''\n Read pickled MNIST train/test data\n\n Args:\n path: Path to pickled numpy arrays (default: ./data/MNIST/pickled)\n Return:\n train_data\n train_labels\n test_data\n test_labels\n '''\n with open(os.path.join(path, 'train_data.pickle'), 'rb') as handle:\n train_data = pickle.load(handle)\n with open(os.path.join(path, 'train_labels.pickle'), 'rb') as handle:\n train_labels = pickle.load(handle)\n \n with open(os.path.join(path, 'test_data.pickle'), 'rb') as handle:\n test_data = pickle.load(handle)\n with open(os.path.join(path, 'test_labels.pickle'), 'rb') as handle:\n test_labels = pickle.load(handle)\n \n return train_data, train_labels, test_data, test_labels\n","repo_name":"INSPIRE-Lab-US/Byzantine-resilient-distributed-learning","sub_path":"dist_data.py","file_name":"dist_data.py","file_ext":"py","file_size_in_byte":4381,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"63"} +{"seq_id":"72689635402","text":"import 
jydoop\nimport json\n'''\nin following commands, UPDATE DATES\n\nmake ARGS=\"scripts/orphanDetection2/docIdsInParts.py ./outData/finalDocIdsInParts ./outData/partsOverlap\" hadoop\n\n'''\n\n######## to OUTPUT TO HDFS\ndef skip_local_output():\n return True\n\n\n\nsetupjob = jydoop.setupjob\n\n\ndef output(path, results):\n # just dump tab separated key/vals\n firstLine = True\n with open(path, 'w') as f:\n for k, v in results:\n if firstLine:\n f.write(str(k)+\"\\t\"+str(v))\n firstLine=False\n else:\n f.write(\"\\n\"+str(k)+\"\\t\"+str(v))\n\n\ndef localTextInput(mapper):\n #local feeds a line of text input to the function after cleaning it up\n #just ignore the lineKey.\n if jydoop.isJython():\n return mapper\n else:\n def localMapper(lineKey,inputLine,context):\n keyValList = inputLine.split(\"\\t\")\n key = keyValList[0]\n\n if keyValList[1][0:7]==\"('PART'\":\n val = eval(keyValList[1])\n else:\n val = keyValList[1]\n\n return mapper(key,val,context)\n return localMapper\n\nclass edgeTupError(Exception):\n def __init__(self, part, listOfEdges, iterOfEdges):\n self.part = part\n self.listOfEdges = listOfEdges\n self.iterOfEdges =iterOfEdges\n def __str__(self):\n return repr((self.part, self.listOfEdges,self.iterOfEdges))\n\n\n\n\n\n\n\n\n\n\n\n@localTextInput\ndef map(docId,partOrJson,context):\n if type(partOrJson)==tuple: #the part for these docs\n # emit ONLY the partId for the doc, which is partOrJson[1]\n context.write(docId,partOrJson[1])\n context.getCounter(\"MAPPER\", \"input parts (docId,part) pairs\").increment(1)\n\n\n else: #an FHR record; extract the tie breaker info\n context.getCounter(\"MAPPER\", \"input (docId,rawjson) pairs\").increment(1)\n try:\n payload = json.loads(partOrJson)\n except KeyError:\n context.getCounter(\"MAP ERROR\", \"record failed to parse\").increment(1)\n context.getCounter(\"MAP ERROR\", \"REJECTED RECORDS\").increment(1)\n return\n\n try: #was getting errors finding packets without a version field, so had to wrap this test in a try block\n if not (payload[\"version\"]==2):\n context.getCounter(\"MAPPER\", \"record not v2\").increment(1)\n return\n except KeyError:\n context.getCounter(\"MAP ERROR\", \"no version\").increment(1)\n context.getCounter(\"MAP ERROR\", \"REJECTED RECORDS\").increment(1)\n return\n\n try:\n thisPingDate = payload[\"thisPingDate\"]\n except KeyError:\n context.getCounter(\"MAP ERROR\", \"no thisPingDate\").increment(1)\n context.getCounter(\"MAP ERROR\", \"REJECTED RECORDS\").increment(1)\n return\n\n try:\n numAppSessionsPreviousOnThisPingDate=len(payload[\"data\"][\"days\"][thisPingDate]['org.mozilla.appSessions.previous'][\"main\"])\n except KeyError:\n context.getCounter(\"MAP WARNING\", \"no ['...appSessions.previous']['main'] on thisPingDate\").increment(1)\n numAppSessionsPreviousOnThisPingDate = 0\n except TypeError:\n #was getting \"TypeError: 'float' object is unsubscriptable\" errors in the above. 
this should not happen, and must indicate a bad packet, which we will discard\n context.getCounter(\"MAP ERROR\", \"float instead of obj in ['...appSessions.previous']['main'] on thisPingDate \").increment(1)\n context.getCounter(\"MAP ERROR\", \"REJECTED RECORDS\").increment(1)\n return\n\n try:\n currentSessionTime=payload[\"data\"][\"last\"]['org.mozilla.appSessions.current'][\"totalTime\"]\n except KeyError:\n currentSessionTime = 0\n context.getCounter(\"MAP WARNING\", \"no currentSessionTime\").increment(1)\n\n context.write(docId,\n (thisPingDate, numAppSessionsPreviousOnThisPingDate, currentSessionTime) )\n\n\n\n\ndef reduce(docId, iterOfPartOrTieBreakInfo, context):\n #since each doc id is unique, it should be associated with ONLY 1 partId, and ONLY 1 tuple of tieBreakInfo; so iterOfPartOrTieBreakInfo should only have 2 elts\n\n numTieBreakPackets=0\n numPartId = 0\n for partOrJson in iterOfPartOrTieBreakInfo:\n if type(partOrJson)==tuple: #the tie break info for this docs\n tieBreakInfo = partOrJson\n numTieBreakPackets+=1\n else: #the partId, a str\n partId = partOrJson\n numPartId+=1\n\n if numTieBreakPackets==1 and numPartId==1:\n context.write(partId,(docId,tieBreakInfo))\n context.getCounter(\"REDUCER\", \"OK (partId,(docId,tieBreakInfo)) k/v pair\").increment(1)\n return\n elif numTieBreakPackets==0 and numPartId==1:\n #in this case, if there is no tie break info, then the record must be one that is not linked to any other record. Emit the docId assigned to its own partId, and fill in dummy tieBreakInfo, since there are no ties to break.\n context.write(docId,(docId,(\"2000-01-01\",0,0)))\n context.getCounter(\"REDUCER\", \"docId not linked to any other doc\").increment(1)\n return\n elif numTieBreakPackets==1 and numPartId==0:\n context.getCounter(\"RED ERROR\", \"docId with tieBreakInfo, but no part\").increment(1)\n return\n elif numTieBreakPackets==0 and numPartId==0:\n context.getCounter(\"RED ERROR\", \"WTF docId with no part and no tieBreakInfo?\").increment(1)\n return\n elif numTieBreakPackets>1 or numPartId>1:\n context.getCounter(\"RED ERROR\", \"docId with too many tieBreakInfos or partIds\").increment(1)\n return\n\n\n\n\n\n\n","repo_name":"bcolloran/jydoop_bcolloran","sub_path":"scripts/orphanDetection3/kPartId_vDocId-tieBreakInfo.py","file_name":"kPartId_vDocId-tieBreakInfo.py","file_ext":"py","file_size_in_byte":5684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"6466681031","text":"import time\nfrom dataclasses import make_dataclass\n\nimport numpy as np\nimport pytest\nimport simpy\n\nfrom ralf.v2 import LIFO, BaseTransform, RalfApplication, RalfConfig, Record\nfrom ralf.v2.operator import OperatorConfig, RayOperatorConfig, SimpyOperatorConfig\nfrom ralf.v2.utils import get_logger\n\nIntValue = make_dataclass(\"IntValue\", [\"value\"])\n\n\nlogger = get_logger()\n\n\nclass CounterSource(BaseTransform):\n def __init__(self, up_to: int) -> None:\n self.count = 0\n self.up_to = up_to\n\n def on_event(self, record: Record) -> Record[IntValue]:\n self.count += 1\n if self.count >= self.up_to:\n logger.msg(\"self.count reached to self.up_to, sending StopIteration\")\n raise StopIteration()\n return Record(\n id_=record.id_,\n entry=IntValue(value=self.count),\n shard_key=str(self.count % 10),\n )\n\n\nclass Sum(BaseTransform):\n def __init__(self) -> None:\n self.state = 0\n self.history = []\n\n def on_event(self, record: Record[IntValue]):\n self.history.append(record.entry.value)\n self.state += 
record.entry.value\n time.sleep(0.1)\n return None\n\n\n@pytest.mark.parametrize(\"deploy_mode\", [\"local\", \"ray\"])\ndef test_simple_lifo(deploy_mode):\n app = RalfApplication(RalfConfig(deploy_mode=deploy_mode))\n\n sink = app.source(CounterSource(10)).transform(Sum(), LIFO())\n app.deploy()\n app.wait()\n\n operator = app.manager.operators[sink]\n transform_object = operator.dump_transform_state()[0]\n assert transform_object.state == 45\n event_history = transform_object.history\n assert np.argmax(event_history) != (\n len(event_history) - 1\n ), \"Top increment should be in the last place for LIFO\"\n\n\ndef test_simpy_lifo():\n app = RalfApplication(RalfConfig(deploy_mode=\"simpy\"))\n env = simpy.Environment()\n\n sink = app.source(\n CounterSource(10),\n operator_config=OperatorConfig(\n simpy_config=SimpyOperatorConfig(\n shared_env=env, processing_time_s=0.01, stop_after_s=0.2\n )\n ),\n ).transform(\n transform_object=Sum(),\n scheduler=LIFO(),\n operator_config=OperatorConfig(\n simpy_config=SimpyOperatorConfig(shared_env=env, processing_time_s=0.1)\n ),\n )\n assert sink.config.simpy_config.shared_env is env\n\n app.deploy()\n\n env.run(1)\n\n record_trace = app.wait()\n assert record_trace[0].entry.request_id == 0\n\n request_ids = [r.entry.request_id for r in record_trace if \"Sum\" in r.entry.frame]\n assert request_ids == [0, 10, 19, 18, 17, 16, 15, 14, 13]\n\n\ndef test_ray_wait():\n app = RalfApplication(RalfConfig(deploy_mode=\"ray\"))\n\n app.source(CounterSource(10)).transform(\n Sum(),\n LIFO(),\n operator_config=OperatorConfig(ray_config=RayOperatorConfig(num_replicas=2)),\n )\n app.deploy()\n app.wait()\n","repo_name":"feature-store/ralf","sub_path":"ralf/v2/tests/test_api.py","file_name":"test_api.py","file_ext":"py","file_size_in_byte":2925,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"63"} +{"seq_id":"25292896117","text":"#!/usr/bin/env python3\n\nimport sys\n\nsys.setrecursionlimit(2 * (10 ** 5))\nInf = INF = float(\"INF\")\n\n\ndef solve(N: int):\n ans = 0\n for n in range(1, N):\n ans += (N**2 / (N - n)**2) * (N - n) / N\n print(ans)\n\n\ndef main():\n\n def iterate_tokens():\n for line in sys.stdin:\n for word in line.split():\n yield word\n\n tokens = iterate_tokens()\n N = int(next(tokens)) # type: int\n solve(N)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"dmiyakawa/atcoder-workspace","sub_path":"abc194/D/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"3029536215","text":"# Tuenti Challenge Edition 7 Level 6\nfrom re import match\nfrom numpy import array, concatenate\nfrom math import ceil\nfrom networkx import shortest_path_length, shortest_path, DiGraph\n\n# ------------------------------- CONSTANTS --------------------------------- #\n\nFILE = \"submit\"\nMAX_SIDE_LEN = 2**32\n\n# ------------------------ GENERAL PURPOSE UTILITIES ------------------------ #\n\ndef group_list(l, each):\n\ti = 0\n\tres = []\n\twhile i < len(l):\n\t\tres.append(l[i:i+each])\n\t\ti += each\n\treturn res\n\n# ------------------------ JAM SPECIFIC FUNCTIONS --------------------------- #\n\ndef read_file(fname):\n\t\"\"\"Read input file format.\"\"\"\n\twith open(fname,\"r\") as f:\n\t\tlines = f.read().strip().split(\"\\n\")[1:]\n\tl = 0\n\tproblems = []\n\twhile l < len(lines):\n\t\tF, S = map(int, lines[l].split(\" \"))\n\t\tshortcuts = lines[l+1:l+1+S]\n\t\tshortcuts = 
[list(map(int, s.strip().split(\" \"))) for s in shortcuts]\n\t\tproblems.append((F, shortcuts))\n\t\tl += S + 1\n\treturn problems\n\ndef solve_all(fname):\n\t\"\"\"Process each test case.\"\"\"\n\tproblems = read_file(\"%s.in\" % fname)\n\tcase = 1\n\ttext = \"\"\n\tfor F, shortcuts in problems:\n\t\tprint(\"Solving Case #%s\" % case)\n\t\tif F == 1:\n\t\t\tres = 0\n\t\telse:\n\t\t\tres = solve(F, shortcuts)\n\t\ttext += \"Case #%s: %s\\n\" % (case, res)\n\t\tcase += 1\n\twith open(\"%s.out\" % fname, \"w\") as out:\n\t\tout.write(text[:-1])\n\n# ----------------------- PROBLEM SPECIFIC FUNCTIONS ------------------------ #\n\ndef solve(F, shortcuts):\n\t# discard dummy shortcuts\n\tshortcuts = [s for s in shortcuts if linear_time(s[0], s[1]) > s[2]]\n\t# create graph points\n\tpoints = [1] + [s[0] for s in shortcuts] + [s[1] for s in shortcuts] + [F]\n\tpoints = list(sorted(set(points)))\n\t# create graph edges\n\tedges = shortcuts\n\tfor i in range(len(points) - 1):\n\t\tt = linear_time(points[i], points[i+1])\n\t\tedges.append((points[i], points[i+1], t))\n\t\tedges.append((points[i+1], points[i], 0))\n\t# filter edges with same src and dest but different weight\n\tnew_edges = {}\n\tfor edge in edges:\n\t\tif (edge[0], edge[1]) not in new_edges or new_edges[(edge[0], edge[1])] > edge[2]:\n\t\t\tnew_edges[(edge[0], edge[1])] = edge[2]\n\t# build graph\n\tG = DiGraph(data = [(src, dst, {'w': w}) for (src, dst), w in new_edges.items()])\n\n\treturn shortest_path_length(G, points[0], points[-1], 'w')\n\ndef linear_time(n, m):\n\treturn int((m*(m-1) - n*(n-1))/2)\n\n# ------------------------------ ENTRYPOINT --------------------------------- #\n\nsolve_all(FILE)","repo_name":"juancroldan/tuenti-challenge","sub_path":"7/6/solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":2394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"18504277618","text":"import jax\nimport numpy as np\nimport scipy.io as sio\nimport cv2\nfrom scico.linop.radon import ParallelBeamProj\nimport jax.numpy as jnp\nimport matplotlib.pyplot as plt\nfrom scico import metric\nimport time\nimport JAX_TV\nimport os\n\ndef run(config):\n num_iter = int(config.method.baseline.num_iter)\n gamma = float(config.method.baseline.gamma)\n rho = float(config.method.baseline.rho)\n tau = float(config.method.baseline.tau)\n lambda_TV = float(config.method.baseline.lambda_TV)\n num_iter_TV = int(config.method.baseline.num_iter_TV)\n N = int(config.dataset.ct_sample.img_dim)\n n_projection = int(config.dataset.ct_sample.num_projection)\n num_detector = int(config.dataset.ct_sample.num_detector)\n\n \"\"\"\n Load CT image\n \"\"\"\n\n matContent = sio.loadmat('dataset/CT_images_preprocessed.mat', squeeze_me=True)\n ct = matContent['img_cropped'][:, :, 0]\n down_ct = cv2.resize(ct, (N, N))\n\n xin = jax.device_put(down_ct) # Convert to jax type, push to GPU\n\n \"\"\"\n Configure CT projection operator and generate synthetic measurements\n \"\"\"\n\n angles = np.linspace(0, np.pi, n_projection) # evenly spaced projection angles\n\n A = ParallelBeamProj(xin.shape, 1.0, num_detector, angles) # Radon transform operator\n d = A @ xin # Sinogram\n\n # initialize x_hat to be the back-projection of d\n x_hat_in = A.fbp(d)\n y_hat_in = x_hat_in\n lambda_hat_in = jnp.zeros_like(y_hat_in)\n\n def A_i(A_func, x):\n return A_func @ x\n\n def A_i_adj(A_func, x):\n return A_func.adj(x)\n\n @jax.jit\n def lambda_step(l_x_hat, l_y_hat, l_lambda_hat):\n result = l_lambda_hat + 
l_y_hat - l_x_hat\n return result\n\n @jax.jit\n def x_step(x_y_hat, x_lambda_hat):\n return x_y_hat + x_lambda_hat\n\n def main_body_func(iteration, init_vals):\n x_hat, y_hat, lambda_hat = init_vals\n\n # y step can not jit due to A being implemented in Astra\n # pass the projection operator A explicitly, matching the A_i/A_i_adj signatures\n y_hat = y_hat - gamma * A_i_adj(A, A_i(A, y_hat) - d) - gamma * rho * (\n y_hat - x_hat + lambda_hat)\n\n # x step tau * rho\n prox_in = x_step(y_hat, lambda_hat)\n x_hat = JAX_TV.TotalVariation_Proximal(prox_in, lambda_TV / rho * tau, num_iter_TV)\n\n # lambda step\n lambda_hat = lambda_step(x_hat, y_hat, lambda_hat)\n\n return x_hat, y_hat, lambda_hat\n\n snr_list = []\n\n print(\"algorithm started\")\n start_time = time.time()\n\n for i in range(num_iter):\n # y step\n y_hat_in = y_hat_in - gamma * A_i_adj(A, A_i(A, y_hat_in) - d) - gamma * rho * (\n y_hat_in - x_hat_in + lambda_hat_in)\n\n if i % 5 == 0:\n # plt.imshow(d)\n # plt.title('d')\n # plt.colorbar()\n # plt.show()\n\n A_y = A_i(A, y_hat_in)\n # plt.imshow(A_y)\n # plt.title('A y')\n # plt.colorbar()\n # plt.show()\n\n diff = d - A_y\n plt.imshow(diff)\n plt.title('diff')\n plt.colorbar()\n plt.show()\n\n # x step tau * rho\n prox_in = x_step(y_hat_in, lambda_hat_in)\n x_hat_in = JAX_TV.TotalVariation_Proximal(prox_in, lambda_TV / rho * tau, num_iter_TV)\n # lambda step\n lambda_hat_in = lambda_step(x_hat_in, y_hat_in, lambda_hat_in)\n print(f'iteration {i} SNR: {metric.snr(xin, x_hat_in)}')\n snr_list.append(metric.snr(xin, x_hat_in))\n\n x_hat_out = x_hat_in\n\n # x_hat_out, _, _ = jax.lax.fori_loop(1, num_iter, body_fun=main_body_func,\n # init_val=(x_hat_in, y_hat_in, lambda_hat_in))\n\n x_hat_out.block_until_ready()\n\n running_time = time.time() - start_time\n print(\"--- %s seconds ---\" % running_time)\n print(f'final SNR: {metric.snr(xin, x_hat_out)}')\n\n np.save(os.path.join('saved', 'baseline'), snr_list)\n\n fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(18, 5))\n im1 = axes[0].imshow(x_hat_in, cmap=\"gray\")\n im2 = axes[1].imshow(x_hat_out, cmap=\"gray\")\n im3 = axes[2].imshow(xin, cmap=\"gray\")\n axes[0].title.set_text(f'input SNR: {\"{:.2f}\".format(metric.snr(xin, x_hat_in))}')\n axes[1].title.set_text(f'output SNR: {\"{:.2f}\".format(metric.snr(xin, x_hat_out))}')\n axes[2].title.set_text('ground truth')\n fig.colorbar(im1, ax=axes[0])\n fig.colorbar(im2, ax=axes[1])\n fig.colorbar(im3, ax=axes[2])\n plt.show()\n","repo_name":"yanpeng7/Concensus_ADMM","sub_path":"method/baseline.py","file_name":"baseline.py","file_ext":"py","file_size_in_byte":4387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"74450231241","text":"import datetime\nfrom typing import Any, List\n\nimport pytest\n\nfrom tmlt.analytics.binning_spec import BinningSpec, _default_bin_names, _edges_as_str\nfrom tmlt.analytics.query_builder import ColumnType\n\n\"\"\"Tests for helper functions related to default bin names.\"\"\"\n\n\n@pytest.mark.parametrize(\n \"bin_edges,expected_strs\",\n [\n ([0, 1, 2], [\"0\", \"1\", \"2\"]),\n ([\"0\", \"1\", \"2\"], [\"0\", \"1\", \"2\"]),\n (\n [datetime.date(2022, 1, 1), datetime.date(2022, 2, 1)],\n [\"2022-01-01\", \"2022-02-01\"],\n ),\n ([0.0, 0.1, 0.2], [\"0.00\", \"0.10\", \"0.20\"]),\n ([0.0, 0.111111, 0.222222], [\"0.00\", \"0.11\", \"0.22\"]),\n ([0.0, 0.000001, 0.000002], [\"0.000000\", \"0.000001\", \"0.000002\"]),\n ([0.0, 1.000001], [\"0.00\", \"1.00\"]),\n ([0.0, 0.999, 2.0], [\"0.00\", \"1.00\", \"2.00\"]),\n ([0.0, 0.999, 1.0], [\"0.000\", \"0.999\", \"1.000\"]),\n 
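# the cases above suggest floats (and the datetimes below) are rendered at the coarsest precision, at least two decimals or minute resolution, that keeps all edge strings distinct (observation from this data, not verified against the implementation)\n 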
(\n [datetime.datetime(2022, 1, 1, 0), datetime.datetime(2022, 2, 1, 5)],\n [\"2022-01-01 00:00\", \"2022-02-01 05:00\"],\n ),\n (\n [\n datetime.datetime(2022, 1, 1, 0),\n datetime.datetime(2022, 2, 1, 5, 30, 15, 20000),\n ],\n [\"2022-01-01 00:00:00.000\", \"2022-02-01 05:30:15.020\"],\n ),\n (\n [\n datetime.datetime(2022, 1, 1, 0),\n datetime.datetime(2022, 2, 1, 5, 30, 15, 1),\n ],\n [\"2022-01-01 00:00:00.000000\", \"2022-02-01 05:30:15.000001\"],\n ),\n ],\n)\ndef test_edges_as_str(bin_edges: List, expected_strs: List[str]):\n \"\"\"Conversion of bin edges to strings works as expected.\"\"\"\n assert _edges_as_str(bin_edges) == expected_strs\n\n\n@pytest.mark.parametrize(\n \"args,expected_strs\",\n [\n (([0, 1, 2], True, True), [\"[0, 1]\", \"(1, 2]\"]),\n (([0, 1, 2], False, True), [\"[0, 1)\", \"[1, 2]\"]),\n (([0, 1, 2], True, False), [\"(0, 1]\", \"(1, 2]\"]),\n (([0, 1, 2], False, False), [\"[0, 1)\", \"[1, 2)\"]),\n (\n ([datetime.date(2022, 1, 1), datetime.date(2022, 3, 15)], True, False),\n [\"(2022-01-01, 2022-03-15]\"],\n ),\n ],\n)\ndef test_default_bin_names(args: List[Any], expected_strs: List[str]):\n \"\"\"Generation of bin names from bin edges works as expected.\"\"\"\n assert _default_bin_names(*args) == expected_strs\n\n\n\"\"\"Tests for :cls:`tmlt.analytics.binning_spec.BinningSpec`.\"\"\"\n\n\ndef test_binning() -> None:\n \"\"\"Basic BinningSpec works as expected.\"\"\"\n spec = BinningSpec([0, 5, 10, 15, 20])\n assert spec.bins() == [\"[0, 5]\", \"(5, 10]\", \"(10, 15]\", \"(15, 20]\"]\n assert spec.bins(include_null=True) == [\n \"[0, 5]\",\n \"(5, 10]\",\n \"(10, 15]\",\n \"(15, 20]\",\n None,\n ]\n assert spec.column_descriptor.column_type == ColumnType.VARCHAR\n bin_tests = {\n 2: \"[0, 5]\",\n 7: \"(5, 10]\",\n 12: \"(10, 15]\",\n 17: \"(15, 20]\",\n -1: None,\n 0: \"[0, 5]\",\n 20: \"(15, 20]\",\n 21: None,\n None: None,\n }\n for val, expected_bin in bin_tests.items():\n assert spec(val) == expected_bin\n\n\ndef test_binning_left() -> None:\n \"\"\"BinningSpec with right=False works as expected.\"\"\"\n spec = BinningSpec([0, 5, 10, 15, 20], right=False)\n assert spec.bins() == [\"[0, 5)\", \"[5, 10)\", \"[10, 15)\", \"[15, 20]\"]\n bin_tests = {\n 2: \"[0, 5)\",\n 7: \"[5, 10)\",\n 12: \"[10, 15)\",\n 17: \"[15, 20]\",\n -1: None,\n 0: \"[0, 5)\",\n 20: \"[15, 20]\",\n 21: None,\n None: None,\n }\n for val, expected_bin in bin_tests.items():\n assert spec(val) == expected_bin\n\n\n@pytest.mark.parametrize(\n \"edges,ty\",\n [\n ([0, 1], ColumnType.INTEGER),\n ([0.0, 1.0], ColumnType.DECIMAL),\n ([\"0\", \"1\"], ColumnType.VARCHAR),\n ([datetime.date(2022, 1, 1), datetime.date(2022, 1, 2)], ColumnType.DATE),\n (\n [datetime.datetime(2022, 1, 1), datetime.datetime(2022, 1, 2)],\n ColumnType.TIMESTAMP,\n ),\n ],\n)\ndef test_input_type(edges: List[Any], ty: ColumnType):\n \"\"\"BinningSpec.input_type works as expected.\"\"\"\n spec = BinningSpec(edges)\n assert spec.input_type == ty\n\n\n@pytest.mark.parametrize(\n \"names,ty\",\n [\n ([0, 1], ColumnType.INTEGER),\n ([0.0, 1.0], ColumnType.DECIMAL),\n ([\"0\", \"1\"], ColumnType.VARCHAR),\n ([datetime.date(2022, 1, 1), datetime.date(2022, 1, 2)], ColumnType.DATE),\n (\n [datetime.datetime(2022, 1, 1), datetime.datetime(2022, 1, 2)],\n ColumnType.TIMESTAMP,\n ),\n ],\n)\ndef test_column_type(names: List[Any], ty: ColumnType):\n \"\"\"BinningSpec.column_descriptor.column_type works as expected.\"\"\"\n spec = BinningSpec([0, 1, 2], names=names)\n assert spec.column_descriptor.column_type == ty\n\n\ndef 
test_binning_allow_nan() -> None:\n \"\"\"BinningSpec sets allow_nan as expected.\"\"\"\n edges = [0, 5, 10]\n spec = BinningSpec(edges, names=[float(\"NaN\"), float(\"0\")])\n assert spec.column_descriptor.allow_nan\n spec = BinningSpec(edges, names=[float(\"-NaN\"), float(\"0\")])\n assert spec.column_descriptor.allow_nan\n spec = BinningSpec(edges, names=[float(\"0\"), float(\"5\")])\n assert not spec.column_descriptor.allow_nan\n\n\ndef test_binning_allow_null() -> None:\n \"\"\"BinningSpec sets allow_null as expected.\"\"\"\n edges = [0, 5, 10]\n spec = BinningSpec(edges, names=[\"null\", \"5\"])\n assert spec.column_descriptor.allow_null\n spec = BinningSpec(edges, names=[\"NULL\", \"5\"])\n assert spec.column_descriptor.allow_null\n spec = BinningSpec(edges, names=[\"Null\", \"5\"])\n assert spec.column_descriptor.allow_null\n spec = BinningSpec(edges, names=[\"0\", \"5\"])\n assert not spec.column_descriptor.allow_null\n\n\ndef test_binning_allow_inf() -> None:\n \"\"\"BinningSpec sets allow_inf as expected.\"\"\"\n edges = [float(\"0\"), float(\"5\"), float(\"10\")]\n spec = BinningSpec(edges, names=[float(\"0\"), float(\"inf\")])\n assert spec.column_descriptor.allow_inf\n spec = BinningSpec(edges, names=[float(\"0\"), float(\"-inf\")])\n assert spec.column_descriptor.allow_inf\n spec = BinningSpec(edges, names=[float(\"0\"), float(\"5\")])\n assert not spec.column_descriptor.allow_inf\n\n\ndef test_binning_noninclusive() -> None:\n \"\"\"BinningSpec with include_both_endpoints=False works as expected.\"\"\"\n spec = BinningSpec([0, 5, 10, 15, 20], include_both_endpoints=False)\n assert spec.bins() == [\"(0, 5]\", \"(5, 10]\", \"(10, 15]\", \"(15, 20]\"]\n assert spec(0) is None\n assert spec(20) == \"(15, 20]\"\n spec = BinningSpec([0, 5, 10, 15, 20], right=False, include_both_endpoints=False)\n assert spec.bins() == [\"[0, 5)\", \"[5, 10)\", \"[10, 15)\", \"[15, 20)\"]\n assert spec(0) == \"[0, 5)\"\n assert spec(20) is None\n\n\ndef test_binning_names() -> None:\n \"\"\"BinningSpec with custom bin names works as expected.\"\"\"\n spec = BinningSpec([0, 64, 69, 79, 89, 100], names=[\"F\", \"D\", \"C\", \"B\", \"A\"])\n assert spec.bins() == [\"F\", \"D\", \"C\", \"B\", \"A\"]\n assert spec.column_descriptor.column_type == ColumnType.VARCHAR\n bin_tests = {0: \"F\", 10: \"F\", 75: \"C\", 100: \"A\", None: None}\n for val, expected_bin in bin_tests.items():\n assert spec(val) == expected_bin\n\n\ndef test_binning_repeated_names() -> None:\n \"\"\"BinningSpec with non-unique bin names works as expected.\"\"\"\n spec = BinningSpec([-15, -5, 5, 15], names=[\"high\", \"low\", \"high\"])\n assert spec.bins() == [\"high\", \"low\"]\n assert spec.bins(include_null=True) == [\"high\", \"low\", None]\n assert spec.column_descriptor.column_type == ColumnType.VARCHAR\n bin_tests = {-16: None, -15: \"high\", -5: \"high\", -4: \"low\", 4: \"low\", 10: \"high\"}\n for val, expected_bin in bin_tests.items():\n assert spec(val) == expected_bin\n\n\ndef test_binning_inf_nan() -> None:\n \"\"\"Binning infinite/NaN values works as expected.\"\"\"\n spec = BinningSpec(\n [float(\"-inf\"), 0.0, float(\"inf\")],\n right=False,\n names=[\"negative\", \"nonnegative\"],\n )\n assert spec.bins() == [\"negative\", \"nonnegative\"]\n assert spec.column_descriptor.column_type == ColumnType.VARCHAR\n bin_tests = {\n -1.0: \"negative\",\n 0.0: \"nonnegative\",\n 1.0: \"nonnegative\",\n float(\"-inf\"): \"negative\",\n float(\"inf\"): \"nonnegative\",\n float(\"nan\"): None,\n None: None,\n }\n for val, 
expected_bin in bin_tests.items():\n assert spec(val) == expected_bin\n\n\ndef test_binning_nan_bin() -> None:\n \"\"\"Binning with the nan_bin option works as expected.\"\"\"\n spec = BinningSpec(\n [float(\"-inf\"), 0.0, float(\"inf\")],\n right=False,\n names=[\"negative\", \"nonnegative\"],\n nan_bin=\"NaN\",\n )\n assert spec.bins() == [\"negative\", \"nonnegative\", \"NaN\"]\n assert spec.column_descriptor.column_type == ColumnType.VARCHAR\n bin_tests = {\n 1.0: \"nonnegative\",\n float(\"inf\"): \"nonnegative\",\n float(\"nan\"): \"NaN\",\n None: None,\n }\n for val, expected_bin in bin_tests.items():\n assert spec(val) == expected_bin\n\n\ndef test_binning_nan_bin_matching() -> None:\n \"\"\"Binning with nan_bin works when the given bin matches another bin.\"\"\"\n spec = BinningSpec(\n [float(\"-inf\"), 0.0, float(\"inf\")],\n right=False,\n names=[\"negative\", \"nonnegative\"],\n nan_bin=\"nonnegative\",\n )\n assert spec.bins() == [\"negative\", \"nonnegative\"]\n assert spec.column_descriptor.column_type == ColumnType.VARCHAR\n bin_tests = {\n 1.0: \"nonnegative\",\n float(\"inf\"): \"nonnegative\",\n float(\"nan\"): \"nonnegative\",\n None: None,\n }\n for val, expected_bin in bin_tests.items():\n assert spec(val) == expected_bin\n\n\ndef test_binning_date_names() -> None:\n \"\"\"BinningSpecs with dates as bin names work as expected.\"\"\"\n spec = BinningSpec(\n [datetime.datetime(2022, 1, day) for day in range(1, 10)],\n names=[datetime.date(2022, 1, day) for day in range(1, 9)],\n right=False,\n )\n assert spec.column_descriptor.column_type == ColumnType.DATE\n bin_tests = {\n datetime.datetime.fromisoformat(\"2022-01-02\"): datetime.date(2022, 1, 2),\n datetime.datetime.fromisoformat(\"2022-01-02 00:00\"): datetime.date(2022, 1, 2),\n datetime.datetime.fromisoformat(\"2022-01-03 05:30\"): datetime.date(2022, 1, 3),\n }\n for val, expected_bin in bin_tests.items():\n assert spec(val) == expected_bin\n\n\ndef test_not_enough_bins() -> None:\n \"\"\"Edge lists that result in zero bins are rejected.\"\"\"\n with pytest.raises(ValueError, match=\"At least two bin edges must be provided\"):\n BinningSpec([])\n with pytest.raises(ValueError, match=\"At least two bin edges must be provided\"):\n BinningSpec([1])\n\n\n@pytest.mark.parametrize(\n \"edges\",\n [\n ([1, 2, 3, 5, 4]),\n ([1, 2, 3, 4, 4]),\n ([1, 2, 3, 3, 4]),\n ([1, 1, 2, 3, 4]),\n ([1.0, 1.1, 1.10, 2.0]),\n ([datetime.date(2022, 1, 1), datetime.date(2022, 1, 1)]),\n ],\n)\ndef test_unsorted_edges(edges: List[Any]):\n \"\"\"Edge lists that are not sorted or contain duplicate values are rejected.\"\"\"\n with pytest.raises(\n ValueError,\n match=\"Bin edges must be sorted in ascending order, with no duplicates\",\n ):\n BinningSpec(edges)\n\n\n@pytest.mark.parametrize(\n \"edges\",\n [\n ([1.0, 2]),\n ([1, 2, 3, 4, 5, 6.0, 7, 8]),\n ([1, \"2\"]),\n ([\"1\", 2]),\n ([datetime.date(1, 1, 1), datetime.datetime(1, 1, 2)]),\n ([datetime.datetime(1, 1, 1), datetime.date(1, 1, 2)]),\n ],\n)\ndef test_mixed_type_edges(edges: List[Any]):\n \"\"\"Edge lists with non-uniform type are rejected.\"\"\"\n with pytest.raises(\n ValueError, match=\"Invalid bin edges: list contains elements of multiple types\"\n ):\n BinningSpec(edges)\n\n\n@pytest.mark.parametrize(\"edges\", [([1, None]), ([None, 1])])\ndef test_none_type_edges(edges: List[Any]):\n \"\"\"Edge lists with non-uniform type are rejected.\"\"\"\n with pytest.raises(ValueError, match=\"Invalid bin edges: None is not allowed\"):\n 
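# the BinningSpec constructor itself is expected to raise here; no binning call is needed\n 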
BinningSpec(edges)\n\n\n@pytest.mark.parametrize(\n \"names\",\n [\n [1.0, 2],\n [1, 2, 3, 4, 5, 6.0, 7, 8],\n [1, \"2\"],\n [\"1\", 2],\n [datetime.date(1, 1, 1), datetime.datetime(1, 1, 2)],\n [datetime.datetime(1, 1, 1), datetime.date(1, 1, 2)],\n ],\n)\ndef test_mixed_type_names(names: List[Any]):\n \"\"\"Bin name lists with non-uniform type are rejected.\"\"\"\n with pytest.raises(\n ValueError, match=\"Invalid bin names: list contains elements of multiple types\"\n ):\n BinningSpec(range(len(names) + 1), names=names)\n\n\n@pytest.mark.parametrize(\"names\", [([1, None]), ([None, 1])])\ndef test_none_type_names(names: List[Any]):\n \"\"\"Bin name lists with non-uniform type are rejected.\"\"\"\n with pytest.raises(ValueError, match=\"Invalid bin names: None is not allowed\"):\n BinningSpec(range(len(names) + 1), names=names)\n\n\n@pytest.mark.parametrize(\n \"names,nan_bin\", [([\"0\", \"1\"], 0), ([0, 1], \"nan\"), ([0.5, 1.0], \"nan\")]\n)\ndef test_mismatched_nan_bin_name(names: List[Any], nan_bin: Any):\n \"\"\"NaN bin names that don't match other bin names' type are rejected.\"\"\"\n with pytest.raises(\n ValueError, match=\"NaN bin name must have the same type as other bin names\"\n ):\n BinningSpec(range(len(names) + 1), names=names, nan_bin=nan_bin)\n\n\n@pytest.mark.parametrize(\n \"edges,names\",\n [\n ([1, 2], []),\n ([1, 2], [\"a\", \"b\"]),\n ([1, 2], [\"a\", \"b\", \"c\"]),\n ([\"a\", \"b\", \"c\", \"d\"], [1, 2, 3, 4]),\n ([\"a\", \"b\", \"c\", \"d\"], [1, 2]),\n ],\n)\ndef test_wrong_names_length(edges: List[Any], names: List[Any]):\n \"\"\"Bin name lists of the wrong length are rejected.\"\"\"\n with pytest.raises(\n ValueError,\n match=\"Number of bin names must be one less than the number of bin edges\",\n ):\n BinningSpec(edges, names=names)\n","repo_name":"uscensusbureau/DAS_2020_DDHCA_Production_Code","sub_path":"tumult/analytics/test/unit/test_binning_spec.py","file_name":"test_binning_spec.py","file_ext":"py","file_size_in_byte":13938,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"9125323702","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom django.urls import reverse\nfrom django.contrib import messages\nfrom django.views import View\n# Create your views here.\nfrom .models import Post, Price\nfrom .forms import PostDeleteModelForm, PostModelForm, PostModelFormView\n\n\nclass PostListView(View):\n\n def get(self, request, *args, **kwargs):\n posts = Post.objects.all()\n return render(request, 'products_list.html', {'posts': posts})\n\n def post(self, request, *args, **kwargs):\n posts = Post.objects.all()\n if self.request.POST.get('from-price') and self.request.POST.get('until-price'):\n frome = int(self.request.POST.get('from-price'))\n until = int(self.request.POST.get('until-price'))\n answers = []\n for post in posts:\n if frome <= post.cost <= until:\n answers.append(post)\n posts = answers\n # always return a response, even when no price range was submitted\n return render(request, 'products_list.html', {'posts': posts})\n\n\nclass PostDetailView(View):\n model = Post\n template_name = 'post_detail.html'\n \n def get(self, request, *args, **kwargs):\n self.post = self.model.objects.get(slug=self.kwargs['slug'])\n self.costs = Price.objects.filter(product = self.post)\n print(self.costs)\n self.icosts = []\n for cost in self.costs:\n self.icosts += [cost.value]\n # calculate the middle cost\n print(self.icosts)\n mergeSort(self.icosts)\n if len(self.icosts)%2 == 0:\n self.middle_cost = 
(self.icosts[len(self.icosts)//2] + self.icosts[(len(self.icosts)//2)-1])/2 \n else:\n self.middle_cost = self.icosts[(len(self.icosts)-1)//2]\n # calculate more expensive products\n self.expensives = self.model.objects.filter(cost__gt=self.post.cost)\n print(self.expensives)\n # calculate cheaper products\n self.cheapers = self.model.objects.filter(cost__lt=self.post.cost)\n print(self.cheapers)\n return render(request, 'post_detail.html',{'post':self.post,'middle_cost':self.middle_cost,'expensives':self.expensives,'cheapers':self.cheapers})\n \n\ndef mergeSort(arr):\n if len(arr) > 1:\n \n # Finding the mid of the array\n mid = len(arr)//2\n \n # Dividing the array elements\n L = arr[:mid]\n \n # into 2 halves\n R = arr[mid:]\n \n # Sorting the first half\n mergeSort(L)\n \n # Sorting the second half\n mergeSort(R)\n \n i = j = k = 0\n \n # Copy data to temp arrays L[] and R[]\n while i < len(L) and j < len(R):\n if L[i] < R[j]:\n arr[k] = L[i]\n i += 1\n else:\n arr[k] = R[j]\n j += 1\n k += 1\n \n # Checking if any element was left\n while i < len(L):\n arr[k] = L[i]\n i += 1\n k += 1\n \n while j < len(R):\n arr[k] = R[j]\n j += 1\n k += 1\n\ndef AddPost(request):\n form = PostModelForm()\n if request.method == \"POST\":\n form = PostModelForm(request.POST,request.FILES)\n print(\"----------\")\n if form.is_valid():\n print(\"+++++++++\")\n updated_request = request.POST.copy()\n updated_request.update({'slug': 'a'})\n form = PostModelFormView(updated_request,request.FILES)\n a = form.save()\n a.slug = a.name\n a.save()\n b = Price(product = a,value = a.cost)\n b.save()\n messages.add_message(request, messages.SUCCESS, f'The post successfully saved!', extra_tags=\"success\")\n return redirect(reverse('post_list'))\n\n return render(request,'forms/post_form.html',{'form':form})\n\n\ndef EditPost(request,slug):\n post = get_object_or_404(Post,slug=slug)\n form = PostModelForm(instance=post)\n\n if request.method == \"POST\":\n form = PostModelForm(request.POST,instance=post) \n if form.is_valid():\n a = form.save()\n b = Price(product = a,value = a.cost)\n b.save()\n return redirect(reverse('post_list'))\n\n return render(request,'forms/edit_post_form.html',{'form':form,'post':post})\n\n \n\n\ndef DeletePost(request,slug):\n \n post = get_object_or_404(Post,slug=slug)\n \n form = PostDeleteModelForm(instance=post)\n if request.method == \"POST\":\n post.delete()\n return redirect('/') \n\n\n return render(request,'forms/delete_post_form.html',{'form':form,'post':post})\n \n\n","repo_name":"amirsahebi/data_structure_project","sub_path":"post/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"39192766414","text":"def ler_matriz():\n matriz = []\n for i in range(10):\n linha = []\n for j in range(10):\n elemento = int(input(f\"Enter the value for [{i+1},{j+1}]: \"))\n linha.append(elemento)\n matriz.append(linha)\n return matriz\n\n\ndef encontrar_posicao_maior(matriz):\n maior = matriz[0][0]\n linha_maior = 0\n coluna_maior = 0\n for i in range(10):\n for j in range(10):\n if matriz[i][j] > maior:\n maior = matriz[i][j]\n linha_maior = i\n coluna_maior = j\n return linha_maior, coluna_maior\n\n\nmatriz = ler_matriz()\nlinha_maior, coluna_maior = encontrar_posicao_maior(matriz)\n\n\nprint(\"The position of the largest element is:\", (linha_maior + 1, coluna_maior + 
1))\n","repo_name":"ViniciusCanedo/algLogicaProgramacaoFatec","sub_path":"aula9-2/ex5.py","file_name":"ex5.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"28255912207","text":"# Author: Jintao Huang\n# Email: hjt_study@qq.com\n# Date: 2021-6-2\n\nimport tensorflow as tf\nimport torch\n\n# tf.where\ndef relu_tf(x):\n return tf.where(x > 0, x, 0)\n\n\ndef relu_torch(x):\n return torch.where(x > 0, x, torch.tensor(0, dtype=x.dtype, device=x.device))\n\n\nx = tf.constant([-1, 0, 1.])\nx2 = torch.tensor([-1, 0, 1.])\nprint(relu_tf(x))\nprint(relu_torch(x2))\n# tf.Tensor([0. 0. 1.], shape=(3,), dtype=float32)\n# tensor([0., 0., 1.])\n\n# 2.\na = tf.constant([-1, 0, 1])\nb = tf.constant([1, 0, -1])\nprint(tf.where(a > b, a, b))\nprint(tf.where(a >= b))\n# tf.Tensor([1 0 1], shape=(3,), dtype=int32)\n# tf.Tensor(\n# [[1]\n# [2]], shape=(2, 1), dtype=int64)\n# --------------------------- Random numbers\nimport numpy as np\nrdm = np.random.RandomState(seed=1)\nprint(rdm.rand())\nrdm = np.random.RandomState(seed=1)\nprint(rdm.rand(2))\nrdm = np.random.RandomState(seed=1)\nprint(rdm.rand(2, 2))\n# 0.417022004702574\n# [0.417022 0.72032449]\n# [[4.17022005e-01 7.20324493e-01]\n# [1.14374817e-04 3.02332573e-01]]\n","repo_name":"Jintao-Huang/torch_tf2_study","sub_path":"_old/tf2/1 basic_tf2/course_2/1.1 ready.py","file_name":"1.1 ready.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"63"} +{"seq_id":"72810521801","text":"from app_util import *\nimport requests\nimport re\n\n\ndef findUrls(string):\n\n regex = r\"(?i)\\b((?:https?://|www\\d{0,3}[.]|[a-z0-9.\\-]+[.][a-z]{2,4}/)(?:[^\\s()<>]+|\\(([^\\s()<>]+|(\\([^\\s()<>]+\\)))*\\))+(?:\\(([^\\s()<>]+|(\\([^\\s()<>]+\\)))*\\)|[^\\s`!()\\[\\]{};:'\\\".,<>?«»“”‘’]))\"\n url = re.findall(regex, string)\n return [x[0] for x in url]\n\n\ndef handleOutlineCommand(browser, chatBoxInput, link=''):\n\n links = [link]\n\n if link == '':\n message = getLastMessageElement(browser, -2)\n links = findUrls(message.text)\n\n if len(links) == 0:\n sendMessage(browser, chatBoxInput,\n f'(BOT) No valid link was found')\n else:\n for link in links:\n params = {'source_url': link}\n headers = {\n 'Accept': '*/*',\n 'Accept-Encoding': 'gzip, deflate, br',\n 'Accept-Language': 'pt-BR,pt;q=0.9,en-US;q=0.8,en;q=0.7',\n 'origin': 'https://outline.com',\n 'referer': 'https://outline.com/',\n }\n r = requests.get(\n 'https://api.outline.com/v3/parse_article', params=params, headers=headers)\n resp = r.json()\n data = resp['data']\n sendMessage(browser, chatBoxInput,\n f'(BOT) {data[\"title\"]}\\n Reading time: {data[\"read_time\"]}\\nhttps://outline.com/{data[\"short_code\"]}')\n\n\noutline_command = {\n 'command': '!xeule',\n 'function': handleOutlineCommand,\n 'args': [],\n 'acceptParams': True,\n 'description': 'Removes the paywall from the last link sent or from a link given as a parameter'\n}\n","repo_name":"lucasbmeister/hangouts-bot","sub_path":"command_handlers/outline_command.py","file_name":"outline_command.py","file_ext":"py","file_size_in_byte":1622,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"43507319131","text":"from pyparsing import CaselessKeyword\n\n\nNOT = CaselessKeyword('not')\nAND = CaselessKeyword('and')\nOR = CaselessKeyword('or')\nCASE = CaselessKeyword('case')\nWHEN = CaselessKeyword('when')\nTHEN = 
CaselessKeyword('then')\nELSE = CaselessKeyword('else')\nEND = CaselessKeyword('end')\nCAST = CaselessKeyword('cast')\nAS = CaselessKeyword('as')\nDATE = CaselessKeyword('date')\nEXTRACT = CaselessKeyword('extract')\nFROM = CaselessKeyword('from')\n\nSECOND = CaselessKeyword('second')\nMINUTE = CaselessKeyword('minute')\nHOUR = CaselessKeyword('hour')\nDAY = CaselessKeyword('day')\nWEEK = CaselessKeyword('week')\nMONTH = CaselessKeyword('month')\nQUARTER = CaselessKeyword('quarter')\nYEAR = CaselessKeyword('year')\nINTERVAL = CaselessKeyword('interval')\nSUBSTRING = CaselessKeyword('substring')\nFOR = CaselessKeyword('for')\nTIMESTAMP = CaselessKeyword('timestamp')\nTRIM = CaselessKeyword('trim')\nBOTH = CaselessKeyword('both')\nLEADING = CaselessKeyword('leading')\nTRAILING = CaselessKeyword('trailing')\nOVER = CaselessKeyword('over')\nDISTINCT = CaselessKeyword('distinct')\nIS = CaselessKeyword('is')\nNULL = CaselessKeyword('null')\nFIRST = CaselessKeyword('first')\nID = CaselessKeyword('id')\nKEY = CaselessKeyword('key')\nPARTITION = CaselessKeyword('partition')\nBY = CaselessKeyword('by')\nORDER = CaselessKeyword('order')\nROWS = CaselessKeyword('rows')\nRANGE = CaselessKeyword('range')\nBETWEEN = CaselessKeyword('between')\nCURRENT = CaselessKeyword('current')\nROW = CaselessKeyword('row')\nUNBOUNDED = CaselessKeyword('unbounded')\nPRECEDING = CaselessKeyword('preceding')\nFOLLOWING = CaselessKeyword('following')\nDESC = CaselessKeyword('desc')\nASC = CaselessKeyword('asc')\nASCENDING = CaselessKeyword('ascending')\nDESCENDING = CaselessKeyword('descending')\nNULLS = CaselessKeyword('nulls')\nLAST = CaselessKeyword('last')\nCOLLATE = CaselessKeyword('collate')\nWITH = CaselessKeyword('with')\nSELECT = CaselessKeyword('select')\nTOP = CaselessKeyword('top')\nTIES = CaselessKeyword('ties')\nLEFT = CaselessKeyword('left')\nINNER = CaselessKeyword('inner')\nARRAY = CaselessKeyword('array')\nJOIN = CaselessKeyword('join')\nWINDOW = CaselessKeyword('window')\nPREWHERE = CaselessKeyword('prewhere')\nWHERE = CaselessKeyword('where')\nGROUP = CaselessKeyword('group')\nCUBE = CaselessKeyword('cube')\nROLLUP = CaselessKeyword('rollup')\nTOTALS = CaselessKeyword('totals')\nHAVING = CaselessKeyword('having')\nLIMIT = CaselessKeyword('limit')\nSETTINGS = CaselessKeyword('settings')\nOFFSET = CaselessKeyword('offset')\nGLOBAL = CaselessKeyword('global')\nIN = CaselessKeyword('in')\nLIKE = CaselessKeyword('like')\nILIKE = CaselessKeyword('ilike')\nLOCAL = CaselessKeyword('local')\nFINAL = CaselessKeyword('final')\nALL = CaselessKeyword('all')\nANY = CaselessKeyword('any')\nASOF = CaselessKeyword('asof')\nSEMI = CaselessKeyword('semi')\nANTI = CaselessKeyword('anti')\nRIGHT = CaselessKeyword('right')\nOUTER = CaselessKeyword('outer')\nFULL = CaselessKeyword('full')\nCROSS = CaselessKeyword('cross')\nON = CaselessKeyword('on')\nUSING = CaselessKeyword('using')\nSAMPLE = CaselessKeyword('sample')\nUNION = CaselessKeyword('union')\nEXCEPT = CaselessKeyword('except')\nINTERSECT = CaselessKeyword('intersect')\nFORMAT = CaselessKeyword('format')\nINTO = CaselessKeyword('into')\nOUTFILE = CaselessKeyword('outfile')\nCOMPRESSION = CaselessKeyword('compression')\nLEVEL = CaselessKeyword('level')\n\nIF = CaselessKeyword('if')\nADD = CaselessKeyword('add')\nCOLUMN = CaselessKeyword('column')\nAFTER = CaselessKeyword('after')\nEXISTS = CaselessKeyword('exists')\nDEFAULT = CaselessKeyword('default')\nMATERIALIZED = CaselessKeyword('materialized')\nMATERIALIZE = CaselessKeyword('materialize')\nALIAS = 
CaselessKeyword('alias')\nTYPE = CaselessKeyword('type')\nGRANULARITY = CaselessKeyword('granularity')\nREMOVE = CaselessKeyword('remove')\nTTL = CaselessKeyword('ttl')\nRENAME = CaselessKeyword('rename')\nTO = CaselessKeyword('to')\nMODIFY = CaselessKeyword('modify')\nATTACH = CaselessKeyword('attach')\nCLEAR = CaselessKeyword('clear')\nINDEX = CaselessKeyword('index')\nPROJECTION = CaselessKeyword('projection')\nCOMMENT = CaselessKeyword('comment')\nDELETE = CaselessKeyword('delete')\nDETACH = CaselessKeyword('detach')\nDETACHED = CaselessKeyword('detached')\nDROP = CaselessKeyword('drop')\nFREEZE = CaselessKeyword('freeze')\nUPDATE = CaselessKeyword('update')\nREPLACE = CaselessKeyword('replace')\nMOVE = CaselessKeyword('move')\nDISK = CaselessKeyword('disk')\nVOLUME = CaselessKeyword('volume')\nTABLE = CaselessKeyword('table')\nCODEC = CaselessKeyword('codec')\nALTER = CaselessKeyword('alter')\nPART = CaselessKeyword('part')\nNAME = CaselessKeyword('name')\nUNFREEZE = CaselessKeyword('unfreeze')\nFETCH = CaselessKeyword('fetch')\nRESET = CaselessKeyword('reset')\nGRANULARITY = CaselessKeyword('granularity')\nCHECK = CaselessKeyword('check')\nCONSTRAINT = CaselessKeyword('constraint')\nQUERY = CaselessKeyword('query')\nPROFILE = CaselessKeyword('profile')\nCLUSTER = CaselessKeyword('cluster')\nPERMISSIVE = CaselessKeyword('permissive')\nRESTRICTIVE = CaselessKeyword('restrictive')\nPOLICY = CaselessKeyword('policy')\nNONE = CaselessKeyword('none')\nROLE = CaselessKeyword('role')\nCREATE = CaselessKeyword('create')\nDATABASE = CaselessKeyword('database')\nENGINE = CaselessKeyword('engine')\nDICTIONARY = CaselessKeyword('dictionary')\nEXPRESSION = CaselessKeyword('expression')\nHIERARCHICAL = CaselessKeyword('hierarchical')\nINJECTIVE = CaselessKeyword('injective')\nIS_OBJECT_ID = CaselessKeyword('is_object_id')\n","repo_name":"tabarincev/clickhouse_parser","sub_path":"src/keywords.py","file_name":"keywords.py","file_ext":"py","file_size_in_byte":5425,"program_lang":"python","lang":"ceb","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"11255117752","text":"\"\"\"\nIMAGE SEGMENTATION: RGB max\n\"\"\"\nfrom os.path import join\nfrom copy import deepcopy\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\n\nfrom __init__ import assetsdir\n\ndef RGBmax(img):\n r = img[:, :, 0]\n g = img[:, :, 1]\n b = img[:, :, 2]\n\n M = np.maximum(np.maximum(r, g), b)\n\n img.setflags(write=1)\n\n r[r 0:\n nTier1Copies += 1\n # add candidates\n for tmpCandSite in candSites:\n if not tmpCandSite in usedSites:\n allCandidates.append(tmpCandSite)\n # add candidates for MoU\n for tmpCandSite in candForMoU:\n if not tmpCandSite in usedSites:\n allCandidatesMoU.append(tmpCandSite)\n # add clouds\n if not tmpCloud in allOKClouds:\n allOKClouds.append(tmpCloud)\n self.putLog(\"PD2P sites with comp replicas : %s\" % str(allCompPd2pSites))\n self.putLog(\"PD2P T2 candidates : %s\" % str(allCandidates))\n self.putLog(\"PD2P T2 MoU candidates : %s\" % str(allCandidatesMoU)) \n self.putLog(\"PD2P # of T2 subscriptions : %s\" % totalUserSub)\n self.putLog(\"PD2P # of T1 secondaries : %s\" % totalSecReplicas)\n self.putLog(\"PD2P # of T1 subscriptions : %s\" % nT1Sub)\n self.putLog(\"PD2P # of T1 replicas : %s\" % nTier1Copies) \n self.putLog(\"PD2P T1 candidates : %s\" % str(allT1Candidates))\n self.putLog(\"PD2P nUsed : %s\" % nUsed)\n # get dataset size\n retDsSize,dsSize = rucioAPI.getDatasetSize(tmpDS)\n if not retDsSize:\n self.putLog(\"failed to get 
dataset size of %s\" % tmpDS,type='error',sendLog=True)\n continue\n self.putLog(\"PD2P nWaitingJobsets : %s\" % nWaitingJobsets)\n if totalInputSize != 0:\n self.putLog(\"PD2P nWaitingJobs : %s = %s(all)*%s(dsSize)/%s(contSize)\" % \\\n (int((float(nWaitingJobsAll * dsSize) / float(totalInputSize))),\n nWaitingJobsAll,dsSize,totalInputSize))\n else:\n self.putLog(\"PD2P nWaitingJobs : %s = %s(all)\" % \\\n (nWaitingJobsAll,nWaitingJobsAll))\n # make T1-T1\n triggeredT1PD2P = False\n if nUsed > 0:\n # extract integer part. log10(nUsed) and log10(nUsed)+1 are used to avoid round-off error\n intLog10nUsed = int(math.log10(nUsed))\n if self.simul or (int(math.log10(nUsed)) > totalSecReplicas and \\\n (nUsed == 10**intLog10nUsed or nUsed == 10**(intLog10nUsed+1)) and \\\n nT1Sub == 0 and allT1Candidates != []):\n self.putLog(\"making T1-T1\",sendLog=True)\n # make subscription\n retT1Sub,useSmallT1 = self.makeT1Subscription(allT1Candidates,tmpDS,dsSize,prodsourcelabel,nUsed)\n self.putLog(\"done for T1-T1\")\n triggeredT1PD2P = True\n # make a T2 copy when T1 PD2P was triggered\n if triggeredT1PD2P:\n # TODO\n retT2MoU, selectedSite = self.makeT2SubscriptionMoU(allCandidatesMoU, tmpDS, dsSize, 'T1MOU',\n prodsourcelabel, nUsed)\n if retT2MoU and selectedSite is not None:\n # remove from candidate list\n if selectedSite in allCandidates:\n allCandidates.remove(selectedSite)\n if selectedSite in allCandidatesMoU:\n allCandidatesMoU.remove(selectedSite)\n # increment the number of T2 subscriptions\n totalUserSub += 1\n # set the number of T2 PD2P replicas\n maxSitesHaveDS = 1\n # additional replicas\n if nWaitingJobsets > maxWaitingJobsets:\n # the number of waiting jobs for this dataset\n if totalInputSize != 0:\n # dataset in container\n tmpN = float(nWaitingJobsAll * dsSize) / float(totalInputSize)\n else:\n # dataset\n tmpN = float(nWaitingJobsAll)\n tmpN = int(math.log10(tmpN/float(maxWaitingJobs))) + nTier1Copies\n maxSitesHaveDS = max(maxSitesHaveDS,tmpN)\n # protection against too many replications\n maxSitesHaveDS = min(maxSitesHaveDS,protectionMaxNumReplicas)\n self.putLog(\"PD2P maxSitesHaveDS : %s\" % maxSitesHaveDS)\n # ignore the first job\n if nUsed == 0:\n self.putLog(\"skip the first job\",\n sendLog=True,actionTag='SKIPPED',tagsMap={'reason':'FIRSTJOB','dataset':tmpDS})\n if not self.simul:\n continue\n # check number of replicas \n if len(allCompPd2pSites) >= maxSitesHaveDS and nUsed != 1:\n self.putLog(\"skip since many T2 PD2P sites (%s>=%s) have the replica\" % (len(allCompPd2pSites),maxSitesHaveDS),\n sendLog=True,actionTag='SKIPPED',tagsMap={'reason':'TOO_MANY_T2_REPLICAS','dataset':tmpDS})\n if not self.simul:\n continue\n # check the number of subscriptions\n maxNumSubInAllCloud = max(0,maxSitesHaveDS-len(allCompPd2pSites))\n maxNumSubInAllCloud = min(2,maxNumSubInAllCloud)\n self.putLog(\"PD2P maxNumSubInAllCloud : %s\" % maxNumSubInAllCloud)\n if totalUserSub >= maxNumSubInAllCloud:\n self.putLog(\"skip since enough subscriptions (%s>=%s) were already made for T2 PD2P\" % \\\n (totalUserSub,maxNumSubInAllCloud),\n sendLog=True,actionTag='SKIPPED',tagsMap={'reason':'TOO_MANY_T2_SUBSCRIPTIONS','dataset':tmpDS})\n if not self.simul: \n continue\n # no candidates\n if len(allCandidates) == 0:\n self.putLog(\"skip since no candidates\",sendLog=True,actionTag='SKIPPED',tagsMap={'reason':'NO_T2_CANDIDATE','dataset':tmpDS})\n continue\n # get inverse weight for brokerage\n weightForBrokerage = self.getWeightForBrokerage(allCandidates, tmpDS, nReplicasInCloud, 
prodsourcelabel)\n self.putLog(\"inverse weight %s\" % str(weightForBrokerage))\n # get free disk size\n self.putLog(\"getting free disk size for T2 PD2P\")\n retFreeSizeMap,freeSizeMap = self.getFreeDiskSize(tmpDS,allCandidates, prodsourcelabel)\n if not retFreeSizeMap:\n self.putLog(\"failed to get free disk size\",type='error',sendLog=True)\n continue\n # run brokerage\n tmpJob = JobSpec()\n tmpJob.AtlasRelease = ''\n self.putLog(\"run brokerage for %s\" % tmpDS)\n usedWeight = pandaserver.brokerage.broker.schedule([tmpJob],self.taskBuffer,self.siteMapper,True,allCandidates,\n True,specialWeight=weightForBrokerage,getWeight=True,\n sizeMapForCheck=freeSizeMap,datasetSize=dsSize)\n selectedSite = tmpJob.computingSite\n for tmpWeightSite in usedWeight:\n tmpWeightStr = usedWeight[tmpWeightSite]\n tmpTagsMap = {'site':tmpWeightSite,'weight':tmpWeightStr,'dataset':tmpDS} \n if tmpWeightSite == selectedSite:\n if nUsed == 1:\n tmpActionTag = 'SELECTEDT2_JOB'\n elif len(allCompPd2pSites) == 0:\n tmpActionTag = 'SELECTEDT2_NOREP'\n else:\n tmpActionTag = 'SELECTEDT2_WAIT'\n tmpTagsMap['nused'] = nUsed\n tmpTagsMap['nwaitingjobs'] = nWaitingJobsAll\n tmpTagsMap['nwaitingjobsets'] = nWaitingJobsets\n tmpTagsMap['nsiteshaveds'] = len(allCompPd2pSites)\n else:\n tmpActionTag = 'UNSELECTEDT2'\n self.putLog(\"weight %s %s\" % (tmpWeightSite,tmpWeightStr),sendLog=True,\n actionTag=tmpActionTag,tagsMap=tmpTagsMap)\n self.putLog(\"site for T2 PD2P -> %s\" % selectedSite)\n # remove from candidate list\n if selectedSite in allCandidates:\n allCandidates.remove(selectedSite)\n if selectedSite in allCandidatesMoU:\n allCandidatesMoU.remove(selectedSite)\n # make subscription\n if not self.simul:\n selectedSiteSpec = self.siteMapper.getSite(selectedSite)\n scope_input, scope_output = select_scope(selectedSiteSpec, prodsourcelabel)\n subRet,dq2ID = self.makeSubscription(tmpDS,selectedSite, scope_input, ddmShare='secondary')\n self.putLog(\"made subscription to %s:%s\" % (selectedSite,dq2ID),sendLog=True)\n usedSites.append(selectedSite)\n # update database\n if subRet:\n self.taskBuffer.addUserSubscription(tmpDS,[dq2ID])\n # additional T2 copy with MoU share when it is the second submission\n if nUsed == 1 or self.simul:\n retT2MoU,selectedSite = self.makeT2SubscriptionMoU(allCandidatesMoU, tmpDS, dsSize, 'T2MOU',\n prodsourcelabel, nUsed)\n self.putLog(\"end for %s\" % self.jobs[0].PandaID)\n except Exception:\n errType,errValue = sys.exc_info()[:2]\n self.putLog(\"%s %s\" % (errType,errValue),'error')\n\n\n # get candidate sites for subscription\n def getCandidates(self, inputDS, prodsourcelabel, checkUsedFile=True, useHidden=False, useCloseSites=False):\n # return for failure\n failedRet = False,{'':{'':([],[],[],0,False,False,0,0,[])}}\n # get replica locations\n if inputDS.endswith('/'):\n # container\n status,tmpRepMaps = self.getListDatasetReplicasInContainer(inputDS)\n # get used datasets\n if status and checkUsedFile:\n status,tmpUsedDsList = self.getUsedDatasets(tmpRepMaps)\n # remove unused datasets\n newRepMaps = {}\n for tmpKey in tmpRepMaps:\n tmpVal = tmpRepMaps[tmpKey]\n if tmpKey in tmpUsedDsList:\n newRepMaps[tmpKey] = tmpVal\n tmpRepMaps = newRepMaps \n else:\n # normal dataset\n status,tmpRepMap = self.getListDatasetReplicas(inputDS)\n tmpRepMaps = {inputDS:tmpRepMap}\n if not status:\n # failed\n self.putLog(\"failed to get replica locations for %s\" % inputDS,'error')\n return failedRet\n # get close sites\n closeSitesMap = {}\n # get all sites\n allSiteMap = {}\n for tmpSiteName in 
self.siteMapper.siteSpecList:\n tmpSiteSpec = self.siteMapper.siteSpecList[tmpSiteName]\n # check cloud\n if not tmpSiteSpec.cloud in self.pd2pClouds:\n continue\n # ignore test sites\n if 'test' in tmpSiteName.lower():\n continue\n # analysis only\n if not tmpSiteSpec.runs_analysis():\n continue\n # online\n if not tmpSiteSpec.status in ['online']:\n self.putLog(\"skip %s due to status=%s\" % (tmpSiteName,tmpSiteSpec.status))\n continue\n allSiteMap.setdefault(tmpSiteSpec.cloud, [])\n allSiteMap[tmpSiteSpec.cloud].append(tmpSiteSpec)\n # NG DQ2 IDs\n ngDQ2SuffixList = ['LOCALGROUPDISK','STAGING']\n # loop over all clouds\n returnMap = {}\n checkedMetaMap = {}\n userSubscriptionsMap = {}\n for cloud in self.pd2pClouds:\n # DQ2 prefix of T1\n tmpT1SiteID = self.siteMapper.getCloud(cloud)['source']\n tmpT1SiteSpec = self.siteMapper.getSite(tmpT1SiteID)\n tmp_scope_input, tmp_scope_output = select_scope(tmpT1SiteSpec, prodsourcelabel)\n tmpT1DQ2ID = tmpT1SiteSpec.ddm_input[tmp_scope_input]\n prefixDQ2T1 = re.sub('[^_]+DISK$','',tmpT1DQ2ID)\n # loop over all datasets \n for tmpDS in tmpRepMaps:\n tmpRepMap = tmpRepMaps[tmpDS]\n candSites = []\n sitesComDS = []\n sitesCompPD2P = []\n # check T1 has a replica and get close sites\n t1HasReplica = False\n t1HasPrimary = False\n nSecReplicas = 0\n closeSiteList = []\n candForMoU = []\n for tmpDQ2ID in tmpRepMap:\n # check NG suffix\n ngSuffixFlag = False\n for tmpNGSuffix in ngDQ2SuffixList:\n if tmpDQ2ID.endswith(tmpNGSuffix):\n ngSuffixFlag = True\n break\n if ngSuffixFlag:\n continue\n # get close sites\n if tmpDQ2ID in closeSitesMap:\n for tmpCloseSiteID in closeSitesMap[tmpDQ2ID]:\n if not tmpCloseSiteID in closeSiteList:\n closeSiteList.append(tmpCloseSiteID)\n self.putLog(\"close sites : %s\" % str(closeSiteList))\n # get on-going subscriptions\n timeRangeSub = 7\n userSubscriptionsMap.setdefault(tmpDS, self.taskBuffer.getUserSubscriptions(tmpDS,timeRangeSub))\n userSubscriptions = userSubscriptionsMap[tmpDS]\n # unused cloud\n if cloud not in allSiteMap:\n continue\n # count the number of T1 subscriptions\n nT1Sub = 0\n for tmpUserSub in userSubscriptions:\n if tmpUserSub.startswith(prefixDQ2T1):\n nT1Sub += 1\n # check sites\n nUserSub = 0\n for tmpSiteSpec in allSiteMap[cloud]:\n tmp_scope_input, tmp_scope_output = select_scope(tmpSiteSpec, prodsourcelabel)\n\n # check cloud\n if tmpSiteSpec.cloud != cloud:\n continue\n # prefix of DQ2 ID\n if tmpSiteSpec.ddm_input[tmp_scope_input] is None:\n continue\n prefixDQ2 = re.sub('[^_]+DISK$', '', tmpSiteSpec.ddm_input[tmp_scope_input])\n # skip T1\n if prefixDQ2 == prefixDQ2T1:\n continue\n # check if corresponding DQ2 ID is a replica location\n hasReplica = False\n for tmpDQ2ID in tmpRepMap:\n tmpStatMap = tmpRepMap[tmpDQ2ID]\n # check NG suffix\n ngSuffixFlag = False\n for tmpNGSuffix in ngDQ2SuffixList:\n if tmpDQ2ID.endswith(tmpNGSuffix):\n ngSuffixFlag = True\n break\n if ngSuffixFlag:\n continue\n if tmpDQ2ID.startswith(prefixDQ2):\n if tmpStatMap[0]['total'] == tmpStatMap[0]['found']:\n # complete\n sitesComDS.append(tmpSiteSpec.sitename)\n if tmpSiteSpec.cachedse == 1:\n sitesCompPD2P.append(tmpSiteSpec.sitename) \n hasReplica = True\n break\n # site doesn't have a replica\n if (not hasReplica) and tmpSiteSpec.cachedse == 1:\n candForMoU.append(tmpSiteSpec.sitename)\n if not useCloseSites:\n candSites.append(tmpSiteSpec.sitename)\n else:\n # use close sites only\n if self.getDQ2ID(tmpSiteSpec.sitename, tmpDS, tmp_scope_input) in closeSiteList:\n candSites.append(tmpSiteSpec.sitename)\n # 
the number of subscriptions\n for tmpUserSub in userSubscriptions:\n if tmpUserSub.startswith(prefixDQ2):\n nUserSub += 1\n break\n # append\n returnMap.setdefault(tmpDS, {})\n returnMap[tmpDS][cloud] = (candSites,sitesComDS,sitesCompPD2P,nUserSub,t1HasReplica,t1HasPrimary,\n nSecReplicas,nT1Sub,candForMoU)\n # return\n return True,returnMap\n\n \n # get map of DQ2 IDs\n def getDQ2ID(self, sitename, dataset, scope):\n\n # get DQ2 ID\n if not self.siteMapper.checkSite(sitename):\n self.putLog(\"cannot find SiteSpec for %s\" % sitename)\n return ''\n dq2ID = self.siteMapper.getSite(sitename).ddm_input[scope]\n if True:\n # data\n matchEOS = re.search('_EOS[^_]+DISK$',dq2ID)\n if matchEOS is not None:\n dq2ID = re.sub('_EOS[^_]+DISK','_EOSDATADISK',dq2ID)\n else:\n dq2ID = re.sub('_[^_]+DISK','_DATADISK',dq2ID)\n else:\n # unsupported prefix for subscription\n self.putLog('%s has unsupported prefix for subscription' % dataset,'error')\n return ''\n # patch for MWT2_UC\n if dq2ID == 'MWT2_UC_DATADISK':\n dq2ID = 'MWT2_DATADISK'\n # return\n return dq2ID\n \n\n # make a subscription\n def makeSubscription(self, dataset, sitename, scope, givenDQ2ID=None,ddmShare='secondary'):\n # return for failure\n retFailed = False,''\n # get DQ2 IDs\n if givenDQ2ID is None:\n dq2ID = self.getDQ2ID(sitename, dataset, scope)\n else:\n dq2ID = givenDQ2ID\n if dq2ID == '':\n self.putLog(\"cannot find DQ2 ID for %s:%s\" % (sitename,dataset))\n return retFailed\n # register subscription\n self.putLog('registerDatasetSubscription %s %s' % (dataset,dq2ID))\n nTry = 3\n for iDDMTry in range(nTry):\n try:\n status = rucioAPI.registerDatasetSubscription(dataset,[dq2ID],\n activity='Data Brokering')\n out = 'OK'\n break\n except Exception:\n status = False\n errType,errValue = sys.exc_info()[:2]\n out = \"%s %s\" % (errType,errValue)\n time.sleep(30)\n # result\n if not status:\n self.putLog(out,'error')\n self.putLog('bad DDM response for %s' % dataset,'error')\n return retFailed\n # update \n self.putLog('%s %s' % (status,out))\n return True,dq2ID\n\n \n # get weight for brokerage\n def getWeightForBrokerage(self, sitenames, dataset, nReplicasInCloud, prodsourcelabel):\n # return for failure\n retFailed = False,{}\n retMap = {}\n # get the number of subscriptions for last 24 hours\n numUserSubs = self.taskBuffer.getNumUserSubscriptions()\n # loop over all sites\n for sitename in sitenames:\n # get DQ2 ID\n siteSpec = self.siteMapper.getSite(sitename)\n scope_input, scope_output = select_scope(siteSpec, prodsourcelabel)\n dq2ID = self.getDQ2ID(sitename,dataset, scope_input)\n if dq2ID == '':\n self.putLog(\"cannot find DQ2 ID for %s:%s\" % (sitename,dataset))\n return retFailed\n # append\n if dq2ID in numUserSubs:\n retMap[sitename] = 1 + numUserSubs[dq2ID]\n else:\n retMap[sitename] = 1\n # negative weight if a cloud already has replicas\n tmpCloud = self.siteMapper.getSite(sitename).cloud\n retMap[sitename] *= (1 + nReplicasInCloud[tmpCloud])\n # return\n return retMap\n\n\n # get free disk size\n def getFreeDiskSize(self, dataset, siteList, prodsourcelabel):\n # return for failure\n retFailed = False,{}\n # loop over all sites\n sizeMap = {}\n for sitename in siteList:\n # reuse cached value\n if sitename in self.cachedSizeMap:\n sizeMap[sitename] = self.cachedSizeMap[sitename]\n continue\n # get DQ2 IDs\n siteSpec = self.siteMapper.getSite(sitename)\n scope_input, scope_output = select_scope(siteSpec, prodsourcelabel)\n dq2ID = self.getDQ2ID(sitename, dataset, scope_input)\n if dq2ID == '':\n self.putLog(\"cannot 
find DQ2 ID for %s:%s\" % (sitename,dataset))\n return retFailed\n tmpMap = rucioAPI.getRseUsage(dq2ID)\n if tmpMap == {}:\n self.putLog('getRseUsage failed for {0}'.format(sitename))\n # append\n sizeMap[sitename] = tmpMap\n # cache\n self.cachedSizeMap[sitename] = sizeMap[sitename]\n # return\n self.putLog('getFreeDiskSize done->%s' % str(sizeMap))\n return True,sizeMap\n \n\n \n # get list of replicas for a dataset\n def getListDatasetReplicas(self,dataset):\n nTry = 3\n for iDDMTry in range(nTry):\n self.putLog(\"%s/%s listDatasetReplicas %s\" % (iDDMTry,nTry,dataset))\n status,out = rucioAPI.listDatasetReplicas(dataset)\n if status != 0:\n time.sleep(10)\n else:\n break\n # result \n if status != 0:\n self.putLog(out,'error')\n self.putLog('bad response for %s' % dataset, 'error') \n return False,{}\n self.putLog('getListDatasetReplicas->%s' % str(out))\n return True,out\n\n \n \n # get replicas for a container \n def getListDatasetReplicasInContainer(self,container):\n # response for failure\n resForFailure = False,{}\n # get datasets in container\n nTry = 3\n for iDDMTry in range(nTry):\n self.putLog('%s/%s listDatasetsInContainer %s' % (iDDMTry,nTry,container))\n datasets,out = rucioAPI.listDatasetsInContainer(container)\n if datasets is None:\n time.sleep(60)\n else:\n break\n if datasets is None:\n self.putLog(out,'error')\n self.putLog('bad DDM response for %s' % container, 'error')\n return resForFailure\n # loop over all datasets\n allRepMap = {}\n for dataset in datasets:\n # get replicas\n status,tmpRepSites = self.getListDatasetReplicas(dataset)\n if not status:\n return resForFailure\n # append\n allRepMap[dataset] = tmpRepSites\n # return\n self.putLog('getListDatasetReplicasInContainer done')\n return True,allRepMap \n\n\n # get datasets used by jobs\n def getUsedDatasets(self,datasetMap):\n resForFailure = (False,[])\n # loop over all datasets\n usedDsList = []\n for datasetName in datasetMap:\n # get file list\n nTry = 3\n for iDDMTry in range(nTry):\n try:\n self.putLog('%s/%s listFilesInDataset %s' % (iDDMTry,nTry,datasetName))\n fileItems,out = rucioAPI.listFilesInDataset(datasetName)\n status = True\n break\n except Exception:\n status = False\n errType,errValue = sys.exc_info()[:2]\n out = '{0} {1}'.format(errType,errValue)\n time.sleep(60)\n if not status:\n self.putLog(out,'error')\n self.putLog('bad DDM response to get size of %s' % datasetName, 'error')\n return resForFailure\n # get \n # check if jobs use the dataset\n usedFlag = False\n for tmpJob in self.jobs:\n for tmpFile in tmpJob.Files:\n if tmpFile.type == 'input' and tmpFile.lfn in fileItems:\n usedFlag = True\n break\n # escape \n if usedFlag:\n break\n # used\n if usedFlag:\n usedDsList.append(datasetName)\n # return\n self.putLog(\"used datasets = %s\" % str(usedDsList))\n return True,usedDsList\n\n\n # get file from dataset\n def getFileFromDataset(self,datasetName,guid,randomMode=False,nSamples=1):\n resForFailure = (False,None)\n # get files in datasets\n global g_filesInDsMap\n if datasetName not in g_filesInDsMap:\n nTry = 3\n for iDDMTry in range(nTry):\n try:\n self.putLog('%s/%s listFilesInDataset %s' % (iDDMTry,nTry,datasetName))\n fileItems,out = rucioAPI.listFilesInDataset(datasetName)\n status = True\n break\n except Exception:\n status = False\n errType,errValue = sys.exc_info()[:2]\n out = '{0} {1}'.format(errType,errValue)\n time.sleep(60)\n if not status:\n self.putLog(out,'error')\n self.putLog('bad DDM response to get size of %s' % datasetName, 'error')\n return resForFailure\n # 
append\n g_filesInDsMap[datasetName] = fileItems\n # random mode\n if randomMode:\n tmpList = list(g_filesInDsMap[datasetName])\n random.shuffle(tmpList)\n retList = []\n for iSamples in range(nSamples):\n if iSamples < len(tmpList):\n tmpLFN = tmpList[iSamples]\n retMap = g_filesInDsMap[datasetName][tmpLFN]\n retMap['lfn'] = tmpLFN\n retMap['dataset'] = datasetName\n retList.append(retMap)\n return True,retList \n # return\n for tmpLFN in g_filesInDsMap[datasetName]:\n tmpVal = g_filesInDsMap[datasetName][tmpLFN]\n if tmpVal['guid'] == guid:\n retMap = tmpVal\n retMap['lfn'] = tmpLFN\n retMap['dataset'] = datasetName \n return True,retMap\n return resForFailure\n \n \n # register new dataset container with datasets\n def registerDatasetContainerWithDatasets(self,containerName,files,replicaMap,nSites=1,owner=None):\n # parse DN\n if owner is not None:\n out = rucioAPI.parse_dn(owner)\n status,userInfo = rucioAPI.finger(out)\n if not status:\n self.putLog('failed to finger: {0}'.format(userInfo))\n else:\n owner = userInfo['nickname']\n self.putLog('parsed DN={0}'.format(owner))\n # sort by locations\n filesMap = {}\n for tmpFile in files:\n tmpLocations = replicaMap[tmpFile['dataset']]\n tmpLocations.sort()\n newLocations = []\n # skip STAGING\n for tmpLocation in tmpLocations:\n if not tmpLocation.endswith('STAGING'):\n newLocations.append(tmpLocation)\n if newLocations == []:\n continue\n tmpLocations = newLocations\n tmpKey = tuple(tmpLocations)\n filesMap.setdefault(tmpKey, [])\n # append file\n filesMap[tmpKey].append(tmpFile)\n # get nfiles per dataset\n nFilesPerDataset,tmpR = divmod(len(files),nSites)\n if nFilesPerDataset == 0:\n nFilesPerDataset = 1\n maxFilesPerDataset = 1000\n if nFilesPerDataset >= maxFilesPerDataset:\n nFilesPerDataset = maxFilesPerDataset\n # register new datasets\n datasetNames = []\n tmpIndex = 1\n for tmpLocations in filesMap:\n tmpFiles = filesMap[tmpLocations]\n tmpSubIndex = 0\n while tmpSubIndex < len(tmpFiles):\n tmpDsName = containerName[:-1] + '_%04d' % tmpIndex\n tmpRet = self.registerDatasetWithLocation(tmpDsName,tmpFiles[tmpSubIndex:tmpSubIndex+nFilesPerDataset],\n #tmpLocations,owner=owner)\n tmpLocations,owner=None)\n # failed\n if not tmpRet:\n self.putLog('failed to register %s' % tmpDsName, 'error')\n return False\n # append dataset\n datasetNames.append(tmpDsName)\n tmpIndex += 1\n tmpSubIndex += nFilesPerDataset\n # register container\n nTry = 3\n for iDDMTry in range(nTry):\n try:\n self.putLog('%s/%s registerContainer %s' % (iDDMTry,nTry,containerName))\n status = rucioAPI.registerContainer(containerName,datasetNames)\n out = 'OK'\n break\n except Exception:\n status = False\n errType,errValue = sys.exc_info()[:2]\n out = '{0} {1}'.format(errType,errValue)\n time.sleep(10)\n if not status:\n self.putLog(out,'error')\n self.putLog('bad DDM response to register %s' % containerName, 'error')\n return False\n # return\n self.putLog(out)\n return True\n \n \n\n # register new dataset with locations\n def registerDatasetWithLocation(self,datasetName,files,locations,owner=None):\n resForFailure = False\n # get file info\n guids = []\n lfns = []\n fsizes = []\n chksums = []\n for tmpFile in files:\n guids.append(tmpFile['guid'])\n lfns.append(tmpFile['scope']+':'+tmpFile['lfn'])\n fsizes.append(long(tmpFile['filesize']))\n chksums.append(tmpFile['checksum'])\n # register new dataset \n nTry = 3\n for iDDMTry in range(nTry):\n try:\n self.putLog('%s/%s registerNewDataset %s len=%s' % (iDDMTry,nTry,datasetName,\n len(files)))\n out = 
rucioAPI.registerDataset(datasetName,lfns,guids,fsizes,chksums,\n lifetime=14)\n self.putLog(out)\n break\n except Exception:\n errType,errValue = sys.exc_info()[:2]\n self.putLog(\"%s %s\" % (errType,errValue),'error')\n if iDDMTry+1 == nTry:\n self.putLog('failed to register {0} in rucio'.format(datasetName))\n return resForFailure\n time.sleep(10)\n # freeze dataset \n nTry = 3\n for iDDMTry in range(nTry):\n self.putLog('%s/%s freezeDataset %s' % (iDDMTry,nTry,datasetName))\n try:\n rucioAPI.closeDataset(datasetName)\n status = True\n except Exception:\n errtype,errvalue = sys.exc_info()[:2]\n out = 'failed to freeze : {0} {1}'.format(errtype,errvalue)\n status = False\n if not status:\n time.sleep(10)\n else:\n break\n if not status:\n self.putLog(out,'error')\n self.putLog('bad DDM response to freeze %s' % datasetName, 'error')\n return resForFailure\n # register locations\n for tmpLocation in locations:\n nTry = 3\n for iDDMTry in range(nTry):\n try:\n self.putLog('%s/%s registerDatasetLocation %s %s' % (iDDMTry,nTry,datasetName,tmpLocation))\n out = rucioAPI.registerDatasetLocation(datasetName,[tmpLocation],14,owner)\n self.putLog(out)\n status = True\n break\n except Exception:\n status = False\n errType,errValue = sys.exc_info()[:2]\n self.putLog(\"%s %s\" % (errType,errValue),'error')\n if iDDMTry+1 == nTry:\n self.putLog('failed to register {0} in rucio'.format(datasetName))\n return resForFailure\n time.sleep(10)\n if not status:\n self.putLog(out,'error')\n self.putLog('bad DDM response to set owner %s' % datasetName, 'error')\n return resForFailure\n return True\n\n\n # list datasets by file GUIDs\n def listDatasetsByGUIDs(self,guids,dsFilters):\n resForFailure = (False,{})\n resForFatal = (False,{'isFatal':True})\n # get size of datasets\n nTry = 3\n for iDDMTry in range(nTry):\n self.putLog('%s/%s listDatasetsByGUIDs GUIDs=%s' % (iDDMTry, nTry, str(guids)))\n try:\n out = rucioAPI.listDatasetsByGUIDs(guids)\n status = True\n break\n except Exception:\n errtype,errvalue = sys.exc_info()[:2]\n out = 'failed to get datasets with GUIDs : {0} {1}'.format(errtype,errvalue)\n status = False\n time.sleep(10)\n if not status:\n self.putLog(out,'error')\n self.putLog('bad response to list datasets by GUIDs','error')\n if 'DataIdentifierNotFound' in out:\n return resForFatal\n return resForFailure\n self.putLog(out)\n # get map\n retMap = {}\n try:\n outMap = out\n for guid in guids:\n tmpDsNames = []\n # GUID not found\n if guid not in outMap:\n self.putLog('GUID=%s not found' % guid,'error')\n return resForFatal\n # ignore junk datasets\n for tmpDsName in outMap[guid]:\n if tmpDsName.startswith('panda') or \\\n tmpDsName.startswith('user') or \\\n tmpDsName.startswith('group') or \\\n tmpDsName.startswith('archive') or \\\n re.search('_sub\\d+$',tmpDsName) is not None or \\\n re.search('_dis\\d+$',tmpDsName) is not None or \\\n re.search('_shadow$',tmpDsName) is not None:\n continue\n # check with filters\n if dsFilters != []:\n flagMatch = False\n for tmpFilter in dsFilters:\n if fnmatch.fnmatchcase(tmpDsName, tmpFilter):\n flagMatch = True\n break\n # not match\n if not flagMatch:\n continue\n # append\n tmpDsNames.append(tmpDsName)\n # empty\n if tmpDsNames == []:\n self.putLog('no datasets found for GUID=%s' % guid)\n continue\n # duplicated\n if len(tmpDsNames) != 1:\n self.putLog('there are multiple datasets %s for GUID:%s' % (str(tmpDsNames),guid),'error')\n return resForFatal\n # append\n retMap[guid] = tmpDsNames[0]\n except Exception:\n self.putLog('failed to list 
datasets by GUIDs','error')\n return resForFailure\n return True,retMap\n\n\n # convert event/run list to datasets\n def convertEvtRunToDatasets(self,runEvtList,dsType,streamName,dsFilters,amiTag,user,runEvtGuidMap,ei_api):\n self.putLog('convertEvtRunToDatasets type=%s stream=%s dsPatt=%s amitag=%s' % \\\n (dsType,streamName,str(dsFilters),amiTag))\n # check data type\n failedRet = False,{},[]\n fatalRet = False,{'isFatal':True},[]\n streamRef = 'Stream' + dsType\n # import event lookup client\n if runEvtGuidMap == {}:\n if len(runEvtList) == 0:\n self.putLog(\"Empty list of runs and events was provided\",type='error')\n return failedRet\n # Hadoop EI\n from eventLookupClientEI import eventLookupClientEI\n elssiIF = eventLookupClientEI()\n # Oracle EI\n from pandaserver.taskbuffer.EiTaskBuffer import eiTaskBuffer\n eiTaskBuffer.init()\n # loop over all events\n nEventsPerLoop = 500\n iEventsTotal = 0\n while iEventsTotal < len(runEvtList):\n tmpRunEvtList = runEvtList[iEventsTotal:iEventsTotal+nEventsPerLoop]\n iEventsTotal += nEventsPerLoop\n regStart = datetime.datetime.utcnow()\n guidListELSSI,tmpCom,tmpOut,tmpErr = elssiIF.doLookup(tmpRunEvtList,stream=streamName,tokens=streamRef,\n amitag=amiTag,user=user,ei_api=ei_api)\n regTime = datetime.datetime.utcnow()-regStart\n self.putLog(\"Hadoop EI command: {0}\".format(tmpCom))\n self.putLog(\"Hadoop EI took {0}.{1:03d} sec for {2} events\" .format(regTime.seconds,\n regTime.microseconds/1000,\n len(tmpRunEvtList)))\n regStart = datetime.datetime.utcnow()\n \"\"\"\n statOra,guidListOraEI = eiTaskBuffer.getGUIDsFromEventIndex(tmpRunEvtList,streamName,amiTag,dsType)\n regTime = datetime.datetime.utcnow()-regStart\n self.putLog(\"Oracle EI took {0}.{1:03d} sec for {2} events\" .format(regTime.seconds,\n regTime.microseconds/1000,\n len(tmpRunEvtList)))\n \"\"\"\n # failed\n if not tmpErr in [None,''] or len(guidListELSSI) == 0:\n self.putLog(tmpCom)\n self.putLog(tmpOut)\n self.putLog(tmpErr)\n self.putLog(\"invalid return from EventIndex\",type='error')\n return failedRet\n # check events\n for runNr,evtNr in tmpRunEvtList:\n paramStr = 'Run:%s Evt:%s Stream:%s' % (runNr,evtNr,streamName)\n self.putLog(paramStr)\n tmpRunEvtKey = (long(runNr),long(evtNr))\n \"\"\"\n # check in Oracle EI\n if not tmpRunEvtKey in guidListOraEI:\n errStr = \"no GUIDs were found in Oracle EI for %s\" % paramStr\n self.putLog(errStr)\n \"\"\"\n # not found\n if not tmpRunEvtKey in guidListELSSI or len(guidListELSSI[tmpRunEvtKey]) == 0:\n self.putLog(tmpCom)\n self.putLog(tmpOut)\n self.putLog(tmpErr)\n errStr = \"no GUIDs were found in EventIndex for %s\" % paramStr\n self.putLog(errStr,type='error')\n return fatalRet\n # append\n runEvtGuidMap[tmpRunEvtKey] = guidListELSSI[tmpRunEvtKey]\n # convert to datasets\n allDatasets = []\n allFiles = []\n allLocations = {}\n for tmpIdx in runEvtGuidMap:\n tmpguids = runEvtGuidMap[tmpIdx]\n runNr,evtNr = tmpIdx\n tmpDsRet,tmpDsMap = self.listDatasetsByGUIDs(tmpguids,dsFilters)\n # failed\n if not tmpDsRet:\n self.putLog(\"failed to convert GUIDs to datasets\",type='error')\n if 'isFatal' in tmpDsMap and tmpDsMap['isFatal'] == True:\n return fatalRet\n return failedRet\n # empty\n if tmpDsMap == {}:\n self.putLog(\"there is no dataset for Run:%s Evt:%s GUIDs:%s\" % (runNr,evtNr,str(tmpguids)),type='error')\n return fatalRet\n if len(tmpDsMap) != 1:\n self.putLog(\"there are multiple datasets %s for Run:%s Evt:%s GUIDs:%s\" % (str(tmpDsMap),runNr,evtNr,\n str(tmpguids)),\n type='error')\n return fatalRet\n # append\n
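 # collect the dataset name, complete replica locations, and file info for this GUID\n for 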
tmpGUID in tmpDsMap:\n tmpDsName = tmpDsMap[tmpGUID]\n # collect dataset names\n if not tmpDsName in allDatasets:\n allDatasets.append(tmpDsName)\n # get location\n statRep,replicaMap = self.getListDatasetReplicas(tmpDsName)\n # failed\n if not statRep:\n self.putLog(\"failed to get locations for DS:%s\" % tmpDsName,type='error')\n return failedRet\n # collect locations\n tmpLocationList = []\n for tmpLocation in replicaMap:\n # use only complete replicas\n dsStatDict = replicaMap[tmpLocation][0]\n if dsStatDict['total'] is not None and dsStatDict['total'] == dsStatDict['found']:\n if not tmpLocation in tmpLocationList:\n tmpLocationList.append(tmpLocation)\n allLocations[tmpDsName] = tmpLocationList\n # get file info\n tmpFileRet,tmpFileInfo = self.getFileFromDataset(tmpDsName,tmpGUID)\n # failed\n if not tmpFileRet:\n self.putLog(\"failed to get fileinfo for GUID:%s DS:%s\" % (tmpGUID,tmpDsName),type='error')\n return failedRet\n # collect files\n allFiles.append(tmpFileInfo)\n # return\n self.putLog('converted to %s, %s, %s' % (str(allDatasets),str(allLocations),str(allFiles)))\n return True,allLocations,allFiles\n\n # put log\n def putLog(self,msg,type='debug',sendLog=False,actionTag='',tagsMap={}):\n if self.logger is None:\n tmpMsg = self.token+' '+str(msg)\n else:\n tmpMsg = str(msg)\n if type == 'error':\n if self.logger is None:\n _logger.error(tmpMsg)\n else:\n self.logger.error(tmpMsg)\n # keep last error message\n self.lastMessage = tmpMsg \n else:\n if self.logger is None:\n _logger.debug(tmpMsg)\n else:\n self.logger.debug(tmpMsg)\n # send to logger\n if sendLog:\n tmpMsg = self.token + ' - '\n if actionTag != '':\n tmpMsg += 'action=%s ' % actionTag\n for tmpTag in tagsMap:\n tmpTagVal = tagsMap[tmpTag]\n tmpMsg += '%s=%s ' % (tmpTag,tmpTagVal)\n tmpMsg += '- ' + msg \n tmpPandaLogger = PandaLogger()\n tmpPandaLogger.lock()\n tmpPandaLogger.setParams({'Type':'pd2p'})\n tmpLog = tmpPandaLogger.getHttpLogger(panda_config.loggername)\n # add message\n if type == 'error':\n tmpLog.error(tmpMsg)\n else:\n tmpLog.info(tmpMsg) \n # release HTTP handler\n tmpPandaLogger.release()\n time.sleep(1)\n \n\n # peek log\n def peekLog(self):\n return self.lastMessage\n \n \n # make T1 subscription\n def makeT1Subscription(self, allCloudCandidates, tmpDS, dsSize, prodsourcelabel,\n nUsed=None, nWaitingJobs=None, nWaitingJobsets=None):\n useSmallT1 = None\n # no candidate\n if allCloudCandidates == []:\n return True,useSmallT1\n\n # convert to siteIDs\n t1Candidates = []\n t1Weights = {}\n siteToCloud = {}\n for tmpCloud in allCloudCandidates:\n tmpCloudSpec = self.siteMapper.getCloud(tmpCloud)\n tmpT1SiteID = tmpCloudSpec['source']\n t1Candidates.append(tmpT1SiteID)\n # use MoU share\n t1Weights[tmpT1SiteID] = tmpCloudSpec['mcshare']\n # reverse lookup\n siteToCloud[tmpT1SiteID] = tmpCloud\n # get free disk size\n self.putLog(\"getting free disk size for T1 PD2P\") \n retFreeSizeMap,freeSizeMap = self.getFreeDiskSize(tmpDS, t1Candidates, prodsourcelabel)\n if not retFreeSizeMap:\n self.putLog(\"failed to get free disk size\",type='error',sendLog=True)\n return False,useSmallT1\n # run brokerage\n tmpJob = JobSpec()\n tmpJob.AtlasRelease = ''\n self.putLog(\"run brokerage for T1-T1 for %s\" % tmpDS)\n selectedSite = self.chooseSite(t1Weights,freeSizeMap,dsSize)\n self.putLog(\"site for T1 PD2P -> %s\" % selectedSite)\n # simulation\n if self.simul:\n return True,useSmallT1\n # no candidate\n if selectedSite is None:\n self.putLog(\"no candidate for T1-T1\")\n return False,useSmallT1\n # make 
subscription\n tmpJob.computingSite = selectedSite\n tmpSiteSpec = self.siteMapper.getSite(tmpJob.computingSite)\n scope_input, scope_output = select_scope(tmpSiteSpec, prodsourcelabel)\n subRet,dq2ID = self.makeSubscription(tmpDS, tmpJob.computingSite, scope_input)\n tmpTagsMap = {'site':tmpJob.computingSite,'dataset':tmpDS}\n if nUsed is not None:\n tmpTagsMap['nused'] = nUsed\n if nWaitingJobs is not None:\n tmpTagsMap['nwaitingjobs'] = nWaitingJobs\n if nWaitingJobsets is not None:\n tmpTagsMap['nwaitingjobsets'] = nWaitingJobsets\n self.putLog(\"made subscription for T1-T1 to %s:%s\" % (tmpJob.computingSite,dq2ID),sendLog=True,\n actionTag='SELECTEDT1',tagsMap=tmpTagsMap)\n # check if small cloud is used\n if siteToCloud[tmpJob.computingSite] in cloudsWithSmallT1:\n useSmallT1 = siteToCloud[tmpJob.computingSite]\n # update database\n if subRet:\n self.taskBuffer.addUserSubscription(tmpDS,[dq2ID])\n return True,useSmallT1\n else:\n return False,useSmallT1\n\n\n # make T2 subscription with MoU share\n def makeT2SubscriptionMoU(self, allCandidates, tmpDS, dsSize, pd2pType, prodsourcelabel,\n nUsed=None, nWaitingJobs=None, nWaitingJobsets=None):\n # no candidate\n if allCandidates == []:\n return True,None\n # get MoU share\n if self.shareMoUForT2 is None:\n self.shareMoUForT2 = self.taskBuffer.getMouShareForT2PD2P()\n # convert to DQ2 ID\n t2Candidates = []\n t2Weights = {}\n dq2List = []\n for tmpCandidate in allCandidates:\n tmpCandidateSpec = self.siteMapper.getSite(tmpCandidate)\n scope_input, scope_output = select_scope(tmpCandidateSpec, prodsourcelabel)\n tmpDQ2ID = self.getDQ2ID(tmpCandidate, tmpDS, scope_input)\n if not tmpDQ2ID in dq2List:\n # append\n dq2List.append(tmpDQ2ID)\n # get MoU share\n if tmpDQ2ID not in self.shareMoUForT2:\n # site is undefined in t_regions_replication \n self.putLog(\"%s is not in MoU table\" % tmpDQ2ID,type='error')\n continue\n if not self.shareMoUForT2[tmpDQ2ID]['status'] in ['ready']:\n # site is not ready\n self.putLog(\"%s is not ready in MoU table\" % tmpDQ2ID)\n continue\n tmpWeight = self.shareMoUForT2[tmpDQ2ID]['weight']\n # skip if the weight is 0\n if tmpWeight == 0:\n self.putLog(\"%s has 0 weight in MoU table\" % tmpDQ2ID)\n continue\n # collect siteIDs and weights for brokerage\n t2Candidates.append(tmpCandidate)\n t2Weights[tmpCandidate] = tmpWeight\n # sort for reproducibility\n t2Candidates.sort()\n # get free disk size\n self.putLog(\"getting free disk size for T2 %s PD2P\" % pd2pType) \n retFreeSizeMap,freeSizeMap = self.getFreeDiskSize(tmpDS, t2Candidates, prodsourcelabel)\n if not retFreeSizeMap:\n self.putLog(\"failed to get free disk size\",type='error',sendLog=True)\n return False,None\n # run brokerage\n tmpJob = JobSpec()\n tmpJob.AtlasRelease = ''\n self.putLog(\"run brokerage for T2 with %s for %s\" % (pd2pType,tmpDS))\n selectedSite = self.chooseSite(t2Weights,freeSizeMap,dsSize)\n self.putLog(\"site for T2 %s PD2P -> %s\" % (pd2pType,selectedSite))\n # simulation\n if self.simul:\n return True,selectedSite\n # no candidate\n if selectedSite is None:\n self.putLog(\"no candidate for T2 with %s\" % pd2pType)\n return False,None\n # make subscription\n selectedSiteSpec = self.siteMapper.getSite(selectedSite)\n scope_input, scope_output = select_scope(selectedSiteSpec, prodsourcelabel)\n subRet,dq2ID = self.makeSubscription(tmpDS, selectedSite, scope_input)\n tmpTagsMap = {'site':selectedSite,'dataset':tmpDS}\n if nUsed is not None:\n tmpTagsMap['nused'] = nUsed\n if nWaitingJobs is not None:\n tmpTagsMap['nwaitingjobs'] = 
nWaitingJobs\n if nWaitingJobsets is not None:\n tmpTagsMap['nwaitingjobsets'] = nWaitingJobsets\n self.putLog(\"made subscription for T2 with %s to %s:%s\" % (pd2pType,selectedSite,dq2ID),sendLog=True,\n actionTag='SELECTEDT2_%s' % pd2pType,tagsMap=tmpTagsMap)\n # update database\n if subRet:\n self.taskBuffer.addUserSubscription(tmpDS,[dq2ID])\n return True,selectedSite\n else:\n return False,None\n\n\n # choose site\n def chooseSite(self,canWeights,freeSizeMap,datasetSize):\n # loop over all candidates\n totalW = 0\n allCandidates = []\n for tmpCan in canWeights:\n tmpW = canWeights[tmpCan]\n # size check\n if tmpCan in freeSizeMap:\n # disk threshold for PD2P max(5%,3TB)\n diskThresholdPD2P = 1024 * 3\n thrForThisSite = long(freeSizeMap[tmpCan]['total'] * 5 / 100)\n if thrForThisSite < diskThresholdPD2P:\n thrForThisSite = diskThresholdPD2P\n remSpace = freeSizeMap[tmpCan]['total'] - freeSizeMap[tmpCan]['used']\n if remSpace-datasetSize < thrForThisSite:\n self.putLog(' skip: disk shortage %s-%s < %s' % (remSpace,datasetSize,thrForThisSite))\n continue\n self.putLog('weight %s %s' % (tmpCan,tmpW))\n # get total weight \n totalW += tmpW\n # append candidate\n allCandidates.append(tmpCan)\n # no candidate\n if allCandidates == []:\n return None\n # sort for reproducibility\n allCandidates.sort()\n # choose site \n rNumber = random.random() * totalW\n for tmpCan in allCandidates:\n rNumber -= canWeights[tmpCan]\n if rNumber <= 0:\n return tmpCan\n return allCandidates[-1]\n","repo_name":"eschanet/QMonit","sub_path":"pandaserver/dataservice/DynDataDistributer.py","file_name":"DynDataDistributer.py","file_ext":"py","file_size_in_byte":61974,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"63"} +{"seq_id":"33523263082","text":"import argparse\nimport os,sys\nimport time\nimport subprocess\nfrom subprocess import PIPE, Popen\n\nsignal_idx = [0, 1, 2, 3, 4, 5, 6, 7]\nsignal_label = ['LFM', 'Barker', 'costas', 'Frank', 'P1', 'P2', 'P3', 'P4']\n\ndef inference_cmd(img_path):\n cmd = \"curl -s http://localhost:8081/hi -d {}\".format(img_path)\n\n process = subprocess.Popen(cmd, shell=True, stdout=PIPE, stderr=None)\n output = process.communicate()[0]\n \n pred_idx = int(bytes.decode(output))\n return pred_idx\n\nparser = argparse.ArgumentParser(description=\"receive the image path\")\nparser.add_argument(\"img_path\", type=str, help=\"input image path\")\nargs = parser.parse_args()\n\nidx = inference_cmd(args.img_path)\nlabel = signal_label[idx]\nprint(label)\n\n# python rader_single.py img_path\n","repo_name":"Jasookii/EfficientNet-Pytorch-TensorRT","sub_path":"Serve/Client/rader_single.py","file_name":"rader_single.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"16631093426","text":"import sys\nimport pickle\n\nx = pickle.load(open(sys.argv[1], 'rb'))\nprint(\"Length\", len(x))\nprint(x[0])\nprint(\"Press y to print all data and n to exit\")\n\nt = input()\nif t == 'y':\n for each in x:\n print(each)\n","repo_name":"hthuwal/sign-language-gesture-recognition","sub_path":"loadpicklefileanddisplay.py","file_name":"loadpicklefileanddisplay.py","file_ext":"py","file_size_in_byte":217,"program_lang":"python","lang":"en","doc_type":"code","stars":399,"dataset":"github-code","pt":"63"} +{"seq_id":"35389106414","text":"from src import *\r\nfrom src.optim import levmarq\r\nfrom src.math3d.rotations import *\r\n\r\nclass Gyro_Calibrator(levmarq.LM):\r\n \r\n
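 # Levenberg-Marquardt residual: rotate each magnetometer sample by the\r\n # calibrated gyro increment and compare it against the next sample\r\n def 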
residual_update(self):\r\n dt, rot_vecs, magnos = self.data\r\n\r\n S = self.t_state[0]\r\n b = self.t_state[1]\r\n a_min_b = (rot_vecs - b)\r\n calibrated = ein('ij, tj -> ti', S, a_min_b)\r\n\r\n rot_mats = exp_rotmat(-dt*calibrated)\r\n\r\n # print(dt.shape, rot_vecs.shape, magnos.shape, rot_mats.shape)\r\n\r\n v = ein('tij, tj -> ti', rot_mats, magnos[:-1])\r\n f = v - magnos[1:]\r\n # print(f)\r\n d_eS = ein('t, tip, tj -> tijp', -dt[:,0],\r\n skew_symm(magnos[:-1]), a_min_b) # Tx3x3xP\r\n d_eb = ein('t, tjp, ji -> tip', dt[:,0],\r\n skew_symm(magnos[:-1]), S)\r\n d_eb = d_eb[..., None, :, :] # Tx1x3xP\r\n\r\n self.t_residual = f # Tx3\r\n self.t_derivs = torch.cat((d_eS, d_eb), dim=-3) # Tx4x3xP\r\n\r\n def objective_update(self):\r\n self.t_obj = ein(\r\n 'tp, tp', self.t_residual, self.t_residual)\r\n\r\n def state_update(self):\r\n state_mat = torch.cat(\r\n [self.state[0], self.state[1][..., None, :]], dim=-2)\r\n\r\n J = self.t_derivs # Tx4x3xP\r\n JJT = ein('tijp, tmnp -> im', J, J) # 4x4\r\n G = ein('tijp, tp -> ij', J, self.t_residual) # 4x3\r\n S_trust_scale = JJT * torch.eye(4)\r\n new_state = state_mat - \\\r\n torch.linalg.solve(JJT + S_trust_scale *\r\n self.distrust_lambda, G) # 4x3\r\n\r\n self.t_state = [new_state[:3], new_state[3]]\r\n\r\n def apply_model(self, data):\r\n Sinv = self.state[0]\r\n b = self.state[1]\r\n return ein('ij, tj -> ti', Sinv, data - b)","repo_name":"nikolageorgiev2000/IMU-Pose-Estimation","sub_path":"src/calibration/matrix_gyro_calibrator.py","file_name":"matrix_gyro_calibrator.py","file_ext":"py","file_size_in_byte":1755,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"} +{"seq_id":"6707675278","text":"from WebDriverIO.common.page_objects_selectors import *\nfrom Configuration.BasePage import *\nfrom selenium.webdriver.common.keys import Keys\n\n\nclass HomePage(BasePage):\n def __init__(self, driver, timeout=10):\n BasePage.__init__(self, driver=driver, timeout=timeout)\n\n def open_homepage(self, url):\n self.driver.get(url)\n self.visibility_of_element(HomePageSelectors.MainContainer)\n print ('--> Homepage was successfully loaded!')\n\n\nclass Header(BasePage):\n def __init__(self, driver, timeout=10):\n BasePage.__init__(self, driver=driver, timeout=timeout)\n\n def click_api_link(self):\n self.click_on_element('API Link', HeaderSelectors.APILink)\n\n def click_search_field(self):\n self.click_on_element('Search Field', HeaderSelectors.SearchField)\n\n def click_on_io_logo(self):\n self.click_on_element('WebDriver IO logo', HeaderSelectors.IOLogo)\n\n\nclass SearchWidget(BasePage):\n def __init__(self, driver, timeout=10):\n BasePage.__init__(self, driver=driver, timeout=timeout)\n try:\n self.visibility_of_element(SearchWidgetSelectors.SearchInputField)\n print ('--> Search Popup was successfully loaded!')\n except:\n raise Exception('#### Search popup was not loaded' + traceback.format_exc())\n\n def search_for_text(self, text):\n self.clickable_element(SearchWidgetSelectors.SearchInputField).send_keys(text)\n\n def click_on_specific_search_result(self, exp_search_result):\n time.sleep(0.2)\n iteration = 1\n all_search_results = self.visibility_of_elements(SearchWidgetSelectors.SearchResults)\n for act_search_result in all_search_results:\n if exp_search_result == act_search_result.text:\n act_search_result.click()\n print('--> Clicked on search result: ' + exp_search_result)\n break\n else:\n iteration += 1\n if iteration > len(all_search_results):\n raise Exception('#### Could not find 
given search result: ' + exp_search_result + '\\n' + traceback.format_exc())\n\n def assert_clicked_result_was_loaded(self, exp_result):\n assert self.visibility_of_element(GeneralSelectors.TitleHeader).text == exp_result, '#### Clicked result was not loaded'\n print('--> Clicked result ' + exp_result + ' was successfully loaded!')\n\n def assert_no_results_for_invalid_keyword(self, keyword):\n exp_result = 'No results for \"' + keyword + '\"'\n assert self.visibility_of_element(SearchWidgetSelectors.NoResultsSearchTitle).text == exp_result, '#### Wrong result for invalid keyword'\n print('--> Valid error message for not found results is shown!')\n\n def verify_recent_history_results_are_saved(self, exp_recent_results):\n all_recent_results = self.visibility_of_elements(SearchWidgetSelectors.RecentSearches)\n all_recent_results_text = []\n\n for recent_result in all_recent_results:\n all_recent_results_text.append(recent_result.text)\n\n error_count = 0\n for exp_result in exp_recent_results:\n if exp_result not in all_recent_results_text:\n print ('#### Expected search result ' + exp_result + ' is not in Recent list')\n error_count += 1\n\n if error_count != 0:\n raise Exception('#### There is difference between expected and actual Recent results. Check the logs!')\n else:\n print('--> All expected search results are in the Recent list')\n\n def save_recent_result_in_favourite(self, result_to_save):\n self.click_on_element('Favourite button for result ' + result_to_save, SearchWidgetSelectors.favourite_button_specific_recent_result(result_to_save))\n\n def verify_result_saved_in_favourite(self, saved_result):\n try:\n self.visibility_of_element(SearchWidgetSelectors.result_in_favourites(saved_result))\n print('--> Result ' + saved_result + ' successfully saved in Favorites')\n except:\n raise Exception('#### Result ' + saved_result + ' not saved in Favourites! 
\\n' + traceback.format_exc())\n\n\n def delete_specific_recent_result(self, result_to_delete):\n self.click_on_element('Delete button for result ' + result_to_delete, SearchWidgetSelectors.remove_button_specific_recent_result(result_to_delete))\n\n def verify_deleted_result_not_in_recent_history(self, deleted_result):\n counter = 0\n while counter < 10:\n if self.is_element_displayed(SearchWidgetSelectors.remove_button_specific_recent_result(deleted_result)):\n counter +=1\n time.sleep(0.5)\n else:\n print('--> Element ' + deleted_result + ' successfully deleted from Recent history')\n break\n if counter == 10:\n raise Exception('#### Element not deleted from Recent history for 5 seconds!')\n\n def close_search_widget(self):\n ActionChains(self.driver).send_keys(Keys.ESCAPE).perform()\n print('--> Escape key pressed with Search widget on screen!')\n\n\nclass APIDocumentation(BasePage):\n def __init__(self, driver, timeout=10):\n BasePage.__init__(self, driver=driver, timeout=timeout)\n\n def expand_protocols_section(self):\n self.click_on_element('Protocols section', APIDocumentationSelectors.ProtocolsSection)\n\n def verify_elements_in_section_list(self, section, exp_list):\n error_count = 0\n act_list_elements = self.visibility_of_elements(APIDocumentationSelectors.all_elements_in_given_section(section))\n act_list_strings = []\n\n for element in act_list_elements:\n act_list_strings.append(element.text) # getting the strings for all web elements in the opened section\n\n for exp_list_element in exp_list:\n if exp_list_element in act_list_strings:\n act_list_strings.remove(exp_list_element) # removing checked element from actual list\n else:\n print('#### Expected element: ' + exp_list_element + ' is not in the ' + section + ' section!!!')\n error_count += 1\n\n if len(act_list_strings) != 0: # checking if any new elements were added into Protocols section on website\n print('#### There are elements in Actual ' + section + ' section list that are not in the expected one: ' + str(\n act_list_strings))\n error_count += 1\n\n if error_count == 0:\n print('--> All expected elements in ' + section + ' section match with actual elements!')\n else:\n raise Exception('#### There are differences between expected and actual lists in ' + section + ' section. 
Please check the log!!!')\n","repo_name":"sgeorgiev87/QuickBaseExercise","sub_path":"WebDriverIO/common/page_objects.py","file_name":"page_objects.py","file_ext":"py","file_size_in_byte":6762,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"71382339722","text":"\nmemo = {}\nsumList = []\ndef howSum(target, nums) -> list[int]:\n\n if target in memo: return memo[target]\n if target == 0: return []\n if target < 0: return None\n \n for n in nums:\n remainder = target - n\n remainderResult = howSum(remainder,nums)\n if remainderResult is not None:\n # memoize under the original target (not the remainder) and build a\n # fresh list so the cached sub-result is never mutated\n memo[target] = remainderResult + [n]\n return memo[target]\n memo[target] = None\n return None\n\nprint(howSum(600, [3,4,5]))\n\"\"\"\nprint(howSum(7, [2,3]))\nprint(howSum(7, [2,4]))\nprint(howSum(8, [2,3,5]))\nprint(howSum(300, [7,14]))\n\"\"\"","repo_name":"carlosnavaja16/SWEInterviewProblems","sub_path":"howSum.py","file_name":"howSum.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"71567810122","text":"from copy import deepcopy\nimport logging\nimport sys\nlogger = logging.getLogger(__name__)\n\ndef makeAttribute(name, readable_name, description, data_type, default_value):\n ret = {\n \"name\" : name,\n \"readable_name\" : readable_name,\n \"description\" : description,\n \"data_type\" : data_type,\n \"default_value\" : default_value,\n }\n return ret\n\ndef makeAllowedRelation(descr_from, descr_to):\n ret = {\n \"from\" : descr_from,\n \"to\" : descr_to,\n }\n return ret\n\nclass ConfigurationObject:\n def __init__(self, configuration=None, cname_or_cid=-1, id=-1):\n self.configuration = configuration\n self.attributes = {}\n self.cid = self.configuration.convertCNameIfNeeded(cname_or_cid)\n self.id = id\n self.is_loaded = False\n\n def getId(self):\n return (self.cid, self.id)\n\n def __contains__(self, item):\n return item in self.attributes\n\n def __delitem__(self, key):\n del self.attributes[key]\n\n def get_default_attribute_value(self, key):\n class_info = self.configuration.classes[self.cid]\n\n #Check the entity's prototype for its existence and value\n if class_info[\"type\"]==\"entity_class\":\n protos = self.configuration.getAllNeighbours(self, \"prototypes\", role=\"from\")\n len_protos = len(protos)\n #print self.id, protos[0].id\n #if len_protos>1: raise Exception(\"Multiple prototypes of instance %s\" % str(self.getId()))\n if len_protos==1:\n if protos[0].id != self.id:\n return protos[0][key]\n\n #Check default value\n for attr in class_info[\"attributes\"]:\n if attr[\"name\"]==key: return attr[\"default_value\"]\n raise KeyError(key)\n\n def __getitem__(self, key):\n if key==\"id\": return self.id\n if key==\"cid\": return self.cid\n #Check instance value\n if key in self.attributes: return self.attributes[key]\n return self.get_default_attribute_value(key)\n\n def __setitem__(self, key, value):\n self.attributes[key] = value\n\nclass Entity(ConfigurationObject):\n\n def save(self):\n return self.configuration.saveEntityAttributes(self)\n\n def load(self):\n return self.configuration.loadEntityAttributes(self)\n\n def delete(self):\n return self.configuration.deleteEntity(self)\n\n def getNeighbours(self, relation_cname_or_cid=None, filter_func=None):\n return self.configuration.getAllNeighbours(self, relation_cname_or_cid, filter_func=filter_func)\n\n def getNeighboursFrom(self, relation_cname_or_cid=None, filter_func=None):\n
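 # neighbours on the 'from' side of incoming edges\n return 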
self.configuration.getAllNeighbours(self, relation_cname_or_cid, filter_func=filter_func, role=\"from\")\n\n def getNeighboursTo(self, relation_cname_or_cid=None, filter_func=None):\n return self.configuration.getAllNeighbours(self, relation_cname_or_cid, filter_func=filter_func, role=\"to\")\n\n def getTitle(self, length=None):\n ret_arr = []\n attributes = self.configuration.classes[self.cid][\"attributes\"]\n for attr in attributes[:1]:\n attr_name = attr[\"name\"]\n ret_arr += [self[attr_name]]\n ret = \" \".join(ret_arr)\n protos = self.configuration.getAllNeighbours(self, \"prototypes\", role=\"from\")\n len_protos = len(protos)\n if len_protos>=1:\n ret += \" [\" + protos[0].getTitle() + \"]\"\n if length and len(ret)>length: ret = ret[:(length-3)] + \"...\"\n return ret\n\n def getDescription(self, length=None):\n ret_arr = []\n attributes = self.configuration.classes[self.cid][\"attributes\"]\n for attr in attributes[1:]:\n attr_name = attr[\"name\"]\n ret_arr += [str(self[attr_name])]\n ret = \" \".join(ret_arr)\n if length and len(ret)>length: ret = ret[:(length-3)] + \"...\"\n return ret\n\n def __str__(self):\n ret = \"%s %s: \" % (self.configuration.classes[self.cid][\"name\"], self.id)\n if self.is_loaded:\n ret += \"%s\" % self.attributes\n else:\n ret += \"[Not loaded]\"\n return ret\n\nclass Relation(ConfigurationObject):\n def __init__(self, configuration, cname_or_cid=-1, ent_from_id=None, ent_to_id=None, id=-1):\n ConfigurationObject.__init__(self, configuration, cname_or_cid, id)\n self.from_id = ent_from_id\n self.to_id = ent_to_id\n\n def getFromEntity(self):\n return self.configuration.loadEntityByEntityId(self.from_id)\n\n def getToEntity(self):\n return self.configuration.loadEntityByEntityId(self.to_id)\n\n def save(self):\n return self.configuration.saveRelationAttributes(self)\n\n def load(self):\n return self.configuration.loadRelationAttributes(self)\n\n def delete(self):\n return self.configuration.deleteRelation(self)\n\nclass Configuration:\n MUL_ZERO_OR_ONE = \"zero_or_one\"\n MUL_ONE = \"one\"\n MUL_ONE_OR_MORE = \"one_or_more\"\n MUL_ZERO_OR_MORE = \"zero_or_more\"\n\n TYPE_INTEGER = \"int\"\n TYPE_DOUBLE = \"double\"\n TYPE_STRING = \"string\"\n TYPE_TEXT = \"text\"\n TYPE_DICTIONARY = \"dict\"\n\n def __init__(self):\n self.classes = {} ## cid to class header dict\n self.cnames_to_cids = {}\n\n def initialize(self, storage):\n allowed_rels = []\n cids = self.classes.keys()\n cids.sort()\n for cls_id in cids:\n cls_info = self.classes[cls_id]\n if cls_info[\"type\"] != \"entity_class\" : continue\n allowed_rels += [makeAllowedRelation(\n {\"cname\": cls_info[\"name\"], \"multiplicity\" : self.MUL_ZERO_OR_MORE},\n {\"cname\": cls_info[\"name\"], \"multiplicity\" : self.MUL_ONE}\n )]\n\n self.addRelationClass(10001, \"prototypes\", \"Prototypes\", \"Prototypes links\", [\n ], allowed_rels)\n\n self.storage = storage\n return True\n\n def __del__(self):\n del self.storage\n\n def addEntityClass(self, cid, name, readable_name, description, attributes_list):\n if cid in self.classes: logger.warning('Entity Class cid has been redefined: %s.%s = %s!' % (__name__, name, cid))\n self.classes[cid] = {\n \"type\" : \"entity_class\",\n \"name\" : name,\n \"readable_name\" : readable_name,\n \"description\" : description,\n \"attributes\" : attributes_list,\n }\n if name in self.cnames_to_cids: logger.warning('Entity Class name has been redefined: %s.%s = %s!' 
% (__name__, name, cid))\n self.cnames_to_cids[name] = cid\n return True\n\n def addRelationClass(self, cid, name, readable_name, description, attributes_list, allowed_relations):\n if cid in self.classes: logger.warning('Relation Class cid has been redefined: %s.%s = %s!' % (__name__, name, cid))\n for allowed_rel in allowed_relations:\n for role, cfg in allowed_rel.iteritems():\n if cfg[\"cname\"] not in self.cnames_to_cids: logger.warning('Entity Class name has not been found: %s!' % (cfg[\"cname\"]))\n cfg[\"cid\"] = self.cnames_to_cids[cfg[\"cname\"]]\n\n self.classes[cid] = {\n \"type\" : \"relation_class\",\n \"name\" : name,\n \"readable_name\" : readable_name,\n \"description\" : description,\n \"attributes\" : attributes_list,\n \"allowed_relations\" : allowed_relations,\n }\n if name in self.cnames_to_cids: logger.warning('Relation Class name has been redefined: %s.%s = %s!' % (__name__, name, cid))\n self.cnames_to_cids[name] = cid\n return True\n\n def convertCNameIfNeeded(self, cname_or_cid):\n cid = cname_or_cid\n # print(self.cnames_to_cids)\n if type(cid) == str: cid = self.cnames_to_cids[cname_or_cid]\n return cid\n\n ################## ENTITIES\n\n def makeEntity(self, cname_or_cid, id=-1):\n ret = Entity(self, cname_or_cid, id)\n return ret\n\n def loadEntity(self, cname_or_cid, id):\n ret = self.makeEntity(cname_or_cid, id)\n ret.load()\n return ret\n\n def loadEntityByEntityId(self, ent_id):\n ret = self.makeEntity(ent_id[0], ent_id[1])\n ret.load()\n return ret\n\n def deleteEntity(self, entity):\n eid = entity.getId()\n self.storage.nxgraph.remove_node(eid) # All edges will be deleted also\n\n def saveEntityAttributes(self, entity):\n if entity.id == -1:\n max_ids = self.storage.nxgraph.graph[\"max_ids\"]\n if entity.cid not in max_ids: max_ids[entity.cid] = 1\n entity.id = max_ids[entity.cid]\n max_ids[entity.cid] += 1\n\n self.storage.nxgraph.add_node(entity.getId())\n self.storage.nxgraph.node[entity.getId()] = entity.attributes\n return True\n\n def loadEntityAttributes(self, entity, data=None):\n id = entity.getId()\n # TODO: check for existence\n if not data:\n if id not in self.storage.nxgraph.node: raise Exception(\"Object %s not found in the storage\" % str(id))\n data = self.storage.nxgraph.node[id]\n entity.attributes = deepcopy(data)\n# for (key, value) in data.iteritems():\n# entity.attributes[key] = value\n entity.is_loaded = True\n return True\n\n ################### RELATIONS\n\n def makeRelation(self, cname_or_id, ent_or_id_from=None, ent_or_id_to=None, id=-1):\n if isinstance(ent_or_id_from, ConfigurationObject): ent_or_id_from = ent_or_id_from.getId()\n if isinstance(ent_or_id_to, ConfigurationObject): ent_or_id_to = ent_or_id_to.getId()\n ret = Relation(self, cname_or_id, ent_or_id_from, ent_or_id_to, id)\n return ret\n\n def findRelations(self, cname_or_cid, ent1, ent2):\n cid = self.convertCNameIfNeeded(cname_or_cid)\n ents = [ent1, ent2]\n ret = []\n for i in (0,1):\n t_ent1 = ents[i]\n t_ent2 = ents[1-i]\n t_eid1 = t_ent1.getId()\n t_eid2 = t_ent2.getId()\n if t_eid2 not in self.storage.nxgraph[t_eid1]: continue\n relations = self.storage.nxgraph[t_eid1][t_eid2]\n for rid in relations.keys():\n if rid[0] == cid:\n trel = self.makeRelation(cid, t_ent1, t_ent2, rid[1])\n data = self.storage.nxgraph[t_eid1][t_eid2][rid]\n trel.attributes = deepcopy(data)\n ret += [trel]\n return ret\n\n def loadRelation(self, cname_or_cid, id):\n cid = self.convertCNameIfNeeded(cname_or_cid)\n rid = (cid, id)\n (eid1,eid2) = 
self.storage.nxgraph.graph[\"edges_by_ids\"][rid]\n rel = self.makeRelation(cid, eid1, eid2, id)\n rel.load()\n return rel\n\n def deleteRelation(self, relation):\n rid = relation.getId()\n (eid1, eid2) = self.storage.nxgraph.graph[\"edges_by_ids\"][rid]\n self.storage.nxgraph.remove_edge(eid1, eid2, key=rid)\n\n def saveRelationAttributes(self, relation):\n (cid,id) = relation.getId()\n if id == -1:\n max_ids = self.storage.nxgraph.graph[\"max_ids\"]\n if cid not in max_ids: max_ids[cid] = 1\n id = max_ids[cid]\n max_ids[cid] += 1\n self.storage.nxgraph.graph[\"edges_by_ids\"][(cid, id)] = (relation.from_id, relation.to_id)\n # TODO: check for existence\n self.storage.nxgraph.add_edge(relation.from_id, relation.to_id, key=(cid,id), attr_dict=relation.attributes)\n return True\n\n def loadRelationAttributes(self, relation, data=None):\n rid = relation.getId()\n (eid1,eid2) = self.storage.nxgraph.graph[\"edges_by_ids\"][rid]\n # TODO: check for existence\n if (data == None):\n if eid1 not in self.storage.nxgraph \\\n or eid2 not in self.storage.nxgraph[eid1] \\\n or rid not in self.storage.nxgraph[eid1][eid2]:\n raise Exception(\"Edge %s = %s -> %s not found in the storage\" % (rid, eid1, eid2))\n data = self.storage.nxgraph[eid1][eid2][rid]\n relation.attributes = deepcopy(data)\n# self.__writeAttributesByClassInfo(relation)\n# for (key, value) in data.iteritems():\n# relation.attributes[key] = value\n relation.is_loaded = True\n return True\n\n ################### QUERIES\n\n def getAllEntities(self, cname_or_cid=None, filter_func=None, load_instances=True):\n #TODO: make optimization by creating indexes by cid at initial configuration load\n if not cname_or_cid and not filter_func and not load_instances: return self.storage.nxgraph.nodes(data=False)\n nodes = self.storage.nxgraph.nodes(data=True)\n ret = []\n cid = self.convertCNameIfNeeded(cname_or_cid)\n for node in nodes:\n will_add = True\n if cname_or_cid: will_add = will_add and node[0][0] == cid\n if filter_func:\n params = {\"cid\" : node[0][0], \"id\" : node[0][1]} # Make a copy of dict for lame storage protection\n params.update(node[1])\n try:\n will_add = will_add and filter_func(params)\n except KeyError:\n continue\n if will_add:\n if not load_instances: ret += [node[0]]\n else: # Load instance without querying storage\n instance = self.makeEntity(node[0][0], node[0][1])\n self.loadEntityAttributes(instance, node[1])\n ret += [instance]\n return ret\n\n def getAllRelations(self, entity, relation_cname_or_cid=None, filter_func=None, load_instances=True, role=None, include_prototype_relations=True):\n edges = []\n if not role or role==\"from\":\n edges += self.storage.nxgraph.in_edges(entity.getId(), keys=True, data=True)\n if not role or role==\"to\":\n edges += self.storage.nxgraph.out_edges(entity.getId(), keys=True, data=True)\n\n ret = []\n rcid = self.convertCNameIfNeeded(relation_cname_or_cid)\n\n for edge in edges:\n (from_id, to_id, rel_id, data) = edge\n will_add = True\n if relation_cname_or_cid: will_add = will_add and rel_id[0] == rcid\n if filter_func:\n params = {\"from_cid\" : from_id[0], \"from_id\" : from_id[1],\"to_cid\" : to_id[0], \"to_id\" : to_id[1], \"cid\" : rel_id[0], \"id\" : rel_id[1]} # Make a copy of dict for lame storage protection\n params.update(data)\n will_add = will_add and filter_func(params)\n if will_add:\n if not load_instances: ret += [(rel_id, from_id, to_id)] # Rel_id, Ent1_id, Ent2_id\n else: # Load instance without querying 
storage\n relation = self.makeRelation(rel_id[0], from_id, to_id, rel_id[1])\n self.loadRelationAttributes(relation, data)\n ret += [relation]\n if include_prototype_relations:\n protos = self.getAllNeighbours(entity, \"prototypes\", role=\"from\", include_prototype_relations=False)\n len_protos = len(protos)\n if len_protos>=1:\n ret += self.getAllRelations(protos[0], relation_cname_or_cid, filter_func, load_instances, role, include_prototype_relations)\n return ret\n\n def getAllNeighbours(self, entity, relation_cname_or_cid=None, filter_func=None, load_instances=True, role=None, include_prototype_relations=True):\n ret = []\n if not role or role==\"from\":\n relations = self.getAllRelations(entity, relation_cname_or_cid, None, load_instances, \"from\", include_prototype_relations)\n for relation in relations:\n will_add = True\n neighbour = relation.getFromEntity()\n if filter_func:\n will_add = will_add and filter_func(neighbour)\n if will_add: ret += [neighbour]\n if not role or role==\"to\":\n relations = self.getAllRelations(entity, relation_cname_or_cid, None, load_instances, \"to\", include_prototype_relations)\n for relation in relations:\n will_add = True\n neighbour = relation.getToEntity()\n if filter_func:\n will_add = will_add and filter_func(neighbour)\n if will_add: ret += [neighbour]\n return ret\n\n def getAllAllowedNeighboursPatternsByRelationsClassesIds(self, entity, relation_cname_or_cid=None):\n ret = {}\n cids = self.classes.keys()\n cids.sort()\n rcid = self.convertCNameIfNeeded(relation_cname_or_cid)\n for cid in cids:\n if rcid and rcid != cid: continue\n if self.classes[cid][\"type\"]!=\"relation_class\": continue\n t_cls = self.classes[cid]\n allowed_relations = t_cls[\"allowed_relations\"]\n for allowed_rel in allowed_relations:\n for role_pair in [(\"from\", \"to\"), (\"to\", \"from\")]:\n will_add = True\n if allowed_rel[role_pair[0]][\"cid\"] != entity.getId()[0]:\n will_add = False\n if will_add:\n if cid not in ret: ret[cid] = []\n ret[cid] += [{\"entity_role\" : role_pair[0], \"neighbour_role\" : role_pair[1], \"neighbour\" : allowed_rel[role_pair[1]]}]\n return ret\n","repo_name":"alexmakeev/pycdb","sub_path":"pycdb_kernel_apps/graph_db/configuration.py","file_name":"configuration.py","file_ext":"py","file_size_in_byte":17444,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"63"} +{"seq_id":"18053737514","text":"\"\"\"An Asyncio POP3 client class.\r\n\r\nBased on the J. Myers POP3 draft, Jan. 96\r\n\"\"\"\r\n\r\n# Author: Synodriver \r\n# [heavily stealing from stdlib]\r\n\r\n# Example (see the test function at the end of this file)\r\n\r\n# Imports\r\nimport sys\r\nimport asyncio\r\nimport re\r\nfrom typing import Tuple\r\n\r\ntry:\r\n import ssl\r\n\r\n HAVE_SSL = True\r\nexcept ImportError:\r\n HAVE_SSL = False\r\n\r\n__all__ = [\"POP3\", \"error_proto\"]\r\n\r\n\r\n# Exception raised when an error or invalid response is received:\r\n\r\nclass error_proto(Exception):\r\n pass\r\n\r\n\r\n# Standard Port\r\nPOP3_PORT = 110\r\n\r\n# POP SSL PORT\r\nPOP3_SSL_PORT = 995\r\n\r\n# Line terminators (we always output CRLF, but accept any of CRLF, LFCR, LF)\r\nCR = b'\\r'\r\nLF = b'\\n'\r\nCRLF = CR + LF\r\n\r\n# maximal line length when calling readline(). This is to prevent\r\n# reading arbitrary length lines. RFC 1939 limits POP3 line length to\r\n# 512 characters, including CRLF. 
We have selected 2048 just to be on\r\n# the safe side.\r\n_MAXLINE = 2048\r\n\r\n\r\nclass TCPSocket:\r\n def __init__(self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter):\r\n self._reader = reader\r\n self._writer = writer\r\n\r\n @classmethod\r\n async def create_connection(cls, host: str, port: int, timeout: float, ssl: ssl.SSLContext = None) -> \"TCPSocket\":\r\n reader, writer = await asyncio.wait_for(asyncio.open_connection(host, port, ssl=ssl), timeout)\r\n return cls(reader, writer)\r\n\r\n async def sendall(self, data: bytes) -> None:\r\n self._writer.write(data)\r\n await self._writer.drain()\r\n\r\n async def read(self, n: int = -1) -> bytes:\r\n return await self._reader.read(n)\r\n\r\n async def readexactly(self, n: int) -> bytes:\r\n return await self._reader.readexactly(n)\r\n\r\n async def readline(self, size: int = -1) -> bytes:\r\n if size == -1:\r\n return await self._reader.readline()\r\n else:\r\n buffer = bytearray()\r\n for _ in range(size):\r\n data = await self.readexactly(1)\r\n if data != b\"\\n\":\r\n buffer.extend(data)\r\n else:\r\n break\r\n return bytes(buffer)\r\n\r\n async def close(self) -> None:\r\n self._writer.close()\r\n await self._writer.wait_closed()\r\n\r\n\r\nclass POP3:\r\n \"\"\"This class supports both the minimal and optional command sets.\r\n Arguments can be strings or integers (where appropriate)\r\n (e.g.: retr(1) and retr('1') both work equally well.\r\n\r\n Minimal Command Set:\r\n USER name user(name)\r\n PASS string pass_(string)\r\n STAT stat()\r\n LIST [msg] list(msg = None)\r\n RETR msg retr(msg)\r\n DELE msg dele(msg)\r\n NOOP noop()\r\n RSET rset()\r\n QUIT quit()\r\n\r\n Optional Commands (some servers support these):\r\n RPOP name rpop(name)\r\n APOP name digest apop(name, digest)\r\n TOP msg n top(msg, n)\r\n UIDL [msg] uidl(msg = None)\r\n CAPA capa()\r\n STLS stls()\r\n UTF8 utf8()\r\n\r\n Raises one exception: 'error_proto'.\r\n\r\n Instantiate with:\r\n POP3(hostname, port=110)\r\n\r\n NB: the POP protocol locks the mailbox from user\r\n authorization until QUIT, so be sure to get in, suck\r\n the messages, and quit, each time you access the\r\n mailbox.\r\n\r\n POP is a line-based protocol, which means large mail\r\n messages consume lots of python cycles reading them\r\n line-by-line.\r\n\r\n If it's available on your mail server, use IMAP4\r\n instead, it doesn't suffer from the two problems\r\n above.\r\n \"\"\"\r\n\r\n encoding = 'UTF-8'\r\n\r\n def __init__(self, host, port=POP3_PORT,\r\n timeout=1):\r\n self.host = host\r\n self.port = port\r\n self._tls_established = False\r\n sys.audit(\"poplib.connect\", self, host, port)\r\n self.sock = None # type: TCPSocket\r\n self.timeout = timeout\r\n # self.file = self.sock.makefile('rb')\r\n self._debugging = 0\r\n self.welcome = None # type: bytes\r\n\r\n async def connect(self):\r\n \"\"\"\r\n need coonnect first\r\n :return:\r\n \"\"\"\r\n assert self.sock is None\r\n self.sock = await self._create_socket(self.timeout)\r\n self.welcome = await self._getresp()\r\n\r\n async def _create_socket(self, timeout, ssl: ssl.SSLContext = None) -> TCPSocket:\r\n return await TCPSocket.create_connection(self.host, self.port, timeout, ssl)\r\n\r\n async def _putline(self, line):\r\n if self._debugging > 1:\r\n print('*put*', repr(line))\r\n sys.audit(\"poplib.putline\", self, line)\r\n await self.sock.sendall(line + CRLF)\r\n\r\n # Internal: send one command to the server (through _putline())\r\n\r\n async def _putcmd(self, line):\r\n if self._debugging:\r\n 
print('*cmd*', repr(line))\r\n line = bytes(line, self.encoding)\r\n await self._putline(line)\r\n\r\n # Internal: return one line from the server, stripping CRLF.\r\n # This is where all the CPU time of this module is consumed.\r\n # Raise error_proto('-ERR EOF') if the connection is closed.\r\n\r\n async def _getline(self) -> Tuple[bytes, int]:\r\n line = await self.sock.readline(_MAXLINE + 1)\r\n if len(line) > _MAXLINE:\r\n raise error_proto('line too long')\r\n\r\n if self._debugging > 1:\r\n print('*get*', repr(line))\r\n if not line:\r\n raise error_proto('-ERR EOF')\r\n octets = len(line)\r\n # server can send any combination of CR & LF\r\n # however, 'readline()' returns lines ending in LF\r\n # so only possibilities are ...LF, ...CRLF, CR...LF\r\n if line[-2:] == CRLF:\r\n return line[:-2], octets\r\n if line[:1] == CR:\r\n return line[1:-1], octets\r\n return line[:-1], octets\r\n\r\n # Internal: get a response from the server.\r\n # Raise 'error_proto' if the response doesn't start with '+'.\r\n\r\n async def _getresp(self) -> bytes:\r\n resp, o = await self._getline()\r\n if self._debugging > 1:\r\n print('*resp*', repr(resp))\r\n if not resp.startswith(b'+'):\r\n raise error_proto(resp)\r\n return resp\r\n\r\n # Internal: get a response plus following text from the server.\r\n\r\n async def _getlongresp(self):\r\n resp = await self._getresp()\r\n list = [];\r\n octets = 0\r\n line, o = await self._getline()\r\n while line != b'.':\r\n if line.startswith(b'..'):\r\n o = o - 1\r\n line = line[1:]\r\n octets = octets + o\r\n list.append(line)\r\n line, o = await self._getline()\r\n return resp, list, octets\r\n\r\n # Internal: send a command and get the response\r\n\r\n async def _shortcmd(self, line):\r\n await self._putcmd(line)\r\n return await self._getresp()\r\n\r\n # Internal: send a command and get the response plus following text\r\n\r\n async def _longcmd(self, line):\r\n await self._putcmd(line)\r\n return await self._getlongresp()\r\n\r\n # These can be useful:\r\n\r\n def getwelcome(self):\r\n return self.welcome\r\n\r\n def set_debuglevel(self, level):\r\n self._debugging = level\r\n\r\n # Here are all the POP commands:\r\n\r\n async def user(self, user):\r\n \"\"\"Send user name, return response\r\n\r\n (should indicate password required).\r\n \"\"\"\r\n return await self._shortcmd('USER %s' % user)\r\n\r\n async def pass_(self, pswd):\r\n \"\"\"Send password, return response\r\n\r\n (response includes message count, mailbox size).\r\n\r\n NB: mailbox is locked by server from here to 'quit()'\r\n \"\"\"\r\n return await self._shortcmd('PASS %s' % pswd)\r\n\r\n async def stat(self):\r\n \"\"\"Get mailbox status.\r\n\r\n Result is tuple of 2 ints (message count, mailbox size)\r\n \"\"\"\r\n retval = await self._shortcmd('STAT')\r\n rets = retval.split()\r\n if self._debugging:\r\n print('*stat*', repr(rets))\r\n numMessages = int(rets[1])\r\n sizeMessages = int(rets[2])\r\n return (numMessages, sizeMessages)\r\n\r\n async def list(self, which=None):\r\n \"\"\"Request listing, return result.\r\n\r\n Result without a message number argument is in form\r\n ['response', ['mesg_num octets', ...], octets].\r\n\r\n Result when a message number argument is given is a\r\n single response: the \"scan listing\" for that message.\r\n \"\"\"\r\n if which is not None:\r\n return await self._shortcmd('LIST %s' % which)\r\n return await self._longcmd('LIST')\r\n\r\n async def retr(self, which):\r\n \"\"\"Retrieve whole message number 'which'.\r\n\r\n Result is in form ['response', 
['line', ...], octets].\r\n \"\"\"\r\n return await self._longcmd('RETR %s' % which)\r\n\r\n async def dele(self, which):\r\n \"\"\"Delete message number 'which'.\r\n\r\n Result is 'response'.\r\n \"\"\"\r\n return await self._shortcmd('DELE %s' % which)\r\n\r\n async def noop(self):\r\n \"\"\"Does nothing.\r\n\r\n One supposes the response indicates the server is alive.\r\n \"\"\"\r\n return await self._shortcmd('NOOP')\r\n\r\n async def rset(self):\r\n \"\"\"Unmark all messages marked for deletion.\"\"\"\r\n return await self._shortcmd('RSET')\r\n\r\n async def quit(self):\r\n \"\"\"Signoff: commit changes on server, unlock mailbox, close connection.\"\"\"\r\n resp = await self._shortcmd('QUIT')\r\n await self.close()\r\n return resp\r\n\r\n async def close(self):\r\n \"\"\"Close the connection without assuming anything about it.\"\"\"\r\n await self.sock.close()\r\n\r\n # __del__ = quit\r\n\r\n # optional commands:\r\n\r\n async def rpop(self, user):\r\n \"\"\"Not sure what this does.\"\"\"\r\n return await self._shortcmd('RPOP %s' % user)\r\n\r\n timestamp = re.compile(br'\\+OK.[^<]*(<.*>)')\r\n\r\n async def apop(self, user, password):\r\n \"\"\"Authorisation\r\n\r\n - only possible if server has supplied a timestamp in initial greeting.\r\n\r\n Args:\r\n user - mailbox user;\r\n password - mailbox password.\r\n\r\n NB: mailbox is locked by server from here to 'quit()'\r\n \"\"\"\r\n secret = bytes(password, self.encoding)\r\n m = self.timestamp.match(self.welcome)\r\n if not m:\r\n raise error_proto('-ERR APOP not supported by server')\r\n import hashlib\r\n digest = m.group(1) + secret\r\n digest = hashlib.md5(digest).hexdigest()\r\n return await self._shortcmd('APOP %s %s' % (user, digest))\r\n\r\n async def top(self, which, howmuch):\r\n \"\"\"Retrieve message header of message number 'which'\r\n and first 'howmuch' lines of message body.\r\n\r\n Result is in form ['response', ['line', ...], octets].\r\n \"\"\"\r\n return await self._longcmd('TOP %s %s' % (which, howmuch))\r\n\r\n async def uidl(self, which=None):\r\n \"\"\"Return message digest (unique id) list.\r\n\r\n If 'which', result contains unique id for that message\r\n in the form 'response mesgnum uid', otherwise result is\r\n the list ['response', ['mesgnum uid', ...], octets]\r\n \"\"\"\r\n if which is not None:\r\n return await self._shortcmd('UIDL %s' % which)\r\n return await self._longcmd('UIDL')\r\n\r\n async def utf8(self):\r\n \"\"\"Try to enter UTF-8 mode (see RFC 6856). 
Returns server response.\r\n \"\"\"\r\n return await self._shortcmd('UTF8')\r\n\r\n async def capa(self):\r\n \"\"\"Return server capabilities (RFC 2449) as a dictionary\r\n >>> c=aiopoplib.POP3('localhost')\r\n >>> await c.capa()\r\n {'IMPLEMENTATION': ['Cyrus', 'POP3', 'server', 'v2.2.12'],\r\n 'TOP': [], 'LOGIN-DELAY': ['0'], 'AUTH-RESP-CODE': [],\r\n 'EXPIRE': ['NEVER'], 'USER': [], 'STLS': [], 'PIPELINING': [],\r\n 'UIDL': [], 'RESP-CODES': []}\r\n >>>\r\n\r\n Really, according to RFC 2449, the cyrus folks should avoid\r\n having the implementation split into multiple arguments...\r\n \"\"\"\r\n\r\n def _parsecap(line):\r\n lst = line.decode('ascii').split()\r\n return lst[0], lst[1:]\r\n\r\n caps = {}\r\n try:\r\n resp = await self._longcmd('CAPA')\r\n rawcaps = resp[1]\r\n for capline in rawcaps:\r\n capnm, capargs = _parsecap(capline)\r\n caps[capnm] = capargs\r\n except error_proto as _err:\r\n raise error_proto('-ERR CAPA not supported by server')\r\n return caps\r\n\r\n async def stls(self, context: ssl.SSLContext = None):\r\n \"\"\"Start a TLS session on the active connection as specified in RFC 2595.\r\n\r\n context - a ssl.SSLContext\r\n \"\"\"\r\n raise NotImplementedError\r\n # if not HAVE_SSL:\r\n # raise error_proto('-ERR TLS support missing')\r\n # if self._tls_established:\r\n # raise error_proto('-ERR TLS session already established')\r\n # caps = self.capa()\r\n # if not 'STLS' in caps:\r\n # raise error_proto('-ERR STLS not supported by server')\r\n # if context is None:\r\n # context = ssl._create_stdlib_context()\r\n # resp = await self._shortcmd('STLS')\r\n # self.sock = context.wrap_socket(self.sock,\r\n # server_hostname=self.host)\r\n # self.file = self.sock.makefile('rb')\r\n # self._tls_established = True\r\n # return resp\r\n\r\n\r\nif HAVE_SSL:\r\n\r\n class POP3_SSL(POP3):\r\n \"\"\"POP3 client class over SSL connection\r\n\r\n Instantiate with: POP3_SSL(hostname, port=995, keyfile=None, certfile=None,\r\n context=None)\r\n\r\n hostname - the hostname of the pop3 over ssl server\r\n port - port number\r\n keyfile - PEM formatted file that contains your private key\r\n certfile - PEM formatted certificate chain file\r\n context - a ssl.SSLContext\r\n\r\n See the methods of the parent class POP3 for more documentation.\r\n \"\"\"\r\n\r\n def __init__(self, host, port=POP3_SSL_PORT, keyfile=None, certfile=None,\r\n timeout=1, context=None):\r\n if context is not None and keyfile is not None:\r\n raise ValueError(\"context and keyfile arguments are mutually \"\r\n \"exclusive\")\r\n if context is not None and certfile is not None:\r\n raise ValueError(\"context and certfile arguments are mutually \"\r\n \"exclusive\")\r\n if keyfile is not None or certfile is not None:\r\n import warnings\r\n warnings.warn(\"keyfile and certfile are deprecated, use a \"\r\n \"custom context instead\", DeprecationWarning, 2)\r\n self.keyfile = keyfile\r\n self.certfile = certfile\r\n if context is None:\r\n context = ssl._create_stdlib_context(certfile=certfile,\r\n keyfile=keyfile)\r\n self.context = context # type: ssl.SSLContext\r\n super().__init__(host, port, timeout)\r\n\r\n async def _create_socket(self, timeout, ssl: ssl.SSLContext = None):\r\n sock = await super()._create_socket(timeout, ssl=self.context)\r\n return sock\r\n\r\n def stls(self, keyfile=None, certfile=None, context=None):\r\n \"\"\"The method unconditionally raises an exception since the\r\n STLS command doesn't make any sense on an already established\r\n SSL/TLS session.\r\n \"\"\"\r\n raise 
error_proto('-ERR TLS session already established')\r\n\r\n\r\n    __all__.append(\"POP3_SSL\")\r\n\r\n\r\nasync def main(argv):\r\n    a = POP3(argv[1])\r\n    await a.connect()\r\n    print(a.getwelcome())\r\n    await a.user(argv[2])\r\n    await a.pass_(argv[3])\r\n    await a.list()\r\n    (numMsgs, totalSize) = await a.stat()\r\n    for i in range(1, numMsgs + 1):\r\n        (header, msg, octets) = await a.retr(i)\r\n        print(\"Message %d:\" % i)\r\n        for line in msg:\r\n            # RETR returns raw bytes; decode them before concatenating for display\r\n            print('   ' + line.decode(a.encoding, errors='replace'))\r\n        print('-----------------------')\r\n    await a.quit()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    import sys\r\n\r\n    asyncio.run(main(sys.argv))\r\n","repo_name":"ProjectHentai/aiopoplib","sub_path":"aiopoplib.py","file_name":"aiopoplib.py","file_ext":"py","file_size_in_byte":16739,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"}
{"seq_id":"6741666122","text":"#!/usr/bin/env python3\n \nimport json\nimport os\n\nclass Group:\n    def __init__(self, batch, groupName, permRepName):\n        self.batch = batch\n        self.groupName = groupName\n        self.permRepName = permRepName\n\nprint(\"Reading theta.json\")\n\nf = open(\"brook/theta.json\", \"r\")\nfileContent = f.read()\nf.close()\n\nprint(\"theta.json read. Parsing as json\")\n\ntheta = json.loads(fileContent)\n\nprint(\"Parsed theta.json successfully. Applying sorts to theta.\")\n\nfor simpleGroup in theta[\"caboodle\"]:\n    for isoGroup in simpleGroup.get(\"isoGroups\", []):\n        permReps = isoGroup.get(\"permReps\", [])\n        permReps.sort(key=lambda x: x[\"points\"].zfill(8))\n        char0Reps = isoGroup.get(\"char0Reps\", []) \n        char0Reps.sort(key=lambda x: x[\"dimension\"].zfill(8) + x[\"id\"].zfill(4) + x[\"ring\"].zfill(8))\n        modularReps = isoGroup.get(\"modularReps\", []) \n        modularReps.sort(key=lambda x: x[\"characteristic\"].zfill(8) + x[\"dimension\"].zfill(8) + x[\"id\"].zfill(4) + x[\"field\"].zfill(8) )\n        \nprint(\"Sorts applied successfully. Creating _data folder\")\n\nos.mkdir(\"docs/_data\")\n\nprint(\"Created _data folder. Writing theta.json to output.\")\n\nwith open('docs/_data/theta.json', 'w') as f:\n    json.dump(theta, f)\n\nprint(\"theta.json written. Creating allrep folder\")\n\nos.mkdir(\"docs/_allrep\")\n\nprint(\"allrep folder created. Writing allreps\")\n\nfor simpleGroup in theta[\"caboodle\"]:\n    with open('docs/_allrep/' + simpleGroup[\"name\"] + '.md', 'w') as f:\n        f.write(\"---\\r\\n\")\n        f.write(\"data: \" + json.dumps(simpleGroup) + '\\r\\n')\n        f.write(\"layout: allrep\\r\\n\")\n        f.write(\"---\\r\\n\") \n\nprint(\"allreps written. Creating permreps folder\")\n\nos.mkdir(\"docs/_permrep\")\n\nprint(\"permrep folder created. Writing permreps\")\n\nfor simpleGroup in theta[\"caboodle\"]:\n    groupName = simpleGroup[\"name\"]\n    batch = simpleGroup[\"batch\"]\n    for isoGroup in simpleGroup.get(\"isoGroups\", []):\n        for permRep in isoGroup.get(\"permReps\", []):\n            for basis in permRep.get(\"bases\", []):\n                with open('docs/_permrep/' + permRep[\"name\"] + basis[\"name\"] +'.md', 'w') as f:\n                    f.write(\"---\\r\\n\")\n                    f.write(\"permRep: \" + json.dumps(permRep) + '\\r\\n')\n                    f.write(\"basis: \" + json.dumps(basis) + '\\r\\n')\n                    f.write(\"simpleGroup: \" + json.dumps(simpleGroup) + '\\r\\n')\n                    f.write(\"isoGroup: \" + json.dumps(isoGroup) + '\\r\\n')\n                    f.write(\"layout: permrep\\r\\n\")\n                    f.write(\"---\\r\\n\") \n                    \nprint(\"permrep written. Creating char0rep folder\")\n\nos.mkdir(\"docs/_char0rep\")\n\nprint(\"char0rep folder created. 
Writing char0reps to char0rep folder\")\n\nfor simpleGroup in theta[\"caboodle\"]:\n    groupName = simpleGroup[\"name\"]\n    batch = simpleGroup[\"batch\"]\n    for isoGroup in simpleGroup.get(\"isoGroups\", []):\n        for char0Rep in isoGroup.get(\"char0Reps\", []):\n            for basis in char0Rep.get(\"bases\", []):\n                with open('docs/_char0rep/' + char0Rep[\"name\"] + basis[\"name\"] +'.md', 'w') as f:\n                    f.write(\"---\\r\\n\")\n                    f.write(\"char0rep: \" + json.dumps(char0Rep) + '\\r\\n')\n                    f.write(\"basis: \" + json.dumps(basis) + '\\r\\n')\n                    f.write(\"simpleGroup: \" + json.dumps(simpleGroup) + '\\r\\n')\n                    f.write(\"isoGroup: \" + json.dumps(isoGroup) + '\\r\\n')\n                    f.write(\"layout: char0rep\\r\\n\")\n                    f.write(\"---\\r\\n\") \n                    \nprint(\"char0reps written. Creating modularrep folder\")\n\nos.mkdir(\"docs/_modularrep\")\n\nprint(\"modularrep folder created. Writing modularreps to modularrep folder\")\n\nfor simpleGroup in theta[\"caboodle\"]:\n    groupName = simpleGroup[\"name\"]\n    batch = simpleGroup[\"batch\"]\n    for isoGroup in simpleGroup.get(\"isoGroups\", []):\n        for modularRep in isoGroup.get(\"modularReps\", []):\n            for basis in modularRep.get(\"bases\", []):\n                with open('docs/_modularrep/' + modularRep[\"name\"] + basis[\"name\"] +'.md', 'w') as f:\n                    f.write(\"---\\r\\n\")\n                    f.write(\"modularrep: \" + json.dumps(modularRep) + '\\r\\n')\n                    f.write(\"basis: \" + json.dumps(basis) + '\\r\\n')\n                    f.write(\"simpleGroup: \" + json.dumps(simpleGroup) + '\\r\\n')\n                    f.write(\"isoGroup: \" + json.dumps(isoGroup) + '\\r\\n')\n                    f.write(\"layout: modularrep\\r\\n\")\n                    f.write(\"---\\r\\n\") \n                    \nprint(\"modular reps written.\")\n","repo_name":"chrisparker/JekyllTest","sub_path":".github/scripts/produce-jekyll-files.py","file_name":"produce-jekyll-files.py","file_ext":"py","file_size_in_byte":4192,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
{"seq_id":"37038560540","text":"\"\"\"AyudaEnPython: https://www.facebook.com/groups/ayudapython\n\nWrite a program that asks the user for the size (n) of a matrix and\nbuilds a tridiagonal matrix. If, for example, the user enters n = 6,\nthe following matrix should be shown:\n\n    Enter the size of the matrix: 6\n    [\n    [ 2.  1.  0.  0.  0.  0.]\n    [-1.  4.  2.  0.  0.  0.]\n    [ 0. -2.  6.  3.  0.  0.]\n    [ 0.  0. -3.  8.  4.  0.]\n    [ 0.  0.  0. -4. 10.  5.]\n    [ 0.  0.  0.  0. -5. 12.]\n    ]\n\"\"\"\n# pip install prototools\nfrom prototools import matrix, show_matrix\nfrom prototools.colorize import magenta, cyan, yellow\n\n\ndef f(m, d, color):\n    for i in range(len(m)):\n        m[i][i] = d(str(i*2 + 2))\n    for i in range(len(m)-1):\n        m[i][i+1] = color(str(i + 1))\n    for i in range(len(m)-1):\n        m[i+1][i] = color(str((i + 1) * -1))\n\n\nif __name__ == \"__main__\":\n    n = 6\n    m = matrix(n, n, (0, 0))\n    f(m, yellow, cyan)\n    show_matrix(m, width=5, color=magenta)\n","repo_name":"AyudaEnPython/Soluciones","sub_path":"ejercicios/arreglos/tridiagonal.py","file_name":"tridiagonal.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"es","doc_type":"code","stars":5,"dataset":"github-code","pt":"63"}
{"seq_id":"73118210121","text":"import numpy as np\nfrom imfittre.fit.image_fit import Fit\n\nclass Gaussian(Fit):\n    def fit_function(self, x, y, x0=0, y0=0, A=0, sigmax=0, sigmay=0, theta=0, offset=0, gradx=0, grady=0):\n        \"\"\"A 2D Gaussian function with a linear background.\n\n        Args:\n            x (numpy.ndarray): The x values at which to evaluate the function.\n            y (numpy.ndarray): The y values at which to evaluate the function.\n            x0 (float): The x coordinate of the center of the Gaussian.\n            y0 (float): The y coordinate of the center of the Gaussian.\n            A (float): The amplitude of the Gaussian.\n            sigmax (float): The standard deviation of the Gaussian in the x direction.\n            sigmay (float): The standard deviation of the Gaussian in the y direction.\n            theta (float): The angle of the Gaussian in radians.\n            offset (float): The offset of the linear background.\n            gradx (float): The gradient of the linear background in the x direction.\n            grady (float): The gradient of the linear background in the y direction.\n\n        Returns:\n            numpy.ndarray: The function evaluated at x and y.\n        \"\"\"\n        x = np.array(x)\n        y = np.array(y)\n        x = x - x0\n        y = y - y0\n        xprime = x*np.cos(theta) - y*np.sin(theta)\n        yprime = x*np.sin(theta) + y*np.cos(theta)\n        return A*np.exp(-0.5*(xprime**2/sigmax**2 + yprime**2/sigmay**2)) + offset + gradx*x + grady*y\n    \n    def post_process(self):\n        res = self.result[\"params\"]\n        im_data = self.data\n\n        # Note that this will only work for equal x and y binning\n        px_size = self.config[\"calibrations\"][\"px_size_um\"]*im_data[\"binning\"][0]\n        eff = self.config[\"calibrations\"][\"eff\"]\n        lmda = self.config[\"calibrations\"][\"lambda_m\"]\n\n        derived = {}\n        derived[\"sigmax_um\"] = res[\"sigmax\"]*px_size\n        derived[\"sigmay_um\"] = res[\"sigmay\"]*px_size\n        derived[\"N\"] = (1 / eff) * 2 * res[\"A\"] * (1E-6)**2 * derived[\"sigmax_um\"] * derived[\"sigmay_um\"] * (2*np.pi)**2 / (3*lmda**2)\n\n        self.result[\"derived\"] = derived\n","repo_name":"cal-miller-harvard/ImfitTre","sub_path":"imfittre/fit/fit_functions.py","file_name":"fit_functions.py","file_ext":"py","file_size_in_byte":2131,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
{"seq_id":"41988663495","text":"\"\"\" Entrypoint for the Robinhood scraper. Pulls data from the top instruments and pushes the\nIDs of tradable instruments into a RabbitMQ queue. 
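The broker must already be reachable; an illustrative local invocation (the\nvalues shown are simply the option defaults below) is:\npython scrape_instruments.py --rabbitmq_host localhost --rabbitmq_port 5672. 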
\"\"\"\n\nfrom time import sleep\nfrom typing import Dict, Iterable, List, Tuple\n\nimport click\nimport pika\nimport pymongo\nfrom Robinhood import Robinhood\n\nfrom common import parse_throttle_res\nfrom db import get_db, set_instruments_finished, set_update_started, lock_cache\n\n\ndef get_tradable_instrument_ids(instruments: List[Dict[str, str]]) -> List[Tuple[str, str]]:\n \"\"\" Returns the instrument IDs and symbols of all tradable instruments in the provided list\n of instruments. \"\"\"\n tradable_instruments: Iterable[Dict[str, str]] = filter(\n lambda instrument: instrument.get(\"tradability\") == \"tradable\", instruments\n )\n\n tuples: Iterable[Tuple[str, str]] = map(\n lambda instrument: (instrument[\"id\"], instrument[\"symbol\"]), tradable_instruments\n )\n\n return list(tuples)\n\n\n@click.command()\n@click.option(\"--rabbitmq_host\", type=click.STRING, default=\"localhost\")\n@click.option(\"--rabbitmq_port\", type=click.INT, default=5672)\n@click.option(\"--scraper_request_cooldown_seconds\", type=click.FLOAT, default=1.0)\ndef cli(rabbitmq_host: str, rabbitmq_port: int, scraper_request_cooldown_seconds: float):\n rabbitmq_connection = pika.BlockingConnection(\n pika.ConnectionParameters(host=rabbitmq_host, port=rabbitmq_port)\n )\n rabbitmq_channel = rabbitmq_connection.channel()\n rabbitmq_channel.queue_declare(queue=\"instrument_ids\")\n\n # Lock and flush the existing cache\n print('Locking the cache in preparation for update...')\n set_update_started()\n\n trader = Robinhood()\n res = trader.get_url(\"https://api.robinhood.com/instruments/\")\n\n db = get_db()\n index_col = db[\"index\"]\n index_col.create_index(\"instrument_id\", unique=True)\n\n total_ids = 0\n quotes = []\n instrument_ids = []\n while True:\n fetched_instruments: List[Dict[str, str]] = res[\"results\"]\n tradable_instrument_ids = get_tradable_instrument_ids(fetched_instruments)\n total_ids += len(tradable_instrument_ids)\n\n for instrument_id, symbol in tradable_instrument_ids:\n try:\n index_col.insert_one({\"instrument_id\": instrument_id, \"symbol\": symbol})\n except pymongo.errors.DuplicateKeyError:\n pass\n\n instrument_ids.append(instrument_id)\n quotes.append(symbol)\n\n if len(quotes) == 20:\n rabbitmq_channel.basic_publish(\n exchange=\"\", routing_key=\"symbols\", body=\",\".join(quotes)\n )\n\n rabbitmq_channel.basic_publish(\n exchange=\"\", routing_key=\"instrument_ids\", body=\",\".join(instrument_ids)\n )\n\n quotes = []\n instrument_ids = []\n\n if res.get(\"detail\"):\n # Request was throttled; wait for a cooldown before continuing\n\n cooldown_seconds = parse_throttle_res(res[\"detail\"])\n print(\n \"Instruments fetch request failed; waiting for {} second cooldown...\".format(\n cooldown_seconds\n )\n )\n sleep(cooldown_seconds)\n elif res.get(\"next\"):\n # There are more instruments to scrape. 
Wait for the standard cooldown and then\n            # continue by fetching the next request url.\n\n            sleep(scraper_request_cooldown_seconds)\n            res = trader.get_url(res[\"next\"])\n        else:\n            # We're done scraping; there are no more instruments in the list.\n\n            rabbitmq_channel.basic_publish(\n                exchange=\"\", routing_key=\"symbols\", body=\",\".join(quotes)\n            )\n            rabbitmq_channel.basic_publish(\n                exchange=\"\", routing_key=\"instrument_ids\", body=\",\".join(instrument_ids)\n            )\n\n            # Publish a finished message over the channels to indicate that there are no more\n            # items to process in this run.\n            rabbitmq_channel.basic_publish(exchange=\"\", routing_key=\"symbols\", body=\"__DONE\")\n            rabbitmq_channel.basic_publish(exchange=\"\", routing_key=\"instrument_ids\", body=\"__DONE\")\n\n            # Mark the instrument scrape as finished\n            set_instruments_finished()\n\n            print(\n                \"Finished scraping; fetched a total of {} tradable instrument IDs.\".format(\n                    total_ids\n                )\n            )\n            break\n\n    rabbitmq_connection.close()\n\n\nif __name__ == \"__main__\":\n    cli()  # pylint: disable=E1120\n","repo_name":"iozeey/robin","sub_path":"scraper/src/scrape_instruments.py","file_name":"scrape_instruments.py","file_ext":"py","file_size_in_byte":4602,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"63"}
{"seq_id":"26689651712","text":"\"\"\"\n10.4.6 Terminating a process\n\nAlthough sending a signal that tells a process it should finish is better done\nwith the \"poison pill\" method (section 10.4.10), in cases where a process is\nlikely to hang or deadlock it is useful to be able to terminate it forcibly.\nThis is provided by calling the terminate() method on the child process object.\n\"\"\"\nimport multiprocessing\nimport time\n\n\ndef slow_worker():\n    print('Starting worker')\n    time.sleep(0.1)\n    print('Finished worker')\n\n\nif __name__ == '__main__':\n    p = multiprocessing.Process(target=slow_worker)\n    print('BEFORE:', p, p.is_alive())\n\n    p.start()\n    print('DURING: ', p, p.is_alive())\n\n    p.terminate()\n    print('TERMINATED:', p, p.is_alive())\n    \"\"\"\n    It is important to call\n    join() on the process after terminating it, so that the code managing\n    the process has enough time to update the object's state to reflect\n    the premature termination.\n    \"\"\"\n    p.join()\n    print('JOINED:', p, p.is_alive())\n","repo_name":"BakeNecko/standart_library_python_examples","sub_path":"chapter10/_multiprocessing/multiprocessing_terminate.py","file_name":"multiprocessing_terminate.py","file_ext":"py","file_size_in_byte":1650,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
{"seq_id":"34818221015","text":"class Solution:\n    def Function(self,n,num):\n        res = []\n        for i in range(len(num)):\n            res.append(self.mex(num[0:i]+num[i+1:]))\n        return res\n    def mex(self,num):\n        # Smallest non-negative integer not present; a set handles duplicates\n        # and gaps that the previous sort-based scan missed\n        seen = set(num)\n        mex = 0\n        while mex in seen:\n            mex += 1\n        return mex\nn = int(input())\nnum = list(map(int,input().split()))\nfor r in Solution().Function(n,num):\n    print(r,end=\" \")\n\n# 4\n# 5 0 3 1","repo_name":"DongYun666/leetcode","sub_path":"美团2022校招笔试/第二题.py","file_name":"第二题.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"}
{"seq_id":"23389271810","text":"import cv2\r\nfrom io import BytesIO\r\nimport numpy as np\r\nfrom
tensorflow.keras.preprocessing import image\r\nimport matplotlib.pyplot as plt\r\nfrom tensorflow.keras.applications import imagenet_utils\r\nfrom PIL import Image\r\nfrom keras.models import load_model\r\nimport tensorflow_addons as tfa\r\n\r\n\r\nimport tensorflow as tf\r\n\r\nmodel = None\r\nrms = tf.keras.optimizers.RMSprop(learning_rate=0.0001)\r\n\r\ndef load_model1():\r\n model = load_model('resnet_model.h5')\r\n print(\"model loaded\")\r\n model.compile(loss='categorical_crossentropy',\r\n optimizer=rms,\r\n metrics=['accuracy'])\r\n print(\"model compiled successfully\")\r\n return model\r\n\r\n# def read_imagefile(file)->Image.Image:\r\n# image = cv2.imread(file)\r\n# return image\r\n\r\ndef read_imagefile(file) -> Image.Image:\r\n image = Image.open(BytesIO(file))\r\n return image \r\n\r\ndef predict(image1: Image.Image):\r\n global model\r\n if model is None:\r\n model = load_model1()\r\n\r\n image_resized= np.asarray(image1.resize((224,224)))[..., :3] \r\n finalimg = np.expand_dims(image_resized,axis=0)\r\n finalimg = tf.keras.applications.mobilenet_v2.preprocess_input(finalimg)\r\n predictions = model.predict(finalimg)\r\n final_prediction = max(predictions)\r\n predicted_class1 = np.argmax(predictions)\r\n print(predicted_class1)\r\n item = image_name(predicted_class1)\r\n return item\r\n\r\ndef image_name(predicted_class):\r\n food_items = [\"donuts\", \"fries\", \"noodles\", \"pizza\", \"samosa\"]\r\n return food_items[predicted_class] ","repo_name":"aditya19ml/Food-Image-Classifier","sub_path":"image_predictor.py","file_name":"image_predictor.py","file_ext":"py","file_size_in_byte":1550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"41797460984","text":"# coding: utf-8\n# Tournament Class\n# Runs a series of games, maintains the score for human and computer player, processes game serialization/restoration requests & declares winner.\n#\n\n\"\"\"\t************************************************************\n* Name:\t\t\tVivek Pandey\t\t\t\t\t\t\t\t*\n* Project:\t\tDuell Python\t\t\t\t\t\t\t\t*\n* Class:\t\tCMPS 366\t\t\t\t\t\t\t\t\t*\n* Date:\t\t\t12/10/2016\t\t\t\t\t\t\t\t\t*\n************************************************************ \"\"\"\n\nfrom msvcrt import getche\nfrom copy import deepcopy\n\nfrom Game import Game\nfrom Notifications import Notifications\nfrom Serializer import Serializer\nfrom Board import Board\n\nclass Tournament:\n\n #Default Constructor\n def __init__(self):\n #Serializer components\n self.serializer = Serializer()\n self.restoreFilePath = None\n\n #Tournament details\n self.humanScore = 0\n self.botScore = 0\n self.nextPlayer = None\n self.gameResult = None\n \n #Booleans for decision making\n self.quit = False\n self.restoringGame = False\n \n #Notifications purposes\n self.notifications = Notifications()\n\n \"\"\" *********************************************************************\n Function Name: play_tournament\n\n Purpose: Runs a series of games and maintains score until user serializes or quits\n\n Parameters: none\n\n Return Value: none\n\n Local Variables: none\n\n Assistance Received: none\n ********************************************************************* \"\"\"\n # Runs a Tournament\n def play_tournament(self):\n #Ask user if they want to restore the tournament from existing file\n self.notifications.msg_restore_from_file()\n if self.wants_to_continue():\n self.restoringGame = True\n self.notifications.msg_enter_file_path()\n self.restoreFilePath = 
raw_input()\n\n #Start the tournament and keep going until user chooses to quit or serialize\n while True:\n #Implement a fresh game\n game = Game()\n\n #Modify the board and other tournament, game objects from serialization file here if one is provided\n if self.restoringGame:\n pkg = {}\n pkg['board'] = Board()\n pkg['botWins'] = None\n pkg['humanWins'] = None\n pkg['nextPlayer'] = None\n\n #Exit the game if restore failed\n if not self.serializer.read_from_file(self.restoreFilePath, pkg):\n self.notifications.msg_serialized(\"FAILED\")\n return\n game.board = deepcopy(pkg['board'])\n self.botScore = pkg['botWins']\n self.humanScore = pkg['humanWins']\n self.nextPlayer = pkg['nextPlayer']\n\n self.gameResult = game.implement_game(self.restoringGame, self.nextPlayer)\n self.restoringGame = False\n else:\n self.gameResult = game.implement_game(self.restoringGame)\n\n #If a player has won the game\n if self.gameResult == 'h':\n self.humanScore += 1\n if self.gameResult == 'c':\n self.botScore += 1\n\n # 'S' refers to serialize during computer's turn and 's' refers to serialize during human's turn\n if (self.gameResult == 'S' or self.gameResult == 's'):\n self.serialize_game(game)\n return True\n \n #Ask if user wants to continue to next round\n self.notifications.msg_want_to_play_again()\n if not self.wants_to_continue():\n self.quit = True\n\n self.notifications.draw_divider()\n\n #If user chooses to quit, stop the tournament\n if self.quit:\n break\n\n #Displaying the tournament results\n self.notifications.msg_display_results(self.botScore, self.humanScore)\n\n\n \"\"\" *********************************************************************\n Function Name: serialize_game\n\n Purpose: Processes the serialization request from the human player\n\n Parameters: game, the current game object\n\n Return Value: none\n\n Local Variables: none\n\n Assistance Received: none\n ********************************************************************* \"\"\"\n # Serializes a Tournament state\n def serialize_game(self, game):\n #Store the next player in a string\n if self.gameResult == 'S':\n self.nextPlayer = \"Computer\" \n else:\n self.nextPlayer = \"Human\"\n\n #Write the serialized output to a file and exit\n if (self.serializer.write_to_file(game.board, self.botScore, self.humanScore, self.nextPlayer)):\n self.notifications.msg_serialized(\"SUCCESSFUL\")\n else:\n self.notifications.msg_serialized(\"FAILED\")\n\n \"\"\" *********************************************************************\n Function Name: wants_to_continue\n\n Purpose: To Ask user if they want to continue. 
At any given time\n\n Parameters: none\n\n Return Value: True if user picks yes, False otherwise\n\n Local Variables: none\n\n Assistance Received: none\n ********************************************************************* \"\"\"\n # Gets user's choice on whether to continue to another round\n def wants_to_continue(self):\n #Continue asking user for input until they press 'y' or 'n'\n while True:\n input = getche()\n if (input == 'y' or input == 'Y'):\n return True\n if (input == 'n' or input == 'N'):\n return False\n","repo_name":"Viveckh/Duell_Py","sub_path":"Tournament.py","file_name":"Tournament.py","file_ext":"py","file_size_in_byte":5647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"19865762021","text":"import smtplib\nimport os\nfrom email.mime import multipart, text, application\nfrom config.config import config\nfrom static.phrases import phrases\n\n\ndef get_message(email, file, cfg):\n msg = multipart.MIMEMultipart()\n msg['Subject'] = phrases[\"subject\"]\n msg['From'] = cfg[\"login\"]\n msg['To'] = email\n\n body = text.MIMEText(phrases[\"text\"])\n msg.attach(body)\n\n fp = open(file, 'rb')\n pdf = application.MIMEApplication(fp.read(), _subtype=\"pdf\")\n fp.close()\n pdf.add_header('Content-Disposition', 'attachment', filename=file)\n msg.attach(pdf)\n\n return msg\n\n\ndef login_and_send(email, msg, cfg):\n s = smtplib.SMTP(os.environ[cfg[\"host\"]], int(os.environ[cfg[\"port\"]]))\n s.starttls()\n s.login(os.environ[cfg[\"login\"]], os.environ[cfg[\"password\"]])\n s.sendmail(os.environ[cfg[\"login\"]], [email], msg.as_string())\n s.quit()\n\n\ndef send_mail(email, files):\n cfg = config[\"email_config\"]\n if not cfg[\"to_mail\"]:\n return \"E-mail not sent due to settings of config\"\n msg = get_message(email, files[0], cfg)\n login_and_send(email, msg, cfg)\n\n return \"E-mail sent successfully\"\n\n\ndef send_to_chat(bot, peer, file):\n cfg = config[\"email_config\"]\n if cfg[\"to_chat\"]:\n bot.messaging.send_file(peer, file)\n return \"File sent to chat\"\n return \"File not sent due to settings of config\"\n","repo_name":"eftblack/dialog_feedback_bot","sub_path":"src/send_mail.py","file_name":"send_mail.py","file_ext":"py","file_size_in_byte":1362,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"} +{"seq_id":"8622509386","text":"# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n\n__version__ = \"$Revision: 2019101601 $\"\n\n################################################################################\nimport cv2\nimport glob\nimport os\nimport numpy as np\n\n################################################################################\n\n# Process the video from all users.\nfor i in range(166):\n\n # Get the folder name.\n folder = \"%04d\" % i\n\n # Get all the paths to the eye-videos.\n files = glob.glob(\"/Users/eyeinfo/Desktop/outputs/%s/*.mov\" % folder)\n files.sort()\n\n # Create the output folder.\n if not os.path.exists(\"mean/%s\" % folder):\n os.makedirs(\"mean/%s\" % folder)\n\n # Process individually each video.\n for file in files:\n\n # Open the current video.\n video = cv2.VideoCapture(file)\n\n # Read the first frame.\n ret, image = video.read()\n image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n image = image.astype(np.float)\n\n # Grab the frames.\n while ret:\n\n # Get the current frame.\n ret, frame = video.read()\n if ret:\n\n # Convert to grayscale.\n frame = cv2.cvtColor(frame, 
cv2.COLOR_BGR2GRAY)\n\n # Add the current frame to the accumulator.\n image += frame\n\n # Calculate the mean.\n image /= 150\n image = image.astype(np.uint8).copy()\n\n # Save the current image.\n filename = os.path.basename(file).split(\".\")[0]\n cv2.imwrite(\"mean/%s/%s.png\" % (folder, filename), image)","repo_name":"fabricionarcizo/eye-tracking-data","sub_path":"03_scripts/00_python/create_mean_image.py","file_name":"create_mean_image.py","file_ext":"py","file_size_in_byte":3054,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"31506723980","text":"# Importing modules\nimport nltk \nimport pandas as pd\n\n## Downloading nltk submodules if not in machine\n# nltk.download('wordnet')\n# nltk.download('punkt')\n# nltk.download('averaged_perceptron_tagger')\n\n# Importing word bank and functions\nfrom nltk.corpus import wordnet\nfrom nltk.stem import WordNetLemmatizer\nfrom nltk import word_tokenize\nfrom nltk import pos_tag\nfrom nltk.tokenize.treebank import TreebankWordDetokenizer\nlemmatizer = WordNetLemmatizer()\n\n# Main function\ndef generate_sentences(root_sentence, number_sentences, similarity_treshold = 0.5):\n \n '''\n This functions generates similar sentences given a root/parent sentence.\n\n It does this by searching for synonyms for each word in the sentence\n and selecting them based on the given word funcion (noun, verb, ...) and the similarity\n between the two words.\n\n This function holds two parameters: \n - number_sentences: maximum number of sentences to be generated\n - similarity_treshold: minimum value of similarity for a synonym to be considered\n\n The sentences are returned as strings in a list.\n\n\n BUG: sometimes the same sentence is generated, which is not good\n\n '''\n # Tokenizing and tagging root sentence\n tokenized = word_tokenize(root_sentence)\n tagged_proper = synonym_tagger(tokenized)\n\n # Initializing synonym dictionary\n syn_dict = {}\n for word, tag in tagged_proper: syn_dict[word] = []\n\n # Filling synonym dictionary\n for word_tag in tagged_proper:\n find_synonyms(word_tag, syn_dict, similarity_treshold)\n\n # Converting synonyms dict to data frame\n synonyms_df = dict_to_data_frame(syn_dict, tagged_proper)\n \n # Initializing sentences list parameters\n sentences = []\n sentence_counter = 0\n number_synonyms = synonyms_df.shape[0]\n\n # Main loop for creating sentences\n for index in range(number_synonyms):\n\n # grabbing word and synonym\n word = synonyms_df.at[index, 'word']\n syn = synonyms_df.at[index, 'syn']\n\n # making copy of sentence and change word for synonym\n word_index = tokenized.index(word)\n sent = [t for t in tokenized]\n # this replace is made to handle more that one word synonym\n sent[word_index] = syn.replace('_', ' ') \n\n # transforming tokenized sentence into string\n untokenized_sentence = TreebankWordDetokenizer().detokenize(sent)\n\n # appending string into list \n sentences.append(untokenized_sentence)\n sentence_counter += 1\n\n #checks if the number of sentences desired was fulfilled\n if sentence_counter >= number_sentences: break\n\n return sentences\n\n\n# Auxiliary functions\ndef find_synonyms(word_tagged, dict, treshold):\n\n '''\n This function finds synonyms of words that meet a certain criteria.\n\n The words are passed in the \"word_tagged\" parameter as (word, tag) tuple with\n this tag being 'n', 'v', 'r' or 'a' for noun, verb, adverb, and adjective.\n\n The similarities and synonyms are calculated using the Wordnet from nltk.\n\n 
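Illustrative call (results depend on the installed WordNet data):\n        generate_sentences(\"I like to eat apples\", 3)\n    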
'''\n\n # Separate word and tag and lemmatizes word\n word, tag = word_tagged\n lemma = lemmatizer.lemmatize(word)\n\n # Main loop\n try:\n # Convert word to wordnet syn format\n word_syn = wordnet.synset(lemma + '.' + tag + '.01')\n\n # Loops through all synonyms with the same tag\n for syn in wordnet.synsets(word, wordnet_tag(tag)):\n \n # Calculates similarity\n similarity = word_syn.wup_similarity(syn)\n\n # Checks treshold condition\n if similarity < treshold: break\n\n # Append lemmas and similarity values in the dict as tuples\n for lemma in syn.lemmas():\n if lemma.name() not in [x[0] for x in dict[word]]: \n dict[word].append((lemma.name(), similarity))\n except:\n pass\n\ndef synonym_tagger (words):\n\n '''\n This function tags word (in the form of list of tokens) in a simpler way.\n\n It starts by tagging in the regular NLTK format and then reduces it to \n account only for nouns, verbs and adverbs.\n\n For some reason, the adjectives as 'a' are not supported in other nltk syn functions\n so I just took that classification out.\n\n '''\n\n # Tags the words as expected\n tagged = pos_tag(words)\n\n # Initializes result list. \n results = []\n\n # Appends result list with tuple (word, tag), where tag is a simple version\n for word, tag in tagged:\n if tag.startswith('N'): results.append((word, 'n'))\n if tag.startswith('V'): results.append((word, 'v'))\n if tag.startswith('R'): results.append((word, 'r'))\n # if tag.startswith('JJ'): results.append((word, 'a'))\n \n # Return the resulting list\n return results\n\ndef wordnet_tag(tag):\n '''\n\n This function uses the simple tag calculated in \"synonym tagger\" \n to create wordnet tag objects. Those objects are used in the \"synsets\" function\n later.\n\n Again, I cant figure out why adjectives are not supported by those functions.\n\n '''\n\n if tag == 'n': return wordnet.NOUN\n if tag == 'v': return wordnet.VERB\n if tag == 'r': return wordnet.ADV\n # if tag == 'a': return wordnet.ADJ\n\ndef dict_to_data_frame(synonym_dict, tagged):\n \n '''\n\n This function creates a data frame from the synonym dictionary provided.\n\n This data frame has the form (word, synonym, simple tag, similarity value)\n for all the words provided in the dictionary keys.\n\n The tag column is separated in 3 other columns with binary values for simplicity.\n\n Then this table is sorted in a tag preference order and, after, in the similarity value\n This is done do guarantee the best results in the first rows\n\n The tag preference order is done in a way that the meaning of the sentence is less altered\n in the first synonyms proposed and more altered in the last.\n\n '''\n\n # Create empty data frame by columns \n df = pd.DataFrame(columns=['word', 'syn', 'tag', 'sim'])\n\n # Append rows with desired values\n for word in synonym_dict.keys():\n for syn in synonym_dict[word]:\n row = {\n 'word': word, \n 'syn': syn[0],\n 'tag': dict(tagged)[word],\n 'sim': syn[1]\n }\n df = df.append(row, ignore_index=True)\n\n # Separates the tag column in 3 binary columns\n df = pd.get_dummies(df, columns=['tag'])\n\n # Check if there are missing columns and create them\n tags = ['tag_n', 'tag_v', 'tag_r', 'tag_a']\n for tag in tags: \n if tag not in df: df[tag] = 0\n\n # Sets tag synonym preference\n simple_tags = ['r', 'n', 'v']\n substitution_order_preference = [\"tag_\" + tag for tag in simple_tags]\n \n\n # Sort data frame\n df.sort_values(substitution_order_preference + ['sim'], ascending=False, ignore_index=True, inplace=True)\n\n # Removing word and synonym 
equal values\n df = df[df['word'] != df['syn']].reset_index()\n\n # Returning data frame\n return df","repo_name":"guilevieiram/recipe_sorting","sub_path":"sentence_generator(old)/similar_sentence_generator.py","file_name":"similar_sentence_generator.py","file_ext":"py","file_size_in_byte":7038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"19814447988","text":"from recidiviz.calculator.query import bq_utils\nfrom recidiviz.calculator.query.state import (\n dataset_config,\n state_specific_query_strings,\n)\nfrom recidiviz.calculator.query.state.views.public_dashboard.utils import (\n spotlight_age_buckets,\n)\nfrom recidiviz.metrics.metric_big_query_view import MetricBigQueryViewBuilder\nfrom recidiviz.utils.environment import GCP_PROJECT_STAGING\nfrom recidiviz.utils.metadata import local_project_id_override\n\nSUPERVISION_REVOCATIONS_BY_PERIOD_BY_TYPE_BY_DEMOGRAPHICS_VIEW_VIEW_NAME = (\n \"supervision_revocations_by_period_by_type_by_demographics\"\n)\n\nSUPERVISION_REVOCATIONS_BY_PERIOD_BY_TYPE_BY_DEMOGRAPHICS_VIEW_VIEW_DESCRIPTION = \"\"\"Supervision revocations by period, by source violation type, and by demographic breakdowns. Person-based counts\n with respect to metric_period_months and supervision_type. If a person has more than one revocation of the same\n supervision type in a given metric period, the most recent one is chosen.\"\"\"\n\nSUPERVISION_REVOCATIONS_BY_PERIOD_BY_TYPE_BY_DEMOGRAPHICS_VIEW_VIEW_QUERY_TEMPLATE = \"\"\"\n WITH revocations_by_period_by_person AS (\n SELECT\n state_code,\n metric_period_months,\n {state_specific_race_or_ethnicity_groupings},\n person_id,\n supervision_type,\n IFNULL(most_severe_violation_type, 'EXTERNAL_UNKNOWN') as most_severe_violation_type,\n IFNULL(gender, 'EXTERNAL_UNKNOWN') as gender,\n {age_bucket},\n ROW_NUMBER() OVER (PARTITION BY state_code, metric_period_months, supervision_type, person_id ORDER BY admission_date DESC) as revocation_ranking\n FROM `{project_id}.{shared_metric_views_dataset}.event_based_commitments_from_supervision_materialized`,\n UNNEST ([36]) AS metric_period_months\n WHERE {metric_period_condition}\n AND supervision_type IN ('ALL', 'PAROLE', 'PROBATION')\n )\n \n SELECT\n state_code,\n metric_period_months,\n supervision_type,\n COUNT(DISTINCT IF(most_severe_violation_type IN ('FELONY', 'MISDEMEANOR', 'LAW'), person_id, NULL)) AS new_crime_count,\n COUNT(DISTINCT IF(most_severe_violation_type = 'TECHNICAL', person_id, NULL)) AS technical_count,\n COUNT(DISTINCT IF(most_severe_violation_type IN ('ESCAPED', 'ABSCONDED'), person_id, NULL)) AS absconsion_count,\n -- TODO(#14294): replace with a more robust/generalizable filter\n IF(state_code = 'US_ID', 0, COUNT(DISTINCT IF(most_severe_violation_type = 'EXTERNAL_UNKNOWN', person_id, NULL))) as unknown_count,\n race_or_ethnicity,\n gender,\n age_bucket,\n -- TODO(#14294): replace with a more robust/generalizable filter\n COUNT(DISTINCT IF(state_code = 'US_ID' AND most_severe_violation_type = 'EXTERNAL_UNKNOWN', NULL, person_id)) AS revocation_count\n FROM revocations_by_period_by_person,\n {unnested_race_or_ethnicity_dimension},\n {gender_dimension},\n {age_dimension}\n WHERE revocation_ranking = 1\n AND {state_specific_supervision_type_inclusion_filter}\n AND ((race_or_ethnicity != 'ALL' AND gender = 'ALL' AND age_bucket = 'ALL') -- Race breakdown\n OR (race_or_ethnicity = 'ALL' AND gender != 'ALL' AND age_bucket = 'ALL') -- Gender breakdown\n OR (race_or_ethnicity = 'ALL' AND gender = 'ALL' AND 
age_bucket != 'ALL') -- Age breakdown\n        OR (race_or_ethnicity = 'ALL' AND gender = 'ALL' AND age_bucket = 'ALL')) -- Overall breakdown\n    GROUP BY state_code, metric_period_months, supervision_type, race_or_ethnicity, gender, age_bucket\n    ORDER BY state_code, metric_period_months, supervision_type, race_or_ethnicity, gender, age_bucket\n    \"\"\"\n\nSUPERVISION_REVOCATIONS_BY_PERIOD_BY_TYPE_BY_DEMOGRAPHICS_VIEW_VIEW_BUILDER = MetricBigQueryViewBuilder(\n    dataset_id=dataset_config.PUBLIC_DASHBOARD_VIEWS_DATASET,\n    view_id=SUPERVISION_REVOCATIONS_BY_PERIOD_BY_TYPE_BY_DEMOGRAPHICS_VIEW_VIEW_NAME,\n    view_query_template=SUPERVISION_REVOCATIONS_BY_PERIOD_BY_TYPE_BY_DEMOGRAPHICS_VIEW_VIEW_QUERY_TEMPLATE,\n    dimensions=(\n        \"state_code\",\n        \"supervision_type\",\n        \"metric_period_months\",\n        \"race_or_ethnicity\",\n        \"gender\",\n        \"age_bucket\",\n    ),\n    description=SUPERVISION_REVOCATIONS_BY_PERIOD_BY_TYPE_BY_DEMOGRAPHICS_VIEW_VIEW_DESCRIPTION,\n    shared_metric_views_dataset=dataset_config.SHARED_METRIC_VIEWS_DATASET,\n    metric_period_condition=bq_utils.metric_period_condition(),\n    age_bucket=spotlight_age_buckets(),\n    unnested_race_or_ethnicity_dimension=bq_utils.unnest_column(\n        \"race_or_ethnicity\", \"race_or_ethnicity\"\n    ),\n    gender_dimension=bq_utils.unnest_column(\"gender\", \"gender\"),\n    age_dimension=bq_utils.unnest_column(\"age_bucket\", \"age_bucket\"),\n    state_specific_race_or_ethnicity_groupings=state_specific_query_strings.state_specific_race_or_ethnicity_groupings(),\n    state_specific_supervision_type_inclusion_filter=state_specific_query_strings.state_specific_supervision_type_inclusion_filter(),\n)\n\nif __name__ == \"__main__\":\n    with local_project_id_override(GCP_PROJECT_STAGING):\n        SUPERVISION_REVOCATIONS_BY_PERIOD_BY_TYPE_BY_DEMOGRAPHICS_VIEW_VIEW_BUILDER.build_and_print()\n","repo_name":"Recidiviz/pulse-data","sub_path":"recidiviz/calculator/query/state/views/public_dashboard/supervision/supervision_revocations_by_period_by_type_by_demographics.py","file_name":"supervision_revocations_by_period_by_type_by_demographics.py","file_ext":"py","file_size_in_byte":5145,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"85"}
{"seq_id":"5849885964","text":"# Create a program that presents a person's name and their April salary, displayed in the standard format.\r\n\r\npessoa = input(\"Insira seu primeiro nome: \")\r\nvalor = input(\"Insira seu salário do mês de abril: \")\r\n\r\nparte1valor = valor[0:1]\r\nparte2valor = valor[1:4]\r\n\r\n\r\npessoavalor = \"O salário de \" + pessoa + \" no mês de abril foi de R$\" + parte1valor + \".\" + parte2valor + \",\" + \"00\" + \" reais\"\r\n\r\nprint(pessoavalor)","repo_name":"NaiaraLemes/Nova-Atividade-3","sub_path":"Atividade3.py","file_name":"Atividade3.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"}
{"seq_id":"31326803408","text":"# Blender script to export the skinning weights in csv\nimport bpy\n\nvertices = bpy.data.objects[\"Hand\"].data.vertices\ngroup_names = [g.name for g in bpy.data.objects[\"Hand\"].vertex_groups]\nbones_count = len(group_names)\n\nfile = open(\"weights.csv\", \"w\")\nline = \",\".join([name for name in group_names])\nfile.write(line + \"\\n\")\n\nfor v in vertices:\n    weights = [0 for i in range(bones_count)]\n    for g in v.groups:\n        weights[g.group] = g.weight\n    line = \",\".join([(\"%.6f\" % w) for w in weights])\n    file.write(line + 
\"\\n\")\nfile.close()\n","repo_name":"niceterran36/Hand_deformation_model","sub_path":"external/mesh/export_weights.py","file_name":"export_weights.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"85"}
{"seq_id":"33369912299","text":"import math\n\nnumber = 325489\nreference = math.sqrt(number)\nspiral = int(reference)\n\nif spiral % 2 != 0:\n    spiral += 2\nif spiral % 2 == 0:\n    spiral += 1\n\nside = spiral - 1\ncounting_distance = side // 2\nmax_dis = spiral - 1\n\nfirst_corner = spiral ** 2\n\ncount = first_corner\njumps = 0\nx = 0\n\nwhile count > number:\n    count -= 1\n    jumps += 1\n    if jumps <= counting_distance:\n        x += 1\n    else:\n        x -= 1\n\n    if jumps == side:\n        jumps = 0\n\ndistance = max_dis - x\nprint(distance)\n","repo_name":"maialenbz/advent","sub_path":"2017/day03/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"}
{"seq_id":"3666086","text":"# def cong_chuoi_dang_xen(s1, s2):\n#     chuoi_ket_qua = \"\"\n#     s2 = s2[::-1]\n#     do_dai = max(len(s1), len(s2))\n#     for i in range(do_dai):\n#         if i < len(s1):\n#             chuoi_ket_qua += s1[i]\n#         if i < len(s2):\n#             chuoi_ket_qua += s2[i]\n#     return chuoi_ket_qua\n\n\n# s1 = \"ABC\"\n# s2 = \"abc\"\n# print(cong_chuoi_dang_xen(s1, s2))\n#######################################\ndef chuoi_dan_xen(s1, s2):\n    # Reverse string s2\n    s2DaoNguoc = s2[::-1]\n    # Use max() to get the length of the longer string\n    maxDoDaiChuoi = max(len(s1), len(s2))\n    chuoiDanXen = \"\"\n\n    # Use a loop to interleave the two strings\n    for i in range(maxDoDaiChuoi):\n        if i < len(s1):\n            chuoiDanXen += s1[i]\n        if i < len(s2):\n            chuoiDanXen += s2DaoNguoc[i]\n    # Return the resulting string\n    return chuoiDanXen\n\n\n# Read the strings from the keyboard\ns1 = input()\ns2 = input()\n\n# Call the handler function with the required arguments\nprint(chuoi_dan_xen(s1, s2))\n","repo_name":"chanh1311/Python_Basic_200","sub_path":"Kteam/hamcongchuoinguoc.py","file_name":"hamcongchuoinguoc.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"}
{"seq_id":"1933913155","text":"import collections\n\n\ndef topKFrequent(nums, k):\n    clist = collections.Counter(nums).most_common()\n    return [clist[i][0] for i in range(k)]\n\n\nif __name__ == \"__main__\":\n    print(topKFrequent([1, 1, 1, 2, 2, 3], 2))\n","repo_name":"xsank/cabbird","sub_path":"leetcode/top_k_frequent_elements.py","file_name":"top_k_frequent_elements.py","file_ext":"py","file_size_in_byte":219,"program_lang":"python","lang":"en","doc_type":"code","stars":81,"dataset":"github-code","pt":"85"}
{"seq_id":"18653320419","text":"import pandas as pd \nimport numpy as np \nimport matplotlib.pyplot as plt \nimport seaborn as sns \nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import OneHotEncoder \nfrom sklearn.preprocessing import MinMaxScaler \n# Import for modeling\nfrom sklearn.tree import DecisionTreeClassifier, plot_tree\nfrom sklearn.metrics import classification_report\nfrom sklearn.metrics import confusion_matrix\n\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.linear_model import LogisticRegression\nimport warnings\nwarnings.filterwarnings('ignore')\nimport scipy.stats as stats\n\ndf = pd.read_csv('WA_Fn-UseC_-HR-Employee-Attrition.csv')\ndf['Attrition'] = df['Attrition'].apply(lambda x:1 if x == 'Yes' else 
0)\ndf['OverTime'] = df['OverTime'].apply(lambda x:1 if x == 'Yes' else 0)\ndf['Over18'] = df['Over18'].apply(lambda x:1 if x == 'Yes' else 0)\ndf.drop(['EmployeeCount', \"StandardHours\", 'Over18', 'EmployeeNumber'], axis=1, inplace=True)\n\ndef split_HR_data(df):\n '''\n This function performs split on zillow data\n Returns train, validate, and test dfs.\n '''\n train_validate, test = train_test_split(df, test_size=.2, \n random_state=123)\n train, validate = train_test_split(train_validate, test_size=.3, \n random_state=123)\n return train, validate, test\n\ntrain, validate, test = split_HR_data(df)\n\nleft_train = train[train['Attrition'] == 1]\nstayed_train = train[train['Attrition'] == 0]\n\ndef univariate():\n train.hist(bins = 30, figsize = (20, 20), color= 'orange')\n\ndef Environment_Satisfaction_Countplot():\n plt.figure(figsize = [8, 4])\n sns.countplot(x ='EnvironmentSatisfaction', hue ='Attrition', data = train)\n\ndef Hourly_Rate_KDE():\n plt.figure(figsize = (12, 7))\n\n sns.kdeplot(left_train['HourlyRate'], label = 'Employees who left', shade = True, color = 'r')\n sns.kdeplot(stayed_train['HourlyRate'], label = 'Employees who stayed', shade = True, color = 'b')\n\n plt.xlabel('Hourly Rate')\n\ndef Job_Level_Countplot():\n plt.figure(figsize = [8, 4])\n sns.countplot(x ='JobLevel', hue ='Attrition', data = train)\n\ndef Job_Role_Countplot():\n plt.figure(figsize = [20, 5])\n sns.countplot(x ='JobRole', hue ='Attrition', data = train)\n\ndef Stock_Option_Countplot():\n plt.figure(figsize = [20, 5])\n sns.countplot(x ='StockOptionLevel', hue ='Attrition', data = train)\n\ndef Years_At_Company_KDE():\n plt.figure(figsize = (12, 7))\n\n sns.kdeplot(left_train['YearsAtCompany'], label = 'Employees who left', shade = True, color = 'r')\n sns.kdeplot(stayed_train['YearsAtCompany'], label = 'Employees who stayed', shade = True, color = 'b')\n\n plt.xlabel('Years with Company')\n\ndef Employee_Low_Satisfaction_EnvironmentSatisfaction():\n fig, (ax1, ax2, ax3, ax4) = plt.subplots(nrows=1, ncols=4)\n\n #define data\n data1 = [20, 80]\n labels1 = ['Dissatisfied', 'Okay']\n colors = sns.color_palette('coolwarm')[0:5]\n\n ax1.pie(data1, labels = labels1, colors = colors, autopct='%.0f%%')\n ax1.set_title('Sales Executive', fontdict = {'fontsize' : 14})\n\n data2 = [20, 80]\n labels2 = ['Dissatisfied', 'Okay']\n\n ax2.pie(data2, labels = labels2, colors = colors, autopct='%.0f%%')\n ax2.set_title(\"Research Scientist\", fontdict = {'fontsize' : 14})\n\n data3 = [20, 80]\n labels3 = ['Dissatisfied', 'Okay']\n\n ax3.pie(data3, labels = labels3, colors = colors, autopct='%.0f%%')\n ax3.set_title(\"Laboratory Technician\", fontdict = {'fontsize' : 14})\n\n data4 = [28, 72]\n labels4 = ['Dissatisfied', 'Okay']\n\n ax4.pie(data4, labels = labels4, colors = colors, autopct='%.0f%%')\n ax4.set_title(\"Research Director\", fontdict = {'fontsize' : 14})\n\n\n plt.tight_layout()\n sns.set(rc = {'figure.figsize':(10,6)})\n\ndef Employee_Low_Satisfaction_EnvironmentSatisfaction2():\n fig, (ax1, ax2, ax3, ax4) = plt.subplots(nrows=1, ncols=4)\n\n #define data\n data1 = [17, 83]\n labels1 = ['Dissatisfied', 'Okay']\n colors = sns.color_palette('coolwarm')[0:5]\n\n ax1.pie(data1, labels = labels1, colors = colors, autopct='%.0f%%')\n ax1.set_title('Healthcare Representative', fontdict = {'fontsize' : 14})\n\n data2 = [15, 85]\n labels2 = ['Dissatisfied', 'Okay']\n\n ax2.pie(data2, labels = labels2, colors = colors, autopct='%.0f%%')\n ax2.set_title(\"Manufacturing Director\", fontdict = {'fontsize' : 
14})\n\n data3 = [13, 87]\n labels3 = ['Dissatisfied', 'Okay']\n\n ax3.pie(data3, labels = labels3, colors = colors, autopct='%.0f%%')\n ax3.set_title(\"Sales Representative\", fontdict = {'fontsize' : 14})\n\n data4 = [19, 81]\n labels4 = ['Dissatisfied', 'Okay']\n\n ax4.pie(data4, labels = labels4, colors = colors, autopct='%.0f%%')\n ax4.set_title(\"Human Resources\", fontdict = {'fontsize' : 14})\n\n\n plt.tight_layout()\n sns.set(rc = {'figure.figsize':(10,6)})\n\ndef level_one_ResearchScientists():\n fig, (ax1, ax2, ax3) = plt.subplots(nrows=1, ncols=3)\n\n #define data\n data1 = [19, 81]\n labels1 = ['Quit', 'Stayed']\n colors = sns.color_palette('coolwarm')[0:5]\n\n ax1.pie(data1, labels = labels1, colors = colors, autopct='%.0f%%')\n ax1.set_title('Attrition Rate', fontdict = {'fontsize' : 14})\n\n data2 = [21, 79]\n labels2 = ['Dissatisfied', 'Okay']\n\n ax2.pie(data2, labels = labels2, colors = colors, autopct='%.0f%%')\n ax2.set_title(\"Environment Satisfaction\", fontdict = {'fontsize' : 14})\n\n data3 = [18, 82]\n labels3 = ['Dissatisfied', 'Okay']\n\n ax3.pie(data3, labels = labels3, colors = colors, autopct='%.0f%%')\n ax3.set_title(\"Job Satisfaction\", fontdict = {'fontsize' : 14})\n\n\n plt.tight_layout()\n sns.set(rc = {'figure.figsize':(10,6)})\n\ndef level_one_LaboratoryTechnicians():\n fig, (ax1, ax2, ax3) = plt.subplots(nrows=1, ncols=3)\n\n #define data\n data1 = [28, 72]\n labels1 = ['Quit', 'Stayed']\n colors = sns.color_palette('coolwarm')[0:5]\n\n ax1.pie(data1, labels = labels1, colors = colors, autopct='%.0f%%')\n ax1.set_title('Attrition Rate', fontdict = {'fontsize' : 14})\n\n data2 = [20, 80]\n labels2 = ['Dissatisfied', 'Okay']\n\n ax2.pie(data2, labels = labels2, colors = colors, autopct='%.0f%%')\n ax2.set_title(\"Environment Satisfaction\", fontdict = {'fontsize' : 14})\n\n data3 = [23, 77]\n labels3 = ['Dissatisfied', 'Okay']\n\n ax3.pie(data3, labels = labels3, colors = colors, autopct='%.0f%%')\n ax3.set_title(\"Job Satisfaction\", fontdict = {'fontsize' : 14})\n\n\n plt.tight_layout()\n sns.set(rc = {'figure.figsize':(10,6)})\n\ndef level_one_SalesRepresentative():\n fig, (ax1, ax2, ax3) = plt.subplots(nrows=1, ncols=3)\n\n #define data\n data1 = [42, 58]\n labels1 = ['Quit', 'Stayed']\n colors = sns.color_palette('coolwarm')[0:5]\n\n ax1.pie(data1, labels = labels1, colors = colors, autopct='%.0f%%')\n ax1.set_title('Attrition Rate', fontdict = {'fontsize' : 14})\n\n data2 = [14, 86]\n labels2 = ['Dissatisfied', 'Okay']\n\n ax2.pie(data2, labels = labels2, colors = colors, autopct='%.0f%%')\n ax2.set_title(\"Environment Satisfaction\", fontdict = {'fontsize' : 14})\n\n data3 = [16, 84]\n labels3 = ['Dissatisfied', 'Okay']\n\n ax3.pie(data3, labels = labels3, colors = colors, autopct='%.0f%%')\n ax3.set_title(\"Job Satisfaction\", fontdict = {'fontsize' : 14})\n\n\n plt.tight_layout()\n sns.set(rc = {'figure.figsize':(10,6)})\n\ndef level_one_HumanResources():\n fig, (ax1, ax2, ax3) = plt.subplots(nrows=1, ncols=3)\n\n #define data\n data1 = [30, 70]\n labels1 = ['Quit', 'Stayed']\n colors = sns.color_palette('coolwarm')[0:5]\n\n ax1.pie(data1, labels = labels1, colors = colors, autopct='%.0f%%')\n ax1.set_title('Attrition Rate', fontdict = {'fontsize' : 14})\n\n data2 = [21, 79]\n labels2 = ['Dissatisfied', 'Okay']\n\n ax2.pie(data2, labels = labels2, colors = colors, autopct='%.0f%%')\n ax2.set_title(\"Environment Satisfaction\", fontdict = {'fontsize' : 14})\n\n data3 = [15, 85]\n labels3 = ['Dissatisfied', 'Okay']\n\n ax3.pie(data3, labels = 
labels3, colors = colors, autopct='%.0f%%')\n ax3.set_title(\"Job Satisfaction\", fontdict = {'fontsize' : 14})\n\n\n plt.tight_layout()\n sns.set(rc = {'figure.figsize':(10,6)})\n\ndef Stock_Option_countplot():\n plt.figure(figsize = [8, 4])\n sns.countplot(x ='StockOptionLevel', hue ='Attrition', data = df)\n\ndef Current_Attrition_Rate():\n labels = 'Quit', 'Stayed'\n data = [17, 83]\n colors = sns.color_palette('pastel')\n plt.pie(data, labels=labels, colors = colors)\n plt.show()\n\n ","repo_name":"Kayfj41901/IBM_HR","sub_path":"visuals.py","file_name":"visuals.py","file_ext":"py","file_size_in_byte":8559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"27571270117","text":"from aws_cdk import core\nfrom aws_cdk import aws_ec2 as ec2\nfrom aws_cdk import aws_iam as iam\nfrom aws_cdk import aws_route53 as route53\n\n\nclass MindlampStack(core.Stack):\n\n def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:\n super().__init__(scope, id, **kwargs)\n\n \"\"\"\n Creates a VPC with 2 public and 2 private subnets,\n each subnet with a /18 CIDR range,\n a 2 NAT gateways\n \"\"\"\n vpc = ec2.Vpc(\n self, \"MindLAMPVPC\",\n cidr=\"10.10.0.0/16\"\n )\n\n # Security group for instances\n security_group = ec2.SecurityGroup(\n self, \"MindLAMPSecurityGroup\",\n vpc=vpc,\n description=\"Security group for LAMP Platform instances\"\n )\n\n # Allow TCP to port 80 from 0.0.0.0/0\n security_group.add_ingress_rule(\n peer=ec2.Peer.any_ipv4(),\n connection=ec2.Port.tcp(80),\n description=\"Allow HTTP connections to port 80\"\n )\n # Allow TCP to port 443 from 0.0.0.0/0\n security_group.add_ingress_rule(\n peer=ec2.Peer.any_ipv4(),\n connection=ec2.Port.tcp(443),\n description=\"Allow HTTP connections to port 443\"\n )\n\n # Allow TCP to port 443 from 0.0.0.0/0\n security_group.add_ingress_rule(\n peer=ec2.Peer.any_ipv6(),\n connection=ec2.Port.tcp(443),\n description=\"Allow HTTP connections to port 443 with ipv6\"\n )\n\n # Install docker on boot\n user_data = ec2.UserData.for_linux()\n user_data.add_commands(\n \"yum -y install docker && usermod -a -G docker ec2-user\")\n\n # The EC2 instance\n instance1 = ec2.Instance(\n self, \"MindLAMPInstance1\",\n instance_type=ec2.InstanceType(\"t3a.large\"),\n machine_image=ec2.MachineImage.latest_amazon_linux(),\n user_data=user_data,\n instance_name=\"LAMP platform\",\n block_devices=[\n ec2.BlockDevice(\n device_name=\"/dev/sdf\",\n volume=ec2.BlockDeviceVolume.ebs(\n volume_size=30,\n encrypted=True,\n delete_on_termination=True\n )\n ),\n ec2.BlockDevice(\n device_name=\"/dev/sdg\",\n volume=ec2.BlockDeviceVolume.ebs(\n volume_size=100,\n encrypted=True,\n delete_on_termination=False\n )\n )\n ],\n security_group=security_group,\n vpc=vpc,\n vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC)\n )\n\n # Associate the SSM managed policy for SSM control + access\n instance1.role.add_managed_policy(\n policy=iam.ManagedPolicy.from_aws_managed_policy_name(\n managed_policy_name=\"AmazonSSMManagedInstanceCore\")\n )\n\n # Get an existing Route53 hosted zone\n hosted_zone = route53.HostedZone.from_hosted_zone_attributes(\n self, \"MindLAMPHostedZone\",\n hosted_zone_id=self.node.try_get_context(\"hosted_zone_id\"),\n zone_name=self.node.try_get_context(\"zone_name\")\n )\n\n # Create an A record to point to the public IP of instance1\n record_set1 = route53.RecordSet(\n self, \"Node1RecordSet\",\n record_type=route53.RecordType.A,\n 
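# Resolve node1.<zone_name> to the instance's current public IPv4 address\r\n            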
target=route53.RecordTarget(values=[instance1.instance_public_ip]),\n zone=hosted_zone,\n record_name=\"node1\"\n )\n","repo_name":"nragusa/LAMPExample","sub_path":"mindlamp/mindlamp_stack.py","file_name":"mindlamp_stack.py","file_ext":"py","file_size_in_byte":3682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"13613352557","text":"from app.services.schedule import get_schedules, get_schedule, get_schedule_by_day, create_schedule, update_schedule, delete_schedule\nfrom app.serializers import ScheduleSchema\nfrom faker import Faker\nfrom datetime import datetime\n\n\ndef test_get_schedules(app_context):\n response = get_schedules()\n assert response['status'] == 404\n\n\ndef test_get_schedule(schedule, app_context):\n response = get_schedule(schedule.id)\n assert response['status'] == 200\n assert ScheduleSchema().dump(schedule) == response['message']\n\n\ndef test_get_schedule_by_day(schedule, worker, app_context):\n date_str = schedule.day.strftime(\"%Y-%m-%d\")\n date_obj = datetime.strptime(date_str, \"%Y-%m-%d\")\n year = date_obj.year\n month = date_obj.month\n day = date_obj.day\n response = get_schedule_by_day(worker.id, year, month, day)\n assert response['status'] == 404\n assert 'Schedule does not exist for this worker at this date' == response['message']\n\n\ndef test_create_schedule(worker, app_context):\n faker = Faker()\n schedule_data = {\n 'year': 2023,\n 'month': 1,\n 'day': 1,\n 'start_time_h': 9,\n 'start_time_m': 0,\n 'end_time_h': 18,\n 'end_time_m': 0,\n 'worker_id': worker.id,\n 'location_id': 1\n }\n response = create_schedule(schedule_data)\n assert response['status'] == 200\n\n\ndef test_update_schedule(schedule, worker, app_context):\n faker = Faker()\n new_data = {\n 'year': 2023,\n 'month': 1,\n 'day': 1,\n 'start_time_h': 10,\n 'start_time_m': 0,\n 'end_time_h': 19,\n 'end_time_m': 0,\n 'worker_id': worker.id,\n 'location_id': 2\n }\n response = update_schedule(schedule.id, new_data)\n assert response['status'] == 200\n\n\ndef test_delete_schedule(schedule, app_context):\n schedule_id = schedule.id\n response = delete_schedule(schedule_id)\n assert response['status'] == 200\n","repo_name":"Fhockk/WorkerAPI","sub_path":"tests/test_services_schedule.py","file_name":"test_services_schedule.py","file_ext":"py","file_size_in_byte":1942,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"27528076867","text":"import datetime\nimport os\n\nimport qrcode\nfrom flask import make_response\nfrom fpdf import FPDF\nfrom src.core.models.Configuracion import Configuracion\nfrom src.web.controllers.FactoryCrud import get_doc_json\n\n\ndef transformMonth(numMonth):\n month = {\"1\": \"Enero\",\"2\": \"Febrero\",\"3\":\"Marzo\",\"4\":\"Abril\",\"5\":\"Mayo\",\"6\":\"Junio\",\"7\":\"Julio\",\"8\":\"Agosto\",\"9\":\"Septiembre\",\"10\":\"Octubre\",\"11\":\"Noviembre\",\"12\":\"Diciembre\"}\n return month[str(numMonth)]\n\ndef createPDF_payment(partner,payment):\n config = get_doc_json(Configuracion, 1)\n encabezado = config[\"encabezado_pago\"]\n moneda = config[\"moneda\"]\n image_path = \"https://cdve.files.wordpress.com/2017/06/cropped-cropped-logodepo1.png\"\n pdf = FPDF()\n pdf.add_page()\n\n pdf.set_font('Arial', 'B', 20)\n pdf.image(name=image_path,x=10,y=10,w=30,h=30)\n pdf.text(x=45,y=20,txt=f'Recibo # {payment[\"id\"]}')\n\n pdf.text(x=45,y=40,txt=f'{encabezado}')\n\n pdf.set_font('Arial', '', 12)\n pdf.text(x=182,y=10,txt=f'Fecha')\n 
pdf.text(x=180,y=15,txt=f'{datetime.datetime.now().strftime(\"%x\")}')\n\n    pdf.line(10,45,200,45)\n\n    pdf.set_font('Arial', \"\", 16)\n    pdf.text(x=30,y=55,txt=f'Recibimos de {partner[\"nombre\"]} {partner[\"apellido\"]}')\n    pdf.text(x=30,y=60,txt=f'El importe en {moneda} ({payment[\"pago\"]})')\n    pdf.text(x=30,y=65,txt=f'Por el concepto de cuota societaria mes {transformMonth(payment[\"fecha\"].month)} {payment[\"fecha\"].year}')\n\n    pdf.line(10,75,200,75)\n\n    response = make_response(pdf.output(dest=\"S\").encode('latin-1'))\n    response.headers.set(\"Content-Disposition\",\"attachment\",filename=\"recibo.pdf\")\n    response.headers.set('Content-Type', 'application/pdf')\n    return response\n\ndef createPDF_perAsoc(tipo,value,result):\n    pdf=FPDF(orientation='P', unit='mm', format='A4')\n    pdf.add_page()\n    pdf.set_font('Arial', 'B', 16)\n    image_path = \"https://cdve.files.wordpress.com/2017/06/cropped-cropped-logodepo1.png\"\n    pdf.image(name=image_path,x=10,y=8,w=30,h=30)\n    \n    \n    if (value != 'vacio'): # last name filter provided\n        \n        if (tipo=='nada'): # no status filter\n            pdf.text(x=50,y=25,txt=f'Tabla De Socios filtrada por apellido: {value}')\n        \n        else: # status filter provided\n            pdf.text(x=82,y=21,txt=f'Tabla De Socios filtrada por:')\n            pdf.text(x=74, y=28, txt=f'Apellido: {value} y Estado: {tipo}')\n        \n    else: # no last name filter\n        if (tipo=='nada'): # no status filter\n            pdf.text(x=60,y=25,txt=f'Tabla De Socios sin filtrar ') \n        else: # status filter provided\n            pdf.text(x=60,y=25,txt=f'Tabla De Socios filtrada por Estado: {tipo}')\n    pdf.line(0, 45, 256, 45) \n    pdf.ln(40) \n\n    # build the table\n    pdf.set_fill_color(r= 184, g=190 , b=250)\n    pdf.cell(w=50,h=15, txt='Nro socio', border = 1, align='C', fill=1)\n    pdf.cell(w=50,h=15, txt='Nombre', border = 1, align='C', fill=1)\n    pdf.cell(w=50,h=15, txt='Apellido', border = 1, align='C', fill=1)\n    pdf.cell(w=40,h=15, txt='Estado', border = 1, align='C', ln=1, fill=1)\n    \n    pdf.set_fill_color(r=232 , g=232 , b=232)\n    for socio in result:\n        pdf.cell(w=50,h=15, txt=str(socio.nro_socio), border = 1, align='C', fill=1)\n        pdf.cell(w=50,h=15, txt=socio.nombre, border = 1, align='C', fill=1)\n        pdf.cell(w=50,h=15, txt=socio.apellido, border = 1, align='C', fill=1)\n        if (socio.estado == True):\n            pdf.cell(w=40,h=15, txt='Activo', border = 1, align='C', ln=1, fill=1)\n        else:\n            pdf.cell(w=40,h=15, txt='Inactivo', border = 1, align='C', ln=1, fill=1)\n\n    \n    response = make_response(pdf.output(dest=\"S\").encode('latin-1'))\n    response.headers.set(\"Content-Disposition\",\"attachment\",filename=\"tabla_de_socios.pdf\")\n    response.headers.set('Content-Type', 'application/pdf')\n    return response\n\ndef createPDF_qr(id,data):\n    pdf=FPDF(orientation='P', unit='mm', format='A4')\n    pdf.add_page()\n\n    # QR image\n    urlQr = \"https://admin-grupo21.proyecto2022.linti.unlp.edu.ar/admin/socios/informacionSocio/\" + id; # URL for remote use\n    #urlQr = \"http://127.0.0.1:5000/admin/socios/informacionSocio/\" + id; URL for local use\n    img = qrcode.make(urlQr)\n    img.save(\"datos\"+id+\".png\")\n    img.path=\"datos\"+id+\".png\"\n    pdf.image(name=img.path,x=71,y=53,w=40,h=40)\n    \n    # club logo image\n    image_club_path = \"https://cdve.files.wordpress.com/2017/06/cropped-cropped-logodepo1.png\"\n    pdf.image(name=image_club_path,x=30,y=2,w=10,h=10)\n    \n    # member image\n    image_socio_path=\"https://cdn-icons-png.flaticon.com/512/23/23171.png\"\n    pdf.image(name=image_socio_path,x=15,y=20,w=40,h=40)\n    \n    \n    # text\n    pdf.set_font('Arial', 'B', 20)\n    pdf.text(x=65,y=10,txt=f'Club Deportivo Villa Elisa')\n    pdf.set_font('Arial', 
'B', 16)\n    \n    pdf.line(0, 14, 256, 14)\n    \n    pdf.text(x=75,y=25,txt=f'Credencial de {data[\"nombre\"]} {data[\"apellido\"]}')\n    pdf.text(x=75,y=32,txt=f'{data[\"tipo_documento\"]}: {data[\"nro_documento\"]}')\n    pdf.text(x=75,y=39,txt=f'Socio: #{data[\"nro_socio\"]}')\n    pdf.text(x=75,y=46,txt=f'Fecha de alta: {data[\"fecha_alta\"]}')\n    if (data[\"moroso\"]==0):\n        pdf.text(x=18,y=75,txt=f'Moroso: Si')\n    else:\n        pdf.text(x=18,y=75,txt=f'Moroso: No')\n    \n    pdf.line(0, 100, 256, 100)\n    \n    response = make_response(pdf.output(dest=\"S\").encode('latin-1'))\n    response.headers.set(\"Content-Disposition\",\"attachment\",filename=\"credencial.pdf\")\n    response.headers.set('Content-Type', 'application/pdf')\n    \n    os.remove(img.path)\n    \n    return response\n","repo_name":"CamiloDiPaolo/Proyecto-de-Software","sub_path":"admin/src/web/controllers/PDFCreate.py","file_name":"PDFCreate.py","file_ext":"py","file_size_in_byte":5540,"program_lang":"python","lang":"es","doc_type":"code","stars":2,"dataset":"github-code","pt":"85"} +{"seq_id":"26989721706","text":"import cv2\nfrom deepface import DeepFace\nfrom datetime import datetime\nimport shutil\nimport os\n# from Class.mysqlconnection import MariaDB\n\nclass FaceDetection:\n    def __init__(self):\n        self.haarcascades = {\n            'face': cv2.CascadeClassifier('./data/haarcascades/haarcascade_frontalface_default.xml'), \n            'eye': cv2.CascadeClassifier('./data/haarcascades/haarcascade_eye.xml'),\n            'eyeleft': cv2.CascadeClassifier('./data/haarcascades/haarcascade_lefteye_2splits.xml'),\n            'eyeright': cv2.CascadeClassifier('./data/haarcascades/haarcascade_righteye_2splits.xml'),\n            'mouth': cv2.CascadeClassifier('./data/haarcascades/Mouth.xml'),\n            'nose': cv2.CascadeClassifier('./data/haarcascades/Nariz.xml')\n        }\n\n        self.cascadefile = {\n            'eyeleft': cv2.CascadeClassifier('./data/cascade-files/haarcascade_mcs_lefteye.xml'),\n            'eyeright': cv2.CascadeClassifier('./data/cascade-files/haarcascade_mcs_righteye.xml'),\n            'mouth': cv2.CascadeClassifier('./data/cascade-files/haarcascade_mcs_mouth.xml'),\n            'nose': cv2.CascadeClassifier('./data/cascade-files/haarcascade_mcs_nose.xml') \n        }\n\n        self.font = cv2.FONT_HERSHEY_PLAIN\n        self.cap = cv2.VideoCapture(0)\n        self.colorGreen = (0, 255, 0)\n        self.filenameFormat = \"{:s}/{:s}-{:%Y%m%d_%H%M%S}.{:s}\"\n        self.EXTENSION = 'jpg'\n\n        self.removedata = {\n            'Eyeleft' : \"../EyeLeft\",\n            'Eyeright' : \"../EyeRight\",\n            'Face' : \"../Face\",\n            'Mouth' : \"../Mouth\",\n            'Nose' : \"../Nose\"\n        }\n\n        self.deleteFolder = ['EyeLeft', 'EyeRight', 'Face', 'Mouth', 'Nose']\n\n        self.EyeLeft_Size = {\"X\" : 0, \"Y\" : 0} \n        self.EyeRight_Size = {\"X\" : 0, \"Y\" : 0} \n        self.Nose_Size = {\"X\" : 0, \"Y\" : 0} \n        self.Mouth_Size = {\"X\" : 0, \"Y\" : 0} \n\n        # self.MqSQl = MariaDB()\n        # self.MqSQlEyeLeft = MariaDB()\n        # self.MqSQlEyeRight = MariaDB()\n        # self.MqSQlMouth = MariaDB()\n        # self.MqSQlNose = MariaDB()\n\n    def CheckCamera(self):\n        if not self.cap:\n            return False\n        else:\n            return True\n\n    def VideoCaptrue(self):\n        Checkresult = self.CheckCamera()\n        # self.removedata_Detection()\n\n        if Checkresult:\n            while True:\n                self.ret, self.img = self.cap.read()\n\n                self.DetectionAI()\n\n                cv2.imshow(\"Camera 0\", self.img)\n\n                if cv2.waitKey(1) & 0xFF == ord('q'):\n                    return False\n        else:\n            print(\"No camera available!\")\n\n    def removedata_Detection(self):\n        for filepath in self.deleteFolder:\n            shutil.rmtree('./' + filepath, ignore_errors = True)\n            os.mkdir(filepath)\n\n    def DetectionAI(self):\n        try:\n            self.result = DeepFace.analyze(self.img, actions = ['age', 
'emotion'])\n            self.gray = cv2.cvtColor(self.img, cv2.COLOR_BGR2GRAY)\n\n            self.DetectionFace()\n            self.DetectionEye()\n            self.DetectionMouth()\n            self.DetectionNose()\n\n            # print(\"\\n\")\n            # print(\"EyeLeft : \", self.EyeLeft_Size)\n            # print(\"EyeRight : \", self.EyeRight_Size)\n            # print(\"Mouth : \", self.Mouth_Size)\n            # print(\"Nose\", self.Nose_Size)\n            # print(\"\\n\")\n\n        except:\n            return False\n\n    def DetectionFace(self, l = 50, s = 7, d = 20):\n        self.faces = self.haarcascades['face'].detectMultiScale(self.gray, 1.2, 10)\n\n        for (x , y, w, h) in self.faces:\n            x1, y1 = x + w, y + h\n\n            self.roi_img = self.img[y : y + h, x : x + w]\n            self.roi_img1 = self.roi_img.copy()\n\n            self.date = datetime.now()\n            outfile = self.filenameFormat.format(\"Face\", \"Face\", self.date, self.EXTENSION)\n            cv2.imwrite(outfile, self.roi_img)\n\n            # self.MqSQl.InsertDataFace(outfile)\n\n            # MariaDB.InsertDataFace(outfile, 10, 10)\n            \n\n            cv2.rectangle(self.img, (x, y), (x + w, y + h), self.colorGreen, 1) \n\n            #Top, Left\n            cv2.line(self.img, (x, y), (x + l, y), self.colorGreen, s)\n            cv2.line(self.img, (x, y), (x, y + l), self.colorGreen, s)\n\n            #Bottom, Left\n            cv2.line(self.img, (x, y1), (x + l, y1), self.colorGreen, s)\n            cv2.line(self.img, (x, y1), (x, y1 - l), self.colorGreen, s)\n\n            #Top, Right\n            cv2.line(self.img, (x1, y), (x1 - l, y), self.colorGreen, s)\n            cv2.line(self.img, (x1, y), (x1, y + l), self.colorGreen, s)\n\n            #Bottom, Right\n            cv2.line(self.img, (x1, y1), (x1 - l, y1), self.colorGreen, s)\n            cv2.line(self.img, (x1, y1), (x1, y1 - l), self.colorGreen, s)\n\n            cv2.putText(self.img, self.result['dominant_emotion'] + \" \" + str(self.result['age']), (x, y - d), self.font, 2, (0, 255, 0), 2)\n\n    def DetectionEye(self):\n        self.Eye = self.haarcascades['eye'].detectMultiScale(self.roi_img1, 1.7, 8)\n\n        for (x, y, w, h) in self.Eye:\n            cv2.rectangle(self.roi_img, (x, y),(x + w, y + h), (255, 255, 0), 1)\n        else:\n            if len(self.Eye) == 1:\n                print(\"Only one eye detected; cannot tell left from right\")\n            else:\n                if self.Eye[0][0] > self.Eye[1][0]:\n                    xr, yr, wr, hr = self.Eye[0]\n                    xl, yl, wl, hl = self.Eye[1]\n\n                    EyeRight_Image = self.roi_img1[yr : yr + hr, xr : xr + wr]\n                    outfile = self.filenameFormat.format(\"EyeRight\", \"EyeRight\", self.date, self.EXTENSION)\n                    cv2.imwrite(outfile, EyeRight_Image)\n                    # self.MqSQlEyeRight.InsertDataPath(\"eyeright\", outfile, int(xr + wr), int(yr + hr))\n\n                    EyeLeft_Image = self.roi_img1[yl : yl + hl, xl : xl + wl]\n                    outfile = self.filenameFormat.format(\"EyeLeft\", \"EyeLeft\", self.date, self.EXTENSION)\n                    cv2.imwrite(outfile, EyeLeft_Image)\n                    # self.MqSQlEyeLeft.InsertDataPath(\"eyeleft\", outfile, int(xl + wl), int(yl + hl))\n\n\n    def DetectionMouth(self):\n        self.Mouth = self.cascadefile['mouth'].detectMultiScale(self.roi_img1, 1.7, 8)\n\n        for (x , y, w, h) in self.Mouth:\n            mouthimage = self.roi_img1[y : y + h, x : x + w + 15]\n\n            outfile = self.filenameFormat.format(\"Mouth\", \"Mouth\", self.date, self.EXTENSION)\n            cv2.imwrite(outfile, mouthimage)\n\n            # self.MqSQlMouth.InsertDataPath(\"mouth\", outfile, int(x + w), int(y + h))\n\n            cv2.rectangle(self.roi_img, (x, y),(x + w, y + h), (255, 0, 0), 1) \n\n    def DetectionNose(self):\n        self.Nose = self.cascadefile['nose'].detectMultiScale(self.roi_img1, 1.7, 4)\n\n        for (x, y, w, h) in self.Nose:\n            noseimage = self.roi_img1[y : y + h, x : x + w]\n\n            outfile = self.filenameFormat.format(\"Nose\", \"Nose\", self.date, self.EXTENSION)\n            cv2.imwrite(outfile, noseimage)\n\n            # self.MqSQlNose.InsertDataPath(\"nose\", \"Nose\", int(x + w), int(y + 
h))\n\n cv2.rectangle(self.roi_img, (x, y),(x + w , y + h), (0, 255, 255), 1) \n\n def __call__(self):\n self.VideoCaptrue()\n\n def __repr__(self):\n return f'{self.result[\"dominant_emotion\"]} {str(self.result[\"age\"])}'","repo_name":"porndet/Face-Detection","sub_path":"Class/AI_Face.py","file_name":"AI_Face.py","file_ext":"py","file_size_in_byte":7395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"40768213748","text":"import numpy\r\nimport random\r\nimport math\r\nimport csv\r\n\r\n\r\ndef convert_csv_file_to_array(file_name):\r\n array = []\r\n with open(file_name) as csv_file:\r\n reader = csv.reader(csv_file, delimiter=',')\r\n for row in reader:\r\n array.append(row)\r\n return array\r\n\r\n\r\ndef index_of_max(array):\r\n temp = array[0]\r\n num = 0\r\n for i in range(0, len(array)):\r\n if temp < array[i]:\r\n num = i\r\n temp = array[i]\r\n return num\r\n\r\n\r\ndef normalisation_factor(array):\r\n maximum = array[0]\r\n minimum = array[0]\r\n for i in range(0, len(array)):\r\n if maximum < array[i]:\r\n maximum = array[i]\r\n if minimum > array[i]:\r\n minimum = array[i]\r\n difference = maximum - minimum\r\n return minimum, difference\r\n\r\n\r\ndef sigmoid(number):\r\n sol = (1 / (1 + math.exp(-number)))\r\n return sol\r\n\r\n\r\ndef d_sigmoid(number):\r\n sol = number * (1 - number)\r\n return sol\r\n\r\n\r\ndef probability_generator(probability):\r\n number = random.random()\r\n if number < probability:\r\n return 1\r\n else:\r\n return 0\r\n\r\n\r\ndef parameters_gathering(ans):\r\n # get the parameters of the neural network\r\n lay = int(input(\"Type the number of hidden layers: \"))\r\n layer_nodes = []\r\n for i in range(0, lay):\r\n layer_nodes.append(int(input(\"type the number of nodes in layer \" + str(i + 1) + \": \")))\r\n layer_nodes.append(ans)\r\n print(\"\")\r\n learning_rate = float(input(\"type the learning rate: \"))\r\n print(\"\")\r\n return lay, layer_nodes, learning_rate\r\n\r\n\r\ndef initialise_neural_network(inp, ans, lay, layer_nodes):\r\n # initialising the neural network\r\n\r\n # initialising the layers\r\n layers = []\r\n temp = []\r\n for i in range(0, inp):\r\n temp.append(0)\r\n layers.append(numpy.transpose(numpy.matrix(temp)))\r\n for i in range(0, lay):\r\n temp = []\r\n for j in range(0, layer_nodes[i]):\r\n temp.append(0)\r\n layers.append(numpy.transpose(numpy.matrix(temp)))\r\n temp = []\r\n for i in range(0, ans):\r\n temp.append(0)\r\n layers.append(numpy.transpose(numpy.matrix(temp)))\r\n\r\n # initialising the weights\r\n weights = []\r\n for i in range(0, len(layers) - 1):\r\n temp = []\r\n for j in range(0, len(layers[i + 1])):\r\n temp1 = []\r\n for k in range(0, len(layers[i])):\r\n temp1.append(random.random() * 2 - 1)\r\n temp.append(temp1)\r\n weights.append(numpy.matrix(temp))\r\n\r\n # initialising the biases\r\n bias = []\r\n for i in range(0, len(layers) - 1):\r\n temp = []\r\n for j in range(0, len(layers[i + 1])):\r\n temp.append(random.random() * 2 - 1)\r\n bias.append(numpy.transpose(numpy.matrix(temp)))\r\n\r\n return layers, weights, bias\r\n\r\n\r\ndef forward_prop(inputs, weights, bias):\r\n # Forward propagation\r\n forward_propagation = [inputs]\r\n for i in range(0, len(bias)):\r\n forward_propagation.append(numpy.matmul(weights[i], forward_propagation[i]) + bias[i])\r\n for j in range(0, len(forward_propagation[i + 1])):\r\n forward_propagation[i + 1][j][0] = sigmoid(float(forward_propagation[i + 1][j][0]))\r\n\r\n return 
forward_propagation\r\n\r\n\r\ndef back_prop(forward_propagation, answer, learning_rate, bias, weights):\r\n # Weighted error\r\n master_error = [answer - forward_propagation[len(forward_propagation) - 1]]\r\n for i in range(1, len(bias)):\r\n j = len(bias) - i\r\n master_error.append(numpy.matmul(numpy.transpose(weights[j]), master_error[i - 1]))\r\n\r\n # Calculate gradient\r\n gradients = []\r\n for i in range(1, len(forward_propagation)):\r\n temp = []\r\n m = len(forward_propagation) - i - 1\r\n for j in range(0, len(forward_propagation[i])):\r\n temp.append(d_sigmoid(float(forward_propagation[i][j])) * float(master_error[m][j]) * learning_rate)\r\n gradients.append(numpy.transpose(numpy.matrix(temp)))\r\n\r\n # Calculation of differences\r\n forward_propagation_transpose = []\r\n for i in range(0, len(forward_propagation)):\r\n forward_propagation_transpose.append(numpy.transpose(forward_propagation[i]))\r\n\r\n weights_differences = []\r\n for i in range(0, len(gradients)):\r\n weights_differences.append(numpy.matmul(gradients[i], forward_propagation_transpose[i]))\r\n\r\n bias_differences = gradients\r\n\r\n for i in range(0, len(weights)):\r\n weights[i] = weights[i] + weights_differences[i]\r\n for i in range(0, len(bias)):\r\n bias[i] = bias[i] + bias_differences[i]\r\n\r\n return weights, bias\r\n","repo_name":"Abhilol123/Neural-Network-Library---py","sub_path":"NN_Lib.py","file_name":"NN_Lib.py","file_ext":"py","file_size_in_byte":4609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"31226996431","text":"#!/usr/bin/python\n###############################################################################\n#\n# @2018 Bevywise Networks - www.bevywise.com \n#\n# This plugin is an extension to the MQTTRoute, the enterprise mqtt broker. \n# This plugin helps you store all the data received by the broker from different edge devices into the ELASTIC. \n#\n# Elastic.py\n#\n# Author Name: Vardharajulu K N (VKN)\n#\n# The Package contains Elastic instance creation, data insertion.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# \n# you may not use this file except in compliance with the License.\n# \n# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0\n# \n###############################################################################\n\nfrom elasticsearch import Elasticsearch\nimport logger, os, sys\nfrom config import Config\n\nclass Elastic(Config):\n # Initialiser Class\n def __init__(self, filepath):\n super(Elastic, self).__init__(filepath)\n \n if self._open() == True:\n self.log = logger.Logger(self.get_value('LOG','LOG_FILE_PATH'))\n self.log.info(\"Config File Loaded Sucessfully.\")\n else:\n self.log = logger.Logger()\n self.log.err(\"Config file open error.\")\n self.custom_data = {\n 'elastic_host' : self.get_value('ELASTIC','HOSTNAME'), \n 'elastic_port' : int(self.get_value('ELASTIC','PORT')), \n 'elastic_index' : self.get_value('ELASTIC','INDEX_NAME')\n }\n self.init_db()\n\n def init_db(self):\n try:\n self.Elastic_instance = Elasticsearch(self.custom_data['elastic_host'], port = self.custom_data['elastic_port'], max_retries = 0)\n if not self.Elastic_instance.ping():\n self.log.err(\"Unable to Connect with Elastic Port {0}. 
Change the port in plugin.conf\".format(self.custom_data['elastic_port']))\n        except Exception:\n            exc_type, exc_obj, exc_tb = sys.exc_info()\n            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n            self.log.err(\"Error in DB connection \") \n            self.log.err('{}:{}:{}'.format(exc_type, fname, exc_tb.tb_lineno))\n        else:\n            self.log.info(\"Successfully Connected To Elastic at port - {0} \".format(self.custom_data['elastic_port'])) \n        \n    def data_consumer(self,data,result = ''):\n        try:\n            self.Elastic_instance.index(index=self.custom_data['elastic_index'],doc_type='payload',body=data)\n        except Exception:\n            self.log.err(\"Data Insert Error\") \n        \n# Quick manual test; assumes plugin.conf (the config file named in the error message above) is in the working directory. \nif __name__ == '__main__':\n    Elastic(\"plugin.conf\") \n","repo_name":"bevywise-networks/mqttroute-elasticsearch-connector","sub_path":"Elastic/Elastic.py","file_name":"Elastic.py","file_ext":"py","file_size_in_byte":2752,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"85"} +{"seq_id":"40265329181","text":"\"\"\"\nAuthors: Magdalena Asmus-Mrzygłód, Patryk Klimek\n\nTo run this script you will need:\nPython at least 3.8\nnumpy package\nTensorFlow with some packages which should be imported in the code:\n-cifar10 dataset\n-Sequential model\n-Conv2D, MaxPooling2D, Flatten, Dense, Dropout layers\n-to_categorical utils\n\nPyCharm or another IDE for Python\nLink to install Python: https://www.python.org/downloads/\nTo run the script, use the command \"python animals_nn.py\"\n\n==========================================\nNeural Networks\n==========================================\n\nNeural networks are computing systems inspired by the human brain's structure and function.\nThey consist of interconnected nodes called neurons organized in layers.\nEach neuron processes input data and transmits signals to neurons in the next layer.\nNeural networks learn by adjusting connections between neurons based on example data.\n\nIn this example we analyze photos of animals and some vehicles.\nIn this implementation we created two neural network architectures:\nthe first one is less complex, the second one is more complex.\nAfter some tests we can say that the second model gives better results, both in\noverall accuracy (%) and in specific predictions.\n\"\"\"\nimport numpy as np\nfrom tensorflow.keras.datasets import cifar10\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout\nfrom tensorflow.keras.utils import to_categorical\n\n# load cifar10 data from tensorflow\n(X_train, y_train), (X_test, y_test) = cifar10.load_data()\n\n# normalize data to range between 0 and 1\nX_train = X_train.astype('float32') / 255.0\nX_test = X_test.astype('float32') / 255.0\n\n# convert labels from scalars to one-hot vectors\ny_train = to_categorical(y_train)\ny_test = to_categorical(y_test)\n\n# define first, less complex model\nmodel1 = Sequential()\nmodel1.add(Conv2D(32, (3, 3), activation='relu', padding='same', input_shape=X_train.shape[1:]))\nmodel1.add(MaxPooling2D(pool_size=(2, 2)))\nmodel1.add(Conv2D(64, (3, 3), activation='relu', padding='same'))\nmodel1.add(MaxPooling2D(pool_size=(2, 2)))\n\nmodel1.add(Flatten())\nmodel1.add(Dense(512, activation='relu'))\nmodel1.add(Dense(10, activation='softmax'))\n\n# define second, more complex model\nmodel2 = Sequential()\nmodel2.add(Conv2D(32, (3, 3), activation='relu', padding='same', input_shape=X_train.shape[1:]))\nmodel2.add(MaxPooling2D(pool_size=(2, 2)))\nmodel2.add(Conv2D(64, (3, 3), activation='relu', padding='same'))\nmodel2.add(MaxPooling2D(pool_size=(2, 2)))\nmodel2.add(Conv2D(128, (3, 3), activation='relu', padding='same'))\nmodel2.add(MaxPooling2D(pool_size=(2, 2)))\n\nmodel2.add(Flatten())\nmodel2.add(Dense(512, activation='relu'))\nmodel2.add(Dense(256, activation='relu'))\nmodel2.add(Dense(128, activation='relu'))\nmodel2.add(Dropout(0.5))\nmodel2.add(Dense(10, activation='softmax'))\n\n# compiling both models\nmodel1.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\nmodel2.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n\n# define batch size and number of epochs\nbatch_size = 64\nepochs = 10\n\n# fitting both models\nhistory1 = model1.fit(X_train, y_train, batch_size=batch_size, epochs=epochs, validation_data=(X_test, y_test))\nhistory2 = model2.fit(X_train, y_train, batch_size=batch_size, epochs=epochs, validation_data=(X_test, y_test))\n\n# evaluate first model\nscore = model1.evaluate(X_test, y_test, verbose=0)\nprint(\"Test model 1 loss:\", score[0])\nprint(\"Test model 1 accuracy:\", score[1])\n\n# evaluate second model\nscore1 = model2.evaluate(X_test, y_test, verbose=0)\nprint(\"Test model 2 loss:\", score1[0])\nprint(\"Test model 2 accuracy:\", score1[1])\n\n# define specific record for test of prediction\npredicting_record = 44\nprint('Predicting record number : ', predicting_record)\nprint('Expected label : ', np.argmax(y_test[predicting_record]))\n\n# predict results for specific record from two models\npredict1 = model1.predict(np.expand_dims(X_test[predicting_record], axis=0))\npredict2 = model2.predict(np.expand_dims(X_test[predicting_record], axis=0))\nprint('Predicted by model 1 label: ', np.argmax(predict1))\nprint('Predicted by model 2 label: ', np.argmax(predict2))\n","repo_name":"Nomuron/NAI","sub_path":"Zjazd5/animals_nn.py","file_name":"animals_nn.py","file_ext":"py","file_size_in_byte":4232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"22591666032","text":"from django.db import models\nfrom artist.models import Artist\n\n\n# Create your models here.\nclass Album(models.Model):\n    id = 
models.BigAutoField(primary_key=True)\n    id_artist = models.ForeignKey(Artist, on_delete=models.CASCADE)\n    album_name = models.CharField(max_length=100)\n    release_date = models.DateField()\n    genre = models.CharField(max_length=50)\n    is_favorite = models.BooleanField()\n\n    class Meta:\n        db_table = 'album'\n\n    def __str__(self) -> str:\n        return f\"{self.id_artist} - {self.album_name}\"\n","repo_name":"rodrigobpe/teste-suf-web-ll","sub_path":"backend/album/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"72163361878","text":"from django.contrib import admin\nfrom django.urls import path\n\nfrom . import views\n\nurlpatterns = [\n    path('admin/', admin.site.urls),\n    path('', views.index),\n    path('Flipkart/', views.flipkart, name=\"flipkartsite\"),\n    path('Amazon/', views.amazon, name=\"amazonsite\"),\n    path('Snapdeal/', views.snapdeal, name=\"snapdealsite\"),\n]\n","repo_name":"Ashray11/woc3.0-ecommerce-price-tracker-Ashray11","sub_path":"price_tracker/price_tracker_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"22147770128","text":"import argparse\nimport warnings\nfrom time import perf_counter\nfrom dataclasses import dataclass\n\nimport numpy as np\nimport pandas as pd\nimport torch\nimport torch.nn.functional as F\nfrom tqdm import tqdm\n\nfrom wikiwhy_gpt2 import (\n    AnswerModel, \n    ExplainerModel,\n    WikiWhyQA, \n    WikiWhyExplain,\n    build_qa_input,\n    build_exp_input\n) \n\n# ### Custom generation method\n# HuggingFace's generate() does not take token_type_ids as input.\n# \n# Adapted from Thomas Wolf's repo: \n# https://github.com/huggingface/transfer-learning-conv-ai/blob/master/interact.py\n\n@dataclass\nclass GenerationArgs:\n    max_length = 222\n    max_gen_length = 171\n    min_length = 0\n    temperature = 1.0\n    top_k = 50\n    top_p = 1.0\n    sample = False\n    device = \"cpu\"\n\n\ndef top_filtering(logits, top_k=0, top_p=0.9, threshold=-float('Inf'), filter_value=-float('Inf')):\n    \"\"\" \n    Filter a distribution of logits using top-k, top-p (nucleus) and/or threshold filtering\n\n    Args:\n        logits: logits distribution shape (vocabulary size)\n        top_k: <=0: no filtering, >0: keep only top k tokens with highest probability.\n        top_p: <=0.0: no filtering, >0.0: keep only a subset S of candidates, where S is the smallest subset\n            whose total probability mass is greater than or equal to the threshold top_p.\n            In practice, we select the highest probability tokens whose cumulative probability mass exceeds\n            the threshold top_p.\n        threshold: a minimal threshold to keep logits\n    \"\"\"\n    assert logits.dim() == 1 # Only work for batch size 1 for now - could update but it would obfuscate a bit the code\n    top_k = min(top_k, logits.size(-1))\n    if top_k > 0:\n        # Remove all tokens with a probability less than the last token in the top-k tokens\n        indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]\n        logits[indices_to_remove] = filter_value\n\n    if top_p > 0.0:\n        # Compute cumulative probabilities of sorted tokens\n        sorted_logits, sorted_indices = torch.sort(logits, descending=True)\n        cumulative_probabilities = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)\n\n        # Remove tokens with cumulative probability above the threshold\n        sorted_indices_to_remove = cumulative_probabilities > top_p\n        # Shift the indices to the right to keep also the 
first token above the threshold\n        sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()\n        sorted_indices_to_remove[..., 0] = 0\n\n        # Back to unsorted indices and set them to -infinity\n        indices_to_remove = sorted_indices[sorted_indices_to_remove]\n        logits[indices_to_remove] = filter_value\n\n    indices_to_remove = logits < threshold\n    logits[indices_to_remove] = filter_value\n\n    return logits\n\ndef generate(\n    inputs,\n    tokenizer, \n    model,\n    reformat_example,\n    task,\n    special_tokens,\n    args=GenerationArgs(), \n    current_output=[]\n):\n    # handle current output as a string\n    if isinstance(current_output, str):\n        current_output = tokenizer.encode(current_output)\n\n    model.to(args.device)\n    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)\n    encoded_inputs = {k: tokenizer.encode(v) for k, v in inputs.items()}\n\n    for i in range(args.max_gen_length):\n        # instance = build_input_from_segments(personality, history, current_output, tokenizer, with_eos=False)\n        if task == \"qa\":\n            instance = build_qa_input(\n                encoded_inputs[\"question\"],\n                current_output, \n                tokenizer=tokenizer, \n                special_tokens=special_tokens,\n                add_eos=False\n            )\n\n        else:\n            instance = build_exp_input(\n                encoded_inputs[\"cause\"],\n                encoded_inputs[\"effect\"],\n                current_output, \n                tokenizer=tokenizer, \n                special_tokens=special_tokens,\n                add_eos=False\n            )\n        # print(instance)\n\n        input_ids = torch.tensor(\n            instance[\"input_ids\"], \n            device=args.device\n        ).unsqueeze(0)\n        token_type_ids = torch.tensor(\n            instance[\"type_ids\"],\n            device=args.device\n        ).unsqueeze(0)\n\n        logits = model(input_ids, token_type_ids=token_type_ids)[\"logits\"]\n        logits = logits[0, -1, :] / args.temperature\n        logits = top_filtering(logits, top_k=args.top_k, top_p=args.top_p)\n        probs = F.softmax(logits, dim=-1)\n\n        prev = torch.topk(probs, 1)[1] if not args.sample else torch.multinomial(probs, 1)\n        if i < args.min_length and prev.item() in special_tokens_ids:\n            while prev.item() in special_tokens_ids:\n                if probs.max().item() == 1:\n                    warnings.warn(\"Warning: model generating special token with probability 1.\")\n                    break # avoid infinitely looping over special token\n                prev = torch.multinomial(probs, num_samples=1)\n\n        if prev.item() in special_tokens_ids:\n            break\n        current_output.append(prev.item())\n\n    return tokenizer.decode(current_output)\n\n\ndef df_generate(task, df, tokenizer, model, args, new_column_name=\"generated\"):\n    if task == \"qa\":\n        input_fields = [\"question\"] \n        format_input = build_qa_input\n        special_tokens = AnswerModel.SPECIAL_TOKENS\n    else: \n        input_fields = [\"cause\", \"effect\"]\n        format_input = build_exp_input\n        special_tokens = ExplainerModel.SPECIAL_TOKENS\n    \n\n    df[new_column_name] = df.apply(\n        lambda r: generate(\n            inputs=r[input_fields].to_dict(),\n            tokenizer=tokenizer,\n            model=model,\n            reformat_example=format_input,\n            task=task,\n            special_tokens=special_tokens,\n            args=args,\n            current_output=[]\n        ),\n        axis=1\n    )\n\n\ndef gen2(\n    inputs,\n    model,\n    build_input,\n    special_tokens_ids,\n    args=GenerationArgs(), \n):\n    current_output = []\n    for i in range(args.max_gen_length):\n        instance = build_input(inputs, current_output)\n        input_ids = torch.tensor(\n            instance[\"input_ids\"], \n            device=args.device\n        ).unsqueeze(0)\n        token_type_ids = torch.tensor(\n            instance[\"type_ids\"],\n            device=args.device\n        ).unsqueeze(0)\n\n        logits = model(input_ids, token_type_ids=token_type_ids)[\"logits\"]\n        
logits = logits[0, -1, :] / args.temperature \n logits = top_filtering(logits, top_k=args.top_k, top_p=args.top_p)\n probs = F.softmax(logits, dim=-1)\n\n prev = torch.topk(probs, 1)[1] if not args.sample else torch.multinomial(probs, 1)\n if i < args.min_length and prev.item() in special_tokens_ids:\n while prev.item() in special_tokens_ids:\n if probs.max().item() == 1:\n warnings.warn(\"Warning: model generating special token with probability 1.\")\n break # avoid infinitely looping over special token\n prev = torch.multinomial(probs, num_samples=1)\n\n if prev.item() in special_tokens_ids:\n break\n current_output.append(prev.item())\n\n return current_output\n\n\ndef generate_column(task, df, tokenizer, model, args, new_label=\"generated\"):\n if task == \"qa\":\n input_fields = [\"question\"] \n special_tokens = AnswerModel.SPECIAL_TOKENS\n def build_input(inputs, outputs):\n return build_qa_input(\n inputs[0],\n outputs, \n tokenizer=tokenizer, \n special_tokens=special_tokens,\n add_eos=False\n )\n else:\n input_fields = [\"cause\", \"effect\"]\n special_tokens = ExplainerModel.SPECIAL_TOKENS\n def build_input(inputs, outputs):\n return build_exp_input(\n inputs[0],\n inputs[1],\n outputs, \n tokenizer=tokenizer, \n special_tokens=special_tokens,\n add_eos=False\n )\n\n special_token_ids = tokenizer.convert_tokens_to_ids(special_tokens)\n df[new_label] = df.apply(\n lambda r: tokenizer.decode(gen2(\n [tokenizer.encode(r[field]) for field in input_fields],\n model,\n build_input,\n special_token_ids,\n args=args\n )),\n axis=1\n )\n\n\nif __name__ == \"__main__\":\n psr = argparse.ArgumentParser()\n psr.add_argument(\"--input\", required=True)\n psr.add_argument(\"--output\", required=True)\n psr.add_argument(\"--checkpoint\", required=True)\n psr.add_argument(\"--task\", choices=[\"qa\", \"exp\"], required=True)\n psr.add_argument(\"--temp\", type=float, default=1.0)\n psr.add_argument(\"--max_length\", type=int, default=222)\n psr.add_argument(\"--max_gen_length\", type=int, default=171)\n psr.add_argument(\"--min_length\", type=int, default=0)\n psr.add_argument(\"--temperature\", type=float, default=1.0)\n psr.add_argument(\"--top_k\", type=int, default=50)\n psr.add_argument(\"--top_p\", type=float, default=1.0)\n psr.add_argument(\"--sample\", type=bool, default=False)\n psr.add_argument(\"--device\", default=\"cpu\")\n \n args = psr.parse_args()\n \n input_df = pd.read_csv(args.input, index_col=\"id\")\n print(f\"\\nDataset Summary\\n - total_size: {len(input_df)}\")\n print(\" - columns:\", input_df.columns.to_list())\n \n # get test split\n test_set = input_df\n if \"split\" in input_df.columns.tolist():\n test_set = input_df.loc[input_df[\"split\"] == \"test\"]\n test_set = test_set.copy()\n print(\"test set size:\", len(test_set))\n \n explain = args.task == \"exp\"\n model = (\n ExplainerModel if explain \n else AnswerModel\n ).load_from_checkpoint(args.checkpoint)\n \n # print(model.hyperparams)\n # print(model.tokenizer)\n # print(model.model)\n # print(vars(args))\n\n # setup device\n # args.device = torch.device(args.device if torch.cuda.is_available() else \"cpu\")\n device = torch.device(args.device if torch.cuda.is_available() else \"cpu\")\n model.model.to(args.device)\n\n # generate completions\n start = perf_counter()\n generate_column(args.task, test_set, model.tokenizer, model.model, args)\n print(f\"Finished in {perf_counter() - start}s\")\n\n # save file\n 
test_set.to_csv(args.output)\n","repo_name":"matt-seb-ho/WikiWhy","sub_path":"code/baselines/gpt2_generation.py","file_name":"gpt2_generation.py","file_ext":"py","file_size_in_byte":10527,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"85"} +{"seq_id":"26960295871","text":"import cv2\r\nimport numpy as np\r\n\r\nface_cascade = cv2.CascadeClassifier(cv2.data.haarcascades+'haarcascade_frontalface_default.xml')\r\n\r\nvideo = cv2.VideoCapture('for_face_detection.mp4')\r\nwhile(video.isOpened()):\r\n ret, frame = video.read()\r\n if ret == True:\r\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\r\n face = face_cascade.detectMultiScale(gray, 1.3, 3)\r\n\r\n for(x, y, w, h) in face:\r\n frame = cv2.rectangle(frame, (x,y), (x+w, y+h), (0, 0, 255), 2)\r\n\r\n\r\n cv2.imshow('image', frame)\r\n\r\n if cv2.waitKey(20) & 0xFF == ord('e'):\r\n break\r\n\r\nvideo.release()\r\ncv2.destroyAllWindows()","repo_name":"sarmistha1619/Face-Detection","sub_path":"detect face from video/b_face_detection_from_vdo.py","file_name":"b_face_detection_from_vdo.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"9811726106","text":"import pyconll\nimport os\nimport json\n\n# stores statistics of processing\nstats = []\ndirectory = \"./to-process\"\n\n# walks at every file inside directory\nfor filename in os.listdir(path=directory):\n # loads corpus\n corpus_conll_gold = pyconll.load_from_file(f\"{directory}/{filename}\")\n\n # should have the clean conll-u files\n clean_conll = []\n\n # should have the dirty conll-u files\n dirty_conll = []\n\n # removes extension from filename\n filename_without_extension = filename[0:-7]\n\n # iterates through every token of every sentence in the golden corpus\n for sentence in corpus_conll_gold:\n sentence_has_e_pproc = False\n\n for token in sentence:\n # if the token has E_PPROC as value, put the\n # sentence in the dirty_conll list\n if token.upos == \"E_DIGIT\":\n sentence_has_e_pproc = True\n dirty_conll.append(sentence)\n break\n\n # if the sentence does not has E_PPROC as a value, it's clean\n if not sentence_has_e_pproc:\n clean_conll.append(sentence)\n\n # writes clean conllu\n with open(f\"./processed/arquivos-limpos/{filename}\", 'w', encoding='utf-8') as f:\n for clean_sentence in clean_conll:\n f.write(clean_sentence.conll())\n f.write(\"\\n\\n\")\n\n # writes dirty conllu\n with open(f\"./processed/arquivos-sujos/{filename}\", 'w', encoding='utf-8') as f:\n for dirty_sentence in dirty_conll:\n f.write(dirty_sentence.conll())\n f.write(\"\\n\\n\")\n\n # annotates stats\n stats.append({\n \"pack\": filename_without_extension,\n \"qtd_inicial_de_tweets\": len(corpus_conll_gold),\n \"qtd_de_E_DIGIT\": len(dirty_conll),\n \"qtd_final_de_tweets\": len(clean_conll)\n })\n\n\n# serializes dict with stats\nwith open(\"./processed/stats.json\", \"w\") as f:\n f.write(json.dumps(stats, sort_keys=True, indent=4))\n","repo_name":"lucasg-mm/poetisa-tools","sub_path":"dante/delete-E_PPROC/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1921,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"14705053919","text":"import sys\n\n\ntranslation_table = {\n 0: 'zero',\n 1: 'one',\n 2: 'two',\n 3: 'three',\n 4: 'four',\n 5: 'five',\n 6: 'six',\n 7: 'seven',\n 8: 'eight',\n 9: 'nine',\n}\n\ntry:\n digit = int(sys.argv[1])\n print(translation_table[digit])\nexcept 
ValueError:\n print(sys.argv[1], 'is not a number', file=sys.stderr)\n sys.exit(1)\nexcept KeyError:\n print(digit, 'is not a digit', file=sys.stderr)\n","repo_name":"jfasch/2022-01-17","sub_path":"digit.py","file_name":"digit.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"21539832991","text":"# import necessary packages\nimport smtplib\nfrom email.mime.text import MIMEText\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.base import MIMEBase\nfrom email import encoders\n\nimport requests\nimport os, shutil\nfrom datetime import date\nimport schedule as sh\nimport time\nfrom credentials import *\n\nBASEDIR = os.path.abspath(os.path.dirname(__file__))\nTODAY_DATE = date.today().strftime('%d%m%Y')\nREPORT_LINK = f'COVID-19_Report_dated_on_{TODAY_DATE}_NMMC.pdf'\n\nif not os.path.exists('./Report'):\n os.makedirs('./Report')\n\ndef get_pdf_report():\n print(\"Download Starting...\")\n downloadUrl = f'{URL}/{REPORT_LINK}'\n path = BASEDIR+\"/Report/Corona.pdf\"\n r = requests.get(downloadUrl)\n with open(path, 'wb') as f:\n f.write(r.content)\n print(\"Download complete\")\n\ndef mail_report():\n print(\"Sending Mail in Progress...\")\n subject = 'Today\\'s Corona Report of Navi Mumbai'\n\n msg = MIMEMultipart()\n msg['From'] = USER_EMAIL\n msg['To'] = \", \".join(SENDER_EMAIL)\n msg['Subject'] = subject\n\n filename= ['Report/Corona.pdf']\n for f in filename:\n attachment = open(f,'rb')\n\n part = MIMEBase('application','octet-stream')\n part.set_payload((attachment).read())\n encoders.encode_base64(part)\n part.add_header('Content-Disposition',\"attachment; filename= \"+f)\n\n msg.attach(part)\n text = msg.as_string()\n try:\n server = smtplib.SMTP('smtp.mail.yahoo.com',587)\n server.starttls()\n server.login(USER_EMAIL, USER_PASSWORD)\n\n server.sendmail(USER_EMAIL, SENDER_EMAIL, text)\n server.quit()\n print(\"Mail successfully send\")\n except smtplib.SMTPException:\n print(\"Error: unable to send email\")\n\ndef delete_report():\n path = BASEDIR+\"/Report\"\n if os.path.isdir(path):\n shutil.rmtree(path)\n print(\"Corona Report Deleted\")\n\nif __name__ == \"__main__\":\n sh.every().day.at(\"16:00\").do(get_pdf_report)\n sh.every().day.at(\"16:05\").do(mail_report)\n sh.every().day.at(\"16:06\").do(delete_report)\n while True:\n sh.run_pending()\n time.sleep(1)\n","repo_name":"vinodnimbalkar/python-playground","sub_path":"CoronaReport/send_report.py","file_name":"send_report.py","file_ext":"py","file_size_in_byte":2122,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"25142859342","text":"import os\nfrom setuptools import setup, find_packages\n\n__version__ = '1.0.2'\n\n\npackages = sorted(find_packages())\nrequires = []\nextra_requires = {\":python_version < '3.7'\": ['dataclasses']}\nscripts = []\npackage_data = {}\n\n# All executables are here\nconsole_scripts = []\n\nclassifiers = [\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\"]\n\n# description\nthis_directory = os.path.abspath(os.path.dirname(__file__))\nwith open(os.path.join(this_directory, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\n\n\nsetup(name='gzinfo',\n version=__version__,\n author='Pierre-Selim',\n author_email='pierre-selim@huard.info',\n url='https://github.com/PierreSelim/gzinfo',\n description='Retrieving archive filename 
from gz files in Python ',\n long_description=long_description,\n long_description_content_type='text/markdown',\n license='MIT Licence',\n classifiers=classifiers,\n packages=packages,\n package_data=package_data,\n install_requires=requires,\n extras_require=extra_requires,\n scripts=scripts,\n entry_points={\n 'console_scripts': console_scripts\n })\n","repo_name":"PierreSelim/gzinfo","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1236,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"9671240948","text":"import pandas as pd\nfrom sklearn.preprocessing import MinMaxScaler\n\nfrom som.experiment import Experiment\nfrom som.kohonen import KohonenMap, ExponentialLearning, LinearLearning\n\nscaler = MinMaxScaler()\ntraining = None\nROWS = 10\nCOLS = 10\nMAX_ITERATIONS = 10\nNUM_FEATURES = 9\n\n# ==========================================================================\n# Main script\n# ==========================================================================\n\ndata = pd.read_csv('datasets/glass.csv')\n\n# remove class and Id column\ndata = data.loc[:, data.columns != 'Type']\ndata = data.loc[:, data.columns != 'Id']\n\n## Exponential\nexponential_learning = ExponentialLearning(\n learning_decay=5,\n neighbourhood_decay=5,\n max_iterations=MAX_ITERATIONS,\n initial_neighbourhood_size=20,\n initial_learning_rate=0.005\n)\nchanges = {\n 'learning_decay': [1, 5, 10],\n 'initial_learning_rate': [0.001, 0.0025, 0.005],\n 'neighbourhood_decay': [1, 5, 10],\n 'initial_neighbourhood_size': [5, 10, 20]\n}\nmap = KohonenMap(rows=ROWS, cols=COLS, num_features=NUM_FEATURES)\nexperiment = Experiment(data, map, exponential_learning, changes)\nexperiment.run_all()\n\n\n## Linear\nlinearLearning = LinearLearning(\n learning_decay=1.05,\n neighbourhood_decay=1.05,\n max_iterations=MAX_ITERATIONS,\n initial_neighbourhood_size=20,\n initial_learning_rate=0.005\n)\nchanges = {\n 'learning_decay': [0.8, 1.05, 1.2],\n 'initial_learning_rate': [0.001, 0.005, 0.01],\n 'neighbourhood_decay': [0.8, 1.05, 1.2],\n 'initial_neighbourhood_size': [5, 10, 20]\n}\nmap = KohonenMap(rows=ROWS, cols=COLS, num_features=NUM_FEATURES)\nexperiment2 = Experiment(data, map, linearLearning, changes)\nexperiment2.run_all()\n\n","repo_name":"viniciusjssouza/neural-net","sub_path":"glass_som.py","file_name":"glass_som.py","file_ext":"py","file_size_in_byte":1703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"18584684980","text":"from pde import (\n ScalarField, \n CartesianGrid, \n FieldCollection, \n PDEBase, \n FieldBase, \n UnitGrid, \n CallbackTracker,\n ProgressTracker\n)\n\nfrom pde.trackers.base import TrackerCollection\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.signal import convolve2d\nfrom scipy.interpolate import Rbf\n\n\nfrom eq_generator import EqGenerator\nfrom initial import rbf_init\nfrom animation import animate_solution\n\n\nclass PolynomialPDE(PDEBase):\n def __init__(self, equation, bc = 'auto_periodic_neumann') -> None:\n super().__init__()\n\n self.equation = equation\n self.bc = bc\n\n def get_partial_derivative(self, u, derivative_str):\n bc = 'auto_periodic_neumann'\n derivative_components = derivative_str.split('_')\n order_x = int(derivative_components[2])\n order_y = int(derivative_components[4])\n for i in range(order_x):\n u = u.gradient(bc)[0]\n for i in range(order_y):\n u = u.gradient(bc)[1]\n return u\n \n def 
extract_coordinates_from_grid(self, grid):\n shape = grid.shape\n x_bounds = grid.axes_bounds[0]\n y_bounds = grid.axes_bounds[1]\n x = (grid.cell_coords[:,:,0]-x_bounds[0])/(x_bounds[1]-x_bounds[0])\n y = (grid.cell_coords[:,:,1]-y_bounds[0])/(y_bounds[1]-y_bounds[0])\n return x, y\n\n\n\n def get_term(self, u, term):\n result = term[0]\n for t in term[1:]:\n result *= self.get_partial_derivative(u, t[0])**t[1]\n return result\n \n def get_initial_state(self, grid):\n \"\"\"prepare a useful initial state\"\"\"\n x, y = self.extract_coordinates_from_grid(grid)\n\n # initialize fields\n u = ScalarField(grid, rbf_init((64, 64)), label=\"u\")\n #u = ScalarField(grid, self.rbf_init(x, y, period=10), label=\"u\")\n return u, x, y\n \n def evolution_rate(self, state: FieldBase, t: float = 0) -> FieldBase:\n terms = [self.get_term(state, term) for term in self.equation]\n derivative = sum(terms)\n\n return sum(terms)\n \n\n\ndef solve_equation(equation: str, save_interval=0.01, tmax = 1):\n\n eq = EqGenerator.parse_equation_from_string(equation)\n print(eq)\n\n p = PolynomialPDE(eq)\n\n grid = CartesianGrid([[0,8],[0,8]],[64, 64], periodic=True)\n\n state, x, y = p.get_initial_state(grid)\n\n\n # setup saving equation states\n data = []\n times = []\n def save_state(state, time):\n data.append(state.copy().data)\n times.append(time)\n\n\n tracker_callback = CallbackTracker(save_state, interval=save_interval)\n tracker_progress = ProgressTracker(interval=save_interval)\n tracker = TrackerCollection([tracker_callback, tracker_progress])\n\n\n # solve\n sol = p.solve(state, t_range=(0, tmax), tracker=tracker)\n data = np.stack(data)\n times = np.stack(times)\n flat_points = np.hstack([x.reshape(-1, 1), y.reshape(-1,1)])\n return data, times, flat_points\n\n\nif __name__ == \"__main__\":\n\n np.random.seed(42)\n\n eq = \"1.097*u_x_1_y_0^1+2.945*u_x_0_y_1^1+0.219*u_x_2_y_0^1+0.32*u_x_0_y_2^1+0.786*u_x_1_y_1^1\"\n # eq = \"0.912*u_x_1_y_0^1+1.123*u_x_1_y_0^1+0.421*u_x_2_y_0^1+0.012*u_x_0_y_2^1\"\n # eq = \"0.234*u_x_1_y_0^1+0.234*u_x_1_y_0^1+1.234*u_x_2_y_0^1+2.096*u_x_0_y_2^1\"\n\n # run solver\n data, times, flatpoints = solve_equation(eq)\n\n # save numpy arrays\n np.save(\"data/eq_data.npy\", data)\n np.save(\"data/eq_times.npy\", times)\n np.save(\"data/eq_points.npy\", flatpoints)\n \n # to read arrays use\n # data = np.load(\"eq_data.npy\")\n # times = np.load(\"eq_times.npy\")\n\n # make a gif of the simulation\n animate_solution(data)\n\n # visualize start and end state\n fig, ax = plt.subplots(1,2)\n\n ax[0].imshow(data[0], label=f\"u at t = 0\", vmin=-0.5, vmax=0.5)\n ax[1].imshow(data[-1], label=f\"u at t = {times[-1]}\", vmin=-0.5, vmax=0.5)\n\n plt.show()\n\n\n","repo_name":"LSX-UniWue/TaylorPDENet","sub_path":"models/utils/solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":3909,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"23245588799","text":"import argparse\nimport os\nimport sys\nfrom src import freeway_network, intersection_network, grid_network, __version__, __label__\nfrom test import freewaytest, intersectiontest, gridtest\n\nfrom src.base.xmlCommenter import writeDocumentation\n\n\ndef main():\n arg_parser = argparse.ArgumentParser()\n parser = arg_parser.add_subparsers()\n\n freeway_parser = parser.add_parser(\"freeway\", help=\"\")\n intersection_parser = parser.add_parser(\"intersection\", help=\"\")\n grid_chicago_parser = parser.add_parser(\"grid_chicago\", help=\"\")\n grid_manhattan_parser 
= parser.add_parser(\"grid_manhattan\", help=\"\")\n\n # Create Options for freeway\n freeway_parser.add_argument(\"-b\", \"--bidirectional\", default=False, action='store_true',\n help=\"Generates both directions (default: false)\")\n freeway_parser.add_argument(\"-l\", \"--lanes\", default=4, type=int, help=\"Number of lanes per direction (default: 4)\")\n freeway_parser.add_argument(\"-len\", \"--length\", default=250, type=float,\n help=\"Length for each lane (in meters, default:250)\")\n freeway_parser.add_argument(\"-s\", \"--speed\", default=27.778, type=float,\n help=\"The maximum speed for vehicles (default: 27.778 m/s)\")\n freeway_parser.add_argument(\"-t\", \"--traffic\", default=False, action='store_true',\n help=\"If true, sss4s will generate artificial traffic for the scenario (default: false)\")\n\n # Create Options for Intersection\n intersection_parser.add_argument(\"-m\", \"--multiple\", default=1, type=int)\n intersection_parser.add_argument(\"-x-len\", \"--intersection_x_length\", default=100, type=float)\n intersection_parser.add_argument(\"-y-len\", \"--intersection_y_length\", default=100, type=float)\n intersection_parser.add_argument(\"-s\", \"--speed\", default=13.89, type=float)\n intersection_parser.add_argument(\"-l\", \"--lanes\", default=1, type=int)\n intersection_parser.add_argument(\"-tl-all\", \"--traffic_light_all\", action='store_true')\n intersection_parser.add_argument(\"-tl\", \"--traffic_light\", default=[], nargs='+')\n intersection_parser.add_argument(\"-tl-notall\", \"--traffic_light_notall\", default=[], nargs='+')\n intersection_parser.add_argument(\"-p\", \"--polygon\", action='store_true')\n intersection_parser.add_argument(\"-pm\", \"--polygon_margin\", default=5, type=int)\n intersection_parser.add_argument(\"-t\", \"--traffic\", default=False, action='store_true',\n help=\"If true, sss4s will generate artificial traffic for the scenario (default: \"\n \"false)\")\n\n # Create Options for Grid_Chicago\n grid_chicago_parser.add_argument(\"-x-len\", \"--grid_x_length\", default=160, type=float)\n grid_chicago_parser.add_argument(\"-y-len\", \"--grid_y_length\", default=70, type=float)\n grid_chicago_parser.add_argument(\"-x-dir\", \"--grid_x_direction\", default=4, type=int)\n grid_chicago_parser.add_argument(\"-y-dir\", \"--grid_y_direction\", default=4, type=int)\n grid_chicago_parser.add_argument(\"-l\", \"--lanes\", default=1, type=int)\n grid_chicago_parser.add_argument(\"-tl-all\", \"--traffic_light_all\", action='store_true')\n grid_chicago_parser.add_argument(\"-tl\", \"--traffic_light\", default=[], nargs='+')\n grid_chicago_parser.add_argument(\"-tl-notall\", \"--traffic_light_notall\", default=[], nargs='+')\n grid_chicago_parser.add_argument(\"-p\", \"--polygon\", action='store_true')\n grid_chicago_parser.add_argument(\"-s\", \"--speed\", default=13.89, type=float)\n grid_chicago_parser.add_argument(\"-pm\", \"--polygon_margin\", default=5, type=int)\n grid_chicago_parser.add_argument(\"-t\", \"--traffic\", default=False, action='store_true',\n help=\"If true, sss4s will generate artificial traffic for the scenario (default: false)\")\n\n # Create Options for Grid_Manhattan\n grid_manhattan_parser.add_argument(\"-len\", \"--grid_length_bothdirections\", default=60, type=float)\n grid_manhattan_parser.add_argument(\"-x-dir\", \"--grid_x_direction\", default=4, type=int)\n grid_manhattan_parser.add_argument(\"-y-dir\", \"--grid_y_direction\", default=4, type=int)\n grid_manhattan_parser.add_argument(\"-l\", \"--lanes\", default=1, 
type=int)\n grid_manhattan_parser.add_argument(\"-tl-all\", \"--traffic_light_all\", action='store_true')\n grid_manhattan_parser.add_argument(\"-tl\", \"--traffic_light\", default=[], nargs='+')\n grid_manhattan_parser.add_argument(\"-tl-notall\", \"--traffic_light_notall\", default=[], nargs='+')\n grid_manhattan_parser.add_argument(\"-p\", \"--polygon\", action='store_true')\n grid_manhattan_parser.add_argument(\"-s\", \"--speed\", default=13.89, type=float)\n grid_manhattan_parser.add_argument(\"-pm\", \"--polygon_margin\", default=5, type=int)\n grid_manhattan_parser.add_argument(\"-t\", \"--traffic\", default=False, action='store_true',\n help=\"If true, sss4s will generate artificial traffic for the scenario (default: false)\")\n\n args = arg_parser.parse_args()\n\n dir_road_networks = \"road_networks\"\n if not os.path.exists(dir_road_networks):\n os.makedirs(dir_road_networks)\n\n if len(sys.argv) == 1:\n sys.exit(\"Use --help to get the list of options.\")\n if sys.argv[1] == \"freeway\":\n print(\"Generating freeway ...\")\n freeway_test = freewaytest.TestFreeway(args.lanes, args.length, args.speed)\n freeway_test.test_freeway()\n freeway_network.create_freeway(args.bidirectional, args.lanes, args.length, args.speed, args.traffic)\n\n if sys.argv[1] == \"intersection\":\n print(\"Generating intersection ...\")\n intersection_test = intersectiontest.TestIntersection(args.intersection_x_length,\n args.intersection_y_length, args.lanes,\n args.multiple, args.speed,\n args.polygon)\n intersection_test.test_intersection()\n intersection_network.create_intersection(\"intersection\", args.intersection_x_length, args.intersection_y_length,\n args.lanes, args.traffic_light_all, args.traffic_light,\n args.traffic_light_notall, args.multiple, args.speed, args.polygon,\n args.polygon_margin,\n args.traffic)\n\n if sys.argv[1] == \"grid_chicago\":\n print(\"Generating Chicago grid ...\")\n grid_test = gridtest.TestGrid(args.grid_x_length, args.grid_y_length, args.grid_x_direction,\n args.grid_y_direction, args.lanes, args.speed)\n grid_test.test_grid()\n grid_network.create_grid(\"grid\", args.grid_x_length, args.grid_y_length, args.grid_x_direction,\n args.grid_y_direction, args.lanes, args.traffic_light_all, args.traffic_light,\n args.traffic_light_notall, args.speed, args.polygon, args.polygon_margin, args.traffic)\n\n if sys.argv[1] == \"grid_manhattan\":\n print(\"Generating Manhattan grid ...\")\n grid_network.create_grid(\"grid\", args.grid_length_bothdirections, args.grid_length_bothdirections,\n args.grid_x_direction, args.grid_y_direction, args.lanes, args.traffic_light_all,\n args.traffic_light, args.traffic_light_notall, args.speed, args.polygon,\n args.polygon_margin, args.traffic)\n\n # Write sss4s parameters to XML\n writeDocumentation(sys.argv[1], args)\n\n\nif __name__ == \"__main__\":\n print(__label__ + \" Version \" + str(__version__) + \"\\n\" +\n \"Copyright (C) 2021 \\n\" +\n \"License GPL-2.0-or-later: https://spdx.org/licenses/GPL-2.0-or-later.html\\n\")\n main()\n","repo_name":"veins/sss4s","sub_path":"networks/sss4s.py","file_name":"sss4s.py","file_ext":"py","file_size_in_byte":7975,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"85"} +{"seq_id":"27651208850","text":"import copy\nimport logging\nfrom pathlib import Path\n\nimport ignite\nimport numpy as np\nimport torch\nfrom ignite.contrib.handlers.mlflow_logger import (\n MLflowLogger,\n OutputHandler,\n global_step_from_engine,\n)\nfrom 
ignite.contrib.handlers.tqdm_logger import ProgressBar\nfrom ignite.engine import Events, create_supervised_evaluator, create_supervised_trainer\nfrom ignite.handlers import EarlyStopping, ModelCheckpoint\nfrom ignite.metrics import RunningAverage\nfrom torch.utils.data import DataLoader\n\nfrom ..handlers.time_limit import TimeLimit\n\nlog = logging.getLogger(__name__)\n\n__all__ = [\"NetworkTrain\"]\n\n\nclass NetworkTrain:\n    \"\"\"Create a trainer for a supervised PyTorch model.\n\n    Args:\n        loss_fn (callable): Loss function used to train.\n            Accepts an instance of loss functions at https://pytorch.org/docs/stable/nn.html#loss-functions\n        epochs (int, optional): Max epochs to train.\n        seed (int, optional): Random seed for training.\n        optimizer (torch.optim, optional): Optimizer used to train.\n            Accepts optimizers at https://pytorch.org/docs/stable/optim.html\n        optimizer_params (dict, optional): Parameters for optimizer.\n        train_data_loader_params (dict, optional): Parameters for data loader for training.\n            Accepts args at https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader\n        val_data_loader_params (dict, optional): Parameters for data loader for validation.\n            Accepts args at https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader\n        evaluation_metrics (dict, optional): Metrics to compute for evaluation.\n            Accepts dict of metrics at https://pytorch.org/ignite/metrics.html\n        evaluate_train_data (str, optional): When to compute evaluation_metrics using training dataset.\n            Accepts events at https://pytorch.org/ignite/engine.html#ignite.engine.Events\n        evaluate_val_data (str, optional): When to compute evaluation_metrics using validation dataset.\n            Accepts events at https://pytorch.org/ignite/engine.html#ignite.engine.Events\n        progress_update (bool, optional): Whether to show progress bar using tqdm package.\n        scheduler (ignite.contrib.handlers.param_scheduler.ParamScheduler, optional): Param scheduler.\n            Accepts a ParamScheduler at\n            https://pytorch.org/ignite/contrib/handlers.html#module-ignite.contrib.handlers.param_scheduler\n        scheduler_params (dict, optional): Parameters for scheduler.\n        model_checkpoint (ignite.handlers.ModelCheckpoint, optional): Model Checkpoint.\n            Accepts a ModelCheckpoint at https://pytorch.org/ignite/handlers.html#ignite.handlers.ModelCheckpoint\n        model_checkpoint_params (dict, optional): Parameters for ModelCheckpoint at\n            https://pytorch.org/ignite/handlers.html#ignite.handlers.ModelCheckpoint\n        early_stopping_params (dict, optional): Parameters for EarlyStopping at\n            https://pytorch.org/ignite/handlers.html#ignite.handlers.EarlyStopping\n        time_limit (int, optional): Time limit for training in seconds.\n        train_dataset_size_limit (int, optional): If specified, only the subset of training dataset is used.\n            Useful for quick preliminary check before using the whole dataset.\n        val_dataset_size_limit (int, optional): If specified, only the subset of validation dataset is used.\n            Useful for quick preliminary check before using the whole dataset.\n        cudnn_deterministic (bool, optional): Value for torch.backends.cudnn.deterministic.\n            See https://pytorch.org/docs/stable/notes/randomness.html for details.\n        cudnn_benchmark (bool, optional): Value for torch.backends.cudnn.benchmark.\n            See https://pytorch.org/docs/stable/notes/randomness.html for details.\n        mlflow_logging (bool, optional): If True and MLflow is installed, MLflow logging is enabled.\n\n    Returns:\n        trainer (callable): a callable to train a PyTorch model.\n\n    \"\"\"\n\n    def __init__(\n
self,\n loss_fn=None,\n epochs=None,\n seed=None,\n optimizer=None,\n optimizer_params=dict(),\n train_data_loader_params=dict(),\n val_data_loader_params=dict(),\n evaluation_metrics=None,\n evaluate_train_data=None,\n evaluate_val_data=None,\n progress_update=None,\n scheduler=None,\n scheduler_params=dict(),\n model_checkpoint=None,\n model_checkpoint_params=dict(),\n early_stopping_params=dict(),\n time_limit=None,\n train_dataset_size_limit=None,\n val_dataset_size_limit=None,\n cudnn_deterministic=None,\n cudnn_benchmark=None,\n mlflow_logging=True,\n train_params=dict(),\n ):\n self.train_params = dict(\n loss_fn=loss_fn,\n epochs=epochs,\n seed=seed,\n optimizer=optimizer,\n optimizer_params=optimizer_params,\n train_data_loader_params=train_data_loader_params,\n val_data_loader_params=val_data_loader_params,\n evaluation_metrics=evaluation_metrics,\n evaluate_train_data=evaluate_train_data,\n evaluate_val_data=evaluate_val_data,\n progress_update=progress_update,\n scheduler=scheduler,\n scheduler_params=scheduler_params,\n model_checkpoint=model_checkpoint,\n model_checkpoint_params=model_checkpoint_params,\n early_stopping_params=early_stopping_params,\n time_limit=time_limit,\n train_dataset_size_limit=train_dataset_size_limit,\n val_dataset_size_limit=val_dataset_size_limit,\n cudnn_deterministic=cudnn_deterministic,\n cudnn_benchmark=cudnn_benchmark,\n )\n self.train_params.update(train_params)\n self.mlflow_logging = mlflow_logging\n\n def __call__(self, model, train_dataset, val_dataset=None, **_):\n \"\"\"Train a PyTorch model.\n\n Args:\n model (torch.nn.Module): PyTorch model to train.\n train_dataset (torch.utils.data.Dataset): Dataset used to train.\n val_dataset (torch.utils.data.Dataset, optional): Dataset used to validate.\n\n Returns:\n trained_model (torch.nn.Module): Trained PyTorch model.\n \"\"\"\n assert train_dataset is not None\n train_params = self.train_params\n mlflow_logging = self.mlflow_logging\n\n if mlflow_logging:\n try:\n import mlflow # NOQA\n except ImportError:\n log.warning(\"Failed to import mlflow. 
MLflow logging is disabled.\")\n mlflow_logging = False\n\n loss_fn = train_params.get(\"loss_fn\")\n assert loss_fn\n epochs = train_params.get(\"epochs\")\n seed = train_params.get(\"seed\")\n optimizer = train_params.get(\"optimizer\")\n assert optimizer\n optimizer_params = train_params.get(\"optimizer_params\", dict())\n train_dataset_size_limit = train_params.get(\"train_dataset_size_limit\")\n if train_dataset_size_limit:\n train_dataset = PartialDataset(train_dataset, train_dataset_size_limit)\n log.info(\"train dataset size is set to {}\".format(len(train_dataset)))\n\n val_dataset_size_limit = train_params.get(\"val_dataset_size_limit\")\n if val_dataset_size_limit and (val_dataset is not None):\n val_dataset = PartialDataset(val_dataset, val_dataset_size_limit)\n log.info(\"val dataset size is set to {}\".format(len(val_dataset)))\n\n train_data_loader_params = train_params.get(\"train_data_loader_params\", dict())\n val_data_loader_params = train_params.get(\"val_data_loader_params\", dict())\n evaluation_metrics = train_params.get(\"evaluation_metrics\")\n evaluate_train_data = train_params.get(\"evaluate_train_data\")\n evaluate_val_data = train_params.get(\"evaluate_val_data\")\n progress_update = train_params.get(\"progress_update\")\n\n scheduler = train_params.get(\"scheduler\")\n scheduler_params = train_params.get(\"scheduler_params\", dict())\n\n model_checkpoint = train_params.get(\"model_checkpoint\")\n model_checkpoint_params = train_params.get(\"model_checkpoint_params\")\n early_stopping_params = train_params.get(\"early_stopping_params\")\n time_limit = train_params.get(\"time_limit\")\n\n cudnn_deterministic = train_params.get(\"cudnn_deterministic\")\n cudnn_benchmark = train_params.get(\"cudnn_benchmark\")\n\n if seed:\n torch.manual_seed(seed)\n np.random.seed(seed)\n if cudnn_deterministic:\n torch.backends.cudnn.deterministic = cudnn_deterministic\n if cudnn_benchmark:\n torch.backends.cudnn.benchmark = cudnn_benchmark\n\n device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n model.to(device)\n optimizer_ = optimizer(model.parameters(), **optimizer_params)\n trainer = create_supervised_trainer(\n model, optimizer_, loss_fn=loss_fn, device=device\n )\n\n train_data_loader_params.setdefault(\"shuffle\", True)\n train_data_loader_params.setdefault(\"drop_last\", True)\n train_data_loader_params[\"batch_size\"] = _clip_batch_size(\n train_data_loader_params.get(\"batch_size\", 1), train_dataset, \"train\"\n )\n train_loader = DataLoader(train_dataset, **train_data_loader_params)\n\n RunningAverage(output_transform=lambda x: x, alpha=0.98).attach(\n trainer, \"ema_loss\"\n )\n\n RunningAverage(output_transform=lambda x: x, alpha=2 ** (-1022)).attach(\n trainer, \"batch_loss\"\n )\n\n if scheduler:\n\n class ParamSchedulerSavingAsMetric(\n ParamSchedulerSavingAsMetricMixIn, scheduler\n ):\n pass\n\n cycle_epochs = scheduler_params.pop(\"cycle_epochs\", 1)\n scheduler_params.setdefault(\n \"cycle_size\", int(cycle_epochs * len(train_loader))\n )\n scheduler_params.setdefault(\"param_name\", \"lr\")\n scheduler_ = ParamSchedulerSavingAsMetric(optimizer_, **scheduler_params)\n trainer.add_event_handler(Events.ITERATION_STARTED, scheduler_)\n\n if evaluate_train_data:\n evaluator_train = create_supervised_evaluator(\n model, metrics=evaluation_metrics, device=device\n )\n\n if evaluate_val_data:\n val_data_loader_params[\"batch_size\"] = _clip_batch_size(\n val_data_loader_params.get(\"batch_size\", 1), val_dataset, \"val\"\n )\n val_loader = 
DataLoader(val_dataset, **val_data_loader_params)\n evaluator_val = create_supervised_evaluator(\n model, metrics=evaluation_metrics, device=device\n )\n\n if model_checkpoint_params:\n assert isinstance(model_checkpoint_params, dict)\n minimize = model_checkpoint_params.pop(\"minimize\", True)\n save_interval = model_checkpoint_params.get(\"save_interval\", None)\n if not save_interval:\n model_checkpoint_params.setdefault(\n \"score_function\", get_score_function(\"ema_loss\", minimize=minimize)\n )\n model_checkpoint_params.setdefault(\"score_name\", \"ema_loss\")\n mc = model_checkpoint(**model_checkpoint_params)\n trainer.add_event_handler(Events.EPOCH_COMPLETED, mc, {\"model\": model})\n\n if early_stopping_params:\n assert isinstance(early_stopping_params, dict)\n metric = early_stopping_params.pop(\"metric\", None)\n assert (metric is None) or (metric in evaluation_metrics)\n minimize = early_stopping_params.pop(\"minimize\", False)\n if metric:\n assert (\n \"score_function\" not in early_stopping_params\n ), \"Remove either 'metric' or 'score_function' from early_stopping_params: {}\".format(\n early_stopping_params\n )\n early_stopping_params[\"score_function\"] = get_score_function(\n metric, minimize=minimize\n )\n\n es = EarlyStopping(trainer=trainer, **early_stopping_params)\n if evaluate_val_data:\n evaluator_val.add_event_handler(Events.COMPLETED, es)\n elif evaluate_train_data:\n evaluator_train.add_event_handler(Events.COMPLETED, es)\n elif early_stopping_params:\n log.warning(\n \"Early Stopping is disabled because neither \"\n \"evaluate_val_data nor evaluate_train_data is set True.\"\n )\n\n if time_limit:\n assert isinstance(time_limit, (int, float))\n tl = TimeLimit(limit_sec=time_limit)\n trainer.add_event_handler(Events.ITERATION_COMPLETED, tl)\n\n pbar = None\n if progress_update:\n if not isinstance(progress_update, dict):\n progress_update = dict()\n progress_update.setdefault(\"persist\", True)\n progress_update.setdefault(\"desc\", \"\")\n pbar = ProgressBar(**progress_update)\n pbar.attach(trainer, [\"ema_loss\"])\n\n else:\n\n def log_train_metrics(engine):\n log.info(\n \"[Epoch: {} | {}]\".format(engine.state.epoch, engine.state.metrics)\n )\n\n trainer.add_event_handler(Events.EPOCH_COMPLETED, log_train_metrics)\n\n if evaluate_train_data:\n\n def log_evaluation_train_data(engine):\n evaluator_train.run(train_loader)\n train_report = _get_report_str(engine, evaluator_train, \"Train Data\")\n if pbar:\n pbar.log_message(train_report)\n else:\n log.info(train_report)\n\n eval_train_event = (\n Events[evaluate_train_data]\n if isinstance(evaluate_train_data, str)\n else Events.EPOCH_COMPLETED\n )\n trainer.add_event_handler(eval_train_event, log_evaluation_train_data)\n\n if evaluate_val_data:\n\n def log_evaluation_val_data(engine):\n evaluator_val.run(val_loader)\n val_report = _get_report_str(engine, evaluator_val, \"Val Data\")\n if pbar:\n pbar.log_message(val_report)\n else:\n log.info(val_report)\n\n eval_val_event = (\n Events[evaluate_val_data]\n if isinstance(evaluate_val_data, str)\n else Events.EPOCH_COMPLETED\n )\n trainer.add_event_handler(eval_val_event, log_evaluation_val_data)\n\n if mlflow_logging:\n mlflow_logger = MLflowLogger()\n\n logging_params = {\n \"train_n_samples\": len(train_dataset),\n \"train_n_batches\": len(train_loader),\n \"optimizer\": _name(optimizer),\n \"loss_fn\": _name(loss_fn),\n \"pytorch_version\": torch.__version__,\n \"ignite_version\": ignite.__version__,\n }\n 
logging_params.update(_loggable_dict(optimizer_params, \"optimizer\"))\n logging_params.update(_loggable_dict(train_data_loader_params, \"train\"))\n if scheduler:\n logging_params.update({\"scheduler\": _name(scheduler)})\n logging_params.update(_loggable_dict(scheduler_params, \"scheduler\"))\n\n if evaluate_val_data:\n logging_params.update(\n {\n \"val_n_samples\": len(val_dataset),\n \"val_n_batches\": len(val_loader),\n }\n )\n logging_params.update(_loggable_dict(val_data_loader_params, \"val\"))\n\n mlflow_logger.log_params(logging_params)\n\n batch_metric_names = [\"batch_loss\", \"ema_loss\"]\n if scheduler:\n batch_metric_names.append(scheduler_params.get(\"param_name\"))\n\n mlflow_logger.attach(\n trainer,\n log_handler=OutputHandler(\n tag=\"step\",\n metric_names=batch_metric_names,\n global_step_transform=global_step_from_engine(trainer),\n ),\n event_name=Events.ITERATION_COMPLETED,\n )\n\n if evaluate_train_data:\n mlflow_logger.attach(\n evaluator_train,\n log_handler=OutputHandler(\n tag=\"train\",\n metric_names=list(evaluation_metrics.keys()),\n global_step_transform=global_step_from_engine(trainer),\n ),\n event_name=Events.COMPLETED,\n )\n if evaluate_val_data:\n mlflow_logger.attach(\n evaluator_val,\n log_handler=OutputHandler(\n tag=\"val\",\n metric_names=list(evaluation_metrics.keys()),\n global_step_transform=global_step_from_engine(trainer),\n ),\n event_name=Events.COMPLETED,\n )\n\n trainer.run(train_loader, max_epochs=epochs)\n\n try:\n if pbar and pbar.pbar:\n pbar.pbar.close()\n except Exception as e:\n log.error(e, exc_info=True)\n\n model = load_latest_model(model_checkpoint_params)(model)\n\n return model\n\n\ndef get_score_function(metric, minimize=False):\n def _score_function(engine):\n m = engine.state.metrics.get(metric)\n return -m if minimize else m\n\n return _score_function\n\n\ndef load_latest_model(model_checkpoint_params=None):\n if model_checkpoint_params and \"model_checkpoint_params\" in model_checkpoint_params:\n model_checkpoint_params = model_checkpoint_params.get(\"model_checkpoint_params\")\n\n def _load_latest_model(model=None):\n if model_checkpoint_params:\n try:\n dirname = model_checkpoint_params.get(\"dirname\")\n assert dirname\n dir_glob = Path(dirname).glob(\"*.pth\")\n files = [str(p) for p in dir_glob if p.is_file()]\n if len(files) >= 1:\n model_path = sorted(files)[-1]\n log.info(\"Model path: {}\".format(model_path))\n loaded = torch.load(model_path)\n save_as_state_dict = model_checkpoint_params.get(\n \"save_as_state_dict\", True\n )\n if save_as_state_dict:\n assert model\n model.load_state_dict(loaded)\n else:\n model = loaded\n else:\n log.warning(\"Model not found at: {}\".format(dirname))\n except Exception as e:\n log.error(e, exc_info=True)\n return model\n\n return _load_latest_model\n\n\ndef _name(obj):\n return getattr(obj, \"__name__\", None) or getattr(obj.__class__, \"__name__\", \"_\")\n\n\ndef _clip_batch_size(batch_size, dataset, tag=\"\"):\n dataset_size = len(dataset)\n if batch_size > dataset_size:\n log.warning(\n \"[{}] batch size ({}) is clipped to dataset size ({})\".format(\n tag, batch_size, dataset_size\n )\n )\n return dataset_size\n else:\n return batch_size\n\n\ndef _get_report_str(engine, evaluator, tag=\"\"):\n report_str = \"[Epoch: {} | {} | Metrics: {}]\".format(\n engine.state.epoch, tag, evaluator.state.metrics\n )\n return report_str\n\n\ndef _loggable_dict(d, prefix=None):\n return {\n (\"{}_{}\".format(prefix, k) if prefix else k): (\n \"{}\".format(v) if isinstance(v, (tuple, 
list, dict, set)) else v\n        )\n        for k, v in d.items()\n    }\n\n\nclass ParamSchedulerSavingAsMetricMixIn:\n    \"\"\"Base code:\n    https://github.com/pytorch/ignite/blob/v0.2.1/ignite/contrib/handlers/param_scheduler.py#L49\n    https://github.com/pytorch/ignite/blob/v0.2.1/ignite/contrib/handlers/param_scheduler.py#L163\n    \"\"\"\n\n    def __call__(self, engine, name=None):\n\n        if self.event_index != 0 and self.event_index % self.cycle_size == 0:\n            self.event_index = 0\n            self.cycle_size *= self.cycle_mult\n            self.cycle += 1\n            self.start_value *= self.start_value_mult\n            self.end_value *= self.end_value_mult\n\n        value = self.get_param()\n\n        for param_group in self.optimizer_param_groups:\n            param_group[self.param_name] = value\n\n        if name is None:\n            name = self.param_name\n\n        if self.save_history:\n            if not hasattr(engine.state, \"param_history\"):\n                setattr(engine.state, \"param_history\", {})\n            engine.state.param_history.setdefault(name, [])\n            values = [pg[self.param_name] for pg in self.optimizer_param_groups]\n            engine.state.param_history[name].append(values)\n\n        self.event_index += 1\n\n        if not hasattr(engine.state, \"metrics\"):\n            setattr(engine.state, \"metrics\", {})\n        engine.state.metrics[self.param_name] = value  # Save as a metric\n\n\nclass PartialDataset:\n    def __init__(self, dataset, size):\n        size = int(size)\n        assert hasattr(dataset, \"__getitem__\")\n        assert hasattr(dataset, \"__len__\")\n        assert dataset.__len__() >= size\n        self.dataset = dataset\n        self.size = size\n\n    def __len__(self):\n        return self.size\n\n    def __getitem__(self, item):\n        return self.dataset[item]\n\n\nclass CopiedPartialDataset:\n    def __init__(self, dataset, size):\n        size = int(size)\n        assert hasattr(dataset, \"__getitem__\")\n        assert hasattr(dataset, \"__len__\")\n        assert dataset.__len__() >= size\n        self.dataset = [copy.deepcopy(dataset[i]) for i in range(size)]\n        self.size = size\n\n    def __len__(self):\n        return self.size\n\n    def __getitem__(self, item):\n        return self.dataset[item]\n\n\nclass GetPartialDataset:\n    def __init__(self, size):\n        self.size = size\n\n    def __call__(self, dataset):\n        return CopiedPartialDataset(dataset, self.size)\n","repo_name":"Minyus/pipelinex","sub_path":"src/pipelinex/extras/ops/ignite/declaratives/declarative_trainer.py","file_name":"declarative_trainer.py","file_ext":"py","file_size_in_byte":22425,"program_lang":"python","lang":"en","doc_type":"code","stars":212,"dataset":"github-code","pt":"85"} +{"seq_id":"4043809366","text":"class MinStack:\n\n    def __init__(self):\n        \"\"\"\n        initialize your data structure here.\n        \"\"\"\n        self.items = []\n        self.max = 100\n        self.n = 0\n\n    def push(self, val: int) -> None:\n        if self.n >= self.max:\n            return None\n        self.items.append(val)\n        self.n += 1\n\n    def pop(self) -> None:\n        if self.n == 0:\n            return\n        self.n -= 1\n\n    def top(self) -> int:\n        if self.n == 0:\n            return None\n        # peek at the top element without modifying the stack\n        cur = self.items[self.n - 1]\n        return cur\n\n    def getMin(self) -> int:\n        min = self.items[0]\n        for i in range(0, self.n):\n            if min > self.items[i]:\n                min = self.items[i]\n        return min\n\n\nif __name__ == '__main__':\n    obj = MinStack()\n    obj.push(1)\n    obj.push(2)\n    obj.push(3)\n    obj.push(-1)\n    param_4 = obj.getMin()\n    print(param_4)\n    obj.pop()\n    param_3 = obj.top()\n    print(param_3)\n\n# Your MinStack object will be instantiated and called as such:\n# obj = MinStack()\n# obj.push(val)\n# obj.pop()\n# param_3 = obj.top()\n# param_4 = 
obj.getMin()\n","repo_name":"ykdsg/myPython","sub_path":"geekbang/algo/08_stack/MinStack.py","file_name":"MinStack.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"9850642427","text":"from src.character.character_story import CharacterStory, Fashion, National, Personality\nfrom src.event import Event, EventType, RollToEvent\nfrom src.events import Fact, Friends, Romantic\nfrom src.family import Family\nfrom src.utils import roll_6, roll_10\nfrom src.utils.human import Sex\n\nclass CharacterFactory:\n    @staticmethod\n    def make_character(name: str, sex: Sex) -> CharacterStory:\n        character: CharacterStory = CharacterStory(name, sex)\n        character.set_fashion(Fashion())\n        character.set_national(National())\n        character.set_family(Family())\n        character.set_personality(Personality())\n\n        age: int = roll_6() + roll_6()\n        character.set_age(age)\n\n        for year in range(age):\n            event_type = RollToEvent[roll_10()]\n            event = Event()\n            if event_type == EventType.FACT:\n                event = Fact()\n            elif event_type == EventType.FRIENDS:\n                event = Friends()\n            elif event_type == EventType.ROMANTIC:\n                event = Romantic()\n            character.add_events(year, event)\n\n        return character\n","repo_name":"Resinchen/CharacterGeneratorCyberpunk2020","sub_path":"src/character_factory.py","file_name":"character_factory.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"31887523239","text":"#!/usr/bin/env python3\nimport numpy, pickle, glob, sys, os\nfrom glbase3 import *\nimport matplotlib.pyplot as plot\n\nsys.path.append('../')\nimport shared\n\n'''\n\nCollect all the peptide hits using the models defined in stage 3.\n\n'''\n\n[os.remove(f) for f in glob.glob('model_hits/*.tsv')]\n[os.remove(f) for f in glob.glob('model_hits_glbs/*.glb')]\n\nfinal_results = {}\nmodel_matrix = glload('../3.model/AUCtable.glb')\ndynamicE = {d['domain']: float(d['e']) for d in model_matrix}\n\n#########\n\nfor species in glob.glob('search_all/domtbl/*.tsv.gz'):\n    try:\n        hmmer_search = genelist(filename=species, format=format.hmmer_domtbl, gzip=True)\n    except IndexError:\n        print(f'ERROR! 
{species} IndexError')\n continue\n\n species = os.path.split(species)[1].replace('.tsv.gz', '')\n if species[0] == '_':\n continue\n\n matches = shared.get_dynamic_e(hmmer_search, dynamicE)\n\n if matches: # Sometimes it's empty;\n gl = genelist()\n gl.load_list(matches)\n gl = gl.removeDuplicates('unq_key') # unq_key = ensp-domain\n gl = gl.getColumns(['ensp', 'e', 'domain', 'len', 'dom_loc'])\n gl.sort('ensp')\n gl.saveTSV('model_hits/{0}.matches.tsv'.format(species), key_order=['ensp', 'domain', 'e'])\n gl.save('model_hits_glbs/{0}.matches.glb'.format(species))\n","repo_name":"oaxiom/episcan","sub_path":"6.all_species/1.get_dynamic_hits.py","file_name":"1.get_dynamic_hits.py","file_ext":"py","file_size_in_byte":1317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"39080709665","text":"import tensorflow as tf\n\nhyp = {\n 'degrees': 0.,\n 'translate': 0.2,\n 'scale': 0.9,\n 'shear': 0.,\n 'perspective': 0.,\n 'mixup': 0.15,\n 'paste_in': 0.15,\n 'fliplr': 0.5,\n 'hsv_h': 0.015,\n 'hsv_s': 0.7,\n 'hsv_v': 0.4\n}\n\nimg_size = 640\nmosaic_border = [-img_size // 2, -img_size // 2]\n\nnl = 3 # number of detection layer\nna = 3 # number of anchors per detection layer\nnc = 80 # number of classes\nanchor_t = 4.\nweight_decay = 0.0005\n\ngr = 1.0\ncn = 0.0\ncp = 1.0\n\nbalance = tf.constant([4.0, 1.0, 0.4]) #weights for the three detection layers\nloss_box = 0.05\nloss_obj = 0.7\nloss_cls = 0.3\n\nbatch_size = 32\nval_batch_size = 8\nstride = [img_size//80, img_size//40, img_size//20]\nstride = tf.constant(stride)\n\nanchors_constant = tf.constant(\n [[[ 1.50000, 2.00000],\n [ 2.37500, 4.50000],\n [ 5.00000, 3.50000]],\n [[ 2.25000, 4.68750],\n [ 4.75000, 3.43750],\n [ 4.50000, 9.12500]],\n [[ 4.43750, 3.43750],\n [ 6.00000, 7.59375],\n [14.34375, 12.53125]]])\nanchors_reshape = tf.reshape(anchors_constant, [nl, 1, na, 1, 2]) \nbatch_no_constant = tf.reshape(tf.range(batch_size, dtype=tf.float32), [batch_size, 1, 1])\nanchor_no_constant = tf.reshape(tf.tile(tf.range(na, dtype=tf.float32), [batch_size]), [batch_size, na, 1, 1])\nlayer_no_constant = tf.repeat([x for x in range(nl)], [a*a for a in [int(img_size/b) for b in stride]])\nlayer_no_constant = tf.reshape(layer_no_constant, [1,-1,1])\nval_layer_no_constant = tf.repeat(layer_no_constant, [val_batch_size*na], axis=0)\nval_layer_no_constant = tf.reshape(val_layer_no_constant, [val_batch_size, na, -1, 1])\nlayer_no_constant = tf.repeat(layer_no_constant, [batch_size*na], axis=0)\nlayer_no_constant = tf.reshape(layer_no_constant, [batch_size, na, -1, 1])\nval_batch_no_constant = tf.reshape(tf.range(val_batch_size, dtype=tf.float32), [val_batch_size, 1, 1])\n\nclassnames = [ 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light',\n 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',\n 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',\n 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard',\n 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',\n 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',\n 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',\n 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 
'scissors', 'teddy bear',\n 'hair drier', 'toothbrush' ]\n\ncoco_id_name_map={1: 'person', 2: 'bicycle', 3: 'car', 4: 'motorcycle', 5: 'airplane',\n 6: 'bus', 7: 'train', 8: 'truck', 9: 'boat', 10: 'traffic light',\n 11: 'fire hydrant', 13: 'stop sign', 14: 'parking meter', 15: 'bench',\n 16: 'bird', 17: 'cat', 18: 'dog', 19: 'horse', 20: 'sheep', 21: 'cow',\n 22: 'elephant', 23: 'bear', 24: 'zebra', 25: 'giraffe', 27: 'backpack',\n 28: 'umbrella', 31: 'handbag', 32: 'tie', 33: 'suitcase', 34: 'frisbee',\n 35: 'skis', 36: 'snowboard', 37: 'sports ball', 38: 'kite', 39: 'baseball bat',\n 40: 'baseball glove', 41: 'skateboard', 42: 'surfboard', 43: 'tennis racket',\n 44: 'bottle', 46: 'wine glass', 47: 'cup', 48: 'fork', 49: 'knife', 50: 'spoon',\n 51: 'bowl', 52: 'banana', 53: 'apple', 54: 'sandwich', 55: 'orange',\n 56: 'broccoli', 57: 'carrot', 58: 'hot dog', 59: 'pizza', 60: 'donut',\n 61: 'cake', 62: 'chair', 63: 'couch', 64: 'potted plant', 65: 'bed', 67: 'dining table',\n 70: 'toilet', 72: 'tv', 73: 'laptop', 74: 'mouse', 75: 'remote', 76: 'keyboard',\n 77: 'cell phone', 78: 'microwave', 79: 'oven', 80: 'toaster', 81: 'sink',\n 82: 'refrigerator', 84: 'book', 85: 'clock', 86: 'vase', 87: 'scissors',\n 88: 'teddy bear', 89: 'hair drier', 90: 'toothbrush'}","repo_name":"gzroy/yolov7_tf2","sub_path":"params.py","file_name":"params.py","file_ext":"py","file_size_in_byte":4078,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"85"} +{"seq_id":"23056858320","text":"'''FASTA Renamer BioPython uses BioPython to Process Files\n\nGoes through each sequence and retrieves an accession number and header based\non the regular expression defined below. It reorganizes it to remove all\nspaces from the header line and format it to be in the\n{accession number}_{name of virus} form.\n\nIf additional modifications are required for this use-case, it is unlikely this\nscript will be modified as it runs slower for large datasets. Please see\nFASTA_Renamer_Python module found in this same package.\n'''\n\nfrom Bio import SeqIO\nfrom sys import stdout\nimport textwrap\nimport re\n\n# Configuration\ninput_path = \"data/BegomoSDTseqsmissing.fasta\"\noutput_path = \"data/BegomoSDTseqsmissing_OUTPUT.fasta\"\nvirus_name_pattern = \"^.+\\|.+\\|.+\\|.+\\|(.*[vV]irus).*\"\n\ninput_handle = open(input_path, \"rU\")\noutput_handle = open(output_path, \"w\")\nvirus_name_prog = re.compile(virus_name_pattern)\nvirus_errors = []\n\nprint(\"\\n=========================================\")\nprint(\"FASTA Renamer BioPython\")\nprint(\"=========================================\")\n\nvirusCtr = 0\nvirusErr = 0\nfor record in SeqIO.parse(input_handle, \"fasta\"):\n # {...accession #2...}.1|\n virusAccession2 = record.id.split(\"|\")[3].split(\".\")[0]\n virusDescription = record.description\n virusNameMatch = re.match(virus_name_prog, virusDescription)\n\n stdout.write(\"Number of Viruses Processed: {0}, Number of Errors: {1}\\r\"\n .format(virusCtr, virusErr))\n stdout.flush()\n if virusNameMatch:\n virusCtr = virusCtr + 1\n virusName = virusNameMatch.groups(1)[0].replace(\" \", \"_\")\n # Wrapping causes the function to go into O(m^2) time since it is now\n # traversing each line, counting the characters up to 80. 
This slows\n # down the running of this program.\n virusSeq = textwrap.fill(str(record.seq), 80)\n virusFormat = \">\" + virusAccession2 + virusName + \"\\n\" \\\n + virusSeq + \"\\n\"\n output_handle.write(virusFormat)\n else:\n virus_errors.append(virusDescription)\n virusErr = virusErr + 1 \noutput_handle.close()\ninput_handle.close()\n\n# Print Virus Sequences that had errors.\nprint(\"\\n\\n=========================================\")\nif virus_errors:\n print(\"Error extracting information for viruses:\")\n for virusErr in virus_errors:\n print(virusErr)\nelse:\n print(\"\\nAll sequences successfully processed!\")\nprint(\"=========================================\")\n\nprint(\"Output written to: \" + output_path)\nprint(\"\\nGoodbye! The Momo loves you!\")\n","repo_name":"phoenixcoder/VirologistsToolkit","sub_path":"FASTA_Header_Renamer/FASTA_Header_Renamer_BioPython.py","file_name":"FASTA_Header_Renamer_BioPython.py","file_ext":"py","file_size_in_byte":2532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"19618807055","text":"import torch\r\nimport torch.nn as nn\r\nimport functools\r\nimport torch.nn.functional as F\r\nfrom torch.autograd import Variable\r\n\r\nclass S2T_G(nn.Module):\r\n def __init__(self, config, norm_layer=nn.BatchNorm2d):\r\n super(S2T_G, self).__init__()\r\n\r\n # define a rnn sequence for T time steps\r\n # init hidden state with latent vector learnt from shape\r\n if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters\r\n use_bias = norm_layer.func == nn.InstanceNorm2d\r\n else:\r\n use_bias = norm_layer == nn.InstanceNorm2d\r\n self.latent_net = [\r\n nn.Conv3d(4, 64, kernel_size=4, stride=1, padding=0),\r\n nn.LeakyReLU(0.2, True),\r\n nn.Conv3d(64, 128, kernel_size=4, stride=2, padding=0, bias=True),\r\n # norm_layer(128),\r\n nn.LeakyReLU(0.2, True),\r\n nn.Conv3d(128,256, kernel_size=4, stride=2, padding=0, bias=True),\r\n nn.Conv3d(256,256, kernel_size=4, stride=2, padding=0, bias=True)]\r\n\r\n self.latent_net = nn.Sequential(*self.latent_net)\r\n\r\n self.lstm = nn.LSTM(300,256,num_layers=1, bidirectional=False, batch_first=True)\r\n self.linear1 = nn.Linear(256, config.num_vocab)\r\n self.config = config\r\n # self.max_time_steps = 10\r\n\r\n def sample_gumbel(self,shape, eps=1e-20):\r\n U = torch.rand(shape).cuda()\r\n return -Variable(torch.log(-torch.log(U + eps) + eps))\r\n\r\n def gumbel_softmax_sample(self,logits, temperature=0.5):\r\n y = F.log_softmax(logits, dim=-1) + self.sample_gumbel(logits.size())\r\n return F.softmax(y / temperature, dim=-1)\r\n\r\n def gumbel_softmax(self, logits, temperature=0.5):\r\n \"\"\"\r\n input: [*, n_class]\r\n return: [*, n_class] an one-hot vector\r\n \"\"\"\r\n y = self.gumbel_softmax_sample(logits, temperature)\r\n shape = y.size()\r\n _, ind = y.max(dim=-1)\r\n y_hard = torch.zeros_like(y).view(-1, shape[-1])\r\n y_hard.scatter_(1, ind.view(-1, 1), 1)\r\n y_hard = y_hard.view(*shape)\r\n return (y_hard - y).detach() + y\r\n\r\n def forward(self,input,vocab_embedding, max_time_steps):\r\n # print(max_time_steps)\r\n # print(max_time_steps.get_device())\r\n # import sys\r\n # sys.exit()\r\n vocab_embedding = vocab_embedding.squeeze(0)\r\n max_time_steps = int(max_time_steps.squeeze(0).item())\r\n # print(\"S2T_G\")\r\n batch_size = input.size()[0]\r\n # print(\"Input : \", input)\r\n\r\n latent = self.latent_net(input)\r\n # print(\"Latent : \", latent.size())\r\n latent = latent.view(1, 
latent.size()[0], latent.size()[1])\r\n\r\n \r\n decoder_hidden = latent\r\n decoder_cell = latent.clone()\r\n decoder_input = torch.from_numpy(self.config.bos_embedding).float().to(self.config.device)\r\n decoder_input = decoder_input.view(1,1,decoder_input.size()[-1])\r\n decoder_input = decoder_input.repeat(batch_size, 1 ,1)\r\n\r\n # print(decoder_input.size())\r\n # import sys\r\n # sys.exit()\r\n\r\n all_outs=[]\r\n self.lstm.flatten_parameters()\r\n\r\n for t in range(max_time_steps):\r\n # print(\"decoder input :\", decoder_input.size())\r\n # print(\"decoder hidden : \", decoder_hidden.size())\r\n # print(\"decoder cell : \", decoder_cell.size())\r\n\r\n output , (decoder_hidden, decoder_cell) = self.lstm(decoder_input, (decoder_hidden,decoder_cell))\r\n\r\n # gumbel softmax \r\n output = output.squeeze(1)\r\n logits = self.linear1(output)\r\n gumbled = self.gumbel_softmax(logits)\r\n # print(\"Gumbled : \", gumbled.size())\r\n # print(\"vocab : \", vocab_embedding.size())\r\n\r\n # print(\"gumbled :\", gumbled.size())\r\n # print(\"vocab_embedding : \", vocab_embedding.size())\r\n # import sys\r\n # sys.exit()\r\n decoder_input = torch.matmul(gumbled.detach(), vocab_embedding.detach())\r\n decoder_input = decoder_input.detach()\r\n decoder_input = decoder_input.view(batch_size,1,decoder_input.size()[-1])\r\n all_outs.append(gumbled)\r\n\r\n all_outs = torch.stack(all_outs, dim=1)\r\n # print(\"All outs : \", all_outs.size())\r\n # import sys\r\n # sys.exit()\r\n \r\n return all_outs","repo_name":"sgdgp/Text2Shape_BicycleGAN","sub_path":"s2t_generator.py","file_name":"s2t_generator.py","file_ext":"py","file_size_in_byte":4408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"2280546484","text":"import sys\n\nimport boto3\nimport click\n\nfrom ..utils.constants import DEFAULT_AWS_PROFILE, STAGING\n\n# we use writer because reader might also point to writer making it not safe\nENDPOINT_TYPE = \"writer\"\n\n\n@click.command()\n@click.option(\n \"-i\",\n \"--input_env\",\n required=False,\n default=STAGING,\n show_default=True,\n help=\"Input environment of the RDS server.\",\n)\n@click.option(\n \"-u\",\n \"--user\",\n required=False,\n default=\"dev_role\",\n show_default=True,\n help=\"User to connect as (role is the same as user).\",\n)\n@click.option(\n \"-r\",\n \"--region\",\n required=False,\n default=\"eu-west-1\",\n show_default=True,\n help=\"Region the RDS server is in.\",\n)\n@click.option(\n \"-s\",\n \"--sandbox_id\",\n required=False,\n default=\"default\",\n show_default=True,\n help=\"Default sandbox id.\",\n)\n@click.option(\n \"-p\",\n \"--aws_profile\",\n required=False,\n default=DEFAULT_AWS_PROFILE,\n show_default=True,\n help=\"The name of the profile stored in ~/.aws/credentials to use.\",\n)\ndef token(input_env, user, region, sandbox_id, aws_profile):\n \"\"\"\n Generates a temporary token that can be used to login to the database\n (through the ssh tunnel).\\n\n\n Examples.:\\n\n biomage rds token\\n\n biomage rds token -i staging\n \"\"\"\n password = None\n\n db_port = 5432\n\n session = boto3.Session(profile_name=aws_profile)\n rds_client = session.client(\"rds\")\n\n remote_endpoint = get_rds_endpoint(input_env, sandbox_id, rds_client, ENDPOINT_TYPE)\n\n print(f\"Generating temporary token for {input_env}\", file=sys.stderr)\n password = rds_client.generate_db_auth_token(remote_endpoint, db_port, user, region)\n\n print(f\"User: {user}\")\n print(f\"Password: {password}\")\n\n\ndef 
get_rds_endpoint(input_env, sandbox_id, rds_client, endpoint_type):\n response = rds_client.describe_db_cluster_endpoints(\n DBClusterIdentifier=f\"aurora-cluster-{input_env}-{sandbox_id}\",\n Filters=[\n {\"Name\": \"db-cluster-endpoint-type\", \"Values\": [endpoint_type]},\n ],\n )\n\n return response[\"DBClusterEndpoints\"][0][\"Endpoint\"]\n","repo_name":"biomage-org/biomage-utils","sub_path":"biomage/rds/token.py","file_name":"token.py","file_ext":"py","file_size_in_byte":2130,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"30029714918","text":"# You are given a matrix A which represent operations of size N x 2.\r\n# Assume initially you have a stack-like data structure you have to perform operations on it.\r\n# Operations are of two types:\r\n# 1 x: push an integer x onto the stack and return -1\r\n# 2 0: remove and return the most frequent element in the stack.\r\n# If there is a tie for the most frequent element,\r\n# the element closest to the top of the stack is removed and returned.\r\n# A[i][0] describes the type of operation to be performed.\r\n# A[i][1] describe the element x or 0 corresponding to the operation performed.\r\n# 1 <= N <= 100000\r\n# 1 <= A[i][0] <= 2\r\n# 0 <= A[i][1] <= 10^9\r\nfreqMap = {}\r\nsetMap = {}\r\nmaxFreq = 0\r\n\r\n\r\n# Function to insert x in the stack\r\ndef push(x):\r\n global maxFreq\r\n if x not in freqMap:\r\n freqMap[x] = 0\r\n\r\n freq = freqMap[x] + 1\r\n\r\n freqMap[x] = freq\r\n\r\n if freq > maxFreq:\r\n maxFreq = freq\r\n\r\n if freq not in setMap:\r\n setMap[freq] = []\r\n\r\n setMap[freq].append(x)\r\n\r\n return -1\r\n\r\n\r\ndef pop():\r\n global maxFreq\r\n\r\n top = setMap[maxFreq][-1]\r\n setMap[maxFreq].pop()\r\n\r\n freqMap[top] -= 1\r\n\r\n if len(setMap[maxFreq]) == 0:\r\n maxFreq -= 1\r\n\r\n return top\r\n\r\n\r\ndef solve(A):\r\n global freqMap\r\n global setMap\r\n global maxFreq\r\n freqMap = {}\r\n setMap = {}\r\n maxFreq = 0\r\n result = []\r\n for entry in A:\r\n if entry[0] == 1:\r\n result.append(push(entry[1]))\r\n\r\n elif entry[0] == 2:\r\n result.append(pop())\r\n\r\n return result\r\n\r\n\r\nprint(solve([[1, 5], [2, 0], [1, 4]])) # [-1, 5, -1]\r\nprint(solve([[1, 5], [1, 7], [1, 5], [1, 7], [1, 4], [1, 5], [2, 0], [2, 0], [2, 0], [2, 0]])) # [-1, -1, -1, -1, -1, -1, 5, 7, 5, 4]\r\n","repo_name":"deysantanu84/python-portfolio","sub_path":"problemSolving/stacks/maximumFrequencyStack.py","file_name":"maximumFrequencyStack.py","file_ext":"py","file_size_in_byte":1738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"39545307636","text":"from typing import List\n\nimport pytest\n\n\nclass Solution:\n def three_sum(self, nums: List[int]) -> List[List[int]]:\n nums.sort()\n n = len(nums)\n result = []\n\n for i in range(n - 2):\n if nums[i] > 0:\n break\n if i > 0 and nums[i] == nums[i - 1]:\n continue\n j = i + 1\n k = n - 1\n while j != k:\n total = nums[i] + nums[j] + nums[k]\n if total > 0:\n k -= 1\n elif total < 0:\n j += 1\n else:\n result.append([nums[i], nums[j], nums[k]])\n prev_lo = nums[j]\n prev_hi = nums[k]\n while j < k and nums[j] == prev_lo:\n j += 1\n while j < k and nums[k] == prev_hi:\n k -= 1\n return result\n\n\n@pytest.fixture\ndef solution():\n return Solution()\n\n\n@pytest.mark.parametrize(\n \"case,expected\",\n [\n ([-1, 0, 1, 2, -1, -4], [[-1, -1, 2], [-1, 0, 1]]),\n ([0, 1, 1], []),\n ([0, 0, 0], [[0, 0, 0]]),\n ],\n)\ndef test_solution(solution, case, 
expected):\n actual = solution.three_sum(case)\n assert actual == expected\n","repo_name":"mammothb/lc-solutions","sub_path":"array_strings/15_3sum/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"26871864908","text":"from typing import List, Tuple, Optional, Dict, cast\nimport os.path\nimport shutil\nimport json\nimport time # type: ignore\n\nimport numpy as np # type: ignore\nimport tensorflow as tf # type: ignore\nimport nltk # type: ignore\nimport nltk.corpus # type: ignore\nimport sklearn.metrics # type: ignore\n\nimport utils.file_manip as fmanip\nfrom data_extraction.wikipedia import * \nfrom custom_types import *\n\nfrom models.model import Model\nfrom utils.unks import prep_train, shuffle, prep_test, Paragraph, WordId, ParagraphVec, Label, VocabMap\n\n\nclass RnnClassifier(Model[str]):\n base_log_dir = \"runs/rnn/run{}\"\n\n '''\n RNN classifier\n\n --comment_size [int; default=100]\n How long to cap the length of each comment (padding if\n the comment is shorter)\n\n --batch_size [int; default=125]\n\n --epoch_size [int; default=10]\n\n --n_hidden_layers [int; default=120]\n\n --vocab_size [int; default=141000]\n\n --embedding_size [int; default=32]\n '''\n def __init__(self, restore_from: Optional[str] = None,\n run_num: Optional[int] = None,\n comment_size: int = 100,\n batch_size: int = 125,\n epoch_size: int = 10,\n n_hidden_layers: int = 120,\n vocab_size: int = 141000,\n embedding_size: int = 32,\n n_classes: int = 2,\n input_keep_prob: float = 1.0,\n output_keep_prob: float = 1.0,\n learning_rate: float = 0.001,\n beta1: float = 0.9,\n beta2: float = 0.999,\n epsilon: float = 1e-08) -> None:\n\n # Hyperparameters\n self.comment_size = comment_size\n self.batch_size = batch_size\n self.epoch_size = epoch_size\n self.n_hidden_layers = n_hidden_layers\n self.vocab_size = vocab_size\n self.embedding_size = embedding_size\n self.n_classes = n_classes\n self.input_keep_prob = input_keep_prob\n self.output_keep_prob = output_keep_prob\n self.learning_rate = learning_rate\n self.beta1 = beta1\n self.beta2 = beta2\n self.epsilon = epsilon\n\n # Particular tensorflow nodes worth keeping a reference to\n # Types are set to Any because mypy doesn't yet understand\n # the tensorflow library\n self.x_input = None # type: Any\n self.y_input = None # type: Any\n self.x_lengths = None # type: Any\n self.y_hot = None # type: Any\n self.input_keep = None # type: Any\n self.output_keep = None # type: Any\n self.predictor = None # type: Any\n self.loss = None # type: Any\n self.optimizer = None # type: Any\n self.summary = None # type: Any\n self.output = None # type: Any\n self.output_prob = None # type: Any\n self.init = None # type: Any\n self.logger = None # type: Any\n self.session = None # type: Any\n\n self.vocab_map = None # type: Optional[Dict[str, WordId]]\n\n super().__init__(restore_from, run_num)\n\n if restore_from is None:\n self._build_model()\n\n def _assert_all_setup(self) -> None:\n assert self.x_input is not None\n assert self.y_input is not None\n assert self.x_lengths is not None\n assert self.y_hot is not None\n assert self.input_keep is not None\n assert self.output_keep is not None\n assert self.predictor is not None\n assert self.loss is not None\n assert self.optimizer is not None\n assert self.summary is not None\n assert self.output is not None\n assert self.output_prob is not None\n assert self.init is not None\n assert 
self.logger is not None\n assert self.session is not None\n assert self.vocab_map is not None\n\n def _get_parameters(self) -> Dict[str, Any]:\n return {\n 'comment_size': self.comment_size,\n 'batch_size': self.batch_size,\n 'epoch_size': self.epoch_size,\n 'n_hidden_layers': self.n_hidden_layers,\n 'embedding_size': self.embedding_size,\n 'n_classes': self.n_classes,\n 'input_keep_prob': self.input_keep_prob,\n 'output_keep_prob': self.output_keep_prob,\n 'learning_rate': self.learning_rate,\n 'beta1': self.beta1,\n 'beta2': self.beta2,\n 'epsilon': self.epsilon,\n }\n\n def _save_model(self, path: str) -> None:\n with open(fmanip.join(path, 'vocab_map.json'), 'w') as stream:\n json.dump(self.vocab_map, stream)\n saver = tf.train.Saver()\n\n tf.add_to_collection('x_input', self.x_input)\n tf.add_to_collection('y_input', self.y_input)\n tf.add_to_collection('x_lengths', self.x_lengths)\n tf.add_to_collection('y_hot', self.y_hot)\n tf.add_to_collection('input_keep', self.input_keep)\n tf.add_to_collection('output_keep', self.output_keep)\n tf.add_to_collection('predictor', self.predictor)\n tf.add_to_collection('loss', self.loss)\n tf.add_to_collection('optimizer', self.optimizer)\n tf.add_to_collection('summary', self.summary)\n tf.add_to_collection('output', self.output)\n tf.add_to_collection('output_prob', self.output_prob)\n tf.add_to_collection('init', self.init)\n\n saver.save(self.session, fmanip.join(path, 'model'))\n tf.train.export_meta_graph(filename=fmanip.join(path, 'tensorflow_graph.meta'))\n\n def _restore_model(self, path: str) -> None:\n with open(fmanip.join(path, 'vocab_map.json'), 'r') as stream:\n self.vocab_map = json.load(stream)\n\n self.session = tf.Session(graph = tf.get_default_graph())\n saver = tf.train.import_meta_graph(fmanip.join(path, 'tensorflow_graph.meta'))\n saver.restore(self.session, fmanip.join(path, 'model'))\n\n self.x_input = tf.get_collection('x_input')[0]\n self.y_input = tf.get_collection('y_input')[0]\n self.x_lengths = tf.get_collection('x_lengths')[0]\n self.y_hot = tf.get_collection('y_hot')[0]\n self.input_keep = tf.get_collection('input_keep')[0]\n self.output_keep = tf.get_collection('output_keep')[0]\n self.predictor = tf.get_collection('predictor')[0]\n self.loss = tf.get_collection('loss')[0]\n self.optimizer = tf.get_collection('optimizer')[0]\n self.summary = tf.get_collection('summary')[0]\n self.output = tf.get_collection('output')[0]\n self.output_prob = tf.get_collection('output_prob')[0]\n self.init = tf.get_collection('init')[0]\n self.logger = tf.summary.FileWriter(self._get_log_dir(), graph=tf.get_default_graph())\n\n self._assert_all_setup()\n\n def _build_model(self) -> None:\n '''Builds the model, using the currently set params.'''\n with tf.name_scope('rnn-classifier'):\n self._build_input()\n self._build_predictor()\n self._build_evaluator()\n\n print('output_shape', self.output.shape)\n\n self.summary = tf.summary.merge_all()\n self.logger = tf.summary.FileWriter(self._get_log_dir(), graph=tf.get_default_graph())\n self.init = tf.global_variables_initializer()\n\n #self.session = tf.Session(config=tf.ConfigProto(log_device_placement=True))\n self.session = tf.Session(graph = tf.get_default_graph())\n\n def _build_input(self) -> None:\n with tf.name_scope('inputs'):\n self.x_input = tf.placeholder(\n tf.int32, \n shape=(None, self.comment_size),\n name='x_input')\n self.y_input = tf.placeholder(\n tf.int32,\n shape=(None,),\n name='y_input')\n self.x_lengths = tf.placeholder(\n tf.int32,\n shape=(None,),\n 
name='x_lengths')\n self.input_keep = tf.placeholder(\n tf.float32,\n shape=tuple(),\n name='input_keep')\n self.output_keep = tf.placeholder(\n tf.float32,\n shape=tuple(),\n name='output_keep')\n self.y_hot = tf.one_hot(\n self.y_input,\n depth=self.n_classes,\n on_value=tf.constant(1.0, dtype=tf.float32),\n off_value=tf.constant(0.0, dtype=tf.float32),\n dtype=tf.float32,\n name='y_hot_encoded')\n print('y_hot_shape', self.y_hot.shape)\n\n def _build_predictor(self) -> None:\n with tf.name_scope('prediction'):\n # Make embedding vector for words\n # Shape is [?, vocab_size, embedding_size]\n embedding = tf.Variable(\n tf.random_uniform([self.vocab_size, self.embedding_size], -1.0, 1.0, dtype=tf.float32),\n dtype=tf.float32,\n name=\"embedding\")\n word_vectors = tf.nn.embedding_lookup(embedding, self.x_input)\n\n self.predictor = self._make_bidirectional_rnn(word_vectors)\n self.loss = tf.reduce_mean(\n tf.nn.softmax_cross_entropy_with_logits(\n logits=self.predictor, \n labels=self.y_hot,\n #targets=self.y_hot,\n ),\n name='loss')\n\n self.optimizer = tf.train.AdamOptimizer(\n learning_rate=self.learning_rate,\n beta1=self.beta1,\n beta2=self.beta2,\n epsilon=self.epsilon).minimize(self.loss)\n\n tf.summary.scalar('loss', self.loss)\n\n def _build_evaluator(self) -> None:\n with tf.name_scope('evaluation'):\n correct_prediction = tf.equal(\n tf.argmax(self.predictor, 1), \n tf.argmax(self.y_hot, 1))\n accuracy = tf.reduce_mean(\n tf.cast(correct_prediction, tf.float32),\n name='accuracy')\n self.output = tf.argmax(self.predictor, 1, name='output')\n self.output_prob = tf.nn.softmax(self.predictor, name='output_prob')\n\n tf.summary.scalar('batch-accuracy', accuracy)\n\n def _make_bidirectional_rnn(self, word_vectors: Any) -> Any:\n with tf.name_scope('bidirectional_rnn'):\n # Convert shape of [?, comment_size, embedding_size] into\n # a list of [?, embedding_size]\n x_unstacked = tf.unstack(word_vectors, self.comment_size, 1)\n output_weight = tf.Variable(\n tf.random_normal([self.n_hidden_layers * 2, self.n_classes], dtype=tf.float32),\n dtype=tf.float32,\n name='output_weight')\n output_bias = tf.Variable(\n tf.random_normal([self.n_classes], dtype=tf.float32),\n dtype=tf.float32,\n name='output_bias')\n\n\n # Defining the bidirectional rnn\n layer = x_unstacked\n for i in range(1):\n with tf.name_scope('layer_{}'.format(i)):\n forwards_cell = tf.contrib.rnn.DropoutWrapper(\n tf.contrib.rnn.BasicLSTMCell(self.n_hidden_layers),\n input_keep_prob=self.input_keep,\n output_keep_prob=self.output_keep)\n backwards_cell = tf.contrib.rnn.DropoutWrapper(\n tf.contrib.rnn.BasicLSTMCell(self.n_hidden_layers),\n input_keep_prob=self.input_keep,\n output_keep_prob=self.output_keep)\n #forwards_cell = tf.contrib.rnn.GRUCell(self.n_hidden_layers)\n #backwards_cell = tf.contrib.rnn.GRUCell(self.n_hidden_layers)\n\n forwards_cells = [tf.contrib.rnn.DropoutWrapper(\n tf.contrib.rnn.BasicLSTMCell(self.n_hidden_layers),\n input_keep_prob=self.input_keep,\n output_keep_prob=self.output_keep) for i in range(2)]\n backwards_cells = [tf.contrib.rnn.DropoutWrapper(\n tf.contrib.rnn.BasicLSTMCell(self.n_hidden_layers),\n input_keep_prob=self.input_keep,\n output_keep_prob=self.output_keep) for i in range(2)]\n '''\n outputs, _ = tf.nn.bidirectional_dynamic_rnn(\n forwards_cell,\n backwards_cell,\n #x_unstacked,\n inputs=word_vectors,\n sequence_length=self.x_lengths,\n dtype=tf.float32,\n scope='bidirectional_rnn_{}'.format(i))\n \n # Need to connect outputs\n outputs = tf.concat(outputs, 2)\n last_output = 
outputs[:,0,:]\n\n                # Use the output of the last rnn cell for classification\n                prediction = tf.matmul(last_output, output_weight) + output_bias\n                '''\n\n                    outputs, fw, bw = tf.contrib.rnn.static_bidirectional_rnn(\n                        # tf.contrib.rnn.MultiRNNCell(forwards_cells),\n                        # tf.contrib.rnn.MultiRNNCell(backwards_cells),\n                        forwards_cell,\n                        backwards_cell,\n                        layer,\n                        dtype=tf.float32,\n                        sequence_length=self.x_lengths,\n                        scope='bidirectional_rnn_{}'.format(i))\n                    layer = outputs\n\n            # This is an abuse of scope, but whatever.\n            \n            # Use the output of the last rnn cell for classification\n            foo = tf.layers.batch_normalization(tf.concat([fw.h, bw.h], axis=1))\n            prediction = tf.matmul(foo, output_weight) + output_bias\n            return prediction\n\n    def train(self, xs: List[str], ys: List[int], **params: Any) -> None:\n        '''Trains the model. The expectation is that this method is called\n        exactly once.'''\n        if len(params) != 0:\n            raise Exception(\"RNN does not take in any extra params to train\")\n\n        x_final, x_lengths, vocab_map = prep_train(xs, self.comment_size, self.vocab_size)\n        self.vocab_map = vocab_map\n\n        n_batches = len(x_final) // self.batch_size\n\n        self._assert_all_setup()\n\n        self.session.run(self.init)\n        for i in range(self.epoch_size):\n            # train each epoch on the freshly shuffled data\n            x_final_new, x_lengths_new, ys_new = shuffle(x_final, x_lengths, ys)\n            self.train_epoch(i, n_batches, x_lengths_new, x_final_new, ys_new)\n\n    def train_epoch(self, iteration: int,\n                    n_batches: int, \n                    x_lengths: List[int],\n                    xs: List[List[int]], \n                    ys: List[int]) -> None:\n        start = time.time()\n\n        losses = 0.0\n\n        # Train on dataset\n        for batch_num in range(n_batches):\n            start_idx = batch_num * self.batch_size\n            end_idx = (batch_num + 1) * self.batch_size\n\n            x_batch = xs[start_idx: end_idx]\n            y_batch = ys[start_idx: end_idx]\n            x_len_batch = x_lengths[start_idx: end_idx]\n\n            batch_data = {\n                self.x_lengths: x_len_batch, \n                self.x_input: x_batch, \n                self.y_input: y_batch,\n                self.input_keep: self.input_keep_prob,\n                self.output_keep: self.output_keep_prob,\n            }\n\n            summary_data, batch_loss, _ = self.session.run(\n                [self.summary, self.loss, self.optimizer], \n                feed_dict=batch_data)\n            losses += batch_loss\n            self.logger.add_summary(summary_data, batch_num + n_batches * iteration)\n\n        # Report results, using last x_batch and y_batch\n        delta = time.time() - start \n        print(\"Iteration {}, avg batch loss = {:.6f}, num batches = {}, time elapsed = {:.3f}\".format(\n            iteration, \n            losses / n_batches, \n            n_batches,\n            delta))\n\n    def predict(self, xs: List[str]) -> List[List[float]]:\n        assert self.vocab_map is not None\n        x_final, x_lengths = prep_test(xs, self.comment_size, self.vocab_map)\n        batch_data = {\n            self.x_input: x_final, \n            self.x_lengths: x_lengths,\n            self.input_keep: 1.0,\n            self.output_keep: 1.0,\n        }\n        return cast(List[List[float]], self.session.run(self.output_prob, feed_dict=batch_data))\n\n","repo_name":"Michael0x2a/nlp-capstone","sub_path":"abuse/models/rnn_classifier.py","file_name":"rnn_classifier.py","file_ext":"py","file_size_in_byte":17061,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"7139232627","text":"import requests\nimport json\n\nif __name__ == '__main__':\n    url = 'https://fanyi.baidu.com/sug'\n    word = input('>>> ')\n    data = {\n        'kw': word\n    }\n    headers = {\n        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:108.0) Gecko/20100101 Firefox/108.0'\n    }\n    response = requests.post(url=url, data=data, headers=headers)\n    text = response.json()\n    # f-string, so the output file is named after the queried word\n    fp = open(f'./baidu_translation_{word}.json', 'w', 
encoding='utf-8')\n json.dump(text, fp=fp, ensure_ascii=False)\n","repo_name":"waHhhHao/python_crawler","sub_path":"crawler/case2_baidu_translation.py","file_name":"case2_baidu_translation.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"925044890","text":"# ╔═╗╔═╗ ╔╗ ╔╗ ╔╗ ╔══╗ ╔╗ ╔═══╗╔═══╗╔═══╗\n# ║║╚╝║║ ║║ ║║ ║║ ║╔╗║ ╔╝╚╗ ║╔═╗║║╔═╗║║╔═╗║\n# ║╔╗╔╗║╔══╗╔══╗╔═╗ ║╚═╗╔══╗ ║║ ║║ ║╚╝╚╗╔══╗╚╗╔╝ ║║ ╚╝║║ ║║║║ ╚╝\n# ║║║║║║║╔╗║║╔╗║║╔╗╗║╔╗║╚ ╗║ ║║ ║║ ║╔═╗║║╔╗║ ║║ ╔═══╗ ║║ ╔╗║║ ║║║║╔═╗\n# ║║║║║║║╚╝║║╚╝║║║║║║╚╝║║╚╝╚╗║╚╗║╚╗ ║╚═╝║║╚╝║ ║╚╗ ╚═══╝ ║╚═╝║║╚═╝║║╚╩═║\n# ╚╝╚╝╚╝╚══╝╚══╝╚╝╚╝╚══╝╚═══╝╚═╝╚═╝ ╚═══╝╚══╝ ╚═╝ ╚═══╝╚═══╝╚═══╝\n#\n#\n# This is a cog belonging to the Moonball Bot.\n# We are Open Source => https://moonball.io/opensource\n#\n# This cog (info) must be edited to fit your bot's needs.\n# You can find more info about how to do this on the GitHub page.\n#\n\nimport discord\nfrom discord.ext import commands\nfrom backend import embed_header, embed_footer, embed_color, bot_version, embed_icon, guild_id, embed_url # Import bot variables\nfrom backend import checkperm, logger, ip_embed, version_embed, log # Import functions\n\n\nclass Info(commands.Cog):\n \"\"\"Commands which provide information relating to Servers and our Network\"\"\"\n def __init__(self, client):\n self.client = client\n\n\n @commands.Cog.listener()\n async def on_ready(self):\n log.info(\"Cog : Info.py Loaded\")\n\n\n\n @commands.slash_command(name=\"ip\", description=\"Sends the Server IP\", guild_ids=[guild_id])\n async def getip(self, ctx): await ip_embed(ctx, isslash=True)\n\n @commands.slash_command(name=\"version\", description=\"Sends the Server Version\",guild_ids=[guild_id])\n async def getversion(self, ctx): await version_embed(ctx, isslash=True)\n\n @commands.slash_command(name=\"shop\", description=\"Sends a URL to the Moonball Shop\", guild_ids=[guild_id])\n async def shop(self, ctx):\n if await checkperm(ctx, 0): return\n await ctx.respond(\"Visit our shop here!- \\nhttps://shop.moonball.io\")\n await logger(\"i\", f\"Sent Shop link to message of `{ctx.author.name}#{ctx.author.discriminator}`\", self.client)\n\n @commands.slash_command(name=\"opensource\", description=\"Sends a link to the Moonball Network's Discord Bot's Open Source Repository\", guild_ids=[guild_id])\n async def opensource(self, ctx):\n if await checkperm(ctx, 0): return\n await ctx.respond(f\"This Discord Bot is opensource and made with py-cord in Python.\\nIf you would like to check out the source code, Visit the GitHub Repo here - https://moonball.io/opensource\", ephemeral=True)\n await logger(\"i\", f\"Sent Bot GitHub URL to message of `{ctx.author.name}#{ctx.author.discriminator}`\", self.client)\n\n @commands.slash_command(name=\"botversion\", description=\"Sends the current version of the bot\", guild_ids=[guild_id])\n async def botversion(self, ctx):\n if await checkperm(ctx, 0): return\n await ctx.respond(f\"I am currently on Version `{bot_version}`!\", ephemeral=True)\n await logger(\"i\", f\"Sent Bot Version to message of `{ctx.author.name}#{ctx.author.discriminator}`\", self.client)\n\n\n @commands.slash_command(name=\"socials\", description=\"Sends a link to the Moonball Network's Social Media\", guild_ids=[guild_id])\n async def socials(self, ctx):\n if await checkperm(ctx, 0): return\n embed = discord.Embed(title=\"Social Media\", description=\"Here are the links to our Socials!\", url=embed_url, 
color=embed_color).set_footer(text=embed_footer).set_author(name=embed_header,icon_url=embed_icon).set_thumbnail(url=embed_icon)\n embed.add_field(name=\"<:discordlogo:1003979264846204938> Discord\", value=\"https://moonball.io/discord\", inline=True)\n embed.add_field(name=\"<:twitterlogo:985601023995441202> Twitter\", value=\"https://moonball.io/twitter\", inline=False)\n embed.add_field(name=\"<:youtubelogo:985600997541961779> YouTube\", value=\"https://moonball.io/youtube\", inline=False)\n embed.add_field(name=\"<:instagram:985601063509979256> Instagram\", value=\"https://moonball.io/instagram\", inline=False)\n embed.add_field(name=\"<:redditlogo:1003978205021077606> Reddit\", value=\"https://moonball.io/reddit\", inline=False)\n await ctx.respond(embed=embed)\n await logger(\"i\", f\"Sent Socials to message of `{ctx.author.name}#{ctx.author.discriminator}`\", self.client)\n\n\ndef setup(client):\n client.add_cog(Info(client))\n","repo_name":"Shenanigans-Network/Discordbot","sub_path":"cogs/info.py","file_name":"info.py","file_ext":"py","file_size_in_byte":4910,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"25092416286","text":"import functools\r\nimport os\r\nimport requests\r\nfrom ics_parser import ics_parse\r\nfrom diff import difference\r\nimport flask\r\nfrom flask import Flask, render_template, flash, redirect, url_for, request\r\nfrom authlib.client import OAuth2Session\r\nimport google.oauth2.credentials\r\nimport googleapiclient.discovery\r\nimport google_auth\r\nfrom progressbar import ProgressBar\r\nfrom googleapiclient.discovery import build\r\nfrom google_auth_oauthlib.flow import InstalledAppFlow\r\nfrom forms import AuthForm\r\nimport json\r\nimport pickle\r\nfrom pymongo import MongoClient\r\nfrom threading import Thread\r\nimport time\r\n\r\n# Initialize App\r\napp = flask.Flask(__name__)\r\napp.register_blueprint(google_auth.app)\r\n\r\n# Next 3 lines connect to our database, where users contains all users and their attributes\r\nclient = MongoClient(\"\")\r\ndb = client.wtcal\r\nusers = db.user\r\n\r\n# Used to insert a user into the database\r\ndef insert_user(ical, uid, calcreds, sync, calendardata):\r\n new_user = {\r\n \"icalurl\": ical,\r\n \"googleuid\": uid,\r\n \"calcreds\": calcreds,\r\n \"initSync\": sync,\r\n \"caldata\": calendardata\r\n }\r\n users.insert_one(new_user)\r\n\r\napp.route('/upload')\r\ndef upload():\r\n return redirect(url_for('index'))\r\n\t\r\n@app.route('/uploader', methods = ['GET', 'POST'])\r\ndef upload_file():\r\n if request.method == 'POST':\r\n f = request.files['file']\r\n f.save(f.filename)\r\n #return 'file uploaded successfully'\r\n return redirect(url_for('index'))\r\n\r\n# The first route reached by accessing the website;\r\n# checks login status and redirects to other routes\r\n@app.route('/', methods=['GET', 'POST'])\r\ndef index():\r\n form = AuthForm()\r\n # This block runs after the user logs in with Google and submits the form data.\r\n if form.validate_on_submit():\r\n if request.method == 'POST':\r\n f = request.files['file']\r\n f.save(\"learn.ics\")\r\n # Grab WTClass calendar URL\r\n iCalLink = form.icalURL.data\r\n # Grab Google user information, will be using UID\r\n user_info = google_auth.get_user_info()\r\n # Create flow object that generates permissions for calendar access\r\n scopes = ['https://www.googleapis.com/auth/calendar']\r\n flow = InstalledAppFlow.from_client_secrets_file(\"client_secret.json\", scopes=scopes, 
redirect_uri='urn:ietf:wg:oauth:2.0:oob')\r\n # Grab calendar permission token from form\r\n flow.fetch_token(code=form.auth.data)\r\n # Once token is given to the flow, credentials are created\r\n # for the user allowing us to access their calendar via the \r\n # Google API, convert these crendentials to binary for database\r\n pickled_data = pickle.dumps(flow.credentials)\r\n # Insert the user into the database\r\n insert_user(iCalLink, user_info['id'], pickled_data, False, None)\r\n # Convert the binary credentials back for later use\r\n restored_data = pickle.loads(pickled_data)\r\n # Redirect to next page\r\n return redirect(url_for('index'))\r\n \r\n if google_auth.is_logged_in():\r\n # Get user's Google information\r\n user_info = google_auth.get_user_info()\r\n # Tell the user to go to the authorization URL.\r\n authURL = 'https://accounts.google.com/o/oauth2/auth?response_type=code&client_id=53539138489-5d8chv0kvpesoo7qkrc8h6n8gkjpk4n6.apps.googleusercontent.com&redirect_uri=urn%3Aietf%3Awg%3Aoauth%3A2.0%3Aoob&scope=https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fcalendar&state=wZGgkhnab3HiagE4p3tKjAPmOkohh2&prompt=consent&access_type=offline'\r\n \r\n # User is logged in, check if they exist in the database:\r\n userID = users.find_one({ \"googleuid\": str(user_info['id']) })\r\n\r\n # If they exist, redirect to main page\r\n if userID:\r\n return flask.redirect(url_for('logged_in'))\r\n\r\n # If not, redirect to form page until valid information is provided\r\n else:\r\n return render_template('index.html', url=authURL, form=form)\r\n \r\n return render_template('login.html')\r\n\r\n@app.route('/loggedin')\r\ndef logged_in():\r\n # This is the main page users will see once they've logged in and provided\r\n # valid form entries - the script is run from this page\r\n # This block will run only if the user is logged in with Google.\r\n if google_auth.is_logged_in():\r\n # Find the current user and prepare to update their calendar\r\n # Get user's Google information which contains their google uid\r\n user_info = google_auth.get_user_info()\r\n # From that UID, get user from database\r\n user = users.find_one({ \"googleuid\": str(user_info['id']) })\r\n # Retrieve their Google calendar credentials, use pickle to read binary file.\r\n creds = pickle.loads(user['calcreds'])\r\n # syncedBefore is boolean to check if user has already synced their calendar before\r\n syncedBefore = user['initSync']\r\n # Initialize service to build calendar events\r\n service = build(\"calendar\", \"v3\", credentials=creds)\r\n result = service.calendarList().list().execute()\r\n # Begin a new thread to process the calendar in the background so that the next webpage can load.\r\n thread = Thread(target=process_calendar, args=(user,service,result,syncedBefore))\r\n thread.daemon = True\r\n # Start the process\r\n thread.start()\r\n return render_template('main.html')\r\n else:\r\n return redirect(url_for('index'))\r\n\r\ndef process_calendar(user,service,result,syncedBefore):\r\n # This function runs in the background and adds events to the user's Google Calendar\r\n icsURL = user['icalurl']\r\n currentCalendar = requests.get(icsURL).text\r\n \r\n if not syncedBefore:\r\n # No previous calendar, add every event\r\n cal = currentCalendar\r\n icsFileName = \"learn.ics\"\r\n else:\r\n # Previous events added, only add new events\r\n # by comparing their last synced calendar\r\n # against a new pull and storing the difference \r\n oldCalendar = pickle.loads(user['caldata'])\r\n cal = difference(oldCalendar, 
currentCalendar)\r\n icsFileName = \"write.ics\"\r\n # If there are new events to be added:\r\n if cal.strip():\r\n # Write events to an ics file\r\n with open('write.ics', 'w', newline='') as outfile:\r\n outfile.write(str(cal))\r\n #print(\"Wrote to write.ics\")\r\n \r\n # Parse the ics and build a calendar event object\r\n # one-by-one for each event, store into an array\r\n icsName = icsFileName\r\n calendar_events = ics_parse(icsName)\r\n calendar_id = result['items'][0]['id']\r\n\r\n # There is at least 1 event to be pushed to the calendar,\r\n # begin inserting and update progress to progress bar\r\n pbar = ProgressBar()\r\n print(\"Adding\",len(calendar_events),\"new events to the calendar:\\n\")\r\n for event in pbar(calendar_events):\r\n service.events().insert(calendarId=calendar_id, body=event).execute()\r\n\r\n # All events have been inserted, convert the user's ics file to binary\r\n # and update their last synced calendar to include these events\r\n caldata = pickle.dumps(currentCalendar)\r\n newvalues = { \"$set\": { \"initSync\": True, \"caldata\": caldata} }\r\n users.update_one(user, newvalues)\r\n\r\n if icsName == 'learn.ics':\r\n os.remove('learn.ics')\r\n\r\n\r\n # No new calendar entries were found, do not do anything\r\n else:\r\n print(\"No new events to add.\")\r\n","repo_name":"OnlyInSpace/wtCal","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":7555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"4343679637","text":"class Node:\n def __init__(self,data):\n self.data = data\n self.next = None\n\n\n# Following is the optimized Input for Linked List\ndef InputLinkedList():\n inputList = [int(ele) for ele in input().split()]\n head = None\n tail = None\n for ele in inputList:\n if ele == -1:\n break\n newNode = Node(ele)\n if head is None:\n head = newNode\n tail = newNode\n else:\n tail.next = newNode\n tail = newNode\n return head \n\n\n# Print a Linked List\n\ndef PrintLL(head):\n while head is not None:\n print(str(head.data) + \"-->\", end='')\n head = head.next\n print(\"None\")\n return\n\ndef length(head):\n count=0\n while head is not None:\n count += 1\n head = head.next\n return count\n\ndef ReverseLL(head):\n fwd = None\n prev = None\n currn = head\n while(currn is not None):\n fwd = currn.next\n currn.next = prev\n prev = currn\n currn = fwd\n return prev\n\ndef Palindrome(head):\n if head is None or head.next is None:\n return True\n fast = head\n slow = head\n while(fast.next is not None and fast.next.next is not None):\n fast = fast.next.next\n slow = slow.next\n second = slow.next\n slow.next = None\n second = ReverseLL(second)\n \n\n #compare\n firstList = second\n secondList = head\n while(firstList is not None):\n if(firstList.data != secondList.data):\n return False\n firstList = firstList.next\n secondList = secondList.next\n return True\n\n\n\n\n\nnode = InputLinkedList()\nPrintLL(node)\nprint(Palindrome(node))\n \n\n\n","repo_name":"Mriganka5137/DSA-Python-","sub_path":"Linked List/Palindrome.py","file_name":"Palindrome.py","file_ext":"py","file_size_in_byte":1661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"72265189718","text":"from requests import get\nfrom requests.exceptions import RequestException\nfrom contextlib import closing\nfrom bs4 import BeautifulSoup\nimport os\nimport time\nimport sys\n\n\ndef simple_get(url):\n \"\"\"\n Attempts to get the content at `url` by making an 
HTTP GET request.\n If the content-type of resoinse is some kind of HTML/XML, return the\n text content, otherwise return None\"\"\"\n try:\n with closing(get(url, stream=True)) as resp:\n if is_good_response(resp):\n return resp.content\n else:\n return None\n\n except RequestException as e:\n log_error('Error during requests to {0} : {1}'.format(url, str(e)))\n return None\n\n\ndef is_good_response(resp):\n \"\"\"\n Returns true if response seems to be HTML, false otherwise\n :param resp:\n :return:\n \"\"\"\n content_type = resp.headers['Content-Type'].lower()\n return (resp.status_code == 200\n and content_type is not None\n and content_type.find('html') > -1)\n\n\ndef log_error(e):\n print(e)\n\n\ndef get_data():\n \"\"\"\n Downloads the page where the list of data is found\n and returns a list of strings, one per data item\n :return:\n \"\"\"\n url = input(\"Please enter full url of data source:\\n>> \")\n\n response = simple_get(url)\n\n if response is not None:\n html = BeautifulSoup(response, 'html.parser')\n names = set()\n html_tag = input(\"Enter HTML tag to select:\\n>> \")\n for html_tag in html.select(html_tag):\n for name in html_tag.text.split('\\n'):\n if len(name) > 0:\n names.add(name.strip())\n return list(names)\n\n # Raise exception if failed to get data from url\n raise Exception('Error retrieving contents at {}'.format(url))\n\n\ndef local_get_data():\n \"\"\"\n Just like get_names() only local\n \"\"\"\n url = input(\"Please enter full file path of data source:\\n>> \")\n\n raw_html = open(url).read()\n html = BeautifulSoup(raw_html, 'html.parser')\n names = set()\n html_tag = input(\"Enter HTML tag to select:\\n>> \")\n for html_tag in html.select(html_tag):\n for name in html_tag.text.split('\\n'):\n if len(name) > 0:\n names.add(name.strip())\n return list(names)\n\n\ndef get_hits_on_data(name):\n \"\"\"\n Accepts a `name` of data and returns the number of hits\n that data has on wikipedia (or your site of choice) in the last\n 60 days, as an `int`\n \"\"\"\n url_root = 'https://xtools.wmflabs.org/articleinfo/en.wikipedia.org/{}'\n response = simple_get(url_root.format(name))\n\n if response is not None:\n html = BeautifulSoup(response, 'html.parser')\n\n hit_link = [a for a in html.select('a')\n if a['href'].find('latest-60') > -1]\n if len(hit_link) > 0:\n # Strip commas:\n link_text = hit_link[0].text.replace(',','')\n try:\n # Convert to int\n return int(link_text)\n except:\n log_error(\"Couldn't parse {} as an `int`\".format(link_text))\n\n log_error('No page views found for {}'.format(name))\n return None\n\n\nif __name__ == '__main__':\n running = True\n os.system('clear')\n print(\"Welcome to WebScrape!\")\n print(\"---\"*20)\n print(\"WebScrape is a command line utility written purely in Python designed to grab an HTML page\\n\"\n \" and the data contained in HTML tags (of which you specify).\\n After grabbing this data, \"\n \"WebScrape will then get the number of\\n `hits` your data has gotten on Wikipedia.\\n\")\n time.sleep(3)\n while(True):\n print(\"===\"*20)\n print(\"WebScrape\")\n print(\"===\"*20)\n data_location = int(input(\"Please choose your data source:\\n1) Website\\n2) Local File\\n3) Quit ScrapeMe\\n>> \"))\n if data_location == 1:\n names = get_data()\n print('Gathering data...\\n')\n print('... done.\\n')\n elif data_location == 2:\n names = local_get_data()\n print('Gathering data...\\n')\n print('... 
done.\\n')\n elif data_location == 3:\n sys.exit(0)\n\n results = []\n\n length_of_list = int(input(\"How many data points would you like to return? Enter [999999] for all items.\\n>> \"))\n\n print(\"Getting stats for each data item...\")\n for name in names:\n try:\n hits = get_hits_on_data(name)\n if hits is None:\n hits = -1\n results.append((hits, name))\n except:\n results.append((-1, name))\n log_error('error encountered while processing '\n '{}, skipping'.format(name))\n print('... done.\\n')\n\n results.sort()\n results.reverse()\n\n if len(results) > length_of_list:\n top_marks = results[:length_of_list]\n else:\n top_marks = results\n\n print('\\nThe most popular data objects from your query are:\\n')\n for (mark, data) in top_marks:\n print('{} with {} page views'.format(data, mark))\n\n no_results = len([res for res in results if res[0] == -1])\n print('\\nI could not locate results for '\n '{} data objects on the list'.format(no_results))\n\n input(\"Press [Return/Enter] to continue...\")\n\n\n","repo_name":"MasonCyberUtils/WebScrape","sub_path":"webscrape.py","file_name":"webscrape.py","file_ext":"py","file_size_in_byte":5368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"15709535063","text":"import numpy as np\nimport sys\nimport torch \nfrom torchvision import transforms\nfrom torch.utils.data import DataLoader\n\ndef saturateAmplitudes(data_set):\n data_set[data_set > 10000] = 10000\n data_set[data_set < -10000] = -10000\n return data_set\n\ndef balanceClasses(data_set, label_set):\n zero_indices = np.where(label_set == 0)[0]\n one_indices = np.where(label_set == 1)[0]\n\n max_count = min(len(zero_indices), len(one_indices))\n\n zero_keep_indices = np.random.choice(zero_indices, size=max_count, replace=False)\n one_keep_indices = np.random.choice(one_indices, size=max_count, replace=False)\n\n keep_indices = np.sort(np.concatenate((zero_keep_indices, one_keep_indices)))\n\n return data_set[keep_indices,:,:], label_set[keep_indices]\n\ndef includeChannel(data_set, channels):\n return data_set[:,channels,:]\n\ndef getDataSamples(EEG_samples, partic_id):\n\n data = EEG_samples['data'][0,partic_id]\n sick, sick_lables = data['sick'][0][0], []\n non_sick, non_sick_lables = data['non_sick'][0][0], []\n\n if np.any(sick):\n sick_lables = np.ones((sick.shape[0],1))\n\n if np.any(non_sick):\n non_sick_lables = np.zeros((non_sick.shape[0], 1))\n\n\n if (np.any(sick)) and (np.any(non_sick)):\n epoches = np.concatenate((sick, non_sick), axis=0)\n lables = np.concatenate((sick_lables, non_sick_lables), axis=0)\n elif (np.any(sick)) and (not np.any(non_sick)):\n epoches = sick\n lables = sick_lables\n elif (not np.any(sick)) and (np.any(non_sick)):\n epoches = non_sick\n lables = non_sick_lables\n\n epoches = saturateAmplitudes(epoches)\n epoches = includeChannel(epoches, range(4,20)) # Uncomment to inclead specifi channels\n\n return epoches, lables\n\ndef normalizeSamples(EEG_samples):\n\n mean = np.mean(EEG_samples)\n std = np.std(EEG_samples)\n\n return (EEG_samples - mean)/std\n\ndef generateTrainTest(EEG_samples, LOU_subject_id, normalize = False):\n train_data_set, train_label_set = [], []\n no_participants = EEG_samples.shape[1]\n for partic_id in range(0,no_participants):\n if LOU_subject_id != partic_id:\n epoches, lables = getDataSamples(EEG_samples, partic_id)\n if not np.any(train_data_set):\n train_data_set = epoches\n train_label_set = lables\n else:\n train_data_set = np.concatenate((train_data_set, 
epoches), axis=0)\n train_data_set = np.nan_to_num(train_data_set, nan=0) # replace nan values with 0\n if normalize:\n train_data_set = normalizeSamples(train_data_set) # normalize the data\n\n train_label_set = np.concatenate((train_label_set, lables), axis=0)\n else:\n test_data_set, test_label_set = getDataSamples(EEG_samples, partic_id)\n test_data_set = np.nan_to_num(test_data_set, nan=0) # replace nan values with 0\n if normalize:\n test_data_set = normalizeSamples(test_data_set) # normalize the data\n\n valid_data_set, valid_label_set = getDataSamples(EEG_samples, partic_id)\n valid_data_set = np.nan_to_num(valid_data_set, nan=0) # replace nan values with 0\n if normalize:\n valid_data_set = normalizeSamples(valid_data_set) # normalize the data\n\n train_data_set, train_label_set = balanceClasses(train_data_set, train_label_set) # for ensuring a 50:50 ratio between sick and non-sick\n\n data_set = np.concatenate((train_data_set, test_data_set), axis=0)\n label_set = np.concatenate((train_label_set, test_label_set), axis=0)\n\n return [data_set, label_set], [train_data_set, train_label_set], [test_data_set, test_label_set]\n\n\ndef writeStatsToFile(full_data, train_data, test_data, file_name):\n\n data_set, label_set = full_data[0], full_data[1]\n train_data_set, train_label_set = train_data[0], train_data[1]\n test_data_set, test_label_set = test_data[0], test_data[1]\n\n with open(file_name, 'w') as file:\n file.write(\"-------- INFORMATION ON THE DATA ---------\\n\")\n file.write(\"Shapes\\n\")\n file.write(\"Train Data: \"+str(train_data_set.shape)+\" Train Lable: \"+str(train_label_set.shape)+\"\\n\")\n file.write(\"Test Data: \"+str(test_data_set.shape)+\" Test Lable: \"+str(test_label_set.shape)+\"\\n\")\n\n file.write(\"***********************************************************************************\\n\")\n\n file.write(\"Number of NANs in the data\\n\")\n file.write(\"Train Data: \"+str(np.sum(np.isnan(train_data_set)))+\" Train Lable: \"+str(np.sum(np.isnan(train_label_set)))+\"\\n\")\n file.write(\"Test Data: \"+str(np.sum(np.isnan(test_data_set)))+\" Test Lable: \"+str(np.sum(np.isnan(test_label_set)))+\"\\n\")\n\n file.write(\"***********************************************************************************\\n\")\n\n file.write(\"Some stats on the data\\n\")\n file.write(\"Number of 'non sick' samples: \"+ str(np.count_nonzero(label_set == 0))+\"\\n\")\n file.write(\"Count voltage values > 10,000: \"+ str(np.count_nonzero(data_set > 10000))+\"\\n\")\n file.write(\"Count voltage values < -10,000: \"+ str(np.count_nonzero(data_set < -10000))+\"\\n\")\n\n file.write(\"**************************************** FULL DATASET *******************************************\\n\")\n\n file.write(\"Number of 'sick' samples: \"+ str(np.count_nonzero(label_set == 1))+\"\\n\")\n file.write(\"Number of 'non sick' samples: \"+ str(np.count_nonzero(label_set == 0))+\"\\n\")\n file.write(\"Maximum Value in Data Set: \" + str(np.amax(data_set))+\"\\n\")\n file.write(\"Minimum Value in Data Set: \" + str(np.amin(data_set))+\"\\n\")\n file.write(\"Mean Value in Data Set: \" + str(np.mean(data_set))+\"\\n\")\n file.write(\"Standard Daviation in Data Set: \" + str(np.std(data_set))+\"\\n\")\n\n file.write(\"************************************* TRAINING DATASET **********************************************\\n\")\n\n file.write(\"Number of 'sick' samples: \"+ str(np.count_nonzero(train_label_set == 1))+\"\\n\")\n file.write(\"Number of 'non sick' samples: \"+ 
str(np.count_nonzero(train_label_set == 0))+\"\\n\")\n file.write(\"Maximum Value in Training Set: \" + str(np.amax(train_data_set))+\"\\n\")\n file.write(\"Minimum Value in Data Set: \" + str(np.amin(train_data_set))+\"\\n\")\n file.write(\"Mean Value in Data Set: \" + str(np.mean(train_data_set))+\"\\n\")\n file.write(\"Standard Daviation in Data Set: \" + str(np.std(train_data_set))+\"\\n\")\n\n file.write(\"*************************************** TESTING DATASET ********************************************\\n\")\n\n file.write(\"Number of 'sick' samples: \"+ str(np.count_nonzero(test_label_set == 1))+\"\\n\")\n file.write(\"Number of 'non sick' samples: \"+ str(np.count_nonzero(test_label_set == 0))+\"\\n\")\n file.write(\"Maximum Value in Testing Set: \" + str(np.amax(test_data_set))+\"\\n\")\n file.write(\"Minimum Value in Data Set: \" + str(np.amin(test_data_set))+\"\\n\")\n file.write(\"Mean Value in Data Set: \" + str(np.mean(test_data_set))+\"\\n\")\n file.write(\"Standard Daviation in Data Set: \" + str(np.std(test_data_set))+\"\\n\")\n file.close()\n\n return 0\n\ndef appendToFile(info, file):\n\n with open(file, 'a') as file:\n file.write(info+'\\n')\n file.close()\n\ndef generateTorchLoaders(data_set, data_label, EEGDataset):\n N = 40 #number of batches\n W = 0 #worker threads\n data_transform = transforms.Compose([transforms.ToTensor()])\n\n train_set = EEGDataset(data_set=data_set, label_set=data_label, transform=data_transform)\n train_loader = DataLoader(train_set, batch_size=N, shuffle=True, num_workers=W)\n\n return train_loader\n\n# evaluation function\ndef eval(net, data_loader, file_name=[]):\n loss_function = torch.nn.CrossEntropyLoss()\n # TODO: build your SGD optimizer with learning rate=0.01, momentum=0.9\n # your code here\n optimizer = torch.optim.SGD(net.parameters(), lr=0.002, momentum=0.9, weight_decay=np.exp(-7))\n use_cuda = torch.cuda.is_available()\n if use_cuda:\n net = net.cuda()\n #net.eval()\n correct = 0.0\n num_images = 0.0\n for i_batch, (images, labels) in enumerate(data_loader):\n labels = torch.reshape(labels, (-1,))\n labels = labels.type(torch.LongTensor)\n if use_cuda:\n images = images.cuda()\n labels = labels.cuda()\n outs = net(images.float()) \n _, preds = outs.max(1)\n correct += preds.eq(labels).sum()\n num_images += len(labels)\n\n acc = correct / num_images\n\n if file_name != []:\n appendToFile(\"Calculated Testing Accuracy: \"+str(acc), file_name)\n\n return acc\n\n# training function\ndef train(net, train_loader, valid_loader, epoches, file_name):\n loss_function = torch.nn.CrossEntropyLoss()\n # TODO: build your SGD optimizer with learning rate=0.01, momentum=0.9\n # your code here\n optimizer = torch.optim.SGD(net.parameters(), lr=0.01, momentum=0.9, weight_decay=np.exp(-7))\n # optimizer = torch.optim.RMSprop(net.parameters(), lr=0.002, momentum=0.9, weight_decay=np.exp(-7))\n # optimizer = torch.optim.Adam(net.parameters(), lr=0.01, betas=(0.9, 0.999), eps=1e-8, weight_decay=0.0)\n\n use_cuda = torch.cuda.is_available()\n if use_cuda:\n net = net.cuda()\n print(\"Cuda is Avaliable\")\n \n \n for epoch in range(epoches):\n net.train() \n correct = 0.0 # used to accumulate number of correctly recognized images\n num_images = 0.0 # used to accumulate number of images\n for i_batch, (images, labels) in enumerate(train_loader):\n t_size = images.size()\n labels = torch.reshape(labels, (-1,))\n labels = labels.type(torch.LongTensor)\n\n if use_cuda:\n images = images.cuda()\n labels = labels.cuda()\n \n optimizer.zero_grad()\n 
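# Standard supervised update: forward pass, cross-entropy loss, backward pass, optimizer step.\n            # test_image and test_weights below are debugging probes only; nothing afterwards reads them.\n            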
test_image = images.float()\n test_weights = net.conv1.weight.data\n outputs = net(images.float())\n loss = loss_function(outputs, labels)\n loss.backward()\n\n optimizer.step()\n \n dummy,predicted = outputs.max(1)\n correct += predicted.eq(labels).sum()\n num_images += len(labels)\n\n acc = correct / num_images\n acc_eval = eval(net, valid_loader, [])\n print('epoch: %d, lr: %f, accuracy: %f, loss: %f, valid accuracy: %f' % (epoch, optimizer.param_groups[0]['lr'], acc, loss.item(), acc_eval))\n info = \"epoch: \"+str(epoch)+\", lr: \"+str(optimizer.param_groups[0]['lr'])+\", accuracy: \"+str(acc)+\", loss: \"+str(loss.item())+\", valid accuracy: \"+str(acc_eval)\n appendToFile(info, file_name)\n\n return net","repo_name":"AbdulsatarAboud/ReverseNeuro","sub_path":"source/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":10772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"1615998117","text":"from abc import abstractclassmethod\nfrom asyncio.tasks import _unregister_task\nfrom functools import wraps\nfrom json import dumps\nfrom typing import Any\n\nclass IRBase(object):\n def __init__(self, opcode : str):\n self._opcode = opcode\n def to_dict(self, const_bag: list = None) -> dict[str]:\n ret = dict()\n ret[\"opcode\"] = self._opcode\n return ret\n def to_json(self, indent = None, **kwargs) -> str:\n kwargs[\"indent\"] = indent\n return dumps(self.to_dict(), **kwargs)\n def lift_const_and_jsonify(self, indent = None, **kwargs):\n bag = list()\n kwargs[\"indent\"] = indent\n jsonified = dumps(self.to_dict(bag), **kwargs) \n return (jsonified, bag)\n def defs(self) -> list[str] :\n ret = []\n for key in dir(self):\n value = self.__getattribute__(key)\n if isinstance(value, IRBase):\n ret.extend(value.defs())\n return ret\n def uses(self) -> list[str] :\n ret = []\n for key in dir(self):\n value = self.__getattribute__(key)\n if isinstance(value, IRBase):\n ret.extend(value.uses())\n return ret\n def imports(self) -> list[str]:\n defs = self.defs()\n uses = self.uses()\n ret = []\n for item in uses:\n if item not in defs:\n ret.append(item)\n return ret\n def exports(self) -> list[str]:\n defs = self.defs()\n uses = self.uses()\n ret = []\n for item in defs:\n if item not in uses:\n ret.append(item)\n return ret\n\ndef make_const_bag_ref(value, bag):\n key_id = len(bag)\n bag.append(value)\n return {\n \"const_bag_key\": key_id\n }\ndef lift_constant_to_env(dict_value: dict[str, Any], const_bag: list):\n if const_bag == None:\n return\n for key in dict_value.keys():\n if key == \"opcode\":\n continue\n key_type = type(dict_value[key])\n if key_type == int or key_type == float or key_type == str:\n dict_value[key] = make_const_bag_ref(dict_value[key], const_bag)\ndef try_lift_const(inner):\n @wraps(inner)\n def _to_dict_and_lift(self, bag : list = None):\n ret = inner(self, bag)\n lift_constant_to_env(ret, bag)\n return ret\n return _to_dict_and_lift\n\n# Actual IR representations\n\n## Random generated bed3\nclass SortedRandomInterval(IRBase):\n def __init__(self, count: int, min_len: int, max_len: int):\n super().__init__(\"SortedRandom\")\n self._count = count\n self._min_len = min_len\n self._max_len = max_len\n @try_lift_const\n def to_dict(self, bag) -> dict[str]:\n ret = super().to_dict(bag)\n ret[\"count\"] = self._count\n ret[\"min_length\"] = self._min_len\n ret[\"max_length\"] = self._max_len\n return ret\n\n## Inline Rust Source Code\nclass InlineRust(IRBase):\n def __init__(self, env : dict[str, 
IRBase], src):\n super().__init__(\"InlineRust\")\n self._env = env\n self._src = src\n def to_dict(self, bag) -> dict[str]:\n ret = super().to_dict(bag)\n ret[\"env\"] = {}\n for key, val in self._env.items():\n if isinstance(val, IRBase):\n value = {\"Iter\": val.to_dict(bag)}\n elif bag == None:\n value = {\"Const\": val}\n else:\n value = {\"Const\": make_const_bag_ref(val, bag)}\n ret[\"env\"][key] = value\n ret[\"src\"] = self._src\n return ret\n def uses(self):\n ret = []\n for var in self._env.value():\n ret.extend(var.uses())\n return ret\n def defs(self):\n ret = []\n for var in self._env.value():\n ret.extend(var.defs())\n return ret\n\n## Genome file manipulation\nclass LoadGenomeFile(IRBase):\n def __init__(self, path: str):\n super().__init__(\"LoadGenomeFile\")\n self._path = path\n @try_lift_const\n def to_dict(self, bag) -> dict[str]:\n ret = super().to_dict(bag)\n ret[\"File\"] = self._path\n return ret\n\n## Label assignment\nclass LabelAssignmentBase(IRBase):\n pass\n\nclass Let(LabelAssignmentBase):\n def __init__(self, id : str, value : IRBase):\n super().__init__(\"Let\")\n self._id = id\n self._value = value\n def defs(self) -> list[str]:\n return [self._id] + super().defs()\n def to_dict(self, bag = None) -> dict:\n ret = super().to_dict(bag)\n ret[\"id\"] = self._id\n ret[\"value\"] = self._value.to_dict(bag)\n return ret\n\nclass Ref(LabelAssignmentBase):\n def __init__(self, id : str):\n super().__init__(\"Ref\")\n self._id = id\n def uses(self) -> list[str]:\n return [self._id] + super().uses()\n def to_dict(self, bag) -> dict[str]:\n ret = super().to_dict(bag)\n ret[\"id\"] = self._id\n return ret\n\n## The Data Sources\n\nclass BatchOperationBase(IRBase):\n pass\n\nclass OpenFile(BatchOperationBase):\n def __init__(self, target: dict, format : str, sorted : bool = False, ref : str = None, compression : bool = False, num_of_fields : int = 3):\n super().__init__(\"Open\")\n self._target = target \n self._format = format\n self._ref = ref\n self._compression = compression\n self._num_of_fields = num_of_fields\n self._sorted = sorted\n def to_dict(self, bag) -> dict[str]:\n ret = super().to_dict(bag)\n if \"Path\" in self._target and bag != None:\n ret[\"target\"] = {\n \"Path\": make_const_bag_ref(self._target[\"Path\"], bag)\n }\n else:\n ret[\"target\"] = self._target\n ret[\"format\"] = self._format\n ret[\"num_of_fields\"] = self._num_of_fields\n ret[\"compression\"] = self._compression\n ret[\"sorted\"] = self._sorted\n return ret\n\n## Record type casting\nclass CastToBed(BatchOperationBase):\n def __init__(self, inner : IRBase, num_of_fields: int, sorted: bool):\n super().__init__(\"CastToBed\")\n self._inner = inner\n self._nof = num_of_fields\n self._sorted = sorted\n def to_dict(self, bag = None) -> dict[str]:\n ret = super().to_dict(bag)\n ret[\"inner\"] = self._inner.to_dict(bag)\n ret[\"num_of_fields\"] = self._nof\n ret[\"sorted\"] = self._sorted\n return ret\n\n## Record collection operators\nclass GroupBy(BatchOperationBase):\n def __init__(self, inner :IRBase, key_func : list[IRBase]):\n super().__init__(\"GroupBy\")\n self._inner = inner\n self._key_func = key_func\n def to_dict(self, bag = None) -> dict[str]:\n ret = super().to_dict(bag)\n ret[\"inner\"] = self._inner.to_dict(bag)\n ret[\"keys\"] = [key.to_dict(bag) for key in self._key_func]\n return ret\n\nclass Format(BatchOperationBase):\n def __init__(self, inner : IRBase, fmt_str : str, values : dict[str, Any]):\n super().__init__(\"Format\")\n self._inner = inner\n self._fmt_str = 
fmt_str\n self._values = values\n \n def to_dict(self, bag = None) -> dict[str]:\n ret = super().to_dict(bag)\n ret[\"inner\"] = self._inner.to_dict(bag)\n ret[\"fmt_str\"] = self._fmt_str\n ret[\"values\"] = dict()\n for key in self._values:\n ret[\"values\"][key] = self._values[key].to_dict(bag)\n return ret\n\nclass AssumeSortedIR(BatchOperationBase):\n def __init__(self, inner: IRBase):\n super().__init__(\"AssumeSorted\")\n self._inner = inner\n def to_dict(self, bag = None) -> dict[str]:\n ret = super().to_dict(bag)\n ret[\"inner\"] = self._inner.to_dict(bag)\n return ret\n\nclass InternalSort(BatchOperationBase):\n def __init__(self, base: IRBase):\n super().__init__(\"InternalSort\")\n self._inner = base\n def to_dict(self, bag = None) -> dict[str]:\n ret = super().to_dict(bag)\n ret[\"inner\"] = self._inner.to_dict(bag)\n return ret\n\nclass Nop(BatchOperationBase):\n def __init__(self, inner: IRBase):\n super().__init__(\"Nop\")\n self._inner = inner\n def to_dict(self, bag = None) -> dict[str]:\n ret = super().to_dict(bag)\n ret[\"inner\"] = self._inner.to_dict(bag)\n return ret\n\nclass Alter(BatchOperationBase):\n def __init__(self, base : IRBase, target_field : str, value_expr : IRBase, sorted: bool):\n super().__init__(\"Alter\")\n self._inner = base\n self._target_field = target_field\n self._value_expr = value_expr\n self._sorted = sorted\n def to_dict(self, bag = None) -> dict[str]:\n ret = super().to_dict(bag)\n ret[\"inner\"] = self._inner.to_dict(bag)\n ret[\"field\"] = self._target_field\n ret[\"value\"] = self._value_expr.to_dict(bag)\n ret[\"sorted\"] = self._sorted\n return ret\n\nclass Filter(BatchOperationBase):\n def __init__(self, base : IRBase, cond : IRBase):\n super().__init__(\"Filter\")\n self._inner = base\n self._cond = cond\n def to_dict(self, bag = None) -> dict[str]:\n ret = super().to_dict(bag)\n ret[\"inner\"] = self._inner.to_dict(bag)\n ret[\"cond\"] = self._cond.to_dict(bag)\n return ret\n\nclass Invert(BatchOperationBase):\n def __init__(self, inner: IRBase):\n super().__init__(\"Invert\")\n self._inner = inner\n def to_dict(self, bag = None) -> dict[str]:\n ret = super().to_dict(bag)\n ret[\"inner\"] = self._inner.to_dict(bag)\n return ret\n\nclass AssignTag(BatchOperationBase):\n def __init__(self, inner: IRBase, tag):\n super().__init__(\"AssignTag\")\n self._inner = inner\n self._tag = tag\n def to_dict(self, bag = None) -> dict[str]:\n ret = super().to_dict(bag)\n ret[\"inner\"] = self._inner.to_dict(bag)\n ret[\"tag\"] = self._tag\n return ret\n\nclass TwoWayMerge(BatchOperationBase):\n def __init__(self, a: IRBase, b: IRBase):\n super().__init__(\"TwoWayMerge\")\n self._a = a\n self._b = b\n def to_dict(self, bag = None) -> dict[str]:\n ret = super().to_dict(bag)\n ret[\"expr_1\"] = self._a.to_dict(bag)\n ret[\"expr_2\"] = self._b.to_dict(bag)\n return ret\n\nclass MergeOverlap(BatchOperationBase):\n def __init__(self, inner : IRBase):\n super().__init__(\"MergeOverlap\")\n self._inner = inner\n def to_dict(self, bag = None) -> dict[str]:\n ret = super().to_dict(bag)\n ret[\"inner\"] = self._inner.to_dict(bag)\n return ret\n\nclass Intersection(BatchOperationBase):\n def __init__(self, lhs : IRBase, rhs : IRBase, flavor : str, sorted : bool):\n super().__init__(\"Intersection\")\n if flavor not in [\"inner\", \"outer\", \"left-outer\", \"right-outer\"]:\n raise RuntimeError(\"Unexpected intersection flavor\")\n self._flavor = flavor\n self._lhs = lhs\n self._rhs = rhs\n self._sorted = sorted\n def to_dict(self, bag = None) -> dict[str]:\n 
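# Serialize the join: record the flavor, recurse into both operand subtrees, and pass\n        # bag through so nested ConstValue literals get lifted into the shared constant bag.\n        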
ret = super().to_dict(bag)\n ret[\"flavor\"] = self._flavor\n ret[\"lhs\"] = self._lhs.to_dict(bag)\n ret[\"rhs\"] = self._rhs.to_dict(bag)\n ret[\"sorted\"] = self._sorted\n return ret\n\n\n## Drain Functions\nclass WriteFile(BatchOperationBase):\n def __init__(self, target : Any, what : IRBase):\n super().__init__(\"WriteFile\")\n self._what = what\n self._target = target\n def to_dict(self, bag = None) -> dict[str]:\n ret = super().to_dict(bag)\n ret[\"what\"] = self._what.to_dict(bag)\n if type(self._target) == str and bag != None:\n ret[\"target\"] = make_const_bag_ref(self._target, bag)\n else:\n ret[\"target\"] = self._target\n return ret\n\nclass Count(BatchOperationBase):\n def __init__(self, what : IRBase):\n super().__init__(\"Count\")\n self._what = what\n def to_dict(self, bag = None) -> dict[str]:\n ret = super().to_dict(bag)\n ret[\"what\"] = self._what.to_dict(bag)\n return ret\n\nclass Limit(BatchOperationBase):\n def __init__(self, what: IRBase, count: int):\n super().__init__(\"Limit\")\n self._what = what\n self._count = count\n def to_dict(self, bag = None) -> dict[str]:\n ret = super().to_dict(bag)\n ret[\"what\"] = self._what.to_dict(bag)\n if bag != None:\n ret[\"count\"] = make_const_bag_ref(self._count, bag) \n else:\n ret[\"count\"] = self._count\n return ret\n\n## The Field Expression\nclass FieldExpressionBase(IRBase):\n pass\n\nclass RuntimeValueBase(FieldExpressionBase):\n def __init__(self, opcode : str):\n super().__init__(opcode)\n def to_dict(self, bag) -> dict[str]:\n return super().to_dict(bag)\n\nclass UnaryBase(FieldExpressionBase):\n def __init__(self, opcode : str, operand_key : str, operand : IRBase):\n super().__init__(opcode)\n self._dict = dict[str, IRBase]()\n self._dict[operand_key] = operand\n def to_dict(self, bag = None) -> dict[str]:\n ret = super().to_dict(bag)\n for key in self._dict:\n if isinstance(self._dict[key], IRBase):\n ret[key] = self._dict[key].to_dict(bag)\n else:\n ret[key] = self._dict[key]\n return ret\n\nclass BinaryBase(FieldExpressionBase):\n def __init__(self, \n opcode : str, \n lhs : IRBase, \n rhs : IRBase, \n lhs_key : str = \"lhs\", \n rhs_key : str = \"rhs\"\n ):\n super().__init__(opcode)\n self._dict = dict()\n self._dict[lhs_key] = lhs\n self._dict[rhs_key] = rhs\n def to_dict(self, bag = None) -> dict[str]:\n ret = super().to_dict(bag)\n for key in self._dict:\n if isinstance(self._dict[key], IRBase):\n ret[key] = self._dict[key].to_dict(bag)\n else:\n ret[key] = self._dict[key]\n return ret\n\nclass Cond(FieldExpressionBase):\n def __init__(self, cond : IRBase, then : IRBase, elze : IRBase):\n super().__init__(\"Cond\")\n self._cond = cond\n self._then = then\n self._else = elze\n def to_dict(self, bag = None) -> dict[str]:\n ret = super().to_dict(bag)\n ret[\"cond\"] = self._cond.to_dict(bag)\n ret[\"then\"] = self._then.to_dict(bag)\n ret[\"elze\"] = self._else.to_dict(bag)\n return ret\n\nclass FieldRef(UnaryBase):\n def __init__(self, field_name : str):\n super().__init__(\"FieldRef\", \"field\", field_name)\n\nclass NumberOfComponents(RuntimeValueBase):\n def __init__(self):\n super().__init__(\"NumberOfComponents\")\n\nclass ComponentFieldRef(FieldExpressionBase):\n def __init__(self, target : int, field_name : str):\n super().__init__(\"ComponentFieldRef\")\n self._target = target\n self._field_name = field_name\n def to_dict(self, bag = None) -> dict[str]:\n ret = super().to_dict(bag)\n ret[\"target\"] = self._target\n ret[\"field_name\"] = self._field_name\n return ret\n\nclass 
ConstValue(UnaryBase):\n def __init__(self, value : Any):\n super().__init__(\"ConstValue\", \"value\", value)\n def to_dict(self, bag) -> dict[str]:\n ret = super().to_dict(bag)\n type_of_value = type(self._dict[\"value\"])\n if bag != None and (type_of_value in [int, float, str]):\n ret[\"value\"] = make_const_bag_ref(self._dict[\"value\"], bag)\n return ret\n\nclass FullRecordRef(RuntimeValueBase):\n def __init__(self):\n super().__init__(\"FullRecordRef\")\n\nclass RecordRef(IRBase):\n def __init__(self, id : int):\n super().__init__(\"RecordRef\")\n self._id = id\n def to_dict(self, bag) -> dict[str]:\n ret = super().to_dict(bag)\n ret[\"id\"] = self._id\n return ret\n\nclass StringRepr(UnaryBase):\n def __init__(self, operand: IRBase):\n super().__init__(\"StringRepr\", \"value\", operand)\n\nclass And(BinaryBase):\n def __init__(self, lhs : IRBase, rhs : IRBase):\n super().__init__(\"And\", lhs, rhs)\n\nclass Or(BinaryBase):\n def __init__(self, lhs : IRBase, rhs: IRBase):\n super().__init__(\"Or\", lhs, rhs)\n\nclass Xor(BinaryBase):\n def __init__(self, lhs : IRBase, rhs: IRBase):\n super().__init__(\"Or\", lhs, rhs)\n\nclass Not(UnaryBase):\n def __init__(self, operand : IRBase):\n super().__init__(\"Not\", \"operand\", operand)\n\nclass Neg(UnaryBase):\n def __init__(self, operand : IRBase):\n super().__init__(\"Neg\", \"operand\", operand)\n\nclass Add(BinaryBase):\n def __init__(self, lhs : IRBase, rhs: IRBase):\n super().__init__(\"Add\", lhs, rhs)\n\nclass Sub(BinaryBase):\n def __init__(self, lhs : IRBase, rhs: IRBase):\n super().__init__(\"Sub\", lhs, rhs)\n\nclass Mul(BinaryBase):\n def __init__(self, lhs : IRBase, rhs: IRBase):\n super().__init__(\"Mul\", lhs, rhs)\n\nclass Div(BinaryBase):\n def __init__(self, lhs : IRBase, rhs: IRBase):\n super().__init__(\"Div\", lhs, rhs)\n\nclass Mod(BinaryBase):\n def __init__(self, lhs : IRBase, rhs: IRBase):\n super().__init__(\"Mod\", lhs, rhs)\n\nclass Eq(BinaryBase):\n def __init__(self, lhs : IRBase, rhs: IRBase):\n super().__init__(\"Eq\", lhs, rhs)\n\nclass Ne(BinaryBase):\n def __init__(self, lhs : IRBase, rhs: IRBase):\n super().__init__(\"Ne\", lhs, rhs)\n\nclass LessThan(BinaryBase):\n def __init__(self, lhs : IRBase, rhs: IRBase):\n super().__init__(\"LessThan\", lhs, rhs)\n\nclass GreaterThan(BinaryBase):\n def __init__(self, lhs : IRBase, rhs: IRBase):\n super().__init__(\"GreaterThan\", lhs, rhs)\n\nclass LessEqualThan(BinaryBase):\n def __init__(self, lhs : IRBase, rhs: IRBase):\n super().__init__(\"LessEqualThan\", lhs, rhs)\n\nclass GreaterEqualThan(BinaryBase):\n def __init__(self, lhs : IRBase, rhs: IRBase):\n super().__init__(\"GreaterEqualThan\", lhs, rhs)\n\n\nclass RightShift(BinaryBase):\n def __init__(self, lhs : IRBase, rhs: IRBase):\n super().__init__(\"RightShift\", lhs, rhs)\n\nclass LeftShift(BinaryBase):\n def __init__(self, lhs : IRBase, rhs: IRBase):\n super().__init__(\"LeftShift\", lhs, rhs)\n\nclass Neg(BinaryBase):\n def __init__(self, lhs : IRBase, rhs: IRBase):\n super().__init__(\"Neg\", lhs, rhs)\n\nclass RegexMatch(BinaryBase):\n def __init__(self, lhs: IRBase, rhs: IRBase):\n super().__init__(\"RegexMatch\", lhs, rhs)","repo_name":"38/grass","sub_path":"pygrass/pygrass/ir.py","file_name":"ir.py","file_ext":"py","file_size_in_byte":18299,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"85"} +{"seq_id":"38578666902","text":"''' Server is designed to run from klayout GSI, usually in GUI mode\n\n Current state: only way to stop serving is close 
the application.\n'''\nfrom __future__ import print_function\n\nimport socket\nimport lyipc\nfrom lygadgets import message, isGSI\n\nif not isGSI():\n raise RuntimeError('Non-klayout serving does not make sense')\nimport pya\n\n\n# As of now, port is hard coded and there is no way to stop it besided closing the app\n# We have to make sure that a second server doesn't come along and clash\n__active_server = None\ndef run_server():\n global __active_server\n if __active_server is None:\n __active_server = KlayoutServer()\n return __active_server\n\n\nclass KlayoutServer(pya.QTcpServer):\n def new_connection(self):\n from lyipc.interpreter import parse_message\n # Handle incoming connection\n connection = self.nextPendingConnection()\n msg = 'null'\n while connection.isOpen() and connection.state() == pya.QTcpSocket.ConnectedState:\n if connection.canReadLine():\n payload = connection.readLine()\n if isinstance(payload, bytes):\n payload = payload.decode()\n msg = payload.rstrip('\\n').rstrip('\\r')\n response = parse_message(msg)\n connection.write(response)\n connection.disconnectFromHost()\n else:\n connection.waitForReadyRead(500)\n signal = pya.qt_signal(\"disconnected()\")\n slot = pya.qt_slot(\"deleteLater()\")\n pya.QObject.connect(connection, signal, connection, slot)\n\n\n def __init__(self, port=lyipc.PORT, parent=None):\n pya.QTcpServer.__init__(self, parent)\n localhost = pya.QHostAddress()\n self.listen(localhost, port)\n self.newConnection(self.new_connection)\n message('Server initialized with {}, {}'.format(localhost, port))\n","repo_name":"atait/klayout-ipc","sub_path":"lyipc/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1893,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"85"} +{"seq_id":"44317649997","text":"#coding:utf8\n'''\n2.改变三个参数,生成一个水平scale,最小值-500,最大值500,步距5\n'''\nfrom Tkinter import *\n\nroot = Tk()\nScale(root,\n from_=-500, # 设置最小值\n to=500, # 设置最大值\n resolution=5, # 设置步长\n orient=HORIZONTAL #设置水平方向\n ).pack()\nroot.mainloop()","repo_name":"gzgdouru/python_module","sub_path":"gui/scale/test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"74687593877","text":"# -*- coding: utf-8 -*-\nfrom pkg_resources import get_distribution, DistributionNotFound\n\ntry:\n # Change here if project is renamed and does not equal the package name\n dist_name = __name__\n __version__ = get_distribution(dist_name).version\nexcept DistributionNotFound:\n __version__ = 'unknown'\nfinally:\n del get_distribution, DistributionNotFound\n\n\nimport sys\nimport logging\n\nlogger = logging.getLogger(__name__)\nenv = sys.executable.split('\\\\')[-2]\n\nif env != 'pymuse':\n logger.warning(f'\\nYou are currently in the enviorment `{env}`.\\n' +\n f'It is recommendet to use this package in a dedicated enviornment `pymuse`.\\n' + \n f'To do this type `conda activate pymuse`.'\n )","repo_name":"fschmnn/pnlf","sub_path":"src/pnlf/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"85"} +{"seq_id":"3391352466","text":"import os\nimport itertools\nimport time\nimport random\n\nimport numpy as np\nimport pandas as pd \nimport matplotlib.pyplot as plt\nimport matplotlib.colors as mcolors\nimport seaborn as sns\nfrom torch.utils.data import Dataset, DataLoader\n\nimport torch\nimport 
torch.nn as nn\nimport torch.nn.functional as F\n\nfrom torch.optim import AdamW\nfrom torch.optim.lr_scheduler import CosineAnnealingLR, CosineAnnealingWarmRestarts, StepLR, ExponentialLR\nfrom sklearn.metrics import precision_recall_curve\nfrom sklearn.metrics import classification_report, confusion_matrix\nfrom sklearn.metrics import accuracy_score, auc, f1_score, precision_score, recall_score\nfrom utils.util_utils import *\nfrom utils.pre_data import *\nfrom models.models import *\nimport torch.onnx as torch_onnx\nfrom torchvision import models\nfrom torchsummary import summary\nimport argparse\n\nimport pretty_errors\nimport warnings\nwarnings.filterwarnings(action='ignore') \n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--phase', type=str, default='train', help='train or test')\nparser.add_argument('--train_data', type=str, default='../data/mitbih_with_synthetic/mitbih_with_syntetic_train.csv', help='train csv path')\nparser.add_argument('--test_data', type=str, default='../data/mitbih_with_synthetic/mitbih_with_syntetic_test.csv', help='test csv path')\nparser.add_argument('--results', type=str, default='./results', help='results path')\nparser.add_argument('--device', type=str, default='cuda:0' if torch.cuda.is_available() else 'cpu' , help='GPU or CPU') \nparser.add_argument('--model', type=str, default= 'cnn+lstm', help='select the models : cnn, cnn+lstm, cnn+lstm+att ') # \n# parser.add_argument('--model_load', type=str, default= './models/pre_train/cnn.pth', help='train model path')\nparser.add_argument('--model_load', type=str, default= './results/pths/best_cnn_model_epoch4.pth', help='train model path')\nparser.add_argument('--batch_size', type=int, default=96, help='batch size')\nparser.add_argument('--epochs', type=int, default=1, help='epoch number')\nparser.add_argument('--seed', type=int, default=2002, help='seed number')\n\n\ndef seed_everything(seed: int):\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n if torch.cuda.is_available():\n torch.cuda.manual_seed(seed)\n\n\nconfig = parser.parse_args()\n\nid_to_label = {\n 0: \"Normal\",\n 1: \"Artial Premature\",\n 2: \"Premature ventricular contraction\",\n 3: \"Fusion of ventricular and normal\",\n 4: \"Fusion of paced and normal\"\n}\n\nseed_everything(config.seed)\n\ndef func_model(config):\n if config.model == 'cnn' : \n model = CNN(num_classes=5, hid_size=128)\n elif config.model == 'cnn+lstm' : \n model = RecurrentModel(1, 64, 'lstm', True)\n elif config.model == 'cnn+lstm+attn' : \n model = RecurrentAttentionModel(1, 64, 'lstm', False)\n return model\n \nmodel = func_model(config)\n\n\n\n\"\"\"\n\nTrain & Validation\n\n\"\"\"\n\n\nif config.phase == 'train':\n\n trainer = Trainer(config, net=model, lr=1e-3, batch_size = config.batch_size, num_epochs = config.epochs, model_type = config.model) #100)\n trainer.run()\n train_logs = trainer.train_df_logs\n train_logs.columns = [\"train_\"+ colname for colname in train_logs.columns]\n\n val_logs = trainer.val_df_logs\n val_logs.columns = [\"val_\"+ colname for colname in val_logs.columns]\n\n logs = pd.concat([train_logs,val_logs], axis=1)\n logs.reset_index(drop=True, inplace=True)\n logs = logs.loc[:, [\n 'train_loss', 'val_loss', \n 'train_accuracy', 'val_accuracy', \n 'train_f1', 'val_f1',\n 'train_precision', 'val_precision',\n 'train_recall', 'val_recall']\n ]\n logs.head()\n logs.to_csv(config.results + f\"/logs/{config.model}.csv\", index=False)\n\n\nelif config.phase == 'test':\n\n model.load_state_dict( 
torch.load(config.model_load, map_location=config.device))\n model.eval()\n\n test_df = pd.read_csv(config.test_data)\n test_dataset = ECGDataset(test_df)\n test_dataloader = DataLoader(dataset=test_dataset, batch_size=96, num_workers=0, shuffle=False)\n\n\n y_pred, y_true = func_tester(config, test_dataloader, model)\n y_pred.shape, y_true.shape\n\n\n report = pd.DataFrame(\n classification_report(\n y_pred,\n y_true,\n output_dict=True\n )\n ).transpose()\n\n print(report)\n\n clf_report = classification_report(y_pred, \n y_true,\n labels=[0,1,2,3,4],\n target_names=list(id_to_label.values()),#['N', 'S', 'V', 'F', 'Q'],\n output_dict=True)\n\n\n plt.figure(figsize=(10, 8))\n ax = sns.heatmap(pd.DataFrame(clf_report).iloc[:-1, :].T, annot=True)\n ax.set_xticklabels(ax.get_xticklabels(),fontsize=15)\n ax.set_yticklabels(ax.get_yticklabels(),fontsize=12, rotation=0)\n\n\n plt.title(\"Ensemble Classification Report\", fontsize=20)\n plt.savefig(f\"./results/figs/{config.model}_result.png\", format=\"png\",bbox_inches='tight', pad_inches=0.2)\n","repo_name":"WoodoLee/ecg_stage","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5095,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"12226972285","text":"\"\"\" A simple example script to get all posts inside groups\n\"\"\"\n\nimport logging\n\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\n\nfrom utility import Dedup, accessToken, storeDSN\nfrom model.data import Group, Feed, Comment\nfrom api import GraphAPI\n\nlogging.basicConfig(filename='example.log', level=logging.INFO)\n\n\ndef get_feed(graph, group_id, args=None):\n feeds = graph.request('{}/feed'.format(group_id), args)\n ret_feeds = []\n for key in feeds.keys():\n feed = feeds[key]\n if 'message' not in feed:\n continue # skip story-related feed\n f = Feed()\n f.id = feed['id']\n f.message = feed['message']\n f.updated_time = feed['updated_time']\n ret_feeds.append(f)\n return ret_feeds\n\ndef get_comment(graph, feed_id, args=None):\n comments = graph.request('{}/comments'.format(feed_id), args)\n ret_comments = []\n for key in comments.keys():\n comment = comments[key]\n c = Comment()\n c.id = comment['id']\n c.feed_id = feed_id\n c.from_id = comment['from']['id']\n c.message = comment['message']\n c.created_time = comment['created_time']\n ret_comments.append(c)\n return ret_comments\n\ndef run(session, onNewFeed=None, onNewComment=None):\n groupIds = ['575953955944382']\n\n graph = GraphAPI(accessToken(), logging=logging)\n for gid in groupIds:\n # get all feeds\n feeds = get_feed(graph, gid, {})\n for feed in feeds:\n if not feed.id:\n continue\n comments= get_comment(graph, feed.id, {})\n # write feed if it does not exist\n if not session.query(Feed).filter_by(id=feed.id).scalar():\n if onNewFeed: # hooks for new feed\n onNewFeed(feed)\n session.add(feed)\n for comment in comments:\n if session.query(Comment).filter_by(id=comment.id).scalar():\n continue\n if onNewComment: # hook for new comment\n onNewComment(comment)\n session.add(comment)\n session.commit()\n\n\ndef main():\n engine = create_engine(storeDSN(), echo=False)\n connection = engine.connect()\n Session = sessionmaker(bind=engine)\n session = Session()\n\n def onNewComment(comment):\n print(comment)\n\n run(session, onNewComment=onNewComment)\n #dump(session)\n\n connection.close()\n\nif __name__ == '__main__':\n 
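# Entry point: open the store, crawl the configured group IDs once, print any new comments, then close.\n    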
main()\n","repo_name":"hsinhoyeh/facebook-crawler","sub_path":"get_feeds.py","file_name":"get_feeds.py","file_ext":"py","file_size_in_byte":2486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"29312291999","text":"\"\"\"Parser for :class: and :func: roles.\n\nThis module implements a reST extension to put links for the ``:class:``\nand ``:func:`` roles based on links to the documentation. The links\nare stored in the site cache in the ``*_targets`` key, where the ``*``\nis one of the contexts, ``py``, ``mat``, and ``cti``. The targets are\ngenerated and stored in the cache by the ``parse_docs`` plugin.\n\"\"\"\n\nfrom docutils import nodes\nfrom docutils.parsers.rst import roles\n\nfrom nikola.utils import split_explicit_title, get_logger\nfrom nikola.plugin_categories import RestExtension\n\n\nLOGGER = get_logger(\"rest_class\")\n\n\nclass Plugin(RestExtension):\n \"\"\"Plugin for :class: and :func: role.\"\"\"\n\n name = \"rest_class\"\n\n def set_site(self, site):\n \"\"\"Set the Nikola site instance for this plugin.\"\"\"\n self.site = site\n roles.register_canonical_role(\"class\", class_role)\n roles.register_canonical_role(\"cti:class\", class_role)\n roles.register_canonical_role(\"py:class\", class_role)\n roles.register_canonical_role(\"mat:class\", class_role)\n roles.register_canonical_role(\"func\", class_role)\n roles.register_canonical_role(\"cti:func\", class_role)\n roles.register_canonical_role(\"py:func\", class_role)\n roles.register_canonical_role(\"mat:func\", class_role)\n class_role.site = site\n return super(Plugin, self).set_site(site)\n\n\ndef _class_link(name, rawtext, text):\n \"\"\"Handle the class role.\"\"\"\n if class_role.site.processing_targets:\n return True, True, None, None, None, None\n\n context_map = {\"py\": \"cython\", \"mat\": \"matlab\", \"cti\": \"cti\"}\n context = name.split(\":\", 1)[0]\n if context == name:\n context = class_role.site.config[\"DEFAULT_CONTEXT\"]\n default_context = True\n else:\n default_context = False\n\n target = \"{}_targets\".format(context_map[context])\n\n if class_role.site.cache.get(target) is not None:\n targets = class_role.site.cache.get(target).copy()\n else:\n targets = getattr(class_role.site, target).copy()\n\n has_explicit_title, title, label = split_explicit_title(text)\n\n if label not in targets and not default_context:\n LOGGER.error(\n \"The label {label} was not found in the context {context}\".format(\n label=label, context=context\n )\n )\n return False, False, None, None, None, label\n elif label not in targets and default_context:\n c_map = context_map.copy()\n c_map.pop(context)\n found_label = False\n for context, t in c_map.items():\n target = \"{}_targets\".format(t)\n if class_role.site.cache.get(target) is not None:\n targets = class_role.site.cache.get(target).copy()\n else:\n targets = getattr(class_role.site, target).copy()\n\n if label not in targets:\n continue\n else:\n found_label = True\n LOGGER.info(\n \"The label {} was found in the context {}. 
Consider explicitly \"\n \"specifying the context for this link\".format(label, context)\n )\n break\n\n if not found_label:\n LOGGER.error(\"The label {} could not be found in any context\".format(label))\n return False, False, None, None, None, label\n\n doc_file = targets[label][0]\n permalink = \"/documentation/docs-{}/\".format(\n class_role.site.config[\"CANTERA_VERSION\"]\n )\n permalink += doc_file + \"#\" + targets[label][1]\n code_node = nodes.literal(\n rawtext, targets[label][2] + \"()\", classes=[\"code\", \"xref\", context]\n )\n if not has_explicit_title:\n title = targets[label][1]\n\n return True, False, title, code_node, permalink, label\n\n\ndef class_role(name, rawtext, text, lineno, inliner, options={}, content=[]):\n \"\"\"Handle the class role.\"\"\"\n success, processing, title, code_node, permalink, label = _class_link(\n name, rawtext, text\n )\n if processing:\n return [nodes.raw(\"\", text, format=\"html\")], []\n if success:\n node = make_link_node(rawtext, title, code_node, permalink, options)\n return [node], []\n else:\n msg = inliner.reporter.warning(\n \"The label {0} was not found\".format(label), line=lineno\n )\n prb = inliner.problematic(rawtext, rawtext, msg)\n return [prb], [msg]\n\n\ndef make_link_node(rawtext, text, code_node, url, options):\n \"\"\"Make a docutils link node.\"\"\"\n node = nodes.reference(\"\", \"\", refuri=url, *options)\n node.append(code_node)\n return node\n","repo_name":"Cantera/cantera-website","sub_path":"plugins/rest_class.py","file_name":"rest_class.py","file_ext":"py","file_size_in_byte":4627,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"85"} +{"seq_id":"35726677791","text":"# 수학 이분탐색? | bj 1072 게임\nimport sys\n\n# sys.setrecursionlimit(10 ** 9)\ninp = sys.stdin.readline\n\nX, Y = map(int, inp().split())\nZ = int(Y * 100 / X) # 나누기를 먼저하면 소수점자리수 표시문제로 인해 오답발생\nif X == Y:\n print(-1)\n sys.exit(0)\nif Z >= 99:\n print(-1)\n sys.exit(0)\n# 시간 초과 코드\n# cnt = 0\n# while True:\n# cnt += 1\n# Y += 1\n# X += 1\n# new_Z = (Y / X) * 100\n# if int(new_Z) - int(Z) >= 1:\n# break\n# print(cnt)\n\n# 공식으로 푸는 방법\n# (y + k)/(x + k)*100 - z >= 1\n# (y+k)/(x+k) = (z+1)/100\n# y+k = (z+1)/100 *(x+k)\n# k -k(z+1)/100 = (z+1)/100 *(x) -y\n# k = ((z+1)*(x) -100*y )/((100-(z+1)))\n# k >= (xz + x - 100y) / (99-z)\n\nK = (X * Z + X - 100 * Y) / (99 - Z)\nif K % 1 == 0:\n K = int(K)\nelse:\n K = int(K) + 1\nprint(K)\n","repo_name":"chj3748/TIL","sub_path":"Algorithm/baekjoon/bj_1072.py","file_name":"bj_1072.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"36090903696","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models\nfrom django.utils import timezone\n\nimport datetime\n\n# Create your models here.\nclass Questao(models.Model):\n\ttexto = models.CharField(max_length=200)\n\tdata = models.DateTimeField('publicacao')\n\n\tdef __str__(self):\n\t\treturn self.texto\n\n\tdef recente(self):\n\t\tnow = timezone.now()\n\t\treturn now - datetime.timedelta(days=1) <= self.data <= now\n\n\trecente.admin_order_field = 'data'\n\trecente.boolean = True\n\trecente.short_description = 'Novo?'\n\nclass Escolha(models.Model):\n\tquestao = models.ForeignKey(Questao, on_delete=models.CASCADE)\n\ttexto = models.CharField(max_length=100)\n\tvoto = models.IntegerField(default=0)\n\n\tdef __str__(self):\n\t\treturn 
self.texto\n","repo_name":"krloss/django-test","sub_path":"projeto/app/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"8299709477","text":"import numpy as np\n\nclass MaxPoolLayer:\n\n def __init__(self, stride, pool_size, input_img):\n self.stride = stride\n self.pool_size = pool_size\n self.input_img = input_img\n self.im_height, self.im_width, self.im_depth = input_img.shape\n self.out_height = int((self.im_height-pool_size)/stride)+1\n self.out_width = int((self.im_width-pool_size) /stride)+1\n self.output = np.zeros((self.out_height, self.out_width, self.im_depth))\n\n def maxpool(self):\n for i in range(self.im_depth):\n curr_y = out_y = 0\n while curr_y + self.pool_size <= self.im_height:\n curr_x = out_x = 0\n while curr_x + self.pool_size <= self.im_width:\n self.output[out_y, out_x, i] = np.max(self.input_img[curr_y:curr_y + self.pool_size, curr_x:curr_x + self.pool_size, i])\n curr_x += self.stride\n out_x += 1\n curr_y += self.stride\n out_y += 1","repo_name":"geontackee/Vanilla-Python-ML","sub_path":"maxpool.py","file_name":"maxpool.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"31950436793","text":"import time\nimport pathlib\nfrom os.path import isfile\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.backends.cudnn as cudnn\n\nimport models\nfrom utils import *\nfrom config import config\nfrom data import DataLoader\n\n# for ignore imagenet PIL EXIF UserWarning\nimport warnings\nwarnings.filterwarnings(\"ignore\", \"(Possibly )?corrupt EXIF data\", UserWarning)\n\n\nbest_acc1 = 0\n\n\ndef main():\n global opt, start_epoch, best_acc1\n opt = config()\n\n if opt.cuda and not torch.cuda.is_available():\n raise Exception('No GPU found, please run without --cuda')\n\n print('\\n=> creating model \\'{}\\''.format(opt.arch))\n if opt.arch == 'shufflenet':\n model = models.__dict__[opt.arch](opt.dataset, opt.width_mult, opt.groups)\n else:\n model = models.__dict__[opt.arch](opt.dataset, opt.width_mult)\n\n criterion = nn.CrossEntropyLoss()\n optimizer = optim.SGD(model.parameters(), lr=opt.lr,\n momentum=opt.momentum, weight_decay=opt.weight_decay,\n nesterov=True)\n start_epoch = 0\n n_retrain = 0\n\n if opt.cuda:\n torch.cuda.set_device(opt.gpuids[0])\n with torch.cuda.device(opt.gpuids[0]):\n model = model.cuda()\n criterion = criterion.cuda()\n model = nn.DataParallel(model, device_ids=opt.gpuids,\n output_device=opt.gpuids[0])\n cudnn.benchmark = True\n\n # checkpoint file\n ckpt_dir = pathlib.Path('checkpoint')\n ckpt_file = ckpt_dir / opt.arch / opt.dataset / opt.ckpt\n\n # for resuming training\n if opt.resume:\n if isfile(ckpt_file):\n print('==> Loading Checkpoint \\'{}\\''.format(opt.ckpt))\n checkpoint = load_model(model, ckpt_file, opt)\n\n start_epoch = checkpoint['epoch']\n optimizer.load_state_dict(checkpoint['optimizer'])\n\n print('==> Loaded Checkpoint \\'{}\\' (epoch {})'.format(\n opt.ckpt, start_epoch))\n else:\n print('==> no checkpoint found \\'{}\\''.format(\n opt.ckpt))\n return\n\n # Data loading\n print('==> Load data..')\n train_loader, val_loader = DataLoader(opt.batch_size, opt.workers,\n opt.dataset, opt.datapath,\n opt.cuda)\n\n # for evaluation\n if opt.evaluate:\n if isfile(ckpt_file):\n print('==> Loading Checkpoint \\'{}\\''.format(opt.ckpt))\n checkpoint = 
load_model(model, ckpt_file, opt)\n\n start_epoch = checkpoint['epoch']\n optimizer.load_state_dict(checkpoint['optimizer'])\n\n print('==> Loaded Checkpoint \\'{}\\' (epoch {})'.format(\n opt.ckpt, start_epoch))\n\n # evaluate on validation set\n print('\\n===> [ Evaluation ]')\n start_time = time.time()\n acc1, acc5 = validate(val_loader, model, criterion)\n save_eval(['{}-{}-{}'.format(opt.arch, opt.dataset, opt.ckpt[:-4]),\n str(acc1)[7:-18], str(acc5)[7:-18]], opt)\n elapsed_time = time.time() - start_time\n print('====> {:.2f} seconds to evaluate this model\\n'.format(\n elapsed_time))\n return\n else:\n print('==> no checkpoint found \\'{}\\''.format(\n opt.ckpt))\n return\n\n # train...\n train_time = 0.0\n validate_time = 0.0\n for epoch in range(start_epoch, opt.epochs):\n adjust_learning_rate(optimizer, epoch, opt.lr)\n print('\\n==> {}/{} training'.format(opt.arch, opt.dataset))\n print('==> Epoch: {}, lr = {}'.format(\n epoch, optimizer.param_groups[0][\"lr\"]))\n\n # train for one epoch\n print('===> [ Training ]')\n start_time = time.time()\n acc1_train, acc5_train = train(train_loader,\n epoch=epoch, model=model,\n criterion=criterion, optimizer=optimizer)\n elapsed_time = time.time() - start_time\n train_time += elapsed_time\n print('====> {:.2f} seconds to train this epoch\\n'.format(\n elapsed_time))\n\n # evaluate on validation set\n print('===> [ Validation ]')\n start_time = time.time()\n acc1_valid, acc5_valid = validate(val_loader, model, criterion)\n elapsed_time = time.time() - start_time\n validate_time += elapsed_time\n print('====> {:.2f} seconds to validate this epoch\\n'.format(\n elapsed_time))\n\n # remember best Acc@1 and save checkpoint and summary csv file\n is_best = acc1_valid > best_acc1\n best_acc1 = max(acc1_valid, best_acc1)\n state = {'epoch': epoch + 1,\n 'model': model.state_dict(),\n 'optimizer': optimizer.state_dict()}\n summary = [epoch,\n str(acc1_train)[7:-18], str(acc5_train)[7:-18],\n str(acc1_valid)[7:-18], str(acc5_valid)[7:-18]]\n save_model(state, epoch, is_best, opt)\n save_summary(summary, opt)\n\n avg_train_time = train_time / (opt.epochs-start_epoch)\n avg_valid_time = validate_time / (opt.epochs-start_epoch)\n total_train_time = train_time + validate_time\n print('====> average training time per epoch: {:,}m {:.2f}s'.format(\n int(avg_train_time//60), avg_train_time%60))\n print('====> average validation time per epoch: {:,}m {:.2f}s'.format(\n int(avg_valid_time//60), avg_valid_time%60))\n print('====> training time: {}h {}m {:.2f}s'.format(\n int(train_time//3600), int((train_time%3600)//60), train_time%60))\n print('====> validation time: {}h {}m {:.2f}s'.format(\n int(validate_time//3600), int((validate_time%3600)//60), validate_time%60))\n print('====> total training time: {}h {}m {:.2f}s'.format(\n int(total_train_time//3600), int((total_train_time%3600)//60), total_train_time%60))\n\n\ndef train(train_loader, **kwargs):\n epoch = kwargs.get('epoch')\n model = kwargs.get('model')\n criterion = kwargs.get('criterion')\n optimizer = kwargs.get('optimizer')\n\n batch_time = AverageMeter('Time', ':6.3f')\n data_time = AverageMeter('Data', ':6.3f')\n losses = AverageMeter('Loss', ':.4e')\n top1 = AverageMeter('Acc@1', ':6.2f')\n top5 = AverageMeter('Acc@5', ':6.2f')\n progress = ProgressMeter(len(train_loader), batch_time, data_time,\n losses, top1, top5, prefix=\"Epoch: [{}]\".format(epoch))\n\n # switch to train mode\n model.train()\n\n end = time.time()\n for i, (input, target) in enumerate(train_loader):\n # measure data 
loading time\n data_time.update(time.time() - end)\n\n if opt.cuda:\n target = target.cuda(non_blocking=True)\n\n # compute output\n output = model(input)\n loss = criterion(output, target)\n\n # measure accuracy and record loss\n acc1, acc5 = accuracy(output, target, topk=(1, 5))\n losses.update(loss.item(), input.size(0))\n top1.update(acc1[0], input.size(0))\n top5.update(acc5[0], input.size(0))\n\n # compute gradient and do SGD step\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n\n if i % opt.print_freq == 0:\n progress.print(i)\n\n end = time.time()\n\n print('====> Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'\n .format(top1=top1, top5=top5))\n\n return top1.avg, top5.avg\n\n\ndef validate(val_loader, model, criterion):\n batch_time = AverageMeter('Time', ':6.3f')\n losses = AverageMeter('Loss', ':.4e')\n top1 = AverageMeter('Acc@1', ':6.2f')\n top5 = AverageMeter('Acc@5', ':6.2f')\n progress = ProgressMeter(len(val_loader), batch_time, losses, top1, top5,\n prefix='Test: ')\n\n # switch to evaluate mode\n model.eval()\n\n with torch.no_grad():\n end = time.time()\n for i, (input, target) in enumerate(val_loader):\n if opt.cuda:\n target = target.cuda(non_blocking=True)\n\n # compute output\n output = model(input)\n loss = criterion(output, target)\n\n # measure accuracy and record loss\n acc1, acc5 = accuracy(output, target, topk=(1, 5))\n losses.update(loss.item(), input.size(0))\n top1.update(acc1[0], input.size(0))\n top5.update(acc5[0], input.size(0))\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n\n if i % opt.print_freq == 0:\n progress.print(i)\n\n end = time.time()\n\n print('====> Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'\n .format(top1=top1, top5=top5))\n\n return top1.avg, top5.avg\n\n\nif __name__ == '__main__':\n start_time = time.time()\n main()\n elapsed_time = time.time() - start_time\n print('====> total time: {}h {}m {:.2f}s'.format(\n int(elapsed_time//3600), int((elapsed_time%3600)//60), elapsed_time%60))\n","repo_name":"2KangHo/PyTorch-ImageClassification-TestBench","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9039,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"9005560008","text":"import pandas as pd\n\nfrom data import datasets_location\n\n# Removes all rows which are completely empty\ndef remove_empty_rows(df):\n rows = df.shape[0]\n cols = df.shape[1]\n\n drop_list = []\n\n nans = df.isna()\n\n for i in range(rows):\n remove = True\n for j in range(cols):\n if(j == 0):\n pass\n elif not nans.iloc[i,j]:\n remove = False\n break\n if remove:\n drop_list.append(i)\n\n print(\"Dropping {} elements from {}. 
{} entries remain.\".format(\n        len(drop_list), len(df), len(df) - len(drop_list)))\n\n    return df.drop(drop_list)\n\n# Removes the entries that are completely empty and then saves it as a new file\ndef remove_completely_empty():\n    for key in datasets_location:\n        data = pd.read_csv(datasets_location[key] + \".csv\")\n        print(datasets_location[key] + \".csv\")\n        print(\"Shape {}\".format(data.shape))\n        data = remove_empty_rows(data)\n\n        new_name = datasets_location[key] + \"_1.csv\"\n        print(new_name)\n        data.to_csv(new_name)\n        print()\n\nremove_completely_empty()","repo_name":"AdwaitB/greco-notes","sub_path":"influxdb_study/data_clean.py","file_name":"data_clean.py","file_ext":"py","file_size_in_byte":1131,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"7385894790","text":"from bidsschematools.render import utils\n\n\ndef test_combine_extensions():\n    \"\"\"A unit test for utils.combine_extensions.\"\"\"\n    test_extensions = [\"nii.gz\", \"nii\", \"json\"]\n    target_combined = [\"nii[.gz]\", \"json\"]\n    test_combined = utils.combine_extensions(test_extensions, pdf_format=True)\n    assert test_combined == target_combined\n\n\ndef test_resolve_metadata_type():\n    \"\"\"A unit test for utils.resolve_metadata_type.\"\"\"\n    base_definition = {\n        \"name\": \"Term\",\n        \"description\": \"A description\",\n    }\n\n    # Basic string\n    term_definition1 = base_definition.copy()\n    term_definition1[\"type\"] = \"string\"\n    target_description = \"[string](https://www.w3schools.com/js/js_json_datatypes.asp)\"\n    type_description = utils.resolve_metadata_type(term_definition1)\n    assert target_description == type_description\n\n    # When n/a is the only allowed value, the type should say \"n/a\"\n    term_definition1[\"enum\"] = [\"n/a\"]\n    target_description = '`\"n/a\"`'\n    type_description = utils.resolve_metadata_type(term_definition1)\n    assert target_description == type_description\n","repo_name":"bids-standard/bids-specification","sub_path":"tools/schemacode/bidsschematools/tests/test_render_utils.py","file_name":"test_render_utils.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"en","doc_type":"code","stars":230,"dataset":"github-code","pt":"85"} +{"seq_id":"40944874117","text":"import pandas as pd\nimport os\nimport re\nimport zillow_data\n\nfinal_df = pd.DataFrame()\n\npath = os.getcwd() + '/files'\nfor filename in os.listdir(path):\n    if '90_Day_Defaults' in filename:\n        data_column = '90 Days Delinquent'\n        with open(os.path.join(path, filename)) as file:\n            df = pd.read_csv(file, converters={'STATEFP':str,'COUNTYFP':str})\n    elif 'Active_Foreclosure' in filename:\n        data_column = 'Active Foreclosures'\n        with open(os.path.join(path, filename)) as file:\n            df = pd.read_csv(file, converters={'STATEFP':str,'COUNTYFP':str})\n    else:\n        continue\n\n    columns_to_keep = ['STATEFP','COUNTYFP']\n    date_columns_dict = {}\n\n    for col in df.columns.values:\n        if 'DEFAULTS_90_DAY' in col or 'FORECLOSURES_' in col:\n            columns_to_keep.append(col)\n            year = re.findall(\"(\\d\\d\\d\\d)\", col)\n            month = col[-2:]\n\n            date_columns_dict[col] = col[-2:] + '-01-' + str(year[0])\n\n    df = df[columns_to_keep]\n    df['CountyFIPS'] = df['STATEFP'] + df['COUNTYFP']\n    df = df.drop(columns=['STATEFP','COUNTYFP'])\n    df = df.melt(id_vars=['CountyFIPS'], var_name='Date', value_name=data_column)\n    df[data_column] = df[data_column].fillna(0)\n    
df.loc[df[data_column] < 0, data_column] = 0\n    df['Date'] = df['Date'].replace(date_columns_dict)\n    df = df.groupby(['CountyFIPS','Date'], as_index=False).sum()\n\n    if final_df.empty:\n        final_df = df\n    else:\n        final_df = pd.merge(final_df, df, how='inner', left_on=['CountyFIPS','Date'], right_on=['CountyFIPS','Date'])\n\n\n\nzillow_data = zillow_data.get_zillow_data(list(final_df['Date'].drop_duplicates()))\n\nfinal_df['Date'] = pd.to_datetime(final_df['Date'], format=\"%m-%d-%Y\")\n\nfinal_df = pd.merge(final_df, zillow_data, how='inner', left_on=['CountyFIPS', 'Date'], right_on=['CountyFIPS', 'Date'])\nfinal_df = final_df.melt(id_vars=['CountyFIPS','Date','RegionName'], var_name='Variable', value_name='Value')\n\n\n\nfinal_df.to_csv('files/data.csv',index=False)\n\n","repo_name":"jonathan-oh-89/hud-data","sub_path":"hud_data.py","file_name":"hud_data.py","file_ext":"py","file_size_in_byte":2135,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"15081368777","text":"import io\nimport unittest\nfrom unittest import mock\nfrom unittest.mock import mock_open\n\nfrom generator import generator_text\n\n\nclass TestModel(unittest.TestCase):\n    def test_generator_failed(self):\n        with self.assertRaises(TypeError) as err:\n            list(generator_text(2, []))\n        self.assertEqual(type(err.exception), TypeError)\n        with self.assertRaises(TypeError) as err:\n            list(generator_text(\"../text\", 1))\n        self.assertEqual(type(err.exception), TypeError)\n        with self.assertRaises(FileNotFoundError) as err:\n            list(generator_text(\"asd\", []))\n        self.assertEqual(str(err.exception), \"Такого файла не существует\")\n\n    def test_generator(self):\n        file_content = 'МоРе вода трава\\nжук'\n        expected_output = ['МоРе вода трава']\n        with mock.patch('builtins.open', mock_open(read_data=file_content)):\n            result = list(generator_text('test.txt', ['трава']))\n            self.assertEqual(result, expected_output)\n            result = list(generator_text('test.txt', ['трава', 'вода']))\n            self.assertEqual(result, expected_output)\n            result = list(generator_text('test.txt', ['море']))\n            self.assertEqual(result, expected_output)\n            expected_output = ['жук']\n            result = list(generator_text('test.txt', ['жук']))\n            self.assertEqual(result, expected_output)\n\n    def test_generator_multiple_str(self):\n        file_content = 'МоРе вода трава\\nжук'\n        expected_output = ['МоРе вода трава', 'жук']\n        with mock.patch('builtins.open', mock_open(read_data=file_content)):\n            result = list(generator_text('test.txt', ['трава', 'жук']))\n            self.assertEqual(result, expected_output)\n\n    def test_generator_different_arguments_type(self):\n        file = io.StringIO(\"МоРе вода трава\\nжук\")\n        result = list(generator_text(file, ['море', 'вода']))\n        self.assertEqual(len(result), 1)\n        self.assertIn(\"МоРе вода трава\", result)\n        result = list(generator_text('text', ['море', 'вода']))\n        self.assertEqual(len(result), 2)\n        self.assertIn(\"океан крыша море вода\", result)\n        self.assertIn(\"джеймс бонд морей море\", result)\n\n\n    def test_generator_not_find(self):\n        file_content = 'А Роза упала на лапу Азора'\n        expected_output = []\n        with mock.patch('builtins.open', mock_open(read_data=file_content)):\n            result = list(generator_text('test.txt', ['роз']))\n            self.assertEqual(result, expected_output)\n            result = list(generator_text('test.txt', ['розау']))\n            self.assertEqual(result, 
expected_output)\n","repo_name":"pletnevaAD/vk_deep_python","sub_path":"01/test_generator.py","file_name":"test_generator.py","file_ext":"py","file_size_in_byte":2881,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"9201028854","text":"#Function Name:Continue\r\n#Input:\r\n#Output:5 4 3 2 1 0\r\n#Description:Use of continue statement\r\n#Date: 09/07/2021\r\n#Author: Shubham Lodha\r\n\r\nx=10\r\nwhile x>=1:\r\n    \r\n    x=x-1\r\n    if x>5:\r\n    \tcontinue\r\n    print(x)\r\nprint(\"End of Loop\")","repo_name":"shubhamlodha21/Python","sub_path":"Continue.py","file_name":"Continue.py","file_ext":"py","file_size_in_byte":232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"17961457695","text":"from sklearn.feature_extraction.text import TfidfVectorizer, ENGLISH_STOP_WORDS\r\nfrom sklearn.metrics.pairwise import linear_kernel\r\nfrom os.path import dirname, join as pjoin\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\nfrom sklearn.decomposition import PCA\r\nfrom sentence_embed import build_encoding, load_infersent_model\r\nfrom questions import isQuestion, get_Model\r\nfrom textblob import TextBlob\r\nfrom spacy import displacy\r\n\r\nimport matplotlib.pyplot as plot\r\nimport os\r\nimport pandas as pd\r\nimport spacy\r\n\r\ndef qa_pairs():\r\n    vect = TfidfVectorizer(stop_words='english', max_df=0.50, min_df=2)\r\n    email_dataFrame = read_email_data()\r\n    sentences = []\r\n    nlp = spacy.load(\"en_core_web_sm\")\r\n    load_infersent_model()\r\n    for i, text in email_dataFrame.iteritems():\r\n        blob = TextBlob(text)\r\n        for sentence in blob.sentences:\r\n            sentences.append(sentence.raw)\r\n    number = len(sentences)\r\n    model, tfidf_vect = get_Model()\r\n    detected_questions = []\r\n    for index, each in enumerate(sentences):\r\n        isQ = isQuestion(each, model, tfidf_vect)\r\n        if isQ and index+10 < number:\r\n            detected_questions.append(each)\r\n            candidateSentences= sentences[index:index+10]\r\n            candidateAnswers = []\r\n            candidateAnswers.append(each)\r\n            for every in candidateSentences:\r\n                if not isQuestion(every, model, tfidf_vect):\r\n                    candidateAnswers.append(every)\r\n\r\n            cosine_sim, euclidean = build_encoding(candidateAnswers)\r\n            largest, ind = get_Index_Of_Closest(cosine_sim)\r\n            largest, ind2 = get_Index_Of_Closest(euclidean) \r\n            print(f'Question: {each}')\r\n            print(f'Chosen answer by cosine sim: {candidateAnswers[ind]}')\r\n            print(f'Chosen answer by euclidean sim: {candidateAnswers[ind2]}')\r\n\r\n            for answer in candidateAnswers:\r\n                confidence, root = is_root_equal(nlp, each, answer)\r\n                if confidence: \r\n                    print(f'Based on equal root {root}, question-answer pair')\r\n                    print(f'Question {each}')\r\n                    print(f'Answer {answer}')\r\n\r\n\r\n    data = vect.fit_transform(sentences)\r\n    data_dense = data.todense()\r\n    coordinates = PCA(n_components=2).fit_transform(data_dense)\r\n    plot.scatter(coordinates[:, 0], coordinates[:, 1], c='m')\r\n    plot.show()\r\n\r\ndef is_root_equal(nlp, question, candidateAnswer):\r\n    q = nlp(question)\r\n    a = nlp(candidateAnswer)\r\n    root = \"\"\r\n    root2 = \"\"\r\n    for token in q:\r\n        if(token.dep_ == 'ROOT' and token.text != 'is'):\r\n            root = token.text \r\n\r\n    for token in a:\r\n        if(token.dep_ == 'ROOT' and token.text != 'is'):\r\n            root2 = token.text \r\n    areSame = root == root2 and root != ''\r\n    # if areSame: \r\n    #     displacy.serve(q, style=\"dep\")\r\n    #     displacy.serve(a, style=\"dep\")\r\n    return areSame, root\r\n\r\ndef 
get_Index_Of_Closest(cosine_sim):\r\n largest = -1\r\n index = 0\r\n for i in range(1, len(cosine_sim[0])):\r\n if cosine_sim[0][i] > largest:\r\n largest = cosine_sim[0][i]\r\n index = i\r\n return largest, index\r\n\r\n# helper code to read enron emails.\r\n# credit and referenced from https://github.com/anthdm/ml-email-clustering\r\ndef read_email_data():\r\n file_path = dirname(os.path.realpath(__file__))\r\n file_name = os.path.join(file_path, 'data', 'split_emails.csv')\r\n emails_data = pd.read_csv(f'{file_name}')\r\n email_dataFrame = pd.DataFrame(parse_into_emails(emails_data.message))\r\n email_dataFrame.drop(email_dataFrame.query(\"body == '' | to == '' | from_ == ''\").index, inplace=True)\r\n email_dataFrame.drop_duplicates(inplace=True)\r\n return email_dataFrame['body']\r\n\r\n# helper code to read enron emails.\r\n# credit and referenced from https://github.com/anthdm/ml-email-clustering\r\ndef parse_into_emails(messages):\r\n emails = []\r\n for each in messages:\r\n email_lines = each.split('\\n')\r\n email = {}\r\n contentSoFar = ''\r\n for eachline in email_lines:\r\n if ':' in eachline:\r\n temp = eachline.split(':')\r\n key = temp[0].lower().strip()\r\n value = temp[1].strip()\r\n if key == 'from' or key == 'to':\r\n email[key] = value\r\n else:\r\n contentSoFar += eachline.strip()\r\n email['body'] = contentSoFar\r\n emails.append(email)\r\n return {\r\n 'body': getListFromMap(emails, 'body'), \r\n 'to': getListFromMap(emails, 'to'), \r\n 'from_': getListFromMap(emails, 'from')\r\n }\r\n\r\n# helper code to read enron emails.\r\n# credit and referenced from https://github.com/anthdm/ml-email-clustering\r\ndef getListFromMap(emails, key):\r\n results = []\r\n for email in emails:\r\n if key not in email:\r\n results.append('')\r\n else:\r\n results.append(email[key])\r\n return results\r\n\r\nif __name__ == '__main__':\r\n qa_pairs()","repo_name":"apoorvab93/Autodocs","sub_path":"qa_pairs.py","file_name":"qa_pairs.py","file_ext":"py","file_size_in_byte":5102,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"84"} +{"seq_id":"35394669817","text":"from pyshell.arg.accessor.context import ContextAccessor\nfrom pyshell.arg.accessor.environment import EnvironmentAccessor\nfrom pyshell.arg.checker.default import DefaultChecker\nfrom pyshell.arg.checker.integer import IntegerArgChecker\nfrom pyshell.arg.checker.list import ListArgChecker\nfrom pyshell.arg.decorator import shellMethod\nfrom pyshell.utils.constants import CONTEXT_EXECUTION_KEY\nfrom pyshell.utils.constants import CONTEXT_EXECUTION_SHELL\nfrom pyshell.utils.constants import ENVIRONMENT_TAB_SIZE_KEY\nfrom pyshell.utils.constants import TAB_SIZE\nfrom pyshell.utils.misc import getTerminalSize\nfrom pyshell.utils.printing import printShell\nfrom pyshell.utils.printing import reduceFormatedString\nfrom pyshell.utils.printing import strLength\nfrom pyshell.utils.string65 import isString\n\n\n# TODO should also limit the single column size\n@shellMethod(result=ListArgChecker(DefaultChecker.getArg()))\ndef listResultHandler(result):\n if len(result) == 0:\n return result\n\n ret = \"\"\n for i in result:\n ret += str(i) + \"\\n\"\n\n printShell(ret[:-1])\n return result\n\n\n@shellMethod(result=ListArgChecker(DefaultChecker.getArg()))\ndef listFlatResultHandler(result):\n if len(result) == 0:\n printShell(\"\")\n return result\n\n s = \"\"\n for i in result:\n s += str(i) + \" \"\n\n printShell(s[:-1])\n\n return 
result\n\n\n@shellMethod(string=ListArgChecker(IntegerArgChecker(0, 255)))\ndef printStringCharResult(string):\n s = \"\"\n for char in string:\n s += chr(char)\n\n printShell(s)\n return string\n\n\n@shellMethod(byte_list=ListArgChecker(IntegerArgChecker(0, 255)))\ndef printBytesAsString(byte_list):\n if len(byte_list) == 0:\n printShell(\"\")\n return byte_list\n\n ret = \"\"\n for b in byte_list:\n ret += \"%-0.2X\" % b\n\n printShell(ret)\n\n return byte_list\n\n\n_defaultArgCheckerInstance = DefaultChecker.getArg()\n\n\ndef _computeSize(list_of_line, padding=2, extra_padding=0):\n sizes = [0] # at least one line\n\n increase_padding = True\n prefix_padding = 0\n for line in list_of_line:\n if isString(line) or not hasattr(line, \"__iter__\"):\n sizes[0] = max(sizes[0], strLength(str(line)) + padding)\n else:\n if len(line) > len(sizes):\n sizes.extend([0] * (len(line) - len(sizes)))\n\n for cindex in range(0, len(line)):\n if cindex < len(line)-1:\n new_value = prefix_padding + strLength(str(line[cindex]))\n new_value += padding\n else:\n new_value = prefix_padding + strLength(str(line[cindex]))\n sizes[cindex] = max(sizes[cindex], new_value)\n\n # space to add to have the data column slighty on the right in front\n # of the title line\n if increase_padding:\n prefix_padding = extra_padding\n increase_padding = False\n\n return sizes\n\n\ndef _printUntilColumn(column_size, con_execution, tab_size):\n if tab_size is None:\n tab_size = TAB_SIZE\n else:\n tab_size = tab_size.getValue()\n\n if (con_execution is not None and\n con_execution.getSelectedValue() == CONTEXT_EXECUTION_SHELL):\n # get sizes (tab, terminal, ...)\n width, height = getTerminalSize()\n\n # remove the tab size added by printShell\n width -= tab_size\n\n for i in range(0, len(column_size)):\n if column_size[i] >= width:\n return i, width\n width -= column_size[i]\n\n # there is room for every columns\n return len(column_size)-1, column_size[-1]\n\n\n@shellMethod(list_of_line=ListArgChecker(_defaultArgCheckerInstance),\n tab_size=EnvironmentAccessor(ENVIRONMENT_TAB_SIZE_KEY),\n con_execution=ContextAccessor(CONTEXT_EXECUTION_KEY))\ndef printColumnWithouHeader(list_of_line, tab_size=None, con_execution=None):\n if len(list_of_line) == 0:\n return list_of_line\n\n column_size = _computeSize(list_of_line)\n last_col, space_last_col = _printUntilColumn(column_size,\n con_execution,\n tab_size)\n\n to_print = \"\"\n for row_index in range(0, len(list_of_line)):\n line = list_of_line[row_index]\n\n if isString(line) or not hasattr(line, \"__iter__\"):\n to_print += str(line) + \"\\n\"\n # TODO must be reduce to IFF last column\n else: # no need of pading if the line has only the first column\n line_to_print = \"\"\n latest_column = min(len(line)-1, last_col)\n for column_index in range(0, latest_column+1):\n column = str(line[column_index])\n if column_index < latest_column:\n # no padding on last column\n padding = 0\n padding = column_size[column_index]\n padding -= strLength(str(line[column_index]))\n column += \" \"*padding\n else:\n column = reduceFormatedString(column, space_last_col)\n\n line_to_print += column\n to_print += line_to_print + \"\\n\"\n\n printShell(to_print[:-1])\n return list_of_line\n\n\n@shellMethod(list_of_line=ListArgChecker(_defaultArgCheckerInstance),\n tab_size=EnvironmentAccessor(ENVIRONMENT_TAB_SIZE_KEY),\n con_execution=ContextAccessor(CONTEXT_EXECUTION_KEY))\ndef printColumn(list_of_line, tab_size=None, con_execution=None):\n if len(list_of_line) == 0:\n return list_of_line\n\n column_size = 
_computeSize(list_of_line, extra_padding=1)\n last_col, space_last_col = _printUntilColumn(column_size,\n con_execution,\n tab_size)\n\n to_print = \"\"\n default_prefix = \"\"\n for row_index in range(0, len(list_of_line)):\n line = list_of_line[row_index]\n\n if row_index == 1:\n default_prefix = \" \"\n\n if isString(line) or not hasattr(line, \"__iter__\"):\n to_print += default_prefix + str(line) + \"\\n\"\n # TODO must be reduce to IFF last column\n else: # no need of pading if the line has only one column\n line_to_print = \"\"\n latest_column = min(len(line)-1, last_col)\n for column_index in range(0, latest_column+1):\n padding = 0\n column = default_prefix + str(line[column_index])\n if column_index < latest_column:\n # no padding on last column\n padding = column_size[column_index]\n padding -= strLength(str(line[column_index]))\n padding -= len(default_prefix)\n column += \" \"*padding\n else:\n column = reduceFormatedString(column, space_last_col)\n\n line_to_print += column\n to_print += line_to_print + \"\\n\"\n\n printShell(to_print[:-1])\n return list_of_line\n","repo_name":"djoproject/pyshell","sub_path":"pyshell/utils/postprocess.py","file_name":"postprocess.py","file_ext":"py","file_size_in_byte":7037,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"84"} +{"seq_id":"16801575349","text":"\n\"\"\"Selenium web scraping module.\"\"\"\nfrom __future__ import annotations\n\nimport logging\nfrom pathlib import Path\nimport re\n\nfrom bs4 import BeautifulSoup\nfrom selenium import webdriver\nfrom selenium.common.exceptions import WebDriverException\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.firefox.options import Options as FirefoxOptions\nfrom selenium.webdriver.firefox.service import Service\nfrom selenium.webdriver.remote.webdriver import WebDriver\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support.wait import WebDriverWait\n\nimport autogpt.processing.text as summary\nfrom autogpt.commands.command import command\nfrom autogpt.config import Config\nfrom autogpt.processing.html import extract_hyperlinks, format_hyperlinks\nfrom autogpt.url_utils.validators import validate_url\n\nFILE_DIR = Path(__file__).parent.parent\nCFG = Config()\n\n\n@command(\n \"browse_website\",\n \"Browse Website\",\n '\"url\": \"\", \"question\": \"\"',\n)\n@validate_url\ndef browse_website(url: str, question: str) -> str:\n \"\"\"Browse a website and return the answer and links to the user\n\n Args:\n url (str): The url of the website to browse\n question (str): The question asked by the user\n\n Returns:\n Tuple[str, WebDriver]: The answer and links to the user and the webdriver\n \"\"\"\n try:\n driver, text = scrape_text_with_selenium(url)\n except WebDriverException as e:\n msg = e.msg.split(\"\\n\")[0]\n return f\"Error: {msg}\"\n\n add_header(driver)\n summary_text = summary.summarize_text(url, text, question, driver)\n links = scrape_links_with_selenium(driver, url)\n\n if len(links) > 5:\n links = links[:5]\n close_browser(driver)\n return f\"Answer gathered from website: {summary_text} \\n \\n Links: {links}\"\n\n\n\ndef scrape_text_with_selenium(url: str) -> tuple[WebDriver, str]:\n \"\"\"Scrape text from a website using selenium\n\n Args:\n url (str): The url of the website to scrape\n\n Returns:\n Tuple[WebDriver, str]: The webdriver and the text scraped from the website\n \"\"\"\n logging.getLogger(\"selenium\").setLevel(logging.CRITICAL)\n\n options = 
FirefoxOptions()\n if CFG.selenium_headless:\n options.headless = True\n\n driver = webdriver.Firefox(options=options, service=Service())\n driver.get(url)\n\n WebDriverWait(driver, 10).until(\n EC.presence_of_element_located((By.TAG_NAME, \"body\"))\n )\n\n page_source = driver.execute_script(\"return document.body.outerHTML;\")\n soup = BeautifulSoup(page_source, \"html.parser\")\n\n for script in soup([\"script\", \"style\"]):\n script.extract()\n\n text = soup.get_text()\n lines = (line.strip() for line in text.splitlines())\n chunks = (phrase.strip() for line in lines for phrase in line.split(\" \"))\n text = \"\\n\".join(chunk for chunk in chunks if chunk)\n return driver, text\n\n\n\ndef scrape_links_with_selenium(driver: WebDriver, url: str) -> list[str]:\n \"\"\"Scrape links from a website using selenium\n\n Args:\n driver (WebDriver): The webdriver to use to scrape the links\n\n Returns:\n List[str]: The links scraped from the website\n \"\"\"\n page_source = driver.page_source\n soup = BeautifulSoup(page_source, \"html.parser\")\n\n for script in soup([\"script\", \"style\"]):\n script.extract()\n\n hyperlinks = extract_hyperlinks(soup, url)\n\n return format_hyperlinks(hyperlinks)\n\n\ndef close_browser(driver: WebDriver) -> None:\n \"\"\"Close the browser\n\n Args:\n driver (WebDriver): The webdriver to close\n\n Returns:\n None\n \"\"\"\n driver.quit()\n\n\ndef add_header(driver: WebDriver) -> None:\n \"\"\"Add a header to the website\n\n Args:\n driver (WebDriver): The webdriver to use to add the header\n\n Returns:\n None\n \"\"\"\n try:\n with open(f\"{FILE_DIR}/js/overlay.js\", \"r\") as overlay_file:\n overlay_script = overlay_file.read()\n driver.execute_script(overlay_script)\n except Exception as e:\n print(f\"Error executing overlay.js: {e}\")\n","repo_name":"SpyderRex/AutoGPT-for-Android","sub_path":"autogpt/commands/web_selenium.py","file_name":"web_selenium.py","file_ext":"py","file_size_in_byte":4101,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"84"} +{"seq_id":"7075326211","text":"import sys,os\n# -- local\nimport tools as tl\n# -- matplotlib\nfrom matplotlib import pyplot as plt\n# --numpy\nimport numpy as np\n# -- sklearn\nfrom sklearn.preprocessing import MinMaxScaler, QuantileTransformer\nfrom sklearn.utils import shuffle\nfrom sklearn.model_selection import train_test_split\n\n# -- pandas\nimport pandas as pd\n\n#--tensorflow\nimport tensorflow as tf\nimport tensorflow.keras as tk\nimport tensorflow.keras.layers as kl\nimport tensorflow.keras.models as km\nimport tensorflow.keras.optimizers as ko\nimport tensorflow.keras.callbacks as kc\nimport tensorflow.keras.regularizers as kr\n#from tensorflow.keras import Model\n\nimport matplotlib\nmatplotlib.rc('text',usetex=True)\n\nclass PCNNS():\n \n def __init__(self, outdir='.'):\n \n tl.checkdir(outdir)\n self.outdir=outdir\n self.input_shape = (5) \n self.output_shape = 1\n self.nodes = 1024\n self.batch_size = 512\n self.epochs = 1500\n self.Drate= 0.2\n self.l2_reg = 1e-4\n self.BATCH_SIZE= 512\n self.PHI= 4\n self.model_name = 'pcnn'\n self.opt = ko.Adam(learning_rate=1e-4)\n\n \n def normalized_data(self): \n param, param_rep,param_test, sigma, sigma_rep, sigma_test= tl.data_loader()\n self.param_sc = MinMaxScaler() \n self.param_norm = self.param_sc.fit_transform(param)\n self.param_rep_norm = self.param_sc.transform(param_rep) \n \n self.sigma_sc = QuantileTransformer(output_distribution='normal')\n self.sigma_norm = 
self.sigma_sc.fit_transform(sigma.reshape(-1,1))\n self.sigma_rep_norm = self.sigma_sc.transform(sigma_rep.reshape(-1,1))\n \n def split_train_test(self):\n self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(self.param_rep_norm, \n self.sigma_rep_norm, test_size=0.1, random_state=42)\n \n \n def create_pcnns(self):\n act = kl.LeakyReLU(alpha=0.1)\n inputs = kl.Input(shape = self.input_shape, name='pcnn_input')\n\n x1= kl.Dense(self.nodes , activation = act, input_dim= self.input_shape)(inputs)\n x2= kl.Dropout(self.Drate)(x1)\n x3= kl.Dense(self.nodes , activation = act, kernel_regularizer=kr.l2(self.l2_reg))(x2)\n x4= kl.Dropout(self.Drate)(x3)\n x5= kl.Dense(self.nodes , activation = act, kernel_regularizer=kr.l2(self.l2_reg))(x4)\n x6= kl.Dropout(self.Drate)(x5)\n\n yy= kl.Dense(self.output_shape)(x6)\n model= km.Model(inputs=inputs, outputs= yy)\n model. compile(loss='mse',optimizer= self.opt)\n return model\n \n def plot_loss(self,Mloss, Sloss, Closs):\n Mloss= np.array(Mloss)\n Sloss= np.array(Sloss)\n Closs= np.array(Closs)\n\n\n plt.figure(figsize=(10,6))\n plt.plot(np.arange(Mloss.shape[0]), Mloss, label= \"MSE\")\n plt.plot(np.arange(Sloss.shape[0]), Sloss,label= \"Symmetric loss\")\n plt.plot(np.arange(Closs.shape[0]), Closs, label=\"Edges loss \")\n plt.ylabel(\"Loss\",size=30)\n plt.xlabel(\"Epoch\",size=30)\n plt.xticks(size=20)\n plt.yticks(size=20)\n plt.legend(fontsize=20)\n plt.show()\n plt.tight_layout()\n\n # -- Train the model\n def train(self):\n model = self.create_pcnns()\n loss=[]\n loss_mse_= []\n loss_sym_=[]\n loss_closs_=[]\n print(\"Training started: \")\n print(\"==============================\")\n for epoch in range(self.epochs):\n self.X_train, self.y_train = shuffle(self.X_train, self.y_train)\n loss = 0.0\n loss_sym = 0.0\n loss_close = 0.0\n\n for i in range(self.X_train.shape[0]//self.BATCH_SIZE):\n loss = loss + model.train_on_batch(self.X_train[i*self.BATCH_SIZE:(i+1)*self.BATCH_SIZE], self.y_train[i*self.BATCH_SIZE:(i+1)*self.BATCH_SIZE])\n\n for j in range(8):\n X = np.copy(self.X_train) \n\n X[:, self.PHI] = np.random.uniform(0, 1, self.X_train.shape[0])\n\n XX = np.copy(X)\n XX[:, self.PHI] = 1.0 - XX[:, self.PHI]\n YY = model.predict(XX)\n for i in range(self.X_train.shape[0]//self.BATCH_SIZE):\n loss_sym = loss_sym + model.train_on_batch(X[i*self.BATCH_SIZE:(i+1)*self.BATCH_SIZE], YY[i*self.BATCH_SIZE:(i+1)*self.BATCH_SIZE])\n\n for k in range(5):\n X_head = np.copy(self.X_train)\n X_head[:, self.PHI] = 0\n y_head = model.predict(X_head)\n X_tail = np.copy(self.X_train)\n X_tail[:, self.PHI] = 1\n y_tail = model.predict(X_tail)\n\n for i in range(self.X_train.shape[0]//self.BATCH_SIZE):\n loss_close = loss_close + model.train_on_batch(X_head[i*self.BATCH_SIZE:(i+1)*self.BATCH_SIZE], y_tail[i*self.BATCH_SIZE:(i+1)*self.BATCH_SIZE])\n loss_close = loss_close + model.train_on_batch(X_tail[i*self.BATCH_SIZE:(i+1)*self.BATCH_SIZE], y_head[i*self.BATCH_SIZE:(i+1)*self.BATCH_SIZE])\n\n loss = loss/(self.X_train.shape[0]//self.BATCH_SIZE)\n loss_sym = loss_sym/(self.X_train.shape[0]//self.BATCH_SIZE)\n loss_close = loss_close/(self.X_train.shape[0]//self.BATCH_SIZE)\n loss_mse_.append(loss)\n loss_sym_.append(loss_sym)\n loss_closs_.append(loss_close)\n\n print(\"epoch = \", epoch, \" loss = \", loss, \" loss_sym = \", loss_sym, \" loss_close = \", loss_close)\n model.save_weights('%s/%s.hdf5'%(self.outdir,self.model_name))\n\n\n self.plot_loss(loss_mse_,loss_sym_,loss_closs_)\n return model \n \n \n def plot_result_1(self):\n 
model= self.create_pcnns()\n model.load_weights('%s/%s.hdf5'%(self.outdir,self.model_name))\n pred= model.predict(self.X_test)\n pred = self.sigma_sc.inverse_transform(pred)\n true= self.sigma_sc.inverse_transform(self.y_test)\n \n plt.plot(true,true,'.',label=r'$\\rm True $')\n plt.plot(true,pred,'.',alpha=0.4,label= r'$\\rm PCCNNs $')\n plt.legend(fontsize=15)\n plt.semilogx()\n plt.semilogy()\n plt.show()\n \n \n\n \n def plot_result_2(self):\n model= self.create_pcnns()\n model.load_weights('%s/%s.hdf5'%(self.outdir,self.model_name))\n extrap_Example= tl.Ex_test()\n extrap_Example_norm= self.param_sc.transform(extrap_Example)\n pred_array = []\n for i in range(100):\n res = model(extrap_Example_norm, training=True)\n pred_array.append(self.sigma_sc.inverse_transform(res))\n pred = np.asarray(pred_array)\n pred=pred[:,:,0] \n #### mean and std ###\n mean = pred.mean(axis=0)\n std = pred.std(axis=0)\n true= np.load(\"ex_new_1.npy\")\n fig, ax = plt.subplots(figsize=(10,8))\n ax.fill_between(extrap_Example[:,4].flatten(), mean-2*std, mean+2*std, color=\"#fcecca\")\n ax.fill_between(extrap_Example[:,4].flatten(), mean-std, mean+std, color=\"#ffda8f\", label= r'$\\rm PCNNs$')\n plt.plot(extrap_Example[:,4].flatten(), mean, color= \"#fc7600\",lw=2)\n plt.plot(extrap_Example[:,4].flatten(),true,'o',color='r', label=r'$\\rm True $' )\n \n ax.set_xlabel(r\"$\\Phi$\", fontsize=40)\n ax.text(0.02, 0.3, r\"$\\rm X_{bj}=0.365$\", transform=ax.transAxes, fontsize = 25)\n ax.text(0.02, 0.2, r\"$\\rm t= -0.2 ~ GeV^{2}$\", transform=ax.transAxes, fontsize = 25)\n ax.text(0.02, 0.1, r\"$\\rm Q2= 2.0~ GeV^{2}$\", transform=ax.transAxes, fontsize = 25)\n plt.legend(fontsize =30, loc= 'upper center')\n ax.set_xticks([0,100,200,300],[\"0\",\"100\",\"200\",\"300\"],fontsize=20)\n ax.set_yticks([0.04,0.05,0.06,0.08],[\"0.04\",\"\",\"0.06\",\"0.08\"],fontsize=20)\n fig.text(-0.06, 0.5, r'$\\rm\\sigma_{UU}$ \\rm(nb/GeV$^4$)',size=40,va='center', rotation='vertical')\n plt.tight_layout()\n plt.show()\n\n\n\n\n \n","repo_name":"Malmaeen/FemtoNet_PCNNs","sub_path":"pcnns.py","file_name":"pcnns.py","file_ext":"py","file_size_in_byte":8168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"11499752295","text":"# Osasere Ikponmwosa\n# for exceeding requirements i added a get function to get an adjective and add to my get_prepositional_phrase function \nimport random\n\n\ndef main():\n # these are the two arguments that will be used in the program\n quantity = random.choice([1,2])\n tense = random.choice(['present', 'past', 'future'])\n i = 0 \n \n while i < 6:\n # this is to make it print 6 sentences\n i += 1\n sentence = make_sentence(quantity=quantity, tense=tense)\n print(sentence)\n \ndef make_sentence(quantity, tense):\n if quantity == 1:\n sentence = f'{get_determiner(1).capitalize()} {get_noun(1)} {get_verb(quantity=1, tense=tense)} {get_prepositional_phrase(1)}.'\n elif quantity == 2:\n sentence = f'{get_determiner(2).capitalize()} {get_noun(2)} {get_verb(quantity=1, tense=tense)} {get_prepositional_phrase(2)}.'\n return sentence\n \ndef get_determiner(quantity):\n if quantity == 1:\n words = [\"a\", \"one\", \"the\"]\n else:\n words = [\"some\", \"many\", \"the\"]\n word = random.choice(words)\n return word\n\n\ndef get_noun(quantity):\n words_singular = [\"bird\", \"boy\", \"car\", \"cat\", \"child\",\"dog\", \"girl\", \"man\", \"rabbit\", \"woman\"]\n words_plural = [\"birds\", \"boys\", \"cars\", \"cats\", \"children\",\"dogs\", \"girls\", 
\"men\", \"rabbits\", \"women\"]\n if quantity == 1:\n word = words_singular\n else:\n word = words_plural\n word = random.choice(word)\n return word\n\n\ndef get_verb(quantity, tense):\n past_tense = [\"drank\", \"ate\", \"grew\", \"laughed\", \"thought\",\"ran\", \"slept\", \"talked\", \"walked\", \"wrote\"]\n singular_present_tense = [\"drinks\", \"eats\", \"grows\", \"laughs\", \"thinks\", \"runs\", \"sleeps\", \"talks\", \"walks\", \"writes\"]\n plural_present_tense = [ \"drink\", \"eat\", \"grow\", \"laugh\", \"think\", \"run\", \"sleep\", \"talk\", \"walk\", \"write\"]\n future_tense = [ \"will drink\", \"will eat\", \"will grow\", \"will laugh\",\"will think\", \"will run\", \"will sleep\", \"will talk\",\"will walk\", \"will write\"]\n if tense == 'past':\n word = past_tense\n elif tense == 'present':\n if quantity == 1:\n word = singular_present_tense\n else:\n word = plural_present_tense\n elif tense == 'future':\n word = future_tense\n word = random.choice(word)\n return word \n\n\ndef get_preposition():\n preposition = [ \"about\", \"above\", \"across\", \"after\", \"along\",\"around\", \"at\", \"before\", \"behind\", \"below\",\"beyond\", \"by\", \"despite\", \"except\", \"for\",\n \"from\", \"in\", \"into\", \"near\", \"of\",\"off\", \"on\", \"onto\", \"out\", \"over\",\"past\", \"to\", \"under\", \"with\", \"without\"]\n word = random.choice(preposition)\n return word\ndef get_adjective():\n adjectives = random.choice([\"enthusiastic\",\"serene\",\"jubilant\",\"mysterious\",\"radiant\",\"lively\",\"tranquil\",\"playful\",\"resilient\",\"majestic\",\n \"vibrant\",\"whimsical\",\"eloquent\",\"tenacious\",\"harmonious\"])\n return adjectives\n\n\ndef get_prepositional_phrase(quantity):\n if quantity == 1:\n prepositional_phrase = f'{get_preposition()} {get_determiner(1)} {get_adjective()} {get_noun(1)}'\n else:\n prepositional_phrase = f'{get_preposition()} {get_determiner(2)} {get_adjective()} {get_noun(2)}'\n return prepositional_phrase\n\n\nmain()","repo_name":"seregheik/CSE-111-Programming-with-Functions","sub_path":"week4/week4.py","file_name":"week4.py","file_ext":"py","file_size_in_byte":3230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"18333861222","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('users', '0007_auto_20151229_1224'),\n ('inventory', '0015_unitconverter_company'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='UnitConversion',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('multiple', models.FloatField()),\n ('base_unit', models.ForeignKey(related_name='base_unit', to='inventory.Unit', null=True)),\n ('company', models.ForeignKey(to='users.Company')),\n ('unit_to_convert', models.ForeignKey(to='inventory.Unit', null=True)),\n ],\n ),\n migrations.RemoveField(\n model_name='unitconverter',\n name='base_unit',\n ),\n migrations.RemoveField(\n model_name='unitconverter',\n name='company',\n ),\n migrations.RemoveField(\n model_name='unitconverter',\n name='unit_to_convert',\n ),\n migrations.DeleteModel(\n name='UnitConverter',\n ),\n 
]\n","repo_name":"iraycd/awecounting","sub_path":"apps/inventory/migrations/0016_auto_20160118_1645.py","file_name":"0016_auto_20160118_1645.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"2433647825","text":"from puzzle import Puzzle\nfrom constants import *\nimport pygame\n\n\npygame.init()\npygame.display.set_caption('Get Jiggy by Nick Sciarretta')\nscreen = pygame.display.set_mode((950, 800))\n\n\npuzzle = Puzzle()\npuzzle.create_puzzle()\npieces = puzzle.pieces\nnum_hints = NUM_HINTS\n\n\nrunning = True\nwhile running:\n\n for event in pygame.event.get():\n\n if event.type == pygame.QUIT:\n running = False\n\n # Select piece\n elif event.type == pygame.MOUSEBUTTONDOWN:\n for piece in pieces:\n if piece.select(puzzle.board):\n break\n\n # Move piece\n elif event.type == pygame.MOUSEBUTTONUP:\n for piece in pieces:\n if not piece.held: continue\n piece.release(puzzle.board)\n break\n\n elif event.type == pygame.KEYDOWN:\n\n # Spacebar = new puzzle\n if event.key == pygame.K_SPACE:\n puzzle.create_puzzle()\n pieces = puzzle.pieces\n num_hints = NUM_HINTS\n\n # Backspace = Remove all pieces from board\n elif event.key == pygame.K_BACKSPACE:\n for piece in pieces: \n if not piece.on_board: continue\n piece.remove_from_board(puzzle.board)\n\n # H = show hint\n elif event.key == pygame.K_h and num_hints > 0:\n for piece in pieces: piece.show_hint = 1\n num_hints -= 1\n\n puzzle.draw(screen)\n pygame.display.update()\n\n\npygame.quit()","repo_name":"nscrrtta/get_jiggy","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1544,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"70542743314","text":"import pygame\nimport os\nfrom constants import *\nfrom simulation import *\nfrom interface import *\nfrom ui import Board\n\npygame.init()\n\n# Window Management\nWINDOW = pygame.display.set_mode((WIDTH, HEIGHT))\npygame.display.set_caption(\"CPU Simulation\")\n\n\ndef draw_window(window):\n window.fill((47, 47, 47))\n pygame.draw.rect(window, (43, 43, 43), BOX, border_radius=10)\n pygame.draw.rect(window, (60, 60, 60), BOX, 10, border_radius=10)\n\n\ndef main(window):\n clock = pygame.time.Clock()\n\n board = Board()\n\n while True:\n clock.tick(FPS)\n draw_window(window)\n events = pygame.event.get()\n\n board.update(window, events)\n\n for event in events:\n if event.type == pygame.QUIT:\n pygame.quit()\n return\n\n pygame.display.update()\n\n\nmain(WINDOW)\n","repo_name":"ArjunSahlot/cpu_simulation","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"39414382470","text":"import numpy as np\r\nimport random\r\nimport pygame\r\nimport sys\r\nimport math\r\n\r\nBLUE = (0,0,255)\r\nBLACK = (0,0,0)\r\nRED = (255,0,0)\r\nYELLOW = (255,255,0)\r\n\r\nROWCOUNT = 6\r\nCOLCOUNT = 7\r\n\r\nPLAYER = 0\r\nAI = 1\r\n\r\nEMPTY = 0\r\nPLAYERPIECE = 1\r\nAIPIECE = 2\r\n\r\nWINDOWLENGTH = 4\r\n\r\ndef createBoard():\r\n\tboard = np.zeros((ROWCOUNT,COLCOUNT))\r\n\treturn board\r\n\r\ndef dropPiece(board, row, col, piece):\r\n\tboard[row][col] = piece\r\n\r\ndef isValidLocation(board, col):\r\n\treturn board[ROWCOUNT-1][col] == 0\r\n\r\ndef getNextOpenRow(board, col):\r\n\tfor r in range(ROWCOUNT):\r\n\t\tif board[r][col] == 0:\r\n\t\t\treturn r\r\n\r\ndef 
printBoard(board):\r\n\tprint(np.flip(board, 0))\r\n\r\ndef winningMove(board, piece):\r\n\t# Check horizontal locations for win\r\n\tfor c in range(COLCOUNT-3):\r\n\t\tfor r in range(ROWCOUNT):\r\n\t\t\tif board[r][c] == piece and board[r][c+1] == piece and board[r][c+2] == piece and board[r][c+3] == piece:\r\n\t\t\t\treturn True\r\n\r\n\t# Check vertical locations for win\r\n\tfor c in range(COLCOUNT):\r\n\t\tfor r in range(ROWCOUNT-3):\r\n\t\t\tif board[r][c] == piece and board[r+1][c] == piece and board[r+2][c] == piece and board[r+3][c] == piece:\r\n\t\t\t\treturn True\r\n\r\n\t# Check positively sloped diaganols\r\n\tfor c in range(COLCOUNT-3):\r\n\t\tfor r in range(ROWCOUNT-3):\r\n\t\t\tif board[r][c] == piece and board[r+1][c+1] == piece and board[r+2][c+2] == piece and board[r+3][c+3] == piece:\r\n\t\t\t\treturn True\r\n\r\n\t# Check negatively sloped diaganols\r\n\tfor c in range(COLCOUNT-3):\r\n\t\tfor r in range(3, ROWCOUNT):\r\n\t\t\tif board[r][c] == piece and board[r-1][c+1] == piece and board[r-2][c+2] == piece and board[r-3][c+3] == piece:\r\n\t\t\t\treturn True\r\n\r\ndef evaluateWindow(window, piece):\r\n\tscore = 0\r\n\toppPiece = PLAYERPIECE\r\n\tif piece == PLAYERPIECE:\r\n\t\toppPiece = AIPIECE\r\n\r\n\tif window.count(piece) == 4:\r\n\t\tscore += 100\r\n\telif window.count(piece) == 3 and window.count(EMPTY) == 1:\r\n\t\tscore += 5\r\n\telif window.count(piece) == 2 and window.count(EMPTY) == 2:\r\n\t\tscore += 2\r\n\r\n\tif window.count(oppPiece) == 3 and window.count(EMPTY) == 1:\r\n\t\tscore -= 4\r\n\r\n\treturn score\r\n\r\ndef scorePosition(board, piece):\r\n\tscore = 0\r\n\r\n\t## Score center column\r\n\tcenterArray = [int(i) for i in list(board[:, COLCOUNT//2])]\r\n\tcenterCount = centerArray.count(piece)\r\n\tscore += centerCount * 3\r\n\r\n\t## Score Horizontal\r\n\tfor x in range(ROWCOUNT):\r\n\t\trow_array = [int(i) for i in list(board[x,:])]\r\n\t\tfor c in range(COLCOUNT-3):\r\n\t\t\twindow = row_array[c:c+WINDOWLENGTH]\r\n\t\t\tscore += evaluateWindow(window, piece)\r\n\r\n\t## Score Vertical\r\n\tfor y in range(COLCOUNT):\r\n\t\tcol_array = [int(i) for i in list(board[:,y])]\r\n\t\tfor r in range(ROWCOUNT-3):\r\n\t\t\twindow = col_array[r:r+WINDOWLENGTH]\r\n\t\t\tscore += evaluateWindow(window, piece)\r\n\r\n\t## Score posiive sloped diagonal\r\n\tfor r in range(ROWCOUNT-3):\r\n\t\tfor c in range(COLCOUNT-3):\r\n\t\t\twindow = [board[r+i][c+i] for i in range(WINDOWLENGTH)]\r\n\t\t\tscore += evaluateWindow(window, piece)\r\n\r\n\tfor r in range(ROWCOUNT-3):\r\n\t\tfor c in range(COLCOUNT-3):\r\n\t\t\twindow = [board[r+3-i][c+i] for i in range(WINDOWLENGTH)]\r\n\t\t\tscore += evaluateWindow(window, piece)\r\n\r\n\treturn score\r\n\r\ndef isTerminalNode(board):\r\n\treturn winningMove(board, PLAYERPIECE) or winningMove(board, AIPIECE) or len(getValidLocations(board)) == 0\r\n\r\ndef minimax(board, depth, alpha, beta, maximizingPlayer):\r\n\tvalidLocations = getValidLocations(board)\r\n\tisTerminal = isTerminalNode(board)\r\n\tif depth == 0 or isTerminal:\r\n\t\tif isTerminal:\r\n\t\t\tif winningMove(board, AIPIECE):\r\n\t\t\t\treturn (None, 10000000)\r\n\t\t\telif winningMove(board, PLAYERPIECE):\r\n\t\t\t\treturn (None, -10000000)\r\n\t\t\telse: # Game is over, no more valid moves\r\n\t\t\t\treturn (None, 0)\r\n\t\telse: # Depth is zero\r\n\t\t\treturn (None, scorePosition(board, AIPIECE))\r\n\tif maximizingPlayer:\r\n\t\tvalue = -math.inf\r\n\t\tcolumn = random.choice(validLocations)\r\n\t\tfor col in validLocations:\r\n\t\t\trow = 
getNextOpenRow(board, col)\r\n\t\t\tbCopy = board.copy()\r\n\t\t\tdropPiece(bCopy, row, col, AIPIECE)\r\n\t\t\tnew_score = minimax(bCopy, depth-1, alpha, beta, False)[1]\r\n\t\t\tif new_score > value:\r\n\t\t\t\tvalue = new_score\r\n\t\t\t\tcolumn = col\r\n\t\t\talpha = max(alpha, value)\r\n\t\t\tif alpha >= beta:\r\n\t\t\t\tbreak\r\n\t\treturn column, value\r\n\r\n\telse: # Minimizing player\r\n\t\tvalue = math.inf\r\n\t\tcolumn = random.choice(validLocations)\r\n\t\tfor col in validLocations:\r\n\t\t\trow = getNextOpenRow(board, col)\r\n\t\t\tbCopy = board.copy()\r\n\t\t\tdropPiece(bCopy, row, col, PLAYERPIECE)\r\n\t\t\tnew_score = minimax(bCopy, depth-1, alpha, beta, True)[1]\r\n\t\t\tif new_score < value:\r\n\t\t\t\tvalue = new_score\r\n\t\t\t\tcolumn = col\r\n\t\t\tbeta = min(beta, value)\r\n\t\t\tif alpha >= beta:\r\n\t\t\t\tbreak\r\n\t\treturn column, value\r\n\r\ndef getValidLocations(board):\r\n\tvalidLocations = []\r\n\tfor col in range(COLCOUNT):\r\n\t\tif isValidLocation(board, col):\r\n\t\t\tvalidLocations.append(col)\r\n\treturn validLocations\r\n\r\ndef pickBestMove(board, piece):\r\n\r\n\tvalidLocations = getValidLocations(board)\r\n\tbestScore = -10000\r\n\tbestCol = random.choice(validLocations)\r\n\tfor col in validLocations:\r\n\t\trow = getNextOpenRow(board, col)\r\n\t\ttempBoard = board.copy()\r\n\t\tdropPiece(tempBoard, row, col, piece)\r\n\t\tscore = scorePosition(tempBoard, piece)\r\n\t\tif score > bestScore:\r\n\t\t\tbestScore = score\r\n\t\t\tbestCol = col\r\n\r\n\treturn bestCol\r\n\r\ndef draw_board(board):\r\n\tfor c in range(COLCOUNT):\r\n\t\tfor r in range(ROWCOUNT):\r\n\t\t\tpygame.draw.rect(screen, BLUE, (c*SQUARESIZE, r*SQUARESIZE+SQUARESIZE, SQUARESIZE, SQUARESIZE))\r\n\t\t\tpygame.draw.circle(screen, BLACK, (int(c*SQUARESIZE+SQUARESIZE/2), int(r*SQUARESIZE+SQUARESIZE+SQUARESIZE/2)), RADIUS)\r\n\t\r\n\tfor c in range(COLCOUNT):\r\n\t\tfor r in range(ROWCOUNT):\t\t\r\n\t\t\tif board[r][c] == PLAYERPIECE:\r\n\t\t\t\tpygame.draw.circle(screen, RED, (int(c*SQUARESIZE+SQUARESIZE/2), height-int(r*SQUARESIZE+SQUARESIZE/2)), RADIUS)\r\n\t\t\telif board[r][c] == AIPIECE: \r\n\t\t\t\tpygame.draw.circle(screen, YELLOW, (int(c*SQUARESIZE+SQUARESIZE/2), height-int(r*SQUARESIZE+SQUARESIZE/2)), RADIUS)\r\n\tpygame.display.update()\r\n\r\nboard = createBoard()\r\nprintBoard(board)\r\ngameOver = False\r\n\r\npygame.init()\r\n\r\nSQUARESIZE = 100\r\n\r\nwidth = COLCOUNT * SQUARESIZE\r\nheight = (ROWCOUNT+1) * SQUARESIZE\r\n\r\nsize = (width, height)\r\n\r\nRADIUS = int(SQUARESIZE/2 - 5)\r\n\r\nscreen = pygame.display.set_mode(size)\r\ndraw_board(board)\r\npygame.display.update()\r\n\r\nmyfont = pygame.font.SysFont(\"monospace\", 75)\r\n\r\nturn = random.randint(PLAYER, AI)\r\n\r\nwhile not gameOver:\r\n\r\n\tfor event in pygame.event.get():\r\n\t\tif event.type == pygame.QUIT:\r\n\t\t\tsys.exit()\r\n\r\n\t\tif event.type == pygame.MOUSEMOTION:\r\n\t\t\tpygame.draw.rect(screen, BLACK, (0,0, width, SQUARESIZE))\r\n\t\t\tposx = event.pos[0]\r\n\t\t\tif turn == PLAYER:\r\n\t\t\t\tpygame.draw.circle(screen, RED, (posx, int(SQUARESIZE/2)), RADIUS)\r\n\r\n\t\tpygame.display.update()\r\n\r\n\t\tif event.type == pygame.MOUSEBUTTONDOWN:\r\n\t\t\tpygame.draw.rect(screen, BLACK, (0,0, width, SQUARESIZE))\r\n\t\t\t#print(event.pos)\r\n\t\t\t# Ask for Player 1 Input\r\n\t\t\tif turn == PLAYER:\r\n\t\t\t\tposx = event.pos[0]\r\n\t\t\t\tcol = int(math.floor(posx/SQUARESIZE))\r\n\r\n\t\t\t\tif isValidLocation(board, col):\r\n\t\t\t\t\trow = getNextOpenRow(board, 
col)\r\n\t\t\t\t\tdropPiece(board, row, col, PLAYERPIECE)\r\n\r\n\t\t\t\t\tif winningMove(board, PLAYERPIECE):\r\n\t\t\t\t\t\tlabel = myfont.render(\"Player 1 wins!!\", 1, RED)\r\n\t\t\t\t\t\tscreen.blit(label, (40,10))\r\n\t\t\t\t\t\tgameOver = True\r\n\r\n\t\t\t\t\tturn += 1\r\n\t\t\t\t\tturn = turn % 2\r\n\r\n\t\t\t\t\tprintBoard(board)\r\n\t\t\t\t\tdraw_board(board)\r\n\r\n\r\n\t# # Ask for Player 2 Input\r\n\tif turn == AI and not gameOver:\r\n\t\tcol, minimax_score = minimax(board, 6, -math.inf, math.inf, True)\r\n\r\n\t\tif isValidLocation(board, col):\r\n\t\t\t#pygame.time.wait(500)\r\n\t\t\trow = getNextOpenRow(board, col)\r\n\t\t\tdropPiece(board, row, col, AIPIECE)\r\n\r\n\t\t\tif winningMove(board, AIPIECE):\r\n\t\t\t\tlabel = myfont.render(\"Player 2 wins!!\", 1, YELLOW)\r\n\t\t\t\tscreen.blit(label, (40,10))\r\n\t\t\t\tgameOver = True\r\n\r\n\t\t\tprintBoard(board)\r\n\t\t\tdraw_board(board)\r\n\r\n\t\t\tturn += 1\r\n\t\t\tturn = turn % 2\r\n\r\n\tif gameOver:\r\n\t\tpygame.time.wait(3000)","repo_name":"vinishd/connect4","sub_path":"connect4AI.py","file_name":"connect4AI.py","file_ext":"py","file_size_in_byte":7737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"33004193250","text":"# -*- coding: utf-8 -*-\n\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf-8');\n\n\nimport os\nimport fnmatch\nfrom ppg import BASE_DIR\nfrom ppg.utils import exist, load_json, dump_json\nfrom ppg.feature import extract_ppg45, extract_svri\n\n\ndef extract():\n preprocessed_data_dir = os.path.join(BASE_DIR, 'data', 'preprocessed')\n extracted_data_dir = os.path.join(BASE_DIR, 'data', 'extracted')\n\n if exist(pathname=preprocessed_data_dir):\n for filename_with_ext in fnmatch.filter(os.listdir(preprocessed_data_dir), '*.json'):\n pathname = os.path.join(preprocessed_data_dir, filename_with_ext)\n json_data = load_json(pathname=pathname)\n if json_data is not None:\n for label in json_data:\n json_data[label]['ppg45'] = [extract_ppg45(single_waveform=single_waveform, sample_rate=json_data[label]['sample_rate']) for single_waveform in json_data[label]['single_waveforms']]\n json_data[label]['svri'] = [extract_svri(single_waveform=single_waveform) for single_waveform in json_data[label]['single_waveforms']]\n del json_data[label]['single_waveforms']\n dump_json(data=json_data, pathname=os.path.join(extracted_data_dir, filename_with_ext), overwrite=True)\n\n\nif __name__ == '__main__':\n extract()","repo_name":"qiriro/PPG","sub_path":"extract.py","file_name":"extract.py","file_ext":"py","file_size_in_byte":1326,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"84"} +{"seq_id":"18045864440","text":"\"\"\"\nGiven a string s, sort it in decreasing order based on the frequency of the characters. The frequency of a character is the number of times it appears in the string.\n\nReturn the sorted string. 
If there are multiple answers, return any of them.\n\ns = \"tree\"\noutput: \"eert\"\n\n\"\"\"\nfrom collections import defaultdict\n\ndef frequencySort(s: str) -> str:\n    # First get the frequency of each character\n    freq = defaultdict(int)\n    for c in s:\n        freq[c] += 1\n    \n    # Group characters by their frequency\n    count_freq = defaultdict(list)\n    for k, v in freq.items():\n        count_freq[v].append(k)\n    \n    new_string = ''\n    vals = sorted(count_freq, reverse=True)\n    for val in vals:\n        for c in count_freq[val]:\n            new_string += (c*val)\n    return new_string\n\n\nCase1 = \"tree\"\nprint(frequencySort(Case1))\n\n\"\"\"\nTime Complexity: O(NLogN) -> Sorting\nSpace: O(N)\n\n\"\"\"","repo_name":"ArcticRise/Leetcode","sub_path":"PythonSolution/DecemberChallenge/451_SortCharactersByFreq.py","file_name":"451_SortCharactersByFreq.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"84"} +{"seq_id":"21559553277","text":"import pytest\nfrom Application.WikiApp import App\nfrom Application.api import Api_from_facts\n\n\n\ndef pytest_addoption(parser):\n    parser.addoption(\"--browser\", action=\"store\", default=None)\n\n\n@pytest.fixture(scope=\"session\")\ndef wikifixture(request):\n    browser = request.config.getoption(\"--browser\")\n    app = App(browser)\n    yield app\n    app.destroy()\n\n\n\n@pytest.fixture(scope=\"session\")\ndef api():\n    facts = Api_from_facts()\n    return facts","repo_name":"nekitvand/Wikipedia","sub_path":"conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"84"} +{"seq_id":"45473817287","text":"\"\"\"Tools to help simulate the moving mode of MSL's kibble balance.\r\n\r\nErrors not considered (not an exhaustive list): thermal, alignment, gain errors of\r\nthe DVM, other interferometer beam errors (e.g. diffraction), PJVS errors,\r\nclock stability, current effect, laser frequency stability, DVM noise \r\n(incl 3458A auto-zero), float precision, mains power noise, transient coil \r\nimpedance, sine wave frequency measurement etc. Feel free to add them in!\r\n\r\nEquipment not considered: multiple DVMs, multiple interferometers, PJVS, \r\nfrequency counter for oscillation frequency measurement.\r\n\r\nI have attempted to document my assumptions via the README and through code\r\ncomments; however, I will have undoubtedly made some implicit assumptions that\r\nI have not recognised. \r\n\r\nThis code is currently functional as-is; however, it contains a long list of \r\nimprovement TODOs - mostly to increase clarity and sanitise user inputs.\r\n\r\nCreated on Mon Oct 10 13:27:31 2022\r\n@author: F.Messerli\r\n\r\nTypical usage example:\r\n    Initialise params\r\n    e = MovingModeExperiment(objects, params)\r\n    e.run_experiment()\r\n    e.analyse_simple_sine_fit()\r\n\"\"\"\r\nfrom __future__ import annotations\r\n\r\n# Used to create abstract methods: methods that need to be overridden by a\r\n# subclass\r\nfrom abc import ABCMeta, abstractmethod\r\n\r\n# Type hints e.g. error_name: str (error_name is a string type object)\r\nfrom typing import Optional, Tuple\r\nimport numpy as np\r\nimport numpy.typing as npt\r\nfrom scipy import interpolate\r\nimport sympy\r\n\r\n\r\nclass Signal(object):\r\n    \"\"\"Parent for all signals; stores and applies time series functions.\r\n
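\r\n    Signals compose additively: add_signal() registers another Signal whose\r\n    generate_signal(t) output is summed with this signal's own output.\r\n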
\r\n    Attributes:\r\n        additional_signals: List of other Signals to add when generating the\r\n            signal.\r\n    \"\"\"\r\n\r\n    __metaclass__ = ABCMeta\r\n\r\n    def __init__(self) -> None:\r\n        \"\"\"Initialise Signal.\"\"\"\r\n        self.additional_signals = []\r\n\r\n    @abstractmethod # This function must be overridden by subclass\r\n    def generate_signal(self, t: npt.ArrayLike) -> None:\r\n        \"\"\"Generate signal as a function of time.\r\n\r\n        Args:\r\n            t: Either array of times or single time for which to obtain signal\r\n                value.\r\n        \"\"\"\r\n        pass\r\n\r\n    def add_signal(self, new_signal: \"Signal\") -> None:\r\n        \"\"\"Add a signal object whose generated signal will be added to this one.\"\"\"\r\n        self.additional_signals.append(new_signal)\r\n\r\n\r\nclass LinearSignal(Signal):\r\n    \"\"\"Linear signal with function y = v*t + y0.\r\n\r\n    Could be used for signals like linear voltage drift or displacement in\r\n    moving mode. A linear displacement signal is used in most Kibble balances.\r\n    \r\n    Args:\r\n        velocity: Velocity of the movement.\r\n        offset: Offset constant for the linear signal, i.e. could be used as the\r\n            initial position for a linear displacement mode.\r\n    \r\n    Attributes:\r\n        velocity: Velocity of the movement.\r\n        offset: Offset constant for the linear signal, i.e. could be used as the\r\n            initial position for a linear displacement mode.\r\n    \"\"\"\r\n\r\n    def __init__(self, velocity: float, offset: float) -> None:\r\n        \"\"\"Init LinearSignal.\"\"\"\r\n        super().__init__()\r\n        self.velocity = velocity\r\n        self.offset = offset\r\n\r\n    def generate_signal(self, t: npt.ArrayLike) -> npt.ArrayLike:\r\n        \"\"\"Generate linear signal as a function of time.\r\n\r\n        Args:\r\n            t: Either array of times or single time for which to obtain signal.\r\n\r\n        Returns:\r\n            Value of the linear signal + any additional signals at time(s) t as\r\n            array or float.\r\n        \"\"\"\r\n        output_signal = self.velocity * t + self.offset\r\n        for signal in self.additional_signals:\r\n            output_signal += signal.generate_signal(t)\r\n        return output_signal\r\n\r\n\r\nclass SineSignal(Signal):\r\n    \"\"\"Sinusoidal signal with function y = A sin(w*t + phase) + offset.\r\n\r\n    Could be used for signals like voltage noise or sinusoidal displacement.\r\n    A sinusoidal displacement signal was proposed for the MSL kibble balance\r\n    and used in the PTB balances. 
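A minimal doctest-style\r\n    example with hypothetical parameters:\r\n\r\n    >>> s = SineSignal(frequency=0.25, amplitude=2.0, phase=0.0, offset=1.0)\r\n    >>> float(s.generate_signal(1.0))  # sine peaks at the quarter period\r\n    3.0\r\n\r\n    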
Simulations so far have demonstrated good\r\n    vibration rejection but high sensitivity to synchronisation between the\r\n    voltage and velocity signal.\r\n    \r\n    Args:\r\n        frequency: Frequency of the oscillation.\r\n        amplitude: Amplitude of the oscillation.\r\n        phase: Initial phase of the oscillation.\r\n        offset: Initial offset/midpoint of the oscillation.\r\n    \r\n    Attributes:\r\n        frequency: Frequency of the oscillation.\r\n        amplitude: Amplitude of the oscillation.\r\n        phase: Initial phase of the oscillation.\r\n        offset: Initial offset/midpoint of the oscillation.\r\n    \"\"\"\r\n\r\n    def __init__(\r\n        self,\r\n        frequency: float,\r\n        amplitude: float,\r\n        phase: float,\r\n        offset: float,\r\n    ) -> None:\r\n        \"\"\"Initialise SineSignal with sine parameters.\"\"\"\r\n        super().__init__()\r\n        self.frequency = frequency\r\n        self.amplitude = amplitude\r\n        self.phase = phase\r\n        self.offset = offset\r\n\r\n    def generate_signal(self, t: npt.ArrayLike) -> npt.ArrayLike:\r\n        \"\"\"Generate sinusoidal signal as a function of time.\r\n\r\n        Args:\r\n            t: Either array of times or single time for which to obtain signal.\r\n\r\n        Returns:\r\n            Value of the sinusoidal signal + any additional signals at time(s)\r\n            t as array or float.\r\n        \"\"\"\r\n        output_signal = (\r\n            self.amplitude\r\n            * np.sin(2 * np.pi * self.frequency * t + self.phase)\r\n            + self.offset\r\n        )\r\n        for signal in self.additional_signals:\r\n            output_signal += signal.generate_signal(t)\r\n        return output_signal\r\n\r\n\r\nclass VibrationNoiseFloor(Signal):\r\n    \"\"\"Characterises the displacement noise floor in frequency space.\r\n\r\n    Args:\r\n        frequencies: Frequencies of the noise sinusoids.\r\n        amplitudes: Amplitudes of the noise sinusoids.\r\n        phases: Phases of the noise sinusoids.\r\n\r\n    Attributes:\r\n        frequencies: Frequencies of the noise sinusoids.\r\n        amplitudes: Amplitudes of the noise sinusoids.\r\n        phases: Phases of the noise sinusoids.\r\n    \"\"\"\r\n\r\n    def __init__(\r\n        self,\r\n        frequencies: npt.ArrayLike,\r\n        amplitudes: npt.ArrayLike,\r\n        phases: npt.ArrayLike,\r\n    ) -> None:\r\n        \"\"\"Initialise VibrationNoiseFloor with arrays of sine parameters.\"\"\"\r\n        super().__init__()\r\n        self.frequencies = np.asarray(frequencies)\r\n        self.amplitudes = np.asarray(amplitudes)\r\n        self.phases = np.asarray(phases)\r\n\r\n    @classmethod\r\n    # Use like: noise = VibrationNoiseFloor.from_csv(fname)\r\n    def from_csv(\r\n        cls,\r\n        fname: str,\r\n        delimiter: str = \",\",\r\n        skip_header: int = 0,\r\n        phase: bool = True,\r\n    ) -> \"VibrationNoiseFloor\":\r\n        \"\"\"Import frequencies, amplitudes, and maybe phases from csv.\r\n\r\n        Import noise floor with frequencies as the first column, amplitudes\r\n        as the second column, and phase as the third column if phase==True.\r\n\r\n        Args:\r\n            fname: Name of csv file to import.\r\n            delimiter: Delimiter used in csv file.\r\n            skip_header: Number of header rows to skip.\r\n            phase: Whether the file contains phases of the signals.\r\n        \"\"\"\r\n        # TODO(finneganc): precondition on phases and width of csv\r\n        csv_read = np.genfromtxt(\r\n            fname, delimiter=delimiter, skip_header=skip_header\r\n        )\r\n        frequencies = csv_read[:, 0]\r\n        amplitudes = csv_read[:, 1]\r\n        if phase:\r\n            phases = csv_read[:, 2]\r\n        else:\r\n            phases = np.zeros(len(frequencies))\r\n        return cls(frequencies, amplitudes, phases)\r\n\r\n    def randomise_phase(self) -> None:\r\n        \"\"\"Set phase values to random values between 0 and 2pi.\"\"\"\r\n        self.phases = np.random.uniform(0, 2 * np.pi, len(self.frequencies))\r\n\r\n    def 
remove_low_amplitude_oscillations(\r\n        self, cutoff_velocity: float\r\n    ) -> None:\r\n        \"\"\"Remove noise signals with low velocity amplitude.\r\n\r\n        Including lots of noise signals/frequency components can be slow. As Bl\r\n        error seems to track with the velocity amplitude of the noise, removing\r\n        the low amplitude signals could speed up the process without compromising\r\n        on accuracy. This has not been extensively tested.\r\n\r\n        Args:\r\n            cutoff_velocity: All signals with velocity amplitude below this value\r\n                will be removed.\r\n        \"\"\"\r\n        new_amps = []\r\n        new_freqs = []\r\n        new_phases = []\r\n        for freq, amp, phase in zip(\r\n            self.frequencies, self.amplitudes, self.phases\r\n        ):\r\n            if amp * freq * 2 * np.pi >= cutoff_velocity:\r\n                new_freqs.append(freq)\r\n                new_amps.append(amp)\r\n                new_phases.append(phase)\r\n\r\n        self.frequencies = np.asarray(new_freqs)\r\n        self.amplitudes = np.asarray(new_amps)\r\n        self.phases = np.asarray(new_phases)\r\n\r\n    def generate_signal(self, t: npt.ArrayLike) -> npt.ArrayLike:\r\n        \"\"\"Generate noise floor signal as a function of time.\r\n\r\n        Args:\r\n            t: Either array of times or single time for which to obtain signal.\r\n\r\n        Returns:\r\n            Value of the noise floor signal + any additional signals at time(s)\r\n            t as array or float.\r\n        Raises:\r\n            ValueError: If t is not a scalar or one-dimensional array.\r\n        \"\"\"\r\n        t = np.asarray(t)\r\n        if len(t.shape) == 1:\r\n            noise = np.zeros(len(t))\r\n        elif len(t.shape) == 0:\r\n            noise = 0\r\n        else:\r\n            raise ValueError(\"Must be a scalar or one dimensional array.\")\r\n\r\n        for freq, amp, phase in zip(\r\n            self.frequencies, self.amplitudes, self.phases\r\n        ):\r\n            noise += amp * np.sin(2 * np.pi * freq * t + phase)\r\n\r\n        output_signal = noise\r\n        for signal in self.additional_signals:\r\n            output_signal += signal.generate_signal(t)\r\n        return output_signal\r\n\r\n\r\nclass InterpolatedSignal(Signal):\r\n    \"\"\"Create signal from discrete time-series data by interpolation.\r\n\r\n    Uses cubic spline interpolation with 'not a knot' boundary condition.\r\n    \r\n    Args:\r\n        times: Array of sampled times corresponding to signal values.\r\n        signal_values: Value of the signal at corresponding times.\r\n    \r\n    Attributes:\r\n        signal_interp: Scipy CubicSpline interpolation object of provided data.\r\n    \"\"\"\r\n\r\n    def __init__(\r\n        self,\r\n        times: npt.ArrayLike,\r\n        signal_values: npt.ArrayLike,\r\n    ) -> None:\r\n        \"\"\"Initialise InterpolatedSignal.\"\"\"\r\n        super().__init__()\r\n        self.signal_interp = interpolate.CubicSpline(\r\n            times, signal_values, extrapolate=False\r\n        )\r\n\r\n    @classmethod\r\n    # Use like: signal = InterpolatedSignal.from_csv(fname)\r\n    def from_csv(\r\n        cls,\r\n        fname: str,\r\n        time_range: Optional[Tuple[float, float]] = None,\r\n        delimiter: str = \",\",\r\n        skip_header: int = 0,\r\n    ) -> \"InterpolatedSignal\":\r\n        \"\"\"Import signal time series from csv.\r\n\r\n        Import signal with times as the first column, and signal values\r\n        as the second column.\r\n\r\n        Args:\r\n            fname: Name of csv file to import.\r\n            time_range: Time range in csv to use for signal in a tuple (start,\r\n                end).\r\n            delimiter: Delimiter used in csv file.\r\n            skip_header: Number of header rows to skip.\r\n        \"\"\"\r\n        csv_read = np.genfromtxt(\r\n            fname, delimiter=delimiter, skip_header=skip_header\r\n        )\r\n        times = csv_read[:, 0]\r\n        signal_values = csv_read[:, 1]\r\n\r\n        if time_range is not None:\r\n            start_idx = np.argmin(np.abs(times - time_range[0]))\r\n            end_idx = np.argmin(np.abs(times - time_range[1])) + 1\r\n\r\n            # 
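argmin snaps each bound to the closest available sample, so the\r\n            # realised window can differ slightly from the requested time_range.\r\n            # 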
Truncate arrays\r\n            signal_values = signal_values[start_idx:end_idx]\r\n            times = times[start_idx:end_idx]\r\n            # Let the first time be zero\r\n            times = times - times[0]\r\n\r\n        return cls(times, signal_values)\r\n\r\n    def generate_signal(self, t: npt.ArrayLike) -> npt.ArrayLike:\r\n        \"\"\"Generate interpolated signal as a function of time.\r\n\r\n        Args:\r\n            t: Either array of times or single time for which to obtain signal.\r\n\r\n        Returns:\r\n            Value of the interpolated signal + any additional signals at time(s)\r\n            t as array or float.\r\n        Raises:\r\n            ValueError: If t contains a value outside the range provided when\r\n                constructing the interpolator.\r\n        \"\"\"\r\n        output_signal = self.signal_interp(t)\r\n        if np.isnan(output_signal).any():\r\n            raise ValueError(\r\n                f\"Either {max(t)} or {min(t)} or both are outside of the time range provided to the interpolator.\"\r\n            )\r\n        for signal in self.additional_signals:\r\n            output_signal += signal.generate_signal(t)\r\n        return output_signal\r\n\r\n\r\nclass RandomNoise(object):\r\n    \"\"\"Randomly generated noise to be added to time-series data.\r\n\r\n    Can be applied to either the measurement signal or other parameters like\r\n    the timesteps or phase. \r\n    \"\"\"\r\n\r\n    __metaclass__ = ABCMeta\r\n\r\n    @abstractmethod # This function must be overridden by subclass\r\n    def generate_noise(self, num_samples) -> None:\r\n        \"\"\"Create an array to be added to a measurement signal.\r\n        \r\n        Args:\r\n            num_samples: Length of array of random noise samples.\r\n        \"\"\"\r\n        pass\r\n\r\n\r\nclass Agwn(RandomNoise):\r\n    \"\"\"Additive Gaussian white noise with zero mean.\r\n\r\n    Can be applied to either the measurement signal or other parameters like\r\n    the timesteps or phase.\r\n\r\n    Args:\r\n        standard_deviation: Standard deviation of the Gaussian white noise.\r\n    \r\n    Attributes:\r\n        sigma: Standard deviation of the Gaussian white noise.\r\n    \"\"\"\r\n\r\n    def __init__(self, standard_deviation: float) -> None:\r\n        \"\"\"Init Agwn with standard deviation.\"\"\"\r\n        self.sigma = standard_deviation\r\n\r\n    def generate_noise(self, num_samples: int) -> np.ndarray:\r\n        \"\"\"Create an array to be added to a measurement signal.\r\n\r\n        Args:\r\n            num_samples: Number of samples in the measurement signal.\r\n\r\n        Returns:\r\n            Numpy array of the added noise.\r\n        \"\"\"\r\n        return np.random.normal(0, self.sigma, num_samples)\r\n\r\n\r\nclass Clock(object):\r\n    \"\"\"Time reference object.\r\n\r\n    Clock stability is not considered here and is currently assumed to be\r\n    perfect.\r\n\r\n    Args:\r\n        freq: Frequency of the time reference.\r\n        phase: Phase of the clock relative to the time reference. So if the\r\n            reference is at phase pi/2 when this clock ticks, the phase is\r\n            pi/2.\r\n        time_jitter: Random noise in timing.\r\n    \r\n    Attributes:\r\n        freq: Frequency of the time reference.\r\n        phase: Phase of the clock relative to the time reference. 
So if the\r\n            reference is at phase pi/2 when this clock ticks, the phase is\r\n            pi/2.\r\n        time_jitter: Random noise in timing.\r\n    \"\"\"\r\n\r\n    def __init__(\r\n        self, freq: float, phase: float, time_jitter: RandomNoise = Agwn(0)\r\n    ) -> None:\r\n        \"\"\"Init Clock with frequency, phase, and jitter.\"\"\"\r\n        self.freq = freq\r\n        self.phase = phase\r\n        self.time_jitter = time_jitter\r\n\r\n\r\nclass QuantisationError(object):\r\n    \"\"\"Quantisation error associated with analogue to digital converters.\r\n\r\n    Resolution is typically measured in digits, which can include a half digit.\r\n    The extra .5 means there can be an extra most significant digit with value\r\n    1 or 0.\r\n\r\n    Args:\r\n        digit_resolution_float: Resolution in digits, must be a multiple of\r\n            0.5.\r\n\r\n    Attributes:\r\n        digit_resolution: Integer digit cutoff for quantisation error.\r\n        half_digit: Boolean. If resolution includes a half digit then the\r\n            number may have an additional most significant digit with value 1.\r\n    \"\"\"\r\n\r\n    def __init__(self, digit_resolution_float: float) -> None:\r\n        \"\"\"Init QuantisationError with resolution.\"\"\"\r\n        self.digit_resolution = int(digit_resolution_float)\r\n        if (\r\n            float(2 * digit_resolution_float).is_integer()\r\n            and not float(digit_resolution_float).is_integer()\r\n        ):\r\n            self.half_digit = True\r\n        elif float(digit_resolution_float).is_integer():\r\n            self.half_digit = False\r\n        else:\r\n            raise ValueError(\"digit_resolution_float must be a multiple of 0.5\")\r\n\r\n    def apply_quantisation(self, x: npt.ArrayLike) -> np.ndarray:\r\n        \"\"\"Apply quantisation error to an array.\r\n\r\n        Currently a placeholder implementation.\r\n        The 3458A likely has a different implementation with its SINT and DINT\r\n        data formats. Hopefully this is a reasonable approximation; however, if\r\n        it stores a scale factor the error could be much worse for a highly\r\n        variable signal.\r\n\r\n        Args:\r\n            x: One-dimensional array to be quantised.\r\n        \"\"\"\r\n        # TODO(finneganc): Check how the quantisation works based on data format.\r\n        # I worry it is something like storing a scale factor for the whole\r\n        # run to avoid using up bits as exponents? 
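(If so, the quantisation\r\n        # step would track the run maximum rather than each sample's own\r\n        # magnitude.) 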
Need to find out.\r\n # TODO(finneganc): precond, make sure digit_resolution is an integer\r\n x = np.asarray(x)\r\n\r\n # Do not accept 0 dimensional arrays\r\n if len(x.shape) != 1:\r\n raise ValueError(\"x must be a 1D array\")\r\n\r\n # abs(x) where x=0 becomes 10^(resolution-1)\r\n x_positive = np.where(\r\n np.isfinite(x) & (x != 0),\r\n np.abs(x),\r\n 10 ** (self.digit_resolution - 1),\r\n )\r\n if self.half_digit:\r\n extra_digit = np.asarray(\r\n [\r\n 1 if (str(x[i])[0] == \"1\" or str(x[i])[0:2] == \"-1\") else 0\r\n for i in range(len(x))\r\n ]\r\n )\r\n else:\r\n extra_digit = np.zeros(len(x))\r\n mags = 10 ** (\r\n self.digit_resolution\r\n + extra_digit\r\n - 1\r\n - np.floor(np.log10(x_positive))\r\n )\r\n return np.round(x * mags) / mags\r\n\r\n\r\nclass Coil(object):\r\n \"\"\"Primary coil for the Kibble Balance.\r\n\r\n Args:\r\n height: Height (in z direction) of the coil.\r\n radius: Radius of the coil.\r\n turns: Number of turns in the coil.\r\n capacitance: Inter-winding capacitance and capacitance of the coil and\r\n wires to the DVM to surrounding surfaces.\r\n inductance: Self inductance of the coil.\r\n resistance: Resistance of the wire in the coil and wires to the DVM.\r\n \r\n Attributes:\r\n height: Height (in z direction) of the coil.\r\n radius: Radius of the coil.\r\n turns: Number of turns in the coil.\r\n c: Inter-winding capacitance and capacitance of the coil and\r\n wires to the DVM to surrounding surfaces.\r\n l: Self inductance of the coil.\r\n r: Resistance of the wire in the coil and wires to the DVM.\r\n \"\"\"\r\n\r\n def __init__(\r\n self,\r\n height: float,\r\n radius: float,\r\n turns: float,\r\n capacitance: float,\r\n inductance: float,\r\n resistance: float,\r\n ):\r\n \"\"\"Init coil.\"\"\"\r\n self.height = height\r\n self.radius = radius\r\n self.turns = turns\r\n self.c = capacitance\r\n self.l = inductance\r\n self.r = resistance\r\n\r\n def get_voltage_sin_scale_factor(self, w, r_i):\r\n \"\"\"Find scale factor for sin component of the voltage due to LRC nature of the coil.\r\n\r\n Assumes the voltage is of the form V=V0 * sin(wt+phase)\r\n\r\n Args:\r\n w: Angular frequency of the displacement component.\r\n r_i: Internal resistance of the DVM.\r\n\r\n \"\"\"\r\n scale_factor = (\r\n r_i\r\n * (r_i + self.r - self.c * self.l * r_i * w**2)\r\n / (\r\n r_i**2\r\n + 2 * r_i * self.r\r\n + self.r**2\r\n + self.l**2 * w**2\r\n - 2 * self.c * self.l * r_i**2 * w**2\r\n + self.c**2 * self.r**2 * r_i**2 * w**2\r\n + self.c**2 * self.l**2 * r_i**2 * w**4\r\n )\r\n )\r\n return scale_factor\r\n\r\n def get_voltage_cos_scale_factor(self, w, r_i):\r\n \"\"\"Find scale factor for additional cos component of the voltage due to LRC nature of the coil.\r\n\r\n Assumes the voltage is of the form V=V0 * sin(wt+phase)\r\n\r\n Args:\r\n w: Angular frequency of the displacement component.\r\n r_i: Internal resistance of the DVM.\r\n\r\n \"\"\"\r\n scale_factor = (\r\n r_i\r\n * (self.l + self.c * r_i * self.r)\r\n * w\r\n / (\r\n r_i**2\r\n + 2 * r_i * self.r\r\n + self.r**2\r\n + self.l**2 * w**2\r\n - 2 * self.c * self.l * r_i**2 * w**2\r\n + self.c**2 * self.r**2 * r_i**2 * w**2\r\n + self.c**2 * self.l**2 * r_i**2 * w**4\r\n )\r\n )\r\n return scale_factor\r\n\r\n # Add coil to Bl rather than parameters\r\n # Modify measure_voltage with an optional tag for 'add_coil_effects' or\r\n # something - that then takes coil object, and calculates the voltage from that\r\n\r\n\r\nclass Bl(object):\r\n \"\"\"Bl or gamma: magnetic field strength x coil wire 
length.\r\n\r\n The above relation is only true if everything is properly aligned.\r\n\r\n Args:\r\n b_field: Magnetic field strength array with each point corresponding\r\n to the location in _b_displacement.\r\n b_displacement: Displacement array corresponding to _b_field.\r\n coil: Coil object containing coil dimensions.\r\n polyfit_order: Order of polynomial fit for Bl.\r\n \r\n Attributes:\r\n bl_polyfit: np.poly1d polynomial fit for Bl\r\n \"\"\"\r\n\r\n def __init__(\r\n self,\r\n b_field: npt.ArrayLike,\r\n b_displacement: npt.ArrayLike,\r\n coil: Coil,\r\n polyfit_order: int = 8,\r\n ) -> None:\r\n \"\"\"Init bl.\"\"\"\r\n # Attributes are private because changing the variables wouldn't auto-\r\n # matically change the bl_polyfit. They could be made properties.\r\n self._b_field = b_field\r\n self._b_displacement = b_displacement\r\n self.coil = coil\r\n self._polyfit_order = polyfit_order\r\n # Lengh of coil wire in magnetic field\r\n self._l = 2 * np.pi * self.coil.radius * self.coil.turns\r\n self.bl_polyfit = self._create_bl_polyfit()\r\n\r\n @classmethod\r\n def from_csv(\r\n cls,\r\n fname: str,\r\n coil: Coil,\r\n polyfit_order: int = 8,\r\n delimiter: str = \",\",\r\n skip_header: int = 0,\r\n ):\r\n \"\"\"Import B(z) field from csv.\r\n\r\n Import B with position as the first column, and field values as the\r\n second column.\r\n\r\n Args:\r\n fname: Name of csv file to import.\r\n delimiter: Delimiter used in csv file.\r\n coil: Coil object containing coil dimensions.\r\n polyfit_order: Order of polynomial fit for Bl.\r\n skip_header: Number of header rows to skip.\r\n \"\"\"\r\n csv_read = np.genfromtxt(\r\n fname, delimiter=delimiter, skip_header=skip_header\r\n )\r\n b_displacement = csv_read[:, 0]\r\n b_field = csv_read[:, 1]\r\n\r\n return cls(\r\n b_field,\r\n b_displacement,\r\n coil,\r\n polyfit_order,\r\n )\r\n\r\n def _create_bl_polyfit(self) -> np.poly1d:\r\n \"\"\"Create a polynomial fit for the Bl field-position data.\r\n\r\n Both creates a fit for the B field then considers the effect of the\r\n coil width to get an average Bl experienced by the coil at any position.\r\n Note that the polyfit only seems accurate to about ~1e-12 in scenarios\r\n tested.\r\n \"\"\"\r\n # Fit the Bl data\r\n bl_fit = np.polyfit(\r\n self._b_displacement, self._b_field * self._l, self._polyfit_order\r\n )\r\n # Initialise polynomial for definite integration\r\n definite_integral_poly = np.poly1d([0])\r\n # Integrate the initial fit\r\n indefinite_integral_poly = np.flip(np.polyint(bl_fit))\r\n for i, coefficient in enumerate(indefinite_integral_poly):\r\n if i != 0: # Don't care about the 0th order\r\n # Add polynomials defined by their roots to compute definite\r\n # integration across the coil\r\n definite_integral_poly += np.poly1d(\r\n coefficient\r\n * np.poly1d(np.zeros(i) - self.coil.height / 2, True).c\r\n )\r\n definite_integral_poly -= np.poly1d(\r\n coefficient\r\n * np.poly1d(np.zeros(i) + self.coil.height / 2, True).c\r\n )\r\n # Divide definite integral by integration width to obtain average bl\r\n bl_interp_poly = np.polydiv(\r\n definite_integral_poly, np.poly1d([self.coil.height])\r\n )[0]\r\n # TODO(finneganc): handle 0 coil width\r\n return bl_interp_poly\r\n\r\n def _create_spline_interpolation(self) -> interpolate.CubicSpline:\r\n # TODO(finnegac): Cubic spline is probably more appropriate here? 
It would\r\n        # mean that polynomial fitting cannot necessarily be perfect (which is\r\n        # true IRL).\r\n        raise NotImplementedError(\r\n            \"Spline interpolation not yet implemented because the integration problem is a bit of a pain. I think it can be done with Simpson's rule.\"\r\n        )\r\n\r\n    def at_z(self, z: npt.ArrayLike) -> np.ndarray:\r\n        \"\"\"Evaluate the polynomial fit at a specific position.\r\n\r\n        Args:\r\n            z: Vertical position of the coil relative to weighing position.\r\n        Raises:\r\n            ValueError: If z is outside of the range of B data provided.\r\n        \"\"\"\r\n        z = np.asarray(z)\r\n        if np.min(z) < np.min(self._b_displacement) or np.max(z) > np.max(\r\n            self._b_displacement\r\n        ):\r\n            raise ValueError(\r\n                f\"z of {z} is outside the range of B field data provided.\"\r\n            )\r\n        return np.polyval(self.bl_polyfit, z)\r\n\r\n\r\n# TODO: Change DVM to DigitalVoltmeter or TimeIntervalAnalyser to TIA\r\n# TODO: Implement integral and differential noise\r\n# TODO: Implement processing delay\r\nclass Dvm(object):\r\n    \"\"\"Digital voltmeter class containing error contributions and parameters.\r\n\r\n    Used to contain all relevant information for a DVM with default parameters\r\n    referring to the 3458A specifications. Factors not considered here include\r\n    the 100ns time jumps that can occur even when only using the internal\r\n    clock, the 0.01% frequency variation, and linearity. If linearity becomes\r\n    a problem, a quantum sampling voltmeter could be used, i.e. output a sine\r\n    signal from the PJVS.\r\n\r\n    Args:\r\n        integration_time: Time over which the voltage measurement is integrated.\r\n        clock: Clock object containing relevant information for the internal\r\n            clock of the DVM.\r\n        timing_latency: Timing latency when using an external trigger. The\r\n            3458A can differ by up to 125ns model to model. 
Worth testing as it\r\n was much larger than specified for Lapuh (2018).\r\n integration_time: Time over which the voltage measurement is integrated.\r\n internal_resistance: Internal resistance of the DVM.\r\n \"\"\"\r\n\r\n def __init__(\r\n self,\r\n integration_time: float,\r\n clock: Clock = Clock(\r\n 1e7, np.random.uniform(0, 2 * np.pi), Agwn(1e-10)\r\n ),\r\n timing_latency: float = 175e-9,\r\n quantisation_digits: Optional[npt.ArrayLike] = (\r\n 8.5,\r\n 7.5,\r\n 6.5,\r\n 5.5,\r\n 4.5,\r\n ),\r\n quantisation_thresholds: Optional[npt.ArrayLike] = (\r\n 0.166,\r\n 10e-3,\r\n 100e-6,\r\n 1.5e-6,\r\n 0.5e-6,\r\n ),\r\n internal_resistance: float = 1e10,\r\n ) -> None:\r\n \"\"\"Init Dvm.\"\"\"\r\n # TODO(finneganc): make integration_time & quantisations properties\r\n self.clock = clock\r\n self.timing_latency = timing_latency\r\n self._integration_time = integration_time\r\n self.internal_resistance = internal_resistance\r\n # TODO(finneganc): Lots of preconditioning - relationship, ordering, type etc.\r\n self._quantisation_digits = quantisation_digits\r\n self._quantisation_thresholds = quantisation_thresholds\r\n\r\n if type(self._quantisation_digits) != type(None):\r\n self.quantisation_error = self._get_quantisation_error()\r\n\r\n # Allow changing of _integration_time but recompute quantisation error if\r\n # changed\r\n @property\r\n def integration_time(self) -> float:\r\n \"\"\"Get integration time.\"\"\"\r\n return self._integration_time\r\n\r\n @integration_time.setter\r\n def integration_time(self, integration_time) -> None:\r\n self._integration_time = integration_time\r\n self.quantisation_error = self._get_quantisation_error()\r\n\r\n def _get_quantisation_error(self) -> float:\r\n \"\"\"Get quantisation error object depending on Dvm parameters.\"\"\"\r\n # TODO(finneganc): There must be a better way of doing this\r\n # Initialise the quantisation error based on the integration time\r\n # If quantisation error is integration_time independant\r\n if len(np.asarray(self._quantisation_digits).shape) == 0:\r\n return QuantisationError(self._quantisation_digits)\r\n else:\r\n # find the correct digit resolution based on integration time.\r\n idx = 0\r\n for threshold in self._quantisation_thresholds:\r\n if self.integration_time > threshold:\r\n break\r\n else:\r\n idx += 1\r\n else:\r\n raise ValueError(\r\n f\"Dvm integration time of {self.integration_time} is lower than minimum threshold of {self._quantisation_thresholds[-1]}.\"\r\n )\r\n return QuantisationError(self._quantisation_digits[idx])\r\n\r\n def determine_sample_times(\r\n self, samp_times: npt.ArrayLike, time_reference: Clock\r\n ) -> np.ndarray:\r\n \"\"\"Determine the actual start times of the voltage measurement.\r\n\r\n This implementation assumes that the timing delay in measurement occurs\r\n before the internal clock sees the trigger signal. 
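As a worked example\r\n        with hypothetical numbers: with a 10 MHz internal clock (100 ns ticks),\r\n        zero phase and zero jitter, a trigger at 1.0 us plus the 175 ns latency\r\n        reaches 1.175 us, so integration starts on the next tick at 1.2 us, an\r\n        effective delay of 200 ns. 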
This implies that a\r\n nominal delay of say 175ns with no timing jitter will result in a delay\r\n greater than 175ns.\r\n\r\n Args:\r\n samp_times: The times of the trigger signals linked to the time\r\n reference.\r\n time_reference: Time reference clock for the balance.\r\n\r\n Returns:\r\n Numpy array of actual start times of the voltage measurement.\r\n \"\"\"\r\n # Lapuh (2018) p 119 has more to say on optimising the relative\r\n # frequencies of the internal and trigger clocks for minimum time jitter\r\n\r\n # np.ceil will ceil an int cast as a floating point number due to floating point errors\r\n rough_ceil = lambda x, threshold: np.where(\r\n (x != 0) & (abs((np.floor(x) - x) / x) < threshold),\r\n np.floor(x),\r\n np.ceil(x),\r\n )\r\n # TODO(finneganc): get rid of the divide by zero warning above\r\n\r\n samp_times_clock_ticks = rough_ceil(\r\n samp_times * time_reference.freq, 1e-15\r\n )\r\n\r\n if (\r\n samp_times_clock_ticks / time_reference.freq - samp_times > 1e-15\r\n ).any():\r\n raise ValueError(\r\n f\"sampling times must be governed by clock reference i.e on the counts of frequency {time_reference.freq}\"\r\n )\r\n\r\n internal_jitter = self.clock.time_jitter.generate_noise(\r\n len(samp_times)\r\n )\r\n\r\n num_internal_ticks = self.clock.freq * (\r\n samp_times\r\n + time_reference.time_jitter.generate_noise(len(samp_times))\r\n + internal_jitter\r\n + self.timing_latency\r\n - self.clock.phase / (2 * np.pi * self.clock.freq)\r\n )\r\n\r\n num_internal_ticks = rough_ceil(num_internal_ticks, 1e-15)\r\n\r\n # TODO(finneganc): Check whether this aligns with Lapuh et. al. 2015.\r\n\r\n voltage_time = (\r\n num_internal_ticks / self.clock.freq\r\n + internal_jitter\r\n + self.clock.phase / (2 * np.pi * self.clock.freq)\r\n )\r\n return voltage_time\r\n\r\n def get_voltage_integral(self, displacement_signal, bl):\r\n if type(displacement_signal) != SineSignal:\r\n # At this stage the symbolic integration only works with sinusoids\r\n raise ValueError(\r\n f\"displacement signal must be of type SineSignal not {type(displacement_signal)}\"\r\n )\r\n # Prepare an array where each entry represents one signal saved as\r\n # [w, A, phase, correction factor for sine, correction factor for cosine]\r\n signal_array = []\r\n w = displacement_signal.frequency * 2 * np.pi\r\n signal_array.append(\r\n [\r\n w,\r\n displacement_signal.amplitude,\r\n displacement_signal.phase,\r\n bl.coil.get_voltage_sin_scale_factor(\r\n w, self.internal_resistance\r\n ),\r\n bl.coil.get_voltage_cos_scale_factor(\r\n w, self.internal_resistance\r\n ),\r\n ]\r\n )\r\n # If the noise floor is present, add each component to the array\r\n for signal in displacement_signal.additional_signals:\r\n if type(signal) != VibrationNoiseFloor:\r\n # At this stage the symbolic integration only works with sinusoids\r\n raise ValueError(\r\n f\"additional signal must be of type VibrationNoiseFloor not {type(signal)}\"\r\n )\r\n for f, a, phase in zip(\r\n signal.frequencies, signal.amplitudes, signal.phases\r\n ):\r\n w = 2 * np.pi * f\r\n signal_array.append(\r\n [\r\n w,\r\n a,\r\n phase,\r\n bl.coil.get_voltage_sin_scale_factor(\r\n w, self.internal_resistance\r\n ),\r\n bl.coil.get_voltage_cos_scale_factor(\r\n w, self.internal_resistance\r\n ),\r\n ]\r\n )\r\n\r\n\r\n t = sympy.Symbol(\"t\")\r\n position = 0\r\n # Set up symbolic position signal\r\n phis = sympy.symbols('phi:'+str(len(signal_array)))\r\n for i, signal in enumerate(signal_array):\r\n position += signal[1] * sympy.sin(signal[0] * t + phis[i])\r\n 
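# Phases are kept symbolic (phi_0, phi_1, ...), presumably so the\r\n        # expensive symbolic integration below can be reused when only the\r\n        # phase values change; numeric values are substituted via phi_dict.\r\n        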
bl_at_t = 0\r\n # Use polynomial form of B field to find Bl(z(t))\r\n for i, c in enumerate(bl.bl_polyfit.c):\r\n bl_at_t += bl.bl_polyfit.c[i] * position ** (\r\n len(bl.bl_polyfit.c) - i - 1\r\n )\r\n # Determine the integrand according to the circuit model\r\n to_integrate = 0\r\n for signal in signal_array:\r\n to_integrate += (\r\n bl_at_t\r\n * signal[0]\r\n * signal[1]\r\n * (\r\n signal[3]\r\n * sympy.sin(signal[0] * t + signal[2] + np.pi / 2)\r\n + signal[4]\r\n * sympy.cos(signal[0] * t + signal[2] + np.pi / 2)\r\n )\r\n )\r\n # Integrate\r\n voltage_integral = sympy.integrate(to_integrate, t)\r\n # Determine measured voltage\r\n phi_dict = {phi: signal_array[i][2] for i, phi in enumerate(phis)}\r\n \r\n return voltage_integral, t, phi_dict\r\n\r\n def measure_voltage(\r\n self,\r\n displacement_signal: Signal,\r\n samp_times: npt.ArrayLike,\r\n time_reference: Clock,\r\n bl: Bl,\r\n coil_correction: bool = False,\r\n coil_correction_params: tuple=None,\r\n ) -> np.ndarray:\r\n \"\"\"Measure the average voltage over the integration time.\r\n\r\n Args:\r\n displacement signal: signal object of the displacement in moving\r\n mode. Typically a combination of linear or sinusoidal, with\r\n noise.\r\n samp_times: the times at which samples are desired.\r\n time_reference: the time reference creating the trigger signals.\r\n bl: Bl object containing position dependance of the B field.\r\n\r\n Returns:\r\n voltage measurement at (roughly) the specified sampled times\r\n \"\"\"\r\n if min(np.diff(samp_times)) < self.integration_time:\r\n # This is because the DVM needs time to integrate (and maybe process)\r\n raise ValueError(\r\n \"Sampling time cannot be less than integration time\"\r\n )\r\n voltage_time = self.determine_sample_times(samp_times, time_reference)\r\n displacement_start = displacement_signal.generate_signal(voltage_time)\r\n displacement_end = displacement_signal.generate_signal(\r\n voltage_time + self.integration_time\r\n )\r\n\r\n # TODO(finneganc): protect against an out of range z\r\n poly_int = np.polyint(bl.bl_polyfit)\r\n average_voltage = (\r\n np.polyval(poly_int, displacement_end)\r\n - np.polyval(poly_int, displacement_start)\r\n ) / self.integration_time\r\n\r\n # TODO(finneganc): Consider implementing the transient coil effect for\r\n # linear or at least determine its time constant to ensure it isn't a\r\n # problem\r\n if coil_correction:\r\n if coil_correction_params != None:\r\n voltage_integral, t, phi_dict = coil_correction_params\r\n else:\r\n voltage_integral, t, phi_dict = self.get_voltage_integral(\r\n displacement_signal, bl)\r\n \r\n phi_symbols, phi_values = zip(*phi_dict.items())\r\n \r\n voltage_fn = sympy.lambdify([t] + list(phi_symbols), voltage_integral, \"numpy\")\r\n average_voltage = (voltage_fn(voltage_time+self.integration_time, *phi_values)\r\n - voltage_fn(voltage_time, *phi_values))/self.integration_time\r\n\r\n\r\n if type(self._quantisation_digits) != type(None):\r\n average_voltage = self.quantisation_error.apply_quantisation(\r\n average_voltage\r\n )\r\n return average_voltage\r\n\r\n\r\nclass TimeIntervalAnalyser(object):\r\n \"\"\"Measures time margins between data signals.\r\n\r\n Instead of fully simulating the TIA's operation the errors are approximated\r\n from the datasheet of the Carmel Instruments NK732. 
This is done for\r\n simplicity at the expense of specificity and generality.\r\n\r\n Args:\r\n clock: Internal time reference.\r\n base_resolution: Max resolution of the TIA (rms/std dev).\r\n base_accuracy: Maximum accuracy of the TIA, origins unclear.\r\n internal_noise: Internal noise in V^2\r\n \r\n Attributes:\r\n clock: Internal time reference.\r\n base_resolution: Max resolution of the TIA (rms/std dev).\r\n base_accuracy: Maximum accuracy of the TIA, origins unclear.\r\n internal_noise: Internal noise in V^2\r\n \"\"\"\r\n\r\n def __init__(\r\n self,\r\n clock: Clock,\r\n base_resolution: float = 2e-12,\r\n base_accuracy: float = 20e-12,\r\n internal_noise: float = 440e-6,\r\n ):\r\n \"\"\"Init the TIA.\"\"\"\r\n self.clock = clock\r\n self.base_resolution = base_resolution\r\n self.base_accuracy = base_accuracy\r\n self.internal_noise = internal_noise\r\n\r\n def measure_interval(self, interval_times, slew_rate, external_noise):\r\n \"\"\"Add TIA errors to the interval times.\r\n\r\n Args:\r\n interval_times: interval times measured by the TIA (the differences\r\n between the start and end points).\r\n slew_rate: Input signal slew rate at zero crossing in V/s.\r\n external_noise: RMS noise in the TIA input signals.\r\n \"\"\"\r\n n = len(interval_times)\r\n base_res = np.random.normal(0, self.base_resolution, n)\r\n\r\n start_trigger_error = np.random.normal(\r\n 0,\r\n np.sqrt(self.internal_noise**2 + external_noise**2)\r\n / slew_rate,\r\n n,\r\n )\r\n stop_trigger_error = np.random.normal(\r\n 0,\r\n np.sqrt(self.internal_noise**2 + external_noise**2)\r\n / slew_rate,\r\n n,\r\n )\r\n # TODO(finneganc): figure out exactly how to implement timebase error\r\n timebase_error = self.clock.time_jitter.generate_noise(\r\n n\r\n ) + self.clock.time_jitter.generate_noise(n)\r\n\r\n trigger_level_time_error = np.random.normal(0, 5e-3 / slew_rate, n)\r\n\r\n # Assume base accuracy is constant over a run but varies experiment to\r\n # experiment (and is uniform). This error is likely a result of the\r\n # digital interpolation scheme used to exceed the 10MHz (100ns) limit\r\n # so this seems like a reasonable approximation to the error. Might be\r\n # single shot though, which would be better. 
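(A single-shot error would be\r\n        # redrawn for every interval and partly average out, whereas a per-run\r\n        # draw acts as a systematic offset.) 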
Sticking with worst case\r\n # for now.\r\n base_accuracy = np.random.uniform(\r\n -self.base_accuracy / 2, self.base_accuracy / 2\r\n )\r\n\r\n return (\r\n interval_times\r\n + base_res\r\n + start_trigger_error\r\n + stop_trigger_error\r\n + timebase_error\r\n + trigger_level_time_error\r\n + base_accuracy\r\n )\r\n\r\n\r\nclass Interferometer(object):\r\n \"\"\"Heterodyne Michelson interferometer for accurate position measurement.\r\n\r\n In the MSL kibble balance three of these will be used to determine the z\r\n position of the center of the coil.\r\n\r\n Args:\r\n integration_time: Time over which one displacement measurement is taken.\r\n clock: Internal clock, currently does nothing.\r\n interferometer_reference: A clock representing the reference beat\r\n signal of the interferometer.\r\n tia: Time interval analyser associated with the interferometer.\r\n square_slew_rate: Slew rate (gradient) of the square waves that are\r\n passed to the tia.\r\n square_noise_rms: RMS noise of the square waves that are passed to the\r\n tia.\r\n timing_latency: Time between receiving the trigger signal and being\r\n prepared to make the measurement.\r\n wavelength: Wavelength of the laser.\r\n xi: Intensity of the measurement arm as it reaches the polariser, arb\r\n units.\r\n chi: Intensity of the reference arm as it reaches the polariser.\r\n phi: Rotation of the half wave plate relative to its ideal value (\r\n matching the incoming light and the polarising beam splitter)\r\n in degrees.\r\n theta: Angle of the polariser relative to 45 degrees.\r\n dE1: Ellipticity of the light meant to go into the measurement arm (deg).\r\n dE2: Ellipticity of the light meant to go into the reference arm (deg). \r\n \r\n Attributes:\r\n integration_time: Time over which one displacement measurement is taken.\r\n clock: Internal clock, currently does nothing.\r\n interferometer_reference: A clock representing the reference beat\r\n signal of the interferometer.\r\n tia: Time interval analyser associated with the interferometer.\r\n square_slew_rate: Slew rate (gradient) of the square waves that are\r\n passed to the tia.\r\n square_noise_rms: RMS noise of the square waves that are passed to the\r\n tia.\r\n timing_latency: Time between receiving the trigger signal and being\r\n prepared to make the measurement.\r\n wavelength: Wavelength of the laser.\r\n xi: Intensity of the measurement arm as it reaches the polariser, arb\r\n units.\r\n chi: Intensity of the reference arm as it reaches the polariser.\r\n alpha: Offset angle of the polarised light meant for the reference arm\r\n (rad).\r\n beta: Offset angle of the polarised light meant for the measurement arm\r\n (rad).\r\n theta: Angle of the polariser relative to pi/4 radians.\r\n dE1: Ellipticity of the light meant to go into the measurement arm (rad).\r\n dE2: Ellipticity of the light meant to go into the reference arm (rad).\r\n \"\"\"\r\n\r\n def __init__(\r\n self,\r\n integration_time: float,\r\n clock: Clock, # TODO(finneganc): implement clock - currently assumes it's the time reference\r\n interferometer_reference: Clock, # Clock(2.6e6, 0, Agwn('time-jitter', 0))\r\n tia: TimeIntervalAnalyser,\r\n square_slew_rate: float = 2e8,\r\n square_noise_rms: float = 0,\r\n timing_latency: float = 0,\r\n wavelength: float = 633e-9,\r\n xi: float = 1.0,\r\n chi: float = 1.8,\r\n phi: float = 0.5,\r\n theta: float = 0.5,\r\n dE1: float = 0.05,\r\n dE2: float = 0.05,\r\n ) -> None:\r\n \"\"\"Init Interferometer with key parameters including NLE 
params.\"\"\"\r\n        # TODO(finneganc): find out about interferometer latency\r\n        self.integration_time = integration_time\r\n        self.clock = clock\r\n        self.interferometer_reference = interferometer_reference\r\n        self.tia = tia\r\n        self.square_slew_rate = square_slew_rate\r\n        self.square_noise_rms = square_noise_rms\r\n        self.timing_latency = timing_latency\r\n        self.wavelength = wavelength\r\n        self.xi = xi\r\n        self.chi = chi\r\n        self.alpha = phi * np.pi * 2 / 180\r\n        self.beta = -phi * np.pi * 2 / 180\r\n        self.theta = theta * np.pi / 180\r\n        self.dE1 = dE1 * np.pi / 180\r\n        self.dE2 = dE2 * np.pi / 180\r\n\r\n        # TODO(finneganc): Change this to a warning and instead just alter the integration time to be one cycle less\r\n        if (\r\n            (self.integration_time * self.interferometer_reference.freq) % 1\r\n        ) / max(\r\n            (self.integration_time, self.interferometer_reference.freq)\r\n        ) > 1e-15:\r\n            raise ValueError(\r\n                f\"integration time of {self.integration_time} must include an integer number of reference cycles (frequency {self.interferometer_reference.freq})\"\r\n            )\r\n\r\n    def get_nonlinearity_from_signal(\r\n        self, carrier_signal: npt.ArrayLike\r\n    ) -> np.ndarray:\r\n        \"\"\"Create nonlinearity error to add to measurement signal.\r\n\r\n        Model from Cosijns et al. (2002). This model has been tested\r\n        at MSL and provides similar results to experiments, though with\r\n        deviations on the same order of magnitude as the result. The effect of\r\n        this error can be minimised by sampling over one fringe.\r\n\r\n        Args:\r\n            carrier_signal: Signal for which the non-linearity error applies.\r\n\r\n        Returns:\r\n            Numpy array of the added non-linearity error.\r\n        \"\"\"\r\n        conversion = 4 * np.pi\r\n        del_phi = conversion * carrier_signal / self.wavelength\r\n\r\n        A = (\r\n            -(\r\n                self.xi**2 * (np.sin(self.beta)) ** 2\r\n                + self.chi**2 * (np.cos(self.beta)) ** 2\r\n            )\r\n            * np.cos(0.5 * self.dE1)\r\n            * np.sin(0.5 * self.dE2)\r\n            - (\r\n                self.xi**2 * (np.cos(self.alpha)) ** 2\r\n                + self.chi**2 * (np.sin(self.alpha)) ** 2\r\n            )\r\n            * np.sin(0.5 * self.dE1)\r\n            * np.cos(0.5 * self.dE2)\r\n        ) * np.cos(del_phi) + (\r\n            self.xi**2 * np.cos(self.alpha) * np.sin(self.beta)\r\n            + self.chi**2 * np.sin(self.alpha) * np.cos(self.beta)\r\n        ) * np.cos(\r\n            0.5 * self.dE1 + 0.5 * self.dE2\r\n        ) * np.sin(\r\n            del_phi\r\n        )\r\n\r\n        B = (\r\n            (\r\n                self.xi**2 * (np.sin(self.beta)) ** 2\r\n                - self.chi**2 * (np.cos(self.beta)) ** 2\r\n            )\r\n            * np.cos(0.5 * self.dE1)\r\n            * np.sin(0.5 * self.dE2)\r\n            + (\r\n                self.xi**2 * (np.cos(self.alpha)) ** 2\r\n                - self.chi**2 * (np.sin(self.alpha)) ** 2\r\n            )\r\n            * np.sin(0.5 * self.dE1)\r\n            * np.cos(0.5 * self.dE2)\r\n        ) * np.cos(del_phi) + (\r\n            -self.xi**2 * np.cos(self.alpha) * np.sin(self.beta)\r\n            + self.chi**2 * np.sin(self.alpha) * np.cos(self.beta)\r\n        ) * np.cos(\r\n            0.5 * self.dE1 + 0.5 * self.dE2\r\n        ) * np.sin(\r\n            del_phi\r\n        )\r\n\r\n        C = (\r\n            self.xi\r\n            * self.chi\r\n            * (\r\n                np.cos(self.beta)\r\n                * np.sin(self.beta)\r\n                * np.cos(0.5 * self.dE1)\r\n                * np.sin(0.5 * self.dE2)\r\n                * (1 - np.cos(2 * del_phi))\r\n                + np.sin(self.alpha)\r\n                * np.sin(self.beta)\r\n                * np.cos(0.5 * self.dE1)\r\n                * np.cos(0.5 * self.dE2)\r\n                * np.sin(2 * del_phi)\r\n                - np.cos(self.alpha)\r\n                * np.cos(self.beta)\r\n                * np.sin(0.5 * self.dE1)\r\n                * np.sin(0.5 * self.dE2)\r\n                * np.sin(2 * del_phi)\r\n                - np.sin(self.alpha)\r\n                * np.cos(self.alpha)\r\n                * np.sin(0.5 * self.dE1)\r\n                * np.cos(0.5 * self.dE2)\r\n                * (1 + np.cos(2 * del_phi))\r\n            )\r\n        )\r\n\r\n        D = (\r\n            (\r\n                self.xi**2 * (np.sin(self.beta)) ** 2\r\n                + 
self.chi**2 * (np.cos(self.beta)) ** 2\r\n )\r\n * np.cos(0.5 * self.dE1)\r\n * np.sin(0.5 * self.dE2)\r\n + (\r\n self.xi**2 * (np.cos(self.alpha)) ** 2\r\n + self.chi**2 * (np.sin(self.alpha)) ** 2\r\n )\r\n * np.sin(0.5 * self.dE1)\r\n * np.cos(0.5 * self.dE2)\r\n ) * np.sin(del_phi) + (\r\n self.xi**2 * np.cos(self.alpha) * np.sin(self.beta)\r\n + self.chi**2 * np.sin(self.alpha) * np.cos(self.beta)\r\n ) * np.cos(\r\n 0.5 * self.dE1 + 0.5 * self.dE2\r\n ) * np.cos(\r\n del_phi\r\n )\r\n\r\n E = (\r\n (\r\n -self.xi**2 * (np.sin(self.beta)) ** 2\r\n + self.chi**2 * (np.cos(self.beta)) ** 2\r\n )\r\n * np.cos(0.5 * self.dE1)\r\n * np.sin(0.5 * self.dE2)\r\n + (\r\n -self.xi**2 * (np.cos(self.alpha)) ** 2\r\n + self.chi**2 * (np.sin(self.alpha)) ** 2\r\n )\r\n * np.sin(0.5 * self.dE1)\r\n * np.cos(0.5 * self.dE2)\r\n ) * np.sin(del_phi) + (\r\n -self.xi**2 * np.cos(self.alpha) * np.sin(self.beta)\r\n + self.chi**2 * np.sin(self.alpha) * np.cos(self.beta)\r\n ) * np.cos(\r\n 0.5 * self.dE1 + 0.5 * self.dE2\r\n ) * np.cos(\r\n del_phi\r\n )\r\n\r\n F = (\r\n self.xi\r\n * self.chi\r\n * (\r\n np.cos(self.beta)\r\n * np.sin(self.beta)\r\n * np.cos(0.5 * self.dE1)\r\n * np.sin(0.5 * self.dE2)\r\n * np.sin(2 * del_phi)\r\n + np.cos(self.alpha)\r\n * np.cos(self.beta)\r\n * (\r\n np.cos(0.5 * self.dE1) * np.cos(0.5 * self.dE2)\r\n - np.sin(0.5 * self.dE1)\r\n * np.sin(0.5 * self.dE2)\r\n * np.cos(2 * del_phi)\r\n )\r\n + np.sin(self.alpha)\r\n * np.sin(self.beta)\r\n * (\r\n -np.sin(0.5 * self.dE1) * np.sin(0.5 * self.dE2)\r\n + np.cos(0.5 * self.dE1)\r\n * np.cos(0.5 * self.dE2)\r\n * np.cos(2 * del_phi)\r\n )\r\n + np.sin(self.alpha)\r\n * np.cos(self.alpha)\r\n * np.sin(0.5 * self.dE1)\r\n * np.cos(0.5 * self.dE2)\r\n * np.sin(2 * del_phi)\r\n )\r\n )\r\n\r\n # Combining terms\r\n nonlin_model = -np.arctan(\r\n (A + B * np.sin(2 * self.theta) + C * np.cos(2 * self.theta))\r\n / (D + E * np.sin(2 * self.theta) + F * np.cos(2 * self.theta))\r\n )\r\n nonlin_model = nonlin_model * self.wavelength / conversion\r\n self.nonlin_model = nonlin_model\r\n return nonlin_model\r\n\r\n def determine_sample_times(\r\n self, samp_times: npt.ArrayLike, time_reference: Clock\r\n ) -> np.ndarray:\r\n \"\"\"Determine the actual start and stop times of the velocity measurement.\r\n\r\n Args:\r\n samp_times: The times of the trigger signals linked to the time\r\n reference.\r\n time_reference: Time reference clock for the balance.\r\n\r\n Returns:\r\n Numpy array of actual start times of the velocity measurement.\r\n\r\n Raises:\r\n ValueError: If the sampling time does not align with time\r\n reference.\r\n \"\"\"\r\n # Lapoh (2018) p 119 has more to say on optimising the relative\r\n # frequencies of the internal and trigger clocks for minimum time jitter\r\n\r\n # np.ceil will ceil an int cast as a floating point number due to floating point errors\r\n rough_ceil = lambda x, threshold: np.where(\r\n (x != 0) & (abs((np.floor(x) - x) / x) < threshold),\r\n np.floor(x),\r\n np.ceil(x),\r\n )\r\n # TODO(finneganc): get rid of the divide by zero warning above\r\n\r\n samp_times_clock_ticks = rough_ceil(\r\n samp_times * time_reference.freq, 1e-15\r\n )\r\n\r\n if (\r\n samp_times_clock_ticks / time_reference.freq - samp_times > 1e-15\r\n ).any():\r\n raise ValueError(\r\n f\"sampling times must be governed by clock reference i.e on the counts of frequency {time_reference.freq}\"\r\n )\r\n\r\n internal_jitter = (\r\n self.interferometer_reference.time_jitter.generate_noise(\r\n len(samp_times)\r\n )\r\n 
)\r\n\r\n        # TODO(finneganc): synchronise jitter with voltage\r\n        # TODO(finneganc): add jitters to end times too\r\n        num_internal_ticks = self.interferometer_reference.freq * (\r\n            samp_times\r\n            + time_reference.time_jitter.generate_noise(len(samp_times))\r\n            + self.timing_latency\r\n            + internal_jitter\r\n            - self.interferometer_reference.phase\r\n            / (2 * np.pi * self.interferometer_reference.freq)\r\n        )\r\n\r\n        num_internal_ticks = rough_ceil(num_internal_ticks, 1e-15)\r\n\r\n        samp_times = (\r\n            num_internal_ticks / self.interferometer_reference.freq\r\n            + internal_jitter\r\n            + self.interferometer_reference.phase\r\n            / (2 * np.pi * self.interferometer_reference.freq)\r\n        )\r\n        return samp_times\r\n\r\n    def measure_position(\r\n        self, displacement_signal, samp_times, time_reference\r\n    ):\r\n        \"\"\"Measure the displacement across the integration time.\r\n\r\n        Args:\r\n            displacement_signal: The true displacement of the coil.\r\n            samp_times: Sample trigger signals linked to time reference.\r\n            time_reference: Clock acting as time reference for the balance.\r\n\r\n        Returns:\r\n            List of displacements at the start of each measurement and list of\r\n            displacements at the end of each measurement, in a tuple.\r\n\r\n        Raises:\r\n            ValueError: If sampling time is less than integration time.\r\n        \"\"\"\r\n        if min(np.diff(samp_times)) < self.integration_time:\r\n            # This is because the interferometer needs time to integrate (and\r\n            # maybe process)\r\n            raise ValueError(\r\n                \"Sampling time cannot be less than integration time\"\r\n            )\r\n\r\n        velocity_time = self.determine_sample_times(samp_times, time_reference)\r\n        displacement_start = displacement_signal.generate_signal(velocity_time)\r\n        displacement_end = displacement_signal.generate_signal(\r\n            velocity_time + self.integration_time\r\n        )\r\n\r\n        displacement_start = (\r\n            displacement_start\r\n            + self.get_nonlinearity_from_signal(displacement_start)\r\n        )\r\n\r\n        # Change in time seen by the TIA due to the Doppler phase shift. 
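In code terms,\r\n        # dt = 2 * z / (wavelength * f_ref); the TIA measures this interval and\r\n        # the displacement is recovered as wavelength * f_ref * dt / 2. 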
Note:\r\n        # this could correspond to a phase shift of more than 2 pi, so it is not\r\n        # actually the phase difference between the nearest reference edge\r\n        # and next measurement edge.\r\n        start_dt = (\r\n            2\r\n            * displacement_start\r\n            / (self.wavelength * self.interferometer_reference.freq)\r\n        )\r\n        start_dt = self.tia.measure_interval(\r\n            start_dt, self.square_slew_rate, self.square_noise_rms\r\n        )\r\n        displacement_start = (\r\n            self.wavelength * self.interferometer_reference.freq * start_dt / 2\r\n        )\r\n\r\n        displacement_end = (\r\n            displacement_end\r\n            + self.get_nonlinearity_from_signal(displacement_end)\r\n        )\r\n\r\n        end_dt = (\r\n            2\r\n            * displacement_end\r\n            / (self.wavelength * self.interferometer_reference.freq)\r\n        )\r\n        end_dt = self.tia.measure_interval(\r\n            end_dt, self.square_slew_rate, self.square_noise_rms\r\n        )\r\n        displacement_end = (\r\n            self.wavelength * self.interferometer_reference.freq * end_dt / 2\r\n        )\r\n\r\n        return displacement_start, displacement_end\r\n\r\n    def measure_velocity(\r\n        self,\r\n        displacement_signal,\r\n        samp_times,\r\n        time_reference,\r\n        return_position=False,\r\n    ):\r\n        \"\"\"Measure the average velocity across the integration time.\r\n\r\n        Args:\r\n            displacement_signal: The true displacement of the coil.\r\n            samp_times: Sample trigger signals linked to time reference.\r\n            time_reference: Clock acting as time reference for the balance.\r\n            return_position: If True, also return the start and end position\r\n                for each sample.\r\n\r\n        Returns:\r\n            Either just the average velocity for each sample time or that and\r\n            the displacement at the start of each measurement and the\r\n            displacement at the end of each measurement, in a tuple.\r\n        \"\"\"\r\n        displacement_start, displacement_end = self.measure_position(\r\n            displacement_signal, samp_times, time_reference\r\n        )\r\n        average_velocity = (\r\n            displacement_end - displacement_start\r\n        ) / self.integration_time\r\n        if return_position:\r\n            return average_velocity, displacement_start, displacement_end\r\n        else:\r\n            return average_velocity\r\n\r\n\r\nclass MovingModeExperiment(object):\r\n    \"\"\"One or more continuous moving mode measurements.\r\n\r\n    Set up, run, and analyse.\r\n    \r\n    Args:\r\n        dvm: Digital voltmeter used in the experiment.\r\n        interferometer: Laser interferometer used in experiment.\r\n        displacement_signal: The true displacement of the coil.\r\n        time_reference: The time reference clock.\r\n        bl: The true Bl of the magnet/coil setup.\r\n        samp_times: When to send the trigger signal to the DVM and\r\n            interferometer.\r\n        weighing_pos: The z position to be used in the weighing mode.\r\n        coil_correction: Whether to alter the measured voltage due to the LRC\r\n            nature of the coil.\r\n        reuse_integral: Whether to compute the symbolic voltage integral once\r\n            and reuse it for subsequent runs.\r\n    \r\n    Attributes:\r\n        dvm: Digital voltmeter used in the experiment.\r\n        interferometer: Laser interferometer used in experiment.\r\n        displacement_signal: The true displacement of the coil.\r\n        time_reference: The time reference clock.\r\n        bl: The true Bl of the magnet/coil setup.\r\n        samp_times: When to send the trigger signal to the DVM and\r\n            interferometer.\r\n        weighing_pos: The z position to be used in the weighing mode.\r\n        u_results: List of voltage measurements for each run.\r\n        v_results: List of velocity measurements for each run.\r\n        displacement_start: The displacement recorded at the start of each\r\n            velocity measurement.\r\n        displacement_end: The displacement recorded at the end of each\r\n            velocity measurement.\r\n        bl_weighing_pos: The value of Bl at the weighing position. 
This is the\r\n            target for the experiment and analysis.\r\n        coil_correction: Whether to alter the measured voltage due to the LRC\r\n            nature of the coil.\r\n        reuse_integral: Whether to compute the symbolic voltage integral once\r\n            and reuse it for subsequent runs.\r\n    \"\"\"\r\n\r\n    def __init__(\r\n        self,\r\n        dvm: Dvm,\r\n        interferometer: Interferometer,\r\n        displacement_signal: Signal,\r\n        time_reference: Clock,\r\n        bl: Bl,\r\n        samp_times: npt.ArrayLike,\r\n        weighing_pos: float = 0,\r\n        coil_correction: bool = False,\r\n        reuse_integral: bool = True,\r\n    ) -> None:\r\n        \"\"\"Set up experiment.\"\"\"\r\n        self.dvm = dvm\r\n        self.interferometer = interferometer\r\n        self.displacement_signal = displacement_signal\r\n        self.time_reference = time_reference\r\n        self.bl = bl\r\n        self.samp_times = samp_times\r\n        self.u_results = None\r\n        self.v_results = None\r\n        self.displacement_start = None\r\n        self.displacement_end = None\r\n        self.reuse_integral = reuse_integral\r\n        self.voltage_integral = None\r\n        self.weighing_pos = weighing_pos\r\n        # TODO(finneganc): Rename to bl_at_weighing_pos\r\n        self.bl_weighing_pos = self.bl.at_z(self.weighing_pos)\r\n        self.coil_correction = coil_correction\r\n\r\n    # TODO(finneganc): Consider moving this to a utils class/module\r\n    @classmethod\r\n    def sine_fit(cls, w: float, t: np.ndarray, y: np.ndarray) -> tuple:\r\n        \"\"\"Compute a three-parameter sine fit for time series data.\r\n\r\n        A three parameter sine fit was chosen over the Fast Fourier Transform (FFT)\r\n        as the FFT struggles with the picket fence effect. Under the condition of\r\n        coherent sampling at a frequency sampled by the FFT the result should be\r\n        identical. A four parameter sine fit may be necessary if frequency is not\r\n        known beforehand.\r\n\r\n        Args:\r\n            w: Angular frequency of the sinusoid.\r\n            t: Array of times corresponding to y values.\r\n            y: Time-series data to fit.\r\n\r\n        Returns:\r\n            Output of sine fitting with tuple (amplitude, phase, offset).\r\n        \"\"\"\r\n        D1 = np.sin(w * t)\r\n        D2 = np.cos(w * t)\r\n        D3 = np.ones(len(t))\r\n        D = np.column_stack((D1, D2, D3))\r\n\r\n        params, _, _, _ = np.linalg.lstsq(D, y, rcond=None)\r\n        amplitude = np.sqrt(params[0] ** 2 + params[1] ** 2)\r\n\r\n        phase = np.arctan2(params[1], params[0])\r\n        offset = params[2]\r\n\r\n        return amplitude, phase, offset\r\n\r\n    def run_experiment(\r\n        self,\r\n        num_runs: int = 1,\r\n        bl_compensation: bool = False,\r\n        bl_poly: Optional[np.poly1d] = None,\r\n    ) -> None:\r\n        \"\"\"Run the experiment, overwriting stored results.\r\n\r\n        Args:\r\n            num_runs: The number of times to repeat the experiment.\r\n            bl_compensation: Whether to try to remove the effect of a curved Bl(z)\r\n                with an estimate of Bl(z)/Bl(meas_pos).\r\n            bl_poly: Polynomial approximating Bl(z)/Bl(meas_pos) for use in Bl\r\n                compensation.\r\n        \"\"\"\r\n        self.u_results = []\r\n        self.v_results = []\r\n        self.displacement_start = []\r\n        self.displacement_end = []\r\n        if self.coil_correction and self.reuse_integral and self.voltage_integral is None:\r\n            self.voltage_integral = self.dvm.get_voltage_integral(\r\n                self.displacement_signal, self.bl)\r\n        for run in range(num_runs):\r\n            measured_voltage = self.dvm.measure_voltage(\r\n                self.displacement_signal,\r\n                self.samp_times,\r\n                self.time_reference,\r\n                self.bl,\r\n                coil_correction=self.coil_correction,\r\n                coil_correction_params=self.voltage_integral,\r\n            )\r\n\r\n            (\r\n                measured_velocity,\r\n                disp_start,\r\n                disp_end,\r\n            ) = self.interferometer.measure_velocity(\r\n                self.displacement_signal,\r\n                self.samp_times,\r\n                self.time_reference,\r\n                return_position=True,\r\n            )\r\n\r\n            # To compensate for bl voltage measurement from a value derived\r\n            # from a 
measurement of Bl(z)/Bl(0) (only the value relative to\r\n # Bl(0) matters).\r\n if bl_compensation:\r\n # Normalise to weighing_pos value\r\n bl_poly /= np.polyval(bl_poly, self.weighing_pos)\r\n # Integrate as it is the voltage integral that is measured\r\n poly_comp = np.polyint(bl_poly)\r\n # Meas = Bl(0)*(polycomp(disp_end)-polycomp(disp_start))\r\n # Want = Bl(0)*(disp_end-disp_start) (what you get if Bl(z)=Bl(0))\r\n # So multiply Meas by Want/Meas to get Want!\r\n to_multiply = (disp_end - disp_start) / (\r\n np.polyval(poly_comp, disp_end)\r\n - np.polyval(poly_comp, disp_start)\r\n )\r\n measured_voltage *= to_multiply\r\n\r\n self.u_results.append(measured_voltage)\r\n self.v_results.append(measured_velocity)\r\n self.displacement_start.append(disp_start)\r\n self.displacement_end.append(disp_end)\r\n\r\n def analyse_average(self) -> Tuple[float, ...]:\r\n \"\"\"Average all Bl(z) measurements and compare to true Bl(weighing_pos).\r\n\r\n All metrics are normalised to the true value of Bl at the weighing\r\n position.\r\n\r\n Returns:\r\n Tuple of metrics including: rel_bias - the bias of the measured\r\n Bl, rel_stddev - the stddev of the measured Bl across multiple\r\n runs, and rel_avg_residuals - the average across all runs of the\r\n MAE (mean absolute error) of the fit.\r\n \"\"\"\r\n bls_meas = []\r\n residuals = []\r\n for u_result, v_result in zip(self.u_results, self.v_results):\r\n result = u_result / v_result\r\n bls_meas.append(np.average(result))\r\n residuals.append(abs(np.average(result) - result))\r\n rel_bias = (\r\n self.bl_weighing_pos - np.average(bls_meas)\r\n ) / self.bl_weighing_pos\r\n rel_stddev = np.std(bls_meas) / self.bl_weighing_pos\r\n rel_avg_residuals = np.average(residuals) / self.bl_weighing_pos\r\n return (rel_bias, rel_stddev, rel_avg_residuals)\r\n\r\n # TODO(finneganc): Make 4 parameter sine fit\r\n def analyse_simple_sine_fit(\r\n self,\r\n w: float,\r\n ) -> Tuple[Tuple[float, ...], float]:\r\n \"\"\"Sine fit U and v with Bl as the ratio of the amplitudes.\r\n\r\n All metrics are normalised to the true value of Bl at the weighing\r\n position.\r\n\r\n Args:\r\n w: The angular frequency of the sine to be fitted.\r\n\r\n Returns:\r\n Tuple of metrics including: rel_bias - the bias of the measured\r\n Bl, rel_stddev - the stddev of the measured Bl across multiple\r\n runs, and rel_avg_residuals - the average across all runs of the\r\n MAE (mean absolute error) of the fit propagated and normalised to\r\n Bl. 
Params is the last item: a list of dictionaries each containing\r\n the sine fit parameters for the run corresponding to the position\r\n in the list.\r\n \"\"\"\r\n params = []\r\n bls_meas = []\r\n residuals = []\r\n for u_result, v_result in zip(self.u_results, self.v_results):\r\n # Have the sample time be the middle of the integrated sample\r\n u_t = self.samp_times + self.dvm.integration_time / 2\r\n u_amp, u_phase, u_offset = MovingModeExperiment.sine_fit(\r\n w, u_t, u_result\r\n )\r\n u_residuals = np.average(\r\n abs(u_result - u_amp * np.sin(w * u_t + u_phase) - u_offset)\r\n )\r\n\r\n # Have the sample time be the middle of the integrated sample\r\n v_t = self.samp_times + self.interferometer.integration_time / 2\r\n v_amp, v_phase, v_offset = MovingModeExperiment.sine_fit(\r\n w, v_t, v_result\r\n )\r\n v_residuals = np.average(\r\n abs(v_result - v_amp * np.sin(w * v_t + v_phase) - v_offset)\r\n )\r\n\r\n # Propagate residuals with the partial derivative method\r\n residuals.append(\r\n u_residuals / v_amp + u_amp * v_residuals / (v_amp**2)\r\n )\r\n bls_meas.append(u_amp / v_amp)\r\n params.append(\r\n {\r\n \"U amp\": u_amp,\r\n \"U offset\": u_offset,\r\n \"U phase\": u_phase,\r\n \"v amp\": v_amp,\r\n \"v offset\": v_offset,\r\n \"v phase\": v_phase,\r\n }\r\n )\r\n rel_bias = (\r\n self.bl_weighing_pos - np.average(bls_meas)\r\n ) / self.bl_weighing_pos\r\n rel_stddev = np.std(bls_meas) / self.bl_weighing_pos\r\n rel_avg_residuals = np.average(residuals) / self.bl_weighing_pos\r\n\r\n return (rel_bias, rel_stddev, rel_avg_residuals), params\r\n\r\n # TODO(finneganc): reconcile sine_fit and polyfit naming\r\n def analyse_simple_polyfit(\r\n self, deg: int\r\n ) -> Tuple[Tuple[float, ...], float]:\r\n \"\"\"Polynomial fit Bl(z) to find Bl(0), and analyse.\r\n\r\n All metrics are normalised to the true value of Bl at the weighing\r\n position.\r\n\r\n Args:\r\n deg: The polynomial degree to be fitted to the measured data.\r\n\r\n Returns:\r\n Tuple of metrics including: rel_bias - the bias of the measured\r\n Bl, rel_stddev - the stddev of the measured Bl across multiple\r\n runs, and rel_avg_residuals - the average across all runs of the\r\n MAE (mean absolute error) of the fit propagated and normalised to\r\n Bl. Params is the last item: a list of arrays with each array\r\n containing the coefficients for the run corresponding to its\r\n position in the list. Highest order coefficients are first.\r\n \"\"\"\r\n bls_meas = []\r\n residuals = []\r\n params = []\r\n for u_result, v_result, disp_start, disp_end in zip(\r\n self.u_results,\r\n self.v_results,\r\n self.displacement_start,\r\n self.displacement_end,\r\n ):\r\n bls_meas_of_z = u_result / v_result\r\n\r\n # Measurement position at the middle of the integration range\r\n # Alternatively could do middle of the integration time. 
For linear\r\n # this would be the same.\r\n # There is some kind of correction to be done here for Bl(z)!=Bl(0)\r\n meas_pos = disp_start + (disp_end - disp_start) / 2\r\n\r\n coeffs = np.polyfit(meas_pos, bls_meas_of_z, deg=deg)\r\n poly = np.poly1d(coeffs)\r\n bls_meas.append(np.polyval(poly, self.weighing_pos))\r\n residuals.append(\r\n np.average(abs(bls_meas_of_z - np.polyval(poly, meas_pos)))\r\n )\r\n params.append(coeffs)\r\n\r\n rel_bias = (\r\n self.bl_weighing_pos - np.average(bls_meas)\r\n ) / self.bl_weighing_pos\r\n rel_stddev = np.std(bls_meas) / self.bl_weighing_pos\r\n rel_avg_residuals = np.average(residuals) / self.bl_weighing_pos\r\n\r\n return (rel_bias, rel_stddev, rel_avg_residuals), params\r\n\r\n\r\n# TODO LIST: ##=priority\r\n# Put in proper file structure?\r\n## Finish interferometer tests\r\n# Check types, comments, and docstrings\r\n## Finish tests\r\n# Add preconditions, check types\r\n# - write function to convert arraylike to ndarray or float or something\r\n# Run better linter\r\n# Run typing\r\n# Add more features\r\n# - e.g. 4 param sine fit, frequency counter for sine wave etc.\r\n## Write README (with citations)\r\n# Put analyses in their own object/objects\r\n# Finish TODOs\r\n# Add a re-randomise everything option to MovingModeExperiment if necessary\r\n","repo_name":"fcmesserli/kibble-sim","sub_path":"moving_mode/error_sim.py","file_name":"error_sim.py","file_ext":"py","file_size_in_byte":73917,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"22404888468","text":"import contextlib\n\nimport pytest\nfrom fastapi.testclient import TestClient\nfrom sqlalchemy import text\nfrom sqlalchemy_utils import create_database, database_exists, drop_database\n\nfrom app.db.base import Base\nfrom app.db.session import SessionLocal, engine\nfrom app.main import create_app\n\n\n@pytest.fixture\ndef app():\n return create_app()\n\n\n@pytest.fixture\ndef client(app):\n return TestClient(app)\n\n\n@pytest.fixture(scope=\"session\", autouse=True)\ndef create_test_database():\n # Create the database and tables if they don't exist\n if database_exists(engine.url):\n # Drop tables\n Base.metadata.drop_all(bind=engine) # type: ignore\n else:\n create_database(engine.url)\n\n # Create UUID extension\n db = SessionLocal()\n db.execute(text('CREATE EXTENSION IF NOT EXISTS \"uuid-ossp\";'))\n db.commit()\n db.close()\n\n # Create tables\n Base.metadata.create_all(bind=engine) # type: ignore\n\n yield\n\n\n@pytest.fixture(scope=\"function\", autouse=True)\ndef reset_db():\n meta = Base.metadata\n with contextlib.closing(engine.connect()) as connection:\n transaction = connection.begin()\n for table in reversed(meta.sorted_tables):\n connection.execute(table.delete())\n transaction.commit()\n\n\n@pytest.fixture\ndef db():\n db = SessionLocal()\n try:\n yield db\n finally:\n db.close()\n\n\n@pytest.fixture\ndef anyio_backend():\n return \"asyncio\"\n","repo_name":"Mdslino/template-fastapi","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"19843217688","text":"\nclass OddEvenMergeSortNetwork(object):\n def __init__(self, vars):\n self.vars = vars\n self.next_var = max(vars) + 1\n\n def oddevenmergesort(self, a):\n \n def make_and_clauses(res, a, b):\n return [[-res, a], [-res, b], [res, -a, -b]]\n \n def make_or_clauses(res, a, b):\n return [[res, -a], [res, -b], 
[-res, a, b]]\n \n def construct_block(i1, i2):\n in1, in2 = self.vars[i1], self.vars[i2]\n self.vars[i1] = self.next_var\n self.next_var += 1\n self.vars[i2] = self.next_var\n self.next_var += 1\n out1, out2 = self.vars[i1], self.vars[i2]\n return make_or_clauses (out1, in1, in2) + make_and_clauses(out2, in1, in2)\n \n def oddevenmerge(a):\n clauses = []\n if len(a) > 2:\n clauses += oddevenmerge(a[::2])\n clauses += oddevenmerge(a[1::2])\n for i in range(len(a))[1:len(a)-2:2]:\n clauses += construct_block(a[i], a[i+1])\n else:\n clauses += construct_block(a[0], a[1])\n return clauses\n \n clauses = []\n if len(a) > 1:\n clauses += self.oddevenmergesort(a[:len(a)/2]) \n clauses += self.oddevenmergesort(a[len(a)/2:]) \n clauses += oddevenmerge(a)\n return clauses\n \n###### ###### ###### ###### ###### ###### ###### ###### ###### ###### ###### \n\nclass CardinalityNetwork(object):\n def __init__(self, vars, next_var=None):\n self.vars = vars\n if next_var:\n self.next_var = next_var\n else:\n self.next_var = max(vars) + 1\n self.level = 0\n \n# def indent(self):\n# self.level += 1\n# \n# def outdent(self):\n# self.level -= 1\n# \n# def indstr(self):\n# return self.level * \"| \" + \"+- \"\n \n def make_and_clauses(self, res, a, b):\n# return [[-res, a], [-res, b], [res, -a, -b]]\n return [[res, -a, -b]]\n \n def make_or_clauses(self, res, a, b):\n# return [[res, -a], [res, -b], [-res, a, b]]\n return [[res, -a], [res, -b]]\n \n def construct_block(self, i1, i2):\n# self.indent()\n# print self.indstr() + \"comparing\", i1, \"and\", i2\n# self.outdent()\n in1, in2 = self.vars[i1], self.vars[i2]\n self.vars[i1] = self.next_var\n self.next_var += 1\n self.vars[i2] = self.next_var\n self.next_var += 1\n out1, out2 = self.vars[i1], self.vars[i2]\n return self.make_or_clauses (out1, in1, in2) + self.make_and_clauses(out2, in1, in2)\n \n def smerge(self, a):\n# print self.indstr() + \"smerge(%s)\" % map(lambda x: x+1, a)\n clauses = []\n if len(a) > 2:\n# self.indent()\n clauses += self.smerge(a[::2])\n clauses += self.smerge(a[1::2])\n# self.outdent()\n# for i in range(len(a))[1:len(a)-2:2]:\n for i in range(len(a))[1:len(a)/2+1:2]:\n clauses += self.construct_block(a[i], a[i+1])\n else:\n clauses += self.construct_block(a[0], a[1])\n return clauses\n\n def hsort(self,a):\n# print self.indstr() + \"hsort(%s)\" % map(lambda x: x+1, a)\n clauses = []\n if len(a) > 1:\n# self.indent()\n clauses += self.hsort(a[:len(a)/2]) \n clauses += self.hsort(a[len(a)/2:]) \n clauses += self.smerge(a)\n# self.outdent()\n return clauses\n \n def card(self, a, k):\n# print self.indstr() + \"card(%s, %d)\" % (map(lambda x: x+1, a), k)\n# self.indent()\n clauses = []\n if len(a) == k:\n clauses = self.hsort(a)\n else:\n clauses += self.card(a[:k], k)\n clauses += self.card(a[k:], k)\n clauses += self.smerge(a[:2*k])\n# self.outdent()\n return clauses\n \n \n \n \n","repo_name":"zhoujh5510/myProject","sub_path":"MyMasterWork/PyMBD/pymbd/sat/oddevenmergesort.py","file_name":"oddevenmergesort.py","file_ext":"py","file_size_in_byte":4006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"74493328594","text":"dna = ''\ndataset = open('input.txt', 'r')\n#names = [] REMOVED FOR SPEED\n# imports sequence names, dna strings, stores in separate lists\nappended = False\nfor line in dataset:\n if line.startswith('>'):\n pass\n appended = False\n else:\n if appended is False:\n dna = line.strip()\n appended = True\n else:\n dna += 
line.strip()\ndataset.close()\n##########################################\ndef revComp(d): #*Provides reverse complement to DNA string\n rcomp = ''\n for char in d:\n if char == 'A':\n rcomp = 'T' + rcomp\n if char == 'T':\n rcomp = 'A' + rcomp\n if char == 'C':\n rcomp = 'G' + rcomp\n if char == 'G':\n rcomp = 'C' + rcomp\n return rcomp\n##########################################\nfor index in range(0, len(dna)-1): # test each position\n for l in range(4,14, 2): # test each potential length\n test = dna[index: index+l] # lengths are 4-12, evens\n if len(test) == l: # filter for shorter strings\n halfF = test[0: l/2]\n halfB = revComp(test[l/2: l]) # identify halves of palindrome\n if halfF == halfB: # test for palindrome\n print (index+1, l)\n","repo_name":"asgray/Ros","sub_path":"String_Algorithms_REVP_restrictionSites.py","file_name":"String_Algorithms_REVP_restrictionSites.py","file_ext":"py","file_size_in_byte":1272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"10216073370","text":"# coding: utf-8\n\nfrom typing import Dict\n\nimport pytest\nfrom httpx import AsyncClient\nfrom src.models.get_level_list_response import GetLevelListResponse # noqa: F401\nfrom src.models.get_level_response import GetLevelResponse # noqa: F401\n\n\n@pytest.mark.asyncio\nasync def test_get_accounts_level(client: AsyncClient) -> None:\n \"\"\"Test case for get_accounts_level\n\n Get accounts level\n \"\"\"\n\n headers: Dict[str, str] = {}\n response = await client.request(\n \"GET\",\n \"/accounts/{accountKey}/levels/{levelName}\".format(\n accountKey=\"account_key_example\", levelName=\"level_name_example\"\n ),\n headers=headers,\n )\n\n assert response.status_code != 500\n\n\n@pytest.mark.asyncio\nasync def test_get_accounts_levels(client: AsyncClient) -> None:\n \"\"\"Test case for get_accounts_levels\n\n Get accounts level list\n \"\"\"\n params: Dict[str, str] = {\n \"localization\": \"en\",\n \"page\": \"0\",\n \"keywords\": \"Chino\",\n \"sort\": \"0\",\n \"order\": \"0\",\n \"status\": \"0\",\n \"author\": \"any\",\n \"random\": \"0\",\n }\n headers: Dict[str, str] = {}\n response = await client.request(\n \"GET\",\n \"/accounts/{accountKey}/levels/list\".format(accountKey=\"account_key_example\"),\n headers=headers,\n params=params,\n )\n\n assert response.status_code != 500\n","repo_name":"PurplePalette/sp-api-v3","sub_path":"tests/test_accounts_levels_api.py","file_name":"test_accounts_levels_api.py","file_ext":"py","file_size_in_byte":1368,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"84"} +{"seq_id":"464882547","text":"\"\"\"Images for plot functions\"\"\"\nfrom pathlib import Path\nfrom typing import List, Any\n\nfrom sphinx.application import Sphinx\nfrom sphinx.ext.autodoc import Options\n\n\ndef insert_function_images(\n app: Sphinx, what: str, name: str, obj: Any, options: Options, lines: List[str]\n):\n path = app.config.api_dir / f\"{name}.png\"\n if what != \"function\" or not path.is_file():\n return\n lines[0:0] = [\n f\".. 
image:: {path.name}\",\n \" :width: 200\",\n \" :align: right\",\n \"\",\n ]\n\n\ndef setup(app: Sphinx):\n app.add_config_value(\"api_dir\", Path(), \"env\")\n app.connect(\"autodoc-process-docstring\", insert_function_images)\n","repo_name":"scverse/scanpy","sub_path":"docs/extensions/function_images.py","file_name":"function_images.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","stars":1641,"dataset":"github-code","pt":"84"} +{"seq_id":"21767767269","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 15 15:17:15 2023\n\n@author: rafae\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nfrom scipy.stats import wilcoxon\n\noverlaps = pd.read_excel('outputs/results/fineTunedNoDup/overlapTFs/overlappingDF.xlsx')\n\ndifferences = pd.DataFrame(columns= ['region', 'overlapOriginal', 'overlapShuffled', 'difference'])\n\nlistDifferences = []\nfor region in np.unique(overlaps['region']):\n\n overlapInitial = overlaps[(overlaps['region'] == region) & (overlaps['randomized'] == 0)][ 'overlapCount'].values[0]\n avgRandomized = np.mean(overlaps[(overlaps['region'] == region) & (overlaps['randomized'] == 1)][ 'overlapCount'])\n difference = overlapInitial- avgRandomized\n listDifferences.append(difference)\n dictForDF = {'region' : region, \n 'overlapOriginal' : overlapInitial, \n 'overlapShuffled' : avgRandomized, \n 'difference' : difference}\n df_dictionary = pd.DataFrame([dictForDF])\n differences= pd.concat([differences, df_dictionary], ignore_index=True)\n \nprint (differences)\n\nres = wilcoxon(listDifferences, alternative = 'greater')\nprint ('statistic =', res.statistic, '\\t', 'p-value =', res.pvalue)\n\nif (res.pvalue <0.05):\n print ('Difference is significant')\nelse:\n print('Difference is not significant')","repo_name":"rafaella-buzatu/GENALM_Finetune_regElem","sub_path":"WilcoxonTest.py","file_name":"WilcoxonTest.py","file_ext":"py","file_size_in_byte":1312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"73245769553","text":"import sys\r\n# sys.stdin = open('test.txt', 'r')\r\n\r\n# 1929.\r\nM, N = map(int, sys.stdin.readline().split())\r\nfor i in range(M, N+1) :\r\n tnf = 1\r\n\r\n for j in range(2, int(i**0.5)+1) :\r\n if i % j == 0 :\r\n tnf = 0\r\n break\r\n\r\n if tnf == 1 and i != 1:\r\n print(i)\r\n\r\n","repo_name":"jeongyun11/algorithm","sub_path":"백준/Silver/1929. 소수 구하기/소수 구하기.py","file_name":"소수 구하기.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"22598568392","text":"#Driver for CJMCU-IR temperature sensor. 
Uses mlx90614 but adds serial interface to it.\n\nfrom machine import UART\nfrom utime import sleep_ms\n\n#Format of data output from the device:\n#length: 9\n# Byte0 Header Flags 0x66\n# Byte1 Header Flags 0x66\n# Byte2 data output mode (0x01 continuous output; 0x02 query output, the default for continuous output mode)\n# Byte3 Measured data length (counted by Byte)\n# Byte4 Temperature 1 Upper 8 bits\n# Byte5 Temperature 1 Lower 8 bits\n# Byte6 Temperature 2 Upper 8 bits\n# Byte7 Temperature 2 Lower 8 bits\n# Byte8 data parity (all data accumulation, take the low 8-bit)\n#\n#Celcius Temperature calculation method:\n#\n#Temperature = Data High 8 bits << 8 | Lower 8 bits of data, the result is the actual temperature multiplied by 100.\n#\n#Command instructions:\n#Lenght: 4\n#Byte0 Header Flags 0x66\n#Byte1 Header Flags 0x66\n#Byte2 Sets the command:\n# 0x01 Continuous output mode\n# 0x02 Query output mode\n# 0x11 Set the baud rate to 9600\n# 0x12 Set the baud rate to 57600\n# 0x13 Set the baud rate to 115200\n#Byte3 End of frame flag 0x56\n\ndef c2f( aValue ):\n '''Celcius to Farenheit conversion.'''\n return (aValue * 9.0 / 5.0) + 32.0\n\nclass cjmcu(object) :\n \"\"\"docstring for cjmcu\"\"\"\n _CONTINUOUS = const(1)\n _POLL = const(2)\n\n _RATEBASE = 0x11\n _BAUD9600 = const(0)\n _BAUD19200 = const(1)\n _BAUD38400 = const(2)\n\n def __init__(self, aLoc):\n print('super')\n super(cjmcu, self).__init__()\n print('creating uart')\n sleep_ms(1000)\n self._uart = UART(aLoc, 19200)\n print('init')\n sleep_ms(1000)\n self._mode = _POLL\n self._output = bytearray(4)\n self._output[0] = 0x66\n self._output[1] = 0x66\n self._output[2] = self._mode\n self._output[3] = 0x56\n self._input = bytearray(9)\n\n self.update()\n\n def write( self ) :\n print('write', self._output)\n self._uart.write(self._output)\n\n def read( self ) :\n print('read', len(self.input))\n self._uart.readinto(self._input)\n\n def update( self ) :\n self.write()\n self.read()\n\n def setbaud( self, aBaud ) :\n self._output[2] = _BAUDBASE + aBaud\n self.update()\n self._output[2] = self._mode\n self._uart.deinit()\n self._uart.init(9600 << aBaud)\n\n def temps( self ) :\n v1 = (self._input[4] << 8) | self._input[5]\n v2 = (self._input[6] << 8) | self._input[7]\n return (v1 / 100.0, v2 / 100.0)\n\n\n\n","repo_name":"GuyCarver/MicroPython","sub_path":"esp8266/cjmcu.py","file_name":"cjmcu.py","file_ext":"py","file_size_in_byte":2375,"program_lang":"python","lang":"en","doc_type":"code","stars":120,"dataset":"github-code","pt":"84"} +{"seq_id":"72636623956","text":"from nsetools import Nse\nimport pandas as pd\nimport time\nnse = Nse()\nall_stock_codes = nse.get_stock_codes()\ndf_final = pd.DataFrame()\ni = 0\nwith open(\"daily_data/0_log.txt\", \"w\") as log:\n for key in all_stock_codes:\n try:\n stock_code = key\n q = nse.get_quote(key)\n df = pd.DataFrame([q])\n \n if i%50 == 0:\n print(f'Finished getting data for: {i} stocks, last stock code:{stock_code}')\n i += 1\n df.to_csv('daily_data/'+stock_code+'.csv',mode='a', index = False, header=None)\n \n except Exception as e:\n log.write(f'{time.strftime(\"%Y%m%d-%H%M%S\")} ERROR: Failed downloading {stock_code}, Reason: {str(e)}')\n continue\n#df_final.to_csv('daily_data/Stocks_daily_data.xlsx')","repo_name":"nageen20/stock_markets","sub_path":"stocks_daily_data.py","file_name":"stocks_daily_data.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"16045104388","text":"from 
django.shortcuts import render, get_object_or_404\nfrom django.http import HttpResponseRedirect\nfrom django.urls import reverse\nfrom django.core.mail import send_mail\nfrom django.utils import timezone\nfrom django.contrib import messages\n\nfrom .forms import SubmitFeedbackForm\nfrom .models import SubmitFeedback\n\ndef submit_feedback(request):\n # POST request -- save the submitted feedback if it's valid\n if request.method == \"POST\":\n form = SubmitFeedbackForm(request.POST)\n if form.is_valid():\n if form.cleaned_data['your_email']:\n send_email(form.cleaned_data)\n messages.success(request, \"Thank you for your feedback, %s.\"\n %(form.cleaned_data[\"your_name\"].split()[0])) \n form = form.save(commit=False)\n form.date = timezone.now() \n form.save()\n return HttpResponseRedirect(reverse('feedback:submit_feedback'))\n \n # Any other kind of request -- create the empty feedback form \n else:\n form = SubmitFeedbackForm()\n \n # Serve the empty feedback form to the user \n return render(request, \"feedback/submit_feedback.html\", {\"form\": form})\n\ndef view_feedback_list(request):\n feedback = SubmitFeedback.objects.order_by('-date')\n \n return render(request, 'feedback/view_feedback_list.html', {'feedback': feedback})\n\ndef view_feedback(request, feedback_id):\n feedback = get_object_or_404(SubmitFeedback, pk=feedback_id)\n feedback.has_been_read = True\n feedback.save()\n \n return render(request, 'feedback/view_feedback.html', {'feedback': feedback})\n \n \n\ndef send_email(form_data):\n subject = 'Thank you for your feedback'\n message = '''Hi %s, \nThank you for your feedback. We may\ncontact you for more information with regard to your feedback. \nFor your convenience, we have reproduced your feedback below: \n\n%s'''%(form_data['your_name'], form_data['your_feedback'])\n sender = 'joshgoddard@gmail.com'\n receiver = (form_data['your_email'],) \n send_mail(subject, message, sender, receiver)\n \n","repo_name":"josh-signbank/django-feedback","sub_path":"feedback/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"23946688792","text":"import os\r\nfrom django.conf import settings\r\nfrom django.http import Http404\r\nfrom django.shortcuts import render, HttpResponse\r\nfrom django.contrib import messages\r\nfrom django.contrib.auth import authenticate, login, logout\r\nfrom django.contrib.auth.models import User\r\nfrom .models import Video, Message, Pdf\r\n\r\n# Create your views here.\r\n\r\ndef Home(request):\r\n return render(request, \"Home.html\")\r\n\r\ndef Signup(request):\r\n return render(request, \"Signup.html\")\r\n\r\ndef Signing_In(request):\r\n \"\"\"Creates New User If It Does Not Exist Already.\"\"\"\r\n if request.method == 'POST':\r\n email = request.POST['email']\r\n fname = request.POST['fname']\r\n lname = request.POST['lname']\r\n username = request.POST['username']\r\n password = request.POST['password']\r\n \r\n if not username.isalnum():\r\n messages.error(request, \"Username Can Only Contain Letters And Numbers !\")\r\n return render(request, \"Login.html\")\r\n else:\r\n user = User.objects.create_user(username, email, password)\r\n user.first_name = fname\r\n user.last_name = lname\r\n user.save()\r\n messages.success(request, \"Account Created Successfully ! 
You May Login Now !\")\r\n return render(request, \"Home.html\")\r\n\r\ndef Login(request):\r\n return render(request, \"Login.html\")\r\n\r\ndef Logging_In(request):\r\n \"\"\"Logs In The User If It Exists.\"\"\"\r\n if request.method == 'POST':\r\n username = request.POST['username']\r\n password = request.POST['password']\r\n\r\n user = authenticate(request, username=username, password=password)\r\n\r\n if user is not None:\r\n login(request, user)\r\n messages.success(request, \"Logged In Successfully !\")\r\n return render(request, \"Home.html\")\r\n \r\n else:\r\n messages.error(request, \"Invalid Credentials ! Please Try Again !\")\r\n return render(request, \"Login.html\")\r\n\r\ndef Logout(request):\r\n \"\"\"Logs Out The User.\"\"\"\r\n logout(request)\r\n messages.success(request, \"Logged Out Successfully !\")\r\n return render(request, \"Login.html\")\r\n\r\ndef Videos(request):\r\n \"\"\"Fetches Videos From Database And Shows To The User.\"\"\"\r\n obj = Video.objects.all()\r\n return render(request, \"Videos.html\", {'obj' : obj})\r\n\r\ndef Contact(request):\r\n \"\"\"Shows Contact Page To The User.\"\"\"\r\n return render(request, \"Contact.html\")\r\n\r\ndef Sending_Message(request):\r\n \"\"\"Saves Contact Message To Database.\"\"\"\r\n if request.method == \"POST\":\r\n Email = request.user.email\r\n username = request.user.username\r\n fname = request.POST['fname']\r\n lname = request.POST['lname']\r\n msg = request.POST['msg']\r\n data = Message(Email=Email, Username=username, First_Name=fname, Last_Name=lname, Message=msg)\r\n data.save()\r\n messages.success(request, \"I have received Your Message ! I will Reach Out to you via Email As Soon As Possible !\")\r\n return render(request, \"Home.html\")\r\n\r\ndef PDF(request):\r\n \"\"\"Shows PDF Page To The User.\"\"\"\r\n context = {'files' : Pdf.objects.all()}\r\n return render(request, \"PDFs.html\", context)\r\n\r\ndef Download(request, path):\r\n file_path = os.path.join(settings.MEDIA_ROOT, path)\r\n if os.path.exists(file_path):\r\n with open(file_path, 'rb') as fh:\r\n response = HttpResponse(fh.read(), content_type=\"application/File\")\r\n response['Content-Disposition'] = 'inline;filename=' + os.path.basename(file_path)\r\n return response\r\n raise Http404\r\n\r\ndef Search(request):\r\n \"\"\"Handles Search Function For Videos.\"\"\"\r\n if request.method == 'GET':\r\n query = request.GET.get('search')\r\n if query:\r\n vid = Video.objects.filter(Title__icontains=query) \r\n return render(request, 'Results_Videos.html', {'results':vid})\r\n else:\r\n return HttpResponse(\"No Information To Show !\")\r\n\r\ndef Search_PDF(request):\r\n \"\"\"Handles Search Function For PDFs.\"\"\"\r\n if request.method == 'GET':\r\n query = request.GET.get('search')\r\n if query:\r\n pdf = Pdf.objects.filter(Title__icontains=query) \r\n return render(request, 'Results_PDFs.html', {'results':pdf})\r\n else:\r\n return HttpResponse(\"No Information To Show !\")\r\n ","repo_name":"MannAaturi/MannAaturi","sub_path":"MannAaturi/Website/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"32715609975","text":"import logging\n\nfrom cafe.resources.launchpad.config import LaunchpadTrackerConfig\nfrom lplight.client import LaunchpadClient\n\n\nclass LaunchpadTracker(object):\n\n @classmethod\n def is_bug_open(cls, bug_id):\n \"\"\"Checks whether the Launchpad bug for the given bug id is open.\n An issue is 
considered open unless its status is \"Fix Committed\"\n or \"Fix Released.\"\n \"\"\"\n config = LaunchpadTrackerConfig()\n log = logging.getLogger('RunnerLog')\n launchpad = LaunchpadClient()\n\n resp = launchpad.get_bug_tasks(bug_id)\n if resp.status_code == 404:\n log.info('Couldn\\'t find bug with ID {0}'.format(bug_id))\n\n tasks = resp.model or []\n for bug_task in tasks:\n if bug_task.bug_target_name == config.project:\n return bug_task.status not in ('Fix Committed', 'Fix Released')\n\n log.info('Bug does not affect project {0} '\n 'or project name is not correct.'.format(config.project))\n return False\n","repo_name":"jaydeepc/opencafe_better","sub_path":"plugins/skip_on_issue/cafe/resources/launchpad/issue_tracker.py","file_name":"issue_tracker.py","file_ext":"py","file_size_in_byte":1033,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"38587156521","text":"import asyncio\n\n\nclass RequestServer:\n def __init__(self, loop, port):\n self.loop = loop\n self.port = port\n\n async def handle_request(self, reader, writer):\n request = await reader.read(100)\n data = request.decode().strip()\n writer.write(f\"Ack {data}\\n\".encode())\n await writer.drain()\n writer.close()\n\n async def start_server(self):\n coro = asyncio.start_server(self.handle_request,\n '127.0.0.1',\n self.port,\n loop=self.loop)\n server = await coro\n\n\ndef main():\n loop = asyncio.get_event_loop()\n server = RequestServer(loop, 8888)\n loop.create_task(server.start_server())\n try:\n loop.run_forever()\n except KeyboardInterrupt:\n pass\n\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"nchazin/pycon2019","sub_path":"addendum/network_service.py","file_name":"network_service.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"84"} +{"seq_id":"27707042640","text":"'''\nMoving the first several elements of an array to its end is called a rotation of the array.\nGiven one rotation of a non-decreasingly sorted array, output the minimum element of the rotated array.\nFor example, the array {3,4,5,1,2} is a rotation of {1,2,3,4,5}, and the minimum of that array is 1.\nNOTE: all the given elements are greater than 0; if the array size is 0, return 0.\n'''\n\n# -*- coding:utf-8 -*-\nclass Solution:\n def minNumberInRotateArray(self, rotateArray):\n if len(rotateArray) == 0:\n return 0\n front = 0\n rear = len(rotateArray) - 1\n minVal = rotateArray[0]\n if rotateArray[front] < rotateArray[rear]:\n return rotateArray[front]\n else:\n while (rear - front) > 1:\n mid = (rear + front)//2\n if rotateArray[mid] > rotateArray[rear]:\n front = mid\n elif rotateArray[mid] < rotateArray[front]:\n rear = mid\n elif rotateArray[mid] == rotateArray[front] and rotateArray[front] == rotateArray[rear]:\n for i in range(1, len(rotateArray)):\n if rotateArray[i] < minVal:\n minVal = rotateArray[i]\n rear = i\n minVal = rotateArray[rear]\n return minVal\n # Method from the book\n def minNumberInRotateArray2(self, rotateArray):\n if len(rotateArray) == 0:\n return 0\n front, rear = 0, len(rotateArray) - 1\n midIndex = 0\n while rotateArray[front] >= rotateArray[rear]:\n if rear - front == 1:\n midIndex = rear\n break\n midIndex = (front + rear) // 2\n if rotateArray[front] == rotateArray[rear] and rotateArray[front] == rotateArray[midIndex]:\n return self.MinInOrder(rotateArray, front, rear)\n\n if rotateArray[midIndex] >= rotateArray[front]:\n front = midIndex\n elif rotateArray[midIndex] <= rotateArray[rear]:\n rear = midIndex\n return rotateArray[midIndex]\n def MinInOrder(self, array, front, end):\n result = array[0]\n for i in array[front:end+1]:\n if i < result:\n result = i\n return result\n\nTest = 
Solution()\nprint(Test.minNumberInRotateArray([3, 4, 5, 1, 2]))\nprint(Test.minNumberInRotateArray([1, 2, 3, 4, 5]))\nprint(Test.minNumberInRotateArray([1, 1, 1, 0, 1]))\nprint(Test.minNumberInRotateArray([1, 0, 1, 1, 1]))\nprint(Test.minNumberInRotateArray([]))\nprint(Test.minNumberInRotateArray([1]))","repo_name":"Jack-Lee-Hiter/AlgorithmsByPython","sub_path":"Target Offer/旋转数组的最小数字.py","file_name":"旋转数组的最小数字.py","file_ext":"py","file_size_in_byte":2579,"program_lang":"python","lang":"en","doc_type":"code","stars":3829,"dataset":"github-code","pt":"84"} +{"seq_id":"879266371","text":"import matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nimport yelp_df as ydf\nplt.style.use('ggplot')\nplt.rcParams.update({'font.size': 14})\n\n# Assign default colors for the types of things to plot\nbasic_color = 'black'\nstars_color = 'orange'\ncategory_colors = ['red', 'orange', 'blue', 'purple', 'green', 'black', 'pink', 'c', 'm', 'y']\n\n\ndef plot_barh(x, y, title='', x_label='', y_label='', legend_label='', color='black', save=False):\n fig, ax = plt.subplots(1, 1, figsize=(8, 6))\n ax.barh(x, y, color=color)\n ax.set_title(title)\n ax.set_xlabel(x_label)\n ax.set_ylabel(y_label)\n ax.legend()\n plt.gca().invert_yaxis()\n plt.tight_layout(pad=2)\n if save:\n fig.savefig(f'../images/{title}.png')\n\n\ndef plot_stars_violin(df, label_col, label_names, color=stars_color, save=False):\n\n data = [np.array(df[df[label_col]==lab]['stars']) for lab in label_names]\n \n fig, ax = plt.subplots(1, 1, figsize=(8, 6))\n parts = ax.violinplot(data, vert=False, widths=0.8)\n\n for pc in parts['bodies']:\n pc.set_facecolor(color)\n\n ax.set_yticks(np.arange(1, len(label_names) + 1))\n ax.set_yticklabels(label_names)\n\n title = f'Star Distributions by {label_col}'\n ax.set_title(title)\n fig.tight_layout(pad=1)\n if save:\n fig.savefig(f'../images/{title}.png')\n\n\nif __name__ == '__main__':\n #Load the pickeled dataframes and convert to YelpDF's to use the class plotting functions\n businesses_df = pd.read_pickle('../data/pickled_businesses_df')\n businesses_df = ydf.YelpDF(businesses_df, 'stars', 'review_count')\n\n category_counts = pd.read_pickle('../data/pickled_category_counts')\n\n users_df = pd.read_pickle('../data/pickled_user_df')\n users_df = ydf.YelpDF(users_df, 'average_stars', 'review_count')\n\n # Plot top 10 category frequency counts\n x = category_counts['elem'][0:10]\n y = category_counts['count'][0:10]\n title = 'Top 10 business categories'\n plot_barh(x, y, title=title, color=category_colors, save=True)\n\n # Plot histrograms businesses\n businesses_df.plot_stars_hist(bins=9, title='Avg. 
Star Ratings for Businesses', save=True)\n businesses_df.plot_review_counts_hist(cutoff=2000, title='Review Counts for Businesses', save=True)\n\n # Violin plots for businesses\n top_5_cities = businesses_df['city'].value_counts()[0:5].index\n plot_stars_violin(businesses_df, 'city', top_5_cities, save=True)\n plot_stars_violin(businesses_df, 'Restaurant', [True, False], save=True)\n\n # Star rating comparisons for other business attributes\n businesses_df.plot_stars_hist(view_by_col='DogsAllowed', title='Star Ratings for Allows Dogs', save=False)\n businesses_df.plot_stars_hist(view_by_col='BYOB', filter_by=('Restaurant', [True]), title='Star Ratings for BYOB', save=False)\n businesses_df.plot_stars_hist(view_by_col='OutdoorSeating', filter_by=('Restaurant', [True]), title='Star Ratings for BYOB', save=False)\n\n # Plot histograms for users\n users_df.plot_stars_hist(bins=20, title='User Avg. Star Ratings', save=True)\n users_df.plot_review_counts_hist(cutoff=2000, title='User Review Counts', save=True)\n\n # Scatter plot of the average star rating vs. number of reviews (for businesses with between 100-5000 reviews)\n fig, ax = plt.subplots()\n data = businesses_df[businesses_df['review_count']<5000]\n x = data['stars']\n y = data['review_count']\n ax.scatter(x, y, color=stars_color)\n ax.set_xlabel('avg star rating')\n ax.set_ylabel('number of reviews')\n title = 'Avg. Star Rating vs. Number of Reviews'\n ax.set_title(title)\n plt.tight_layout(pad=2)\n fig.savefig(f'../images/{title}.png')\n\n\n # Plot count of businesses that accept bitcoin, by city\n col_name = 'city'\n col, vals = ('BusinessAcceptsBitcoin', ['True'])\n filtered = businesses_df[businesses_df[col].isin(vals)]\n legend_label = f'count of businesses where {col} is in {vals}'\n\n fig, ax = plt.subplots(1, 1, figsize=(10, 6))\n data = filtered[col_name].value_counts()[0:10]\n labels = data.index\n N = len(labels)\n tick_locations = np.arange(N)\n\n restaurant_counts = []\n non_restaurant_counts = []\n\n for lab in labels:\n restaurant_data = filtered[(filtered['Restaurant']==True) & (filtered[col_name]==lab)]\n non_restaurant_data = filtered[(filtered['Restaurant']==False) & (filtered[col_name]==lab)]\n restaurant_counts.append(len(restaurant_data))\n non_restaurant_counts.append(len(non_restaurant_data))\n \n ax.barh(tick_locations, restaurant_counts, label='Restaurants')\n ax.barh(tick_locations, non_restaurant_counts, label='Not Restaurants', left=restaurant_counts)\n ax.set_yticks(ticks=tick_locations)\n ax.set_yticklabels(labels)\n ax.set_xticks(np.arange(0, max(restaurant_counts) + max(non_restaurant_counts) + 1, step=5))\n ax.set_xlabel('number of businesses')\n ax.set_ylabel(f'{col_name}')\n title = f'Businesses that Accept Bitcoin by {col_name}'\n ax.set_title(title)\n ax.legend()\n plt.gca().invert_yaxis()\n fig.savefig(f'../images/{title}.png')","repo_name":"Laura-Kimble/Yelp-Reviews","sub_path":"src/yelp_plots.py","file_name":"yelp_plots.py","file_ext":"py","file_size_in_byte":5109,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"29489517344","text":"import random\r\n\r\n\r\nwhile True:\r\n ai_score, player_score = 0, 0\r\n game = ['R','P','S']\r\n rounds = int(input('Welcome to the Rock Paper Scissor game. 
Before starting, how many rounds would you like to choose : 3, 5 or 7?'))\r\n\r\n for round in range(rounds):\r\n player_choice = input('Please choose your move : R for rock, P for paper, S for scissors :')\r\n ai_choice = random.choice(game)\r\n if player_choice in game:\r\n if player_choice == ai_choice:\r\n print(\"it's a draw\")\r\n elif player_choice == 'R':\r\n if ai_choice == 'S':\r\n player_score += 1\r\n print(\"Rock('R') smashes scissors('S')! You win!\")\r\n elif ai_choice == 'P':\r\n ai_score += 1\r\n print(\"Paper covers rock! You lose.\")\r\n elif player_choice == 'P':\r\n if ai_choice == 'R':\r\n player_score += 1\r\n print(\"Paper('P') covers rock! You win!\")\r\n elif ai_choice == 'S':\r\n ai_score += 1\r\n print(\"Scissors cuts paper! You lose.\")\r\n elif player_choice == 'S':\r\n if ai_choice == 'P':\r\n player_score += 1\r\n print(\"Scissors('S') cuts paper! You win!\")\r\n elif ai_choice == 'R':\r\n ai_score += 1\r\n print(\"Rock smashes scissors! You lose.\")\r\n print(\"This is round %d\" % round)\r\n score = [ai_score, player_score]\r\n print(\"The score is %s\" % score)\r\n print(score)\r\n else:\r\n print('Please enter one of the 3 choices')\r\n ","repo_name":"Malone247/MY-GAME","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"28610220903","text":"#Impares naturais\r\na = int(input(\"Digite um número: \"))\r\nb = 1\r\nif a <= 0 :\r\n print(\"Numéro inválido\")\r\n exit()\r\nif a == 1:\r\n print(b)\r\nelse:\r\n print(b)\r\n while a != 0 :\r\n b = b + 2\r\n a = a - 1\r\n print(b)\r\nexit()\r\n","repo_name":"jpteixei/python","sub_path":"imparesnaturais.py","file_name":"imparesnaturais.py","file_ext":"py","file_size_in_byte":253,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"15579672601","text":"def score_analysis():\n scores = input(\"Enter the scores separated by a space: \").split()\n\n \n scores = [float(score) for score in scores]\n\n total_scores = len(scores)\n sum_scores = sum(scores)\n average = sum_scores / total_scores\n\n above_average = 0\n below_average = 0\n\n for score in scores:\n if score >= average:\n above_average += 1\n else:\n below_average += 1\n\n \n print(\"Average is:\", average)\n print(\"Number of scores above or equal to average:\", above_average)\n print(\"Number of scores below average:\", below_average)\n\n\nscore_analysis()\n","repo_name":"MiroZukina/UCDPA-PYTHON","sub_path":"Class 7/Analyze scores.py","file_name":"Analyze scores.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"72463231954","text":"import machine\nimport utime\nimport random\n\n\ndef play_tone(frequency):\n buzzer.duty_u16(1000)\n buzzer.freq(frequency)\n\n\ndef be_quiet():\n buzzer.duty_u16(0)\n\n\ndef initialize_leds(pin_numbers):\n return [machine.Pin(pin, machine.Pin.OUT) for pin in pin_numbers]\n\n\ndef choose_random_led(led_list):\n return random.choice(led_list)\n\n\ndef turn_on_led(led):\n led.value(1)\n\n\ndef turn_off_led(led):\n led.value(0)\n\n\ndef generate_random_delay():\n return random.uniform(0.1, 1)\n\n\ndef light_up_leds(led_list):\n for led in led_list:\n led.value(1)\n\n\ndef turn_off_all_leds(led_list):\n for led in led_list:\n led.value(0)\n\n\n# Define the GPIO pins connected to the LEDs\nled_pins = [] # Replace pin1, pin2, pin3 with 
the actual GPIO pin numbers\nfor i in range(0, 28):\n if i != 18:\n led_pins.append(i)\n\n# Set up the GPIO pins as output\nleds = initialize_leds(led_pins)\nbuzzer = machine.PWM(machine.Pin(18))\n\n\nwhile True:\n led1 = choose_random_led(leds)\n led2 = choose_random_led(leds)\n\n turn_on_led(led1)\n delay = generate_random_delay()\n utime.sleep(delay)\n\n turn_on_led(led2)\n delay = generate_random_delay()\n utime.sleep(delay)\n\n turn_off_led(led2)\n turn_off_led(led1)\n\n if led1 == led2:\n play_tone(988)\n light_up_leds(leds)\n delay = generate_random_delay()\n utime.sleep(delay)\n be_quiet()\n turn_off_all_leds(leds)\n utime.sleep(1)","repo_name":"Paul3103/LED-Matching","sub_path":"blinkingTaskSolution.py","file_name":"blinkingTaskSolution.py","file_ext":"py","file_size_in_byte":1443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"31328371587","text":"class Solution:\n def shortestPathBinaryMatrix(self, grid: List[List[int]]) -> int:\n if grid[0][0] == 1:\n return -1\n r,c = len(grid), len(grid[0])\n que = [(0,0)]\n vis = set([(0,0)])\n cnt = 0\n dirs= [(-1,-1), (-1,0), (-1,1), (0,-1), (0,1), (1,-1), (1,0),(1,1)]\n while que:\n for _ in range(len(que)):\n cur = que.pop(0)\n if cur ==(r-1,c-1):\n return cnt+1\n for i,j in dirs:\n x = cur[0]+i\n y = cur[1]+j\n if 0<=x should be inside the loop, otherwise TLE\n cnt +=1\n return -1","repo_name":"zdadadaz/coding_practice","sub_path":"1091_shortest_path_in_binary_matrix.py","file_name":"1091_shortest_path_in_binary_matrix.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"71937209235","text":"import pickle\nimport os\nfrom functools import wraps\nimport itertools as it\n\ndef write_tweets(tweets, filename):\n \"\"\"Write some useful tweets informations\"\"\"\n \n with open(filename, 'w') as out_file:\n for tweet in tweets:\n out_file.write('TEXT: ' + tweet.text.with_headings() + '\\n')\n out_file.write('HASHTAGS: ' + ' '.join(tweet.hashtags) + '\\n')\n if hasattr(tweet, 'exhibition'):\n out_file.write('EXHIBITION: ' + str(tweet.exhibition) +\n '\\n\\n')\n\ndef dump_tweets(tweets, filename):\n \"\"\"Dump tweets to a binary file\"\"\"\n \n os.makedirs(os.path.dirname(filename), exist_ok=True)\n with open(filename, 'wb') as out_file:\n pickle.dump(tweets, out_file, pickle.HIGHEST_PROTOCOL)\n\ndef load_tweets(filename):\n \"\"\"Load tweets from a binary file\"\"\"\n\n tweets = []\n with open(filename, 'rb') as in_file:\n tweets = pickle.load(in_file)\n return tweets\n\ndef get_io_args(argparser, output_suffix=''):\n argparser.add_argument('input_file')\n argparser.add_argument('-o', dest='output_file')\n args = argparser.parse_args()\n if not args.output_file:\n args.output_file = os.path.splitext(args.input_file)[0]+output_suffix\n return args\n\ndef group_by_exhibition(tweets):\n \"\"\"Returns a dictionary from exhibitions to tweets\"\"\"\n \n exh_to_tweets = {}\n exhibition_key = lambda x: x.exhibition\n tweets = sorted(tweets, key=exhibition_key)\n for exhibition, group in it.groupby(tweets, key=exhibition_key):\n exh_to_tweets[exhibition] = list(group)\n return exh_to_tweets\n \nif __name__ == '__main__':\n import argparse as ap\n \n argparser = ap.ArgumentParser(description=\"Write tweets to a text file\")\n args = get_io_args(argparser, '.txt')\n write_tweets(load_tweets(args.input_file), 
args.output_file)\n","repo_name":"carlo-nonato/GAvI","sub_path":"core/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"38445092077","text":"\"\"\"\r\n@created: Dec 29, 2017\r\n@Edited: Feb 27, 2018\r\n@author: Doron Veltzer\r\n\"\"\"\r\n\r\nimport functools\r\n\r\nimport numpy as np\r\nimport fractions\r\nimport math\r\nimport re\r\n\r\nimport sys\r\n\r\n\r\n# define useful methods\r\n# read line from file split it according to separator and convert it to type\r\ndef process_input_line(input_file, \r\n input_mapping=int,\r\n input_type_array=None,\r\n input_number=None,\r\n force_list=False,\r\n separator=' '):\r\n input_line = input_file.readline().rstrip()\r\n if input_number is None:\r\n input_vector = input_line.split(separator)\r\n else:\r\n input_vector = input_line.split(separator, input_number)\r\n output_vector = list(map(input_mapping, input_vector))\r\n if len(output_vector) == 1 and not force_list:\r\n return output_vector[0]\r\n else:\r\n return output_vector\r\n\r\n# print debug output to standard error file (since we are using standard input and output)\r\ndef eprint(*args, **kwargs):\r\n print(*args, file=sys.stderr, **kwargs)\r\n\r\n\r\ndef gcd(a, b):\r\n a = abs(a)\r\n b = abs(b)\r\n while a:\r\n a, b = b % a, a\r\n return b\r\n\r\n\r\ndef lcm(a, b):\r\n return (a * b) // gcd(a, b)\r\n\r\n\r\ndef gcd_list(v):\r\n return functools.reduce(gcd, v)\r\n\r\n\r\ndef lcm_list(v):\r\n return functools.reduce(lcm, v)\r\n\r\n\r\ndef identity(x):\r\n return x\r\n\r\n\r\n# define useful constants\r\n\r\n\r\ndef solve(input_file, output_file, error_file):\r\n # read case number\r\n t = process_input_line(input_file)\r\n\r\n # iterate on all cases\r\n for i in range(t):\r\n error_file.write('Solving problem #{}\\n'.format(i + 1))\r\n # read input\r\n game = process_input_line(input_file, str)\r\n c = int(game[0])\r\n combines = [None] * c\r\n for j in range(c):\r\n combines[j] = [sorted(game[j + 1][0:2]), game[j + 1][2]]\r\n d = int(game[c + 1])\r\n opposed = [None] * d\r\n for j in range(d):\r\n opposed[j] = game[c + j + 2]\r\n n = int(game[-2])\r\n s = game[-1] \r\n \r\n # print input\r\n #eprint(c, combines, d, opposed, n, s)\r\n\r\n # check input\r\n\r\n # calculate output\r\n sj = \"\"\r\n for j in range(n):\r\n sj += s[j]\r\n # try combines\r\n combining = True\r\n while combining:\r\n for k in range(len(sj) - 1):\r\n for combine in combines:\r\n if sorted(sj[k:k + 2]) == combine[0]:\r\n # combine\r\n sj = sj[:k] + combine[1] + sj[k + 2:]\r\n combining = True\r\n break\r\n else:\r\n combining = False\r\n if combining:\r\n break\r\n else:\r\n combining = False\r\n # try opposed\r\n for oppose in opposed:\r\n if all(ch in sj for ch in oppose):\r\n sj = \"\"\r\n break\r\n \r\n\r\n # set output\r\n output = '[' + ', '.join(sj) + ']'\r\n\r\n # print output\r\n output_file.write('Case #{}: {}\\n'.format(i + 1, output))\r\n output_file.flush()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n solve(sys.stdin, sys.stdout, sys.stderr)\r\n","repo_name":"veltzerdoron/GCJ","sub_path":"src/2011/qual/B.py","file_name":"B.py","file_ext":"py","file_size_in_byte":3374,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"24745177787","text":"#!python\nsplits = 'dummy test dev train'.split()\nimport os\nimport pathlib\nfrom tqdm import tqdm\nfor split in splits:\n fname = 
f'data/golden_corpus/{split}_result_adv.tsv'\n rawfile = open(fname)\n with os.popen(f'wc -l {fname}') as f:\n total = int(f.read().split()[0])\n\n fileptrs = {}\n for dirname, ext in [\n (\"collunits\", \"collunit\"),\n (\"paths\", \"path\"),\n (\"endelengths\", \"endelength\"),\n (\"units\", \"unit\"),\n (\"subwords\", \"subword\"),\n (\"delengths\", \"delength\"),\n (\"translation\", \"de\"),\n (\"wordlengths\", \"wordlen\"),\n (\"symbolunits\", \"symbolunit\"),\n (\"desubwords\", \"desubword\"),\n (\"lengths\", \"len\"),\n (\"endesubwords\", \"endesubword\"),\n (\"texts\", \"txt\"),\n ]:\n pathlib.Path(f'data/CoVoSTUnitsNew/{dirname}').mkdir(exist_ok=True, parents=True)\n fileptrs[dirname] = open(f'data/CoVoSTUnitsNew/{dirname}/{split}.{ext}', 'w')\n \n\n\n\n print(rawfile.readline())\n\n for line in tqdm(rawfile, total=total):\n linesplit = line.strip().split('\\t')\n for (datacol, ext), content in zip([\n (\"paths\", \"path\"),\n (\"texts\", \"txt\"),\n (\"translation\", \"de\"),\n (\"units\", \"unit\"),\n (\"lengths\", \"len\"),\n (\"subwords\", \"subword\"),\n \n ], linesplit):\n print(content, file=fileptrs[datacol])\n \n\n[\n (\"symbolunits\", \"symbolunit\"), # for i in units/*; do cat $i | python3 '/storage/LabJob/Projects/AudioWords/AudioSentencePiece/utils/unitfy.py' > symbolunits/$(basename $i .unit).symbolunit; done \n (\"collunits\", \"collunit\"), # mkdir collunits; for i in symbolunits/*; do cat $i | python3 '/storage/LabJob/Projects/AudioWords/AudioSentencePiece/utils/collapse.py' > collunits/$(basename $i .symbolunit).collunit; done\n\n (\"wordlengths\", \"wordlen\"), # zsh '/storage/LabJob/Projects/AudioWords/AudioSentencePiece/utils/word_len.sh'\n\n # zsh '/storage/LabJob/Projects/AudioWords/AudioSentencePiece/utils/split_len.sh'\n (\"delengths\", \"delength\"),\n (\"desubwords\", \"desubword\"),\n (\"endelengths\", \"endelength\"),\n (\"endesubwords\", \"endesubword\"),\n \n]\n","repo_name":"jeffeuxMartin/AudioSentencePiece","sub_path":"utils/splitagain.py","file_name":"splitagain.py","file_ext":"py","file_size_in_byte":2186,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"32895893850","text":"from unittest.mock import patch\nfrom datetime import timedelta\nfrom time import sleep\n\nfrom django.test import TestCase\n\nfrom core.tests.factories import (DatasetFactory, DatasetVersionFactory, CollectionFactory, DocumentFactory,\n ElasticIndexFactory)\nfrom core.tests.mocks import get_search_client_mock\nfrom core.models import ElasticIndex\nfrom core.tasks import sync_indices\n\n\ndef create_dataset_data(dataset):\n previous_version = DatasetVersionFactory.create(dataset=dataset, is_current=False)\n current_version = DatasetVersionFactory.create(dataset=dataset, version=\"0.0.2\")\n previous_edusources = CollectionFactory.create(dataset_version=previous_version, name=\"edusources\")\n previous_wikiwijs = CollectionFactory.create(dataset_version=previous_version, name=\"wikiwijs\")\n current_edusources = CollectionFactory.create(dataset_version=current_version, name=\"edusources\")\n current_wikiwijs = CollectionFactory.create(dataset_version=current_version, name=\"wikiwijs\")\n # Dutch documents\n DocumentFactory.create(dataset_version=previous_version, collection=previous_edusources)\n DocumentFactory.create(dataset_version=previous_version, collection=previous_wikiwijs)\n DocumentFactory.create(dataset_version=current_version, collection=current_edusources)\n 
DocumentFactory.create(dataset_version=current_version, collection=current_wikiwijs)\n sleep(1) # creates a 1s difference in modified_at datetimes (these are hard to mock with factory_boy)\n DocumentFactory.create(dataset_version=previous_version, collection=previous_edusources,\n reference=\"5be6dfeb-b9ad-41a8-b4f5-94b9438e4257\")\n DocumentFactory.create(dataset_version=previous_version, collection=previous_wikiwijs,\n reference=\"5be6dfeb-b9ad-41a8-b4f5-94b9438e4257\")\n DocumentFactory.create(dataset_version=current_version, collection=current_edusources,\n reference=\"5be6dfeb-b9ad-41a8-b4f5-94b9438e4257\")\n DocumentFactory.create(dataset_version=current_version, collection=current_wikiwijs,\n reference=\"5be6dfeb-b9ad-41a8-b4f5-94b9438e4257\")\n # English documents\n DocumentFactory.create(dataset_version=previous_version, collection=previous_edusources, language=\"en\")\n DocumentFactory.create(dataset_version=previous_version, collection=previous_wikiwijs, language=\"en\")\n DocumentFactory.create(dataset_version=current_version, collection=current_edusources, language=\"en\")\n DocumentFactory.create(dataset_version=current_version, collection=current_wikiwijs, language=\"en\")\n # Unknown documents\n DocumentFactory.create(dataset_version=previous_version, collection=previous_edusources, language=\"other\")\n DocumentFactory.create(dataset_version=previous_version, collection=previous_wikiwijs, language=\"other\")\n DocumentFactory.create(dataset_version=current_version, collection=current_edusources, language=\"other\")\n DocumentFactory.create(dataset_version=current_version, collection=current_wikiwijs, language=\"other\")\n\n return current_version, previous_version\n\n\ndef create_dataset_version_indices(dataset_version):\n pushed_at = dataset_version.created_at.replace(microsecond=0) + timedelta(seconds=1)\n for language in [\"nl\", \"en\", \"unk\"]:\n ElasticIndexFactory.create( # this gets ignored for inactive datasets\n name=f\"{dataset_version.dataset.name}-{dataset_version.version}-{dataset_version.id}\",\n dataset_version=dataset_version,\n language=language,\n pushed_at=pushed_at,\n configuration={}\n )\n return pushed_at\n\n\nclass TestSyncIndices(TestCase):\n\n search_client = get_search_client_mock(has_history=True)\n\n def setUp(self):\n super().setUp()\n datasets = {\n \"inactive\": DatasetFactory.create(name=\"inactive\", is_active=False),\n \"secondary\": DatasetFactory.create(name=\"secondary\"),\n \"primary\": DatasetFactory.create(name=\"primary\"),\n }\n self.pushed_ats = {}\n for dataset_type, dataset in datasets.items():\n dataset_versions = create_dataset_data(dataset)\n for dataset_version in dataset_versions:\n pushed_at = create_dataset_version_indices(dataset_version)\n self.pushed_ats[dataset_version.id] = pushed_at\n sleep(3)\n\n @patch(\"core.models.search.index.get_opensearch_client\", return_value=search_client)\n @patch(\"core.models.search.index.streaming_bulk\")\n def test_sync_indices(self, streaming_bulk_mock, get_search_client_mock):\n sync_indices()\n # Check if data was send to search engine\n for args, kwargs in streaming_bulk_mock.call_args_list:\n client, docs = args\n index_name, version, version_id, language = kwargs[\"index\"].split(\"-\")\n if language == \"nl\":\n self.assertEqual(\n len(list(docs)), 2,\n \"Expected both an edusources and wikwijs Document to get pushed to nl\"\n )\n elif language == \"en\":\n self.assertEqual(\n len(list(docs)), 2,\n \"Expected both an edusources and wikwijs Document to get pushed to en\"\n )\n 
elif language == \"unk\":\n self.assertEqual(\n len(list(docs)), 2,\n \"Expected both an edusources and wikwijs Document to get pushed to unk\"\n )\n self.assertEqual(index_name, \"primary\")\n self.assertEqual(version, \"002\")\n # Check that pushed_at was updated\n for index in ElasticIndex.objects.filter(name__contains=\"primary-0.0.2\"):\n self.assertGreater(index.pushed_at, self.pushed_ats[index.dataset_version_id])\n for index in ElasticIndex.objects.exclude(name__contains=\"primary-0.0.2\"):\n self.assertEqual(index.pushed_at, self.pushed_ats[index.dataset_version_id],\n \"Only the latest DatasetVersions of the newest Dataset should get pushed\")\n\n @patch(\"core.models.search.index.get_opensearch_client\", return_value=search_client)\n @patch(\"core.models.search.index.streaming_bulk\")\n def test_sync_indices_new(self, streaming_bulk_mock, get_search_client_mock):\n ElasticIndex.objects.update(pushed_at=None) # this makes all indices look like they're just created\n sync_indices()\n self.assertEqual(streaming_bulk_mock.call_count, 0)\n for index in ElasticIndex.objects.all():\n self.assertIsNone(index.pushed_at)\n","repo_name":"surfedushare/search-portal","sub_path":"harvester/core/tests/tasks/test_sync_indices.py","file_name":"test_sync_indices.py","file_ext":"py","file_size_in_byte":6641,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"84"} +{"seq_id":"40954985894","text":"from functools import cmp_to_key\nfrom enum import Enum\nfrom typing import Optional\nimport numpy as np\nimport torch\n\nfrom yolo.nms import YoloNMS\n\nfrom . import globalvar as G\nfrom yolo.converter import Yolo2BBox\n\n\n__all__ = ['InterpolationMethod', 'CalculationMetrics', 'ObjectDetectionMetricsCalculator']\n\n\nclass InterpolationMethod(Enum):\n\tInterpolation_11 = 1\n\tInterpolation_101 = 2\n\n\nclass CalculationMetrics():\n\tdef __init__(self, IoU: float, confidence: float, mustbe_FP: bool, is_difficult: bool):\n\t\t\"\"\"Initialization for `CalculationMetrics`\n\n\t\tArgs:\n\t\t\tIoU (float): intersection over union with ground truth\n\t\t\tconfidence (float): detection confidence\n\t\t\tmustbe_FP (bool): if there is already another detection having higher IoU with the same ground truth, then this detection must be False Positive\n\t\t\tis_difficult (bool): if the according ground truth is difficult\n\t\t\"\"\"\n\t\tself.IoU = IoU\n\t\tself.confidence = confidence\n\t\tself.mustbe_FP = mustbe_FP\n\t\tself.is_difficult = is_difficult\n\n\ndef compare_metrics(metrics1: CalculationMetrics, metrics2: CalculationMetrics):\n\tif metrics1.confidence == metrics2.confidence:\n\t\treturn metrics2.IoU - metrics1.IoU\n\treturn metrics2.confidence - metrics1.confidence\n\n\nclass ObjectDetectionMetricsCalculator():\n\t\"\"\"data\n\t[ # classes\n\t\t{\n\t\t\t\"data\": [],\n\t\t\t\"detection\": ,\n\t\t\t\"truth\": \n\t\t}\n\t]\n\t\"\"\"\n\n\tdef __init__(self, num_classes: int, confidence_thres: float):\n\t\t\"\"\"ObjectDetectionMetricsCalculator Initialization\n\n\t\tArgs:\n\t\t\tnum_classes (int): number of classes detector can classify\n\t\t\tconfidence_thres (float): confidence threshold. if the detection's confidence is smaller than the threshold, it would not be counted as a detection. 
In other words, it would be neither TP nor FP.\n\t\t\"\"\"\n\t\t# initialize data\n\t\tself.data = [{\"data\": [], \"detection\": 0, \"truth\": 0} for _ in range(num_classes)]\n\t\tself.confidence_thres = confidence_thres\n\n\n\tdef _add_data(self, pred: torch.Tensor, truth: torch.Tensor):\n\t\t\"\"\"Add single image data\n\n\t\tArgs:\n\t\t\tpred (torch.Tensor): detection prediction (S, S, (5+num_classes)*B)\n\t\t\ttruth (torch.Tensor): ground truth (YOLO v2 format) (S, S, (5+num_classes)*B)\n\t\t\"\"\"\n\t\tnum_classes = G.get('num_classes')\n\n\t\t# Optimized version\n\t\t# Time Complexity: O(valid detection count)\n\t\t\n\t\t# obtain objectiveness, categories, and confidences\n\t\tscore_hat, cat_hat = pred[:, 5:(5 + num_classes)].max(dim=1)\n\t\tconfidence_hat = pred[:, 4]\n\n\t\t# filter out the detection with confidence lower than the threshold\n\t\tpred = pred[confidence_hat * score_hat > self.confidence_thres]\n\t\t# update the score and category\n\t\tscore_hat, cat_hat = pred[:, 5:(5 + num_classes)].max(dim=1)\n\t\tconfidence_hat = pred[:, 4]\n\t\tprob_hat = score_hat * confidence_hat\n\n\t\t# filter out true ground truth\n\t\ttruth = truth[truth[:, 4] > 0]\n\t\t# obtain the ground truth category\n\t\t_, cat_truth = truth[:, 5:(5 + num_classes)].max(dim=1)\n\t\t# obtain the difficult ground truth (they are encoded 1+1e-7)\n\t\ttruth_difficult = truth[:, 4] > 1\n\n\t\t# obtain size\n\t\tN = pred.shape[0]\n\t\tM = truth.shape[0]\n\n\t\t# expand data for IoU matrix calculation\n\t\t# [N, 5+num_classes] => [N, 1, 5+num_classes] => [N, M, 5+num_classes]\n\t\tpred = pred.unsqueeze(1).expand(N, M, 5 + num_classes)\n\t\t# [M, 5+num_classes] => [1, M, 5+num_classes] => [N, M, 5+num_classes]\n\t\ttruth = truth.unsqueeze(0).expand(N, M, 5 + num_classes)\n\n\t\t# calculate IoU\n\t\t# [N, M]\n\t\tx1, y1, x2, y2 = truth[:, :, 0], truth[:, :, 1], truth[:, :, 2], truth[:, :, 3]\n\t\tx1_hat, y1_hat, x2_hat, y2_hat = pred[:, :, 0], pred[:, :, 1], pred[:, :, 2], pred[:, :, 3]\n\t\t# [N, M]\n\t\twi = torch.min(x2, x2_hat) - torch.max(x1, x1_hat)\n\t\thi = torch.min(y2, y2_hat) - torch.max(y1, y1_hat)\n\t\twi = torch.max(wi, torch.zeros_like(wi))\n\t\thi = torch.max(hi, torch.zeros_like(hi))\n\t\t# [N, M]\n\t\tintersection = wi * hi\n\t\tunion = (x2 - x1) * (y2 - y1) + (x2_hat - x1_hat) * (y2_hat - y1_hat) - intersection\n\t\tIoU = intersection / (union + 1e-16)\n\n\t\tfor c in range(num_classes):\n\t\t\t# filter out the detection with category not equal to c\n\t\t\tpred_cat_idx = (cat_hat == c)\n\t\t\ttruth_cat_idx = (cat_truth == c)\n\n\t\t\tcategory_difficult = truth_difficult[truth_cat_idx]\n\n\t\t\t# update ground truth\n\t\t\tself.data[c]['truth'] += int(truth_cat_idx.sum()) - int(category_difficult.sum())\n\n\t\t\tif pred_cat_idx.sum() == 0: continue\n\t\t\tif truth_cat_idx.sum() == 0:\n\t\t\t\tfor conf in prob_hat[pred_cat_idx]:\n\t\t\t\t\tself.data[c]['data'].append(CalculationMetrics(IoU=0, confidence=float(conf), mustbe_FP=True, is_difficult=False))\n\t\t\t\tcontinue\n\n\t\t\tmustbe_FP = torch.zeros((truth_cat_idx.count_nonzero()), dtype=torch.bool, device=pred.device)\n\n\t\t\t# [N', M']\n\t\t\tIoU_C = IoU[pred_cat_idx][:, truth_cat_idx]\n\t\t\t# Choose the maximum IoU for each detection (not ground truth)\n\t\t\t# [N']\n\t\t\tIoU_C_max, truth_idx = IoU_C.max(dim=1)\n\t\t\t# sort IoU_C_max in descending order\n\t\t\tsort_idx = torch.argsort(IoU_C_max, descending=True)\n\n\t\t\tfor j in sort_idx:\n\t\t\t\t# add data\n\t\t\t\tself.data[c]['data'].append(CalculationMetrics(float(IoU_C_max[j]), 
float(prob_hat[pred_cat_idx][j]), bool(mustbe_FP[truth_idx[j]]), bool(category_difficult[truth_idx[j]])))\n\t\t\t\t# update must be False Positive (FP)\n\t\t\t\tmustbe_FP[truth_idx[j]] = True\n\t\t\t\t# update detection\n\t\t\t\tself.data[c]['detection'] += 1\n\n\t\t# Naive Algorithm (Not executed)\n\t\t# Time Complexity: O(N^2) = O(S^4 * B^2 * C^2)\n\t\treturn\n\t\tchoose_truth_index = [None for _ in range(pred.shape[0])]\n\t\tiou = [0 for _ in range(pred.shape[0])]\n\n\t\tfor i in range(pred.shape[0]):\n\t\t\tscore_hat, cat_hat = pred[i][5:(5 + num_classes)].max(dim=0)\n\t\t\tconfidence_hat = pred[i][4]\n\t\t\t# filter by confidence threshold\n\t\t\tif confidence_hat * score_hat < self.confidence_thres: continue\n\n\t\t\tx1hat, y1hat, x2hat, y2hat = pred[i][0:4]\n\n\t\t\tfor j in range(truth.shape[0]):\n\t\t\t\tscore, cat = truth[j][5:(5 + num_classes)].max(dim=0)\n\t\t\t\tconfidence = truth[j][4]\n\t\t\t\t# filter true truth\n\t\t\t\tif confidence < 1: continue\n\t\t\t\t# judge whether is same class\n\t\t\t\tif cat != cat_hat: continue\n\t\t\t\t# calculate IoU\n\t\t\t\tx1, y1, x2, y2 = truth[j][0:4]\n\n\t\t\t\twi = min(x2, x2hat) - max(x1, x1hat)\n\t\t\t\thi = min(y2, y2hat) - max(y1, y1hat)\n\t\t\t\twi = max(0, wi)\n\t\t\t\thi = max(0, hi)\n\t\t\t\tintersection = wi * hi\n\t\t\t\tunion = (x2 - x1) * (y2 - y1) + (x2hat - x1hat) * (y2hat - y1hat) - intersection\n\t\t\t\tthis_iou = intersection / (union + 1e-16)\n\t\t\t\t# determine whether to choose this ground truth\n\t\t\t\tif iou[i] is None: choose = True\n\t\t\t\telif iou[i] < this_iou: choose = True\n\t\t\t\telse: choose = False\n\t\t\t\t# if choose, assign value\n\t\t\t\tif choose:\n\t\t\t\t\tiou[i] = float(this_iou)\n\t\t\t\t\tchoose_truth_index[i] = j\n\t\t# init a bool array for judging mustbe_FP later\n\t\ttruth_chosen = [False for _ in range(len(truth))]\n\t\t# sort according to IoU\n\t\tsort_idx = np.argsort(iou)[::-1]\n\t\t# add into metrics\n\t\tfor i in sort_idx:\n\t\t\tscore, cat = pred[i][5:(5 + num_classes)].max(dim=0)\n\t\t\tconfidence = pred[i][4]\n\t\t\t# filter by confidence threshold\n\t\t\tif confidence * score < self.confidence_thres: continue\n\n\t\t\ttruth_index = choose_truth_index[i]\n\t\t\tif truth_index == None: \n\t\t\t\tmustbe_FP = True\n\t\t\telif truth_chosen[truth_index]:\n\t\t\t\tmustbe_FP = True\n\t\t\telse: \n\t\t\t\tmustbe_FP = False\n\t\t\t\ttruth_chosen[choose_truth_index[i]] = True\n\t\t\t\n\t\t\tself.data[cat]['data'].append(CalculationMetrics(iou[i], float(confidence * score), mustbe_FP))\n\n\t\t\t# update detection statistics\n\t\t\tself.data[cat]['detection'] += 1\n\t\t# update ground truth statistics\n\t\tfor i in range(truth.shape[0]):\n\t\t\tscore, cat = truth[i][5:(5 + num_classes)].max(dim=0)\n\t\t\tconfidence = truth[i][4]\n\t\t\tif confidence < 1: continue\n\t\t\tself.data[cat]['truth'] += 1\n\n\n\tdef add_data(self, pred: torch.Tensor, truth: torch.Tensor):\n\t\t\"\"\"Add data for calculating metrics\n\n\t\tArgs:\n\t\t\tpred (torch.Tensor): detection prediction, can be either batch result or single result (#, S, S, (5+num_classes)*B) or (S, S, (5+num_classes)*B)\n\t\t\ttruth (torch.Tensor): ground truth (YOLO v2 format), can be either batch result or single result (#, S, S, (5+num_classes)*B) or (S, S, (5+num_classes)*B)\n\t\t\"\"\"\n\t\tconverter = Yolo2BBox()\n\t\tnms = YoloNMS()\n\t\t\n\t\tpred = converter(pred)\n\t\ttruth = converter(truth)\n\n\t\t# check shape\n\t\tif len(pred.shape) == 2:\n\t\t\tpred.unsqueeze_(0)\n\t\tif len(truth.shape) == 2:\n\t\t\ttruth.unsqueeze_(0)\n\t\t# 
assert size\n\t\tassert pred.shape[0] == truth.shape[0]\n\t\tcnt = pred.shape[0]\n\n\t\tfor i in range(cnt):\n\t\t\tself._add_data(nms(pred[i]), truth[i])\n\n\n\tdef calculate_precision_recall(self, iou_thres: float, class_idx: int) -> list:\n\t\t\"\"\"Calculate Precision-Recall Data according to IoU threshold\n\n\t\tArgs:\n\t\t\tiou_thres (float): IoU threshold\n\t\t\tclass_idx (int): Class Index\n\n\t\tReturns:\n\t\t\tlist: `[{\"precision\": , \"recall\": }]`\n\t\t\"\"\"\n\t\tret = []\n\t\t# retrieve count\n\t\ttruth_cnt = self.data[class_idx]['truth']\n\t\t# accumulated TP\n\t\tacc_TP = 0\n\t\t# accumulated difficult count\n\t\tacc_difficult = 0\n\t\t# sort metrics by confidence\n\t\tdata = sorted(self.data[class_idx]['data'], key=cmp_to_key(compare_metrics))\n\t\tfor i, metrics in enumerate(data):\n\t\t\tif metrics.IoU >= iou_thres and not metrics.mustbe_FP and not metrics.is_difficult:\n\t\t\t\tacc_TP += 1\n\t\t\tif metrics.is_difficult:\n\t\t\t\tacc_difficult += 1\n\t\t\tif i + 1 - acc_difficult > 0:\n\t\t\t\tret.append({\n\t\t\t\t\t'precision': acc_TP / (i + 1 - acc_difficult),\n\t\t\t\t\t'recall': acc_TP / truth_cnt\n\t\t\t\t})\n\t\treturn ret\n\n\n\tdef calculate_average_precision(self, itpl_option: InterpolationMethod, iou_thres: Optional[float]=None, class_idx: Optional[int]=None, prl: Optional[list]=None) -> float:\n\t\t\"\"\"Calculate Average Precision (AP)\n\n\t\tArgs:\n\t\t\titpl_option (InterpolationMethod): Interpolation Method\n\t\t\tiou_thres (float | None): IoU Threshold [Optional if given prl]\n\t\t\tclass_idx (int | None): Class Index [Optional if given prl]\n\t\t\tprl (list | None): Precision-Recall Data [Optional if given iou_thres and class_idx]\n\n\t\tReturns:\n\t\t\tfloat: AP of specified class using provided interpolation method\n\t\t\"\"\"\n\t\tif prl is None:\n\t\t\tprl = self.calculate_precision_recall(iou_thres=iou_thres, class_idx=class_idx)\n\n\t\tif itpl_option == InterpolationMethod.Interpolation_11:\n\t\t\tintp_pts = [0.1 * i for i in range(11)]\n\t\telif itpl_option == InterpolationMethod.Interpolation_101:\n\t\t\tintp_pts = [0.01 * i for i in range(101)]\n\t\telse:\n\t\t\traise Exception('Unknown Interpolation Method')\n\n\t\tmax_dict = {}\n\t\tgmax = 0\n\n\t\tfor pr in prl[::-1]:\n\t\t\tgmax = max(gmax, pr['precision'])\n\t\t\tmax_dict[pr['recall']] = gmax\n\n\t\tif len(max_dict) < 1: return 0.\n\n\t\tmax_keys = max_dict.keys()\n\t\tmax_keys = sorted(max_keys)\n\n\t\tkey_ptr = len(max_keys) - 2\n\t\tlast_key = max_keys[-1]\n\n\t\tAP = 0\n\n\t\tfor query in intp_pts[::-1]:\n\t\t\tif key_ptr < 0:\n\t\t\t\tif query > last_key:\n\t\t\t\t\tans = 0\n\t\t\t\telse:\n\t\t\t\t\tans = max_dict[last_key]\n\t\t\telse:\n\t\t\t\tif query > last_key:\n\t\t\t\t\tans = 0\n\t\t\t\telif query > max_keys[key_ptr]:\n\t\t\t\t\tans = max_dict[last_key]\n\t\t\t\telse:\n\t\t\t\t\twhile key_ptr >= 0:\n\t\t\t\t\t\tif query > max_keys[key_ptr]:\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\tlast_key = max_keys[key_ptr]\n\t\t\t\t\t\tkey_ptr -= 1\n\t\t\t\t\tans = max_dict[last_key]\n\t\t\tAP += ans\n\n\t\tAP /= len(intp_pts)\n\t\treturn AP\n\n\n\tdef calculate_mAP(self, iou_thres: float, itpl_option: InterpolationMethod) -> float:\n\t\t\"\"\"calculate mAP using given IoU threshold and interpolation method\n\n\t\tArgs:\n\t\t\tiou_thres (float): IoU threshold\n\t\t\titpl_option (InterpolationMethod): Interpolation Method\n\n\t\tReturns:\n\t\t\tfloat: Mean Average Precision (mAP)\n\t\t\"\"\"\n\t\tmAP = 0\n\t\tfor c in range(len(self.data)):\n\t\t\tmAP += 
self.calculate_average_precision(iou_thres=iou_thres, class_idx=c, itpl_option=itpl_option)\n\t\tmAP /= len(self.data)\n\n\t\treturn mAP\n\n\n\tdef calculate_VOCmAP(self) -> float:\n\t\t\"\"\"calculate VOCmAP: mAP with IoU thres = .5, interpolate by 0.1\n\n\t\tReturns:\n\t\t\tfloat: VOC mAP\n\t\t\"\"\"\n\t\treturn self.calculate_mAP(0.5, InterpolationMethod.Interpolation_11)\n\n\n\tdef calculate_COCOmAP50(self) -> float:\n\t\t\"\"\"calculate COCO mAP @50 (AP@.5): expand VOCmAP50, interpolate by 0.01\n\n\t\tReturns:\n\t\t\tfloat: AP@.5\n\t\t\"\"\"\n\t\treturn self.calculate_mAP(0.5, InterpolationMethod.Interpolation_101)\n\n\n\tdef calculate_COCOmAP75(self) -> float:\n\t\t\"\"\"calculate COCO mAP @75 (AP@.75): AP@.5, but with IoU thres = .75\n\n\t\tReturns:\n\t\t\tfloat: AP@.75\n\t\t\"\"\"\n\t\treturn self.calculate_mAP(0.75, InterpolationMethod.Interpolation_101)\n\n\n\tdef calculate_COCOmAP(self) -> float:\n\t\t\"\"\"calculate COCO mAP: expand AP@.5 and AP@.75. IoU thres from .5 to .95\n\n\t\tReturns:\n\t\t\tfloat: COCO mAP\n\t\t\"\"\"\n\t\tious = [0.5 + 0.05 * i for i in range(10)]\n\t\tcoco_map = 0\n\t\tfor iou in ious:\n\t\t\tcoco_map += self.calculate_mAP(iou, InterpolationMethod.Interpolation_101)\n\t\tcoco_map /= len(ious)\n\t\treturn coco_map\n","repo_name":"JeffersonQin/yolo-v2-pytorch","sub_path":"utils/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":12453,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"84"} +{"seq_id":"8289616584","text":"import wechatsogou\nfrom wechatsogou import WechatSogouAPI, WechatSogouConst\nimport time\nws_api = wechatsogou.WechatSogouAPI(proxies={\n \"http\": \"127.0.0.1:9999\",\n \"https\": \"127.0.0.1:9999\",\n})\n\nkeywords=['娱乐', '八卦', '新闻', '明星']\n\ndef we_spider():\n for word in keywords:\n for i in range(1,10):\n time.sleep(1)\n res=ws_api.search_article(word,page=i,timesn=WechatSogouConst.search_article_time.day,article_type=WechatSogouConst.search_article_type.all)\n for j in range(0,len(res)):\n print(res[j]['article']['title'])\n\n time.sleep(60*60*24)##one day\n\nwe_spider()","repo_name":"ambigudus/social_media_searcher","sub_path":"wechatspider.py","file_name":"wechatspider.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"15109635378","text":"import pinecone\nfrom colorama import Fore, Style\n\nfrom autogpt.llm import get_ada_embedding\nfrom autogpt.logs import logger\nfrom autogpt.memory.base import MemoryProviderSingleton\n\n\nclass PineconeMemory(MemoryProviderSingleton):\n def __init__(self, cfg):\n pinecone_api_key = cfg.pinecone_api_key\n pinecone_region = cfg.pinecone_region\n pinecone.init(api_key=pinecone_api_key, environment=pinecone_region)\n dimension = 1536\n metric = \"cosine\"\n pod_type = \"p1\"\n table_name = \"auto-gpt\"\n # this assumes we don't start with memory.\n # for now this works.\n # we'll need a more complicated and robust system if we want to start with\n # memory.\n self.vec_num = 0\n\n try:\n pinecone.whoami()\n except Exception as e:\n logger.typewriter_log(\n \"FAILED TO CONNECT TO PINECONE\",\n Fore.RED,\n Style.BRIGHT + str(e) + Style.RESET_ALL,\n )\n logger.double_check(\n \"Please ensure you have setup and configured Pinecone properly for use.\"\n + f\"You can check out {Fore.CYAN + Style.BRIGHT}\"\n \"https://github.com/Torantulino/Auto-GPT#-pinecone-api-key-setup\"\n f\"{Style.RESET_ALL} to ensure you've set up everything 
correctly.\"\n )\n exit(1)\n\n if table_name not in pinecone.list_indexes():\n logger.typewriter_log(\n \"Connecting Pinecone. This may take some time...\", Fore.MAGENTA, \"\"\n )\n pinecone.create_index(\n table_name, dimension=dimension, metric=metric, pod_type=pod_type\n )\n self.index = pinecone.Index(table_name)\n\n def add(self, data):\n vector = get_ada_embedding(data)\n # no metadata here. We may wish to change that long term.\n self.index.upsert([(str(self.vec_num), vector, {\"raw_text\": data})])\n _text = f\"Inserting data into memory at index: {self.vec_num}:\\n data: {data}\"\n self.vec_num += 1\n return _text\n\n def get(self, data):\n return self.get_relevant(data, 1)\n\n def clear(self):\n self.index.delete(deleteAll=True)\n return \"Obliviated\"\n\n def get_relevant(self, data, num_relevant=5):\n \"\"\"\n Returns all the data in the memory that is relevant to the given data.\n :param data: The data to compare to.\n :param num_relevant: The number of relevant data to return. Defaults to 5\n \"\"\"\n query_embedding = get_ada_embedding(data)\n results = self.index.query(\n query_embedding, top_k=num_relevant, include_metadata=True\n )\n sorted_results = sorted(results.matches, key=lambda x: x.score)\n return [str(item[\"metadata\"][\"raw_text\"]) for item in sorted_results]\n\n def get_stats(self):\n return self.index.describe_index_stats()\n","repo_name":"kaqijiang/Auto-GPT-ZH","sub_path":"autogpt/memory/pinecone.py","file_name":"pinecone.py","file_ext":"py","file_size_in_byte":2931,"program_lang":"python","lang":"en","doc_type":"code","stars":2310,"dataset":"github-code","pt":"85"} +{"seq_id":"25083560811","text":"# coding = utf-8\nfrom libs.common import try_catch\nfrom libs.common import Common\nimport re\n\n\nclass Smart:\n\tdevice_name = \"\"\n\tattr_multi_list = []\n\tattr_list = []\n\ttype = \"SATA\"\n\n\t@try_catch\n\tdef __init__(self, device_name=\"\"):\n\t\t# use per-instance lists so SMART data is not shared across Smart objects\n\t\tself.attr_multi_list = []\n\t\tself.attr_list = []\n\t\tself.device_name = device_name\n\t\tsmart_tmp = Common.exe_shell(\"smartctl -a %s\" % device_name)\n\t\tsearch_sata = re.search(r'SATA', smart_tmp, re.M)\n\t\tsearch_nvme = re.search(r'NVM', smart_tmp, re.M)\n\t\tsearch_sas = re.search(r'SAS', smart_tmp, re.M)\n\t\tif search_sata:\n\t\t\tself.type = \"SATA\"\n\t\t\tattr_tmp_list = re.findall(r'.* 0x.*-*', smart_tmp)\n\t\t\t#attr_tmp_list = Common.exe_shell(\"smartctl -A %s|grep -P '0x'\" % self.device_name).splitlines()\n\t\t\tfor i in attr_tmp_list:\n\t\t\t\ttmp = i.strip().split()\n\t\t\t\tself.attr_list.append(tmp[1].strip())\n\t\t\t\tself.attr_multi_list.append(tmp[:10])\n\t\tif search_nvme:\n\t\t\tself.type = \"NVME\"\n\t\tif search_sas:\n\t\t\tself.type = \"SAS\"\n\n\t@try_catch\n\tdef smart_to_dict(self):\n\t\tdictb = {}\n\t\tif \"SATA\" in self.type:\n\t\t\tfor i in self.attr_multi_list:\n\t\t\t\tin_dict = {\n\t\t\t\t\t\"ID\": i[0],\n\t\t\t\t\t\"FLAG\": i[2],\n\t\t\t\t\t\"VALUE\": i[3],\n\t\t\t\t\t\"WORST\": i[4],\n\t\t\t\t\t\"THRESH\": i[5],\n\t\t\t\t\t\"TYPE\": i[6],\n\t\t\t\t\t\"UPDATED\": i[7],\n\t\t\t\t\t\"WHEN_FAILED\": i[8],\n\t\t\t\t\t\"RAW_VALUE\": i[9]\n\t\t\t\t}\n\t\t\t\tdictb[i[1]] = in_dict\n\t\telif \"SAS\" in self.type:\n\t\t\tpass\n\t\tassert isinstance(dictb, dict)\n\t\treturn dictb\n\n\nclass DiskAttr:\n\tmodel = \"\"\n\tfw = \"\"\n\tsn = \"\"\n\tinterface = \"\"\n\tdevice_name = \"\"\n\tfrom_chip = \"\"\n\tsmart = None\n\n\tdef __init__(self, devicename):\n\t\tself.device_name = devicename\n\t\tself.smart = Smart(self.device_name)\n\t\tsmart_str = Common.exe_shell(\"smartctl -a %s\" % self.device_name)\n\t\tlsscsi_str = 
Common.exe_shell(\"lsscsi|grep -P '%s *$'\" % self.device_name)\n\t\tself.sn = Common.exe_shell(\"echo \\\"%s\\\"|grep 'Serial Number'|awk '{print$3}'\" % smart_str).strip()\n\t\tself.model = Common.exe_shell(\"echo \\\"%s\\\"|awk '{print$4}'\" % lsscsi_str).strip()\n\t\tself.fw = Common.exe_shell(\"echo \\\"%s\\\"|awk '{print$5}'\" % lsscsi_str).strip()\n\t\tself.interface = Common.exe_shell(\"echo \\\"%s\\\"|awk '{print$3}'\" % lsscsi_str).strip()\n\n\t@try_catch\n\tdef attr_to_dict(self):\n\t\tdicta = {\n\t\t\t'model': self.model,\n\t\t\t'from_chip': self.from_chip,\n\t\t\t'fw': self.fw,\n\t\t\t'interface': self.interface,\n\t\t\t'sn': self.sn,\n\t\t\t'smart': self.smart.smart_to_dict()\n\t\t}\n\t\tassert isinstance(dicta, dict)\n\t\treturn dicta\n","repo_name":"StevyZheng/foxcli-python","sub_path":"storage/disk.py","file_name":"disk.py","file_ext":"py","file_size_in_byte":2301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"4285398269","text":"import blogger\r\nimport deviantart\r\nimport facebook\r\nimport flickr\r\nimport googleplus\r\nimport instagram\r\nimport tumblr\r\nimport mastodonxyz\r\n\r\nimport mouse_keyboard_input as kmi\r\nimport scrseek\r\n\r\nfrom time import sleep\r\nimport os\r\nimport inspect\r\n\r\nwin_key=0x5B\r\ntab=0x09\r\nctrl=0x11\r\nshift_key=0x10\r\npage_down=0x22\r\nenter_key=0x0D\r\nesc_key=0x1B\r\na_key=0x41\r\nc_key=0x43\r\ni_key=0x49\r\nv_key=0x56\r\nleft_arrow=0x25\r\nup_arrow=0x26\r\nright_arrow=0x27\r\ndown_arrow=0x28\r\nbrowser = \"chrome\"\r\nimage_preview = True\r\n\r\npath_core=os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\r\n\r\n\r\ndef Clear_ScreensaverAndDialog():\r\n kmi.TapKey(esc_key)\r\n sleep(3)\r\n kmi.TapKey(esc_key)\r\n sleep(3)\r\n kmi.TapKey(esc_key)\r\n\r\ndef Click_Chrome():\r\n #click google chrome\r\n os.chdir(path_core)\r\n chrome_btn=scrseek.positionOf('chrome_taskbar.png')\r\n chrome_btn=[x+30 for x in chrome_btn]\r\n kmi.Click(*chrome_btn)\r\n sleep(.21)\r\n kmi.HoldKey(win_key,up_arrow)\r\n sleep(1)\r\n\r\ndef Click_Addressbar():\r\n #click address bar\r\n kmi.Click(420,50)\r\n\r\ndef Device_Mode_Activate():\r\n os.chdir(path_core)\r\n kmi.HoldKey(ctrl,shift_key,i_key)\r\n sleep(10)\r\n dm_pos=scrseek.positionOf('devicemode_unselected.png')\r\n kmi.Click(dm_pos[0]+50,dm_pos[1]+15)\r\n\r\ndef Device_Mode_Deactivate():\r\n os.chdir(path_core)\r\n dm_pos=scrseek.positionOf('devicemode_selected.png')\r\n kmi.Click(dm_pos[0]+50,dm_pos[1]+15)\r\n sleep(5)\r\n kmi.HoldKey(ctrl,shift_key,i_key)\r\n\r\ndef Blogger_Post(filename):\r\n #blogger section\r\n Click_Addressbar()\r\n sleep(2)\r\n blogger.Paste_bloggerAddress()\r\n sleep(7)\r\n blogger.Submit_Blogger(filename)\r\n\r\ndef DeviantArt_Post(filename):\r\n #deviantart\r\n Click_Addressbar()\r\n sleep(2)\r\n deviantart.Paste_Address()\r\n sleep(7)\r\n deviantart.Submit_DA(filename)\r\n\r\ndef Facebook_Post(filename):\r\n #facebook\r\n Click_Addressbar()\r\n sleep(2)\r\n facebook.Paste_FacebookAddress()\r\n sleep(7)\r\n facebook.Submit_FB(filename)\r\n\r\ndef Flickr_Post(filename):\r\n #flickr\r\n Click_Addressbar()\r\n sleep(2)\r\n flickr.Paste_FlickrAddress()\r\n sleep(7)\r\n flickr.Submit_FL(filename)\r\n\r\ndef GooglePlus_Post(filename):\r\n #google plus\r\n Click_Addressbar()\r\n sleep(2)\r\n googleplus.Paste_GooglePlusAddress()\r\n sleep(7)\r\n googleplus.Submit_GP(filename)\r\n\r\ndef Instagram_Post(filename):\r\n #instagram\r\n Device_Mode_Activate()\r\n sleep(2)\r\n 
Click_Addressbar()\r\n    sleep(2)\r\n    instagram.Paste_InstagramAddress()\r\n    sleep(7)\r\n    instagram.Submit_IG(filename)\r\n    sleep(2)\r\n    Device_Mode_Deactivate()\r\n\r\ndef Tumblr_Post(filename):\r\n    #tumblr\r\n    Click_Addressbar()\r\n    sleep(2)\r\n    tumblr.Paste_TumblrAddress()\r\n    sleep(7)\r\n    tumblr.Submit_TB(filename)\r\n\r\ndef Mastodonxyz_Post(filename):\r\n    #mastodon.xyz\r\n    Click_Addressbar()\r\n    sleep(2)\r\n    mastodonxyz.Paste_mastodonxyzAddress()\r\n    sleep(7)\r\n    mastodonxyz.Submit_mastodonxyz(filename)\r\n\r\ndef Submit_All(filename):\r\n    os.chdir(path_core)\r\n    #Set up\r\n    Click_Chrome()\r\n    sleep(7)\r\n    #blogger section\r\n    Blogger_Post(filename)\r\n    sleep(7)\r\n    #deviantart\r\n    DeviantArt_Post(filename)\r\n    sleep(7)\r\n    #facebook\r\n    Facebook_Post(filename)\r\n    sleep(7)\r\n    #flickr\r\n    Flickr_Post(filename)\r\n    sleep(7)\r\n    #google plus\r\n    GooglePlus_Post(filename)\r\n    sleep(7)\r\n    #tumblr\r\n    Tumblr_Post(filename)\r\n    sleep(7)\r\n    #instagram\r\n    Instagram_Post(filename)\r\n\r\ndef Submit_Folder(folder,delay=120):\r\n    os.chdir(path_core)\r\n    Click_Chrome()\r\n    sleep(3)\r\n    for file in os.listdir(folder):\r\n        if '.jpg' in file:\r\n            print(file)\r\n            #Submit file from folder\r\n            Submit_All(folder+\"\\\\\"+file)\r\n            sleep(delay)\r\n\r\ndef test_mode():\r\n    sleep(3)\r\n    Click_Chrome()\r\n    sleep(3)\r\n    mastodonxyz.Submit_Folder(\"K:\\\\art\\\\script_drawing\\\\x-men\")","repo_name":"J216/socialblaster","sub_path":"core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":3900,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"25217544496","text":"from yandex_music import Client\nfrom yandex_music.utils.captcha_response import CaptchaResponse\nfrom yandex_music.rotor.station import Station\nfrom yandex_music.rotor.station_tracks_result import StationTracksResult\nfrom yandex_music.track.track import Track\nfrom yandex_music.download_info import DownloadInfo\n\nfrom random import randint\n\nfrom typing import Callable, List, Optional\n\n\nclass Music:\n    def __init__(self, token: Optional[str]=None, username: Optional[str]=None, password: Optional[str]=None, captcha_callback: Optional[Callable[[CaptchaResponse], str]]=None, station: Optional[str]=\"user:onyourwave\") -> None:\n        if captcha_callback:\n            self._captcha_callback = captcha_callback\n\n        if token:\n            try:\n                self._client: Client = Client.from_token(\n                    token = token,\n                    report_new_fields = False\n                )\n\n            except:\n                if not username or not password:\n                    raise Exception(\"No Yandex.Music account credentials\")\n\n                self._client: Client = Client.from_credentials(\n                    username = username,\n                    password = password,\n                    captcha_callback = self._captcha_callback,\n                    report_new_fields = False\n                )\n\n        else:\n            if not username or not password:\n                raise Exception(\"No Yandex.Music account credentials\")\n\n            self._client: Client = Client.from_credentials(\n                username = username,\n                password = password,\n                captcha_callback = self._captcha_callback,\n                report_new_fields = False\n            )\n\n        self.station: Station = self._client.rotor_station_info(\n            station = station  # use the requested station instead of always the hardcoded default\n        )[0].station\n\n        self.station_id: str = \"{type_}:{tag}\".format(\n            type_ = self.station.id.type,\n            tag = self.station.id.tag\n        )\n\n        self.station_from: str = self.station.id_for_from\n\n        self.play_id: str = None\n        self.index: int = 0\n        self.current_track: Track = None\n        self.station_tracks: StationTracksResult = None\n        self.on_replay: bool = False\n        self.is_playing_track: bool = False\n\n    def _captcha_callback(self, captcha: 
CaptchaResponse) -> str:\n        return input(\n            \"{captcha}\\nEnter captcha: \".format(\n                captcha = captcha.x_captcha_url\n            )\n        )\n\n    def search_tracks(self, query: str) -> List[Track]:\n        return self._client.search(\n            text = query,\n            type_ = \"track\"\n        ).tracks.results\n\n    def track_download_url(self, track: Track) -> str:\n        download_info: DownloadInfo = sorted(\n            filter(\n                self._sort_tracks_codec,\n                track.get_download_info()\n            ),\n            key=self._sort_tracks_kbps\n        )[0]\n\n        return download_info.get_direct_link()\n\n    def _sort_tracks_codec(self, download_info: DownloadInfo) -> bool:\n        return download_info.codec == \"mp3\"\n\n    def _sort_tracks_kbps(self, download_info: DownloadInfo) -> int:\n        return -download_info.bitrate_in_kbps\n\n    def start_radio(self) -> Track:\n        self.__update_radio_batch()\n\n        self.current_track = self.__update_current_track()\n\n        return self.current_track\n\n    def play_next(self) -> Track:\n        self.__send_play_end_track(\n            track = self.current_track,\n            play_id = self.play_id\n        )\n\n        self.__send_play_end_radio(\n            track = self.current_track,\n            batch_id = self.station_tracks.batch_id\n        )\n\n        self.index += 1\n\n        if self.index >= len(self.station_tracks.sequence):\n            self.__update_radio_batch(\n                queue = self.current_track.track_id\n            )\n\n        self.current_track: Track = self.__update_current_track()\n\n        return self.current_track\n\n    def __update_radio_batch(self, queue: Optional[str]=None):\n        self.index: int = 0\n\n        self.station_tracks: StationTracksResult = self._client.rotor_station_tracks(\n            station = self.station_id,\n            queue = queue\n        )\n\n        self.__send_start_radio(\n            batch_id = self.station_tracks.batch_id\n        )\n\n    def __update_current_track(self) -> Track:\n        self.play_id: str = self.__generate_play_id()\n\n        track: Track = self._client.tracks([\n            self.station_tracks.sequence[self.index].track.track_id\n        ])[0]\n\n        self.__send_play_start_track(\n            track = track,\n            play_id = self.play_id\n        )\n\n        self.__send_play_start_radio(\n            track = track,\n            batch_id = self.station_tracks.batch_id\n        )\n\n        return track\n\n    def __send_start_radio(self, batch_id: str):\n        self._client.rotor_station_feedback_radio_started(\n            station = self.station_id,\n            from_ = self.station_from,\n            batch_id = batch_id\n        )\n\n    def __send_play_start_track(self, track: Track, play_id: str):\n        total_seconds: float = track.duration_ms / 1000\n\n        self._client.play_audio(\n            track_id = track.id,\n            from_ = \"desktop_win-home-playlist_of_the_day-playlist-default\",\n            album_id = track.albums[0].id,\n            play_id = play_id,\n            track_length_seconds = 0,\n            total_played_seconds = 0,\n            end_position_seconds = total_seconds,\n        )\n\n    def __send_play_start_radio(self, track: Track, batch_id: str):\n        self._client.rotor_station_feedback_track_started(\n            station = self.station_id,\n            track_id = track.id,\n            batch_id = batch_id\n        )\n\n    def __send_play_end_track(self, track: Track, play_id: str):\n        played_seconds: float = track.duration_ms / 1000\n        total_seconds: float = track.duration_ms / 1000\n\n        self._client.play_audio(\n            track_id = track.id,\n            from_ = \"desktop_win-home-playlist_of_the_day-playlist-default\",\n            album_id = track.albums[0].id,\n            play_id = play_id,\n            track_length_seconds = int(total_seconds),\n            total_played_seconds = played_seconds,\n            end_position_seconds = total_seconds,\n        )\n\n    def __send_play_end_radio(self, track: Track, batch_id: str):\n        played_seconds: float = track.duration_ms / 1000\n\n        self._client.rotor_station_feedback_track_finished(\n            station = 
self.station_id,\n track_id = track.id,\n total_played_seconds = played_seconds,\n batch_id = batch_id\n )\n\n @staticmethod\n def __generate_play_id():\n return \"%s-%s-%s\" % (randint(1, 999), randint(1, 999), randint(1, 999))\n","repo_name":"arynyklas/ArynMusic","sub_path":"music.py","file_name":"music.py","file_ext":"py","file_size_in_byte":6892,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"20102200149","text":"def dijkstra_v2(start, matrix):\n INF = 10 ** 18\n n = len(matrix)\n used = [False] * n\n dist = [INF] * n\n dist[start] = 0\n while True:\n v = -1\n for u in range(n):\n if not used[u] and (v == -1 or dist[u] < dist[v]):\n v = u\n if v == -1:\n break\n used[v] = True\n for nxt_v in range(n):\n dist[nxt_v] = min(dist[nxt_v], dist[v] + matrix[v][nxt_v])\n return dist\n","repo_name":"Neterukun1993/Library","sub_path":"Graph/ShortestPath/dijkstra_v2.py","file_name":"dijkstra_v2.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"85"} +{"seq_id":"32708446585","text":"from botinterface.preprocessor import MessagePreprocessor\nimport mock_tokenizer\nimport mock_stopwordremover\nimport mock_stemmer\n\nWORDTOREMOVE = \"removeme\"\n\npreprocessor = MessagePreprocessor(tokenizer=mock_tokenizer.MockTokenizer(),\n stopwordRemover=mock_stopwordremover.MockSingleStopwordRemover(WORDTOREMOVE),\n stemmer=mock_stemmer.MockStemmer())\n\ndef test_init():\n\n assert preprocessor is not None\n\n\ndef test_runstemmer():\n\n message = [\"I\",\"do\",\"not\",\"believe\",\"in\",\"fairies\"]\n expected = [\"\",\"d\",\"no\",\"believ\",\"i\",\"fairie\"]\n\n actual = preprocessor._stem(message)\n\n assert expected == actual\n\ndef test_tokenize():\n\n message = \"I do not believe in fairies\"\n expected = [\"I\",\"do\",\"not\",\"believe\",\"in\",\"fairies\"]\n\n actual = preprocessor._tokenize(message)\n\n assert expected == actual\n\ndef test_removestopwords():\n\n tokens = [WORDTOREMOVE, \"from\", \"this\", \"message\"]\n\n result = preprocessor._removeStopwords(tokens)\n\n assert len(result) == len(tokens) - 1\n\ndef test_process():\n message = \"The message is so very full of stopwords in so and such ways {}\".format(WORDTOREMOVE)\n\n expected = \"Th messag i s ver ful o stopword i s an suc way\"\n actual = preprocessor.process(message)\n\n assert expected == actual\n\n#\n# def test_runstemmer():\n#\n# message = [\"I\",\"do\",\"not\",\"believe\",\"in\",\"fairies\"]\n# expected = [\"i\",\"do\",\"not\",\"believ\",\"in\",\"fairy\"]\n#\n# actual = preprocessor._stem(message)\n#\n# assert expected == actual\n#\n# def test_tokenize():\n#\n# message = \"I do not believe in fairies\"\n# expected = [\"I\",\"do\",\"not\",\"believe\",\"in\",\"fairies\"]\n#\n# actual = preprocessor._tokenize(message)\n#\n# assert expected == actual\n#\n# def test_join():\n# tokens = [\"I\",\"do\",\"not\",\"believe\",\"in\",\"fairies\"]\n# expected = \"I do not believe in fairies\"\n#\n# actual = preprocessor._join(tokens)\n#\n# assert expected == actual\n#\n# def test_removestopwords():\n#\n# tokens = [\"the\", \"message\", \"is\", \"so\", \"very\", \"full\", \"of\", \"stopwords\",\n# \"in\", \"so\",\"and\",\"such\",\"ways\"]\n#\n# result = preprocessor._removeStopwords(tokens)\n#\n# assert len(result) < len(tokens)\n#\n# def test_process():\n# message = \"The message is so very full of stopwords in so and such ways\"\n#\n# expected = \"mess is ful stopword way\"\n# actual = 
preprocessor.process(message)\n#\n# assert expected == actual\n","repo_name":"andreallorerung/peach-chatbot-alpha","sub_path":"FlaskWebProject/chatbot/tests/tests_unit/test_botinterface/test_preprocessor.py","file_name":"test_preprocessor.py","file_ext":"py","file_size_in_byte":2441,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"39226695802","text":"from model import *\n\n# Top level function that interprets an entire program. It creates the\n# initial environment that's used for storing variables.\n\ndef interpret_program(model):\n # Make the initial environment (a dict)\n env = [{}]\n interpret(model, env)\n\n\n# Internal function to interpret a node in the environment\n@singledispatch\ndef interpret(node, env):\n raise RuntimeError(f\"Can't interpret {node}\")\n\neval_literals = {\n Float: float,\n Integer: int}\n\ntype_to_literal = {\n 'float': Float,\n 'int': Integer,\n 'char': Char}\n\neval_binops = {\n '>': lambda x, y: x > y,\n '<': lambda x, y: x < y,\n '<=': lambda x, y: x <= y,\n '>=': lambda x, y: x >= y,\n '==': lambda x, y: x == y,\n '+': lambda x, y: x + y,\n '-': lambda x, y: x - y,\n '*': lambda x, y: x * y,\n '/': lambda x, y: x / y\n }\n\n@interpret.register(Literal)\ndef _(node, env):\n if node.value is None:\n return node\n return eval_literals[type(node)](node.value)\n\n@interpret.register(BinOp)\ndef _(node, env):\n return eval_binops[node.op](interpret(node.left, env), interpret(node.right, env))\n\n@interpret.register(Body)\ndef _(node, env):\n for statement in node.statements:\n res = interpret(statement, env)\n return res\n\n@interpret.register(Print)\ndef _(node, env):\n return print(interpret(node.value, env))\n\n@interpret.register(VarDecl)\ndef _(node, env):\n env[-1][node.name] = interpret(node.value, env)\n\n@interpret.register(ConstAssign)\ndef _(node, env):\n if node.name in env[-1]:\n raise NameError(f\"Const {node} already assigned\")\n env[-1][node.name] = interpret(node.value, env)\n\n@interpret.register(VarAssign)\ndef _(node, env):\n if node.name in env[-1]:\n env[-1][node.name] = interpret(node.value, env)\n return\n raise NameError(f\"Node {node} not initialized\")\n\n@interpret.register(Ref)\ndef _(node, env):\n for ev in env[::-1]:\n if node.name in ev:\n return ev[node.name]\n raise NameError(f\"Node {node} not initialized\")\n\n@interpret.register(If)\ndef _(node, env):\n if interpret(node.cond, env):\n interpret(node.body, env)\n elif node.cdr is not None:\n interpret(node.cdr, env)\n\n@interpret.register(While)\ndef _(node, env):\n while interpret(node.cond, env):\n interpret(node.body, env)\n\n@interpret.register(CompoundExpression)\ndef _(node, env):\n env_inner = {}\n for e in env:\n for k, v in e.items():\n env_inner[k] = v\n return interpret(node.body, [env_inner])\n\n@interpret.register(Func)\ndef _(node, env):\n def f(env, *binds):\n for arg, bind in zip(node.args, binds):\n env[-1][arg.name] = bind\n try:\n return interpret(node.body, env)\n except ReturnValue as r:\n return r.args[0]\n env[-1][node.name] = f\n\n@interpret.register(Prog)\ndef _(node, env=[{}]):\n for statement in node.statements:\n interpret(statement, env)\n if 'main' in env[-1]:\n return interpret(Call('main'), env)\n\n@interpret.register(Call)\ndef _(node, env):\n for e in env[::-1]:\n if node.name in e:\n return e[node.name](env + [{}], *(interpret(arg, env) for arg in node.args))\n raise NameError(f\"Function {node} not declared\")\n\nclass ReturnValue(Exception):\n 
pass\n\n@interpret.register(Return)\ndef _(node, env):\n    raise ReturnValue(interpret(node.value, env))\n\n@interpret.register(Struct)\ndef _(node, env):\n    raise NotImplementedError\n","repo_name":"aeftimia/dabeaz-compilers-2020","sub_path":"interp.py","file_name":"interp.py","file_ext":"py","file_size_in_byte":3515,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"12370029086","text":"import unittest\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\n\nfrom lab_3.pages.base_page import BasePage\nfrom lab_3.pages.register_page import RegisterPage\n\n\nclass TestRegisterPageOpen(unittest.TestCase):\n    def setUp(self) -> None:\n        self.driver = webdriver.Chrome()\n        self.base_page = BasePage(self.driver)\n        self.driver.get(self.base_page.URL)\n\n    def tearDown(self) -> None:\n        self.driver.quit()\n\n    def test_should_succeed_when_open_from_account_dropdown(self):\n        self.base_page.get_account().click()\n        self.base_page.get_account_dropdown_register().click()\n        self.assertEqual(RegisterPage.URL, self.driver.current_url)\n\n    def test_should_succeed_when_open_from_account_login_page(self):\n        self.base_page.get_account().click()\n        self.base_page.get_account_dropdown_login().click()\n        self.driver.find_element(By.XPATH, '//*[@id=\"login-form\"]/div/div[1]/div[2]/a').click()\n        self.assertEqual(RegisterPage.URL, self.driver.current_url)\n","repo_name":"Ravenen/automation-testing-labs","sub_path":"lab_3/tests/test_register_page_open.py","file_name":"test_register_page_open.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"15875203920","text":"import flask, requests, replitdb\n\ndb = replitdb.Client()\n\napp = flask.Flask(__name__)\n\n@app.route('/')\ndef main():\n\treturn flask.render_template('index.html', url=None)\n\n@app.route('/Invalid URL')\ndef invalid():\n\treturn flask.redirect('/')\n\n@app.route('/', methods=['POST'])\ndef add():\t\n\tnewURL = flask.request.form['add']\n\tvalid = False\n\tallURLs = db.view('allURLs')\n\t\n\ttry:\n\t\trequests.get(newURL)\n\t\tvalid = True\n\texcept:\n\t\turl = 'Invalid URL'\n\tif valid:\n\t\tlastNum = int(allURLs[-1], base=16)\n\t\tnewNum = str(hex(lastNum + 1))[2:]\n\t\tallURLs.append(newNum)\n\t\tdb.set(allURLs=allURLs)\n\t\tdb.set_dict({newNum:newURL})\n\t\turl = 'https://url.vivaansa.repl.co' + newNum\n\treturn flask.render_template('index.html', url=url, valid=valid)\n\n@app.route('/<num>')\ndef url(num):\n\turl = db.view(num)\n\tif url is not None:\n\t\treturn flask.redirect(url)\n\telse:\n\t\treturn flask.render_template('index.html')\n\n@app.route('/favicon.ico')\ndef favicon():\n\treturn flask.send_file('favicon.ico')\n\n\napp.run('0.0.0.0')\n","repo_name":"simplelivinghighthinking/url-shorten","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"42685271838","text":"import sqlalchemy\nimport psycopg2\nfrom credentials import postgresql as credentials\nfrom pprint import pprint\n\n\ndef get_engine(user, password, host, port, db):\n    url = f\"postgresql://{user}:{password}@{host}:{port}/{db}\"\n    engine = sqlalchemy.create_engine(url)\n    return engine\n\n\nengine = get_engine(credentials[\"pguser\"],\n                    credentials[\"pgpassword\"],\n                    credentials[\"pghost\"],\n                    credentials[\"pgport\"],\n                    credentials[\"pgdb\"])\n\nconnection = engine.connect()\n\n# 
1. Name and year of albums released in 2018\nselect_year = connection.execute(\"\"\"\n SELECT name_album, year_album\n FROM albums\n WHERE year_album = 2018;\n\"\"\").fetchall()\npprint(f\"Name and year of albums released in 2018: {select_year}\")\n\n# 2. Name and duration of the longest track\nselect_duration = connection.execute(\"\"\"\n SELECT name_tracks, duration_tracks\n FROM tracks\n WHERE duration_tracks = (SELECT MAX(duration_tracks) FROM tracks);\n\"\"\").fetchone()\nprint()\npprint(f\"Name and duration of the longest track: {select_duration}\")\n\n# OR\n\nselect_duration_1 = connection.execute(\"\"\"\n SELECT name_tracks, duration_tracks\n FROM tracks\n ORDER BY duration_tracks DESC\n LIMIT 1\n\"\"\").fetchall()\nprint()\npprint(f\"Name and duration of the longest track: {select_duration_1}\")\n\n# 3. Name of the tracks with duration not less than 3.05\nselect_3_05 = connection.execute(\"\"\"\n SELECT name_tracks, duration_tracks\n FROM tracks\n WHERE duration_tracks >= 03.05\n\"\"\").fetchall()\nprint()\npprint(f\"Name of the tracks with duration not less than 3.05: {select_3_05}\")\n\n# 4. Name of collections released between 2018 and 2020\nselect_2018_2020 = connection.execute(\"\"\"\n SELECT name_collection\n FROM collection\n WHERE year_collection BETWEEN 2018 AND 2020;\n\"\"\").fetchall()\nprint()\npprint(f\"Name of collections released between 2018 and 2020: {select_2018_2020}\")\n\n# 5. Nickname of singers with one word name\nselect_nickname = connection.execute(\"\"\"\n SELECT nickname FROM singers\n WHERE nickname NOT LIKE '%% %%';\n\"\"\").fetchall()\nprint()\npprint(f\"Nickname of singers with one word name: {select_nickname}\")\n\n# 6. Name of the tracks with word \"my\"\nselect_track = connection.execute(\"\"\"\n SELECT name_tracks FROM tracks\n WHERE name_tracks LIKE '%%my%%';\n\"\"\").fetchall()\nprint()\npprint(f\"Name of the tracks with word 'my': {select_track}\")\n\n\n","repo_name":"AnastasiaLunina/SQL","sub_path":"5. 
Select_Join/SELECT_SINGLE.py","file_name":"SELECT_SINGLE.py","file_ext":"py","file_size_in_byte":2408,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"85"} +{"seq_id":"30687619146","text":"from typing import TYPE_CHECKING\n\nfrom curio import subprocess\n\nfrom ..cmdy_defaults import STDOUT, STDERR\nfrom ..cmdy_exceptions import CmdyActionError\n\nif TYPE_CHECKING:\n from ..cmdy_bakeable import Bakeable\n\n\ndef vendor(bakeable: \"Bakeable\"):\n \"\"\"Vendor the plugins with the bakeable._plugin_factory\"\"\"\n\n @bakeable._plugin_factory.register\n class PluginPipe:\n \"\"\"Plugin: pipe\n Allow piping from one command to another\n `cmdy.ls().pipe() | cmdy.cat()`\n \"\"\"\n\n @bakeable._plugin_factory.add_property(bakeable.CmdyResult)\n def piped_cmds(self):\n \"\"\"Get cmds that along the piping path\n\n Example:\n ```python\n c = cmdy.echo(123).p() | cmdy.cat()\n c.piped_cmds == ['echo 123', 'cat']\n ```\n \"\"\"\n piped_from = self.holding.data.get(\"pipe\", {}).get(\"from\")\n if piped_from:\n return piped_from.piped_cmds + [self.cmd]\n return [self.cmd]\n\n @bakeable._plugin_factory.add_property(bakeable.CmdyResult)\n def piped_strcmds(self):\n \"\"\"Get cmds that along the piping path\n\n Example:\n ```python\n c = cmdy.echo(123).p() | cmdy.cat()\n c.piped_cmds == ['echo 123', 'cat']\n ```\n \"\"\"\n piped_from = self.holding.data.get(\"pipe\", {}).get(\"from\")\n if piped_from:\n return piped_from.piped_strcmds + [self.strcmd]\n return [self.strcmd]\n\n @bakeable._plugin_factory.add_property(bakeable.CmdyHolding)\n def piped_cmds_(self):\n \"\"\"Get cmds that along the piping path\n\n Example:\n ```python\n c = cmdy.echo(123).p() | cmdy.cat()\n c.piped_cmds == ['echo 123', 'cat']\n ```\n \"\"\"\n piped_from = self.data.get(\"pipe\", {}).get(\"from\")\n if piped_from:\n return piped_from.piped_cmds + [self.cmd]\n return [self.cmd]\n\n @bakeable._plugin_factory.add_property(bakeable.CmdyHolding)\n def piped_strcmds_(self):\n \"\"\"Get cmds that along the piping path\n\n Example:\n ```python\n c = cmdy.echo(123).p() | cmdy.cat()\n c.piped_cmds == ['echo 123', 'cat']\n ```\n \"\"\"\n piped_from = self.data.get(\"pipe\", {}).get(\"from\")\n if piped_from:\n return piped_from.piped_strcmds + [self.strcmd]\n return [self.strcmd]\n\n @bakeable._plugin_factory.add_method(bakeable.CmdyHolding)\n def __or__(\n self,\n other: bakeable.CmdyHolding, # type: ignore\n ):\n\n if not self.data.get(\"pipe\"):\n raise CmdyActionError(\n \"Piping options have been consumed or trying \"\n \"to pipe from non-piping command\"\n )\n\n assert isinstance(other, bakeable.CmdyHolding), (\n \"Can only pipe to \" \"a CmdyHolding object.\"\n )\n\n other_pipe_data = other.data.setdefault(\"pipe\", {})\n other_pipe_data[\"from\"] = self\n\n # We shall not check the event, because the purpose here\n # is to clear the EVENT\n # But we need to check if other is also a piping command\n # which will be set if .pipe() is called\n if not other._onhold(check_event=False) and not other.data.get(\n \"pipe\", {}\n ).get(\"which\"):\n self.bakeable._event.clear()\n return other.run()\n\n return other\n\n @bakeable._plugin_factory.hold_then(\"p\")\n def pipe(self, which=None):\n \"\"\"Allow command piping\"\"\"\n if self.data.get(\"pipe\"):\n raise CmdyActionError(\"Unconsumed piping action.\")\n\n # initialize data\n which = which or STDOUT\n self.data.pipe.which = which\n\n if (which == STDOUT and self.stdout != subprocess.PIPE) or (\n which == STDERR and self.stderr != 
subprocess.PIPE\n ):\n\n raise CmdyActionError(\"Cannot pipe from a redirected PIPE.\")\n self.bakeable._event.set()\n return self\n\n @bakeable._plugin_factory.add_method(bakeable.CmdyHolding)\n def run(self, wait=None):\n \"\"\"From from prior piped command\"\"\"\n orig_run = self._original(\"run\")\n\n if not self.data.get(\"pipe\", {}).get(\"from\"):\n return orig_run(self, wait)\n\n prior = self.data.pipe[\"from\"]\n prior_result = prior.run(False)\n self.data.pipe[\"from\"] = prior\n\n self.stdin = (\n prior_result.proc.stdout\n if prior.data.pipe.which == STDOUT\n else prior_result.proc.stderr\n )\n\n ret = orig_run(self, wait)\n prior_result.wait()\n return ret\n\n return PluginPipe()\n","repo_name":"pwwang/cmdy","sub_path":"cmdy/cmdy_plugins/pipe.py","file_name":"pipe.py","file_ext":"py","file_size_in_byte":5076,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"85"} +{"seq_id":"9534427553","text":"import tensorflow as tf\nfrom tensorflow.image import ResizeMethod\nfrom sklearn.model_selection import train_test_split\nfrom tensorflow.keras.applications.vgg16 import VGG16\n\n\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\nfrom tensorflow.keras.models import Sequential\n\nimport time\nimport cv2\nimport os\nimport random\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nimport imutils\nimport matplotlib.image as mpimg\nfrom collections import OrderedDict\nfrom skimage import io, transform\nfrom math import *\nimport xml.etree.ElementTree as ET\n\n\ndef load_files(xmlpath, folderpath):\n tree = ET.parse(xmlpath)\n root = tree.getroot()\n image_filenames = []\n landmarks = []\n crops = []\n imagename = []\n\n root_dir = folderpath\n\n for filename in root[2]:\n image_filenames.append(os.path.join(root_dir, filename.attrib['file']))\n crops.append(filename[0].attrib)\n imagename.append(filename.attrib['file'])\n\n landmark = []\n for num in range(68):\n x_coordinate = int(filename[0][num].attrib['x'])\n y_coordinate = int(filename[0][num].attrib['y'])\n landmark.append([x_coordinate, y_coordinate])\n landmarks.append(landmark)\n return image_filenames, landmarks, crops, imagename\n\n\ndef read_image(image_pat):\n image = cv2.imread(image_pat)[:, :, ::-1]\n return image\n\n\ndef crop_face(image, landmarks, crops):\n left = int(crops['left'])\n top = int(crops['top'])\n width = int(crops['width'])\n height = int(crops['height'])\n\n image = tf.image.crop_to_bounding_box(\n image, left//2, top//2, width+top, height+left)\n img_shape = np.array(image).shape\n landmarks = tf.constant(landmarks, dtype='int32') - \\\n tf.constant([[left//2, top//2]], dtype='int32')\n image = tf.image.resize(image, (100, 100), method=ResizeMethod.BILINEAR)\n landmarks = (tf.constant(landmarks, dtype='int32') //\n tf.constant([[img_shape[1]//100, img_shape[0]//100]], dtype='int32'))\n\n return image, landmarks\n\n\nxmlpath = \"/home/marwen/Desktop/landmarks_project/ibug_300W_large_face_landmark_dataset/labels_ibug_300W_train.xml\"\nfolderpath = '/home/marwen/Desktop/landmarks_project/ibug_300W_large_face_landmark_dataset'\nimage_filenames, landmarks, crops, imagename = load_files(xmlpath, folderpath)\n\ndit = {'images': [], 'landmarks': [], 'imagename': []}\nfor i in range(len(image_filenames)):\n try:\n image = read_image(image_filenames[i])\n\n image, landmark = crop_face(image, landmarks[i], crops[i])\n dit['images'].append(image)\n dit['landmarks'].append(landmark)\n dit['imagename'].append(imagename[i])\n except:\n 
pass  # skip samples whose image or crop data fails to load\n\nassert len(dit['landmarks']) != 0\nassert len(dit['images']) == len(dit['landmarks'])\n\n\nfor i in range(20):\n    print(dit['imagename'][i])\n    x = []\n    y = []\n    t = dit['landmarks'][i]\n    im = dit['images'][i]/255\n    for i in t:\n        x.append(i[0])\n        y.append(i[1])\n    plt.figure(figsize=(10, 10))\n    plt.imshow(im)\n    plt.scatter(x, y, s=50, c='g')\n\n\nX = np.array(dit[\"images\"])\ny = []\nfor i in np.array(dit[\"landmarks\"]):\n    y.append(i.flatten())\ny = np.array(y)\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1)\nX_train, X_val, y_train, y_val = train_test_split(\n    X_train, y_train, test_size=0.2)\nprint(len(X_train), 'X train examples')\nprint(len(X_val), 'X validation examples')\nprint(len(X_test), 'X test examples')\n\n# Create the model\n\nimg_size = 100\nnum_classes = 136\n\nmodel = Sequential([\n    layers.experimental.preprocessing.Rescaling(\n        1./255, input_shape=(img_size, img_size, 3)),\n    layers.Conv2D(16, 3, padding='same', activation='relu'),\n    layers.MaxPooling2D(),\n    layers.Conv2D(32, 3, padding='same', activation='relu'),\n    layers.MaxPooling2D(),\n    layers.Conv2D(64, 3, padding='same', activation='relu'),\n    layers.Flatten(),\n    layers.Dense(128, activation='relu'),\n    layers.Dense(256, activation='relu'),\n    layers.Dense(num_classes)\n])\n\nmodel.compile(optimizer='adam',\n              loss=tf.keras.losses.MeanSquaredError(),\n              metrics=['accuracy'])\n\n# Model summary\n\nmodel.summary()\n\n\nepochs = 20\nhistory = model.fit(\n    X_train, y_train,\n    validation_data=(X_val, y_val),\n    epochs=epochs\n)\n\n\nacc = history.history['accuracy']\nval_acc = history.history['val_accuracy']\n\nloss = history.history['loss']\nval_loss = history.history['val_loss']\n\nepochs_range = range(epochs)\n\nplt.figure(figsize=(8, 8))\nplt.subplot(1, 2, 1)\nplt.plot(epochs_range, acc, label='Training Accuracy')\nplt.plot(epochs_range, val_acc, label='Validation Accuracy')\nplt.legend(loc='lower right')\nplt.title('Training and Validation Accuracy')\n\nplt.subplot(1, 2, 2)\nplt.plot(epochs_range, loss, label='Training Loss')\nplt.plot(epochs_range, val_loss, label='Validation Loss')\nplt.legend(loc='upper right')\nplt.title('Training and Validation Loss')\nplt.show()\n\n\ny_predict = model.predict(X_test)\nfor j in range(10):\n    x = []\n    y = []\n    xt = []\n    yt = []\n    t = y_predict[j]\n    s = y_test[j]\n    for i in range(len(t)):\n        if i % 2 == 0:\n            x.append(t[i])\n            xt.append(s[i])\n\n        else:\n            y.append(t[i])\n            yt.append(s[i])\n\n    plt.figure(figsize=(10, 10))\n    plt.imshow(X_test[j]/255)\n    plt.scatter(xt, yt, s=200, c='g')\n    plt.scatter(x, y, s=200, c='r')\n    plt.show()\n\n\n# Evaluate the model on the test data using `evaluate`\nprint(\"Evaluate on test data\")\nresults = model.evaluate(X_test, y_test, batch_size=128)\nprint(\"test loss, test acc:\", results)\n","repo_name":"Marwen-93/landmarksfaceproject-","sub_path":"landmarksFace.py","file_name":"landmarksFace.py","file_ext":"py","file_size_in_byte":5621,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"5116441392","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\nimport argparse\nimport glob\nimport os\n\nfrom l134k.structure import Structure\nfrom l134k.energy_data import freq_scale_factors\nfrom l134k.util import pickle_dump\n\n\ndef main():\n    data_dir, out_file, ignore_file, names_file = parse_args()\n    structs = []\n\n    ignore = set()\n    if ignore_file is not None:\n        with open(ignore_file) as f:\n            for line in f:\n                try:\n                    idx = int(line.split()[0])\n                except 
(IndexError, ValueError):\n continue\n else:\n ignore.add(idx)\n\n names = None\n if names_file is not None:\n with open(names_file) as f:\n names = [line.strip() for line in f]\n\n print('Parsing files...')\n if names is None:\n files = glob.iglob(os.path.join(data_dir, '*.xyz'))\n else:\n files = [os.path.join(data_dir, name) for name in names]\n\n for path in files:\n s = Structure(path=path)\n if s.index in ignore:\n continue\n if 'F' not in s.elements: # Don't use fluorine containing molecules\n s.get_enthalpy_of_formation(freq_scale_factor=freq_scale_factors[s.model_chemistry],\n apply_bond_corrections=False)\n structs.append(s)\n\n pickle_dump(out_file, structs, compress=True)\n print('Dumped {} structures to {}'.format(len(structs), out_file))\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\n description='Parse 134k dataset.',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter\n )\n parser.add_argument('data_dir', metavar='DIR', help='Path to 134k data directory')\n parser.add_argument('out_file', metavar='FILE', help='Path to output file')\n parser.add_argument('--ignore', metavar='FILE', help='Path to file containing list of indices to ignore')\n parser.add_argument('--names', metavar='FILE', help='Path to file containing list of names to use')\n args = parser.parse_args()\n\n data_dir = args.data_dir\n out_file = args.out_file\n ignore_file = args.ignore\n names_file = args.names\n\n return data_dir, out_file, ignore_file, names_file\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"cgrambow/learn134k","sub_path":"scripts/parse_134k.py","file_name":"parse_134k.py","file_ext":"py","file_size_in_byte":2242,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"7071619472","text":"from tkinter import *\nroot = Tk()\n\ndef browsefunc():\n filename = filedialog.askopenfilename()\n pathlabel.config(text=filename)\n\nbrowsebutton = Button(root, text=\"Browse\", command=browsefunc)\nbrowsebutton.pack()\n\npathlabel = Label(root)\npathlabel.pack()\nroot.mainloop()","repo_name":"sarthak7838/DeepSteg","sub_path":"bhaw.py","file_name":"bhaw.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"14993607696","text":"import socket\n\nHOST = \"mercury.picoctf.net\"\nPORT = 21135\n\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\ns.connect((HOST, PORT))\n\ndata = s.recv(1024)\ndata = str(data)\nprint(data)\n\n\n\ndef sort_int(data):\n symbol = \"\"\n sorted = \"\"\n for i in range(len(data)):\n if data[i].isdigit():\n symbol += data[i]\n else:\n if symbol != \"\":\n sorted += chr(int(symbol))\n symbol = \"\"\n \n return sorted\n\nprint(sort_int(data))\n\n\"\"\"\ndef sort_symbol_int(data):\n sorted = \"\"\n\n for i in range(len(data)):\n symbol = \"\"\n if data[i] == \"'\" or data[i] == 'n':\n while data[i] != \" \" and i < len(data) - 1 or data[i] != \"'\":\n i+= 1\n if data[i].isdigit():\n symbol += data[i]\n \n symbol = symbol[:-1]\n i += 1\n sorted += chr(int(symbol))\n \n return sorted\n\nprint(sort_symbol_int(data))\n\"\"\" \ns.close()","repo_name":"Ramchike/Informatics-Python","sub_path":"pico-ctf/nice-netcat.py","file_name":"nice-netcat.py","file_ext":"py","file_size_in_byte":1002,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"23469652437","text":"import argparse\nimport json\nimport logging\nimport os\n\nfrom ray._private.runtime_env 
import RuntimeEnvContext\n\nlogger = logging.getLogger(__name__)\n\nparser = argparse.ArgumentParser(\n description=(\n \"Set up the environment for a Ray worker and launch the worker.\"))\n\nparser.add_argument(\n \"--serialized-runtime-env\",\n type=str,\n help=\"the serialized parsed runtime env dict\")\n\nparser.add_argument(\n \"--serialized-runtime-env-context\",\n type=str,\n help=\"the serialized runtime env context\")\n\nparser.add_argument(\n \"--allocated-instances-serialized-json\",\n type=str,\n help=\"the worker allocated resource\")\n\n\ndef get_tmp_dir(remaining_args):\n for arg in remaining_args:\n if arg.startswith(\"--temp-dir=\"):\n return arg[11:]\n return None\n\n\ndef parse_allocated_resource(allocated_instances_serialized_json):\n container_resource_args = []\n allocated_resource = json.loads(allocated_instances_serialized_json)\n if \"CPU\" in allocated_resource.keys():\n cpu_resource = allocated_resource[\"CPU\"]\n if isinstance(cpu_resource, list):\n # cpuset: because we may split one cpu core into some pieces,\n # we need set cpuset.cpu_exclusive=0 and set cpuset-cpus\n cpu_ids = []\n cpu_shares = 0\n for idx, val in enumerate(cpu_resource):\n if val > 0:\n cpu_ids.append(idx)\n cpu_shares += val\n container_resource_args.append(\"--cpu-shares=\" +\n str(int(cpu_shares / 10000 * 1024)))\n container_resource_args.append(\"--cpuset-cpus=\" + \",\".join(\n str(e) for e in cpu_ids))\n else:\n # cpushare\n container_resource_args.append(\n \"--cpu-shares=\" + str(int(cpu_resource / 10000 * 1024)))\n if \"memory\" in allocated_resource.keys():\n container_resource_args.append(\n \"--memory=\" + str(int(allocated_resource[\"memory\"] / 10000)))\n return container_resource_args\n\n\ndef start_worker_in_container(container_option, args, remaining_args):\n worker_setup_hook = args.worker_setup_hook\n last_period_idx = worker_setup_hook.rfind(\".\")\n module_name = worker_setup_hook[:last_period_idx]\n # python -m ray.workers.setup_runtime_env --session-dir=\n # default_worker.py --node-ip-address= ...\n entrypoint_args = [\"-m\"]\n entrypoint_args.append(module_name)\n # replace default_worker.py path\n if container_option.get(\"worker_path\"):\n remaining_args[1] = container_option.get(\"worker_path\")\n entrypoint_args.extend(remaining_args)\n # now we will start a container, add argument worker-shim-pid\n entrypoint_args.append(\"--worker-shim-pid={}\".format(os.getpid()))\n\n tmp_dir = get_tmp_dir(remaining_args)\n if not tmp_dir:\n logger.error(\n \"failed to get tmp_dir, the args: {}\".format(remaining_args))\n\n container_driver = \"podman\"\n # todo add cgroup config\n # todo flag \"--rm\"\n container_command = [\n container_driver, \"run\", \"-v\", tmp_dir + \":\" + tmp_dir,\n \"--cgroup-manager=cgroupfs\", \"--network=host\", \"--pid=host\",\n \"--ipc=host\", \"--env-host\"\n ]\n container_command.append(\"--env\")\n container_command.append(\"RAY_RAYLET_PID=\" + str(os.getppid()))\n if container_option.get(\"run_options\"):\n container_command.extend(container_option.get(\"run_options\"))\n container_command.extend(\n parse_allocated_resource(args.allocated_instances_serialized_json))\n\n container_command.append(\"--entrypoint\")\n container_command.append(\"python\")\n container_command.append(container_option.get(\"image\"))\n container_command.extend(entrypoint_args)\n logger.warning(\"start worker in container: {}\".format(container_command))\n os.execvp(container_driver, container_command)\n\n\nif __name__ == \"__main__\":\n args, remaining_args = 
parser.parse_known_args()\n    runtime_env: dict = json.loads(args.serialized_runtime_env or \"{}\")\n    container_option = runtime_env.get(\"container\")\n    if container_option and container_option.get(\"image\"):\n        start_worker_in_container(container_option, args, remaining_args)\n    else:\n        # NOTE(edoakes): args.serialized_runtime_env_context is only None when\n        # we're starting the main Ray client proxy server. That case should\n        # probably not even go through this codepath.\n        runtime_env_context = RuntimeEnvContext.deserialize(\n            args.serialized_runtime_env_context or \"{}\")\n\n        runtime_env_context.exec_worker(remaining_args)\n","repo_name":"MemberA2600/Fortari2600","sub_path":"venv/Lib/site-packages/ray/workers/setup_worker.py","file_name":"setup_worker.py","file_ext":"py","file_size_in_byte":4610,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"85"}
+{"seq_id":"9948751942","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[4]:\n\n\n# Write a program that reads one positive integer, then prints that integer written in base two (binary)\nx = int(input(\"Enter number: \"))\nx0 = x\nsum = ''\nd = 0\nz = 0\nwhile True:\n    y = int(x%2)\n    x = int(x/2)\n    sum = str(y)+sum\n    if y==1:\n        z = z+2**d\n    if z==x0:\n        break\n    d = d+1\nz = int(sum)\nprint(z)\n\n","repo_name":"st36251524009/Beameiei","sub_path":"27.9.62work1.py","file_name":"27.9.62work1.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"th","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"}
+{"seq_id":"25147798176","text":"#!/usr/local/bin/python3.5\n# , redirect, url_for, send_from_directory\n# from .crossdomain import crossdomain\nimport _thread\nimport hashlib\nimport json\nimport logging\nimport mimetypes\nimport os\nimport sys\nimport time\nimport traceback\nfrom pprint import pprint\n\nimport colored_traceback.auto\nimport colorlog\nimport jsonschema\nimport numpy as np\nfrom flask import Flask, jsonify, render_template, request, send_file\nfrom flask_compress import Compress\nfrom flask_sse import sse\n\nfrom kernel.data import (\n    condition_class,\n    puzzle_class,\n    reaction_mechanism_class,\n    solution_class,\n)\n\n# relative import fix. 
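# A minimal sketch of the base-2 conversion that the 27.9.62work1.py record above
# implements with a manual reconstruction loop; `to_binary` is an illustrative name,
# not taken from that record.
def to_binary(n: int) -> str:
    digits = ""
    while n > 0:
        n, bit = divmod(n, 2)  # peel off the least-significant bit
        digits = str(bit) + digits
    return digits or "0"

# Example: to_binary(10) == "1010", matching Python's built-in bin(10)[2:].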
according to this: http://stackoverflow.com/a/12869607/1147061\n# import sys\n# sys.path.append('..')\nfrom kernel.engine import driver, fileIO\n\ndriver.temp_diag = False\ndriver.system_output = pprint\n\n\nnp.seterr(all=\"warn\")\n\n\ndef all_files_in(mypath, end=\"\"):\n files = []\n for f in os.listdir(mypath):\n if (\n os.path.isfile(os.path.join(mypath, f))\n and not f.startswith(\".\")\n and f.endswith(end)\n ):\n name = os.path.splitext(os.path.basename(f))[0]\n files.append(name)\n return files\n\n\n# For Server-Sent-Event support:\n\n\nclass ListStream:\n \"\"\"One ListStream corresponds to one unique computation job.\n c.f.: http://stackoverflow.com/questions/21341096/redirect-print-to-string-list\"\"\"\n\n def __init__(self, jobID):\n self.jobID = jobID\n\n def write(self, *args):\n s = \"\"\n for arg in args:\n s += \" \" + str(arg)\n with app.app_context():\n try:\n sse.publish({\"data\": s}, channel=self.jobID)\n except AttributeError:\n sys.__stdout__.write(\" * Orphaned Message: \" + s)\n\n def flush(self):\n pass\n\n\n# Initialize logger:\nrootLogger = logging.getLogger() # access the root logger\nrootLogger.removeHandler(logging.getLogger().handlers[0])\n# create a handler for printing messages onto the console\nhandler = colorlog.StreamHandler()\nhandler.setFormatter(\n colorlog.ColoredFormatter(\n \"%(log_color)s%(levelname)s%(reset)s:%(bold)s%(name)s%(reset)s:%(message)s\"\n )\n)\n# attach the to-console handler to the root logger\nrootLogger.addHandler(handler)\n# tell the program to send messages on its own behalf.\nlogger = logging.getLogger(__name__)\n\n\ndef np_err_handler(message, flag):\n logger.error(\n f\"NumPy floating-point error: {message}\\n\"\n + \"\".join(traceback.format_stack(limit=7)[:-1])\n )\n\n\nnp.seterrcall(np_err_handler)\nnp.seterr(all=\"call\")\n\n# Global variables, and also initializing the webapp using Flask framework:\npath_root = \"results/\"\nif_runningOnline = \"DYNO\" in os.environ # detect whether on Heroku\n# use cache only when in production mode; in other words, don't use cache on local machine\nif_useCache = if_runningOnline\nAUTH_CODE = \"123\"\n# ongoingJobs = []\n\napp = Flask(__name__)\nCompress(app) # https://github.com/libwilliam/flask-compress\n\n# redis configuation, for SSE support:\n# 'os.environ.get(\"REDIS_URL\")' is for Heroku and \"redis://localhost\" is meant for localhost.\napp.config[\"REDIS_URL\"] = os.environ.get(\"REDIS_URL\") or \"redis://localhost\"\napp.register_blueprint(sse, url_prefix=\"/stream\")\n\n# load JSON schema for Puz file for validation:\nwith open(\"puzzles/schema.js\") as f:\n schema = f.read()\nschema = json.loads(schema)\n\n\n@app.route(\"/plot\", methods=[\"POST\", \"OPTIONS\"])\ndef plot():\n startTime = time.time() # start timer\n data = request.get_json() # receive JSON data\n # initialize logger for this particular job:\n # create a log_handler that streams messages to the web UI specificially for this job.\n logging_handler = logging.StreamHandler(stream=ListStream(data[\"jobID\"]))\n rootLogger.addHandler(logging_handler) # redirect the logs since NOW\n # now the serious part:\n try:\n # prepare directories:\n path_puzzle = f\"{path_root}{data['puzzle']}/\"\n path_condition = f\"{path_puzzle}condition_{data['conditionID']}/\"\n path_solution = f\"{path_condition}solution_{data['solutionID']}/\"\n # prepare figure file_names:\n plot_name = path_solution + f\"/{data['temperature']}\"\n plot_individual_filename = f\"{plot_name}_individual.svg\"\n plot_combined_filename = 
f\"{plot_name}_combined.svg\"\n # try whether result already generated:\n if (\n os.path.isfile(plot_individual_filename)\n and os.path.isfile(plot_combined_filename)\n and if_useCache\n ):\n logger.info(\"Figures already generated before. Skipped.\")\n with open(plot_individual_filename) as content_file:\n plot_individual = content_file.read()\n with open(plot_combined_filename) as content_file:\n plot_combined = content_file.read()\n # elif data['jobID'] in ongoingJobs: # not a job already done! let's see if anyone has been doing it...?\n # logger.info('Someone already submitted identical plotting job.')\n # return # TODO: proper handle the conflict!\n else: # we have to plot it ourselves! orz...\n temperature = data[\"temperature\"] # just a short hand\n logger.info(\"================== RECEIVED PLOTTING JOB ==================\")\n # we check the deepest folder, which implies all parent-grandparent folders exist.\n if not os.path.isdir(path_solution):\n os.makedirs(path_solution)\n # Now start preparing the instances of custom classes for further actual use in Engine.Driver:\n # (0) First of all, load the Puzzle Data into backend:\n with open(f\"puzzles/{data['puzzle']}.puz\") as json_file:\n puzzleData = json.load(json_file)\n logger.info(\" (0) Successfully loaded Puzzle Data from file!\")\n # (1) Instance of the Puzzle class:\n # (1.1) general data:\n elemRxns = np.array(puzzleData[\"coefficient_array\"], dtype=float)\n energy_dict = puzzleData[\"energy_dict\"]\n Ea = None # puzzleData['transition_state_energies']\n # logger.info(' SpeciesList involved are:',' '.join(speciesList))\n speciesList = sorted(\n puzzleData[\"coefficient_dict\"], key=puzzleData[\"coefficient_dict\"].get\n )\n num_rxn = len(puzzleData[\"coefficient_array\"])\n num_mol = len(speciesList)\n # (1.2) data about the reagents, used in pre-equilibrium computations:\n reagentsDict = []\n for reagent, PERsToggles in puzzleData[\"reagentPERs\"].items():\n reagentID = puzzleData[\"coefficient_dict\"][reagent]\n preEqulElemRxns = [\n puzzleData[\"coefficient_array\"][i]\n for i in range(num_rxn)\n if PERsToggles[i]\n ]\n if preEqulElemRxns == []:\n logger.info(\n f' For the reagent #{reagentID} \"{reagent}\", no pre-equilibration needed.'\n )\n preEqulElemRxns = np.array([[0.0]], dtype=float)\n reagent_speciesList = [reagent]\n else:\n # convert it into a numpy dict\n preEqulElemRxns = np.array(preEqulElemRxns, dtype=float)\n # a boolean array of whether each species specified in the puzzle file is present in this set of ElemRxns for preEqul.\n if_uninvolvedSpecies = np.all(preEqulElemRxns == False, axis=0)\n # now, remove unused species to simplify the rxn. 
set used for pre-equilibration of this particular reagent:\n displacement = 0\n # mask and remove columns of those unused species\n for speciesID in range(num_mol):\n # if this species is not used:\n if if_uninvolvedSpecies[speciesID]:\n # http://stackoverflow.com/questions/1642730/how-to-delete-columns-in-numpy-array\n preEqulElemRxns = np.delete(\n preEqulElemRxns, speciesID - displacement, 1\n )\n displacement += 1\n logger.info(f\" speciesList : {speciesList}\")\n logger.info(\n f\" if_uninvolvedSpecies: {if_uninvolvedSpecies}\"\n )\n # \\n',preEqulElemRxns)\n logger.info(\n \" This should mask the previous matrix into: //omitted//\"\n )\n reagent_speciesList = [\n speciesList[i]\n for i in range(num_mol)\n if not if_uninvolvedSpecies[i]\n ]\n reagent_num_rxn = len(preEqulElemRxns)\n reagent_num_mol = len(reagent_speciesList)\n logger.info(\" About pre-equilibration:\")\n logger.info(\n f\" Elem. Rxn.s used for pre-equilibration (a total of {reagent_num_rxn}): \\n\"\n + f\" {preEqulElemRxns}\"\n )\n logger.info(\n f\" which involves {reagent_num_mol} species: {reagent_speciesList}\"\n )\n reagentsDict.append(\n (\n reagent,\n reaction_mechanism_class.reaction_mechanism(\n reagent_num_rxn,\n reagent_num_mol,\n reagent_speciesList,\n preEqulElemRxns,\n energy_dict,\n ),\n )\n )\n # - - - - - - - - - - - - - - -\n this_puzzle = puzzle_class.puzzle(\n num_rxn,\n num_mol,\n speciesList,\n elemRxns,\n energy_dict,\n reagent_dictionary=reagentsDict,\n Ea=Ea,\n )\n # num_rxn, num_mol, speciesList, elemRxns, energy_dict -> fed to class \"reaction_mechanism\"\n # reagentsDict, Ea -> fed to class puzzle\n logger.info(\" (1) Puzzle Instance successfully created.\")\n # (2) Instance of the Condition class:\n # rxn_temp = temperature\n # Each entry in data['conditions'] is of the form:\n # [name of the reactant, amount, its fridge temperature]\n r_names = [reactant[\"name\"] for reactant in data[\"conditions\"]]\n r_concs = [reactant[\"amount\"] for reactant in data[\"conditions\"]]\n r_temps = [reactant[\"temperature\"] for reactant in data[\"conditions\"]]\n m_concs = [0.0] * num_mol\n # - - - - - - - - - - - - - - -\n this_condition = condition_class.condition(\n temperature, speciesList, r_names, r_temps, r_concs, m_concs\n )\n logger.info(\" (2) Condition Instance successfully created.\")\n # (3) Instance of the Solution class:\n coefficient_array_proposed = []\n for each_rxn_proposed in data[\"reactions\"]:\n coefficient_line_proposed = [0] * num_mol\n for each_slot in each_rxn_proposed:\n if each_slot != \"\":\n coefficient_line_proposed[speciesList.index(each_slot)] += 1\n coefficient_array_proposed.append(coefficient_line_proposed)\n num_rxn_proposed = len(coefficient_array_proposed)\n # - - - - - - - - - - - - - - -\n this_solution = solution_class.solution(\n num_rxn_proposed,\n num_mol,\n speciesList,\n coefficient_array_proposed,\n energy_dict,\n )\n logger.info(\" (3) Solution Instance successfully created.\")\n # Finally, drive the engine with these data:\n logger.info(\" (4) Simulating...\")\n logger.info(\" (a) True Model first:\")\n # anticipate the file name where the true model's data is stored\n trueModel_fileName = f\"{path_condition}plotData_t_{temperature}\"\n if os.path.isfile(trueModel_fileName + \"_.dat\") and if_useCache:\n # Mechanism(true)+Condition for this puzzle already simulated before. Take advantage of the cache now...\")\n logger.info(\" cache available. 
Load from it.\")\n                written_true_data = fileIO.load_modelData(trueModel_fileName + \"_.dat\")\n            else:\n                # Mechanism(true)+Condition for this puzzle not calculated before; do it now...\")\n                logger.info(\"        simulating...\")\n                written_true_data = driver.drive_data(\n                    puzzle=this_puzzle,\n                    # this is merely for backward compatibility.\n                    puzzle_path=path_puzzle,\n                    condition=this_condition,\n                    condition_path=path_condition,\n                    progress_tick=lambda x: 0,\n                )\n            logger.info(\n                f\"          Got result in a type of {type(written_true_data)}.\"\n            )\n            logger.info(\"      (b) User Model then:\")\n            # anticipate the file name where the true model's data is stored\n            userModel_fileName = f\"{path_solution}plotData_t_{temperature}\"\n            if (\n                os.path.isfile(userModel_fileName + \"_.dat\")\n                or os.path.isfile(userModel_fileName + \"_Failed\")\n            ) and if_useCache:\n                if os.path.isfile(userModel_fileName + \"_.dat\"):\n                    # Mechanism(solution)+Condition for this puzzle already simulated before. Take advantage of the cache now...\")\n                    logger.info(\"          cache available. Load from it.\")\n                    written_user_data = fileIO.load_modelData(\n                        userModel_fileName + \"_.dat\"\n                    )\n                elif os.path.isfile(userModel_fileName + \"_Failed\"):\n                    # Mechanism(solution)+Condition for this puzzle already simulated before. Take advantage of the cache now...\")\n                    logger.info(\"          UserModel failed as told by flag file.\")\n                    written_user_data = False\n            else:\n                # Mechanism(solution)+Condition for this puzzle not calculated before; do it now...\")\n                logger.info(\"          simulating...\")\n                written_user_data = driver.drive_data(\n                    puzzle=this_puzzle,\n                    # this is merely for backward compatibility.\n                    puzzle_path=path_puzzle,\n                    condition=this_condition,\n                    condition_path=path_condition,\n                    progress_tick=lambda x: 0,\n                    solution=this_solution,\n                    solution_path=path_solution,\n                    written_true_data=written_true_data,\n                )\n            logger.info(\n                f\"          Got result in a type of {type(written_user_data)}.\"\n            )\n            logger.info(\"      (5) Drawing plots... 
\")\n            (plot_individual, plot_combined) = driver.plotter.sub_plots(\n                Temperature=temperature,\n                plottingDict=puzzleData[\"coefficient_dict\"],\n                solution_fileName=f\"{path_solution}plotData_t_{temperature}\",\n                condition_fileName=f\"{path_condition}plotData_t_{temperature}\",\n                written_true_data=written_true_data,\n                written_user_data=written_user_data,\n            )\n            # end Timer\n            logger.info(\n                \"  (6) Now that everything is finished, write simulated data to file for caching...\"\n            )\n            _thread.start_new_thread(\n                fileIO.save_modelData, (written_true_data, trueModel_fileName)\n            )\n            _thread.start_new_thread(\n                fileIO.save_modelData, (written_user_data, userModel_fileName)\n            )\n            _thread.start_new_thread(\n                fileIO.save_figure, (plot_individual, plot_individual_filename)\n            )\n            _thread.start_new_thread(\n                fileIO.save_figure, (plot_combined, plot_combined_filename)\n            )\n        logger.info(f\"Executed for {time.time() - startTime}s.\")\n        # so that the client won't be getting debug info from other instances.\n        logger.removeHandler(logging_handler)\n        # ongoingJobs.remove(data['jobID'])\n        return jsonify(\n            jobID=data[\"jobID\"],\n            status=\"success\",\n            plot_individual=plot_individual,\n            plot_combined=plot_combined,\n            temperature=temperature,\n        )  # serving result figure files via \"return\", so as to save server calls\n    except Exception as error:\n        # print out last words:\n        logger.error(traceback.format_exc())\n        logger.info(f\"Executed for {time.time() - startTime}s.\")\n        # now unbind all log-handlers, so that the logger won't waste its time sending messages to this audience in later calls.\n        # redirect the logs since NOW\n        rootLogger.removeHandler(logging_handler)\n        return jsonify(jobID=data[\"jobID\"], status=\"error\")\n\n\n@app.route(\"/save\", methods=[\"POST\", \"OPTIONS\"])\ndef save():\n    data = request.get_json()  # receive JSON data\n    print(\"Data received:\")\n    pprint(data)\n    if not request.remote_addr == \"127.0.0.1\":\n        if not data[\"auth_code\"] == AUTH_CODE:\n            return jsonify(\n                status=\"danger\", message=\"Authentication failed. Check your password.\"\n            )\n    # Else, validate with jsonschema:\n    existing_puzzles = all_files_in(\"puzzles\")\n    if data[\"puzzleName\"] in existing_puzzles:\n        return jsonify(\n            status=\"danger\", message=\"Puzzle already exists. Try another name.\"\n        )\n    try:\n        jsonschema.validate(data, schema)\n    except jsonschema.exceptions.ValidationError as e:\n        return jsonify(status=\"danger\", message=e.message)\n    else:\n        # now convert:\n        species_name_to_id = {\n            species: i for i, species in enumerate(data[\"speciesNames\"])\n        }\n        coefficient_array = convert_reactions_to_coefficients(\n            data[\"reactions\"], species_name_to_id\n        )\n        energies = dict(zip(data[\"speciesNames\"], data[\"speciesEnergies\"]))\n        data_to_write = {\n            \"coefficient_dict\": species_name_to_id,\n            \"energy_dict\": energies,\n            \"coefficient_array\": coefficient_array,\n            \"reagents\": list(data[\"reagentPERs\"].keys()),\n            \"reagentPERs\": data[\"reagentPERs\"],\n        }\n        print(\"Data prepared:\")\n        pprint(data_to_write)\n        with open(\"puzzles/\" + data[\"puzzleName\"] + \".puz\", \"w\") as f:\n            try:\n                json.dump(data_to_write, f, indent=4)\n            except Exception as e:\n                print(e)\n                return jsonify(status=\"danger\", message=\"Error occurred. 
Can't save.\")\n            else:\n                return jsonify(status=\"success\", message=\"Puzzle successfully saved.\")\n\n\ndef convert_reactions_to_coefficients(reactions, species_name_to_id):\n    matrix = []\n    for reaction in reactions:\n        coefficients = [0.0] * len(species_name_to_id)\n        for i, speciesName in enumerate(reaction):\n            if speciesName == \"\":\n                continue  # skip empty entries\n            speciesID = species_name_to_id[speciesName]\n            delta = 1 if i > 1 else -1  # slots 0 and 1 are read as reactants (consumed); later slots as products\n            coefficients[speciesID] += delta\n        matrix.append(coefficients)\n    return matrix\n\n\n@app.route(\"/create\")\ndef serve_page_create():\n    return render_template(\n        \"main.html\", mode=\"create\", ip=request.remote_addr.replace(\".\", \"_\")\n    )\n\n\n@app.route(\"/play/<puzzleName>\")\ndef serve_page_play(puzzleName):\n    with open(\"puzzles/\" + puzzleName + \".puz\") as json_file:\n        puzzleData = json_file.read()\n    return render_template(\n        \"main.html\",\n        mode=\"play\",\n        puzzleName=puzzleName,\n        puzzleData=puzzleData,\n        ip=request.remote_addr.replace(\".\", \"_\"),\n    )\n\n\n@app.route(\"/\")\ndef serve_page_index():\n    puzzleList = all_files_in(\"puzzles\", end=\".puz\")\n    return render_template(\"index.html\", puzzleList=puzzleList)\n","repo_name":"ckwatson/web_gui","sub_path":"web/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":20798,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"}
+{"seq_id":"32528754676","text":"import os\nimport json\nfrom os.path import join, dirname\nfrom watson_developer_cloud import SpeechToTextV1\nimport subprocess\n\nvideo_file = os.path.join(\n    os.path.dirname(__file__),\n    'resources',\n    'test.mp4')\n\ncommand = \"ffmpeg -i \"+video_file+\" -ab 160k -ac 2 -ar 44100 -vn ./resources/audio.wav\"\n\nsubprocess.call(command, shell=True)\n\nspeech_to_text = SpeechToTextV1(\n    username='22ccdf7d-47bb-40fe-b175-4d2cff460f85',\n    password='PLPP00eicu2P',\n    x_watson_learning_opt_out=False\n)\n\ndef generateTranscript(input_file_name, output_file_name='test.txt'):\n    text_file = open(\"./resources/\" + output_file_name, \"w\")\n    text_file.write(getText(input_file_name))\n    text_file.close()\n    print('Finished generating transcript for audio: {:s}'\n          .format(input_file_name))\n\ndef getText(file_name):\n    with open(join(dirname(__file__), './resources/' + file_name),\n              'rb') as audio_file:\n        data = speech_to_text.recognize(\n            audio_file, content_type='audio/wav', timestamps=True,\n            word_confidence=True)\n    return json.dumps(data['results'][0]['alternatives'][0]['transcript'],\n                      indent=2)\n","repo_name":"anantdgoel/Freedom-of-speech","sub_path":"speechtotext.py","file_name":"speechtotext.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"}
+{"seq_id":"13048826467","text":"import streamlit as st\nfrom profanity_detector.spotify_func import SpotifyAPI,client_id,client_secret\nimport altair as alt\nimport requests\nimport base64\nimport os\nimport streamlit.components.v1 as components\nfrom IPython.core.display import display, HTML\nfrom streamlit_player import st_player\nfrom dotenv import load_dotenv\n\n# load_dotenv()\n# if 'client_id' and 'client_secret'in os.environ:\n#     client_id = os.getenv('client_id')\n#     client_secret = os.getenv('client_secret')\n# else:\n#     client_id = os.environ('client_id')\n#     client_secret = os.environ('client_secret')\n\n#Menu wide\nst.set_page_config(layout=\"wide\")\n\n#title,icon\ncomponents.html(\n    \"\"\"\n    \n    \"\"\",height=100\n)\nst.title(\"Spotify 
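# A hedged sketch of the markup that the blank components.html(...) strings in this
# record appear to have lost: Spotify's documented embed player is an iframe pointing
# at open.spotify.com/embed/<type>/<id>. `embed_album` is an illustrative helper name,
# not taken from the record.
import streamlit.components.v1 as components

def embed_album(album_id: str, height: int = 400) -> None:
    # Render the standard Spotify album embed inside a Streamlit component.
    components.html(
        f'<iframe src="https://open.spotify.com/embed/album/{album_id}" '
        f'width="300" height="{height}" frameborder="0" allow="encrypted-media"></iframe>',
        height=height,
    )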
List\")\n\n\n#put movie name\nName_of_Movie = st.text_input(\"Movie Name\")\n\n#instanciate class\nspotify=SpotifyAPI(client_id,client_secret)\n\n#get organised data\nchart_df=spotify.spotify_get_organised_data(Name_of_Movie)\n#chart df drop deplicate\ndrop_deplicated_data=chart_df.drop_duplicates(subset=['Album Name'], keep=\"first\").reset_index(drop=True)\n\n#playlist player show\nif Name_of_Movie:\n playlist_df=spotify.playlist_search_json_createdata(query=Name_of_Movie)\n top_playlist_id=playlist_df[\"ID\"][0]\n st.title(\"Playlist Search Result\")\n components.html(\n\n f\"\"\"\n\n \n \"\"\",\n height=400\n )\n #playlist table\n with st.expander(\"See Playlist List\"):\n st.table(playlist_df[[\"Name\",\"PlaylistURL\"]].head(30))\n\nelse:\n playlist_df=spotify.playlist_search_json_createdata(query=\"titanic\")\n top_playlist_id=playlist_df[\"ID\"][0]\n\n st.title(\"Playlist Search Result\")\n components.html(\n f\"\"\"\n \n \"\"\",\n height=400\n )\n #playlist table\n with st.expander(\"See more Playlist\"):\n st.table(playlist_df[[\"Name\",\"PlaylistURL\"]].head(10))\n\n\n#show data table\nst.header(f\"{Name_of_Movie} Song Search Result\")\n\n#song chart\nst.header(f\"Song Popularity+Energy Chart\")\nName_of_Feat=\"energy\"\nc = alt.Chart(chart_df).mark_circle().encode(\n alt.X('Popularity', scale=alt.Scale(zero=False)), y=f'{Name_of_Feat}', color=alt.Color('Popularity', scale=alt.Scale(type='log',scheme='rainbow')),\n size=alt.value(300), tooltip=['Popularity', f'{Name_of_Feat}', 'Song Name', 'Album Name']).interactive()\nst.altair_chart(c, use_container_width=True)\n\n\n#song table\nwith st.expander(\"See more Song List\"):\n st.table(chart_df.head(10))\n\n#Album player show\ncol1, col2,col3,col4= st.columns(4)\nif len(drop_deplicated_data[\"albumID\"]) == 1:\n with col3:\n uri=drop_deplicated_data[\"albumID\"][0]\n components.html(\n f\"\"\"\n \n \"\"\",\n height=400\n )\n with col2:\n components.html(\n f\"\"\"\n \n \"\"\",\n height=300\n )\nelif len(drop_deplicated_data[\"albumID\"]) > 1:\n with col2:\n uri=drop_deplicated_data[\"albumID\"][0]\n components.html(\n f\"\"\"\n \n \"\"\",\n height=400\n )\n with col3:\n uri1=drop_deplicated_data[\"albumID\"][1]\n components.html(\n f\"\"\"\n\n \n\n \"\"\",\n\n height=400,\n )\n\n with col1:\n components.html(\n f\"\"\"\n

\n            \n            \n            \"\"\",\n            height=100\n        )\n        components.html(\n            f\"\"\"\n            \n            \"\"\",\n            height=100\n        )\n    with col4:\n        components.html(\n            f\"\"\"\n            \n            \n            \"\"\",\n            height=100\n        )\n        components.html(\n            f\"\"\"\n            \n            \n            
\n \"\"\",\n height=100\n )\nelif len(drop_deplicated_data[\"albumID\"]) == 0:\n None\n\nelse:\n None\n","repo_name":"atathana/profanity_detector","sub_path":"apps/app_spotify.py","file_name":"app_spotify.py","file_ext":"py","file_size_in_byte":5929,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"26362236243","text":"from flask_restx import Namespace\n\n\nclass user_response_dto:\n api = Namespace('/user')\n\n def read_user_info(read_object):\n board_list = []\n for board in read_object.boards:\n board_list.append({\n 'id': board.id,\n 'title': board.title,\n 'text': board.text,\n 'author': board.author,\n 'user_email': board.user_email\n })\n comment_list = []\n for comment in read_object.comments:\n comment_list.append({\n 'id': comment.id,\n 'contents': comment.contents,\n 'author': comment.author,\n 'board_id': comment.board_id,\n 'user_email': comment.user_email\n })\n return {\n 'id': read_object.id,\n 'email': read_object.email,\n 'password': read_object.password,\n 'name': read_object.name,\n 'nickname': read_object.nickname,\n 'boards': board_list,\n 'comments': comment_list\n\n }\n","repo_name":"bonohbonoh/board-with-flask","sub_path":"apps/src/dto/response/User_response_dto.py","file_name":"User_response_dto.py","file_ext":"py","file_size_in_byte":1073,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"9633661556","text":"import os,sys\nimport csv, json\nfrom nltk.corpus import wordnet as wn\nimport numpy as np \nimport math\n\ndef sign(value):\n if(value < 0):\n return -1.0\n else:\n return 1.0\n\ndef getValence(valence_dict, word):\n\t#removes punctuation from word\n\t\n\tpunctuations = '''!\"#$%&()*+,./:;<=>?@[\\]^_`{|}~\\n'''\n\t\n\tif (sys.version_info > (3, 0)):\n\t\tword = word.translate(punctuations)\n\telse:\n\t\tprint(\"ERROR, need py3\")\n\tif(word in valence_dict):\n\t\treturn valence_dict[word]\n\telse:\n\t\treturn -999\n\ndef map_valence_tree(valence_dict, tree):\n\tvalence_tree = []\n\tfor subtree in tree:\n\t\tif(subtree.height() > 2):\n\t\t\tvalence_tree.append(map_valence_tree(valence_dict, subtree))\n\t\telse:\n\t\t\tvalence_tree.append(getValence(valence_dict,subtree[0]))\n\treturn valence_tree\n\ndef getTreeIndices(subtree, parent_index = []):\n\tindices = []\n\tfor i in range(len(subtree)):\n\t\tif(isinstance(subtree[i], str)):\n\t\t\tindices.append(parent_index)\n\t\telse:\n\t\t\tsubtree_indices = getTreeIndices(subtree[i], parent_index+[i])\n\t\t\tindices += subtree_indices\n\n\treturn indices\n\ndef SequenceToTreeIndices(completeTreeIndices, sequence_indices):\n return [completeTreeIndices[i] for i in sequence_indices]\n\ndef TreeToSequenceIndices(completeTreeIndices, tree_indices):\n\treturn [completeTreeIndices.index(i) for i in tree_indices]\n\t\ndef isNegator(word):\n negators = [\"cannot\", \"n't\", \"no\", \"never\", \"not\", \"nothing\", \"nobody\"]\n if(word in negators):\n return True\n else:\n return False\n\n#*------------------------------------------------------------------------------------------*#\n#* Negation Scope Detection Methods *#\n#*------------------------------------------------------------------------------------------*#\n\ndef detect_neg_scope_tree(subtree, parent_index = []):\n neg_scope = []\n for i in range(len(subtree)):\n isNegatorCheck1 = (subtree[i].height() < 3 and isNegator(subtree[i][0]))\n isNegatorCheck2 = (subtree[i].label() == \"ADVP\" and isNegator(subtree[i][0][0]))\n if(isNegatorCheck1 or 
isNegatorCheck2):\n for j in range(len(subtree)-(i+1)):\n neg_scope.append(parent_index+[j+i+1] )\n elif(subtree[i].height() > 2):\n neg_scope += detect_neg_scope_tree(subtree[i], parent_index+[i])\n\n return neg_scope\n\ndef detect_neg_scope_window(sentence_sequence, window_size = 0):\n neg_scope = []\n num_scopes = 0\n last_scope_count = 0\n\n for i in range(len(sentence_sequence)):\n\n if(isNegator(sentence_sequence[i])):\n num_scopes += 1\n last_scope_count = 0\n\n elif(num_scopes > 0):\n for j in range(num_scopes):\n neg_scope.append(i)\n\n if(window_size > 0): #window_size = 0 signifies end-of-sentence scope\n last_scope_count += 1\n if(last_scope_count >= window_size):\n num_scopes = 0\n last_scope_count = 0\n\n return neg_scope\n\ndef resolve_double_negative(neg_scope):\n new_thing = []\n for coord in neg_scope:\n if(neg_scope.count(coord)%2==1 and new_thing.count(coord) == 0):\n new_thing.append(coord)\n return new_thing\n\ndef negtool():\n pass\n\n\n#*------------------------------------------------------------------------------------------*#\n#* Negation Resolution Methods *#\n#*------------------------------------------------------------------------------------------*#\n\"\"\"\n 1. sym_inversion\n 2. asym_inversion\n 3. sym_shift\n 4. asym_shift\n 5. lookup_shift\n 6. multiple_reg\n\"\"\"\ndef negate(valence, neg_res, distribution_dict = None):\n\tif(neg_res == \"SYM_INVERT\"):\n\t\tweight = 1.0\n\t\treturn -weight*valence\n\t\n\telif(neg_res == \"AFFIRM_SHIFT\"):\n\t\treturn affirm_shift(valence)\n\n\telif(neg_res == \"ANTONYM_LOOKUP\"):\n\t\treturn valence\n\t\t\n\telif(neg_res == \"MEANINGSPEC_FREQ\"):\n\t\treturn meaningSpec_freq(valence, distribution_dict)\n\n\telif(neg_res == \"MEANINGSPEC_FREQDP\"):\n\t\treturn meaningSpec_freqdp(valence, distribution_dict)\n\ndef antonym_lookup_negate(valence_dict, word):\n\ttry:\n\t\tantonym_word = antonym_lookup(word)\n\texcept RuntimeError as re:\n\t\tantonym_word = None\n\t\tprint(\"antonym_lookup({}) error: {}\".format(word, re)) #antonym_lookup(us) error: maximum recursion depth exceeded\n\t\n\tif(antonym_word is None):\n\t\tantonym_valence = -999\n\telse:\n\t\tantonym_valence = getValence(valence_dict, antonym_word)\n\n\treturn antonym_valence\n\n\n\n\ndef get_cat(value):\n\tbins = [-1.0, -0.96, -0.92, -0.88, -0.84, -0.8, -0.76, -0.72, -0.68, -0.64, -0.6, -0.56, -0.52, -0.48, -0.44, -0.4, -0.36, -0.32, -0.28, -0.24, -0.2, -0.16, -0.12, -0.08, -0.04, 0.0, 0.04, 0.08, 0.12, 0.16, 0.2, 0.24, 0.28, 0.32, 0.36, 0.4, 0.44, 0.48, 0.52, 0.56, 0.6, 0.64, 0.68, 0.72, 0.76, 0.8, 0.84, 0.88, 0.92, 0.96, 1.0]\n\n\tfor i in range(len(bins)):\n\t\tif value == 1:\n\t\t\treturn 1\n\n\t\telse:\n\t\t\tif bins[i] <= value < bins[i+1]:\n\t\t\t\treturn bins[i]\n\t\n\ndef inferDist(valence, distribution_dict):\n\tcat = '%.3f'%(get_cat(valence))\n\t# if valence < 0:\n\t# \tcat = math.floor(valence*10)/10\n\t# else:\n\t# \tcat = math.ceil(valence*10)/10\n\n\t# get mu and sigma of distribution\n\tdata = distribution_dict[str(cat)]\n\n\trandom_freq_mu = data[\"frequency\"][0]\n\trandom_freq_sigma = data[\"frequency\"][1]\n\n\trandom_freq = np.random.normal(random_freq_mu, random_freq_sigma, 1)\n\twhile (random_freq < 0):\n\t\trandom_freq = np.random.normal(random_freq_mu, random_freq_sigma, 1)\n\t\n\trandom_dp_mu = data[\"dispersion\"][0]\n\trandom_dp_sigma = data[\"dispersion\"][1]\n\n\trandom_dp = np.random.normal(random_dp_mu, random_dp_sigma, 1)\n\twhile (random_dp < 0):\n\t\trandom_dp = np.random.normal(random_dp_mu, random_dp_sigma, 1)\n\n\t# 
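# A short sketch of the punctuation stripping that getValence() in this record intends.
# In Python 3, str.translate() expects a table built with str.maketrans rather than a
# raw string of characters, so word.translate(punctuations) does not delete punctuation
# as intended. `strip_punctuation` is an illustrative name, not taken from the record.
import string

def strip_punctuation(word: str) -> str:
    table = str.maketrans("", "", string.punctuation)  # map every punctuation char to None
    return word.translate(table)

# Example: strip_punctuation("great!") == "great"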
random_mi_mu = data[\"mi\"][0]\n\t# random_mi_sigma = data[\"mi\"][1]\n\n\t# random_mi = np.random.normal(random_mi_mu, random_mi_sigma, 1)\n\t\n\treturn {\"frequency\" : random_freq[0], \"dispersion\": random_dp[0]}\n\ndef affirm_shift(Affirm):\n\treturn -0.065916 -0.363218*Affirm\n\ndef meaningSpec_freq(Affirm, distribution_dict):\n\tFreq = inferDist(Affirm, distribution_dict)[\"frequency\"]\n\n\treturn -7.747130e-02 -3.850748e-01*Affirm + Freq*5.326080e-09\n\n\ndef meaningSpec_freqdp(Affirm, distribution_dict):\n\tinferred = inferDist(Affirm, distribution_dict)\n\tFreq = inferred[\"frequency\"]\n\tDP = inferred[\"dispersion\"]\n\n\treturn -6.112665e-02 +Affirm*-3.851552e-01+Freq*7.751644e-09+DP*-2.260250e+00+Freq*DP*-1.976151e-06\n#*--------------------------------------------------------------------------------------*#\n#* Antonym Dictionary Method *#\n#*--------------------------------------------------------------------------------------*#\n\ndef mother(keyword_lemma):\n\ttrack_antonym= []\n\n\tif len(keyword_lemma) >25:\n\t\treturn track_antonym\n\n\t# check synset (synonyms)\n\tkeyword_synsets = check_has_synset(keyword_lemma)\n\tword_list_lemma = change_to_lemma(keyword_synsets)\n\n\ttrack_antonym = check_has_antonym(word_list_lemma)\n\tif track_antonym !=[]:\n\t\treturn track_antonym\n\n\t#if there is no antonym for all keyword_synset in keyword_list,\n\t#get attribute of the keywords\n\tattribute_synset = check_has_attribute(keyword_lemma)\n\n\t#if there are keyword->attribute\n\tif attribute_synset !=[]:\n\t\tattribute_lemma = change_to_lemma(attribute_synset)\n\t\ttrack_antonym = check_has_antonym(attribute_lemma)\n\n\t\tif track_antonym != []:\n\t\t\t#if keyword->attribute has antonym, return that\n\t\t\treturn track_antonym\n\n\t#if attribute_list==[] or keyword->attribute->antonym ==[]\n\t#check pertainym of keyword\n\tpertainym_list = check_has_pertainym(keyword_lemma)\n\n\t#if there are keyword->pertainym\n\tif pertainym_list!=[]:\n\t\ttrack_antonym = check_has_antonym(pertainym_list)\n\n\t\tif track_antonym !=[]:\n\t\t\treturn track_antonym\n\n\t\t#if there is keyword->pertainym but no keyword->pertainym->antonym\n\t\ttrack_antonym = mother(pertainym_list)\n\n\t\tif track_antonym !=[]:\n\t\t\treturn track_antonym\n\n\t#if pertainym_list ==[], check derivationally_related_forms\n\tderivation_list = check_has_derivation(keyword_lemma)\n\n\tif derivation_list !=[]:\n\t\ttrack_antonym = check_has_antonym(derivation_list)\n\n\t\tif track_antonym !=[]:\n\t\t\treturn track_antonym\n\n\t\t#print 'derivation_list', derivation_list\n\t\ttrack_antonym = mother(derivation_list)\n\t\tif track_antonym !=[]:\n\t\t\treturn track_antonym\n\n\t#if keyword->derivation_list or keyword->derivation->antonym ==[]\n\t#check similar to\n\tsimilar_list = check_has_similar(keyword_lemma)\n\n\tif similar_list !=[]:\n\t\ttrack_antonym = check_has_antonym(similar_list)\n\n\t\tif track_antonym !=[]:\n\t\t\treturn track_antonym\n\n\t\ttrack_antonym = mother(similar_list)\n\t\tif track_antonym !=[]:\n\t\t\treturn track_antonym\n\n\t#If all means exhausted and still no relation\n\treturn track_antonym\n\ndef change_to_synset(lemma_list):\n\tsynset_list=[]\n\n\tfor lemma in lemma_list:\n\t\tsynset= lemma.synset()\n\t\tsynset_list.append(synset)\n\n\t# print 'synset_list', synset_list\n\treturn synset_list\n\ndef change_to_lemma(keyword_synsets):\n\t#switch to lemma for antonyms\n\tword_list_lemma=[]\n\tfor synset in keyword_synsets:\n\t\ttemp_list_lemma= synset.lemmas()\n\t\tfor temp in 
temp_list_lemma:\n\t\t\tword_list_lemma.append(temp)\n\n\treturn word_list_lemma\n\ndef check_has_synset(keyword_lemma):\n\t# print 'check_has_synset'\n\tsynset_list = []\n\n\tfor lemma in keyword_lemma:\n\t\ttemp_synset = wn.synsets(lemma.name())\n\n\t\tif temp_synset != []:\n\t\t\tfor synset in temp_synset:\n\t\t\t\tsynset_list.append(synset)\n\treturn synset_list\n\ndef check_has_attribute(keyword_lemma):\n\t# print 'check_has_attribute'\n\tattribute_list=[]\n\n\tkeyword_synset_list = change_to_synset(keyword_lemma)\n\n\tfor keyword_synset in keyword_synset_list:\n\t\ttemp_attribute_list= keyword_synset.attributes()\n\t\tif temp_attribute_list !=[]:\n\t\t\tfor temp in temp_attribute_list:\n\t\t\t\tattribute_list.append(temp)\n\n\treturn attribute_list\n\ndef check_has_antonym(word_list):\n\t# print 'check_has_antonym'\n\tantonym=[]\n\n\tfor lemma in word_list:\n\t\t# if lemma.antonyms():\n\t\t# \tantonym.append(lemma.antonyms()[0].name())\n\n\t\tantonym_list =lemma.antonyms()\n\n\t\tif antonym_list != []:\n\t\t\tfor antonym_word in antonym_list:\n\t\t\t\tantonym.append(antonym_word.name())\n\n\treturn antonym\n\ndef check_has_pertainym(word_list_lemma):\n\t# print 'check_has_pertainym'\n\tpertainym_list=[]\n\n\tfor lemma in word_list_lemma:\n\t\ttemp_pertainym_list = lemma.pertainyms()\n\n\t\tif temp_pertainym_list!= []:\n\t\t\tfor temp in temp_pertainym_list:\n\t\t\t\tpertainym_list.append(temp)\n\n\t# print 'pertainym_list',pertainym_list\n\treturn pertainym_list\n\ndef check_has_derivation(word_list_lemma):\n\t# print 'check_has_derivation'\n\n\tderivation_list=[]\n\n\tfor lemma in word_list_lemma:\n\t\ttemp_derivation_list = lemma.derivationally_related_forms()\n\n\t\tif temp_derivation_list!= []:\n\t\t\tfor temp in temp_derivation_list:\n\t\t\t\tderivation_list.append(temp)\n\n\t# print 'derivation_list',derivation_list\n\treturn derivation_list\n\t\t\t\ndef check_has_similar(word_list):\n\t# print 'check_has_similar'\n\tsimilar_list= []\n\n\tfor synset in word_list:\n\t\ttemp_similar_list=synset.similar_tos()\n\t\tif temp_similar_list != []:\n\t\t\tfor temp in temp_similar_list:\n\t\t\t\tsimilar_list.append(temp)\n\n\t# print 'similar_list', similar_list\n\treturn similar_list\n\ndef antonym_lookup(word):\n\ttry:\n\t\tantonym= []\n\t\tkeyword_synsets = wn.synsets(word)\n\n\t\t# print ('synsets', keyword_synsets)\n\n\t\tword_list_lemma = change_to_lemma(keyword_synsets)\n\n\t\t# print 'word_list_lemma', word_list_lemma\n\t\tantonym = check_has_antonym(word_list_lemma)\n\n\t\t#if there is no antonym for all keyword_synset in keyword_list,\n\t\tif antonym ==[]:\n\t\t\tantonym = mother(word_list_lemma)\n\n\t\t#antonym output as a list\n\t\tfor anto in antonym:\n\t\t\tif '_' not in anto:\n\t\t\t\treturn anto\n\texcept RuntimeError as re:\n\t\t#print(\"antonym_lookup({}):{}\".format(word, re)) #\"us\",\"me\",\"50\"\n\t\treturn None\n\t# return antonym[0]\n\n#*------------------------------------------------------------------------------------------*#\n#* Sentiment Composition Methods *#\n#*------------------------------------------------------------------------------------------*#\ndef tree_composition(tree, parent_index, neg_scope, neg_res, distribution_dict = None):\n valence = []\n for i in range(len(tree)):\n subtree = tree[i]\n\n current_index = parent_index+[i]\n\n if(isinstance(subtree, float) or isinstance(subtree, int)):\n if(subtree == -999):\n continue\n\n if(current_index in neg_scope):\n subtree = negate(subtree, neg_res, distribution_dict)\n \n valence.append(subtree)\n 
else:\n valence_from_tree = tree_composition(subtree, current_index, neg_scope, neg_res, distribution_dict)\n if(valence_from_tree == -999):\n continue\n if(current_index in neg_scope):\n valence_from_tree = negate(valence_from_tree, neg_res, distribution_dict)\n valence.append(valence_from_tree)\n if(len(valence) == 0):\n return -999\n else:\n return sum(valence)/float(len(valence))\n\ndef flat_composition(valence_sequence):\n\tif(sys.version_info>(3, 0)):\n\t\tvalence_sequence = list(filter(lambda a: a != -999, valence_sequence))\n\telse:\n\t\tvalence_sequence = filter(lambda a: a != -999, valence_sequence)\n\t\n\tif(len(valence_sequence) == 0):\n\t\tavg_valence = -999\n\telse:\n\t\tavg_valence = sum(valence_sequence)/len(valence_sequence)\n\treturn avg_valence\n","repo_name":"doreenhii/SentimentAnalysisPipeline","sub_path":"SentimentModelFunctions.py","file_name":"SentimentModelFunctions.py","file_ext":"py","file_size_in_byte":13324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"19820941648","text":"# pylint: skip-file\n\"\"\"add_event_id_default\n\nRevision ID: 271f00b3ca42\nRevises: 2e0398d9283d\nCreate Date: 2021-08-25 13:32:58.924590\n\n\"\"\"\nimport sqlalchemy as sa\nfrom alembic import op\n\n# revision identifiers, used by Alembic.\nrevision = \"271f00b3ca42\"\ndown_revision = \"2e0398d9283d\"\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade() -> None:\n op.alter_column(\n \"etl_client_events\",\n \"event_id\",\n server_default=sa.text(\"gen_random_uuid()\"),\n )\n\n\ndef downgrade() -> None:\n op.alter_column(\n \"etl_client_events\",\n \"event_id\",\n server_default=None,\n )\n","repo_name":"Recidiviz/pulse-data","sub_path":"recidiviz/persistence/database/migrations/case_triage/versions/2021_08_25_1332_271f00b3ca42_add_event_id_default.py","file_name":"2021_08_25_1332_271f00b3ca42_add_event_id_default.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"85"} +{"seq_id":"36638410080","text":"# -*- coding: utf-8 -*-\n\"\"\" A buffer is a (virtual) list of items. 
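# A toy sketch of the sentinel-aware averaging that tree_composition() in the
# SentimentModelFunctions record above performs (negation handling omitted): -999 marks
# a missing valence and is skipped at every level. `average_valence` is an illustrative name.
def average_valence(node):
    if isinstance(node, (int, float)):
        return node
    vals = [v for v in (average_valence(child) for child in node) if v != -999]
    return sum(vals) / len(vals) if vals else -999

# Example: average_valence([0.5, [-999, 0.25]]) == 0.375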
All items belong to a category (wall posts, messages, persons...)\"\"\"\nimport logging\nimport webbrowser\nimport arrow\nimport wx\nimport languageHandler\nimport widgetUtils\nimport output\nfrom pubsub import pub\nfrom wxUI.tabs import documents\nfrom sessionmanager import utils\nfrom mysc.thread_utils import call_threaded\nfrom wxUI import menus\nfrom .wall import wallBuffer\n\nlog = logging.getLogger(\"controller.buffers.documents\")\n\nclass documentsBuffer(wallBuffer):\n can_get_items = False\n\n def create_tab(self, parent):\n self.tab = documents.documentsTab(parent)\n self.connect_events()\n self.tab.name = self.name\n if hasattr(self, \"can_post\") and self.can_post == False and hasattr(self.tab, \"post\"):\n self.tab.post.Enable(False)\n\n def onFocus(self, event, *args,**kwargs):\n post = self.get_post()\n if post == None:\n return\n original_date = arrow.get(post[\"date\"])\n created_at = original_date.humanize(locale=languageHandler.curLang[:2])\n self.tab.list.list.SetItem(self.tab.list.get_selected(), 4, created_at)\n event.Skip()\n\n def connect_events(self):\n super(documentsBuffer, self).connect_events()\n # Check if we have a load button in the tab, because documents community buffers don't include it.\n if hasattr(self.tab, \"load\"):\n widgetUtils.connect_event(self.tab.load, widgetUtils.BUTTON_PRESSED, self.load_documents)\n\n def load_documents(self, *args, **kwargs):\n output.speak(_(\"Loading documents...\"))\n self.can_get_items = True\n self.tab.load.Enable(False)\n wx.CallAfter(self.get_items)\n\n def get_menu(self):\n p = self.get_post()\n if p == None:\n return\n if p[\"owner_id\"] == self.session.user_id:\n added = True\n else:\n added = False\n m = menus.documentMenu(added)\n widgetUtils.connect_event(m, widgetUtils.MENU, self.add_remove_document, menuitem=m.action)\n widgetUtils.connect_event(m, widgetUtils.MENU, self.download, menuitem=m.download)\n widgetUtils.connect_event(m, widgetUtils.MENU, self.open_in_browser, menuitem=m.open_in_browser)\n return m\n\n def add_remove_document(self, *args, **kwargs):\n p = self.get_post()\n if p == None:\n return\n if p[\"owner_id\"] == self.session.user_id:\n result = self.session.vk.client.docs.delete(owner_id=p[\"owner_id\"], doc_id=p[\"id\"])\n if result == 1:\n output.speak(_(\"The document has been successfully deleted.\"))\n self.session.db[self.name][\"items\"].pop(self.tab.list.get_selected())\n self.tab.list.remove_item(self.tab.list.get_selected())\n else:\n result = self.session.vk.client.docs.add(owner_id=p[\"owner_id\"], doc_id=p[\"id\"])\n output.speak(_(\"The document has been successfully added.\"))\n\n def download(self, *args, **kwargs):\n post = self.get_post()\n filename = utils.safe_filename(post[\"title\"])\n # If document does not end in .extension we must fix it so the file dialog will save it properly later.\n if filename.endswith(post[\"ext\"]) == False:\n filename = filename+ \".\"+post[\"ext\"]\n filepath = self.tab.get_download_path(filename)\n if filepath != None:\n pub.sendMessage(\"download-file\", url=post[\"url\"], filename=filepath)\n\n def open_in_browser(self, *args, **kwargs):\n post = self.get_post()\n if post == None:\n return\n url = \"https://vk.com/doc{user_id}_{post_id}\".format(user_id=post[\"owner_id\"], post_id=post[\"id\"])\n 
webbrowser.open_new_tab(url)\n","repo_name":"MCV-Software/socializer","sub_path":"src/controller/buffers/documents.py","file_name":"documents.py","file_ext":"py","file_size_in_byte":3769,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"85"} +{"seq_id":"21343971232","text":"# Imports\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# 3rd party:\nfrom django.test import TestCase, Client\nfrom django.urls import reverse, resolve\nimport json\n# Internal:\nfrom django.contrib.auth.models import User\nfrom .models import Review\nfrom products.models import Category, Product\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n\nclass TestReviewViews(TestCase):\n\n @classmethod\n def setUp(self):\n \"\"\"\n creating and saving a new test review\n \"\"\"\n self.user = User.objects.create(\n username='MyTestUser',\n password='mypass79',\n email='test@user.com',\n id='1',\n )\n self.user.save()\n self.my_category = Category.objects.create(\n name='Savage',\n notes='test notes',\n slug='testslug',\n friendly_name='Hunter knife'\n )\n self.my_category.save()\n self.product = Product.objects.create(\n category=self.my_category,\n item_no='A221',\n name='Test Hunter',\n description='Test knife description',\n price=230.00,\n bladelength=10,\n handlematerial='Wood',\n blade='Steel',\n id='1'\n )\n\n def tearDown(self):\n self.product.delete()\n self.my_category.delete()\n self.user.delete()\n\n def test_review_creation(self):\n self.client.force_login(self.user)\n response = self.client.post('/review/create_review/', data={\n 'id': '3',\n 'user': self.user.username,\n 'product_id': self.product.id,\n 'content': 'My test Review',\n 'current_time': '2023-06-13T12:00:00'\n }, follow=True)\n\n self.assertEqual(response.status_code, 200)\n self.assertTrue(Review.objects.filter(author=self.user).exists())\n\n def test_update_review(self):\n self.client.force_login(self.user)\n self.my_review = Review.objects.create(\n author=self.user,\n product=self.product,\n content='My New test review',\n id='5'\n\n )\n response = self.client.post('/review/update_review/', data={\n 'product_id': self.my_review.id,\n 'content': 'just Review',\n 'current_time': '2023-06-13T12:00:20',\n 'id': '5'\n\n })\n self.assertEqual(response.status_code, 200)\n\n def test_delete_review(self):\n self.client.force_login(self.user)\n self.my_review = Review.objects.create(\n author=self.user,\n product=self.product,\n content='My New test review',\n id='5'\n\n )\n # user author of review deleting review\n response = self.client.post('/review/delete_review/', data={\n 'review_id': self.my_review.id\n\n })\n self.assertEqual(response.status_code, 302)\n\n self.new_user = User.objects.create(\n username='MyNewTestUser',\n password='mypass719',\n email='teste@user.com',\n id='3',\n )\n self.new_user.save()\n self.my_new_review = Review.objects.create(\n author=self.new_user,\n product=self.product,\n content='My New test review',\n id='5'\n\n )\n # user NOT author of review trying to delete review\n response = self.client.get('/review/delete_review/',\n data={\n 'review_id': self.my_new_review.id,\n 'product_id': self.product.id,\n\n })\n self.assertTrue(Review.objects.filter(content='My New test review').exists())\n self.assertEqual(response.status_code, 
302)\n","repo_name":"rockroman/CI_PP5_Blade","sub_path":"reviews/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":3885,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"36885656122","text":"import string \nimport hashlib\nfrom itertools import product\nimport socket\nimport re\nimport solver\n\n\ndef connect(sock):\n data = sock.recv(1024).decode()\n sock.recv(1024).decode()\n hsh = data.replace('\\n', '').split(' == ')[1]\n target = data.split('(')[1].split(')')[0].replace('+', '')\n answ = brute_force(target, hsh, verbose=True).encode()\n sock.send(answ[:4])\n\n \ndef solve_round(sock):\n game = solver.Game()\n while not game.is_finished():\n question = game.get_question()\n game.get_step()\n sock.send((' '.join(str(question)).encode()))\n answer = sock.recv(1024).decode()\n print(answer)\n if 'got it' in answer:\n return\n answer = tuple([int(i) for i in re.findall(r'[0-9]+', answer)]) \n game.put_answer(answer)\n if game.is_correct():\n guessed = game.guessed_number()\n sock.send((' '.join(str(guessed)).encode()))\n print(sock.recv(1024))\n game.get_step()\n \n \ndef brute_force(mask, hsh, alphabet=string.ascii_letters+string.digits, verbose=False):\n pwd_pat = mask.replace('{', '{{').replace('}','}}').replace('*', '{}')\n N = mask.count('*')\n i = 0\n for chars in product(alphabet, repeat=N):\n if verbose:\n i += 1\n if i % 1000000 == 0:\n print('Iterations: {}'.format(i))\n if hsh == hashlib.sha256(pwd_pat.format(*chars).encode()).hexdigest():\n return pwd_pat.format(*chars)\n return None\n\n\nsock = socket.socket()\nsock.connect(('149.28.139.172', 10002)) \nconnect(sock)\nsock.recv(10240) # receive MOTD\nwhile 1:\n round = sock.recv(1024).decode()\n print(round) # receive round numb\n solve_round(sock)\n if 'rctf' in round.lower():\n break\n ","repo_name":"SirTelemak/ctf-tasks","sub_path":"rctf2018/Number Game/Finale.py","file_name":"Finale.py","file_ext":"py","file_size_in_byte":1768,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"19114700980","text":"\"\"\"\nscript for the experiments from the paper Papez, Grigori, Stompor 2020: \"Accelerating\n linear system solvers for time domain component separation of cosmic microwave\n background data\", submitted to Astronomy & Astrophysics\n\n(global) parameters and paths are defined here\n\"\"\"\n\nndetectors = [] #number of detectors, we use 2 (horizontal+vertical scans)\nNp = [] # number of pixels\nNt = [] # number of observations, size of each scan\nncomp = [] # number of components of signal [TQU], we use 2\nNsigcomp = [] # ncomp x number of different signals, we use 2x3\nnfrequencies = [] # number of frequencies, we use 6\n\nmultiple_eigvals = True # see the comment in Section 4.2\n\ncase = 'case0' # test case to be used\nniter = 200 # maximal number of iterations\nscalen = 1 # scaling of the loaded noise\nBetas_index = -1 # index of the first parameters to start with\nTOL = 1e-8 # tolerance for stopping criterion\nk = 0 # number of eigenvalues to deflate recycle\ndimP = 0 # size of the subspace used for recycling\n\n# the directories for data\npathtosignal = \"/global/cscratch1/sd/USER/data/signal/\"\npathtonoise = \"/global/cscratch1/sd/USER/data/noise/\"\npathtopointing = \"/global/cscratch1/sd/USER/data/\"\n# pathtopointing+case is added in 
PCS_functions.read_data\n","repo_name":"B3Dcmb/Accelerated-PCS-solvers","sub_path":"param.py","file_name":"param.py","file_ext":"py","file_size_in_byte":1329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"10644020270","text":"import re\nimport os.path\nimport argparse\nimport logging\nfrom six import iteritems\nimport numpy as np\n\nfrom catboost import CatBoostClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.externals import joblib\nfrom keras.models import load_model\n\nfrom nltk.tokenize import RegexpTokenizer\nfrom tqdm import tqdm\nimport pandas as pd\n\ntry:\n import cPickle as pickle\nexcept ImportError:\n import pickle\n\nfrom embed_utils import load_data, Embeds, Logger, clear_embedding_list, read_embedding_list\nfrom data_utils import calc_text_uniq_words, clean_texts, convert_text2seq, get_embedding_matrix, clean_seq, split_data, get_bow, tokenize_sentences, convert_tokens_to_ids\nfrom models import get_cnn, get_lstm, get_concat_model, save_predictions, get_tfidf, get_most_informative_features, get_2BiGRU, get_BiGRU_2dConv_2dMaxPool, get_2BiGRU_BN, get_2BiGRU_GlobMaxPool\nfrom train import train, continue_train, Params, _train_model, train_folds, get_model, train_folds_catboost\nfrom metrics import calc_metrics, get_metrics, print_metrics\nfrom sklearn.model_selection import cross_val_score\nimport lightgbm as lgb\nfrom sklearn.preprocessing import minmax_scale\n\n\n\nUNKNOWN_WORD = \"_UNK_\"\nEND_WORD = \"_END_\"\nNAN_WORD = \"_NAN_\"\nPROBABILITIES_NORMALIZE_COEFFICIENT = 1.4\n\n\n\ndef get_kwargs(kwargs):\n parser = argparse.ArgumentParser(description='--train=$TRAIN_DATA --test=$TEST_DATA --embeds=$EMBEDS_FILE --embeds_type=$EMBEDS_TYPE --train-clean=$TRAIN_CLEAN --test-clean=$TEST_CLEAN --embeds-clean=$EMBEDS_CLEAN --train-labels=$TRAIN_LABELS --config=$CONFIG --output=$OUTPUT_FILE --logger=$LOG_FILE')\n parser.add_argument('-f', '--train', dest='train', action='store', help='/path/to/trian_file', type=str)\n parser.add_argument('-t', '--test', dest='test', action='store', help='/path/to/test_file', type=str)\n parser.add_argument('-o', '--output', dest='output', action='store', help='/path/to/output_file', type=str)\n parser.add_argument('-e', '--embeds', dest='embeds', action='store', help='/path/to/embeds_file', type=str)\n parser.add_argument('-et', '--embeds_type', dest='embeds_type', action='store', help='fasttext | glove | word2vec', type=str)\n parser.add_argument('-l', '--logger', dest='logger', action='store', help='/path/to/log_file', type=str, default=None)\n parser.add_argument('--warm-start', dest='warm_start', action='store', help='true | false', type=bool, default=False)\n parser.add_argument('--model-warm-start', dest='model_warm_start', action='store', help='CNN | LSTM | CONCAT | LOGREG | CATBOOST, warm start for several models available', type=str, default=[], nargs='+')\n parser.add_argument('--format-embeds', dest='format_embeds', action='store', help='file | json | pickle | binary', type=str, default='file')\n parser.add_argument('--config', dest='config', action='store', help='/path/to/config.BiGRU_Dense.json', type=str, default=None)\n parser.add_argument('--train-clean', dest='train_clean', action='store', help='/path/to/save_train_clean_file', type=str, default='data/train.clean.npy')\n parser.add_argument('--test-clean', dest='test_clean', action='store', help='/path/to/save_test_clean_file', type=str, 
default='data/test.clean.npy')\n parser.add_argument('--embeds-clean', dest='embeds_clean', action='store', type=str, default=None)\n parser.add_argument('--train-labels', dest='train_labels', action='store', type=str, default=None)\n for key, value in iteritems(parser.parse_args().__dict__):\n kwargs[key] = value\n\n\ndef main(*kargs, **kwargs):\n\n # ============ Parse global parameters ============\n get_kwargs(kwargs)\n train_fname = kwargs['train']\n test_fname = kwargs['test']\n result_fname = kwargs['output']\n embeds_fname = kwargs['embeds']\n logger_fname = kwargs['logger']\n warm_start = kwargs['warm_start']\n model_warm_start = [model.lower() for model in kwargs['model_warm_start']]\n config = kwargs['config']\n train_clean = kwargs['train_clean']\n embeds_type = kwargs['embeds_type']\n train_labels = kwargs['train_labels']\n test_clean = kwargs['test_clean']\n embeds_clean = kwargs['embeds_clean']\n result_path = './lgboost/'\n sub = pd.read_csv('../sample_submission.csv')\n\n\n if not os.path.exists(result_path):\n os.mkdir(result_path)\n\n # ==== Create logger ====\n logger = Logger(logging.getLogger(), logger_fname)\n\n # ==== Load data ====\n logger.info('Loading data...')\n test_df = load_data(test_fname)\n train_x = np.load(train_clean)\n test_x = np.load(test_clean)\n embedding_matrix = np.load(embeds_clean)\n train_y = np.load(train_labels)\n\n\n target_labels = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']\n num_classes = len(target_labels)\n\n\n # ============= Load params of models =============\n params = Params(config)\n models = params.get('models')\n val_predictions_list = []\n test_predictions_list = []\n\n # ============ Train models =============\n for model_name in models:\n model_func = get_model(model_name, embedding_matrix, params)\n # =========== Training on folds ============\n batch_size = params.get(model_name).get('batch_size')\n\n logger.debug('Starting {0} training on folds...'.format(model_name))\n models, val_predictions = train_folds_catboost(train_x, train_y, params.get(model_name).get('num_folds'), batch_size, model_func, params.get(model_name).get('optimizer'), logger=logger)\n val_predictions_array = np.concatenate([minmax_scale(fold) for fold in val_predictions], axis=0)\n np.save(os.path.join(result_path, \"oof_{0}_{1}.npy\".format(model_name, embeds_type)), val_predictions_array)\n val_predictions_list.append(val_predictions_array)\n logger.debug('Predicting results...')\n test_predictions = []\n for fold_id, model in enumerate(models):\n test_predictions.append(model.predict(test_x, batch_size=batch_size))\n final_test_predictions = np.ones(test_predictions[0].shape)\n for fold_predict in test_predictions:\n final_test_predictions *= minmax_scale(fold_predict)\n final_test_predictions **= (1. 
/ len(test_predictions))\n np.save(os.path.join(result_path, \"test_predictions_{0}_{1}.npy\".format(model_name, embeds_type)), final_test_predictions)\n test_predictions_list.append(final_test_predictions)\n\n x_test = np.concatenate(test_predictions_list, axis=1)\n x_meta = np.concatenate(val_predictions_list, axis=1)\n train_y = train_y[:x_meta.shape[0]]\n\n stacker = lgb.LGBMClassifier(max_depth=3, metric=\"auc\", n_estimators=125, num_leaves=10, boosting_type=\"gbdt\",\n learning_rate=0.1, feature_fraction=0.45, colsample_bytree=0.45, bagging_fraction=0.8,\n bagging_freq=5, reg_lambda=0.2)\n scores = []\n for i, label in enumerate(target_labels):\n print(label)\n score = cross_val_score(stacker, x_meta, train_y[:, i], cv=5, scoring='roc_auc')\n print(\"AUC:\", score)\n scores.append(np.mean(score))\n stacker.fit(x_meta, train_y[:, i])\n sub[label] = stacker.predict_proba(x_test)[:, 1]\n print(\"CV score:\", np.mean(scores))\n\n result_path = './lgboost'\n if not os.path.exists(result_path):\n os.mkdir(result_path)\n submit_path = os.path.join(result_path, \"{0}_{1}.csv\".format('lgboost_folds', embeds_type))\n sub.to_csv(submit_path, index=False)\n\n\nif __name__=='__main__':\n main()\n","repo_name":"orech/toxic-comments-rep","sub_path":"train_model_lgboost.py","file_name":"train_model_lgboost.py","file_ext":"py","file_size_in_byte":7581,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"85"} +{"seq_id":"34170926955","text":"import time\nimport rtmidi\nimport atexit\nimport numbers\nimport numpy\nimport random\n\n\"\"\"\n IMPORTANT: INSTALL the correct library: pip install python-rtmidi\n there is another library rtmidi-python, which is NOT the one you want\n you want python-rtmidi!\n\"\"\"\n\nprint(\"\")\n\ndef exit_handler():\n # for each Player, send a midi note off message for previous and current note\n for i in range(len(player_list)):\n if isinstance(player_list[i].last_note, numbers.Number):\n midiout.send_message([0x80, player_list[i].last_note, 0])\n if isinstance(player_list[i].current_note, numbers.Number):\n midiout.send_message([0x80, player_list[i].current_note, 0])\n\natexit.register(exit_handler)\n\nclass Sequencer():\n def __init__(self, name, note_pool, prob_matrix=\"default\", tempo_mult=1, steps_between_recalc=\"default\"):\n self.name = name\n self.note_pool = note_pool # Maybe rename to \"base_sequence\"...\n self.tempo_mult = tempo_mult\n\n if prob_matrix == \"default\":\n self.prob_matrix = []\n for count in range(len(self.note_pool)):\n prob_list = []\n for i in range(len(self.note_pool)):\n if i == count:\n prob_list.append(1)\n else:\n prob_list.append(0)\n self.prob_matrix.append(prob_list)\n else:\n self.prob_matrix = prob_matrix # This argument should be a list of any length, whose elements are a list of probability weights of len(base_sequence) that add up to 1\n if steps_between_recalc == \"default\":\n self.steps_between_recalc = len(note_pool)\n else:\n self.steps_between_recalc = steps_between_recalc\n\n self.working_sequence = \"init\"\n self.pos = \"init\"\n self.last_pos = \"init\"\n self.out = 0\n self.last_out = 0\n\n def recalc(self):\n\n self.working_sequence = []\n\n for i in range(len(self.prob_matrix)):\n self.working_sequence.append(numpy.random.choice(self.note_pool, 1, True, self.prob_matrix[i])[0])\n\n def update(self):\n\n\n self.last_out = self.out\n self.last_pos = self.pos\n cycles_per_step = float(cycles_per_beat) / self.tempo_mult\n\n if self.pos == \"init\":\n self.pos = 0\n 
self.recalc()\n\n\n self.pos += (1 / cycles_per_step)\n self.pos = self.pos % len(self.working_sequence)\n\n if self.last_pos == \"init\":\n self.last_pos = 0\n self.out = self.working_sequence[int(self.pos)]\n print(self.name, self.out)\n\n elif int(self.pos) != int(self.last_pos):\n self.out = self.working_sequence[int(self.pos)]\n print(self.name, self.out)\n\n if isinstance(self.out, str) and self.out != \"x\":\n self.out = int(self.out)\n\n return self.out\n\n\nclass Player():\n def __init__(self, sequencer, name=\"a player\", follow_mode=True, octave=0, channel=1):\n self.sequencer = sequencer\n self.name = name\n self.follow_mode = follow_mode\n self.octave = octave # This doesn't do anything yet.\n self.channel = channel # This doesn't do anything yet.\n self.current_note = \"init\"\n self.last_note = \"init\"\n self.play_me = True\n self.init = True\n\n def update(self):\n\n #print(sequencer.pos)\n #print(sequencer.out)\n\n note_on_msg = 0x90 + self.channel - 1 # Subtract 1, because logical MIDI channel 1 is actually 0\n note_off_msg = 0x80 + self.channel - 1 # Subtract 1, because logical MIDI channel 1 is actually 0\n\n if isinstance(self.sequencer.out, numbers.Number):\n if self.follow_mode == True:\n self.current_note = int(degree_to_mode(self.sequencer.out, self.octave)) # should be MIDI value\n else:\n self.current_note = self.sequencer.out\n else:\n self.current_note = \"x\"\n\n if isinstance(self.sequencer.last_out, numbers.Number):\n if self.follow_mode == True:\n self.last_note = int(degree_to_mode(self.sequencer.last_out, self.octave)) # should be MIDI value\n else:\n self.last_note = self.sequencer.last_out\n else:\n self.last_note = \"x\"\n\n if int(self.sequencer.pos) == int(self.sequencer.last_pos):\n self.play_me = False\n else:\n self.play_me = True\n\n if self.play_me == True or self.init == True:\n\n self.init = False\n\n if self.current_note == 'x':\n if self.last_note != 'x':\n midiout.send_message([note_off_msg, self.last_note, 0]) # note off message for last note played\n else:\n if self.last_note != 'x':\n midiout.send_message([note_off_msg, self.last_note, 0]) # note off message for last note played\n midiout.send_message([note_on_msg, self.current_note, 110]) # note on message for the current note\n else:\n midiout.send_message([note_on_msg, self.current_note, 110]) # note on message for the current note\n\n\n # TO DO: modify above code to divide amplitude by # of (non-muted/paused) Player objects\n\n print(self.name, self.current_note)\n return self.current_note\n\n else:\n\n return None\n\nmidiout = rtmidi.MidiOut()\nmidiout.open_port(0)\n\ntime.sleep(5) # gives you time to connect midi outputs in patchage\n\nseconds_per_cycle = .001\n\n# GLOBAL VARIABLES FOR COMPOSITION\n\nbpm = 120.0\ncycles_per_beat = 1 / (bpm / 60 * seconds_per_cycle)\nt = 0\n\n# fund_freq = 100\nglobal_key = 60 # \"C\" in MIDI\nglobal_key_temp = \"init\"\nglobal_mode = [0, 2, 4, 7, 11] # five-note mode (a true major pentatonic would be [0, 2, 4, 7, 9])\n\ndef degree_to_mode(degree, octave=0, key=global_key, mode=global_mode):\n # Make it so degrees that are out of mode's range result in higher or lower notes (e.g. 
if degree 0 translates to MIDI 60, degree of -1 might be MIDI 59, or 58, depending on the mode)\n return key + (octave * 12) + mode[degree % len(mode)]\n\ndef update(generator_list):\n for i in range(len(generator_list)):\n generator_list[i].update()\n\n# INITIALIZE SEQUENCERS\n\nsequencer_list = [] # initializes the list where all sequencers are stored\n\nsequencer_list.append(Sequencer(\"synth0\", [0,1,2,3,\"x\",3,2,1]))\nsequencer_list.append(Sequencer(\"synth1\", [1, 2, 3, 4]))\n# [[1, 0, 0],[0.1, 0.4, 0.5], [0, 0.7, 0.3]]\nsequencer_list.append(Sequencer(\"synth2\", [0, 1, 2, 3, 'x'], [[1, 0, 0, 0, 0], [0,0.8,0,0,0.2],[0.1, 0.2, 0.2, 0, 0.5], [0, 0.7, 0.3, 0, 0]], 2))\n\nsequencer_list.append(Sequencer(\"drum0\", [36, \"x\"], [[1,0],[0.5,0.5],[0, 1],[0.6,0.4]], 4)) # Would be nice to enter a string and have it convert to a list.\n\n#INITIALIZE PLAYERS\n\nplayer_list = []\n\nplayer_list.append(Player(sequencer_list[0], \"player0\", True, -1))\nplayer_list.append(Player(sequencer_list[1],\"player1\", True, 0))\nplayer_list.append(Player(sequencer_list[2], \"player2\", True, 1))\nplayer_list.append(Player(sequencer_list[3], \"drum0_player\", False, 0, 2))\n\n\n# START PLAYING THE ACTUAL COMPOSITION\n\nwhile t < 1000 / seconds_per_cycle:\n\n    if t % (4 * cycles_per_beat) == 0: # Add this functionality within Attractor/Sequencer.update itself as well\n for i in range(len(sequencer_list)):\n sequencer_list[i].recalc()\n sequencer_list[i].tempo_mult = numpy.random.choice([1,2,3,4], 1, True, [0.25,0.25,0.25,0.25])[0]\n #attractor_list[i].pos = 0\n\n\n    update(sequencer_list)\n\n    # Need a way to filter inputs of wrong type (in .update()) for each variable that will be modified externally in each class\n\n    # global_key = 60 + int(sequencer_list[1].out)\n\n    if global_key_temp != global_key and global_key_temp != \"init\":\n for i in range(len(player_list)):\n midiout.send_message([0x80, int(player_list[i].last_note), 0]) # send off message to last note played\n midiout.send_message([0x80, int(player_list[i].current_note), 0]) # send off message to current note playing\n\n global_key_temp = global_key\n\n    update(player_list)\n\n\n    t = t + 1\n    time.sleep(seconds_per_cycle - (time.time() % seconds_per_cycle))\n","repo_name":"dwiel/math_worship","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"37093356330","text":"# 066\n\nnum, soma, count = 0, 0, 0\n\nwhile True:\n try:\n num = int(input('Digite um número [999 para parar]: '))\n\n except ValueError:\n print('Valor inválido.')\n continue\n\n if num == 999:\n break\n\n soma += num\n count += 1\n\nprint(f'Você digitou {count} números. 
A soma deles é {soma}.')\n\n","repo_name":"gpossa/Exercicios-CeV","sub_path":"Ex066.py","file_name":"Ex066.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"27088695048","text":"import logging\nimport logging.config\nimport yaml\n\n# ⊕ [Logging in Python – Real Python](https://realpython.com/python-logging/)\nwith open('../../conf/logger.yml', 'r') as f:\n config = yaml.safe_load(f.read())\n logging.config.dictConfig(config)\n\nlogger = logging.getLogger(__name__)\n\nlogger.debug('This is a debug message')\nmsg='hi'\nlogger.info(f\"{msg}\")\n\n","repo_name":"samlet/stack","sub_path":"sagas/tests/test_logger.py","file_name":"test_logger.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"85"} +{"seq_id":"72099391637","text":"import csv\r\nfrom tqdm import tqdm\r\nimport pandas as pd\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer\r\nfrom sklearn.preprocessing import LabelEncoder\r\nimport pickle\r\nimport numpy as np\r\n\r\n# Function for preprocessing the corpus\r\n# We take (title + desc + brand) and if desc is not available then (title + bullet_points + brand)\r\ndef pre_process(corpus1, corpus2, corpus3, corpus4):\r\n    corpus = list()\r\n    for i in tqdm(range(len(corpus1))):\r\n    \t# Get title\r\n    \tif len(corpus1[i]) > 0:\r\n    \t\ttitle = corpus1[i]\r\n    \telse:\r\n    \t\ttitle = \"\"\r\n    \t# Get product description\r\n    \tif len(corpus2[i]) > 0:\r\n    \t\tdesc = corpus2[i]\r\n    \telse:\r\n    \t\tif len(corpus4[i]) > 0:\r\n    \t\t\tdesc = corpus4[i]\r\n    \t\telse:\r\n    \t\t\tdesc = \"\"\r\n    \t# Get product brand\r\n    \tif len(corpus3[i]) > 0:\r\n    \t\tbrand = corpus3[i]\r\n    \telse:\r\n    \t\tbrand = \"\"\r\n    \t\r\n    \tif len(title) < 1 and len(desc) < 1 and len(brand) < 1:\r\n    \t\tsample = \"NAN\"\r\n    \telse:\r\n    \t\tsample = title + \" \" + desc + \" \" + brand\r\n    \tcorpus.append(sample)\r\n    return corpus\r\n\r\n\r\n\r\n\r\n# Read the data in pandas\r\ndf = pd.read_csv(\"../dataset/cleaned_train.csv\", escapechar=\"\\\\\", quoting=csv.QUOTE_NONE)\r\ndf = df.replace(np.nan, '', regex=True)\r\n# Get all the different columns including the target variables\r\ncorpus1 = df[\"cleaned_TITLE\"].values\r\ncorpus2 = df[\"cleaned_DESCRIPTION\"].values\r\ncorpus3 = df[\"cleaned_BRAND\"].values\r\ncorpus4 = df[\"cleaned_BULLET_POINTS\"].values\r\nY = df[\"BROWSE_NODE_ID\"].values\r\ndel df\r\n\r\n# Pre-processing\r\ncorpus = pre_process(corpus1, corpus2, corpus3, corpus4)\r\ndel corpus1, corpus2, corpus3, corpus4\r\n\r\n# Generate TFIDF vectors\r\nvectorizer = TfidfVectorizer(max_features=12000, min_df=3, ngram_range=(1, 1))\r\nvec = vectorizer.fit_transform(corpus)\r\n# Keep your RAM clean\r\ndel corpus\r\n\r\n# Check out the top features\r\nprint(\"\\nTop Feature Names:\", vectorizer.get_feature_names()[:100])\r\nprint(\"\\nTrain Data Shape:\", vec.shape)\r\n\r\n# Save the vectorized data\r\nfp = open(\"../dataset/train_tf_idf_vec.bin\", \"wb\")\r\npickle.dump(vec, fp)\r\nfp.close()\r\n\r\n# Save the target variable\r\nfp = open(\"../dataset/target.bin\", \"wb\")\r\npickle.dump(Y, fp)\r\nfp.close()\r\n\r\n# RAM maintenance\r\ndel vec, Y\r\n\r\n\r\n\r\n# Load the test set\r\ndf = pd.read_csv(\"../dataset/cleaned_test.csv\", escapechar=\"\\\\\", quoting=csv.QUOTE_NONE)\r\ndf = df.replace(np.nan, '', regex=True)\r\n# Get all the different columns including the product ids\r\ncorpus1 = 
df[\"cleaned_TITLE\"].values\r\ncorpus2 = df[\"cleaned_DESCRIPTION\"].values\r\ncorpus3 = df[\"cleaned_BRAND\"].values\r\ncorpus4 = df[\"cleaned_BULLET_POINTS\"].values\r\nprod_ids = df[\"PRODUCT_ID\"].values\r\ndel df\r\n\r\n# Pre-processing\r\ncorpus = pre_process(corpus1, corpus2, corpus3, corpus4)\r\ndel corpus1, corpus2, corpus3, corpus4\r\n\r\n# Generate TFIDF vectors\r\nvec = vectorizer.transform(corpus)\r\n# Keep your RAM clean\r\ndel corpus\r\n\r\n# Check out data shape\r\nprint(\"\\nTest Data Shape:\", vec.shape)\r\n\r\n# Save the vectorized data\r\nfp = open(\"../dataset/test_tf_idf_vec.bin\", \"wb\")\r\npickle.dump(vec, fp)\r\nfp.close()\r\n\r\n# Save the product ids\r\nfp = open(\"../dataset/prod_ids.bin\", \"wb\")\r\npickle.dump(prod_ids, fp)\r\nfp.close()\r\n","repo_name":"atif-hassan/Competition-code","sub_path":"Hackerearth/Amazon ML Challenge/code/create_sparse_vectors.py","file_name":"create_sparse_vectors.py","file_ext":"py","file_size_in_byte":3148,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"85"} +{"seq_id":"37150553965","text":"from django.db import models\nfrom django.utils.translation import gettext_lazy as _\nfrom django.conf import settings\n\nSTATUS_CHOICES = getattr(settings, \"STATUS_CHOICES\")\n\n\nclass EscapeQueue(models.Model):\n\n    id = models.AutoField(primary_key=True, db_column=\"id\")\n    applicant_id = models.ForeignKey(\n        \"users.User\", on_delete=models.CASCADE, db_column=\"applicant_id\"\n    )\n    reason = models.CharField(_(\"reason\"), max_length=100, null=False, default=\"reason\")\n    status = models.PositiveSmallIntegerField(\n        choices=STATUS_CHOICES, null=False, default=3\n    )\n    start_at = models.DateTimeField(_(\"start time\"))\n    end_at = models.DateTimeField(_(\"end time\"))\n    created_at = models.DateTimeField(auto_now=True)\n\n    class Meta:\n        db_table = u\"escape_queue\"\n\n    def __str__(self):\n        return f\"{self.id}, {self.applicant_id}\"\n","repo_name":"Doran-Doran-development/DoranDoran-Server-2","sub_path":"dorandoran/escapes/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":853,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"85"} +{"seq_id":"23887578741","text":"import discord\nimport asyncio\nimport random\nimport pickle\nimport os\n\nclient = discord.Client()\n\n@client.event\nasync def on_ready():\n\tprint('Logged in as')\n\tprint(client.user.name)\n\tprint(client.user.id)\n\tprint('------')\n \n\t@client.event\n\tasync def on_message(message):\n\t\tif message.content.upper().startswith('N!NIKO'):\n\t\t\tawait client.send_message(message.channel, 'Yeah?')\n\t\telif message.content.upper().startswith('N!PICK'):\n\t\t\tflip = random.choice(['Of course I would pick Crimped', 'Of course I would pick Indomie', 'Of course I would pick Myth', 'Of course I would pick Tuan', 'Of course I would pick fckboi', 'Of course I would pick Tuna', 'Of course I would pick Scoot', 'Of course I would pick Creepy', 'Of course I would pick myself'])\n\t\t\tawait client.send_message(message.channel, flip)\n\t\telif message.content.startswith('n!skip'):\n\t\t\tawait client.send_message(message.channel, 'Why skip ;-;')\n\t\telif message.content.startswith('!pancake'):\n\t\t\tawait client.send_message(message.channel, ':3')\n\t\telif message.content.startswith('!wakeup'):\n\t\t\tawait client.send_message(message.channel, 'No Im going back to sleep')\t\n\t\telif message.content.startswith('!meow'):\n\t\t\tawait client.send_message(message.channel, 'Meow 
:3')\n\t\telif message.content.startswith('n!help'):\n\t\t\tawait client.send_message(message.channel, '**COMMANDS** n!niko, n!pick, n!skip, !pancake, !wakeup, !meow, n!help, n!hi')\n\t\telif message.content.upper().startswith('N!HI'):\n\t\t\tuserID = message.author.id\n\t\t\tawait client.send_message(message.channel, \"Hi, <@%s>\" % (userID))\n\t\telif message.content.upper().startswith('N!QUOTE'):\n\t\t\tb = random.randint(0,51)\n\n\t\t\ta = open(\"quotes.txt\", \"r\")\n\t\t\tfor x, line in enumerate(a):\n\t\t\t\tif x == b:\n\t\t\t\t\tawait client.send_message(message.channel, line)\n\t\t\ta.close()\n\n\t\t\nclient.run('BOT_TOKEN')\n\n","repo_name":"Neatrianty/niko","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":1806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"24938169170","text":"import os\nimport json\n\n\nclass Note:\n    def __init__(self, note, tags):\n        self.note = note\n        self.tags = tags\n\n    def __repr__(self) -> str:\n        return f\"Note: {self.note}\"\n\n    def __eq__(self, other):\n        if isinstance(other, Note):\n            return self.note == other.note and self.tags == other.tags\n        return False\n\n    def __getitem__(self, key):\n        if key == 'tags':\n            return self.tags\n        elif key == 'note':\n            return self.note\n        else:\n            raise KeyError(f\"Invalid key: {key}\")\n\n    @staticmethod\n    def default(obj):\n        if isinstance(obj, Note):\n            return {\n                'note': obj.note,\n                'tags': obj.tags\n            }\n        raise TypeError(f\"Object of type {type(obj).__name__} is not JSON serializable\")\n\n\nclass NoteManager:\n    def __init__(self):\n        self.notes = {}\n        self.load_notes()\n\n    def __str__(self) -> str:\n        return f\"{self.notes}\"\n\n    def __repr__(self) -> str:\n        return f\"{self.notes}\"\n\n    def save_notes(self):\n        with open('notes.json', 'w', encoding='utf-8') as file:\n            data = {\n                key: [{'note': note.note, 'tags': note.tags} for note in value]\n                for key, value in self.notes.items()\n            }\n            json.dump(data, file, default=Note.default, ensure_ascii=False, indent=2, separators=(',', ': '))\n\n    def load_notes(self):\n        if os.path.exists('notes.json'):\n            with open('notes.json', 'r', encoding='utf-8') as file:\n                data = json.load(file)\n                self.notes = {\n                    key: [Note(note['note'], note['tags']) for note in value]\n                    for key, value in data.items()\n                }\n\n    def add_notes(self, note, tags=None):\n        if tags is None:\n            tags = ['Ключове слово']\n\n        new_note = Note(note, tags)\n        for tag in tags:\n            if tag in self.notes:\n                self.notes[tag].append(new_note)\n            else:\n                self.notes[tag] = [new_note]\n        print(f\"Note added: {new_note}\")\n        self.save_notes()\n\n    def search_notes(self, word):\n        result_search = []\n        for notes in self.notes.values():\n            for note in notes:\n                if word.lower() in note.note.lower() or any(word.lower() in tag.lower() for tag in note.tags):\n                    result_search.append(note)\n        if result_search:\n            for note in result_search:\n                print(note.note)\n        else:\n            print(\"No notes found\")\n\n    def edit_note_by_index(self, index, new_note):\n        found = False\n        for notes in self.notes.values():\n            if index >= 0 and index < len(notes):\n                notes[index].note = new_note\n                found = True\n                break\n\n        if found:\n            print(\"Note edited\")\n        else:\n            print(\"Invalid note index\")\n        self.save_notes()\n\n    def edit_note_by_keyword(self, keyword, new_note):\n        found = False\n        for notes in self.notes.values():\n            for note in notes:\n                if keyword.lower() in (tag.lower() for tag in note.tags):\n                    note.note = new_note\n                    found = True\n                    break\n\n        if found:\n            print(\"Note edited\")\n        else:\n            print(f\"No note found with keyword '{keyword}'\")\n        self.save_notes()\n\n    def 
delete_note_by_index(self, index):\n        found = False\n        for notes in self.notes.values():\n            if index >= 0 and index < len(notes):\n                del notes[index]\n                found = True\n                break\n\n        if found:\n            print(\"Note deleted\")\n        else:\n            print(\"Invalid note index\")\n        self.save_notes()\n\n    def delete_note_by_keyword(self, keyword):\n        found = False\n        for tag, notes in self.notes.items():\n            for index, note in enumerate(notes):\n                if keyword.lower() in (tag.lower() for tag in note.tags):\n                    del notes[index]\n                    found = True\n                    break\n\n        if found:\n            print(\"Note with the specified keyword deleted\")\n        else:\n            print(f\"No notes found with keyword '{keyword}'\")\n        self.save_notes()\n\n    def sort_notes_alphabetically(self):\n        sorted_notes = []\n        for tag, notes in self.notes.items():\n            sorted_notes.extend(sorted(notes, key=lambda x: x.note.lower()))\n\n        if sorted_notes:\n            print(\"Sorted notes:\")\n            for note in sorted_notes:\n                print(note.note)\n        else:\n            print(\"No notes found for sorting\")\n\n\ntest = NoteManager()\ntest.add_notes('В вечері буде мітинг о 22:10', ['Мітинг', 'Засідання'])\nprint(test)\ntest.add_notes('Хліб, Масло, картопля')\ntest.search_notes('мітинг')","repo_name":"Sinazija/final_Nots","sub_path":"nots.py","file_name":"nots.py","file_ext":"py","file_size_in_byte":4892,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"6631304228","text":"from picamera.array import PiRGBArray\nfrom picamera import PiCamera\nimport numpy as np\nimport time\nimport cv2\nimport rpyc\nfrom time import sleep\nimport math\nimport pygame\n\nmotorenabled = True\n\nif motorenabled: \n    con = rpyc.classic.connect('ev3dev.local')\n    motors = con.modules['ev3dev2.motor']\n    mediumMotor = motors.MediumMotor('outA')\n    lMotor = motors.LargeMotor('outB')\n    rMotor = motors.LargeMotor('outC')\n    sensors = con.modules['ev3dev2.sensor.lego']\n    us = sensors.UltrasonicSensor()\n    pygame.mixer.init()\n    #sound = con.modules['ev3dev2.sound']\n    #s = sound()\n    #s.speak('I am ON')\n    #us = sensors.UltrasonicSensor()\n    #ts = sensors.TouchSensor()\n\n    mediumMotor.reset()\n\n#Undistortion\n\ncamera = PiCamera()\ncamera.resolution = (1600,1200)\ncamera.framerate = 8\nrawCapture = PiRGBArray(camera, size = camera.resolution)\n\nDIM=(1600, 1200)\n\nK=np.array([[774.4231548805052, 0.0, 822.6167410427034], [0.0, 769.3288387349592, 565.6990482106042], [0.0, 0.0, 1.0]])\nD=np.array([[-0.17875854240547795], [0.02726679508811555], [-0.010188123245693159], [0.0024264322841337192]])\n\ndim2 = (880,660)\ndim3 = (880,880) #False\n\ndim1 = DIM\n \nif not dim2:\n    dim2 = dim1\nif not dim3: \n    dim3 = dim1\n \nscaled_K = K * dim1[0] / DIM[0]\nscaled_K[2][2] = 1\n\nnew_K = cv2.fisheye.estimateNewCameraMatrixForUndistortRectify(scaled_K, D, dim2, np.eye(3), balance=1)\n \nmap1, map2 = cv2.fisheye.initUndistortRectifyMap(scaled_K, D, np.eye(3), new_K, dim3, cv2.CV_16SC2)\n#End Undistortion\n\n\n# Instantiate OCV kalman filter\nclass KalmanFilter:\n\n    kf = cv2.KalmanFilter(4, 2)\n    kf.measurementMatrix = np.array([[1, 0, 0, 0], [0, 1, 0, 0]], np.float32)\n    kf.transitionMatrix = np.array([[1, 0, 1, 0], [0, 1, 0, 1], [0, 0, 1, 0], [0, 0, 0, 1]], np.float32)\n\n    def Estimate(self, coordX, coordY):\n        ''' This function estimates the position of the object'''\n        measured = np.array([[np.float32(coordX)], [np.float32(coordY)]])\n        self.kf.correct(measured)\n        predicted = self.kf.predict()\n        return predicted\n    \ndef run():\n    lMotor.run_forever(speed_sp= -150)\n    rMotor.run_forever(speed_sp= 150)\n    sleep(1)\n\ndef stop():\n    lMotor.stop()\n    rMotor.stop()\n    
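The KalmanFilter wrapper above hard-codes a constant-velocity model (state [x, y, vx, vy], 2-D position measurements), but it is never exercised in the live loop; every call to it further down is commented out. A standalone usage sketch with a synthetic noisy trajectory (the track itself is made up purely for illustration):

import numpy as np
import cv2

kf = cv2.KalmanFilter(4, 2)  # 4 state variables [x, y, vx, vy], 2 measured [x, y]
kf.measurementMatrix = np.array([[1, 0, 0, 0], [0, 1, 0, 0]], np.float32)
kf.transitionMatrix = np.array([[1, 0, 1, 0], [0, 1, 0, 1],
                                [0, 0, 1, 0], [0, 0, 0, 1]], np.float32)

rng = np.random.default_rng(0)
for t in range(20):
    # Synthetic measurement: a point drifting diagonally plus sensor noise.
    measured = np.array([[np.float32(t + rng.normal(0, 0.5))],
                         [np.float32(2 * t + rng.normal(0, 0.5))]])
    kf.correct(measured)
    predicted = kf.predict()  # 4x1 array: filtered position and velocity
    print(t, round(float(predicted[0, 0]), 2), round(float(predicted[1, 0]), 2))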
#sleep(2)\n\n\ndef func(nm): \n previous_diff = 0\n totm = 0\n i = 0\n height, width = nm.shape[:2]\n p=10\n lx = []\n ly = []\n rx = []\n ry = []\n ma = []\n my = []\n #a= np.zeros((int((1000//p)+1), 2), dtype = \"int32\")\n #b= np.zeros((int((1000//p)+1), 2), dtype = \"int32\")\n added = 0\n roiy = 350\n \n #kfObj = KalmanFilter()\n #predictedCoords = np.zeros((2, 1), np.float32)\n\n for val in range(0, height, p):\n hist = np.sum(nm[height-val:height-(val-p),:], axis=0)\n left_max = np.argmax(hist[:height//2])\n right_max = np.argmax(hist[height//2:]) + height//2\n y = height-(val-p)\n #cv2.line(warpedorg, (0, roiy), (width,roiy), (0,255,255), 1)\n if y < roiy:\n continue\n #cv2.line(nm,(0,y),(width,y),(255,255,255),1)\n\n y = height-val\n avgm = 0\n\n #cv2.circle(warpedorg, (left_max,y), 3, (0,0,0), -1)\n #cv2.rectangle(warpedorg,(left_max-60, y-10),(left_max+60, y+10),(255,255,255),1)\n writeL = False\n writeR = False\n boxth = 70\n if len(lx) != 0:\n if left_max > (lx[-1] - boxth):\n if left_max < (lx[-1] + boxth):\n cv2.circle(warpedorg, (left_max,y), 3, (0,0,0), -1)\n #cv2.rectangle(warpedorg,(left_max-60, y-10),(left_max+60, y+10),(255,255,255),1)\n lx.append(left_max)\n ly.append(y)\n writeL =True\n elif left_max > 7:\n lx.append(left_max)\n ly.append(y)\n writeL =True\n \n \n if len(rx) != 0 and right_max > 310:\n if right_max > (rx[-1] - boxth):\n if right_max < (rx[-1] + boxth):\n cv2.circle(warpedorg, (right_max,y), 3, (0,0,0), -1)\n #cv2.rectangle(warpedorg,(right_max-60, y-10),(right_max+60, y+10),(255,255,255),1)\n rx.append(right_max)\n ry.append(y)\n writeR =True\n elif len(rx) == 1:\n rx.append(right_max)\n ry.append(y)\n writeR =True\n elif len(rx) == 1:\n rx.append(right_max)\n ry.append(y)\n writeR =True\n elif right_max > 0 and right_max > 310:\n rx.append(right_max)\n ry.append(y)\n writeR =True\n \n #if len(ma) > 0:\n # avgm = (totm//len(ma))\n if writeL and writeR and y > roiy:\n m = ((right_max - left_max) //2) + left_max\n ma.append((m, y))\n cv2.circle(warpedorg, (m, y), 3, (255,255,255), -1)\n totm = (totm + m)\n #predictedCoords = kfObj.Estimate(m, y)\n #avgm = totm//len(ma)\n #print(avgm)\n #elif y > roiy and int(predictedCoords[0]) > 0 :\n #predictedCoords = kfObj.Estimate(0, y)\n #predictedCoords = kfObj.Estimate(int(predictedCoords[0]), y)\n #cv2.circle(warpedorg, (predictedCoords[0], y), 3, (255,255,255), -1)\n #totm = int(totm + predictedCoords[0])\n #ma.append((predictedCoords[0], y))\n #if len(ma) > 0 and y > roiy and added > 3:\n #ma.append((ma[len(ma)-1][0],y))\n #avgm = ma[len(ma)-1][0] #int(((totm // len(ma))) + (ma[len(ma)-1][0]))# * 0.5) - ma[len(ma)-1][0])\n #ma.append((avgm, y))\n #totm = ma[len(ma)-1][0] +totm\n #cv2.circle(warpedorg, (int(avgm), y), 3, (255,0,255), -1)\n if len(ma) > 1:\n cv2.line(warpedorg, ma[len(ma)-1], ma[len(ma)-2],(0,0,255),2)\n #print(len(ma))\n #sum_lane_center = 0\n #len(ma) = len(ma) - 2\n \n if len(ma) > 1: \n tan = math.degrees((math.atan2(ma[1][1]-ma[len(ma)-1][1], ma[1][0]-ma[len(ma)-1][0])))\n tan = (90-tan)\n print('tan = ' + str(tan))\n #for r in range (len(ma), 1, -1):\n # cv2.line(warpedorg, ma[r-1], ma[r-2],(0,0,255),2)\n # if r < len(ma):\n # sum_lane_center = (ma[r-1][0] + sum_lane_center)\n #print(ma[r-1][0])\n avg_lane_center = totm // len(ma)\n diff = 265 - avg_lane_center\n print('difference = ' + str(diff))\n diff = int(diff *0.7)\n #print('difference scaled = ' + str(diff))\n if diff > 0 and diff > 70:\n diff = 70\n elif diff < 0 and diff < -70:\n diff = -70\n print('adjusted diff = ' + 
str(diff))\n diff = diff + tan\n print('adjusted after tangent calculated = ' + str(diff) )\n print('set steering to ' + str(diff)+ '. average is ' + str(avg_lane_center) + ' - needs motor moving')\n\n #print('reached here')\n if motorenabled: \n if abs(diff - mediumMotor.position) > 8:\n print('position is ' + str(mediumMotor.position) + '. set steering to ' + str(diff)+ '. difference is ' + str(abs(diff - mediumMotor.position)) + '. average is ' + str(avg_lane_center) + ' - needs motor moving')\n #print('abs ' + str(abs(diff - mediumMotor.position)))\n #print('setting motor pos')\n #mediumMotor.stop()\n mediumMotor.on_to_position(speed = 8, position = int(diff) , brake = False)\n mediumMotor.stop()\n print('----')\n else:\n #if motorenabled:\n #stop()\n print('unable to detect proper lanes for driving') \n '''\n plt.plot(lx, ly, 'o')\n #plt.imshow(nm, cmap=\"gray\")\n lf = np.polyfit(lx, ly, 2)\n rf = np.polyfit(rx, ry, 2)\n plt.plot(lx, np.polyval(lf,lx), 'r-', linewidth = 4.0)\n plt.plot(rx, np.polyval(rf,rx), 'g-', linewidth = 4.0)\n print(lx)\n '''\n return nm\n\n#run()\ntime.sleep(0.2)\nt = 0\niteration = 0\nstopped = 0\nfor frame in camera.capture_continuous(rawCapture, format = \"bgr\", use_video_port = True):\n previous_diff = 0\n '''if ts.is_pressed:\n print('exiting')\n mediumMotor.stop()\n exit()]\n '''\n \n if motorenabled:\n if us.distance_centimeters < 30.0:\n stop()\n stopped = 1\n iteration = iteration + 1\n if iteration == 4:\n pygame.mixer.music.load('why.mp3')\n pygame.mixer.music.play()\n while pygame.mixer.music.get_busy() == True:\n continue\n #print('speak last')#time.sleep(2)\n #s.speak('OBSTACLE NOT REMOVED. STOPPING REAR MOTORS NOW', play_type = Sound.Sound.PLAY_WAIT_FOR_COMPLETE)\n #exit()\n if iteration < 3:\n pygame.mixer.music.load('obstacle.mp3')\n pygame.mixer.music.play()\n while pygame.mixer.music.get_busy() == True:\n continue\n #print('speak')#s.speak('OBSTACLE DETECTED. 
' + str(iteration), play_type = Sound.PLAY_WAIT_FOR_COMPLETE)\n elif stopped == 1 and us.distance_centimeters > 30.0:\n #run()\n print('running')\n iteration = 0\n stopped = 0\n \n img = frame.array \n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n img = cv2.remap(img, map1, map2, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT)\n point1 = (290, 320)\n point2 = (408, 320)\n point3 = (126, 413)\n point4 = (619, 413)\n cv2.circle(img, point1, 3, (0,0,0), -1) \n cv2.circle(img, point2, 3, (0,0,0), -1)\n \n cv2.circle(img, point3, 3, (0,0,0), -1)\n cv2.circle(img, point4, 3, (0,0,0), -1)\n #cv2.imshow('img', img)\n orig_pts = np.float32([[290, 320], [408, 320], [126, 413],[619,413]])\n dest_pts = np.float32([[0, 0], [600, 0], [0, 600], [600, 600]])\n\n M = cv2.getPerspectiveTransform(orig_pts, dest_pts)\n warpedorg = cv2.warpPerspective(img, M, (600,600))\n #cv2.imshow('img', warpedorg) \n th = 190\n #th = 80\n warpedorg[warpedorg < th] = 0 # Black\n warpedorg[warpedorg >= th] = 255 # White\n \n #cv2.imshow(\"bw\", warpedorg)\n \n func(warpedorg)\n #cv2.imshow(\"un\", warpedorg) \n \n cv2.line(warpedorg,(300,0),(300,600),(255,0,0),2)\n \n cv2.imshow(\"undistorted\", warpedorg)\n key = cv2.waitKey(1) & 0xFF\n \n rawCapture.truncate(0)\n if key == ord('q'):\n if motorenabled:\n mediumMotor.stop()\n stop()\n break\n elif key == ord('c'):\n cv2.imwrite(\"undist.jpg\", warpedorg)\n break\n print((time.time() * 1000) - t)\n t = (time.time() * 1000)\n","repo_name":"KaySyed/AutonomousLegoEv3","sub_path":"center/fisheye/test_undistort_3.py","file_name":"test_undistort_3.py","file_ext":"py","file_size_in_byte":10916,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"15091886848","text":"class Solution:\n def hIndex(self, citations: List[int]) -> int:\n low=0\n ans=0\n n=len(citations)\n if n==0:\n return 0\n high=len(citations)-1\n while low<=high:\n h=low+int((high-low)/2)\n if citations[h]>=(n-h):\n high=h-1\n ans=n-h\n else:\n low=h+1\n return ans\n","repo_name":"jaskaran13300/June-LeetCoding-Challenge","sub_path":"H-Index II.py","file_name":"H-Index II.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"29755187140","text":"\"\"\"\r\nAuthor: Bui Hieu Tho\r\nDate: 28/08/2021\r\nProgram: Open an IDLE window, and enter the program from Figure 1-7 that computes\r\nthe area of a rectangle. Load the program into the shell by pressing the F5 key,\r\nand correct any errors that occur. 
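The hIndex method in the LeetCode solution above binary-searches an ascending citation list for the largest h such that at least h papers have h or more citations each. The same search restated as a self-contained helper with a few hand-checkable cases (the function name is mine, not from the file):

from typing import List

def h_index_sorted(citations: List[int]) -> int:
    # Find the leftmost index h where citations[h] >= n - h;
    # the h-index is then n - h, exactly as in Solution.hIndex above.
    lo, hi, ans, n = 0, len(citations) - 1, 0, len(citations)
    while lo <= hi:
        mid = lo + (hi - lo) // 2
        if citations[mid] >= n - mid:
            ans = n - mid
            hi = mid - 1
        else:
            lo = mid + 1
    return ans

assert h_index_sorted([0, 1, 3, 5, 6]) == 3  # three papers cited >= 3 times
assert h_index_sorted([100]) == 1
assert h_index_sorted([]) == 0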
Test the program with different inputs by\r\nrunning it at least three times.\r\nSolution:\r\n    ....\r\n\"\"\"\r\nbase = float(input(\"Nhap day cua tam giac: \"))\r\nheight = float(input(\"Nhap chieu cao cua tam giac: \"))\r\narea = 0.5*base*height\r\nprint(\"Dien tich cua tam giac la: \", area)\r\n","repo_name":"BuiHieuTho44582/BuiHieu-Tho","sub_path":"BuiHieuTho_44582_CH01/Projects/dientichtamgiac.py","file_name":"dientichtamgiac.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"5918405717","text":"from django.shortcuts import render,HttpResponse,redirect\nfrom django.core.paginator import Paginator # Django内置分页功能模块\nfrom api import models\nimport json\nimport datetime\n# Create your views here.\n\n\n\n\ndef show_hosts(request):\n    '''\n    显示搜索下拉框的默认值\n    :param request:\n    :return:\n    '''\n    if request.method=='GET':\n        #平台\n        platform_set=set()\n        for name in models.platform.objects.all():\n            platform_set.add(name.platname)\n        #账号\n        account_set=set()\n        for account in models.account.objects.all():\n            account_set.add(account.username)\n        #项目\n        item_set=set()\n        for item in models.item.objects.all():\n            item_set.add(item.itemname)\n        #区域\n        area_set=set()\n        for area in models.area.objects.all():\n            area_set.add(area.property_area)\n        data={\n            \"platform\":platform_set,\n            \"account\":account_set,\n            \"item\":item_set,\n            \"area\":area_set\n        }\n\n        return render(request, 'show_hosts.html', {'data':data})\n\n\n\ndef get_hosts_json(request):\n    '''\n    从数据库里获取主机列表展示\n    layui需要的格式为 {\"code\":0,\"msg\":\"\",\"count\":1000,\"data\":[{\"hostname\":\"\",\"pri_ip\":\"\"},{},]}\n    :param request:\n    :return: 返回的必须是json格式的数据\n    '''\n\n    if request.method=='GET':\n\n        searchinfo={}\n        hostname=request.GET.get('hostname').strip()\n        pri_ip=request.GET.get('pri_ip').strip()\n        pub_ip=request.GET.get('pub_ip').strip()\n        status=request.GET.get('status').strip()\n        platform=request.GET.get('platform').strip()\n        account=request.GET.get('account').strip()\n        item=request.GET.get('item').strip()\n        area=request.GET.get('area').strip()\n\n\n        if pri_ip:\n            searchinfo.setdefault('pri_ip',pri_ip)\n        if pub_ip:\n            searchinfo.setdefault('pub_ip',pub_ip)\n        if status:\n            searchinfo.setdefault('status',status)\n        if platform:\n            platform_id=models.platform.objects.filter(platname=platform).first().id\n            searchinfo.setdefault('platform_id',platform_id)\n        if account:\n            account_id=models.account.objects.filter(username=account).first().id\n            searchinfo.setdefault('account_id',account_id)\n        if item:\n            item_id=models.item.objects.filter(itemname=item).first().id\n            searchinfo.setdefault('item_id',item_id)\n\n        projectid_list = []\n        if area:\n            for i in models.area.objects.filter(property_area=area):\n                projectid_list.append(i.project_id)\n\n\n        #hostname是模糊查询所以得单独拿出来,不能放在字典里\n        #区域,一个中文区域有几个账号就对应记得project_id,也比较特殊得单独拿出来\n        if projectid_list and searchinfo and hostname:\n            db_list=models.hosts.objects.filter(area_id__in=projectid_list,hostname__contains=hostname,**searchinfo)\n\n        if projectid_list and searchinfo and not hostname:\n            db_list = models.hosts.objects.filter(area_id__in=projectid_list, **searchinfo)\n\n\n        if projectid_list and not searchinfo and not hostname:\n            db_list=models.hosts.objects.filter(area_id__in=projectid_list)\n\n        if projectid_list and not searchinfo and hostname:\n            db_list=models.hosts.objects.filter(area_id__in=projectid_list,hostname__contains=hostname)\n\n\n        if searchinfo and not projectid_list and not hostname:\n            
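The if-ladder in get_hosts_json enumerates all eight combinations of the optional hostname, searchinfo and projectid_list filters. A hedged sketch of how Django Q objects could compose the same queryset in one place, using the model and field names from this view (untested against the real schema):

from django.db.models import Q

def build_host_queryset(searchinfo, hostname, projectid_list):
    # Q(**{}) is an empty condition, so with no filters this returns all hosts,
    # matching the final "show everything" branch of the original ladder.
    q = Q(**searchinfo)
    if hostname:
        q &= Q(hostname__contains=hostname)  # fuzzy match, as in the original
    if projectid_list:
        q &= Q(area_id__in=projectid_list)
    return models.hosts.objects.filter(q)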
db_list=models.hosts.objects.filter(**searchinfo)\n\n if searchinfo and not projectid_list and hostname:\n db_list=models.hosts.objects.filter(hostname__contains=hostname,**searchinfo)\n\n if hostname and not projectid_list and not searchinfo:\n db_list = models.hosts.objects.filter(hostname__contains=hostname)\n\n\n #空搜索和页面刚打开的时候显示这个,按照页数显示所有主机\n if not projectid_list and not searchinfo and not hostname:\n db_list=models.hosts.objects.all()\n\n host_list=[]\n for host in db_list:\n config=str(host.cpu)+'核'+str(host.memory)+'G'\n dic={\"id\":host.id,\"host_id\":host.host_id,\"hostname\": host.hostname, \"pri_ip\": host.pri_ip, \"pub_ip\": host.pub_ip,\n \"status\": host.status,\"image\": host.image_name, \"config\":config,\"platform\": host.platform.platname,\n \"account\": host.account.username,\"item\":host.item.itemname, \"region\": host.area.property_area,\"ctime\":host.ctime,}\n host_list.append(dic)\n #\n pageIndex = request.GET.get('page') # 前台传的值,当前页面处于第几页\n pageSize = request.GET.get('limit') # 前台传的值,每页展示多少条数据\n pageInator = Paginator(host_list, pageSize) # 导入分页模块分页操作,不写前端只展示一页数据\n contacts = pageInator.page(pageIndex) # 导入分页模块分页操作,不写前端只展示一页数据, 当前页 of 总页数\n '''\n pageIndex 1\n pageSize 20\n pageInator \n contacts \n '''\n '''\n res=[]\n for i in contacts:\n res.append(i)\n print(res)\n Result = {\"code\": 0, \"msg\": \"\", \"count\":dataCount, \"data\": res}\n # json.dumps(Result, cls=DateEncoder)没有时间字段问题可直接返回此代码。有就返回下面代码\n return HttpResponse(json.dumps(Result, cls=DateEncoder), content_type=\"application/json\")\n '''\n res = []\n for i in contacts:\n res.append(i)\n result = {\"code\":0,\"msg\":\"\", \"count\":len(host_list), \"data\":res}\n return HttpResponse(json.dumps(result, cls=DateEncoder), content_type=\"application/json\")\n\n\n# 解决时间字段json问题\nclass DateEncoder(json.JSONEncoder):\n def default(self, obj):\n if isinstance(obj, datetime.datetime):\n return obj.strftime(\"%Y-%m-%d %H:%M:%S\")\n else:\n return json.JSONEncoder.default(self, obj)\n\ndef hostToitem(request):\n '''\n 将主机加入项目\n :param request:\n :return:\n '''\n if request.method=='POST':\n print(request.POST)\n itemname=request.POST.get('item')\n hostid_list=request.POST.getlist('hostid_list')\n item_id=models.item.objects.filter(itemname=itemname).first().id\n s=models.hosts.objects.filter(id__in=hostid_list).update(item_id=item_id)\n if s==0:\n msg='加入失败'\n else:\n msg='加入成功!'\n\n return HttpResponse(msg)\n\n\ndef hostinfo(request,id):\n '''\n 展示某台主机的详细信息\n :param request:\n :return:\n '''\n obj=models.hosts.objects.filter(id=id).first()\n return render(request,'hostinfo.html',{'obj':obj})\n\n\n\n\ndef show_dbs(request):\n\n if request.method=='GET':\n #项目\n item_set=set()\n for item in models.item.objects.all():\n item_set.add(item.itemname)\n data={\n \"item\":item_set,\n }\n return render(request,'show_dbs.html',{'data':data})\n\ndef get_dbs_json(request):\n if request.method=='GET':\n type=request.GET.get('type','')\n dbname=request.GET.get('dbname','')\n if type and dbname:\n db_list=models.dbs.objects.filter(dbname__contains=dbname,type=type)\n if type and not dbname:\n db_list = models.dbs.objects.filter(type=type)\n if dbname and not type:\n db_list = models.dbs.objects.filter(dbname__contains=dbname)\n if not dbname and not type:\n db_list = models.dbs.objects.all()\n\n\n show_db_list=[]\n for db in db_list:\n config=str(db.cpu)+'核'+str(db.memory)+'G'+str(db.disk)+'G'\n dic={\"id\":db.id,\"db_id\":db.db_id,\"dbname\": db.dbname,\"type\":db.type,\"mode\":db.mode,\"pri_ip\": db.pri_ip, \"pub_ip\": 
db.pub_ip,\n \"status\": db.status,\"version\": db.version, \"config\":config,\"platform\": db.platform.platname,\n \"account\": db.account.username,\"item\":db.item.itemname, \"region\": db.area.property_area,\"ctime\":db.ctime,}\n show_db_list.append(dic)\n #\n pageIndex = request.GET.get('page') # 前台传的值,当前页面处于第几页\n pageSize = request.GET.get('limit') # 前台传的值,每页展示多少条数据\n pageInator = Paginator(show_db_list, pageSize) # 导入分页模块分页操作,不写前端只展示一页数据\n contacts = pageInator.page(pageIndex) # 导入分页模块分页操作,不写前端只展示一页数据, 当前页 of 总页数\n\n res = []\n for i in contacts:\n res.append(i)\n result = {\"code\":0,\"msg\":\"\", \"count\":len(show_db_list), \"data\":res}\n return HttpResponse(json.dumps(result, cls=DateEncoder), content_type=\"application/json\")\n\ndef dbToitem(request):\n '''\n 将数据库加入项目\n :param request:\n :return:\n '''\n if request.method=='POST':\n itemname=request.POST.get('item')\n dbid_list=request.POST.getlist('dbid_list')\n item_id=models.item.objects.filter(itemname=itemname).first().id\n s=models.dbs.objects.filter(id__in=dbid_list).update(item_id=item_id)\n if s==0:\n msg='加入失败'\n else:\n msg='加入成功!'\n\n return HttpResponse(msg)","repo_name":"yangqiqigithub/python","sub_path":"项目/运维平台cmdb-bqjadmin/bqjadmin/bqjadmin/cmdb/views/show_hosts.py","file_name":"show_hosts.py","file_ext":"py","file_size_in_byte":9662,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"7938811007","text":"import torch\n\n\ndef split_dataset(dataset, validation_rate):\n \"\"\"split_dataset to training dataset and validation dataset\n\n Arguments:\n ----------\n dataset {torch.utils.data.Dataset} -- dataset\n validation_rate {float} -- the rate of validation\n\n Returns:\n --------\n {torch.utils.data.Dataset} -- training dataset\n {torch.utils.data.Dataset} -- validation dataset\n \"\"\"\n if validation_rate in (None, 0.0):\n return dataset, None\n\n n_samples = len(dataset)\n val_size = int(n_samples * validation_rate)\n train_size = n_samples - val_size\n train_dataset, val_dataset = torch.utils.data.random_split(\n dataset, [train_size, val_size]\n )\n return train_dataset, val_dataset\n","repo_name":"Akasan/TorchUtils","sub_path":"TorchUtils/DatasetGenerator/_SplitDataset.py","file_name":"_SplitDataset.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"5643915347","text":"import tkinter as tk\nimport pygubu\n\nimport os\n\n\nclass HelloWorldApp:\n\n def __init__(self):\n\n #1: Create a builder\n self.builder = builder = pygubu.Builder()\n\n #2: Load an ui file\n builder.add_from_file(os.path.abspath('gui/helloworld.ui'))\n\n #3: Create the mainwindow\n self.mainwindow = builder.get_object('mainwindow')\n\n def run(self):\n self.mainwindow.mainloop()\n\n\nif __name__ == '__main__':\n app = HelloWorldApp()\n app.run()\n","repo_name":"raychorn/chrome_gui","sub_path":"gui/helloworld.py","file_name":"helloworld.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"27872316087","text":"#백준 1325번 효율적인 해킹\nimport sys\ninput = sys.stdin.readline\nfrom collections import deque\n\nn, m = map(int, input().split())\ngraph = [[] for _ in range(n + 1)]\n\ndef bfs(v):\n queue = deque([v])\n cnt = 1\n visited = [False] * (n + 1)\n visited[v] = True\n while queue:\n x = queue.popleft()\n for i in graph[x]:\n if not visited[i]:\n visited[i] = True\n queue.append(i)\n 
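The bfs(v) routine in baekjoon1325.py above counts how many computers can reach v once the trust edges are reversed. A self-contained check of that idea on the problem's tiny sample graph (five nodes; the expected answer is "1 2"):

from collections import deque

# Edge (a, b) means "hacking b also hacks a", so store b -> a and count
# how many nodes each start vertex reaches, exactly as in the file above.
edges = [(3, 1), (3, 2), (4, 3), (5, 3)]
n = 5
graph = [[] for _ in range(n + 1)]
for a, b in edges:
    graph[b].append(a)

def reachable_count(v):
    seen = [False] * (n + 1)
    seen[v] = True
    queue, cnt = deque([v]), 1
    while queue:
        x = queue.popleft()
        for nxt in graph[x]:
            if not seen[nxt]:
                seen[nxt] = True
                queue.append(nxt)
                cnt += 1
    return cnt

counts = [reachable_count(v) for v in range(1, n + 1)]
best = max(counts)
print([v for v, c in zip(range(1, n + 1), counts) if c == best])  # [1, 2]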
cnt += 1\n    return cnt\n\nfor _ in range(m):\n    a, b = map(int, input().split())\n    graph[b].append(a)\n\nans = []\nfor i in range(1, n + 1):\n    ans.append(bfs(i))\n\nbest = max(ans)\nfor i in range(len(ans)):\n    if best == ans[i]:\n        print(i + 1, end=' ')","repo_name":"RE-Heat/CodingTest","sub_path":"백준/baekjoon1325.py","file_name":"baekjoon1325.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"34984791756","text":"import tensorflow as tf\n\nclass Node:\n    def __init__(self, index, depth, is_root, is_leaf):\n        self.index = index\n        self.depth = depth\n        self.isRoot = is_root\n        self.isLeaf = is_leaf\n        self.fOpsList = []\n        self.hOpsList = []\n        self.lossList = []\n        self.infoGainLoss = None\n        self.labelTensor = None\n        self.labelMappingTensor = None\n        self.compressedLabelsTensor = None\n        self.oneHotLabelTensor = None\n        self.indicesTensor = None\n        self.isOpenIndicatorTensor = None\n        self.maskTensors = {}\n        self.masksWithoutThreshold = {}\n        self.filteredMask = None\n        self.evalDict = {}\n        self.parentNonThresholdMaskVector = None\n        self.probabilityThreshold = None\n        self.softmaxDecay = None\n        self.probThresholdCalculator = None\n        self.softmaxDecayCalculator = None\n        self.finalFeatures = None\n        self.residueOutputTensor = None\n        self.weightDecayModifier = 1.0\n        self.infoGainBalanceCoefficient = None\n        self.p_n_given_x = None\n        self.leafCountUnderThisNode = 0\n        # Indexed by the nodes producing them\n        self.activationsDict = {}\n        self.proxyLossInputDicts = {}\n","repo_name":"ufukcbicici/phd_work","sub_path":"simple_tf/node.py","file_name":"node.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"14203425578","text":"def computador_escolhe_jogada(n, m):\n    if n < m:\n        pecas_computador = n\n        return pecas_computador\n    else:\n        pecas_computador = m\n        resto = 1\n        while pecas_computador > 0 and resto != 0:\n            n = n - pecas_computador\n            resto = n % (m + 1)\n            n = n + pecas_computador\n            if resto == 0:\n                return pecas_computador\n            else:\n                pecas_computador = pecas_computador - 1\n\n    pecas_computador = m\n    return pecas_computador\n\ndef usuario_escolhe_jogada (n, m):\n    pecas_usuario = int(input(\"Quantas pecas deseja retirar? \"))\n    while pecas_usuario > n:\n        print(\"Valor inválido. Só restam \", n,\" pecas.\", m)\n        pecas_usuario = int(input(\"Quantas peças voce quer retirar? \")) \n    \n    while pecas_usuario > m or pecas_usuario < 1:\n        print(\"Você deve retirar no mínimo uma peça e no máximo \", m)\n        pecas_usuario = int(input(\"Quantas peças voce quer retirar? \"))\n    \n    return pecas_usuario\n\ndef partida():\n    n = int(input(\"Quantas pecas estarão em jogo? \"))\n    m = int(input(\"Qual a quantidade maxima de pecas deve ser retirada por jogada? \"))\n\n    while m > n:\n        print(\"m deve ser menor que n! Tente novamente.\")\n        n = int(input(\"Quantas pecas estarão em jogo? \"))\n        m = int(input(\"Qual a quantidade maxima de pecas a ser retirada por jogada? \"))\n\n    while n < 0 or m < 0:\n        print(\"Oops! Escolha inválida! Tente de novo.\")\n        if n < 0:\n            n = int(input(\"Quantas pecas estarão em jogo? \"))\n        if m < 0:\n            m = int(input(\"Qual a quantidade maxima de pecas a ser retirada por jogada? 
\"))\n\n    mod = n % (m + 1)\n    n_inicial = n\n\n    while n != 0:\n        if n == n_inicial:\n            if mod == 0:\n                print(\"Você começa!\")\n                jogada = 1\n            else:\n                print(\"O computador começa!\")\n                jogada = 0\n        if jogada == 1:\n            pecas_usuario = usuario_escolhe_jogada(n, m)\n            n = n - pecas_usuario\n            print(\"Usuario removeu \",pecas_usuario,\" pecas. Restam \", n, \" pecas.\")\n            if n == 0:\n                print(\"Você ganhou!\")\n                return jogada\n            else:\n                jogada = 0\n        else:\n            pecas_computador = computador_escolhe_jogada(n, m)\n            n = n - pecas_computador\n            print(\"Computador removeu \", pecas_computador, \" pecas. Restam \", n, \" pecas.\")\n            if n == 0:\n                print(\"O computador ganhou!\")\n                return jogada\n            else:\n                jogada = 1\n    \ndef campeonato():\n    print(\"Você escolheu campeonato!\")\n    print(\"\")\n    usuario = 0\n    computador = 0\n    rodadas = 0\n    \n    while rodadas != 3:\n        rodadas = rodadas + 1\n        print(\"**** Rodada \", rodadas, \" ****\")\n        vencedor = partida()\n        if vencedor == 0:\n            print(\"Fim do jogo! O computador ganhou!\")\n            computador = computador + 1\n        else:\n            print(\"Fim do jogo! Você ganhou!\")\n            usuario = usuario + 1\n\n    print(\"**** Final do campeonato! ****\")\n    print(\"Placar: Você \", usuario,\" X \", computador,\" Computador\")\n\n    \nprint(\"Bem vindo ao Jogo do NIM! Escolha:\" )\nprint(\"1 - para jogar uma partida isolada\")\nprint(\"2 - para jogar um campeonato\")\nescolha = int(input(\"\"))\n\nwhile escolha != 1 and escolha != 2:\n    escolha = int(input(\"Escolha inválida. Escolha 1 ou 2, para partida isolada ou campeonato:\"))\n\nif escolha == 1:\n    escolha = partida()\nif escolha == 2:\n    escolha = campeonato()\n\n","repo_name":"guzi95/jogo-do-nim","sub_path":"jogodonim.py","file_name":"jogodonim.py","file_ext":"py","file_size_in_byte":3634,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"73074555479","text":"import sqlite3\nimport os.path\nimport sqlalchemy as db\n\nclass SqLiteHandler:\n\n    def __init__(self):\n        \"\"\"Initialize database\"\"\"\n        self.db = sqlite3.connect(os.path.dirname(__file__) + r'\\user.db')\n\n    def create_database(self):\n        \"\"\"create table\"\"\"\n        self.db.execute(\"CREATE TABLE user_credentials (name, pw)\")\n\n    def add_user(self, user, pw):\n        \"\"\"\n        Insert into user database\n        Args:\n            user: username\n            pw: password\n        >>> Example: self.add_user(admin, admin)\n\n        \"\"\"\n        self.db.execute(\"INSERT INTO user_credentials VALUES (?, ?)\", [user, pw])\n        self.db.commit()\n\n    def sql_query(self, table, record_name, columns):\n        \"\"\"\n        Sql Query SELECT COLUMNS FROM TABLE WHERE RECORD NAME\n        Args:\n            table(str):\n            record_name:(str)\n            columns(str):\n\n        Returns:\n            records(sqlobj)\n\n        \"\"\"\n        cursorObj = self.db.cursor()\n        cursorObj.execute('SELECT {0} FROM {1} WHERE name=\"{2}\"'.format(columns, table, record_name))\n        records = cursorObj.fetchall()\n        return records\n\nclass MySqlHandler:\n\n    \"\"\"Class implements SQLAlchemy solutions\n    used to read from a database server and perform queries\"\"\"\n\n    def __init__(self, *args):\n        \"\"\"Initialize variables\n        Running XAMPP with a MySQL server\"\"\"\n        self.engine = db.create_engine('mysql+pymysql://root:''@127.0.0.1:3306/northwind', echo=True)\n        self.connection = self.engine.connect()\n        self.metadata = db.MetaData()\n        self.tables = db.Table(*args, self.metadata, autoload=True, autoload_with=self.engine)\n\n    def select_query(self):\n        \"\"\"Perform a select table where query\"\"\"\n        query = db.select([self.tables])\n        print(query)\n        ResultProxy = self.connection.execute(query)\n        ResultSet = 
ResultProxy.fetchall()\n return ResultSet\n\n def set_store_details(self):\n \"\"\"Perform a select table where query\"\"\"\n query = db.select([self.tables.columns.ProductName,\n self.tables.columns.QuantityPerUnit,\n self.tables.columns.UnitPrice,\n self.tables.columns.UnitsInStock])\n print(query)\n ResultProxy = self.connection.execute(query)\n ResultSet = ResultProxy.fetchall()\n return ResultSet\n\n def get_column_name(self, table_name, filter = ['ProductName','QuantityPerUnit','UnitPrice','UnitsInStock']):\n \"\"\"\n\n Args:\n table_name(str): name of the table\n\n Returns:\n\n \"\"\"\n from sqlalchemy.inspection import inspect\n table = inspect(table_name)\n columns = []\n for column in table.c:\n if filter is not None:\n if column.name not in filter:\n continue\n columns.append(column.name)\n return columns\n\n\n\nif __name__==\"__main__\":\n mydb = SqLiteHandler()\n # # mydb.create_database()\n # # mydb.add_user()\n # asd = mydb.sql_query(\"user_credentials\",\"admin\",\"name,pw\")\n # print(asd[0][0])\n mytest = MySqlHandler()\n print(mytest.select_query('products'))\n mytest.get_column_name('products')\n","repo_name":"DavidJohnson86/Cherrypy","sub_path":"login_server/models/database_handler.py","file_name":"database_handler.py","file_ext":"py","file_size_in_byte":3223,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"7546818662","text":"'''\nBefore you leave, the Elves in accounting just need you to fix your expense report (your puzzle input); apparently, something isn't quite adding up.\n\nSpecifically, they need you to find the two entries that sum to 2020 and then multiply those two numbers together.\n\nFor example, suppose your expense report contained the following:\n\n1721\n979\n366\n299\n675\n1456\nIn this list, the two entries that sum to 2020 are 1721 and 299. Multiplying them together produces 1721 * 299 = 514579, so the correct answer is 514579.\n\nOf course, your expense report is much larger. 
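The script below answers this puzzle with a sort plus a pair scan that breaks early. An alternative single-pass sketch using a set of complements, O(n) overall; the helper name is mine, and the sample data is the example list from the puzzle statement above:

def product_of_pair_summing_to(entries, target=2020):
    # One pass: remember every value seen so far and check whether the
    # complement of the current value has already appeared.
    seen = set()
    for value in entries:
        if target - value in seen:
            return value * (target - value)
        seen.add(value)
    return None

assert product_of_pair_summing_to([1721, 979, 366, 299, 675, 1456]) == 514579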
Find the two entries that sum to 2020; what do you get if you multiply them together?\n'''\ninputData = []\n\nwith open(\"Data\\input.txt\", \"r\") as inFile:\n for num in inFile:\n inputData.append(int(num))\n\ninputData = sorted(inputData)\n\ntarget = 2020\n\nfor first in inputData:\n for last in reversed(inputData):\n if first + last < target:\n break\n if first + last == target:\n print(first*last)\n exit()\n\nprint(\"unable to find the correct combination of numbers\")","repo_name":"olber027/AdventOfCode2020","sub_path":"Day_01/Part1.py","file_name":"Part1.py","file_ext":"py","file_size_in_byte":1076,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"34735256772","text":"\"\"\"\nTic Tac Toe Player\n\"\"\"\n\nimport math\n\nX = \"X\"\nO = \"O\"\nEMPTY = None\n\n\ndef initial_state():\n \"\"\"\n Returns starting state of the board.\n \"\"\"\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]\n\n\ndef player(board):\n \"\"\"\n Returns player who has the next turn on a board.\n \"\"\"\n # If the game ends, we don't care about the player\n if terminal(board):\n return None\n\n # Looping over the board to count X and O, if both 0 then the player is X\n xcount = sum([1 for i in range(3) for j in range(3) if board[i][j] == X])\n ocount = sum([1 for i in range(3) for j in range(3) if board[i][j] == O])\n\n # even if both are 0, then x will play\n return X if ocount == xcount else O\n\n\ndef actions(board):\n \"\"\"\n Returns set of all possible actions (i, j) available on the board.\n \"\"\"\n # Check if the game is finished\n if terminal(board):\n return None\n\n # The set of all possible actions\n return {(i, j) for i in range(3) for j in range(3) if board[i][j] == EMPTY}\n\n\ndef result(board, action):\n \"\"\"\n Returns the board that results from making move (i, j) on the board.\n \"\"\"\n # Check if action is not valid\n if (action[0] < 0 or action[0] > 2) or (action[1] < 0 or action[1] > 2):\n raise Exception(\" Out of bound exception.\")\n\n # The player with the current turn\n turn = player(board)\n\n # copy the board\n result = initial_state()\n for row in range(3):\n for col in range(3):\n result[row][col] = board[row][col]\n\n # Making the action\n result[action[0]][action[1]] = turn\n\n return result\n\n\ndef winner(board):\n \"\"\"\n Returns the winner of the game, if there is one.\n \"\"\"\n # Listing the columns\n cols = [[board[0][0], board[1][0], board[2][0]],\n [board[0][1], board[1][1], board[2][1]],\n [board[0][2], board[1][2], board[2][2]]]\n\n # Check for diagonals\n if board[0][0] == board[1][1] and board[0][0] == board[2][2] and board[0][0] != EMPTY:\n return board[0][0]\n\n if board[0][2] == board[1][1] and board[0][2] == board[2][0] and board[0][2] != EMPTY:\n return board[0][2]\n\n # Loop for checking columns\n for col in cols:\n # Check if all elements in column are equal\n if all(cell == col[0] and col[0] != EMPTY for cell in col):\n return col[0]\n\n # Loop for checking rows\n for row in board:\n # Check if all elements in a row are equal\n if all(cell == row[0] and row[0] != EMPTY for cell in row):\n return row[0]\n\n # if there still no winner\n return None\n\n\ndef terminal(board):\n \"\"\"\n Returns True if game is over, False otherwise.\n \"\"\"\n # Check if there is a winner\n # Check if every cell is filled\n return True if winner(board) != None or all(cell for row in board for cell in row) else False\n\n\ndef utility(board):\n \"\"\"\n Returns 1 if X has won the 
game, -1 if O has won, 0 otherwise.\n \"\"\"\n # Assuming the we have a terminal board\n return 1 if winner(board) == X else -1 if winner(board) == O else 0\n\n\ndef minimax(board):\n \"\"\"\n Returns the optimal action for the current player on the board.\n \"\"\"\n # Method to retreive the max/min-value\n def value(board, maxmin):\n\n # Check if the game is terminated, then return utility\n if terminal(board):\n return utility(board)\n\n # Set of all possible actions\n actions_set = actions(board)\n\n # Return the min/max-value of all the max/min-values in the list of resulting utilities from actions\n return min([\n value(result(board, action), \"max\") for action in actions_set\n ]) if maxmin == \"min\" else max([\n value(result(board, action), \"min\") for action in actions_set\n ])\n\n # Checking for the turn\n turn = player(board)\n\n # List of all possible actions\n actions_set = actions(board)\n\n # Looping over all possible actions\n for action in actions_set:\n new_board = result(board, action)\n\n # If turn is X ==> we look for max-value\n if turn == X:\n # If the min-value resulting from the board after making this action\n # is the same as max-value of the current board ==> return the action\n if value(new_board, \"min\") == value(board, \"max\"):\n return action\n\n # If turn is O ==> we look for min-value\n elif turn == O:\n # If the max-value resulting from the board after making this action\n # is the same as min-value of the current board ==> return the action\n if value(new_board, \"max\") == value(board, \"min\"):\n return action\n","repo_name":"othmanKisha/CS50-AI-Projects","sub_path":"Project 0/Tic Tac Toe/tictactoe.py","file_name":"tictactoe.py","file_ext":"py","file_size_in_byte":4723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"15964579175","text":"from django.shortcuts import render\nfrom webconfig.Query import SQL\nfrom django.http import HttpResponse\nimport json\ndef mostrarButaca(request):\n return render(request,\"butaca/butaca.html\",{\n\n })\n\ndef verbutacas(request):\n idfuncion=request.GET.get(\"idfuncion\")\n return render(request,\"butaca/verButacas.html\",{\n \"idfuncion\":idfuncion\n })\n# Create your views here.\n\ndef buscarbutacas(request):\n idfuncion = request.GET.get(\"idfuncion\")\n odasql=SQL()\n lista=odasql.listarJSONWeb(\"exec uspRecuperarButacas @idfuncion='{0}'\"\n .format(idfuncion))\n return HttpResponse(lista)\n\ndef eliminarbutaca(request):\n idfuncion = request.GET.get(\"idfuncion\")\n idbutaca = request.GET.get(\"idbutaca\")\n odasql=SQL()\n rpta=odasql.enviarPost(\"exec uspDeshabilitarButaca @idfuncion='{0}',\"\n \"@idbutaca ='{1}'\".format(idfuncion,idbutaca))\n return HttpResponse(rpta)\n","repo_name":"lucin21/Cine-SQLServer","sub_path":"butaca/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"39613117159","text":"import streamlit as st\nimport numpy as np\nimport pandas as pd\nfrom pages import utils\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.decomposition import PCA\nfrom sklearn.svm import SVC\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.metrics import accuracy_score, confusion_matrix,classification_report\n\ndef app():\n st.write(\"\"\"\n ### Model Building\n \"\"\")\n dataset_name = st.sidebar.selectbox(\"Select Dataset\",(\"Breast 
Cancer\",\"Iris\",\"Wine\",\"Upload your dataset(.csv)\"),index=1)\n try:\n x,y,feature_names,target_name,target_unique = utils.get_dataset(dataset_name)\n df = pd.DataFrame(x,columns = feature_names)\n\n model_name = st.sidebar.selectbox(\"Select classifier\",(\"SVM\",\"KNN\",\"Tree\"))\n st.write(f\"\"\"\n Your selected classifier is **{model_name }**\n \"\"\")\n\n selected_columns = st.multiselect(\"Select preferred columns\", df.columns, default= list(df.columns))\n df = df[selected_columns]\n\n all_columns = df.columns.to_list()\n st.dataframe(df,800,300)\n\n\n\n params = utils.add_parameter(model_name)\n\n if st.sidebar.button(\"build model\"):\n\n model = utils.get_model(model_name,params)\n\n x_train, x_test, y_train, y_test = train_test_split(df,y,test_size=0.2,random_state=0)\n\n model.fit(x_train,y_train)\n\n y_pred = model.predict(x_test)\n\n score =classification_report(y_test,y_pred,output_dict=True)\n df_score = pd.DataFrame(score).transpose()\n\n st.write(\"\"\"### Your model performance\"\"\")\n st.dataframe(df_score.iloc[:-3])\n st.write(\"\"\"#### Your accuracy \"\"\",round(df_score.iloc[-3,0]*100,2),\"%\")\n st.write(\"\"\"Your test set prediction \"\"\")\n x_test['target'] = y_test\n x_test['predicted'] = y_pred\n x_test['result'] = y_test == y_pred\n st.dataframe(x_test)\n else:\n pass\n except:\n pass\n","repo_name":"Tammypepo/EDA_webapp_steamlit_share","sub_path":"pages/build_model.py","file_name":"build_model.py","file_ext":"py","file_size_in_byte":2050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"40698123280","text":"\"\"\"\nPregunta 3\nIn this programming problem you'll code up the dynamic programming algorithm for\ncomputing a maximum-weight independent set of a path graph.\n\nUse the file c03_w03_homework_input_3.txt\n\nThis file describes the weights of the vertices in a path graph (with the weights listed\nin the order in which vertices appear in the path). It has the following format:\n\n[number_of_vertices]\n[weight of first vertex]\n[weight of second vertex]\n...\n\nFor example, the third line of the file is \"6395702,\" indicating that the weight of the\nsecond vertex of the graph is 6395702.\n\nYour task in this problem is to run the dynamic programming algorithm (and the\nreconstruction procedure) from lecture on this data set. The question is: of the\nvertices 1, 2, 3, 4, 17, 117, 517, and 997, which ones belong to the maximum-weight\nindependent set? (By \"vertex 1\" we mean the first vertex of the graph---there is no\nvertex 0.) 
import sys\n\n\nclass MaxWeightIndependentSet:\n    def __init__(self, filename: str):\n        with open(filename) as path_file:\n            total_vertices, *vertices_weight = path_file.readlines()\n\n        self.path_graph = [int(weight) for weight in vertices_weight]\n\n        self.total_vertices = int(total_vertices)\n\n        assert len(self.path_graph) == self.total_vertices\n\n    def max_wis_weight_calc(self):\n        self.partial_wis_weight = []\n\n        for index, vertex_weight in enumerate(self.path_graph):\n            if index == 0:\n                # For a path with only one element select that element\n                self.partial_wis_weight.append(vertex_weight)\n                continue\n\n            if index == 1:\n                # For a path with only two elements select the bigger one\n                self.partial_wis_weight.append(max(self.path_graph[:2]))\n                continue\n\n            # For paths with more than two elements select the best WIS by including\n            # or excluding the last element\n            excluding_current_vertex = self.partial_wis_weight[index-1]\n            including_current_vertex = self.partial_wis_weight[index-2] + vertex_weight\n\n            self.partial_wis_weight.append(max(\n                (excluding_current_vertex, including_current_vertex)\n            ))\n\n        assert len(self.partial_wis_weight) == self.total_vertices\n\n        return self.partial_wis_weight\n\n\n    def reconstruction_algorithm(self):\n        self.max_wis_vertices = set()\n\n        index = self.total_vertices - 1\n\n        while index >= 1:\n            wis_including_current = self.partial_wis_weight[index]\n            wis_excluding_current = self.partial_wis_weight[index - 1]\n\n            if wis_excluding_current >= wis_including_current:\n                index -= 1\n            else:\n                self.max_wis_vertices.add(index)\n                index -= 2\n\n        if index == 0:\n            self.max_wis_vertices.add(index)\n\n        return self.max_wis_vertices\n\n\n    def check_if_in_max_wis(self, vertices=(1, 2, 3, 4, 17, 117, 517, 997)):\n        # max_wis_vertices indexes start at 0, but in the homework vertex 1 is\n        # the first one\n        solution = \"\"\n        for vertex in vertices:\n            if vertex - 1 in self.max_wis_vertices:\n                solution += \"1\"\n            else:\n                solution += \"0\"\n\n        return solution\n\n\nif __name__ == \"__main__\":\n    if len(sys.argv) > 1 and sys.argv[1] == \"test\":\n        filename = \"c03_w03_testing_input_3.txt\"\n        vertices_to_check = [1, 2, 3, 4, 5, 6]\n\n    else:\n        filename = \"c03_w03_homework_input_3.txt\"\n        vertices_to_check = [1, 2, 3, 4, 17, 117, 517, 997]\n\n    problem = MaxWeightIndependentSet(filename)\n    problem.max_wis_weight_calc()\n    problem.reconstruction_algorithm()\n    print(problem.check_if_in_max_wis(vertices_to_check))\n","repo_name":"sofide/algorithms","sub_path":"c03_w03_max_wis_3.py","file_name":"c03_w03_max_wis_3.py","file_ext":"py","file_size_in_byte":4164,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"}{"seq_id":"20069694024","text":"from functools import reduce\nfrom itertools import product\nfrom operator import xor, or_\n\nfrom migen import *\nfrom migen.genlib.cdc import MultiReg\n\nfrom litex.soc.interconnect.csr import *\n\nfrom .oscillator import MetastableOscillator\n\n# see https://www.wolframalpha.com/input/?i=truth+table+p+xor+q+xor+r+xor+s\ndef xor_lut(n):\n    truth_table = product([False, True], repeat=n)\n    predicates = [reduce(xor, o) for o in truth_table]\n    return sum(v<
5: candidates[-1].q -= 1\n # print(candidates[-1])\n # if len(candidates) == 0: return None\n # return sorted(candidates, key=lambda c: c.priority())[-1]\n matches = arcade.get_sprites_at_point((point.x, point.y), self.sprite_lists[self.scale])\n candidates = []\n for sprite in matches:\n if (sprite.center_x, sprite.center_y) not in self.coords_at[self.scale]: continue\n candidate = self.coords_at[self.scale][(sprite.center_x, sprite.center_y)]\n if candidate not in self.sprites_at: continue\n candidates.append(candidate)\n if len(candidates) == 0: return None\n return sorted(candidates, key=lambda c: c.priority())[-1]\n\n def zoom(self, towards: bool=True):\n prev_scale = self.scale\n if towards: self.scale = min(self.scale+1, len(SPRITE_SCALES)-1)\n else: self.scale = max(self.scale-1, 0)\n if self.scale == prev_scale: return False\n return True\n\n def move_camera(self, change):\n self.camera_position += Vec2(change.x*CAMERA_SPEED, change.y*CAMERA_SPEED)\n \n center_offset = Vec2(-self.camera.viewport_width/2, -self.camera.viewport_height/2)\n self.camera.move_to(self.camera_position + center_offset, 1)\n\n def draw(self):\n \"\"\"Function for drawing the tilemap\"\"\"\n self.camera.use()\n self.sprite_lists[self.scale].draw()\n self.cursor_overlay_sprites[self.scale].draw() \n","repo_name":"Tsilkow/Terraform","sub_path":"tilemap.py","file_name":"tilemap.py","file_ext":"py","file_size_in_byte":13220,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"12417343417","text":"# car.py\n# sudo ps aux | grep python | awk '{print $2}' | xargs kill -9\n\nimport time\nimport numpy as np\nimport cv2\n\nclass camera:\n def __init__(self):\n try:\n self.usb_camera = cv2.VideoCapture(0)\n except:\n try:\n self.usb_camera = cv2.VideoCapture(1)\n except:\n print (\"there's no camera\")\n time.sleep(0.1)\n\ndef rgbtohsv(frame):\n hsv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n return (hsv_frame)\n\ndef rgbtohsl(frame):\n hsl_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HLS)\n return (hsl_frame)\n\ndef test_color(r, g, b):\n test_im = np.zeros((10, 30, 3))\n print (type(test_im))\n #change_im = np.zeros((10, 10))\n for i in range(0, 10):\n for j in range(0, 10):\n test_im[i, j, 0], test_im[i, j, 1], test_im[i, j, 2] = 0, 0, 255\n for i in range(0, 10):\n for j in range(10, 20):\n test_im[i, j, 0], test_im[i, j, 1], test_im[i, j, 2] = 0, 255, 0\n for i in range(0, 10):\n for j in range(20, 30):\n test_im[i, j, 0], test_im[i, j, 1], test_im[i, j, 2] = 255, 0, 0 \n cv2.imwrite('test_im.png', test_im)\n test_im = test_im.astype(np.uint8)\n change_im = cv2.cvtColor(test_im, cv2.COLOR_BGR2HSV)\n cv2.imwrite('changed_im.png', change_im)\n print (\"red hsv: \", change_im[5, 5])\n print (\"green hsv: \", change_im[5, 15])\n print (\"blue hsv: \", change_im[5, 25])\n\n\ndef find_green(weight, height, r, g, b):\n pass\n\nif __name__ == \"__main__\":\n #car = car()\n #car.capture()\n #test_color((0, 255, 0))\n camera = camera()\n index_desigated = 2 #0 = b, 1 = g, 2 = r\n kernel = np.ones((4,4),np.float32)/16\n while True:\n ret, frame = camera.usb_camera.read() # array\n # resize\n \n shape = frame.shape\n weight, height = shape[0]//10, shape[1]//10\n resize_frame = cv2.resize(frame, (height, weight), interpolation=cv2.INTER_CUBIC)\n resize_frame = cv2.filter2D(resize_frame,-1,kernel)\n gray_frame = cv2.cvtColor(resize_frame,cv2.COLOR_BGR2GRAY)\n Z = resize_frame.reshape((-1,3))\n Z = np.float32(Z)\n criteria = (cv2.TERM_CRITERIA_EPS + 
cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)\n        K = 12\n        ret,label,center=cv2.kmeans(Z,K,None,criteria,10,cv2.KMEANS_RANDOM_CENTERS)\n        center = np.uint8(center)\n        res = center[label.flatten()]\n        k_img = res.reshape((resize_frame.shape))\n\n        hsv_frame = rgbtohsl(k_img) # try hsl\n        cv2.imshow('k_img', hsv_frame)\n\n        Z = hsv_frame.reshape((-1,3))\n        Z = np.float32(Z)\n        K = 2\n        ret,label,center=cv2.kmeans(Z,K,None,criteria,10,cv2.KMEANS_RANDOM_CENTERS)\n        center = np.uint8(center)\n        res = center[label.flatten()]\n        k_hsv = res.reshape((hsv_frame.shape))\n        cv2.imshow(\"k_hsv\", k_hsv)\n        h_list = []\n        s_list = []\n        l_list = []\n        for x in range(0, weight):\n            for y in range(0, height):\n                tmp_h = k_hsv[x, y, 0]\n                tmp_s = k_hsv[x, y, 1]\n                tmp_l = k_hsv[x, y, 2]\n                if (tmp_h not in h_list):\n                    h_list.append(tmp_h)\n                    s_list.append(tmp_s)\n                    l_list.append(tmp_l)\n                else:\n                    if (tmp_s not in s_list):\n                        h_list.append(tmp_h)\n                        s_list.append(tmp_s)\n                        l_list.append(tmp_l)\n                    else:\n                        if (tmp_l not in l_list):\n                            h_list.append(tmp_h)\n                            s_list.append(tmp_s)\n                            l_list.append(tmp_l)\n        result_frame = np.zeros((resize_frame.shape))\n\n        count_1 = 0\n        count_2 = 0\n        for x in range(0, weight):\n            for y in range(0, height):\n                if (k_hsv[x, y, 0] == h_list[0] and k_hsv[x, y, 1] == s_list[0]\\\n                    and k_hsv[x, y, 2] == l_list[0]):\n                    count_1 += 1\n                else:\n                    count_2 += 1\n        \n        x_list = []\n        y_list = []\n        if (count_1 > count_2):\n            for x in range(0, weight):\n                for y in range(0, height):\n                    if (k_hsv[x, y, 0] == h_list[1] and k_hsv[x, y, 1] == s_list[1]\\\n                        and k_hsv[x, y, 2] == l_list[1]):\n                        tmp_color = (resize_frame[x, y, 0], resize_frame[x, y, 1], \\\n                            resize_frame[x, y, 2])\n                        max_color = max(tmp_color)\n                        max_index = tmp_color.index(max_color)\n                        # blue\n                        if (max_index == index_desigated):\n                            if (max_index == 0):\n                                result_frame[x, y, 1] = result_frame[x, y, 2] = 255\n                                result_frame[x, y, 0] = 255\n                                x_list.append(x)\n                                y_list.append(y)\n                            \n                            elif (max_index == 1):\n                                result_frame[x, y, 0] = result_frame[x, y, 2] = 255\n                                result_frame[x, y, 1] = 255\n                                x_list.append(x)\n                                y_list.append(y)\n                            else:\n                                result_frame[x, y, 0] = result_frame[x, y, 1] = 255\n                                result_frame[x, y, 2] = 255\n                                x_list.append(x)\n                                y_list.append(y)\n                        else:\n                            result_frame[x, y, 0] = result_frame[x, y, 1] = result_frame[x, y, 2] = 0\n        else:\n            for x in range(0, weight):\n                for y in range(0, height):\n                    if (k_hsv[x, y, 0] == h_list[0] and k_hsv[x, y, 1] == s_list[0]\\\n                        and k_hsv[x, y, 2] == l_list[0]):\n                        tmp_color = (resize_frame[x, y, 0], resize_frame[x, y, 1], \\\n                            resize_frame[x, y, 2])\n                        max_color = max(tmp_color)\n                        max_index = tmp_color.index(max_color)\n                        if (max_index == index_desigated):\n                            if (max_index == 0):\n                                result_frame[x, y, 1] = result_frame[x, y, 2] = 255\n                                result_frame[x, y, 0] = 255\n                                x_list.append(x)\n                                y_list.append(y)\n                            \n                            elif (max_index == 1):\n                                result_frame[x, y, 0] = result_frame[x, y, 2] = 255\n                                result_frame[x, y, 1] = 255\n                                x_list.append(x)\n                                y_list.append(y)\n                            else:\n                                result_frame[x, y, 0] = result_frame[x, y, 1] = 255\n                                result_frame[x, y, 2] = 255\n                                x_list.append(x)\n                                y_list.append(y)\n                        else:\n                            result_frame[x, y, 0] = result_frame[x, y, 1] = result_frame[x, y, 2] = 0\n        # guard against an empty match list to avoid ZeroDivisionError\n        avg_x = sum(x_list) / len(x_list) if x_list else -1\n        avg_y = sum(y_list) / len(y_list) if y_list else -1\n        mean_weight = int(weight / 2) #|v\n        mean_height = int(height / 2) #->\n\n        if abs(avg_x-mean_weight) < 5:\n            if abs(avg_y-mean_height) < 5:\n                print (\"ok\")\n            else:\n                print (\"x\")\n        else:\n            print (\"x\")\n\n        #cv2.imshow('result', result_frame)\n        \n        if (cv2.waitKey(1) == ord('q')):\n            break\n    camera.usb_camera.release()\n    
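# close the preview windows opened by cv2.imshow() before exiting\n    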
cv2.destroyAllWindows()\n\n","repo_name":"applejenny66/AGV_IOT","sub_path":"camera_specific.py","file_name":"camera_specific.py","file_ext":"py","file_size_in_byte":7862,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"}{"seq_id":"27651636560","text":"from smalldict import SmallDict\n\nd = {\n    \"key_1\": [\"value_1\", \"value_2\", \"value_3\"],\n    \"key_2\": {\"key_1\": \"value\", \"key_2\": \"value\", \"key_3\": \"value\"},\n    \"key_3\": \"value\",\n}\n\n\ndef test_no_limit():\n    assert SmallDict(d).get() == d\n\n\ndef test_limit():\n    assert SmallDict(d).get(\n        dict_limit=2, list_limit=1, str_limit=3, json_out=None, yaml_out=None\n    ) == {\n        \"key_1\": [\"val\"],\n        \"key_2\": {\"key_1\": \"val\", \"key_2\": \"val\"},\n    }\n","repo_name":"Minyus/smalldict","sub_path":"src/tests/test_smalldict.py","file_name":"test_smalldict.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"}{"seq_id":"41099272050","text":"import tensorflow as tf \n# store three values as 32-bit integers\n# a placeholder is just an empty vessel: it only declares the shape, the values are fed in later\na = tf.placeholder(tf.int32, [3])\n\nb = tf.constant(2)\nx2_op = a*b\n\nsess = tf.Session()\n\nr1 = sess.run(x2_op, feed_dict={a:[1,2,3]})\nprint(r1)\nr2 = sess.run(x2_op, feed_dict={a:[10,20,10]})\nprint(r2)\n","repo_name":"dl57934/python-learning","sub_path":"ch5/ch5-4/placeholder.py","file_name":"placeholder.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"}{"seq_id":"10561743836","text":" \nfh = None\ntry :\n \n    fh = open('lines.txt')\n    \n    for line in fh:\n        print(line.strip())\nexcept IOError as e:\n    print('file does not exist....!',e)\nfinally:\n    print('Hola Amigos')\n    # close only if open() succeeded, otherwise fh is still None\n    if fh:\n        fh.close() \n    \n# def main():\n#     try:\n#         for line in readfile('lines.txt'):\n#             print(line.strip())\n#     except IOError as e:\n#         print('cannot read file',e)\n#     except ValueError as v :\n#         print('Bad file name',v)\n# \n# \n# def readfile(filename):\n#     if filename.endswith('.txt'):\n#         fh=open(filename)\n#         return fh.readlines()\n#     else:\n#         raise ValueError('Filename must end with .txt')\n# main() \n    \n    \n    \n    ","repo_name":"KaranRawlley/Python-Machine_LearningT","sub_path":"Day _5/Files12/exception handling.py","file_name":"exception handling.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"}{"seq_id":"17133583395","text":"def search_range(numbers: list, num: int):\n    \"\"\"\n    This Function is Used to get first and last index of number in a numbers list.\n    for example:\n    search_range([1,2,2,2,3,4], 2) -> [1, 3]\n    \"\"\"\n    start, end = None, None\n    flag = False\n    for i, number in enumerate(numbers):\n        if number == num and flag is False:\n            start = i\n            flag = True\n        elif flag and number != num:\n            end = i - 1\n            break\n    return [start, end]\n\n\ndef sort_numbers(numbers: list):\n    length = len(numbers)\n    for i in range(length-1):\n        is_sorted = True\n        for j in range(length-i-1):\n            if numbers[j] > numbers[j+1]:\n                numbers[j], numbers[j+1] = numbers[j+1], numbers[j]\n                is_sorted = False\n        if is_sorted:\n            return numbers\n    return numbers\n\nnumbers = list(map(int, input().split()))\nnum = int(input())\nnumbers = sort_numbers(numbers)\nprint(numbers)\nprint(search_range(numbers, 
num))","repo_name":"Amirmahdikahdouii/Python-Exercises","sub_path":"Q-81/81.py","file_name":"81.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"85"}{"seq_id":"1264035904","text":"def calculate_mark(s):\n    '''implement this function'''\n    s_list = s.split(\" \")\n\n    try:\n        int(s_list[0])\n    except ValueError:\n        raise SyntaxError\n    mark = int(s_list[1]) - int(s_list[2])\n    # a bare comparison never raises, so validate the sign explicitly\n    if mark < 0:\n        raise ValueError\n\n    mark_st = str(mark)\n    student = s_list[0]\n\n    print(f\"{student} {mark_st}\")\n    return f\"{student} {mark_st}\"\n\n\n#calculate_mark(\"123 78 50\")\ncalculate_mark(\"john xx 30\")\n#calculate_mark(\"123 35 50\")","repo_name":"james1968/UoL_Codio_Challenges","sub_path":"calculate_mark.py","file_name":"calculate_mark.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"}{"seq_id":"29033930793","text":"from flask import jsonify, request, abort\nfrom models import Category, Question\nfrom sqlalchemy import and_\n\n\ndef quizzes_controller(app):\n    @app.route('/quizzes', methods=['POST'])\n    def quizzes():\n        payload = request.json\n        if 'previous_questions' not in payload \\\n                or 'quiz_category' not in payload \\\n                or 'id' not in payload['quiz_category']:\n            abort(422)\n        previous_questions_ids = payload['previous_questions']\n        category_id = payload['quiz_category']['id']\n        filters = [Question.id.notin_(previous_questions_ids)]\n        if category_id != 0:\n            filters.append(Question.category_id == category_id)\n        question = Question.query.filter(and_(*filters)).first()\n        return jsonify(\n            {\n                'question': question.format() if question is not None else None\n            }\n        )\n","repo_name":"mostafaelspagh0/Udacitrivia","sub_path":"backend/controllers/quizzes.py","file_name":"quizzes.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"}{"seq_id":"71781993879","text":"\n#import modules\nimport cv2, os, pickle\nimport numpy as np\nfrom PIL import Image\n\n# set a cascade path for face detection using facial features\ncascadePath = 'D:\\\\opencv-master\\\\data\\\\haarcascades\\\\haarcascade_frontalface_default.xml'\nfaceCascade = cv2.CascadeClassifier(cascadePath)\n\n#set algorithm for identifying a face\n#recognizer = cv2.face.LBPHFaceRecognizer_create()\nrecognizer = cv2.face.FisherFaceRecognizer_create()\n#recognizer = cv2.face.createEigenFaceRecognizer()\n\nmapping = pickle.load(open(\"mapping.p\",\"rb\"))\n \n# Captures a single image from the camera and returns it in PIL format\ndef get_image(camera):\n    # read is the easiest way to get a full image out of a VideoCapture object.\n    retval, im = camera.read()\n    return im\n\ndef startRecognizing():\n    # load the previously trained recognizer data\n    recognizer.read(\"mytrainingdata.xml\")\n    match = False\n    time = 0\n    # Set the port of web cam\n    camera_port = 0\n    \n    #Number of frames to throw away while the camera adjusts to light levels\n    ramp_frames = 30\n    \n    # Now we can initialize the camera capture object with the cv2.VideoCapture.\n    camera = cv2.VideoCapture(camera_port)\n\n    while True:\n        \n        cameraCapture = get_image(camera)\n        grayImage = cv2.cvtColor(cameraCapture, cv2.COLOR_BGR2GRAY)\n        faces = faceCascade.detectMultiScale(\n            grayImage,\n            scaleFactor=1.1,\n            minNeighbors=5,\n            minSize=(180, 180),\n            flags = cv2.CASCADE_SCALE_IMAGE)\n        for (x, y, w, h) in faces:\n
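            # crop the detected face and hand it to the recognizer below;\n            # predict() returns (label, distance) and a smaller distance means a closer match\n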
            predictImage = grayImage[y: y + h, x: x + w]\n            predictImage = cv2.resize(predictImage, (200,200))\n            label, conf = recognizer.predict(predictImage)\n            if(conf<10000):\n                cv2.rectangle(cameraCapture, (x, y), (x + w, y + h),(0,255,0),1)\n                cv2.putText(cameraCapture,'%s : %.0f' % (mapping.get(label), conf),(x-10, y-10), cv2.FONT_HERSHEY_PLAIN,1,(255, 0, 0))\n            else:\n                cv2.rectangle(cameraCapture, (x, y), (x + w, y + h),(0,0,255),1)\n                cv2.putText(cameraCapture,'Not Identified',(x-10, y-10), cv2.FONT_HERSHEY_PLAIN,1,(0, 0, 255))\n        cv2.imshow(\"Recognizing..\",cameraCapture)\n        cv2.waitKey(60) \n\n    del(camera)\n\n#main function:\nif __name__ == '__main__':\n    startRecognizing()\n","repo_name":"arnabkhan123/Face-Recognition","sub_path":"recognizer.py","file_name":"recognizer.py","file_ext":"py","file_size_in_byte":2356,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"85"}{"seq_id":"3507337199","text":"from main import Scraper\nfrom BeautifulSoup import BeautifulSoup, SoupStrainer\nimport urllib, re, requests\nimport HTMLParser\n\nclass desit(Scraper):\n    def __init__(self):\n        Scraper.__init__(self)\n        self.bu = 'http://www.desitashan.me/'\n        self.icon = self.ipath + 'desit.png'\n        self.list = {'01Indian': self.bu,\n                     '02Pakistani': self.bu + 'pakistan-tv/'}\n    \n    def get_menu(self):\n        return (self.list,4,self.icon)\n\n    def get_top(self,url):\n        \"\"\"\n        Get the list of channels.\n        :return: list\n        \"\"\"\n        channels = []\n        h = HTMLParser.HTMLParser()\n        html = requests.get(url, headers=self.hdr).text\n        mlink = SoupStrainer('div', {'class':'nav fusion-mobile-tab-nav'})\n        mdiv = BeautifulSoup(html, parseOnlyThese=mlink)\n        items = mdiv.findAll('li')\n        for item in items:\n            title = h.unescape(item.text)\n            tref = item.a.get('href')[1:]\n            iurl = '%sZZZZ%s'%(url,tref)\n            try:\n                icon = item.find('img')['src']\n                if icon.startswith('/'):\n                    icon = self.bu[:-1] + icon\n                else:\n                    icon = self.bu + icon\n            except:\n                icon = self.icon\n            \n            channels.append((title,icon,iurl))\n\n        return (channels,5)\n\n    def get_second(self,iurl):\n        \"\"\"\n        Get the list of shows.\n        :return: list\n        \"\"\"\n        shows = []\n        h = HTMLParser.HTMLParser()\n        url = iurl.split('ZZZZ')[0]\n        channel = iurl.split('ZZZZ')[1]\n        html = requests.get(url, headers=self.hdr).text\n        mlink = SoupStrainer('div', {'id':channel})\n        mdiv = BeautifulSoup(html, parseOnlyThese=mlink)\n        items = mdiv.findAll('div', {'class':'fusion-column-wrapper'})\n        for item in items:\n            title = h.unescape(item.text)\n            url = item.a.get('href')\n            if url.startswith('/'):\n                url = self.bu[:-1] + url\n            else:\n                url = self.bu + url\n            try:\n                icon = item.find('img')['src']\n                if icon.startswith('/'):\n                    icon = self.bu[:-1] + icon\n                else:\n                    icon = self.bu + icon\n            except:\n                icon = self.icon\n            \n            shows.append((title,icon,url))\n        \n        return (shows,7)\n    \n    def get_items(self,iurl):\n        episodes = []\n        h = HTMLParser.HTMLParser()\n        html = requests.get(iurl).text\n        mlink = SoupStrainer('div', {'id':'showList'})\n        mdiv = BeautifulSoup(html, parseOnlyThese=mlink)\n        items = mdiv.findAll('div', {'class':'fusion-column-wrapper'})\n        for item in items:\n            title = h.unescape(item.h4.a.text)\n            if 'written' not in title.lower():\n                url = item.a.get('href')\n                if url.startswith('/'):\n                    url = self.bu[:-1] + url\n                else:\n                    url = self.bu + url\n                try:\n                    icon = item.find('img')['src']\n                    if icon.startswith('/'):\n                        icon = self.bu[:-1] + icon\n                    else:\n                        icon = self.bu + icon\n                except:\n                    icon = self.icon \n                episodes.append((title,icon,url))\n
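        # pagination: when the page advertises a 'Next' link, append one extra\n        # pseudo-entry that points at the following page of results\n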
        plink = SoupStrainer('a', {'class':'pagination-next'})\n        Paginator = BeautifulSoup(html, parseOnlyThese=plink)\n        if 'Next' in str(Paginator):\n            ep_link = Paginator.a.get('href')\n            if 'category' in ep_link:\n                url = self.bu[:-1] + ep_link\n            else:\n                url = iurl + ep_link\n            title = 'Next Page: ' + url.split('page/')[1][:-1]\n            episodes.append((title, self.nicon, url)) \n        return (episodes,8) \n\n    def get_videos(self,iurl):\n        videos = []\n        h = HTMLParser.HTMLParser()\n        html = requests.get(iurl).text\n        mlink = SoupStrainer('p', {'class':'vidLinksContent'})\n        videoclass = BeautifulSoup(html, parseOnlyThese=mlink)\n        items = videoclass.findAll('a')\n        for item in items:\n            vid_link = item['href']\n            vidtxt = h.unescape(item.text)\n            vidtxt = re.findall('(\\d.*)',vidtxt)[0]\n            if '/coming/' in vid_link:\n                url = 'http://www.tashanplayer.com/upcoming.mp4'\n                videos.append(('Coming Soon',url))\n            elif 'tashanplayer' in vid_link:\n                vhtml = requests.get(vid_link).text\n                try:\n                    vplink = SoupStrainer('iframe')\n                    vsoup = BeautifulSoup(vhtml, parseOnlyThese=vplink)\n                    vid_url = vsoup.find('iframe')['src']\n                except:\n                    vplink = SoupStrainer('script', {'data-container':'myPlayer'})\n                    vsoup = BeautifulSoup(vhtml, parseOnlyThese=vplink)\n                    vid_url = vsoup.find('script')['data-config']\n                self.resolve_media(vid_url,videos,vidtxt)\n            else:\n                self.resolve_media(vid_link,videos,vidtxt)\n\n        mlink = SoupStrainer('div', {'class':'post-content'})\n        videoclass = BeautifulSoup(html, parseOnlyThese=mlink)\n        items = videoclass.findAll('iframe')\n        for item in items:\n            vid_link = item['src']\n            if '/coming/' in vid_link:\n                url = 'http://www.tashanplayer.com/upcoming.mp4'\n                videos.append(('DT Upcoming',url))\n            elif 'tashanplayer' in vid_link:\n                vhtml = requests.get(vid_link).text\n                try:\n                    vplink = SoupStrainer('iframe')\n                    vsoup = BeautifulSoup(vhtml, parseOnlyThese=vplink)\n                    vid_url = vsoup.find('iframe')['src']\n                except:\n                    vplink = SoupStrainer('script', {'data-container':'myPlayer'})\n                    vsoup = BeautifulSoup(vhtml, parseOnlyThese=vplink)\n                    vid_url = vsoup.find('script')['data-config']\n                self.resolve_media(vid_url,videos)\n            else:\n                self.resolve_media(vid_link,videos)\n        \n        return videos\n","repo_name":"knagaraju/GujalKodiWork","sub_path":"plugin.video.deccandelight/resources/scrapers/desit.py","file_name":"desit.py","file_ext":"py","file_size_in_byte":6306,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"}{"seq_id":"29722449837","text":"import logging\n\n# Extract secrets\nsecrets = {}\nwith open(\"secrets.txt\") as f:\n    for line in f:\n        try:\n            # split on the first '=' only and strip the trailing newline\n            key, value = line.strip().split(\"=\", 1)\n            secrets[key] = value\n        except Exception as e:\n            logging.critical(f\"Incorrect formatting of secrets: {e}\")\n\n","repo_name":"spennyp/piServer","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"}{"seq_id":"16620675630","text":"import pygame\nfrom Graphic.Drawing import *\nfrom Functionality.Board import *\nfrom Functionality.Player import *\nfrom Functionality.Symbol import *\nfrom Functionality.Winner import *\n\n\nclass Game():\n    def __init__(self):\n        self._playerX = Player(Symbol.X)\n        self._playerO = Player(Symbol.O)\n        self._rowCount = 3\n        self._columnCount = 3\n        self._board = Board(self._rowCount, self._columnCount)\n        self._winner = Winner(self._board, self._rowCount, self._columnCount)\n        self._playerToThrow = self._playerX\n        self._boardIsFull = False\n\n    def startGame(self, player):\n        if player == 1:\n            self._playerToThrow = self._playerX\n        elif player == 2:\n            self._playerToThrow = self._playerO\n        else:\n            self._playerToThrow = Player(Symbol.none)\n        self._board.reset()\n\n    def getPlayerToThrow(self):\n        return self._playerToThrow\n\n    def getWinner(self):\n        return self._winner.getWinner()\n\n    def getFieldState(self, row, column):\n        return self._board.getField(row, column).getSymbol()\n\n    def getField(self, row, column):\n        return self._board.getField(row, column)\n\n    def switchPlayers(self):\n        if (self._playerToThrow == self._playerX):\n            self._playerToThrow = self._playerO\n        else:\n            self._playerToThrow = self._playerX\n\n    def whoHasWon(self):\n        return self._board.whoHasWon()\n\n    def getPressedField(self, boardX, boardY, fieldH, fieldW, gap):\n
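        # a click selects cell (row, column) when the pointer lies inside that\n        # cell's rectangle, measured from the board origin plus the per-cell gaps\n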
        if(pygame.mouse.get_pressed()[0] == 1):\n            for row in range(self._rowCount):\n                for column in range(self._columnCount):\n                    if((boardX + (column + 1) * fieldW > pygame.mouse.get_pos()[0] > boardX + column * fieldW + (column + 1) * gap)\n                    and ( boardY + (row + 1) * fieldH > pygame.mouse.get_pos()[1] > boardY + row * fieldH + (row + 1)* gap)\n                    and not self._board.isFieldOccupied(row, column)):\n                        self._board.setField(row, column, self._playerToThrow)\n                        self.switchPlayers()\n                        return True\n        return False\n\n    def boardIsFull(self):\n        for row in range(self._rowCount):\n            for column in range(self._columnCount):\n                if self.getField(row, column).getSymbol() == Symbol.none:\n                    self._boardIsFull = False\n                    return self._boardIsFull\n        self._boardIsFull = True\n        return self._boardIsFull\n\n    def setBoardIsFull(self, param):\n        self._boardIsFull = param\n\n\n    def setField(self, row, column, player):\n        self._board.setField(row, column, player)","repo_name":"sztalinux/TicTacToe","sub_path":"Functionality/Game.py","file_name":"Game.py","file_ext":"py","file_size_in_byte":2703,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"}{"seq_id":"70896029399","text":"\"\"\"\n    @Author: Mayank Anand\n    @Date: 2022-03-16\n    @Last Modified by: Mayank Anand\n    @Last Modified time: 2022-03-17\n    @Title : List Data Structure Programs - Remove duplicate elements from list\n    \"\"\"\nimport logging\n\n\ndef remove_duplicates(given_list):\n    \"\"\"\n    Description:\n        Removes duplicates from the list given as parameter.\n    Parameter:\n        given_list: Given list to remove duplicate elements.\n    Return:\n        List after removing duplicates.\n    \"\"\"\n    result_list = []\n    for element in given_list:\n        if element not in result_list:\n            result_list.append(element)\n    return result_list\n\n\ndef main():\n    try:\n        sample_list = [3, 5, 7, 9, 11, 13, 14, 14, 15, 17, 19, 19]\n        print(\"List:\",sample_list)\n        print(\"List after removing duplicates:\", remove_duplicates(sample_list))\n    except Exception as e:\n        print(\"{} is raised.\".format(e))\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"mayankan/data-structures","sub_path":"list_data_structures/6_remove_duplicates.py","file_name":"6_remove_duplicates.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"}{"seq_id":"11187429859","text":"# Author: michael-gh1\n\nimport bpy\nimport os\n\nfrom bpy_extras.io_utils import ImportHelper\nfrom bpy.props import StringProperty\nfrom bpy.types import Operator\nfrom setup_wizard.import_order import cache_using_cache_key, get_cache, FESTIVITY_GRAN_TURISMO_FILE_PATH\n\nfrom setup_wizard.setup_wizard_operator_base_classes import CustomOperatorProperties\n\nNAME_OF_DEFAULT_SCENE = 'Scene'\n\nNAME_OF_GRAN_TURISMO_NODE = 
'Group'\nNAME_OF_GRAN_TURISMO_NODE_TREE = 'GranTurismoWrapper [APPEND]'\n\nNAME_OF_COMPOSITOR_INPUT_NODE = 'Render Layers'\nNAME_OF_COMPOSITOR_OUTPUT_NODE = 'Composite'\n\nNAME_OF_VIEWER_NODE = 'Viewer'\nNAME_OF_VIEWER_NODE_TYPE = 'CompositorNodeViewer'\n\nNAME_OF_IMAGE_IO = 'Image'\nNAME_OF_RESULT_IO = 'Result'\n\n\nclass GI_OT_GenshinGranTurismoTonemapperSetup(Operator, ImportHelper, CustomOperatorProperties):\n \"\"\"Select Festivity's Gran Turismo ToneMapper .blend File to import NodeTree\"\"\"\n bl_idname = 'genshin.gran_turismo_tonemapper_setup'\n bl_label = 'Genshin: Gran Turismo Tonemapper Setup - Select Gran Turismo .blend File'\n\n # ImportHelper mixin class uses this\n filename_ext = \"*.*\"\n\n import_path: StringProperty(\n name=\"Path\",\n description=\"Festivity's Gran Turismo .blend File\",\n default=\"\",\n subtype='DIR_PATH'\n )\n\n filter_glob: StringProperty(\n default=\"*.*\",\n options={'HIDDEN'},\n maxlen=255, # Max internal buffer length, longer would be clamped.\n )\n\n logs = 'Gran Turismo Tonemapper Setup:\\n'\n\n def execute(self, context):\n cache_enabled = context.window_manager.cache_enabled\n gran_turismo_blend_file_path = self.filepath or get_cache(cache_enabled).get(FESTIVITY_GRAN_TURISMO_FILE_PATH)\n\n if not bpy.data.scenes.get(NAME_OF_DEFAULT_SCENE).node_tree:\n self.logs += 'ERROR: Must enable \"Use Nodes\" in Compositor view before being able to set up GT Tonemapper\\n'\n self.report({'ERROR'}, f'{self.logs}')\n return {'FINISHED'}\n\n # Technically works if only running this Operator, but this cannot be chained because we need to be \n # out of the script (or in another Operator) to update ctx before the import modal appears\n # Solution would be to create a separate Operator that handles context switches\n # TODO: This does not work unless INVOKE_DEFAULT with a modal or some window appears to update the Blender UI\n # self.switch_to_compositor()\n\n if not bpy.data.node_groups.get(NAME_OF_GRAN_TURISMO_NODE_TREE):\n if not gran_turismo_blend_file_path:\n bpy.ops.genshin.gran_turismo_tonemapper_setup(\n 'INVOKE_DEFAULT',\n next_step_idx=self.next_step_idx, \n file_directory=self.file_directory,\n invoker_type=self.invoker_type,\n high_level_step_name=self.high_level_step_name\n )\n return {'FINISHED'}\n self.append_gran_turismo_tonemapper(gran_turismo_blend_file_path)\n else:\n self.logs += f'{NAME_OF_GRAN_TURISMO_NODE_TREE} already appended, skipping.\\n'\n\n gran_turismo_node = bpy.data.scenes.get(NAME_OF_DEFAULT_SCENE).node_tree.nodes.get(NAME_OF_GRAN_TURISMO_NODE)\n if not gran_turismo_node or \\\n (gran_turismo_node and gran_turismo_node.node_tree.name != NAME_OF_GRAN_TURISMO_NODE_TREE):\n self.create_compositor_node_group(NAME_OF_GRAN_TURISMO_NODE_TREE)\n else:\n self.logs += f'{NAME_OF_GRAN_TURISMO_NODE_TREE} node already created, skipping and not creating new node.\\n'\n\n viewer_node = bpy.data.scenes.get(NAME_OF_DEFAULT_SCENE).node_tree.nodes.get(NAME_OF_VIEWER_NODE)\n if not viewer_node:\n self.create_compositor_node(NAME_OF_VIEWER_NODE_TYPE)\n else:\n self.logs += f'Viewer node already exists, skipping.\\n'\n\n self.connect_starting_nodes()\n self.set_node_locations()\n\n if cache_enabled and gran_turismo_blend_file_path:\n cache_using_cache_key(\n get_cache(cache_enabled), \n FESTIVITY_GRAN_TURISMO_FILE_PATH, \n gran_turismo_blend_file_path\n )\n\n self.report({'INFO'}, f'{self.logs}')\n super().clear_custom_properties()\n return {'FINISHED'}\n\n '''\n def switch_to_compositor(self):\n bpy.ops.genshin.change_bpy_context(\n 
'EXEC_DEFAULT',\n bpy_context_attr='area.type',\n bpy_context_value_str='NODE_EDITOR'\n )\n bpy.ops.genshin.change_bpy_context(\n 'EXEC_DEFAULT',\n bpy_context_attr='scene.use_nodes',\n bpy_context_value_bool=True\n )\n '''\n\n def append_gran_turismo_tonemapper(self, gran_turismo_blend_file_path):\n inner_path = 'NodeTree'\n\n bpy.ops.wm.append(\n filepath=os.path.join(gran_turismo_blend_file_path, inner_path, NAME_OF_GRAN_TURISMO_NODE_TREE),\n directory=os.path.join(gran_turismo_blend_file_path, inner_path),\n filename=NAME_OF_GRAN_TURISMO_NODE_TREE\n )\n\n self.logs += f'Appended {NAME_OF_GRAN_TURISMO_NODE_TREE}\\n'\n\n def create_compositor_node_group(self, node_name):\n bpy.ops.node.add_node(\n type=\"CompositorNodeGroup\", \n use_transform=True, \n settings=[\n {\"name\":\"node_tree\", \"value\":f\"bpy.data.node_groups['{node_name}']\"}\n ]\n )\n self.logs += f'Created {node_name} node tree\\n'\n\n def create_compositor_node(self, node_type):\n bpy.ops.node.add_node(\n type=node_type, \n use_transform=True\n )\n self.logs += f'Created {node_type} node tree\\n'\n\n def connect_starting_nodes(self):\n default_scene = bpy.data.scenes.get(NAME_OF_DEFAULT_SCENE)\n\n render_layers_node = default_scene.node_tree.nodes.get(NAME_OF_COMPOSITOR_INPUT_NODE)\n render_layers_output = render_layers_node.outputs.get(NAME_OF_IMAGE_IO)\n\n gran_turismo_wrapper_node = default_scene.node_tree.nodes.get(NAME_OF_GRAN_TURISMO_NODE)\n gran_turismo_wrapper_node_input = gran_turismo_wrapper_node.inputs.get(NAME_OF_IMAGE_IO)\n gran_turismo_wrapper_node_output = gran_turismo_wrapper_node.outputs.get(NAME_OF_RESULT_IO)\n\n composite_node = default_scene.node_tree.nodes.get(NAME_OF_COMPOSITOR_OUTPUT_NODE)\n composite_node_input = composite_node.inputs.get(NAME_OF_IMAGE_IO)\n\n viewer_node = default_scene.node_tree.nodes.get(NAME_OF_VIEWER_NODE)\n viewer_node_input = viewer_node.inputs.get(NAME_OF_IMAGE_IO)\n\n self.connect_nodes_in_scene(default_scene, render_layers_output, gran_turismo_wrapper_node_input)\n self.connect_nodes_in_scene(default_scene, gran_turismo_wrapper_node_output, composite_node_input)\n self.connect_nodes_in_scene(default_scene, gran_turismo_wrapper_node_output, viewer_node_input)\n\n def connect_nodes_in_scene(self, scene, input, output):\n # This is very important! 
The links are at the scene.node_tree level\n        # It makes sense after you spend some time thinking about it because you're linking the nodes in the scene.\n        # I spent way too much time troubleshooting at the scene.node_tree.nodes level\n        # At that point you're trying to link things inside nodes, which is wrong!\n        scene_node_tree_links = scene.node_tree.links\n        scene_node_tree_links.new(\n            input, \n            output\n        )\n\n        input_node_name = input.node.node_tree.name_full if hasattr(input.node, 'node_tree') else input.node.name\n        output_node_name = output.node.node_tree.name_full if hasattr(output.node, 'node_tree') else output.node.name\n\n        self.logs += f\"Connected '{input_node_name}' ({input.name}) to '{output_node_name}' ({output.name}) in scene: {scene.name}\\n\"\n\n    def set_node_locations(self):\n        default_scene = bpy.data.scenes.get(NAME_OF_DEFAULT_SCENE)\n        render_layers_node = default_scene.node_tree.nodes.get(NAME_OF_COMPOSITOR_INPUT_NODE)\n        gran_turismo_wrapper_node = default_scene.node_tree.nodes.get(NAME_OF_GRAN_TURISMO_NODE)\n        composite_node = default_scene.node_tree.nodes.get(NAME_OF_COMPOSITOR_OUTPUT_NODE)\n        viewer_node = default_scene.node_tree.nodes.get(NAME_OF_VIEWER_NODE)\n\n        render_layers_node.location = (-200, 400)\n        gran_turismo_wrapper_node.location = (250, 400)\n        composite_node.location = (500, 400)\n        viewer_node.location = (500, 200)\n        self.logs += f'Set default locations for nodes in Compositing\\n'\n","repo_name":"michael-gh1/Addons-And-Tools-For-Blender-miHoYo-Shaders","sub_path":"setup_wizard/genshin_gran_turismo_tonemapper_setup.py","file_name":"genshin_gran_turismo_tonemapper_setup.py","file_ext":"py","file_size_in_byte":8591,"program_lang":"python","lang":"en","doc_type":"code","stars":89,"dataset":"github-code","pt":"85"}{"seq_id":"16823186894","text":"import numpy as np\n\nwith open(\"inputs/3\", \"r\") as file:\n    input = file.read().splitlines()\n    input = np.array([[int(y) for y in x] for x in input if x])\n\nsums = np.sum(input, axis=0)\n\ndef get_num(idx, data, mostcommon):\n    if len(data) == 1:\n        print(data)\n        return \"\".join(str(x) for x in data.flat)\n    s = np.sum(data, axis=0)[idx]\n    if s > data.shape[0] / 2:\n        # 1 is most common\n        data = data[np.where(data[:,idx] == (1 if mostcommon else 0))]\n    elif s == data.shape[0] / 2:\n        # even common\n        data = data[np.where(data[:,idx] == (1 if mostcommon else 0))]\n    else:\n        # 0 is most common\n        data = data[np.where(data[:,idx] == (0 if mostcommon else 1))]\n    return get_num(idx+1, data, mostcommon)\n\n# oxygen keeps the most common bit at each position, CO2 the least common\noxygen = get_num(0, input, True)\nco2 = get_num(0, input, False)\nprint(oxygen, co2)\nprint(int(oxygen, 2), int(co2, 2))\nprint(int(oxygen, 2) * int(co2, 2))","repo_name":"marisgg/advent-of-code","sub_path":"3_2.py","file_name":"3_2.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"}{"seq_id":"29231242224","text":"\"\"\"\ngenops.py (module)\n\nWritten as part of the submission for ECE 750 AL: Bio&Comp Individual Project\nAuthor: Rahul Balamurugan\n\nContains the selection, crossover, and mutation genetic operators as well as \nany specific functions solely called by the operators. Does nothing if\nexecuted.\n\nList of functions:\n    \n    tournamentSelection(population,tsub=2,selection_probability=1):\n        Returns ordered list of winning parents where every consecutive pair\n        is a mating pair. Does tournament selection using the crowding\n        distance and non-domination level as comparators. 
Default number of\n competing individuals is 2 (Binary tournament), and default selection\n probability is 1.\n \n partiallyMappedCrossover(p1,p2,ctype):\n Returns tuple of (child1,child2) by crossing p1 and p2 using the \n Partially Mapped Crossover method. Can do for either chromosome\n representation.\n \n getCxChild1():\n Return child 1 for the cyclic crossover operator cyclicCrossover().\n \n getCxChild2():\n Returns child 2 for the cyclic crossover operator cyclicCrossover()\n \n cyclicCrossover(p1,p2,ctype):\n Returns tuple (child1,child2). Implements a cyclic crossover operator\n that crosses parents p1 and p2 of chromosome representation type ctype.\n \n orderCrossover(p1,p2,ctype):\n Returns tuple (child1,child2) after crossing parents p1 and p2 using\n order crossover. Chromosome representation selected using ctype.\n \n decodeChromosome2(part_1):\n Returns list containing decoded 'part_1', which is the permutation part\n of a chromosome of type 2.\n \n rationalizeHgaResult(result):\n Returns list containing corrected part_1 of child 2 for the \n hierarchicalCrossover() operator.\n \n hierarchicalCrossover(p1,p2,C,T,ptype):\n Returns tuple (child1, child2) by crossing parents p1 and p2 using the\n Combined HGA method as proposed in the paper:\n Y. Shuai, S. Yunfeng, and Z. Kai, “An effective method for solving \n multiple travelling salesman problem based on NSGA-II,” Systems \n Science & Control Engineering, vol. 7, no. 2, pp. 108–116, Oct. \n 2019, doi: 10.1080/21642583.2019.1674220.\n The first child is based on relative distances from C if problem is \n MinMax SD-MTSP (ptype==1), else based on the traversal time T. The \n second child is always based on relative distances.\n \n insertMutation(child,ctype):\n Returns mutated child by selecting a random gene to insert elsewhere,\n selecting a random point (different from original location) and \n inserting the selected gene there. Can do for either ctype.\n \n swapMutation(child,ctype):\n Returns mutated child by selecting two random genes and swapping their\n positions on chromosome. Can do for either ctype.\n \n invertMutation(child,ctype):\n Returns mutated child by selecting a continuous subset of genes and \n inverting their ordering. Can do for either ctype.\n \n scrambleMutation(child,ctype):\n Returns mutated child by selecting a continuous subset of genes and\n scrambling the positions of all genes in the subset. Can do for either \n ctype.\n \n mutateChild(child,ctype):\n Returns mutated child by calling one of the four mutation operators.\n All operators have equal probability of being called. 
Is the only \n mutation function to be directly called by external functions.\n\"\"\"\nimport numpy as np\nimport copy\nimport functools\nimport src.nsga2 as nsga2\nimport src.chromosome as chrom\n\ndef tournamentSelection(population,tsub=2,selection_probability=1):\n rng = np.random.default_rng()\n parents = []\n probability_list = [selection_probability]\n best = None\n for i in range(1,tsub):\n probability_list.append(selection_probability*\n (1-selection_probability)**i)\n while len(parents)1:\n i = p11.index(k)\n j = p21.index(k)\n cities = []\n left_city_1,right_city_1 = p11[i-1],p11[(i+1)%len(p11)]\n left_city_2,right_city_2 = p21[j-1],p21[(j+1)%len(p21)]\n \n if i==len(p11)-1:\n cities.append(left_city_1)\n else:\n cities.append(right_city_1)\n if j==len(p21)-1:\n cities.append(left_city_2)\n else:\n cities.append(right_city_2)\n if ptype==1:\n distances = [C[k,cities[0]],C[k,cities[1]]]\n else:\n distances = [T[k,cities[0]],T[k,cities[1]]]\n p11.remove(k)\n p21.remove(k)\n k = cities[np.argsort(distances)[0]]\n result_1.append(k)\n k = rng.choice(p1.part_1)\n result_2 = [k]\n while len(dp1)>1:\n i = dp1.index(k)\n j = dp2.index(k)\n cities =[]\n left_city_1,right_city_1 = dp1[i-1],dp1[(i+1)%len(dp1)]\n left_city_2,right_city_2 = dp2[j-1],dp2[(j+1)%len(dp2)]\n \n if i==len(dp1)-1:\n cities.append(left_city_1)\n else:\n cities.append(right_city_1)\n if j==len(dp2)-1:\n cities.append(left_city_2)\n else:\n cities.append(right_city_2)\n dp1.remove(k)\n dp2.remove(k)\n if C[k,cities[0]]>C[k,cities[1]]:\n k = cities[1]\n else:\n k = cities[0]\n result_2.append(k)\n result_2 = rationalizeHgaResult(result_2)\n child_2.part_2 = np.sort(rng.choice(np.arange(1,max(p2.part_1)),\n p2.part_2.shape[0],replace=False))\n child_1.part_1 = np.array(result_1)\n child_2.part_1 = np.array(result_2)\n if rng.choice([0,1])==0:\n child_1.part_2 = copy.deepcopy(p1.part_2)\n else:\n child_1.part_2 = copy.deepcopy(p2.part_2)\n return child_1,child_2\n\ndef insertMutation(child,ctype=2):\n rng = np.random.default_rng()\n if ctype==1:\n c = child.cities\n s = child.tours\n mutated = chrom.Chromosome_1()\n mutated.cities = copy.deepcopy(c)\n mutated.tours = copy.deepcopy(s)\n point_1 = rng.choice(np.arange(c.shape[0]-1))\n point_2 = rng.choice(np.arange(point_1+1,c.shape[0]))\n mutated.cities = np.insert(mutated.cities,point_1+1,c[point_2])\n mutated.cities = np.delete(mutated.cities,point_2+1)\n \n if ctype==2:\n p1 = child.part_1\n p2 = child.part_2\n mutated = chrom.Chromosome_2()\n mutated.part_1 = copy.deepcopy(p1)\n mutated.part_2 = np.sort(rng.choice(np.arange(1,max(p1)),\n p2.shape[0],replace=False))\n point_1 = rng.choice(np.arange(p1.shape[0]-1))\n point_2 = rng.choice(np.arange(point_1+1,p1.shape[0]))\n mutated.part_1 = np.insert(mutated.part_1,point_1+1,p1[point_2])\n mutated.part_1 = np.delete(mutated.part_1,point_2+1)\n \n return mutated\n \ndef swapMutation(child,ctype=2):\n rng = np.random.default_rng()\n if ctype==1:\n c = child.cities\n s = child.tours\n mutated = chrom.Chromosome_1()\n mutated.cities = copy.deepcopy(c)\n mutated.tours = copy.deepcopy(s)\n diff_tour_pairs = [[i,j] for i in range(c.shape[0]) \n for j in range(c.shape[0]) if s[i]!=s[j]]\n if diff_tour_pairs == []:\n mutated = child\n return mutated\n points = rng.choice(diff_tour_pairs)\n mutated.cities[points[0]],mutated.cities[points[1]] = mutated.cities[\n points[1]],mutated.cities[points[0]]\n mutated.tours[points[0]],mutated.tours[points[1]] = mutated.tours[\n points[1]],mutated.tours[points[0]]\n \n if ctype==2:\n p1 = 
child.part_1\n        p2 = child.part_2\n        mutated = chrom.Chromosome_2()\n        mutated.part_1 = copy.deepcopy(p1)\n        mutated.part_2 = np.sort(rng.choice(np.arange(1,max(p1)),p2.shape[0],\n                                            replace=False))\n        points = rng.choice(np.arange(p1.shape[0]),2,replace=False)\n        mutated.part_1[points[0]],mutated.part_1[points[1]] = mutated.part_1[\n            points[1]],mutated.part_1[points[0]]\n        \n    return mutated\n    \ndef invertMutation(child,ctype=2):\n    rng = np.random.default_rng()\n    if ctype==1:\n        c = child.cities\n        s = child.tours\n        mutated = chrom.Chromosome_1()\n        mutated.cities = copy.deepcopy(c)\n        mutated.tours = copy.deepcopy(s)\n        points = np.sort(rng.choice(np.arange(c.shape[0]),2,replace=False))\n        inverse_c = [c[i] for i in range(points[1],points[0]-1,-1)]\n        j = 0\n        for i in range(points[1],points[0]-1,-1):\n            mutated.cities[i] = inverse_c[j]\n            j += 1\n        \n    if ctype==2:\n        p1 = child.part_1\n        p2 = child.part_2\n        mutated = chrom.Chromosome_2()\n        mutated.part_1 = copy.deepcopy(p1)\n        mutated.part_2 = np.sort(rng.choice(np.arange(1,max(p1)),p2.shape[0],\n                                            replace=False))\n        points = np.sort(rng.choice(np.arange(p1.shape[0]),2,replace=False))\n        inverse_p1 = [p1[i] for i in range(points[1],points[0]-1,-1)]\n        j = 0\n        for i in range(points[1],points[0]-1,-1):\n            mutated.part_1[i] = inverse_p1[j]\n            j += 1\n        \n    return mutated\n    \ndef scrambleMutation(child,ctype=2):\n    rng = np.random.default_rng()\n    if ctype==1:\n        c = child.cities\n        s = child.tours\n        mutated = chrom.Chromosome_1()\n        mutated.cities = copy.deepcopy(c)\n        mutated.tours = copy.deepcopy(s)\n        points = np.sort(rng.choice(np.arange(c.shape[0]),2,replace=False))\n        scramble_ids = rng.permutation(np.arange(points[0],points[1]+1))\n        scrambled_c = [c[i] for i in scramble_ids]\n        for i,scrambled_id in enumerate(scramble_ids):\n            mutated.cities[scrambled_id] = scrambled_c[i]\n        \n    if ctype==2:\n        p1 = child.part_1\n        p2 = child.part_2\n        mutated = chrom.Chromosome_2()\n        mutated.part_1 = copy.deepcopy(p1)\n        mutated.part_2 = np.sort(rng.choice(np.arange(1,max(p1)),p2.shape[0],\n                                            replace=False))\n        points = np.sort(rng.choice(np.arange(p1.shape[0]),2,replace=False))\n        scramble_ids = rng.permutation(np.arange(points[0],points[1]+1))\n        scrambled_p1 = [p1[i] for i in scramble_ids]\n        for i,scrambled_id in enumerate(scramble_ids):\n            mutated.part_1[scrambled_id] = scrambled_p1[i]\n        \n    return mutated\n\ndef mutateChild(child,ctype=2):\n    rng = np.random.default_rng()\n    mutated = None\n    mu_type = rng.choice([0,1,2,3])\n    if mu_type==0:\n        mutated = insertMutation(child,ctype)\n    elif mu_type==1:\n        mutated = swapMutation(child,ctype)\n    elif mu_type==2:\n        mutated = invertMutation(child,ctype)\n    else:\n        mutated = scrambleMutation(child,ctype)\n    return mutated\n","repo_name":"RBaLa/2obj-MTSP-NSGA2","sub_path":"src/genops.py","file_name":"genops.py","file_ext":"py","file_size_in_byte":22971,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"85"}{"seq_id":"74457712598","text":"#!/usr/bin/python3\r\n#coding:utf-8\r\nfrom rest_framework import serializers\r\n\r\nclass UserSerializer(serializers.Serializer):\r\n    \"\"\"\r\n    User serializer class, kept consistent with the model\r\n    Every field written here is a field we return to the front end\r\n    \"\"\"\r\n    id = serializers.IntegerField()\r\n    username = serializers.CharField()\r\n    # the field name 'username' is the key returned to the front end; serializers.CharField() is the returned type\r\n    email = 
serializers.EmailField()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"Dionysusio/devops","sub_path":"devops/apps/users/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"39195841667","text":"import threading\nimport time\nimport json\nimport random\nimport os\nimport sys\nimport copy\nimport xmlrpc.client\nimport ssl\nimport traceback\nimport tarfile\n\nimport n4d.server.core\nimport n4d.responses\nimport n4d.utils\n\nclass VariablesManager:\n\t\n\tVARIABLES_DIR=\"/var/lib/n4d/variables/\"\n\tRUN_DIR=\"/run/n4d/variables/\"\n\tINBOX=\"/var/lib/n4d/variables-inbox/\"\n\tTRASH=\"/var/lib/n4d/variables-trash/\"\n\tLOG=\"/var/log/n4d/variables-manager\"\n\t\n\tVARIABLE_NOT_FOUND_ERROR=-5\n\tPROTECTED_VARIABLE_ERROR=-10\n\tREMOTE_VARIABLES_SERVER_ERROR=-15\n\tVARIABLES_BACKUP_ERROR=-30\n\tVARIABLES_RESTORE_ERROR=-35\n\tREMOTE_SERVER_NOT_CONFIGURED_ERROR=-40\n\t\n\tLOCK_FILE=RUN_DIR+\"lock\"\n\t\n\tdef __init__(self):\n\n\t\t#this should be the first thing called\n\t\tself.core=n4d.server.core.Core.get_core()\n\t\t\n\t\tif os.path.exists(VariablesManager.LOCK_FILE):\n\t\t\tos.remove(VariablesManager.LOCK_FILE)\n\t\t\n\t\t\n\t\tself.create_variables_dirs()\n\t\t\n\t\tself.load_variables()\n\t\tself.read_inbox()\n\t\tself.empty_trash()\n\t\t\n\t#def init\n\t\n\tdef dprint(self,data):\n\t\t\n\t\tself.core.pprint(\"VariablesManager\",\"%s\"%data)\n\t\t\t\n\t#def dprint\n\n\tdef dstdout(self,data):\n\t\t\n\t\tif n4d.server.core.Core.DEBUG:\n\t\t\tsys.stdout.write(str(data))\n\t\t\t\n\t#def dstdout\n\t\n\tdef create_variables_dirs(self):\n\t\t\n\t\tif not os.path.exists(VariablesManager.VARIABLES_DIR):\n\t\t\tos.makedirs(VariablesManager.VARIABLES_DIR)\n\t\t\n\t\tif not os.path.exists(VariablesManager.INBOX):\n\t\t\tos.makedirs(VariablesManager.INBOX)\n\t\t\n\t\tif not os.path.exists(VariablesManager.TRASH):\n\t\t\tos.makedirs(VariablesManager.TRASH)\n\t\t\n\t\tif not os.path.exists(VariablesManager.RUN_DIR):\n\t\t\tos.makedirs(VariablesManager.RUN_DIR)\n\t\t\t\n\t\tif os.path.exists(VariablesManager.LOCK_FILE):\n\t\t\tos.remove(VariablesManager.LOCK_FILE)\n\t\t\t\n\t#def create_run_dir\n\t\n\tdef load_variables(self):\n\t\t\n\t\tself.variables={}\n\t\tself.triggers={}\n\t\t\n\t\tself.dprint(\"Loading variables...\")\n\t\tfor file_ in os.listdir(VariablesManager.VARIABLES_DIR):\n\t\t\tself.dstdout(\"\\tLoading \" + file_ + \" ... \")\n\t\t\ttry:\n\t\t\t\tf=open(os.path.join(VariablesManager.VARIABLES_DIR,file_))\n\t\t\t\tdata=json.load(f)\n\t\t\t\tf.close()\n\t\t\t\tself.variables[file_]=data[file_]\n\t\t\t\tself.dstdout(\"OK\\n\")\n\t\t\texcept Exception as e:\n\t\t\t\tself.dstdout(\"FAILED [\"+str(e)+\"]\\n\")\n\t\n\t#def load_variables\n\t\n\tdef read_inbox(self):\n\t\t\n\t\tmodified=False\n\t\tfile_list=os.listdir(VariablesManager.INBOX)\n\t\tif len(file_list)>0:\n\t\t\tself.dprint(\"Loading variables inbox...\")\n\t\t\tfor file_ in file_list:\n\t\t\t\ttry:\n\t\t\t\t\tself.dstdout(\"\\tLoading \" + file_ + \" ... 
\")\n\t\t\t\t\tf=open(os.path.join(VariablesManager.INBOX,file_))\n\t\t\t\t\tdata=json.load(f)\n\t\t\t\t\tf.close()\n\n\t\t\t\t\tfor key in data:\n\t\t\t\t\t\tif \"value\" not in data[key]:\n\t\t\t\t\t\t\tself.dstdout(\"SKIPPED\\n\")\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\tif key not in self.variables:\n\t\t\t\t\t\t\tself.variables[key]=data[key]\n\t\t\t\t\t\t\tif \"volatile\" not in self.variables[key]:\n\t\t\t\t\t\t\t\tself.variables[key][\"volatile\"]=False\n\t\t\t\t\t\t\tif \"force_update\" not in self.variables[key]:\n\t\t\t\t\t\t\t\tself.variables[key][\"force_update\"]=False\n\t\t\t\t\t\t\tmodified=True\n\t\t\t\t\t\t\tself.dstdout(\"OK\\n\")\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tif \"force_update\" in data[key] and data:\n\t\t\t\t\t\t\t\tself.variables[key]=data[key]\n\t\t\t\t\t\t\t\tif \"volatile\" not in self.variables[key]:\n\t\t\t\t\t\t\t\t\tself.variables[key][\"volatile\"]=False\n\t\t\t\t\t\t\t\tif \"force_update\" not in self.variables[key]:\n\t\t\t\t\t\t\t\t\tself.variables[key][\"force_update\"]=False\n\t\t\t\t\t\t\t\tmodified=True\n\t\t\t\t\t\t\t\tself.dstdout(\"OK\\n\")\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tself.dstdout(\"SKIPPED\\n\")\n\t\t\t\texcept Exception as e:\n\t\t\t\t\tself.dstdout(\"FAILED [\"+str(e)+\"]\\n\")\n\t\t\t\t\t\t\t\n\t\t\t\tos.remove(VariablesManager.INBOX+file_)\n\t\t\t\t\n\t\t\tif modified:\n\t\t\t\tself.save_variables()\n\t\t\n\t\treturn n4d.responses.build_successful_call_response(True,\"Inbox read\")\n\t\t\n\t#def read_inbox\n\t\n\tdef empty_trash(self):\n\t\t\n\t\tmodified=False\n\t\tfile_list=os.listdir(VariablesManager.TRASH)\n\t\tif len(file_list)>0:\n\t\t\tself.dprint(\"Emptying variables trash...\")\n\t\t\tfor file_ in file_list:\n\t\t\t\tself.dstdout(\"\\tEmptying \" + file_ + \" ... \")\n\t\t\t\tif file_ in self.variables:\n\t\t\t\t\tself.variables.pop(file_)\n\t\t\t\t\tmodified=True\n\t\t\t\tos.remove(VariablesManager.TRASH+file_)\n\t\t\n\t\tif modified:\n\t\t\tself.save_variables()\n\t\t\n\t\treturn n4d.responses.build_successful_call_response(True,\"Trash emptied\")\n\t\t\n\t#def empty_trash\n\t\n\tdef save_variables(self,variable_name=None):\n\t\t\n\t\ttry:\n\t\t\twhile os.path.exists(VariablesManager.LOCK_FILE):\n\t\t\t\ttime.sleep(2)\n\t\t\t\t\n\t\t\tf=open(VariablesManager.LOCK_FILE,\"w\")\n\t\t\tf.close()\n\t\t\t\n\t\t\tif variable_name==None:\n\t\t\t\n\t\t\t\ttmp_vars={}\n\t\t\t\tfor item in self.variables:\n\t\t\t\t\tif \"volatile\" in self.variables[item] and self.variables[item][\"volatile\"]:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\ttmp_vars[item]=self.variables[item]\n\t\t\t\t\t\t\n\t\t\t\tfor item in tmp_vars:\n\t\t\t\t\t\n\t\t\t\t\ttmp={}\n\t\t\t\t\ttmp[item]=tmp_vars[item]\n\t\t\t\t\tf=open(VariablesManager.VARIABLES_DIR+item,\"w\")\n\t\t\t\t\tdata=json.dumps(tmp,indent=4,ensure_ascii=False)\n\t\t\t\t\tf.write(data)\n\t\t\t\t\tf.close()\n\t\t\t\t\t'''\n\t\t\t\t\tif \"root_protected\" in tmp_vars[item]:\n\t\t\t\t\t\tif tmp_vars[item][\"root_protected\"]:\n\t\t\t\t\t\t\tself.chmod(VariablesManager.VARIABLES_DIR+item,0600)\n\t\t\t\t\t'''\n\t\t\telse:\n\t\t\t\tif variable_name in self.variables:\n\t\t\t\t\tif \"volatile\" in self.variables[variable_name] and self.variables[variable_name][\"volatile\"]:\n\t\t\t\t\t\tos.remove(VariablesManager.LOCK_FILE)\n\t\t\t\t\t\treturn 
\t\t\t\t\tvar={}\n\t\t\t\t\tvar[variable_name]={}\n\t\t\t\t\tvar[variable_name]=self.variables[variable_name]\n\t\t\t\t\tf=open(VariablesManager.VARIABLES_DIR+variable_name,\"w\")\n\t\t\t\t\tdata=json.dumps(var,indent=4,ensure_ascii=False)\n\t\t\t\t\tf.write(data)\n\t\t\t\t\tf.close()\n\t\t\t\t\t\n\t\t\t\t\t\t\n\t\t\tos.remove(VariablesManager.LOCK_FILE)\n\t\t\treturn True\n\t\t\t\n\t\texcept Exception as e:\n\t\t\tos.remove(VariablesManager.LOCK_FILE)\n\t\t\tprint(e)\n\t\t\treturn False\n\t\t\n\t#def save_variables\n\t\n\tdef variable_exists(self,vname):\n\t\t\n\t\tvariable_exists=vname in self.variables\n\t\t\n\t\tif \"REMOTE_VARIABLES_SERVER\" in self.variables and self.variables[\"REMOTE_VARIABLES_SERVER\"][\"value\"]!=None:\n\t\t\t\n\t\t\tremote_variable_server=self.variables[\"REMOTE_VARIABLES_SERVER\"][\"value\"]\n\t\t\tremote_ip=self.core.get_ip_from_host(remote_variable_server)\n\t\t\t\n\t\t\tif remote_ip!=None:\n\t\t\t\tremote_variable_server=remote_ip\n\n\t\t\tif remote_variable_server not in self.core.get_all_ips():\n\t\t\t\tcontext=ssl._create_unverified_context()\n\t\t\t\ts = xmlrpc.client.ServerProxy('https://%s:9779'%self.variables[\"REMOTE_VARIABLES_SERVER\"][\"value\"],context=context,allow_none=True)\n\t\t\t\ttry:\n\t\t\t\t\tret=s.variable_exists(vname)\n\t\t\t\t\tif ret[\"status\"]==0:\n\t\t\t\t\t\tvariable_exists=ret[\"return\"]\n\n\t\t\t\texcept Exception as e:\n\t\t\t\t\tpass\n\t\t\t\t\t#tback=traceback.format_exc()\n\t\t\t\t\t#return n4d.responses.build_failed_call_response(VariablesManager.REMOTE_VARIABLES_SERVER_ERROR,str(e),tback)\t\t\t\n\t\t\t\n\t\t\n\t\treturn n4d.responses.build_successful_call_response(variable_exists)\n\t\t\n\t#def variable_exists\n\t\t\t\n\tdef set_variable(self,name,value,attr=None):\n\t\t\n\t\tif name not in self.variables:\n\t\t\tvariable={}\n\t\t\tvariable[\"value\"]=None\n\t\t\tself.variables[name]=variable\n\t\t\tself.variables[name][\"volatile\"]=False\n\t\t\t\n\t\tself.variables[name][\"value\"]=copy.deepcopy(value)\n\t\t\n\t\tif type(attr)==dict:\n\t\t\tself.set_attr(name,attr)\n\t\t\n\t\tif not self.variables[name][\"volatile\"]:\n\t\t\tself.save_variables(name)\n\t\t\n\t\tself.notify_changes(name,value)\n\t\t\n\t\treturn n4d.responses.build_successful_call_response(True)\n\t\t\t\n\t\t\n\t#def set_variable\n\t\n\tdef set_attr(self,name,attr_dic):\n\t\t\n\t\tif name in self.variables:\n\t\t\tfor key in attr_dic:\n\t\t\t\tif key!=\"value\":\n\t\t\t\t\tself.variables[name][key]=attr_dic[key]\n\t\t\tself.save_variables(name)\n\n\t\t\treturn n4d.responses.build_successful_call_response(True,\"Attributes set\")\n\t\t\n\t\treturn n4d.responses.build_failed_call_response(VariablesManager.VARIABLE_NOT_FOUND_ERROR,\"Variable not found\")\n\t\t\n\t#def set_attr\n\t\n\tdef delete_attr(self,name,key):\n\t\t\n\t\tif name in self.variables:\n\t\t\tif key != \"value\" and key in self.variables[name]:\n\t\t\t\tself.variables[name].pop(key)\n\t\t\t\tself.save_variables(name)\n\t\t\t\n\t\t\treturn n4d.responses.build_successful_call_response(True,\"Attribute deleted\")\n\t\t\n\t\treturn n4d.responses.build_failed_call_response(VariablesManager.VARIABLE_NOT_FOUND_ERROR,\"Variable not found\")\n\t\t\n\t#def delete_attr\n\t\n\tdef set_remote_server(self,variable_name,server):\n\t\t\n\t\tif variable_name in self.variables:\n\t\t\tself.variables[variable_name][\"remote_server\"]=server\n\t\t\tself.save_variables(variable_name)\n\t\t\treturn n4d.responses.build_successful_call_response(True,\"Remote server added to %s\"%variable_name)\n\t\t\t\n\t\treturn n4d.responses.build_failed_call_response(VariablesManager.VARIABLE_NOT_FOUND_ERROR,\"Variable not found\")\n\t\t\n\t#def set_remote_server\n\t\n\t\n\tdef remove_remote_server(self,variable_name):\n\t\t\n\t\tif variable_name in self.variables:\n\t\t\tif \"remote_server\" in self.variables[variable_name]:\n\t\t\t\tself.variables[variable_name].pop(\"remote_server\")\n\t\t\t\tself.save_variables(variable_name)\n\t\t\t\treturn n4d.responses.build_successful_call_response(True,\"Remote server removed from %s\"%variable_name)\n\t\t\telse:\n\t\t\t\treturn n4d.responses.build_failed_call_response(VariablesManager.REMOTE_SERVER_NOT_CONFIGURED_ERROR,\"%s has no remote server configured\"%variable_name)\n\t\t\t\t\n\t\treturn n4d.responses.build_failed_call_response(VariablesManager.VARIABLE_NOT_FOUND_ERROR,\"Variable not found\")\n\t\t\t\n\t\t\n\t#def remove_remote_server\n\t\n
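\t# lookup order in get_variable below: the local value first, then the\n\t# variable's own \"remote_server\" if one is set, then the global\n\t# REMOTE_VARIABLES_SERVER, both queried over XML-RPC\n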
%s\"%variable_name)\n\t\t\t\n\t\treturn n4d.responses.build_failed_call_response(VariablesManager.VARIABLE_NOT_FOUND_ERROR,\"Variable not found\")\n\t\t\n\t#def set_remote_server\n\t\n\t\n\tdef remove_remote_server(self,variable_name):\n\t\t\n\t\tif variable_name in self.variables:\n\t\t\tif \"remote_server\" in self.variables[variable_name]:\n\t\t\t\tself.variables[variable_name].pop(\"remote_server\")\n\t\t\t\tself.save_variables(variable_name)\n\t\t\t\treturn n4d.responses.build_successful_call_response(True,\"Remote server removed from %s\"%variable_name)\n\t\t\telse:\n\t\t\t\treturn n4d.responses.build_failed_call_response(VariablesManager.REMOTE_SERVER_NOT_CONFIGURED_ERROR,\"%s has no remote server configured\"%variable_name)\n\t\t\t\t\n\t\treturn n4d.responses.build_failed_call_response(VariablesManager.VARIABLE_NOT_FOUND_ERROR,\"Variable not found\")\n\t\t\t\n\t\t\n\t#def remove_remote_server\n\t\n\tdef get_variable(self,name,full_description=False):\n\t\t\n\t\tif name in self.variables:\n\t\t\t\n\t\t\tif \"root_protected\" in self.variables[name] and self.variables[name][\"root_protected\"]:\n\t\t\t\treturn n4d.responses.build_failed_call_response(VariablesManager.PROTECTED_VARIABLE_ERROR,\"Root protected variable. File is found in %s%s\"%(VariablesManager.WATCH_DIR,name))\n\t\t\t\n\t\t\tif \"remote_server\" not in self.variables[name] or (\"remote_server\" in self.variables[name] and self.variables[name][\"remote_server\"]==None):\n\t\t\t\n\t\t\t\tif full_description:\n\t\t\t\t\treturn n4d.responses.build_successful_call_response(copy.deepcopy(self.variables[name]))\n\t\t\t\telse:\n\t\t\t\t\treturn n4d.responses.build_successful_call_response(copy.deepcopy(self.variables[name][\"value\"]))\n\t\t\t\t\t\n\t\t\telse:\n\t\t\t\tremote_variable_server=self.variables[name][\"remote_server\"]\n\t\t\t\tremote_ip=self.core.get_ip_from_host(remote_variable_server)\n\t\t\t\tif remote_ip!=None:\n\t\t\t\t\tremote_variable_server=remote_ip\n\t\t\t\t\t\n\t\t\t\tif remote_variable_server not in self.core.get_all_ips():\n\t\t\t\t\tcontext=ssl._create_unverified_context()\n\t\t\t\t\ts = xmlrpc.client.ServerProxy('https://%s:9779'%self.variables[name][\"remote_server\"],context=context,allow_none=True)\n\t\t\t\t\ttry:\n\t\t\t\t\t\tret=s.get_variable(name,full_description)\n\t\t\t\t\t\tif ret[\"status\"]==0:\n\t\t\t\t\t\t\treturn ret\n\n\t\t\t\t\texcept Exception as e:\n\t\t\t\t\t\ttback=traceback.format_exc()\n\t\t\t\t\t\treturn n4d.responses.build_failed_call_response(VariablesManager.REMOTE_VARIABLES_SERVER_ERROR,str(e),tback)\n\t\t\t\t\n\t\t\t\t\n\t\telif \"REMOTE_VARIABLES_SERVER\" in self.variables and self.variables[\"REMOTE_VARIABLES_SERVER\"][\"value\"]!=None:\n\t\t\t\n\t\t\tremote_variable_server=self.variables[\"REMOTE_VARIABLES_SERVER\"][\"value\"]\n\t\t\tremote_ip=self.core.get_ip_from_host(remote_variable_server)\n\t\t\t\n\t\t\tif remote_ip!=None:\n\t\t\t\tremote_variable_server=remote_ip\n\n\t\t\tif remote_variable_server not in self.core.get_all_ips():\n\t\t\t\tcontext=ssl._create_unverified_context()\n\t\t\t\ts = xmlrpc.client.ServerProxy('https://%s:9779'%self.variables[\"REMOTE_VARIABLES_SERVER\"][\"value\"],context=context,allow_none=True)\n\t\t\t\ttry:\n\t\t\t\t\tret=s.get_variable(name,full_description)\n\t\t\t\t\tif ret[\"status\"]==0:\n\t\t\t\t\t\treturn ret\n\n\t\t\t\texcept Exception as e:\n\t\t\t\t\ttback=traceback.format_exc()\n\t\t\t\t\treturn n4d.responses.build_failed_call_response(VariablesManager.REMOTE_VARIABLES_SERVER_ERROR,str(e),tback)\n\t\t\t\t\n\t\treturn 
n4d.responses.build_failed_call_response(VariablesManager.VARIABLE_NOT_FOUND_ERROR,\"Variable not found\")\n\t\t\n\t#def get_variable\n\t\n\tdef get_variable_list(self,variable_list=None,full_info=None):\n\t\n\t\tret={}\n\t\tif variable_list==None or not isinstance(variable_list,list):\n\t\t\tvariable_list=[]\n\t\tif full_info==None:\n\t\t\tfull_info=False\n\t\t\n\t\tfor variable in variable_list:\n\t\t\ttmp=self.get_variable(variable,full_info)\n\t\t\tif tmp[\"status\"]==0:\n\t\t\t\tret[variable]=tmp[\"return\"]\n\t\t\n\t\treturn n4d.responses.build_successful_call_response(ret)\n\t\n\t#def get_variable_list\n\t\n\tdef get_variables(self,full_info=False):\n\t\t\n\t\tif full_info:\n\t\t\treturn n4d.responses.build_successful_call_response(copy.deepcopy(self.variables))\n\t\t\n\t\tret={}\n\t\t\n\t\tfor variable in self.variables:\n\t\t\tret[variable]=copy.deepcopy(self.variables[variable][\"value\"])\n\t\t\n\t\treturn n4d.responses.build_successful_call_response(ret)\n\t\t\n\t#def get_variables\n\t\n\tdef delete_variable(self,name):\n\t\t\n\t\tif name in self.variables:\n\t\t\tself.variables.pop(name)\n\t\t\tif os.path.exists(VariablesManager.VARIABLES_DIR+name):\n\t\t\t\tos.remove(VariablesManager.VARIABLES_DIR+name)\n\t\t\t\t\n\t\t\treturn n4d.responses.build_successful_call_response(True,\"Variable deleted\")\n\t\t\t\n\t\treturn n4d.responses.build_failed_call_response(VariablesManager.VARIABLE_NOT_FOUND_ERROR,\"Variable not found\")\n\t\t\n\t#def delete_variable\n\t\n\tdef notify_changes(self,variable_name,value):\n\t\t\n\t\tt=threading.Thread(target=self._notify_changes,args=(variable_name,value),name=\"N4d.VariablesManager.notify_changes thread\")\n\t\tt.daemon=True\n\t\tt.start()\n\t\t\n\t\t# self execution of triggers\n\t\tself.execute_triggers(variable_name,value)\n\t\t\n\t#def notify_changes\n\t\n\tdef _notify_changes(self,variable_name,value):\n\t\t\n\t\tcm=self.core.clients_manager\n\t\tsent_ips=set()\n\t\tfor client in cm.clients:\n\t\t\ttry:\n\t\t\t\t#self.dprint(\"Notifying %s changes to %s...\"%(variable_name,cm.clients[client][\"ip\"]))\n\t\t\t\tclient_ip=cm.clients[client][\"ip\"]\n\t\t\t\tsent_ips.add(client_ip)\n\t\t\t\tself.send_server_changed(client_ip,variable_name,value)\n\t\t\texcept:\n\t\t\t\tpass\n\t\t\t\n\t#def _notify_changes\n\t\n\tdef send_server_changed(self,ip,variable_name,value):\n\t\t\n\t\tcontext=ssl._create_unverified_context()\n\t\ts = xmlrpc.client.ServerProxy('https://%s:9779'%ip,context=context,allow_none=True)\n\t\ts.server_changed(self.core.id,variable_name,value)\n\t\t\n\t#def send_server_changed\n\t\n\tdef register_trigger(self,variable_name,class_name,function):\n\t\t\n\t\tif variable_name not in self.triggers:\n\t\t\tself.triggers[variable_name]=set()\n\t\t\n\t\tself.triggers[variable_name].add((class_name,function))\n\t\tself.dprint(\"Trigger registered %s %s\"%(variable_name,class_name))\n\t\t\n\t\treturn n4d.responses.build_successful_call_response()\n\t\t\n\t#def register_trigger\n\t\n\tdef execute_triggers(self,variable_name,value):\n\t\t\n\t\tif variable_name in self.triggers:\n\t\t\tself.dprint(\"Executing %s triggers ...\"%variable_name)\n\t\t\tfor item in self.triggers[variable_name]:\n\t\t\t\ttry:\n\t\t\t\t\tclass_name,function=item\n\t\t\t\t\tt=threading.Thread(target=function,args=(value,),name=\"N4d.VariablesManager.execute_triggers thread\")\n\t\t\t\t\tt.daemon=True\n\t\t\t\t\tt.start()\n\t\t\t\texcept:\n\t\t\t\t\tpass\n\t\t\t\t\t\n\t\treturn n4d.responses.build_successful_call_response()\n\t\t\n\t\t\n\t#def 
execute_triggers\n\t\n\tdef backup(self,dir=\"/backup\"):\n\t\t\n\t\ttry:\n\t\t\tfile_path=dir+\"/\"+n4d.utils.get_backup_name(\"VariablesManager\")\n\t\t\ttar=tarfile.open(file_path,\"w:gz\")\n\t\t\ttar.add(VariablesManager.VARIABLES_DIR)\n\t\t\ttar.close()\n\t\t\t\n\t\t\treturn n4d.responses.build_successful_call_response(file_path)\n\t\t\t\n\t\texcept Exception as e:\n\t\t\t\n\t\t\treturn n4d.responses.build_failed_call_response(VariablesManager.VARIABLES_BACKUP_ERROR,str(e))\n\t\t\n\t#def backup\n\t\n\tdef restore(self,file_path=None):\n\t\t\n\t\tif file_path==None:\n\t\t\tfor f in sorted(os.listdir(\"/backup\"),reverse=True):\n\t\t\t\tif \"VariablesManager\" in f:\n\t\t\t\t\tfile_path=\"/backup/\"+f\n\t\t\t\t\tbreak\n\t\t\t\t\t\n\t\ttry:\n\t\t\tif os.path.exists(file_path):\n\t\t\t\t\n\t\t\t\ttmp_dir=tempfile.mkdtemp()\n\t\t\t\ttar=tarfile.open(file_path)\n\t\t\t\ttar.extractall(tmp_dir)\n\t\t\t\ttar.close()\n\t\t\t\t\n\t\t\t\tif not os.path.exists(VariablesManager.VARIABLES_DIR):\n\t\t\t\t\tos.mkdir(VariablesManager.VARIABLES_DIR)\n\t\t\t\t\n\t\t\t\tfor f in os.listdir(tmp_dir+VariablesManager.VARIABLES_DIR):\n\t\t\t\t\ttmp_path=tmp_dir+VariablesManager.VARIABLES_DIR+f\n\t\t\t\t\tshutil.copy(tmp_path,VariablesManager.VARIABLES_DIR)\n\t\t\t\t\n\t\t\t\t\n\t\t\t\tself.load_variables()\n\t\t\t\t\t\t\n\t\t\t\treturn n4d.responses.build_successful_call_response()\n\t\t\telse:\n\t\t\t\treturn n4d.responses.build_failed_call_response(VariablesManager.VARIABLES_RESTORE_ERROR,\"File not found\")\n\t\t\t\t\n\t\texcept Exception as e:\n\t\t\t\n\t\t\treturn n4d.responses.build_failed_call_response(VariablesManager.VARIABLES_RESTORE_ERROR,str(e))\n\t\t\n\t#def restore\n\t\n\t\n\n#class VariablesManager\n","repo_name":"lliurex/n4d","sub_path":"server/variablesmanager.py","file_name":"variablesmanager.py","file_ext":"py","file_size_in_byte":16003,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"37000075273","text":"import click\n\nfrom flask import 
current_app\nfrom flask.cli import with_appcontext\n\nfrom perciapp.blueprints.billing.gateways.stripecom import Plan as PaymentPlan\n\n\n@click.group()\ndef stripe():\n \"\"\" Perform various tasks with Stripe's API. \"\"\"\n pass\n\n\n@stripe.command()\n@with_appcontext\ndef sync_plans():\n \"\"\"\n Sync (upsert) STRIPE_PLANS to Stripe.\n\n :return: None\n \"\"\"\n if current_app.config['STRIPE_PLANS'] is None:\n return None\n\n for _, value in current_app.config['STRIPE_PLANS'].items():\n plan = PaymentPlan.retrieve(value.get('id'))\n\n if plan:\n PaymentPlan.update(id=value.get('id'),\n name=value.get('name'),\n metadata=value.get('metadata'),\n statement_descriptor=value.get(\n 'statement_descriptor'))\n else:\n PaymentPlan.create(**value)\n\n return None\n\n\n@stripe.command()\n@click.argument('plan_ids', nargs=-1)\n@with_appcontext\ndef delete_plans(plan_ids):\n \"\"\"\n Delete 1 or more plans from Stripe.\n\n :return: None\n \"\"\"\n for plan_id in plan_ids:\n PaymentPlan.delete(plan_id)\n\n return None\n\n\n@stripe.command()\n@with_appcontext\ndef list_plans():\n \"\"\"\n List all existing plans on Stripe.\n\n :return: Stripe plans\n \"\"\"\n return click.echo(PaymentPlan.list())\n","repo_name":"kashifisonly1/perciAI","sub_path":"cli/cmd_stripe.py","file_name":"cmd_stripe.py","file_ext":"py","file_size_in_byte":1396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"28913402654","text":"from madlib import read_file, parse_string_and_inputs\nfrom io import StringIO\n\ndef test_file_reader():\n expected = \"In a hole in the ground there lived a hobbit. Not a nasty, dirty, wet hole, filled with the ends of worms and an oozy smell, nor yet a dry, bare, sandy hole with nothing in it to sit down on or to eat: it was a hobbit-hole, and that means comfort.\"\n actual = read_file(\"4tests.txt\")\n assert actual == expected\n\ndef test_parse_string_and_inputs(monkeypatch):\n expected = \"here be Genuine User Input!!\"\n monkeypatch.setattr('sys.stdin', StringIO(\"Genuine User Input\\n\"))\n actual = parse_string_and_inputs(\"here be {input}!!\")\n assert actual == expected \n","repo_name":"thetravisw/madlib-cli","sub_path":"test_madlib.py","file_name":"test_madlib.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"70896096919","text":"def int32_to_ip(int32):\n # your code here\n int32 = '{:032b}'.format(int32)\n int32 = [int32[i:i+8] for i in range(0, len(int32), 8)]\n return '.'.join(str(int(i, 2)) for i in int32)\n\n# from ipaddress import IPv4Address\n\n# def int32_to_ip(int32):\n# return str(IPv4Address(int32))\n\n\nprint(int32_to_ip(0))\n","repo_name":"MeongGanas/codewars-python","sub_path":"int32_to_IPv4.py","file_name":"int32_to_IPv4.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"74682753238","text":"import os\nimport sys\nimport shutil\nimport subprocess\n\nVALID_KEYWORDS = (\"init\", \"add\", \"start\", \"startdev\", \"end\", \"enddev\", \"clean\")\n\n\nclass FileContents:\n\n files = []\n ardunio_file = None\n\n def __init__(self, file):\n \"\"\"\n Initialize a FileContents object\n :param file: a file to read data from\n \"\"\"\n\n # None check\n if file is None:\n print(\"file is None!\", file=sys.stderr)\n sys.exit(1)\n\n # Read in everything in the file\n for line in file.read().split(\"\\n\"):\n\n def 
_add_file(s_self, path):\n                \"\"\"\n                See if the file is an Arduino project file\n\n                :param path: path to check\n                :return:\n                \"\"\"\n                # See if this is the arduino project file\n                if len(path.strip()) > 4 and path.strip()[-4:] == \".ino\":\n                    # Make sure that an arduino project file hasn't been found already\n                    if s_self.ardunio_file is not None:\n                        print(\"arudev: two .ino files tracked, exiting\", file=sys.stderr)\n                        sys.exit(1)\n                    else:\n                        s_self.ardunio_file = path.strip()\n                else:\n                    s_self.files.append(path.strip())\n                pass\n            # Empty line detection\n            if len(line.strip()) == 0:\n                continue\n\n            # Comment detection\n            if line.strip()[0] == '#':\n                continue\n\n            # Make sure the file (or directory) exists\n            if not(os.path.exists(line.strip())):\n                raise ParserFileNotFoundError(\"arudev: file `\" + line.strip() + \"` could not be found!\")\n\n            # Convert directories into files\n            if os.path.isdir(line.strip()):\n                def _find_file_r(s_self, path):\n                    \"\"\"\n                    Recursive function to find all the files if a directory is given\n\n                    :param path: the path to examine\n                    \"\"\"\n                    # Base case\n                    if os.path.isfile(path):\n                        _add_file(s_self, path)\n                    else:\n                        # Recurse over every file in the directory\n                        for npath in os.listdir(path):\n                            _find_file_r(s_self, path + os.sep + npath)\n\n                _find_file_r(self, line.strip())\n            else:\n                _add_file(self, line.strip())\n\n        # Make sure the arduino project file was found\n        if self.ardunio_file is None:\n            print(\"arudev: no arduino project file (.ino) found, exiting\", file=sys.stderr)\n            sys.exit(1)\n\n        # Make sure there are no files with the same name\n        file_names = []\n        for file in self.files:\n            name = os.path.basename(file)\n            if name in file_names:\n                print(\"arudev: redundant file name: `\" + name + \"`\", file=sys.stderr)\n                sys.exit(1)\n            else:\n                file_names.append(name)\n            pass\n\n    def run(self, args):\n        # Check for an empty slice\n        if len(args) == 0:\n            return\n\n        ## Check to see what the user wants to do\n        # startdev\n        if args[0] == \"startdev\" or args[0] == \"start\":\n            # Starting development is pretty easy, make a directory and copy everything in there\n            dev_dir = \"dev/\" + os.path.basename(self.ardunio_file)[:-4]\n            # Make sure the dev directory exists\n            if not(os.path.exists(dev_dir)):\n                os.makedirs(dev_dir)\n\n            # Copy all files to the dev directory\n            for file in self.files:\n                shutil.copy(file, dev_dir)\n\n            # Copy the Arduino project file\n            shutil.copy(self.ardunio_file, dev_dir)\n\n            # Open the Arduino IDE\n            if sys.platform == \"linux\" or sys.platform == \"linux2\":\n                ret = subprocess.call(\"arduino \" + dev_dir + os.sep + os.path.basename(self.ardunio_file) + \" &\", shell=True,\n                                      stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)\n\n                # Check to see if Arduino IDE could be found\n                if ret != 0:\n                    print(\"arudev: Arduino IDE cannot be opened, continuing\", file=sys.stderr)\n            elif sys.platform == \"darwin\":\n                subprocess.call(\"open -n \" + dev_dir + os.sep + os.path.basename(self.ardunio_file), shell=True)\n            elif sys.platform == \"win32\" or sys.platform == \"win64\":\n                subprocess.call(\"start \" + dev_dir + os.sep + os.path.basename(self.ardunio_file), shell=True)\n            else:\n                print(\"arudev: unknown OS, not trying to open file\", file=sys.stderr)\n\n        # enddev\n        elif args[0] == \"enddev\" or args[0] == \"end\":\n            # Quick link to the development directory\n            dev_dir = \"dev\" + os.sep + os.path.basename(self.ardunio_file)[:-4]\n\n            # Make sure the dev directory exists\n            if not(os.path.exists(dev_dir)):\n                print(\"arudev: dev directory does not exist (have you run `start`?)\", file=sys.stderr)\n                
sys.exit(1)\n\n            # Copy files back to their home\n            for file in self.files:\n                shutil.copy(dev_dir + os.sep + os.path.basename(file), file)\n\n            shutil.copy(dev_dir + os.sep + os.path.basename(self.ardunio_file), self.ardunio_file)\n\n\nclass ParserFileNotFoundError(FileNotFoundError):\n    \"\"\"\n    Exception raised when the Parser cannot find the specified file.\n    \"\"\"\n\n    pass\n","repo_name":"thatging3rkid/arudev","sub_path":"src/FileContents.py","file_name":"FileContents.py","file_ext":"py","file_size_in_byte":5659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"33885291917","text":"from aiogram.dispatcher.filters.state import StatesGroup, State\n\n\nclass IniciatorStates(StatesGroup):\n    #\n    State1 = State()\n    #\n    State2 = State()\n    #\n    State3 = State()\n    #\n    State4 = State()\n    #\n    State5 = State()\n    #\n    State6 = State()\n\n\nclass AdminStates(StatesGroup):\n    EnterName = State()\n    UploadFile = State()\n\n\nclass Chatting(StatesGroup):\n    ToAdmin = State()\n    ToPI = State()\n","repo_name":"DaiPoigrat/TreasurerBot","sub_path":"states/states.py","file_name":"states.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"28964614796","text":"import pandas as pd\nimport requests\nimport json\nfrom tqdm import tqdm\nfrom geopy.distance import geodesic\nfrom Download import HDBDataset\n\nclass MRTDataset:\n    \"\"\"Creates the MRT dataset and cleans the dataset \n    \"\"\"\n    def __init__(self):\n        self.mrt_data = self.read_mrt()\n    \n    def read_mrt(self):\n        mrt_data = pd.read_csv('data/MRT Stations.csv')\n        mrt_data[\"mrt\"] = mrt_data[\"STN_NAME\"].apply(MRTDataset.parse_mrt_name)\n        return mrt_data\n    \n    @staticmethod\n    def parse_mrt_name(stn):\n        \"\"\"Strip the last 12 characters (the station-type suffix) from a station name\n\n        Args:\n            stn (string): station name from the STN_NAME column\n\n        Returns:\n            string: mrt name in lowercase\n        \"\"\"\n        return stn[:-12].lower()\n\n\nclass HDBGeo(HDBDataset, MRTDataset):\n    \"\"\"\n    Combines the HDB dataset with the MRT Dataset\n    Generates the distance to nearest MRT for each transaction\n\n    Base classes:\n        HDBDataset: HDB Dataset\n        MRTDataset: MRT Dataset\n    \"\"\"\n    def __init__(self, download):\n        super().__init__(download)\n        super(HDBDataset, self).__init__()\n        if download:\n            self.dataset_geo = self.get_hdb_geo()\n            self.dataset_merged = self.merge_mrt()\n            assert len(self.dataset_geo) == len(self.dataset_merged)\n            self.dataset_merged.to_csv('data/dataset_merged.csv')\n        else:\n            try:\n                print('no downloading')\n                self.dataset_merged = pd.read_csv('data/dataset_merged.csv')\n            except OSError as e:\n                print(e.errno)\n    \n    def get_hdb_geo(self) -> pd.DataFrame:\n        geo_data = self.get_geo_info()\n        df_output = self.dataset.merge(geo_data,\n                                       how='left',\n                                       on='address')\n        return df_output\n    \n    def get_unique_address(self) -> pd.Series:\n        \"\"\"Get the unique addresses in the HDB dataset\n\n        Returns:\n            pd.Series: Series of unique addresses (blk + street_name)\n        \"\"\"\n        return self.dataset['address'].unique()\n    \n    def get_geo_info(self) -> pd.DataFrame:\n        \"\"\"\n        Gets geolocation data from OneMap API\n\n        Returns:\n            pd.DataFrame: DF containing address, lat, long and postal code\n        \"\"\"\n        latitude = []\n        longitude = []\n        postal_code = []\n        address_list = []\n        for address in tqdm(self.get_unique_address()):\n            query_string='https://developers.onemap.sg/commonapi/search?searchVal='+str(address)+'&returnGeom=Y&getAddrDetails=Y&pageNum=1'\n            resp=requests.get(query_string)\n            # Convert json object into Python dict\n            data=json.loads(resp.content)\n            
address_list.append(address)\n # Get first search result\n if data[\"found\"]:\n latitude.append(data[\"results\"][0][\"LATITUDE\"])\n longitude.append(data[\"results\"][0][\"LONGITUDE\"])\n postal_code.append(data[\"results\"][0][\"POSTAL\"])\n\n # If no search result, set as None\n else:\n latitude.append(None)\n longitude.append(None)\n postal_code.append(None)\n \n df_output = pd.DataFrame([address_list, latitude, longitude, postal_code]).transpose()\n df_output.columns = ['address', 'latitude', 'longitude', 'postal_code']\n return df_output\n \n def get_nearest_mrt(self) -> pd.DataFrame:\n \"\"\"\n Computes the distance to the nearest MRT station\n\n Returns:\n pd.DataFrame: DF containing postal code, mrt stn and distance\n \"\"\"\n # Prepare List of HDB Coordinates and MRT Coordinates \n hdb_postal = self.dataset_geo[\"postal_code\"]\n hdb_lat = self.dataset_geo[\"latitude\"]\n hdb_long = self.dataset_geo[\"longitude\"]\n\n mrt_stn = list(zip(self.mrt_data[\"mrt\"], self.mrt_data[\"STN_NO\"]))\n mrt_lat = self.mrt_data[\"Latitude\"]\n mrt_long = self.mrt_data[\"Longitude\"]\n\n hdb_coord = list(zip(hdb_postal, hdb_lat, hdb_long))\n mrt_coord = list(zip(mrt_lat, mrt_long))\n # List containing the minimum distance for each HDB block\n list_of_nearest_distance = []\n \n for hdb in tqdm(hdb_coord):\n # Need to create a cache for this to prevent having to check each time\n # save as json in cache, then load as dict \n list_of_mrt_distance_to_each_hdb = []\n for mrt in mrt_coord:\n list_of_mrt_distance_to_each_hdb.append(geodesic((hdb[1], hdb[2]), mrt).meters)\n nearest = min(list_of_mrt_distance_to_each_hdb) \n list_of_nearest_distance.append((str(hdb[0]), mrt_stn[list_of_mrt_distance_to_each_hdb.index(nearest)], nearest))\n \n nearest_mrt = []\n for i in list_of_nearest_distance:\n nearest_mrt.append((i[0], i[1][0], i[1][1], i[2]))\n geo_distance = pd.DataFrame(nearest_mrt, columns=[\"postal_code\", \"mrt\", \"stn_no\", \"distance_meters\"])\n geo_distance = geo_distance.drop_duplicates()\n return geo_distance\n \n def merge_mrt(self) -> pd.DataFrame:\n \"\"\"\n Merges HDB data with MRT data via postal code\n\n Returns:\n pd.DataFrame: \n \"\"\"\n geo_distance = self.get_nearest_mrt()\n output = self.dataset_geo.merge(geo_distance,\n how='left',\n on='postal_code')\n return output\n \n","repo_name":"xdsyndrome/hdbresaledashboard","sub_path":"src/Distance.py","file_name":"Distance.py","file_ext":"py","file_size_in_byte":5467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"32692624006","text":"from typing import *\nimport torch.utils.data\nimport transformers\nimport sklearn.linear_model\nimport numpy as np\n\nimport torch\n\nfrom segue.configuration_segue import SegueConfig\nfrom segue.processing_segue import SegueProcessor\n\nclass SegueModel(transformers.PreTrainedModel):\n config_class = SegueConfig\n\n def __init__(\n self,\n config: SegueConfig,\n ):\n super().__init__(config)\n self.config = config\n\n self.text_encoder = transformers.AutoModel.from_pretrained(\n config.text_encoder_checkpoint,\n )\n self.speech_encoder = transformers.AutoModel.from_pretrained(\n config.speech_encoder_checkpoint,\n )\n self.text_encoder.requires_grad_(False)\n self._disable_last_dropout()\n\n tokenizer = transformers.AutoTokenizer.from_pretrained(config.text_encoder_checkpoint)\n feature_extractor = transformers.AutoFeatureExtractor.from_pretrained(\n config.speech_encoder_checkpoint\n )\n self.processor = SegueProcessor(\n 
feature_extractor,\n tokenizer,\n (self.speech_encoder.config.conv_kernel, self.speech_encoder.config.conv_stride)\n )\n\n self.train_text_encoder = False\n\n self.extra_losses_accumulator: MutableMapping[str, List[torch.Tensor]] = {}\n\n def _init_weights(self, module):\n if isinstance(module, torch.nn.Linear):\n module.weight.data.normal_(mean=0.0, std=1.0)\n if module.bias is not None:\n module.bias.data.zero_()\n \n def _disable_last_dropout(self):\n for layer in self.text_encoder.encoder.layer[-1:]:\n layer.output.dropout.p = 0.0\n\n for layer in self.speech_encoder.encoder.layers[-1:]:\n layer.feed_forward.intermediate_dropout.p = 0.0\n layer.feed_forward.output_dropout.p = 0.0\n\n def _kd_loss(self, text_pooled_embs: torch.Tensor, speech_pooled_embs: torch.Tensor):\n return torch.nn.functional.mse_loss(\n speech_pooled_embs,\n text_pooled_embs,\n )\n \n def _loss_from_embs(\n self,\n text_embs: torch.Tensor,\n speech_embs: torch.Tensor,\n text_attention_mask: torch.Tensor,\n n_speech_tokens: torch.Tensor,\n ):\n # text_embs = self.text_emb_proj(text_embs)\n loss = self._kd_loss(text_embs, speech_embs)\n\n return loss\n\n def forward(self, text = None, speech = None, n_speech_tokens = None, compute_loss: bool = False, output_hidden_states=False):\n return_dict = {}\n\n if text is not None:\n if self.training:\n self.text_encoder.train(self.train_text_encoder)\n if self.train_text_encoder:\n text_outputs = self.text_encoder(**text)\n else:\n with torch.no_grad():\n text_outputs = self.text_encoder(**text)\n text_embs = text_outputs.last_hidden_state\n text_pooled_embs = torch.stack([\n sent_embs[atn_mask].mean(dim=0) for sent_embs, atn_mask in zip(text_embs, text['attention_mask'])\n ])\n for k, v in text_outputs.items():\n return_dict['text_' + k] = v\n return_dict['text_pooled_embs'] = text_pooled_embs\n \n if speech is not None:\n speech_outputs = self.speech_encoder(**speech, output_hidden_states=output_hidden_states)\n speech_embs = speech_outputs.last_hidden_state\n speech_pooled_embs = torch.stack([\n sent_embs[:n_tokens].mean(dim=0) for sent_embs, n_tokens in zip(speech_embs, n_speech_tokens)\n ])\n for k, v in speech_outputs.items():\n return_dict['speech_' + k] = v\n return_dict['speech_pooled_embs'] = speech_pooled_embs\n\n if compute_loss:\n assert text is not None and speech is not None\n return_dict['loss'] = self._loss_from_embs(\n text_pooled_embs, speech_pooled_embs,\n text['attention_mask'], n_speech_tokens, \n )\n\n return return_dict\n \n def _accumulate_extra_metric(self, name, value):\n buffer = self.extra_losses_accumulator.get(name, [])\n buffer.append(value.detach().cpu())\n self.extra_losses_accumulator[name] = buffer\n \n def pop_extra_log_metrics(self):\n metrics = {\n k: torch.tensor(v).mean().item() for k, v in self.extra_losses_accumulator.items()\n }\n self.extra_losses_accumulator.clear()\n return metrics\n\nclass SegueForRegression(SegueModel):\n config_class = SegueConfig\n\n def __init__(self, config: SegueConfig) -> None:\n super().__init__(config)\n self.tokenizer = self.processor.text_tokenizer\n self.regression_head = torch.nn.Sequential(\n torch.nn.Linear(\n self.speech_encoder.config.hidden_size,\n 1,\n )\n )\n # set a dummy value first then use the setter\n self._train_backbone = None\n self.train_backbone = config.train_backbone\n \n @property\n def train_backbone(self):\n return self._train_backbone\n @train_backbone.setter\n def train_backbone(self, value: bool):\n self.speech_encoder.requires_grad_(value)\n self._train_backbone = 
value\n\n def _init_weights(self, module):\n if isinstance(module, torch.nn.Linear):\n module.weight.data.normal_(mean=0.0, std=torch.sqrt(2/torch.tensor(768)))\n if module.bias is not None:\n module.bias.data.zero_()\n \n def _compute_loss(self, predictions, labels):\n return torch.nn.functional.mse_loss(predictions, labels)\n \n def forward(self, speech=None, text=None, n_speech_tokens = None, labels = None):\n if self.training:\n self.speech_encoder.train(self.train_backbone)\n outputs = super().forward(speech=speech, n_speech_tokens=n_speech_tokens, text=text)\n speech_embs = outputs.get('speech_pooled_embs', None)\n text_embs = outputs.get('text_pooled_embs', None)\n if speech_embs is not None and text_embs is not None:\n logits = (speech_embs + text_embs) / 2\n else:\n logits = speech_embs if speech_embs is not None else text_embs\n predictions = self.regression_head(\n logits\n ).squeeze(-1)\n return_ = {\n 'predictions': predictions,\n }\n\n if labels is not None:\n return_['loss'] = self._compute_loss(predictions, labels)\n \n return return_\n\nclass SegueForClassification(SegueModel):\n config_class = SegueConfig\n\n def __init__(self, config: SegueConfig) -> None:\n super().__init__(config)\n self.is_multilabel = isinstance(config.n_classes, Sequence)\n self.tokenizer = self.processor.text_tokenizer\n self.classification_heads = torch.nn.ModuleList([\n torch.nn.Linear(\n self.speech_encoder.config.hidden_size,\n label_n_classes,\n )\n for label_n_classes in (config.n_classes if self.is_multilabel else [config.n_classes])\n ])\n # set a dummy value first then use the setter\n self._train_backbone = None\n self.train_backbone = config.train_backbone\n self.ce_loss = torch.nn.CrossEntropyLoss()\n \n @property\n def train_backbone(self):\n return self._train_backbone\n @train_backbone.setter\n def train_backbone(self, value: bool):\n self.speech_encoder.requires_grad_(value)\n self._train_backbone = value\n\n def _init_weights(self, module):\n if isinstance(module, torch.nn.Linear):\n module.weight.data.normal_(mean=0.0, std=torch.sqrt(2/torch.tensor(768)))\n if module.bias is not None:\n module.bias.data.zero_()\n \n def _compute_loss(self, predictions, labels):\n return self.ce_loss(predictions, labels)\n \n def forward(self, speech=None, text=None, n_speech_tokens = None, labels = None):\n if not self.is_multilabel:\n labels = labels.unsqueeze(-1)\n if self.training:\n self.speech_encoder.train(self.train_backbone)\n outputs = super().forward(speech=speech, n_speech_tokens=n_speech_tokens, text=text)\n speech_embs = outputs.get('speech_pooled_embs', None)\n text_embs = outputs.get('text_pooled_embs', None)\n if speech_embs is not None and text_embs is not None:\n logits = (speech_embs + text_embs) / 2\n else:\n logits = speech_embs if speech_embs is not None else text_embs\n predictions = [head(logits) for head in self.classification_heads]\n\n return_ = {\n 'predictions': predictions if self.is_multilabel else predictions[0],\n }\n\n if labels is not None:\n loss = torch.tensor(0.)\n for p, l in zip(predictions, labels.mT):\n loss = loss + self._compute_loss(p, l)\n return_['loss'] = loss\n\n return return_\n\nclass SegueForAnalyticRegression(torch.nn.Module):\n \"\"\"\n A SEGUE model with scikit-learn's ridge regression on top.\n Meant as a wrapper to use the functionalities of `trainsformers.Trainer`.\n Not meant for iterative training - iterating through a sample merely computes\n an embedding and adds it to the list of seen data. 
Make sure to set number of\n epochs = 1 to make sure each data point is seen exactly once.\n \"\"\"\n\n def __init__(self, segue: SegueModel, alpha=.5) -> None:\n super().__init__()\n self.segue = segue\n self.segue.eval()\n self.linear_regression = sklearn.linear_model.Ridge(alpha=alpha, solver='svd')\n self.data = []\n \n def train(self, train=True):\n super().train(train)\n if train:\n self.segue.eval()\n \n def fit(self):\n X, y = zip(*self.data)\n X = np.concatenate(X)\n y = np.concatenate(y)\n self.linear_regression.fit(X, y)\n \n def forward(self, speech=None, text=None, n_speech_tokens = None, labels = None):\n with torch.no_grad():\n outputs = self.segue(speech=speech, n_speech_tokens=n_speech_tokens, text=text)\n speech_embs = outputs.get('speech_pooled_embs', None)\n text_embs = outputs.get('text_pooled_embs', None)\n if speech_embs is not None and text_embs is not None:\n logits = torch.cat((speech_embs, text_embs), dim=-1)\n else:\n logits = speech_embs if speech_embs is not None else text_embs\n if self.training:\n self.data.append(\n (logits.cpu().numpy(), labels.cpu().numpy())\n )\n return {\n 'predictions': torch.zeros(labels.shape),\n 'loss': torch.tensor(torch.nan, requires_grad=True),\n }\n else:\n predictions = self.linear_regression.predict(logits.cpu().numpy())\n return {\n 'predictions': torch.from_numpy(predictions),\n 'loss': torch.tensor(torch.nan, requires_grad=True),\n }\n","repo_name":"declare-lab/segue","sub_path":"segue/modeling_segue.py","file_name":"modeling_segue.py","file_ext":"py","file_size_in_byte":11126,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"85"} +{"seq_id":"24437907014","text":"class Solution:\n def sumSubarrayMins(self, arr: List[int]) -> int:\n arr = [0]+arr\n result = [0]*len(arr)\n stack = [(0,0)]\n for i,num in enumerate(arr):\n while stack and stack[-1][1] > num: # remove all bigger elements from the stack until we reach a smaller or equal one\n stack.pop() \n j = stack[-1][0] # get the index of the most recent smaller one\n result[i] = result[j] + (i-j)*num # all the ones since then will have subarrays ending at this index with this as the minimum\n stack.append((i,num))\n return sum(result) % (10**9+7)","repo_name":"Bloomh/LeetCode-Submissions","sub_path":"0907-sum-of-subarray-minimums/0907-sum-of-subarray-minimums.py","file_name":"0907-sum-of-subarray-minimums.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"6633614768","text":"import sys\nimport numpy as np\nfrom PIL import Image\nimport pickle\nimport os\nimport matplotlib.pyplot as plt\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--path\",help=\"Path where omniglot folder resides\")\nparser.add_argument(\"--save\", help = \"Path to pickle data to.\", default=os.getcwd())\nargs = parser.parse_args()\ndata_path = os.path.join(args.path, \"python\")\ntrain_folder = os.path.join(data_path,'images_background')\nvalpath = os.path.join(data_path,'images_evaluation')\n\nsave_path = args.save\n\nlang_dict = {}\n\n\n\ndef loadimgs(path,n=0):\n #if data not already unzipped, unzip it.\n if not os.path.exists(path):\n print(\"unzipping\")\n os.chdir(data_path)\n os.system(\"unzip {}\".format(path+\".zip\" ))\n X=[]\n y = []\n cat_dict = {}\n lang_dict = {}\n curr_y = n\n #we load every alphabet seperately so we can isolate them later\n for alphabet in os.listdir(path):\n print(\"loading alphabet: \" + 
alphabet)\n        lang_dict[alphabet] = [curr_y,None]\n        alphabet_path = os.path.join(path,alphabet)\n        #every letter/category has its own column in the array, so load separately\n        for letter in os.listdir(alphabet_path):\n            cat_dict[curr_y] = (alphabet, letter)\n            category_images=[]\n            letter_path = os.path.join(alphabet_path, letter)\n            for filename in os.listdir(letter_path):\n                image_path = os.path.join(letter_path, filename)\n                image = Image.open(image_path)\n                a = np.asarray(image)\n                image = Image.fromarray(a)\n                category_images.append(image)\n                y.append(curr_y)\n            try:\n                X.append(np.stack(category_images))\n            #edge case - last one\n            except ValueError as e:\n                print(e)\n                print(\"error - category_images:\", category_images)\n            curr_y += 1\n            lang_dict[alphabet][1] = curr_y - 1\n    y = np.vstack(y)\n    X = np.stack(X)\n    return X,y,lang_dict\n\n#Training data\nX,y,c=loadimgs(train_folder)\nwith open(os.path.join(save_path,\"train.pickle\"), \"wb\") as f:\n\tpickle.dump((X,c),f)\n\n\n#Testing / validation data\nX,y,c=loadimgs(valpath)\nwith open(os.path.join(save_path,\"val.pickle\"), \"wb\") as f:\n\tpickle.dump((X,c),f)\n","repo_name":"pascalemp/CBIR-siamese-cnn","sub_path":"code/siamese_network/load_data_omniglot.py","file_name":"load_data_omniglot.py","file_ext":"py","file_size_in_byte":2310,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"19927172442","text":"import numpy as num\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\n\r\ndataset = pd.read_csv('sal.csv', names=['age','workclass','fnlwgt','education',\r\n                                        'education_num','marital_status',\r\n                                        'occupation','relationship',\r\n                                        'race','sex','capital_gain',\r\n                                        'capital_loss','hours-per-week',\r\n                                        'native-country','salary'],\r\n                      na_values=' ?')\r\n\r\nX=dataset.iloc[:,0:14].values\r\ny=dataset.iloc[:,-1].values\r\n\r\n#Using LabelEncoder to convert the categorical values into integers#\r\nfrom sklearn.preprocessing import LabelEncoder\r\nlabelencoder_y=LabelEncoder()\r\ny=labelencoder_y.fit_transform(y)\r\nlabelencoder_X=LabelEncoder()\r\n#encode workclass (column 1)#\r\nX[:,1]=labelencoder_X.fit_transform(X[:,1].astype(str))\r\n#encode education (column 3)#\r\nX[:,3]=labelencoder_X.fit_transform(X[:,3].astype(str))\r\n#encode marital_status (column 5)#\r\nX[:,5]=labelencoder_X.fit_transform(X[:,5].astype(str))\r\n#encode occupation (column 6)#\r\nX[:,6]=labelencoder_X.fit_transform(X[:,6].astype(str))\r\n#encode relationship (column 7)#\r\nX[:,7]=labelencoder_X.fit_transform(X[:,7].astype(str))\r\n#encode race (column 8)#\r\nX[:,8]=labelencoder_X.fit_transform(X[:,8].astype(str))\r\n#encode sex (column 9)#\r\nX[:,9]=labelencoder_X.fit_transform(X[:,9].astype(str))\r\n#encode native-country (column 13)#\r\nX[:,13]=labelencoder_X.fit_transform(X[:,13].astype(str))\r\n\r\n#Using Imputer for removing null/nan values#\r\nfrom sklearn.preprocessing import Imputer\r\nimputer=Imputer(missing_values=\"NaN\",strategy=\"mean\",axis=0)\r\nX[:,0:14]=imputer.fit_transform(X[:,0:14])\r\n\r\n#Using OneHotEncoder to avoid the dummy variable trap#\r\nfrom sklearn.preprocessing import OneHotEncoder\r\nonehotencoder=OneHotEncoder(categorical_features=[1,3,5,6,7,8,9,13])\r\nX=onehotencoder.fit_transform(X)\r\nX=X.toarray()\r\n\r\n#Applying Feature Scaling to get the data on a similar scale#\r\nfrom sklearn.preprocessing import StandardScaler\r\nsc=StandardScaler()\r\nX=sc.fit_transform(X)\r\n\r\n#splitting the data into train and test sets#\r\nfrom sklearn.model_selection import 
train_test_split\r\nX_train,X_test,y_train,y_test=train_test_split(X,y,random_state=0)\r\n\r\n#applying the linear regression#\r\nfrom sklearn.linear_model import LinearRegression\r\nlin_reg = LinearRegression()\r\nlin_reg.fit(X_train,y_train)\r\n\r\ny_pred_line=lin_reg.predict(X_test)\r\n\r\nlin_reg.score(X_test,y_test)\r\n\r\n#(1)applying the Logistic Regression#\r\nfrom sklearn.linear_model import LogisticRegression\r\nlog_reg = LogisticRegression()\r\nlog_reg.fit(X_train,y_train)\r\n\r\ny_pred_log = log_reg.predict(X_test)\r\n\r\nlog_reg.score(X_test,y_test)\r\n\r\n#(2)applying knn algo#\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nknn = KNeighborsClassifier(n_neighbors=9)\r\nknn.fit(X_train,y_train)\r\n\r\ny_pred=knn.predict(X_test)\r\n\r\nknn.score(X_test,y_test)\r\nknn.score(X_train,y_train)\r\nknn.score(X,y)\r\n\r\n#(3)Applying Naive_bayes algo#\r\nfrom sklearn.naive_bayes import GaussianNB\r\nnvb=GaussianNB()\r\nnvb.fit(X_train,y_train)\r\n\r\nnvb.score(X_test,y_test)\r\nnvb.score(X_train,y_train)\r\nnvb.score(X,y)\r\n\r\n#(4) Applying SVM algo#\r\nfrom sklearn.svm import SVC\r\nsvm=SVC()\r\nsvm.fit(X_train,y_train)\r\nsvm.score(X_test,y_test)\r\nsvm.score(X_train,y_train)\r\nsvm.score(X,y)\r\n\r\n#(5) Applying Decision Tree algo#\r\nfrom sklearn.tree import DecisionTreeClassifier\r\ndt=DecisionTreeClassifier()\r\ndt.fit(X_train,y_train)\r\ndt.score(X_test,y_test)\r\ndt.score(X_train,y_train)\r\ndt.score(X,y)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"anik8gupta/Machine_Learning","sub_path":"Sal_model.py","file_name":"Sal_model.py","file_ext":"py","file_size_in_byte":3583,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"29214623566","text":"import openai\nimport json\nimport re\nfrom dotenv import load_dotenv\nimport os\n\nload_dotenv()\nopenai_api_key = os.getenv(\"OPENAI_API_KEY\")\n\nopenai.api_key = openai_api_key\n\ndef parse_text(text):\n    node_dict = {}\n    root = None\n    nodes = []\n\n    lines = text.split('\\n')\n    node = None\n    for line in lines:\n        if \"Parent\" in line:\n            parent_name = line.split(':')[-1].strip()\n            if node is not None:\n                node[\"parent\"] = parent_name\n            if root is None and parent_name == \"\":\n                root = node\n        elif \"Name\" in line:\n            node_name = line.split(':')[-1].strip()\n            node = {\"name\": node_name, \"children\": []}\n            nodes.append(node)\n        elif \"Description\" in line and node is not None:\n            description = line.split(':')[-1].strip()\n            node[\"description\"] = description\n        elif \"Children\" in line and node is not None:\n            children_names = line.split(':')[-1].strip().split(', ')\n            for child_name in children_names:\n                if child_name:\n                    child = {\"name\": child_name, \"children\": []}\n                    node[\"children\"].append(child)\n                    nodes.append(child)\n\n    for node in nodes:\n        if \"name\" in node:\n            node_dict[node[\"name\"]] = node\n        else:\n            print(\"Error: 'name' key not found in the node:\", node)\n\n    return root, node_dict\n\n\n\ndef build_tree(node, node_dict):\n    for child in node[\"children\"]:\n        child.update(node_dict[child[\"name\"]])\n        build_tree(child, node_dict)\n\ndef generate_mind_map_data(theme, schema_type):\n    prompt = f'Créez une carte mentale sur le thème \"{theme}\" en utilisant la structure suivante pour chaque noeud :\\n---\\nNode:\\n- Name: [Nom du noeud]\\n- Description: [Description du noeud]\\n- Parent: [Nom du noeud parent]\\n- Children: [Noms des noeuds enfants, séparés par des virgules]\\n---'\n\n    response = openai.Completion.create(\n        engine=\"text-davinci-003\",\n        prompt=prompt,\n        
max_tokens=2000,\n        n=1,\n        stop=None,\n        temperature=0.5,\n    )\n\n    mind_map_data = response.choices[0].text.strip()\n    print(\"Response raw text:\", mind_map_data)\n\n    root, node_dict = parse_text(mind_map_data)\n    build_tree(root, node_dict)\n\n    print(\"root:\", root)\n    print(\"node dict:\", node_dict)\n\n    return root\n\ntheme = \"Organiser un événement dans une boîte de nuit\"\nschema_type = \"carte mentale\"\n\njson_data = generate_mind_map_data(theme, schema_type)\n\nprint(\"Generated JSON file:\", json_data) # Add this line\nprint(\"test1\")\n\nwith open(\"output.json\", \"w\") as outfile:\n    json.dump(json_data, outfile, indent=2)\n","repo_name":"Crescendo21/DiagramGenerator","sub_path":"diagProject/apptest/apps.py","file_name":"apps.py","file_ext":"py","file_size_in_byte":2713,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"70426680277","text":"import bs4\nimport requests\nfrom django.core.management.base import BaseCommand\nfrom bs4 import BeautifulSoup\nfrom getrecipeapp.models import Category, Complexity, Tag, Dishes\nimport os\nfrom django.conf import settings\nfrom tqdm import tqdm\n\nheaders = {'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,'\n                     '*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',\n           'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '\n                         'Chrome/100.0.4896.127 Safari/537.36'}\n\n\nclass Recipe:\n\n    def __init__(self, rec: bs4.element.Tag):\n        self.recipe = rec\n\n    def get_title(self) -> str:\n        \"\"\"Return the recipe title\"\"\"\n        return self.recipe.find('div', class_='title_shortstory border-bottom') \\\n            .find('h3').text\n\n    def get_link_title(self) -> str:\n        \"\"\"Return the link to the recipe\"\"\"\n        return self.recipe.find('div', class_='title_shortstory border-bottom') \\\n            .find('a')['href']\n\n    def get_tags(self) -> list:\n        \"\"\"Return the dish tags\"\"\" # .get('href')\n        return [tag.text for tag in\n                self.recipe.find('span', {'itemprop': 'recipeCategory'}).findAll('a', class_='povar_col')]\n\n    def get_time(self) -> str:\n        \"\"\"Return the cooking time\"\"\"\n        try:\n            return self.recipe.find('div', class_='ingr_bg').findAll('span')[1].text\n        except Exception:\n            return ''\n\n    def get_complexity(self) -> str:\n        \"\"\"Return the cooking difficulty level\"\"\"\n        try:\n            return self.recipe.find('div', class_='ingr_bg').findAll('span')[2].next_sibling.strip()\n        except Exception:\n            return ''\n\n    def get_calories(self) -> str:\n        \"\"\"Return the calorie content of the dish\"\"\"\n        try:\n            return self.recipe.find('div', class_='ingr_bg').findAll('span')[4].nextSibling.strip()\n        except Exception:\n            return ''\n\n    def get_rating(self) -> str:\n        \"\"\"Return the dish rating as a percentage. 
For example, 40% is 2 stars and 100% is 5 stars\"\"\"\n        return self.recipe.find('li', class_='current-rating').text\n\n    def get_image(self):\n        \"\"\"Download the SMALL-resolution image and return its path on disk\"\"\"\n        url = self.recipe.find('div', class_='recepiesimg').find('img').get('src')\n        resp = requests.get(url)\n        img_file = os.path.join(settings.MEDIA_ROOT, 'images_small', url.split('/')[-1])\n        if not os.path.exists(img_file):\n            with open(img_file, 'wb') as img:\n                img.write(resp.content)\n            return os.path.join('images_small', url.split('/')[-1])\n        else:\n            return os.path.join('images_small', url.split('/')[-1])\n\n    def get_image_full(self):\n        \"\"\"Download the HIGH-resolution image and return its path on disk\"\"\"\n        url = self.recipe.find('div', {'id': 'fullstory'}).find('div', class_='centr').find('img').get('src')\n        resp = requests.get(url)\n        img_file = os.path.join(settings.MEDIA_ROOT, 'images_full', url.split('/')[-1])\n        if not os.path.exists(img_file):\n            with open(img_file, 'wb') as img:\n                img.write(resp.content)\n            return os.path.join('images_full', url.split('/')[-1])\n        else:\n            return os.path.join('images_full', url.split('/')[-1])\n\n    def get_description(self) -> str:\n        \"\"\"Return the dish description\"\"\"\n        return self.recipe.find('div', class_='param').find('p').text\n\n    def get_full_description(self) -> str:\n        \"\"\"Return the full dish description\"\"\"\n        return self.recipe.find('p', {'itemprop': 'description'}).text\n\n    def get_nutrition(self) -> str:\n        \"\"\"Return the nutritional value of one serving\"\"\"\n        return str(self.recipe.find('div', {\"itemprop\": \"nutrition\"}))\n\n    def get_ingredients_clr(self) -> str:\n        \"\"\"Return the recipe ingredients\"\"\"\n        return str(self.recipe.find('div', class_='ingredients clr'))\n\n    def get_instructions(self) -> str:\n        \"\"\"Return the step-by-step cooking instructions for the recipe\"\"\"\n        return str(self.recipe.find('div', class_='instructions'))\n\n\ndef get_total_pages(url):\n    \"\"\"Return the number of pages\"\"\"\n    response = requests.get(url=url, headers=headers)\n    soup = BeautifulSoup(response.text, 'lxml')\n    return int(soup.find('div', class_='navigation').findAll('a')[-1].text) # get the number of the last page\n\n\nclass Command(BaseCommand):\n\n    def handle(self, *args, **options):\n        URL = f'https://grandkulinar.ru/recepies/zdorovoe-pitanie/nizkokalorijnye-blyuda/page/{1}/'\n        total_pages = get_total_pages(URL)\n        print('Total pages: ', total_pages)\n        print('-' * 30)\n\n        for page in range(1, total_pages + 1):\n            URL = f'https://grandkulinar.ru/recepies/zdorovoe-pitanie/nizkokalorijnye-blyuda/page/{page}/'\n            response = requests.get(url=URL, headers=headers)\n            soup = BeautifulSoup(response.text, 'lxml')\n\n            # block with all the dishes on the page\n            body_recipes = soup.find('div', {'id': 'dle-content'}).findAll('div', class_='shortstory')\n\n            # add categories to the DB so they can later be pulled into the resulting table\n            category_name = soup.find('h1', class_='titlecat').text\n            try:\n                Category.objects.create(name=category_name)\n            except:\n                pass\n\n            # iterate over all the recipes\n            for recipe in tqdm(body_recipes, f'Processing page: {page}'):\n                obj_r = Recipe(recipe) # current recipe\n\n                # extract the tags from the source 
recipe and add them to the DB\n                tags = obj_r.get_tags()\n                for tag in tags:\n                    try:\n                        Tag.objects.create(name=tag)\n                    except Exception:\n                        pass\n\n                # extract the dish's cooking difficulty category from the source recipe and add it to the DB\n                complexity = obj_r.get_complexity()\n                try:\n                    Complexity.objects.create(name=complexity)\n                except Exception:\n                    pass\n\n                # extract the remaining attributes\n                title = obj_r.get_title()\n                rating = obj_r.get_rating()\n                time = obj_r.get_time()\n                calories = obj_r.get_calories()\n                description = obj_r.get_description()\n                image = obj_r.get_image()\n                link = obj_r.get_link_title()\n\n                # follow the link to get additional info\n                link_resp = requests.get(url=link, headers=headers)\n                link_soup = BeautifulSoup(link_resp.text, 'lxml')\n                link_recipe = Recipe(link_soup)\n                # get the additional info\n                image_full = link_recipe.get_image_full()\n                description_full = link_recipe.get_full_description()\n                nutrition = link_recipe.get_nutrition()\n                ingredients_clr = link_recipe.get_ingredients_clr()\n                instructions = link_recipe.get_instructions()\n\n\n                # add to the DB\n                try:\n                    tmp_dishes = Dishes.objects.create(title=title,\n                                                       category=Category.objects.get(name=category_name),\n                                                       complexity=Complexity.objects.get(name=complexity),\n                                                       timeprocess=time,\n                                                       calories=calories,\n                                                       rating=rating,\n                                                       description=description,\n                                                       image=image,\n                                                       link=link,\n                                                       image_full=image_full,\n                                                       description_full=description_full,\n                                                       nutrition=nutrition,\n                                                       ingredients_clr=ingredients_clr,\n                                                       instructions=instructions)\n\n                    for tag in tags:\n                        tmp_tag = Tag.objects.filter(name=tag).values_list('id', flat=True)\n                        if tmp_tag.count() == 1:\n                            tmp_dishes.tags.add(tmp_tag[0])\n                except Exception as er:\n                    pass","repo_name":"Donsky1/django-cooking-recipes","sub_path":"recipes/getrecipeapp/management/commands/get_new_dishes.py","file_name":"get_new_dishes.py","file_ext":"py","file_size_in_byte":9606,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"74922728277","text":"#!/usr/bin/env python3\r\nimport socket\r\nimport datetime\r\nimport json\r\n\r\nHOST = 'xxx.xxx.xxx.xxx' # Server IP address (placeholder)\r\nPORT = 1000 # Port to listen on (note: port 1000 is privileged; non-privileged ports are > 1023)\r\n\r\nwith socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\r\n    s.bind((HOST, PORT))\r\n    s.listen()\r\n    print(\"Server is Listening...\")\r\n    print(\"Please Wait\")\r\n\r\n    while True:\r\n        conn, addr = s.accept()\r\n        print(\"Connecting ..\")\r\n\r\n        with conn:\r\n            print('Connected by', addr)\r\n            while True:\r\n                data = conn.recv(1024)\r\n                print(\"TimeStamp: \", datetime.datetime.now())\r\n                print(data)\r\n                a = data.decode(\"utf-8\")\r\n                list =a.split(\",\")\r\n                print(list)\r\n                if not data:\r\n                    break\r\n                a = b'@866039048589957,00,1234,*CS'\r\n                conn.send(b'@866039048589957,00,7318,*CS')\r\n                print(\"--------------------------------------------------------------------------------------------------------\")","repo_name":"jacobpaul07/OBD-Project","sub_path":"Server_Response.py","file_name":"Server_Response.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"1591045758","text":"import re\nimport operator\nfrom datetime import datetime\nfrom collections import defaultdict, Counter\n\n\ndef parse_input_file():\n    with open('input.txt') as file:\n        parsed_data = {}\n        for line in file.readlines():\n            timestamp, text = re.search('\\[(.+)] (.+)', line.strip()).groups()\n            parsed_data[datetime.strptime(timestamp, '%Y-%m-%d 
%H:%M')] = text\n return parsed_data\n\n\ndef get_sleep_intervals_and_days_to_guards_mapping(parsed_data: dict) -> (dict, dict):\n days_data = defaultdict(list)\n guards_days_mapping = {}\n index = None\n for timestamp, text in dict(sorted(parsed_data.items())).items():\n if \"begins shift\" in text or timestamp.hour == 23:\n index = text.split(\" \")[1][1:]\n elif 'falls asleep' in text:\n day = timestamp.date()\n asleep_timestamp = timestamp\n elif 'wakes up' in text:\n guards_days_mapping[timestamp.date()] = index\n days_data[day].append((asleep_timestamp.minute, timestamp.minute - 1))\n return days_data, guards_days_mapping\n\n\ndef get_sleepy_guard_index(days_data: dict, guards_days_mapping: dict) -> int:\n guards_sleep = defaultdict(int)\n for date, sleep_data in days_data.items():\n for time_delta in sleep_data:\n minutes = time_delta[1] - time_delta[0]\n guards_sleep[guards_days_mapping[date]] += minutes\n return max(guards_sleep.items(), key=operator.itemgetter(1))[0]\n\n\ndef get_minutes_asleep_and_all_guards_sleep_data(days_data: dict, guards_days_mapping: dict) -> (int, dict):\n minutes = []\n all_guards_data = defaultdict(list)\n for date, sleep_data in days_data.items():\n for time_delta in sleep_data:\n for minute in range(time_delta[0], time_delta[1] + 1):\n all_guards_data[guards_days_mapping[date]].append(minute)\n if guards_days_mapping[date] == index:\n minutes.append(minute)\n return Counter(minutes).most_common(1)[0][0] * int(index), all_guards_data\n\n\ndef get_result_2():\n final_data = {index: Counter(minutes) for index, minutes in all_guards_sleep_data.items()}\n final_occurrences = 0\n result = 0\n for index, (minute, occurrences) in {index: counter.most_common(1)[0]\n for index, counter in final_data.items()}.items():\n if occurrences > final_occurrences:\n result = int(index) * minute\n return result\n\n\nparsed_data = parse_input_file()\ndays_sleep_intervals, guards_day_mapping = get_sleep_intervals_and_days_to_guards_mapping(parsed_data)\nindex = get_sleepy_guard_index(days_sleep_intervals, guards_day_mapping)\nresult1, all_guards_sleep_data = get_minutes_asleep_and_all_guards_sleep_data(days_sleep_intervals, guards_day_mapping)\n\nprint(\"PART 1:\", result1)\nprint(\"PART 2:\", get_result_2())\n","repo_name":"Naatoo/advent-of-code-2018","sub_path":"Day4/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":2851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"777089414","text":"import rospy\nfrom sensor_msgs.msg import Image as msg_Image\nfrom sensor_msgs.msg import CameraInfo\nfrom cv_bridge import CvBridge, CvBridgeError\nimport sys\nimport os\nimport numpy as np\nimport pyrealsense2 as rs2\n\nif (not hasattr(rs2, 'intrinsics')):\n import pyrealsense2.pyrealsense2 as rs2\n\nclass ImageListener:\n def __init__(self, depth_image_topic, depth_info_topic):\n self.bridge = CvBridge()\n self.sub = rospy.Subscriber(depth_image_topic, msg_Image, self.imageDepthCallback)\n self.sub_info = rospy.Subscriber(depth_info_topic, CameraInfo, self.imageDepthInfoCallback)\n self.intrinsics = None\n self.pix = None\n self.pix_grade = None\n\n def imageDepthCallback(self, data):\n try:\n cv_image = self.bridge.imgmsg_to_cv2(data, data.encoding)\n #NOTE pick one pixel among all the pixels with the closest range:\n indices = np.array(np.where(cv_image == cv_image[cv_image > 0].min()))[:,0]\n pix = (indices[1], indices[0])\n self.pix = pix\n line = '\\rDepth at pixel(%3d, %3d): %7.1f(mm).' 
% (pix[0], pix[1], cv_image[pix[1], pix[0]])\n\n            if self.intrinsics:\n                depth = cv_image[pix[1], pix[0]]\n                result = rs2.rs2_deproject_pixel_to_point(self.intrinsics, [pix[0], pix[1]], depth)\n                line += ' Coordinate: %8.2f %8.2f %8.2f.' % (result[0], result[1], result[2])\n            line += '\\r'\n            # NOTE flush() forces out anything the program has written that the OS has buffered and not yet actually printed to the console.\n            # NOTE use it when you want the output written to stdout properly on every call\n            sys.stdout.write(line)\n            sys.stdout.flush()\n\n        except CvBridgeError as e:\n            print(e)\n            return\n        except ValueError as e:\n            return\n\n    def imageDepthInfoCallback(self, cameraInfo):\n        try:\n            if self.intrinsics:\n                return\n            self.intrinsics = rs2.intrinsics()\n            self.intrinsics.width = cameraInfo.width\n            self.intrinsics.height = cameraInfo.height\n            self.intrinsics.ppx = cameraInfo.K[2]\n            self.intrinsics.ppy = cameraInfo.K[5]\n            self.intrinsics.fx = cameraInfo.K[0]\n            self.intrinsics.fy = cameraInfo.K[4]\n            if cameraInfo.distortion_model == 'plumb_bob':\n                self.intrinsics.model = rs2.distortion.brown_conrady\n            elif cameraInfo.distortion_model == 'equidistant':\n                self.intrinsics.model = rs2.distortion.kannala_brandt4\n            self.intrinsics.coeffs = [i for i in cameraInfo.D]\n        except CvBridgeError as e:\n            print(e)\n            return\n\ndef main():\n    depth_image_topic = '/camera/depth/image_rect_raw'\n    depth_info_topic = '/camera/depth/camera_info'\n    \n    listener = ImageListener(depth_image_topic, depth_info_topic)\n    rospy.spin()\n\nif __name__ == '__main__':\n    rospy.init_node(\"depth_test\")\n    main()","repo_name":"Byson-source/ros_src","sub_path":"src/cpp/scripts/depth_test.py","file_name":"depth_test.py","file_ext":"py","file_size_in_byte":3098,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"23344131495","text":"import os, glob, random\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nfrom PIL import Image\r\nimport shutil\r\nfrom scipy.io import loadmat\r\nimport torch\r\nfrom torchvision.utils import make_grid\r\nfrom torchvision import transforms\r\nimport torchvision.transforms.functional as TF\r\nfrom torch import nn, optim\r\nfrom torch.optim.lr_scheduler import CosineAnnealingLR\r\nfrom torch.utils.data import DataLoader, Dataset\r\nimport pytorch_lightning as pl\r\nfrom pytorch_lightning import Trainer\r\nfrom pytorch_lightning.callbacks import EarlyStopping\r\nfrom Fastonn import SelfONNTranspose1d as SelfONNTranspose1dlayer\r\nfrom Fastonn import SelfONN1d as SelfONN1dlayer\r\nfrom utils import ECGDataset, ECGDataModule,init_weights,TECGDataset,TECGDataModule\r\n \r\n\r\n\r\n \r\nclass Upsample(nn.Module):\r\n    def __init__(self, in_channels, out_channels, kernel_size=4, stride=2, padding=1, dropout=False):\r\n        super(Upsample, self).__init__()\r\n        self.dropout = dropout\r\n        self.block = nn.Sequential(\r\n            SelfONNTranspose1dlayer(in_channels, out_channels, kernel_size, stride, padding, bias=nn.InstanceNorm1d,q=3),\r\n            nn.InstanceNorm1d(out_channels),\r\n            nn.Tanh()\r\n        )\r\n        self.dropout_layer = nn.Dropout(0.5)\r\n\r\n    def forward(self, x, shortcut=None):\r\n        x = self.block(x)\r\n        if self.dropout:\r\n            x = self.dropout_layer(x)\r\n\r\n        if shortcut is not None:\r\n            x = torch.cat([x, shortcut], dim=1)\r\n\r\n        return x\r\n\r\n\r\nclass Downsample(nn.Module):\r\n    def __init__(self, in_channels, out_channels, kernel_size=4, stride=2, padding=1, apply_instancenorm=False):\r\n        super(Downsample, self).__init__()\r\n        self.conv = SelfONN1dlayer(in_channels, out_channels, kernel_size, stride, padding, bias=nn.InstanceNorm1d,q=3)\r\n        
{"seq_id":"23344131495","text":"import torch\r\nfrom torch import nn\r\nfrom Fastonn import SelfONNTranspose1d as SelfONNTranspose1dlayer\r\nfrom Fastonn import SelfONN1d as SelfONN1dlayer\r\n\r\n\r\nclass Upsample(nn.Module):\r\n    def __init__(self, in_channels, out_channels, kernel_size=4, stride=2, padding=1, dropout=False):\r\n        super(Upsample, self).__init__()\r\n        self.dropout = dropout\r\n        self.block = nn.Sequential(\r\n            # bias=False: the InstanceNorm layer that follows makes a bias term redundant\r\n            SelfONNTranspose1dlayer(in_channels, out_channels, kernel_size, stride, padding, bias=False, q=3),\r\n            nn.InstanceNorm1d(out_channels),\r\n            nn.Tanh()\r\n        )\r\n        self.dropout_layer = nn.Dropout(0.5)\r\n\r\n    def forward(self, x, shortcut=None):\r\n        x = self.block(x)\r\n        if self.dropout:\r\n            x = self.dropout_layer(x)\r\n\r\n        if shortcut is not None:\r\n            # concatenate the encoder skip connection along the channel axis\r\n            x = torch.cat([x, shortcut], dim=1)\r\n\r\n        return x\r\n\r\n\r\nclass Downsample(nn.Module):\r\n    # apply_instancenorm defaults to True: callers only pass it explicitly (as False)\r\n    # for the first layer, and a default of False would make self.norm dead code.\r\n    def __init__(self, in_channels, out_channels, kernel_size=4, stride=2, padding=1, apply_instancenorm=True):\r\n        super(Downsample, self).__init__()\r\n        self.conv = SelfONN1dlayer(in_channels, out_channels, kernel_size, stride, padding, bias=False, q=3)\r\n        self.norm = nn.InstanceNorm1d(out_channels)\r\n        self.act = nn.Tanh()\r\n        self.apply_norm = apply_instancenorm\r\n\r\n    def forward(self, x):\r\n        x = self.conv(x)\r\n        if self.apply_norm:\r\n            x = self.norm(x)\r\n        x = self.act(x)\r\n\r\n        return x\r\n\r\n\r\nclass CycleGAN_Unet_Generator(nn.Module):\r\n    def __init__(self, filter=8):\r\n        super(CycleGAN_Unet_Generator, self).__init__()\r\n        self.downsamples = nn.ModuleList([\r\n            Downsample(1, filter, kernel_size=5, padding=1, apply_instancenorm=False),  # (b, filter, ~L/2)\r\n            Downsample(filter, filter * 2, kernel_size=5, padding=1),                   # (b, filter * 2, ~L/4)\r\n            Downsample(filter * 2, filter * 4, kernel_size=5, padding=1),               # (b, filter * 4, ~L/8)\r\n            Downsample(filter * 4, filter * 8, kernel_size=5, padding=1),               # (b, filter * 8, ~L/16)\r\n            Downsample(filter * 8, filter * 8, kernel_size=5, padding=1),               # (b, filter * 8, ~L/32)\r\n        ])\r\n\r\n        # in_channels double after the first level because forward() concatenates\r\n        # each decoder output with the matching encoder skip connection\r\n        self.upsamples = nn.ModuleList([\r\n            Upsample(filter * 8, filter * 8, kernel_size=5, padding=1),\r\n            Upsample(filter * 16, filter * 4, dropout=False, kernel_size=5, padding=1),\r\n            Upsample(filter * 8, filter * 2, dropout=False, kernel_size=5, padding=1),\r\n            Upsample(filter * 4, filter, dropout=False, kernel_size=5, padding=1)\r\n        ])\r\n\r\n        self.last = nn.Sequential(\r\n            SelfONNTranspose1dlayer(filter * 2, 1, kernel_size=6, stride=2, padding=1, q=3),\r\n            nn.Tanh()\r\n        )\r\n\r\n    def forward(self, x):\r\n        skips = []\r\n        for l in self.downsamples:\r\n            x = l(x)\r\n            skips.append(x)\r\n        skips = reversed(skips[:-1])\r\n        for l, s in zip(self.upsamples, skips):\r\n            x = l(x, s)\r\n        out = self.last(x)\r\n\r\n        return out\r\n\r\n\r\nclass CycleGAN_Discriminator(nn.Module):\r\n    def __init__(self, filter=8):\r\n        super(CycleGAN_Discriminator, self).__init__()\r\n\r\n        self.block = nn.Sequential(\r\n            Downsample(1, filter, kernel_size=4, stride=4, apply_instancenorm=False),\r\n            Downsample(filter, filter * 2, kernel_size=4, stride=4),\r\n            Downsample(filter * 2, filter * 4, kernel_size=4, stride=4),\r\n            Downsample(filter * 4, filter * 8, kernel_size=4, stride=4),\r\n            Downsample(filter * 8, filter * 16, kernel_size=4, stride=4),\r\n        )\r\n\r\n        self.last = SelfONN1dlayer(filter * 16, 1, kernel_size=6, stride=4, padding=1, q=3)\r\n\r\n    def forward(self, x):\r\n        x = self.block(x)\r\n        x = self.last(x)\r\n\r\n        return x\r\n\r\n\r\n# Sanity check: run a dummy batch through the discriminator and print the output size\r\nmodel = CycleGAN_Discriminator()\r\nx = torch.randn(1, 1, 4096)\r\nout = model(x)\r\nprint(out.size())\r\n","repo_name":"OzerCanDevecioglu/Zero-Shot-Bearing-Fault-Detection-by-Blind-Domain-Transition","sub_path":"GAN_Arch_details.py","file_name":"GAN_Arch_details.py","file_ext":"py","file_size_in_byte":4582,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"85"}
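The channel counts in the generator above (`filter * 16` feeding the second `Upsample`, and so on) follow from concatenating each decoder output with its encoder skip. A plain `ConvTranspose1d` stand-in, with no Fastonn dependency, makes the arithmetic visible:

```python
import torch
from torch import nn

up = nn.ConvTranspose1d(8, 8, kernel_size=4, stride=2, padding=1)

bottleneck = torch.randn(1, 8, 50)   # (batch, channels, length) leaving the encoder
skip = torch.randn(1, 8, 100)        # matching encoder feature map, twice the length

upsampled = up(bottleneck)           # length: (50 - 1) * 2 - 2 + 4 = 100
merged = torch.cat([upsampled, skip], dim=1)
print(merged.shape)                  # torch.Size([1, 16, 100]): channels double after the
                                     # concat, hence the doubled Upsample in_channels
```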
{"seq_id":"1173251192","text":"# -*- coding: utf-8 -*-\n\"\"\"Definition of the Boletim content type\n\"\"\"\n\nfrom zope.interface import implements\nfrom DateTime.DateTime import *\nfrom string import join\n\n\nfrom Products.Archetypes import atapi\nfrom Products.ATContentTypes.content import folder\nfrom Products.ATContentTypes.content import schemata\n\nfrom Products.ATVocabularyManager import NamedVocabulary\nfrom Products.Archetypes.public import DisplayList\nfrom Products.CMFCore.utils import getToolByName\n\n# -*- Message Factory Imported Here -*-\nfrom ebc.monitoramento import monitoramentoMessageFactory as _\n\nfrom ebc.monitoramento.interfaces import IBoletim\nfrom ebc.monitoramento.config import PROJECTNAME\n\nBoletimSchema = folder.ATFolderSchema.copy() + atapi.Schema((\n\n    # -*- Your Archetypes field definitions here ... -*-\n\n    atapi.LinesField(\n        'tipo',\n        storage=atapi.AnnotationStorage(),\n        widget=atapi.SelectionWidget(\n            label=_(u\"Tipo\"),\n        ),\n        vocabulary=NamedVocabulary(\"\"\"boletim\"\"\"),\n        required=True,\n    ),\n\n    atapi.DateTimeField(\n        'data',\n        storage=atapi.AnnotationStorage(),\n        widget=atapi.CalendarWidget(\n            label=_(u\"Data e Hora\"),\n            format='%d/%m/%Y %H:%M',\n            starting_year='2015',\n            minute_step=1,\n        ),\n        required=True,\n        default_method='getDefaultTime',\n        # note the trailing comma: a one-element tuple, not a plain string\n        validators=('isValidDate',),\n    ),\n\n    atapi.TextField(\n        'sumario',\n        storage=atapi.AnnotationStorage(),\n        widget=atapi.RichWidget(\n            label=_(u\"Sumário\"),\n            rows=10,\n        ),\n#        allowable_content_types=\"('text/html',)\",\n        default_output_type=\"text/html\",\n        searchable=1,\n    ),\n\n\n))\n\n# Set storage on fields copied from ATFolderSchema, making sure\n# they work well with the python bridge properties.\n\nBoletimSchema['title'].storage = atapi.AnnotationStorage()\nBoletimSchema['title'].required = False\nBoletimSchema['description'].storage = atapi.AnnotationStorage()\n\nBoletimSchema['subject'].widget.visible = {\"edit\": \"invisible\", \"view\": \"invisible\"}\nBoletimSchema['nextPreviousEnabled'].widget.visible = {\"edit\": \"invisible\", \"view\": \"invisible\"}\nBoletimSchema['description'].widget.visible = {\"edit\": \"invisible\", \"view\": \"invisible\"}\nBoletimSchema['location'].widget.visible = {\"edit\": \"invisible\", \"view\": \"invisible\"}\nBoletimSchema['language'].widget.visible = {\"edit\": \"invisible\", \"view\": \"invisible\"}\nBoletimSchema['effectiveDate'].widget.visible = {\"edit\": \"invisible\", \"view\": \"invisible\"}\nBoletimSchema['expirationDate'].widget.visible = {\"edit\": \"invisible\", \"view\": \"invisible\"}\nBoletimSchema['creators'].widget.visible = {\"edit\": \"invisible\", \"view\": \"invisible\"}\nBoletimSchema['contributors'].widget.visible = {\"edit\": \"invisible\", \"view\": \"invisible\"}\nBoletimSchema['rights'].widget.visible = {\"edit\": \"invisible\", \"view\": \"invisible\"}\nBoletimSchema['allowDiscussion'].widget.visible = {\"edit\": \"invisible\", \"view\": \"invisible\"}\nBoletimSchema['excludeFromNav'].widget.visible = {\"edit\": \"invisible\", \"view\": \"invisible\"}\n\n\nschemata.finalizeATCTSchema(\n    BoletimSchema,\n    folderish=True,\n    moveDiscussion=False\n)\n\n\nclass Boletim(folder.ATFolder):\n    \"\"\"Folderish Boletim (bulletin) content type\"\"\"\n    implements(IBoletim)\n\n    meta_type = \"Boletim\"\n    schema = BoletimSchema\n\n    _at_rename_after_creation = False\n\n    title = atapi.ATFieldProperty('title')\n    description = atapi.ATFieldProperty('description')\n\n    # -*- Your ATSchema to Python Property Bridges Here ... -*-\n    sumario = atapi.ATFieldProperty('sumario')\n    data = atapi.ATFieldProperty('data')\n    tipo = atapi.ATFieldProperty('tipo')\n\n    def getDefaultTime(self):\n        return DateTime()\n\n    def getDataStr(self):\n        return self.getData().strftime(\"%d/%m/%y\")\n\n    def getHoraStr(self):\n        return self.getData().strftime(\"%H:%M\")\n\n    def getTipoStr(self):\n        atvm = getToolByName(self, 'portal_vocabularies')\n        vocab = atvm.getVocabularyByName('boletim')\n        vocab_dict = vocab.getVocabularyDict(self)  # renamed: do not shadow the dict builtin\n        tipo = self.getTipo()\n        t = \"\"\n        if tipo:\n            t = vocab_dict[tipo[0]]\n        return t\n\n    def getAssuntos(self):\n        pc = getToolByName(self, 'portal_catalog')\n        path = join(self.getPhysicalPath(), '/')\n        assuntos = pc.searchResults(path=path, Type=\"Assunto\", sort_on='getObjPositionInParent')\n        return assuntos\n\n    def at_post_create_script(self):\n        if self.Title() != '':\n            self.setTitle(self.getTipoStr() + ' - ' + self.getDataStr() + ' - ' + self.getHoraStr() + ' - ' + self.Title())\n        else:\n            self.setTitle(self.getTipoStr() + ' - ' + self.getDataStr() + ' - ' + self.getHoraStr())\n        self.reindexObject(idxs=[\"Title\"])\n\n\natapi.registerType(Boletim, PROJECTNAME)\n","repo_name":"lflrocha/ebc.monitoramento","sub_path":"ebc/monitoramento/content/boletim.py","file_name":"boletim.py","file_ext":"py","file_size_in_byte":4862,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"}
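The `validators` fix in the Boletim schema above rests on a classic Python gotcha: parentheses alone do not make a tuple, so the original `('isValidDate')` was just a parenthesized string:

```python
print(type(('isValidDate')))   # <class 'str'>   -- parentheses only group here
print(type(('isValidDate',)))  # <class 'tuple'> -- the trailing comma creates the tuple
```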
{"seq_id":"25161511802","text":"import numpy as np\n\nfrom solvers.utils import parse_adaptive_step_params, rk_step\n\n# Dormand-Prince 5(4) Butcher tableau; the last row of A equals B (the FSAL property)\nC = np.array([0, 1/5, 3/10, 4/5, 8/9, 1, 1])\nA = np.array([\n    [0, 0, 0, 0, 0, 0, 0],\n    [1/5, 0, 0, 0, 0, 0, 0],\n    [3/40, 9/40, 0, 0, 0, 0, 0],\n    [44/45, -56/15, 32/9, 0, 0, 0, 0],\n    [19372/6561, -25360/2187, 64448/6561, -212/729, 0, 0, 0],\n    [9017/3168, -355/33, 46732/5247, 49/176, -5103/18656, 0, 0],\n    [35/384, 0, 500/1113, 125/192, -2187/6784, 11/84, 0],\n])\nB_hat = np.array([5179/57600, 0, 7571/16695, 393/640, -92097/339200, 187/2100,\n                  1/40])\nB = np.array([35/384, 0, 500/1113, 125/192, -2187/6784, 11/84, 0])\nE = B - B_hat  # weights of the embedded error estimate\n\n\n# Order of the propagated (5th-order) solution\nP_DOPRI54 = 5\n\n\ndef dopri54_step(f, t, x, dt, **kwargs):\n\n    butcher_tableau = {\n        'A': A,\n        'B': B,\n        'C': C,\n        'E': E,\n    }\n\n    return rk_step(f, t, x, dt, butcher_tableau, **kwargs)\n\n\ndef ode_solver(f, J, t0, tf, N, x0, adaptive_step_size=False, **kwargs):\n    # J (Jacobian) is unused by this explicit method; kept for a uniform solver interface\n\n    dt = (tf - t0)/N\n\n    T = [t0]\n    X = [x0]\n    controllers = {\n        'r': [0.01],\n        'e': [np.zeros(x0.shape)],\n        'dt': [dt],\n    }\n\n    if not adaptive_step_size:\n\n        for k in range(N):\n            x, e = dopri54_step(f, T[-1], X[-1], dt, **kwargs)\n            X.append(x)\n            T.append(T[-1] + dt)\n            controllers['e'].append(e)\n\n    if adaptive_step_size:\n\n        kwargs, abstol, reltol, epstol, facmax, facmin = parse_adaptive_step_params(kwargs)\n        # PI step-size controller gains\n        p = P_DOPRI54\n        k_p = 0.4/(p+1)\n        k_i = 0.3/(p+1)\n\n        t = t0\n        x = x0\n\n        while t < tf:\n            if t + dt > tf:\n                # shorten the final step so the integration lands exactly on tf\n                dt = tf - t\n\n            accept_step = False\n            while not accept_step:\n                x_hat, e = dopri54_step(f, t, x, dt, **kwargs)\n                r = np.max(np.abs(e) / np.maximum(abstol, np.abs(x_hat)*reltol))\n\n                accept_step = (r <= 1)\n                if accept_step:\n                    t = t + dt\n                    x = x_hat\n                    # accepted: PI controller combines the current and previous error ratios\n                    dt = dt * np.maximum(facmin, np.minimum((epstol/r)**(k_i) * (controllers['r'][-1]/r)**(k_p), facmax))\n\n                    T.append(t)\n                    X.append(x)\n                    controllers['e'].append(e)\n                    controllers['r'].append(r)\n                else:\n                    # rejected: shrink dt with the asymptotic (order-based) controller\n                    dt = np.maximum(facmin, np.minimum((epstol / r)**(1 / (p + 1)), facmax)) * dt\n                controllers['dt'].append(dt)\n\n    T = np.array(T)\n    X = np.array(X)\n    controllers['dt'] = np.array(controllers['dt'])\n    controllers['r'] = np.array(controllers['r'])\n    controllers['e'] = np.array(controllers['e'])\n\n    return X, T, controllers\n","repo_name":"pierresegonne/Numerical-Analysis-of-ODE","sub_path":"code/solvers/dopri54.py","file_name":"dopri54.py","file_ext":"py","file_size_in_byte":2651,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"}
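`rk_step` is imported from `solvers.utils` and not shown here. As a self-contained sanity check of the tableau, the generic explicit RK step below (an assumed re-implementation, not the project's own `rk_step`) reproduces both the 5th-order step and the embedded error estimate on x' = -x, reusing the `A`, `B`, `C`, `E` arrays defined above:

```python
import numpy as np

def explicit_rk_step(f, t, x, dt, A, B, C, E):
    # K[i] is the i-th stage derivative. A is strictly lower triangular, so the
    # not-yet-filled (zero) rows of K contribute nothing to the stage sums.
    K = np.zeros((len(C),) + np.shape(x))
    for i in range(len(C)):
        K[i] = f(t + C[i] * dt, x + dt * np.dot(A[i], K))
    x_next = x + dt * np.dot(B, K)   # 5th-order propagated solution
    err = dt * np.dot(E, K)          # (embedded 4th minus 5th)-order difference
    return x_next, err

x1, err = explicit_rk_step(lambda t, x: -x, 0.0, np.array([1.0]), 0.1, A, B, C, E)
print(abs(x1 - np.exp(-0.1)))  # tiny: the step is 5th-order accurate
print(abs(err))                # the estimate safely dominates the true error
```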
{"seq_id":"16782828749","text":"from typing import List\n\n\nclass Solution:\n    def intersection(self, nums1: List[int], nums2: List[int]) -> List[int]:\n        ans = []\n        mp = {}  # used as a set of the values seen in nums1\n        for num in nums1:\n            if num not in mp:\n                mp[num] = 1\n        for num in nums2:\n            if num in mp:\n                ans.append(num)\n        # de-duplicate, then convert back to a list to match the declared return type\n        return list(set(ans))\n","repo_name":"mrxy56/LeetCode","sub_path":"349. Intersection of Two Arrays.py","file_name":"349. Intersection of Two Arrays.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"}
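The dict in the solution above only emulates a set; Python's built-in set intersection performs the membership test and the de-duplication in one step, which is also why the original `return set(ans)` needed the final `list(...)`:

```python
def intersection(nums1, nums2):
    # set & set keeps each common value exactly once.
    return list(set(nums1) & set(nums2))

# The two examples from LeetCode 349:
assert sorted(intersection([1, 2, 2, 1], [2, 2])) == [2]
assert sorted(intersection([4, 9, 5], [9, 4, 9, 8, 4])) == [4, 9]
```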
{"seq_id":"70969630037","text":"import numpy as np\nimport pygame\nimport pymunk\nimport pymunk.pygame_util\nfrom pymunk import Vec2d\n\nfrom src.simulator.Agent import Agent\nfrom src.simulator.Reward import Reward\n\n\nclass Environment:\n\n    def __init__(self, robot_start: Vec2d = (0, 0), goal: Vec2d = (2, 2), goal_threshold: float = 10.0,\n                 noise_option: bool = True, randomize_goal_option: bool = True, carrot_reward: bool = False,\n                 render: bool = True, render_step: int = 5, init: bool = True) -> None:\n        # Physics time step\n        self.dt = 1.0 / 60.0\n        # Number of physics steps per screen frame\n        self.physics_steps_per_frame = 1\n\n        self.step_count = 0\n        self.render_step = render_step\n\n        self.space = pymunk.Space()\n        self.friction_scalar = 0.80\n        self.friction_angular_scalar = 0.60\n\n        self.action_freq = 3  # in Hz\n\n        self.robot_start = robot_start\n        self.agent = Agent(self, start_pos=robot_start)\n\n        self.render_env = render\n        self.called_render = False\n\n        self.noise_option = noise_option\n        self.randomize_goal_option = randomize_goal_option\n        self.carrot_reward = carrot_reward\n\n        if init:\n            pygame.init()\n        if render:\n            self.screen = pygame.display.set_mode((600, 600))\n            self.draw_options = pymunk.pygame_util.DrawOptions(self.screen)\n            self.space.debug_draw(self.draw_options)\n            self.clock = pygame.time.Clock()\n\n        # Static barrier walls (lines) that the agent bounces off of\n        self._add_static_scenery()\n        agent_body = self.agent.get_body()\n        agent_shape = self.agent.get_shape()\n        self.space.add(agent_body, agent_shape)\n\n        self.reward_model = Reward(goal=goal, carrot_reward=carrot_reward)\n        self.goal = goal\n        self.goal_threshold = goal_threshold\n\n        self.running = True\n\n        if self.render_env:\n            self._render()\n\n    def reset(self) -> np.ndarray:\n        # Re-running __init__ rebuilds the physics space in place; init=False skips pygame setup\n        self.__init__(robot_start=self.robot_start, goal=self.goal, goal_threshold=self.goal_threshold,\n                      noise_option=self.noise_option, randomize_goal_option=self.randomize_goal_option,\n                      carrot_reward=self.carrot_reward, render=self.render_env, render_step=self.render_step, init=False)\n        return self._get_agent_state()\n\n    def step(self, action) -> tuple:\n        if self.render_env:\n            self._process_keyboard()\n        self.step_count += 1\n\n        self.agent.set_motors(action)\n\n        for _ in range(self.physics_steps_per_frame):\n            self.space.step(self.dt)\n        self._assess_friction()\n\n        if self.render_env and self.step_count % self.render_step == 0:\n            self._clear_screen()\n            self._draw_objects()\n            pygame.display.flip()\n            self.clock.tick(50)\n\n        if self.render_env and self.step_count % 50 == 0:\n            pygame.display.set_caption(str(self.step_count))\n            self._render()\n\n        state_prime = self._get_agent_state()\n        agent_pos = self.agent.get_pos()\n        reward = self.reward_model.calculate_reward(agent_pos)\n\n        done = False\n\n        dist = self._agent_dist_to_goal()\n        if dist <= self.goal_threshold:\n            reward += self.reward_model.reward_goal\n            if self.randomize_goal_option:\n                self.set_new_random_goal()\n            done = True\n\n        # TODO: collision event logging for wall contact\n        # gym-style return tuple; the info slot is currently unused\n        return state_prime, reward, done, None\n\n    def set_new_goal(self, goal: Vec2d) -> None:\n        self.goal = goal\n        self.reward_model.set_new_goal(goal)\n\n    def set_new_random_goal(self) -> None:\n        goal = np.random.randint(110, 490, size=2)\n        self.goal = goal\n        self.reward_model.set_new_goal(goal)\n\n    def _agent_dist_to_goal(self) -> float:\n        pos = self.agent.get_pos()\n        dist = pos.get_distance(self.goal)\n        return dist\n\n    def _env_info_from_agent(self, agent_body) -> None:\n        # TODO: add implementation\n        raise NotImplementedError(\"Currently does nothing.\")\n\n    def _get_agent_state(self) -> np.ndarray:\n        state = self.agent.get_state()\n        return state\n\n    def _assess_friction(self) -> None:\n        self.agent.body.velocity *= self.friction_scalar\n        self.agent.body.angular_velocity *= self.friction_angular_scalar\n\n    def _add_static_scenery(self) -> None:\n        # walls\n        static_lines = [\n            pymunk.Segment(self.space.static_body, (100, 500), (100, 100), 1.0),\n            pymunk.Segment(self.space.static_body, (500, 500), (100, 500), 1.0),\n            pymunk.Segment(self.space.static_body, (500, 100), (500, 500), 1.0),\n            pymunk.Segment(self.space.static_body, (100, 100), (500, 100), 1.0)\n        ]\n        for line in static_lines:\n            line.elasticity = 0.7\n            # pymunk 5+ ignores a bare .group attribute; collision groups go through ShapeFilter\n            line.filter = pymunk.ShapeFilter(group=1)\n\n        self.space.add(*static_lines)\n\n    def _process_keyboard(self) -> None:\n        \"\"\"\n        Handle game and events like keyboard input. Call once per frame only.\n        :return: None\n        \"\"\"\n        for event in pygame.event.get():\n            if event.type == pygame.QUIT:\n                self.running = False\n            if event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:\n                self.running = False\n            if event.type == pygame.KEYDOWN and event.key == pygame.K_p:\n                pygame.image.save(self.screen, \"screenshot.png\")\n            if event.type == pygame.KEYDOWN and event.key == pygame.K_COMMA:\n                self.render_step = max(self.render_step - 1, 1)\n                print(\"Render step is {}\".format(self.render_step))\n            if event.type == pygame.KEYDOWN and event.key == pygame.K_PERIOD:\n                self.render_step = min(self.render_step + 1, 10)\n                print(\"Render step is {}\".format(self.render_step))\n            # Toggle for motor input noise\n            if event.type == pygame.KEYDOWN and event.key == pygame.K_n:\n                self.noise_option = not self.noise_option\n                print(\"Motor force noise added: \" + str(self.noise_option))\n            # Toggle for randomizing goal location on next attainment\n            if event.type == pygame.KEYDOWN and event.key == pygame.K_r:\n                self.randomize_goal_option = not self.randomize_goal_option\n                print(\"Randomizing goal after attainment: \" + str(self.randomize_goal_option))\n\n    def set_not_running(self):\n        self.running = False\n\n    def _render(self) -> None:\n        self._clear_screen()\n        self._draw_objects()\n\n    def _clear_screen(self) -> None:\n        \"\"\"\n        Clears the screen.\n        :return: None\n        \"\"\"\n        self.screen.fill(pygame.Color(\"white\"))\n\n    def _draw_objects(self) -> None:\n        \"\"\"\n        Draw the objects.\n        :return: None\n        \"\"\"\n        pygame.draw.circle(self.screen, (0, 150, 0), self.goal, 10)\n        self.space.debug_draw(self.draw_options)\n","repo_name":"dcat52/AI-Control-DR","sub_path":"src/simulator/Environment.py","file_name":"Environment.py","file_ext":"py","file_size_in_byte":7082,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"}
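The `ShapeFilter` change in `_add_static_scenery` above can be checked in isolation: in pymunk 5+, shapes sharing a non-zero filter group never collide with one another, while assigning a bare `.group` attribute is silently ignored. A minimal standalone demonstration:

```python
import pymunk

space = pymunk.Space()
space.gravity = (0, -900)

# A static floor segment and a ball dropped from directly above it.
floor = pymunk.Segment(space.static_body, (-50, 0), (50, 0), 1.0)
body = pymunk.Body(1, pymunk.moment_for_circle(1, 0, 5))
body.position = (0, 30)
ball = pymunk.Circle(body, 5)

# Same non-zero group => the floor/ball collision is filtered out entirely.
floor.filter = pymunk.ShapeFilter(group=1)
ball.filter = pymunk.ShapeFilter(group=1)

space.add(floor, body, ball)
for _ in range(120):            # simulate two seconds
    space.step(1 / 60.0)
print(body.position.y < 0)      # True: the ball fell straight through the floor
```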